diff --git a/.changeset/free-impalas-doubt.md b/.changeset/free-impalas-doubt.md new file mode 100644 index 000000000..4a6b66900 --- /dev/null +++ b/.changeset/free-impalas-doubt.md @@ -0,0 +1,5 @@ +--- +'@tanstack/ai-groq': minor +--- + +Adds a new @tanstack/ai-groq package(minor release), a Groq AI adapter for TanStack AI. diff --git a/examples/ts-react-chat/package.json b/examples/ts-react-chat/package.json index 9077312b0..db2974a56 100644 --- a/examples/ts-react-chat/package.json +++ b/examples/ts-react-chat/package.json @@ -15,6 +15,7 @@ "@tanstack/ai-client": "workspace:*", "@tanstack/ai-gemini": "workspace:*", "@tanstack/ai-grok": "workspace:*", + "@tanstack/ai-groq": "workspace:*", "@tanstack/ai-ollama": "workspace:*", "@tanstack/ai-openai": "workspace:*", "@tanstack/ai-openrouter": "workspace:*", diff --git a/examples/ts-react-chat/src/lib/model-selection.ts b/examples/ts-react-chat/src/lib/model-selection.ts index a767428e6..0d0b04c57 100644 --- a/examples/ts-react-chat/src/lib/model-selection.ts +++ b/examples/ts-react-chat/src/lib/model-selection.ts @@ -4,6 +4,7 @@ export type Provider = | 'gemini' | 'ollama' | 'grok' + | 'groq' | 'openrouter' export interface ModelOption { @@ -91,6 +92,23 @@ export const MODEL_OPTIONS: Array = [ label: 'Ollama - SmolLM', }, + // Groq + { + provider: 'groq', + model: 'llama-3.3-70b-versatile', + label: 'Groq - Llama 3.3 70B', + }, + { + provider: 'groq', + model: 'meta-llama/llama-4-maverick-17b-128e-instruct', + label: 'Groq - Llama 4 Maverick', + }, + { + provider: 'groq', + model: 'meta-llama/llama-4-scout-17b-16e-instruct', + label: 'Groq - Llama 4 Scout', + }, + // Grok { provider: 'grok', diff --git a/examples/ts-react-chat/src/routes/api.tanchat.ts b/examples/ts-react-chat/src/routes/api.tanchat.ts index 45454c32d..62cb2f1e7 100644 --- a/examples/ts-react-chat/src/routes/api.tanchat.ts +++ b/examples/ts-react-chat/src/routes/api.tanchat.ts @@ -11,6 +11,7 @@ import { anthropicText } from '@tanstack/ai-anthropic' 
import { geminiText } from '@tanstack/ai-gemini' import { openRouterText } from '@tanstack/ai-openrouter' import { grokText } from '@tanstack/ai-grok' +import { groqText } from '@tanstack/ai-groq' import type { AnyTextAdapter } from '@tanstack/ai' import { addToCartToolDef, @@ -26,6 +27,7 @@ type Provider = | 'gemini' | 'ollama' | 'grok' + | 'groq' | 'openrouter' const SYSTEM_PROMPT = `You are a helpful assistant for a guitar store. @@ -131,6 +133,13 @@ export const Route = createFileRoute('/api/tanchat')({ adapter: grokText((model || 'grok-3') as 'grok-3'), modelOptions: {}, }), + groq: () => + createChatOptions({ + adapter: groqText( + (model || + 'llama-3.3-70b-versatile') as 'llama-3.3-70b-versatile', + ), + }), ollama: () => createChatOptions({ adapter: ollamaText((model || 'gpt-oss:120b') as 'gpt-oss:120b'), diff --git a/examples/ts-react-chat/src/routes/index.tsx b/examples/ts-react-chat/src/routes/index.tsx index ddd725747..f40a6bc91 100644 --- a/examples/ts-react-chat/src/routes/index.tsx +++ b/examples/ts-react-chat/src/routes/index.tsx @@ -264,19 +264,25 @@ function ChatPage() { [selectedModel.provider, selectedModel.model], ) - const { messages, sendMessage, isLoading, addToolApprovalResponse, stop } = - useChat({ - connection: fetchServerSentEvents('/api/tanchat'), - tools, - body, - onCustomEvent: (eventType, data, context) => { - console.log( - `[CustomEvent] ${eventType}`, - data, - context.toolCallId ? `(tool call: ${context.toolCallId})` : '', - ) - }, - }) + const { + messages, + sendMessage, + isLoading, + error, + addToolApprovalResponse, + stop, + } = useChat({ + connection: fetchServerSentEvents('/api/tanchat'), + tools, + body, + onCustomEvent: (eventType, data, context) => { + console.log( + `[CustomEvent] ${eventType}`, + data, + context.toolCallId ? 
`(tool call: ${context.toolCallId})` : '', + ) + }, + }) const [input, setInput] = useState('') /** @@ -415,6 +421,12 @@ function ChatPage() { addToolApprovalResponse={addToolApprovalResponse} /> + {error && ( +
+ {error.message} +
+ )} +
{isLoading && ( diff --git a/packages/typescript/ai-groq/README.md b/packages/typescript/ai-groq/README.md new file mode 100644 index 000000000..984b35e72 --- /dev/null +++ b/packages/typescript/ai-groq/README.md @@ -0,0 +1,91 @@ +# @tanstack/ai-groq + +Groq adapter for TanStack AI + +## Installation + +```bash +npm install @tanstack/ai-groq +# or +pnpm add @tanstack/ai-groq +# or +yarn add @tanstack/ai-groq +``` + +## Setup + +Get your API key from [Groq Console](https://console.groq.com) and set it as an environment variable: + +```bash +export GROQ_API_KEY="gsk_..." +``` + +## Usage + +### Text/Chat Adapter + +```typescript +import { groqText } from '@tanstack/ai-groq' +import { generate } from '@tanstack/ai' + +const adapter = groqText('llama-3.3-70b-versatile') + +const result = await generate({ + adapter, + model: 'llama-3.3-70b-versatile', + messages: [ + { role: 'user', content: 'Explain quantum computing in simple terms' }, + ], +}) + +console.log(result.text) +``` + +### With Explicit API Key + +```typescript +import { createGroqText } from '@tanstack/ai-groq' + +const adapter = createGroqText('llama-3.3-70b-versatile', 'gsk_api_key') +``` + +## Supported Models + +### Chat Models + +- `llama-3.3-70b-versatile` - Meta Llama 3.3 70B (131k context) +- `llama-3.1-8b-instant` - Meta Llama 3.1 8B (131k context) +- `meta-llama/llama-4-maverick-17b-128e-instruct` - Meta Llama 4 Maverick (vision) +- `meta-llama/llama-4-scout-17b-16e-instruct` - Meta Llama 4 Scout +- `meta-llama/llama-guard-4-12b` - Meta Llama Guard 4 (content moderation, vision) +- `meta-llama/llama-prompt-guard-2-86m` - Meta Llama Prompt Guard (content moderation) +- `meta-llama/llama-prompt-guard-2-22m` - Meta Llama Prompt Guard (content moderation) +- `openai/gpt-oss-120b` - GPT OSS 120B (reasoning, tools, search) +- `openai/gpt-oss-20b` - GPT OSS 20B (reasoning, search) +- `openai/gpt-oss-safeguard-20b` - GPT OSS Safeguard 20B (content moderation, reasoning) +- 
`moonshotai/kimi-k2-instruct-0905` - Kimi K2 Instruct (262k context) +- `qwen/qwen3-32b` - Qwen3 32B (reasoning, tools) + +## Features + +- ✅ Streaming chat completions +- ✅ Structured output (JSON Schema) +- ✅ Function/tool calling +- ✅ Multimodal input (text + images for vision models) +- ❌ Embeddings (not supported by Groq) +- ❌ Image generation (not supported by Groq) + +## Tree-Shakeable Adapters + +This package uses tree-shakeable adapters, so you only import what you need: + +```typescript +// Only imports text adapter +import { groqText } from '@tanstack/ai-groq' +``` + +This keeps your bundle size small! + +## License + +MIT diff --git a/packages/typescript/ai-groq/package.json b/packages/typescript/ai-groq/package.json new file mode 100644 index 000000000..a967b177f --- /dev/null +++ b/packages/typescript/ai-groq/package.json @@ -0,0 +1,52 @@ +{ + "name": "@tanstack/ai-groq", + "version": "0.0.0", + "type": "module", + "description": "Groq adapter for TanStack AI", + "author": "", + "license": "MIT", + "repository": { + "type": "git", + "url": "git+https://github.com/TanStack/ai.git", + "directory": "packages/typescript/ai-groq" + }, + "module": "./dist/esm/index.js", + "types": "./dist/esm/index.d.ts", + "exports": { + ".": { + "types": "./dist/esm/index.d.ts", + "import": "./dist/esm/index.js" + } + }, + "files": [ + "dist", + "src" + ], + "scripts": { + "build": "vite build", + "clean": "premove ./build ./dist", + "lint:fix": "eslint ./src --fix", + "test:build": "publint --strict", + "test:eslint": "eslint ./src", + "test:lib": "vitest run", + "test:lib:dev": "pnpm test:lib --watch", + "test:types": "tsc" + }, + "keywords": [ + "ai", + "groq", + "tanstack", + "adapter" + ], + "devDependencies": { + "@vitest/coverage-v8": "4.0.14", + "vite": "^7.2.7" + }, + "peerDependencies": { + "@tanstack/ai": "workspace:^", + "zod": "^4.0.0" + }, + "dependencies": { + "groq-sdk": "^0.37.0" + } +} diff --git a/packages/typescript/ai-groq/src/adapters/text.ts 
b/packages/typescript/ai-groq/src/adapters/text.ts new file mode 100644 index 000000000..6e6465f74 --- /dev/null +++ b/packages/typescript/ai-groq/src/adapters/text.ts @@ -0,0 +1,599 @@ +import { BaseTextAdapter } from '@tanstack/ai/adapters' +import { validateTextProviderOptions } from '../text/text-provider-options' +import { convertToolsToProviderFormat } from '../tools' +import { + createGroqClient, + generateId, + getGroqApiKeyFromEnv, + makeGroqStructuredOutputCompatible, + transformNullsToUndefined, +} from '../utils' +import type { + GROQ_CHAT_MODELS, + ResolveInputModalities, + ResolveProviderOptions, +} from '../model-meta' +import type { + StructuredOutputOptions, + StructuredOutputResult, +} from '@tanstack/ai/adapters' +import type GROQ_SDK from 'groq-sdk' +import type { ChatCompletionCreateParamsStreaming } from 'groq-sdk/resources/chat/completions' +import type { + ContentPart, + ModelMessage, + StreamChunk, + TextOptions, +} from '@tanstack/ai' +import type { InternalTextProviderOptions } from '../text/text-provider-options' +import type { + ChatCompletionContentPart, + ChatCompletionMessageParam, + GroqImageMetadata, + GroqMessageMetadataByModality, +} from '../message-types' +import type { GroqClientConfig } from '../utils' + +/** + * Configuration for Groq text adapter + */ +export interface GroqTextConfig extends GroqClientConfig {} + +/** + * Alias for TextProviderOptions for external use + */ +export type { ExternalTextProviderOptions as GroqTextProviderOptions } from '../text/text-provider-options' + +/** + * Groq Text (Chat) Adapter + * + * Tree-shakeable adapter for Groq chat/text completion functionality. + * Uses the Groq SDK which provides an OpenAI-compatible Chat Completions API. 
+ */ +export class GroqTextAdapter< + TModel extends (typeof GROQ_CHAT_MODELS)[number], +> extends BaseTextAdapter< + TModel, + ResolveProviderOptions, + ResolveInputModalities, + GroqMessageMetadataByModality +> { + readonly kind = 'text' as const + readonly name = 'groq' as const + + private client: GROQ_SDK + + constructor(config: GroqTextConfig, model: TModel) { + super({}, model) + this.client = createGroqClient(config) + } + + async *chatStream( + options: TextOptions>, + ): AsyncIterable { + const requestParams = this.mapTextOptionsToGroq(options) + const timestamp = Date.now() + + const aguiState = { + runId: generateId(this.name), + messageId: generateId(this.name), + timestamp, + hasEmittedRunStarted: false, + } + + try { + const stream = await this.client.chat.completions.create({ + ...requestParams, + stream: true, + }) + + yield* this.processGroqStreamChunks(stream, options, aguiState) + } catch (error: unknown) { + const err = error as Error & { code?: string } + + if (!aguiState.hasEmittedRunStarted) { + aguiState.hasEmittedRunStarted = true + yield { + type: 'RUN_STARTED', + runId: aguiState.runId, + model: options.model, + timestamp, + } + } + + yield { + type: 'RUN_ERROR', + runId: aguiState.runId, + model: options.model, + timestamp, + error: { + message: err.message || 'Unknown error', + code: err.code, + }, + } + + console.error('>>> chatStream: Fatal error during response creation <<<') + console.error('>>> Error message:', err.message) + console.error('>>> Error stack:', err.stack) + console.error('>>> Full error:', err) + } + } + + /** + * Generate structured output using Groq's JSON Schema response format. + * Uses stream: false to get the complete response in one call. 
+ * + * Groq has strict requirements for structured output: + * - All properties must be in the `required` array + * - Optional fields should have null added to their type union + * - additionalProperties must be false for all objects + * + * The outputSchema is already JSON Schema (converted in the ai layer). + * We apply Groq-specific transformations for structured output compatibility. + */ + async structuredOutput( + options: StructuredOutputOptions>, + ): Promise> { + const { chatOptions, outputSchema } = options + const requestParams = this.mapTextOptionsToGroq(chatOptions) + + const jsonSchema = makeGroqStructuredOutputCompatible( + outputSchema, + outputSchema.required || [], + ) + + try { + const response = await this.client.chat.completions.create({ + ...requestParams, + stream: false, + response_format: { + type: 'json_schema', + json_schema: { + name: 'structured_output', + schema: jsonSchema, + strict: true, + }, + }, + }) + + const rawText = response.choices[0]?.message.content || '' + + let parsed: unknown + try { + parsed = JSON.parse(rawText) + } catch { + throw new Error( + `Failed to parse structured output as JSON. Content: ${rawText.slice(0, 200)}${rawText.length > 200 ? '...' : ''}`, + ) + } + + const transformed = transformNullsToUndefined(parsed) + + return { + data: transformed, + rawText, + } + } catch (error: unknown) { + const err = error as Error + console.error('>>> structuredOutput: Error during response creation <<<') + console.error('>>> Error message:', err.message) + throw error + } + } + + /** + * Processes streaming chunks from the Groq API and yields AG-UI stream events. + * Handles text content deltas, tool call assembly, and lifecycle events. 
+ */ + private async *processGroqStreamChunks( + stream: AsyncIterable, + options: TextOptions, + aguiState: { + runId: string + messageId: string + timestamp: number + hasEmittedRunStarted: boolean + }, + ): AsyncIterable { + let accumulatedContent = '' + const timestamp = aguiState.timestamp + let hasEmittedTextMessageStart = false + + const toolCallsInProgress = new Map< + number, + { + id: string + name: string + arguments: string + started: boolean + } + >() + + try { + for await (const chunk of stream) { + const choice = chunk.choices[0] + + if (!choice) continue + + if (!aguiState.hasEmittedRunStarted) { + aguiState.hasEmittedRunStarted = true + yield { + type: 'RUN_STARTED', + runId: aguiState.runId, + model: chunk.model || options.model, + timestamp, + } + } + + const delta = choice.delta + const deltaContent = delta.content + const deltaToolCalls = delta.tool_calls + + if (deltaContent) { + if (!hasEmittedTextMessageStart) { + hasEmittedTextMessageStart = true + yield { + type: 'TEXT_MESSAGE_START', + messageId: aguiState.messageId, + model: chunk.model || options.model, + timestamp, + role: 'assistant', + } + } + + accumulatedContent += deltaContent + + yield { + type: 'TEXT_MESSAGE_CONTENT', + messageId: aguiState.messageId, + model: chunk.model || options.model, + timestamp, + delta: deltaContent, + content: accumulatedContent, + } + } + + if (deltaToolCalls) { + for (const toolCallDelta of deltaToolCalls) { + const index = toolCallDelta.index + + if (!toolCallsInProgress.has(index)) { + toolCallsInProgress.set(index, { + id: toolCallDelta.id || '', + name: toolCallDelta.function?.name || '', + arguments: '', + started: false, + }) + } + + const toolCall = toolCallsInProgress.get(index)! 
+ + if (toolCallDelta.id) { + toolCall.id = toolCallDelta.id + } + if (toolCallDelta.function?.name) { + toolCall.name = toolCallDelta.function.name + } + if (toolCallDelta.function?.arguments) { + toolCall.arguments += toolCallDelta.function.arguments + } + + if (toolCall.id && toolCall.name && !toolCall.started) { + toolCall.started = true + yield { + type: 'TOOL_CALL_START', + toolCallId: toolCall.id, + toolName: toolCall.name, + model: chunk.model || options.model, + timestamp, + index, + } + } + + if (toolCallDelta.function?.arguments && toolCall.started) { + yield { + type: 'TOOL_CALL_ARGS', + toolCallId: toolCall.id, + model: chunk.model || options.model, + timestamp, + delta: toolCallDelta.function.arguments, + } + } + } + } + + if (choice.finish_reason) { + if ( + choice.finish_reason === 'tool_calls' || + toolCallsInProgress.size > 0 + ) { + for (const [, toolCall] of toolCallsInProgress) { + if (!toolCall.started || !toolCall.id || !toolCall.name) { + continue + } + + let parsedInput: unknown = {} + try { + parsedInput = toolCall.arguments + ? JSON.parse(toolCall.arguments) + : {} + } catch { + parsedInput = {} + } + + yield { + type: 'TOOL_CALL_END', + toolCallId: toolCall.id, + toolName: toolCall.name, + model: chunk.model || options.model, + timestamp, + input: parsedInput, + } + } + } + + const computedFinishReason = + choice.finish_reason === 'tool_calls' || + toolCallsInProgress.size > 0 + ? 'tool_calls' + : choice.finish_reason === 'length' + ? 'length' + : 'stop' + + if (hasEmittedTextMessageStart) { + yield { + type: 'TEXT_MESSAGE_END', + messageId: aguiState.messageId, + model: chunk.model || options.model, + timestamp, + } + } + + const groqUsage = chunk.x_groq?.usage + + yield { + type: 'RUN_FINISHED', + runId: aguiState.runId, + model: chunk.model || options.model, + timestamp, + usage: groqUsage + ? 
{ + promptTokens: groqUsage.prompt_tokens || 0, + completionTokens: groqUsage.completion_tokens || 0, + totalTokens: groqUsage.total_tokens || 0, + } + : undefined, + finishReason: computedFinishReason, + } + } + } + } catch (error: unknown) { + const err = error as Error & { code?: string } + console.log('[Groq Adapter] Stream ended with error:', err.message) + + yield { + type: 'RUN_ERROR', + runId: aguiState.runId, + model: options.model, + timestamp, + error: { + message: err.message || 'Unknown error occurred', + code: err.code, + }, + } + } + } + + /** + * Maps common TextOptions to Groq-specific Chat Completions request parameters. + */ + private mapTextOptionsToGroq( + options: TextOptions, + ): ChatCompletionCreateParamsStreaming { + const modelOptions = options.modelOptions as + | Omit< + InternalTextProviderOptions, + 'max_tokens' | 'tools' | 'temperature' | 'input' | 'top_p' + > + | undefined + + if (modelOptions) { + validateTextProviderOptions({ + ...modelOptions, + model: options.model, + }) + } + + const tools = options.tools + ? convertToolsToProviderFormat(options.tools) + : undefined + + const messages: Array = [] + + if (options.systemPrompts && options.systemPrompts.length > 0) { + messages.push({ + role: 'system', + content: options.systemPrompts.join('\n'), + }) + } + + for (const message of options.messages) { + messages.push(this.convertMessageToGroq(message)) + } + + return { + model: options.model, + messages, + temperature: options.temperature, + max_tokens: options.maxTokens, + top_p: options.topP, + tools, + stream: true, + } + } + + /** + * Converts a TanStack AI ModelMessage to a Groq ChatCompletionMessageParam. + * Handles tool, assistant, and user messages including multimodal content. + */ + private convertMessageToGroq( + message: ModelMessage, + ): ChatCompletionMessageParam { + if (message.role === 'tool') { + return { + role: 'tool', + tool_call_id: message.toolCallId || '', + content: + typeof message.content === 'string' + ? 
message.content + : JSON.stringify(message.content), + } + } + + if (message.role === 'assistant') { + const toolCalls = message.toolCalls?.map((tc) => ({ + id: tc.id, + type: 'function' as const, + function: { + name: tc.function.name, + arguments: + typeof tc.function.arguments === 'string' + ? tc.function.arguments + : JSON.stringify(tc.function.arguments), + }, + })) + + return { + role: 'assistant', + content: this.extractTextContent(message.content), + ...(toolCalls && toolCalls.length > 0 ? { tool_calls: toolCalls } : {}), + } + } + + const contentParts = this.normalizeContent(message.content) + + if (contentParts.length === 1 && contentParts[0]?.type === 'text') { + return { + role: 'user', + content: contentParts[0].content, + } + } + + const parts: Array = [] + for (const part of contentParts) { + if (part.type === 'text') { + parts.push({ type: 'text', text: part.content }) + } else if (part.type === 'image') { + const imageMetadata = part.metadata as GroqImageMetadata | undefined + const imageValue = part.source.value + const imageUrl = + part.source.type === 'data' && !imageValue.startsWith('data:') + ? `data:${part.source.mimeType};base64,${imageValue}` + : imageValue + parts.push({ + type: 'image_url', + image_url: { + url: imageUrl, + detail: imageMetadata?.detail || 'auto', + }, + }) + } + } + + return { + role: 'user', + content: parts.length > 0 ? parts : '', + } + } + + /** + * Normalizes message content to an array of ContentPart. + * Handles backward compatibility with string content. + */ + private normalizeContent( + content: string | null | Array, + ): Array { + if (content === null) { + return [] + } + if (typeof content === 'string') { + return [{ type: 'text', content: content }] + } + return content + } + + /** + * Extracts text content from a content value that may be string, null, or ContentPart array. 
+ */ + private extractTextContent( + content: string | null | Array, + ): string { + if (content === null) { + return '' + } + if (typeof content === 'string') { + return content + } + return content + .filter((p) => p.type === 'text') + .map((p) => p.content) + .join('') + } +} + +/** + * Creates a Groq text adapter with explicit API key. + * Type resolution happens here at the call site. + * + * @param model - The model name (e.g., 'llama-3.3-70b-versatile', 'openai/gpt-oss-120b') + * @param apiKey - Your Groq API key + * @param config - Optional additional configuration + * @returns Configured Groq text adapter instance with resolved types + * + * @example + * ```typescript + * const adapter = createGroqText('llama-3.3-70b-versatile', "gsk_..."); + * // adapter has type-safe providerOptions for llama-3.3-70b-versatile + * ``` + */ +export function createGroqText< + TModel extends (typeof GROQ_CHAT_MODELS)[number], +>( + model: TModel, + apiKey: string, + config?: Omit, +): GroqTextAdapter { + return new GroqTextAdapter({ apiKey, ...config }, model) +} + +/** + * Creates a Groq text adapter with automatic API key detection from environment variables. + * Type resolution happens here at the call site. + * + * Looks for `GROQ_API_KEY` in: + * - `process.env` (Node.js) + * - `window.env` (Browser with injected env) + * + * @param model - The model name (e.g., 'llama-3.3-70b-versatile', 'openai/gpt-oss-120b') + * @param config - Optional configuration (excluding apiKey which is auto-detected) + * @returns Configured Groq text adapter instance with resolved types + * @throws Error if GROQ_API_KEY is not found in environment + * + * @example + * ```typescript + * // Automatically uses GROQ_API_KEY from environment + * const adapter = groqText('llama-3.3-70b-versatile'); + * + * const stream = chat({ + * adapter, + * messages: [{ role: "user", content: "Hello!" 
}] + * }); + * ``` + */ +export function groqText( + model: TModel, + config?: Omit, +): GroqTextAdapter { + const apiKey = getGroqApiKeyFromEnv() + return createGroqText(model, apiKey, config) +} diff --git a/packages/typescript/ai-groq/src/index.ts b/packages/typescript/ai-groq/src/index.ts new file mode 100644 index 000000000..ff2d02872 --- /dev/null +++ b/packages/typescript/ai-groq/src/index.ts @@ -0,0 +1,33 @@ +/** + * @module @tanstack/ai-groq + * + * Groq provider adapter for TanStack AI. + * Provides tree-shakeable adapters for Groq's Chat Completions API. + */ + +// Text (Chat) adapter +export { + GroqTextAdapter, + createGroqText, + groqText, + type GroqTextConfig, + type GroqTextProviderOptions, +} from './adapters/text' + +// Types +export type { + GroqChatModelProviderOptionsByName, + GroqModelInputModalitiesByName, + ResolveProviderOptions, + ResolveInputModalities, + GroqChatModels, +} from './model-meta' +export { GROQ_CHAT_MODELS } from './model-meta' +export type { + GroqTextMetadata, + GroqImageMetadata, + GroqAudioMetadata, + GroqVideoMetadata, + GroqDocumentMetadata, + GroqMessageMetadataByModality, +} from './message-types' diff --git a/packages/typescript/ai-groq/src/message-types.ts b/packages/typescript/ai-groq/src/message-types.ts new file mode 100644 index 000000000..42c218189 --- /dev/null +++ b/packages/typescript/ai-groq/src/message-types.ts @@ -0,0 +1,359 @@ +/** + * Groq-specific message types for the Chat Completions API. + * + * These type definitions mirror the Groq SDK types and are used internally + * by the adapter to avoid tight coupling to the SDK's exported types. + * + * @see https://console.groq.com/docs/api-reference#chat + */ + +export interface ChatCompletionContentPartText { + /** The text content. */ + text: string + + /** The type of the content part. */ + type: 'text' +} + +export interface ChatCompletionContentPartImage { + image_url: { + /** Either a URL of the image or the base64 encoded image data. 
*/ + url: string + + /** Specifies the detail level of the image. */ + detail?: 'auto' | 'low' | 'high' + } + + /** The type of the content part. */ + type: 'image_url' +} + +export interface ChatCompletionMessageToolCall { + /** The ID of the tool call. */ + id: string + + /** The function that the model called. */ + function: { + /** + * The arguments to call the function with, as generated by the model in JSON + * format. Note that the model does not always generate valid JSON, and may + * hallucinate parameters not defined by your function schema. Validate the + * arguments in your code before calling your function. + */ + arguments: string + + /** The name of the function to call. */ + name: string + } + + /** The type of the tool. Currently, only `function` is supported. */ + type: 'function' +} + +export interface ChatCompletionRequestMessageContentPartDocument { + document: { + /** The JSON document data. */ + data: { [key: string]: unknown } + + /** Optional unique identifier for the document. */ + id?: string | null + } + + /** The type of the content part. */ + type: 'document' +} + +export type FunctionParameters = { [key: string]: unknown } + +export interface ChatCompletionNamedToolChoice { + Function: { + /** The name of the function to call. */ + name: string + } +} + +export interface FunctionDefinition { + /** + * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain + * underscores and dashes, with a maximum length of 64. + */ + name: string + + /** + * A description of what the function does, used by the model to choose when and + * how to call the function. + */ + description?: string + + /** + * Function parameters defined as a JSON Schema object. + * @see https://json-schema.org/understanding-json-schema/ + */ + parameters?: FunctionParameters + + /** + * Whether to enable strict schema adherence when generating the output. If set to + * true, the model will always follow the exact schema defined in the `schema` + * field. 
Only a subset of JSON Schema is supported when `strict` is `true`. + */ + strict?: boolean +} + +/** + * Controls which (if any) tool is called by the model. + * + * - `none` — the model will not call any tool and instead generates a message + * - `auto` — the model can pick between generating a message or calling tools + * - `required` — the model must call one or more tools + * - Named tool choice — forces the model to call a specific tool + */ +export type ChatCompletionToolChoiceOption = + | 'none' + | 'auto' + | 'required' + | ChatCompletionNamedToolChoice + +export type ChatCompletionContentPart = + | ChatCompletionContentPartText + | ChatCompletionContentPartImage + | ChatCompletionRequestMessageContentPartDocument + +export interface ChatCompletionAssistantMessageParam { + /** The role of the messages author, in this case `assistant`. */ + role: 'assistant' + + /** + * The contents of the assistant message. Required unless `tool_calls` or + * `function_call` is specified. + */ + content?: string | Array | null + + /** An optional name for the participant. */ + name?: string + + /** + * The reasoning output by the assistant if reasoning_format was set to 'parsed'. + * This field is only useable with qwen3 models. + */ + reasoning?: string | null + + /** The tool calls generated by the model, such as function calls. */ + tool_calls?: Array +} + +export interface ChatCompletionTool { + /** + * The type of the tool. `function`, `browser_search`, and `code_interpreter` are + * supported. + */ + type: 'function' | 'browser_search' | 'code_interpreter' | (string & {}) + + function?: FunctionDefinition +} + +export interface ChatCompletionToolMessageParam { + /** The contents of the tool message. */ + content: string | Array + + /** The role of the messages author, in this case `tool`. */ + role: 'tool' + + /** Tool call that this message is responding to. 
*/ + tool_call_id: string +} + +export interface ChatCompletionSystemMessageParam { + /** The contents of the system message. */ + content: string | Array + + /** The role of the messages author, in this case `system`. */ + role: 'system' | 'developer' + + /** An optional name for the participant. */ + name?: string +} + +export interface ChatCompletionUserMessageParam { + /** The contents of the user message. */ + content: string | Array + + /** The role of the messages author, in this case `user`. */ + role: 'user' + + /** An optional name for the participant. */ + name?: string +} + +/** + * Union of all supported chat completion message params. + */ +export type ChatCompletionMessageParam = + | ChatCompletionSystemMessageParam + | ChatCompletionUserMessageParam + | ChatCompletionAssistantMessageParam + | ChatCompletionToolMessageParam + +export interface CompoundCustomModels { + /** Custom model to use for answering. */ + answering_model?: string | null + + /** Custom model to use for reasoning. */ + reasoning_model?: string | null +} + +export interface CompoundCustomTools { + /** A list of tool names that are enabled for the request. */ + enabled_tools?: Array | null + + /** Configuration for the Wolfram tool integration. */ + wolfram_settings?: CompoundCustomToolsWolframSettings | null +} + +export interface CompoundCustomToolsWolframSettings { + /** API key used to authorize requests to Wolfram services. */ + authorization?: string | null +} + +export interface CompoundCustom { + models?: CompoundCustomModels | null + + /** Configuration options for tools available to Compound. */ + tools?: CompoundCustomTools | null +} + +export interface DocumentSourceText { + /** The document contents. */ + text: string + + /** Identifies this document source as inline text. */ + type: 'text' +} + +export interface DocumentSourceJson { + /** The JSON payload associated with the document. 
*/ + data: { [key: string]: unknown } + + /** Identifies this document source as JSON data. */ + type: 'json' +} + +export interface Document { + /** The source of the document. Only text and JSON sources are currently supported. */ + source: DocumentSourceText | DocumentSourceJson + + /** Optional unique identifier that can be used for citations in responses. */ + id?: string | null +} + +export interface ResponseFormatText { + /** The type of response format being defined. Always `text`. */ + type: 'text' +} + +export interface ResponseFormatJsonSchemaJsonSchema { + /** + * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores + * and dashes, with a maximum length of 64. + */ + name: string + + /** + * A description of what the response format is for, used by the model to determine + * how to respond in the format. + */ + description?: string + + /** + * The schema for the response format, described as a JSON Schema object. + * @see https://json-schema.org/ + */ + schema?: { [key: string]: unknown } + + /** + * Whether to enable strict schema adherence when generating the output. If set to + * true, the model will always follow the exact schema defined in the `schema` + * field. Only a subset of JSON Schema is supported when `strict` is `true`. + */ + strict?: boolean | null +} + +export interface ResponseFormatJsonSchema { + /** Structured Outputs configuration options, including a JSON Schema. */ + json_schema: ResponseFormatJsonSchemaJsonSchema + + /** The type of response format being defined. Always `json_schema`. */ + type: 'json_schema' +} + +export interface ResponseFormatJsonObject { + /** The type of response format being defined. Always `json_object`. */ + type: 'json_object' +} + +export interface SearchSettings { + /** + * Name of country to prioritize search results from + * (e.g., "united states", "germany", "france"). + */ + country?: string | null + + /** A list of domains to exclude from the search results. 
*/ + exclude_domains?: Array<string> | null + + /** A list of domains to include in the search results. */ + include_domains?: Array<string> | null + + /** Whether to include images in the search results. */ + include_images?: boolean | null +} + +/** + * Metadata for Groq document content parts. + */ +export interface GroqDocumentMetadata {} + +/** + * Metadata for Groq text content parts. + * Currently no specific metadata options for text in Groq. + */ +export interface GroqTextMetadata {} + +/** + * Metadata for Groq image content parts. + * Controls how the model processes and analyzes images. + */ +export interface GroqImageMetadata { + /** + * Specifies the detail level of the image. + * - 'auto': Let the model decide based on image size and content + * - 'low': Use low resolution processing (faster, cheaper, less detail) + * - 'high': Use high resolution processing (slower, more expensive, more detail) + * + * @default 'auto' + */ + detail?: 'auto' | 'low' | 'high' +} + +/** + * Metadata for Groq audio content parts. + * Note: Audio support in Groq is limited; check current API capabilities. + */ +export interface GroqAudioMetadata {} + +/** + * Metadata for Groq video content parts. + * Note: Groq does not currently support video input. + */ +export interface GroqVideoMetadata {} + +/** + * Map of modality types to their Groq-specific metadata types. + * Used for type inference when constructing multimodal messages.
+ */ +export interface GroqMessageMetadataByModality { + text: GroqTextMetadata + image: GroqImageMetadata + audio: GroqAudioMetadata + video: GroqVideoMetadata + document: GroqDocumentMetadata +} diff --git a/packages/typescript/ai-groq/src/model-meta.ts b/packages/typescript/ai-groq/src/model-meta.ts new file mode 100644 index 000000000..83ce38800 --- /dev/null +++ b/packages/typescript/ai-groq/src/model-meta.ts @@ -0,0 +1,370 @@ +import type { GroqTextProviderOptions } from './text/text-provider-options' + +/** + * Internal metadata structure describing a Groq model's capabilities and pricing. + */ +interface ModelMeta<TProviderOptions = GroqTextProviderOptions> { + name: string + context_window?: number + max_completion_tokens?: number + pricing: { + input?: { normal: number; cached?: number } + output?: { normal: number } + } + supports: { + input: Array<'text' | 'image' | 'audio'> + output: Array<'text' | 'audio'> + endpoints: Array<'chat' | 'tts' | 'transcription' | 'batch'> + + features: Array< + | 'streaming' + | 'tools' + | 'json_object' + | 'browser_search' + | 'code_execution' + | 'reasoning' + | 'content_moderation' + | 'json_schema' + | 'vision' + > + } + /** + * Type-level description of which provider options this model supports.
+ */ + providerOptions?: TProviderOptions +} + +const LLAMA_3_3_70B_VERSATILE = { + name: 'llama-3.3-70b-versatile', + context_window: 131_072, + max_completion_tokens: 32_768, + pricing: { + input: { + normal: 0.59, + }, + output: { + normal: 0.79, + }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object'], + }, +} as const satisfies ModelMeta + +const LLAMA_4_MAVERICK_17B_128E_INSTRUCT = { + name: 'meta-llama/llama-4-maverick-17b-128e-instruct', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { + normal: 0.2, + }, + output: { + normal: 0.6, + }, + }, + supports: { + input: ['text', 'image'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object', 'json_schema', 'vision'], + }, +} as const satisfies ModelMeta + +const LLAMA_4_SCOUT_17B_16E_INSTRUCT = { + name: 'meta-llama/llama-4-scout-17b-16e-instruct', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { + normal: 0.05, + }, + output: { + normal: 0.08, + }, + }, + supports: { + input: ['text', 'image'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object'], + }, +} as const satisfies ModelMeta + +const LLAMA_GUARD_4_12B = { + name: 'meta-llama/llama-guard-4-12b', + context_window: 131_072, + max_completion_tokens: 1024, + pricing: { + input: { + normal: 0.2, + }, + output: { + normal: 0.2, + }, + }, + supports: { + input: ['text', 'image'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'json_object', 'content_moderation', 'vision'], + }, +} as const satisfies ModelMeta + +const LLAMA_PROMPT_GUARD_2_86M = { + name: 'meta-llama/llama-prompt-guard-2-86m', + context_window: 512, + max_completion_tokens: 512, + pricing: { + input: { + normal: 0.04, + }, + output: { + normal: 0.04, + }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: 
['streaming', 'content_moderation', 'json_object'], + }, +} as const satisfies ModelMeta + +const LLAMA_3_1_8B_INSTANT = { + name: 'llama-3.1-8b-instant', + context_window: 131_072, + max_completion_tokens: 131_072, + pricing: { + input: { + normal: 0.05, + }, + output: { + normal: 0.08, + }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'json_object', 'tools'], + }, +} as const satisfies ModelMeta + +const LLAMA_PROMPT_GUARD_2_22M = { + name: 'meta-llama/llama-prompt-guard-2-22m', + context_window: 512, + max_completion_tokens: 512, + pricing: { + input: { + normal: 0.03, + }, + output: { + normal: 0.03, + }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'content_moderation'], + }, +} as const satisfies ModelMeta + +const GPT_OSS_120B = { + name: 'openai/gpt-oss-120b', + context_window: 131_072, + max_completion_tokens: 65_536, + pricing: { + input: { + normal: 0.15, + cached: 0.075, + }, + output: { + normal: 0.6, + }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: [ + 'streaming', + 'json_object', + 'json_schema', + 'tools', + 'browser_search', + 'code_execution', + 'reasoning', + ], + }, +} as const satisfies ModelMeta + +const GPT_OSS_SAFEGUARD_20B = { + name: 'openai/gpt-oss-safeguard-20b', + context_window: 131_072, + max_completion_tokens: 65_536, + pricing: { + input: { + normal: 0.075, + cached: 0.037, + }, + output: { + normal: 0.3, + }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: [ + 'streaming', + 'tools', + 'browser_search', + 'code_execution', + 'json_object', + 'json_schema', + 'reasoning', + 'content_moderation', + ], + }, +} as const satisfies ModelMeta + +const GPT_OSS_20B = { + name: 'openai/gpt-oss-20b', + context_window: 131_072, + max_completion_tokens: 65_536, + pricing: { + input: { + normal: 0.075, + cached: 0.037, + }, + 
output: { + normal: 0.3, + }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: [ + 'streaming', + 'browser_search', + 'code_execution', + 'json_object', + 'json_schema', + 'reasoning', + 'tools', + ], + }, +} as const satisfies ModelMeta + +const KIMI_K2_INSTRUCT_0905 = { + name: 'moonshotai/kimi-k2-instruct-0905', + context_window: 262_144, + max_completion_tokens: 16_384, + pricing: { + input: { + normal: 1, + cached: 0.5, + }, + output: { + normal: 3, + }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object', 'json_schema'], + }, +} as const satisfies ModelMeta + +const QWEN3_32B = { + name: 'qwen/qwen3-32b', + context_window: 131_072, + max_completion_tokens: 40_960, + pricing: { + input: { + normal: 0.29, + }, + output: { + normal: 0.59, + }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'json_object', 'tools', 'reasoning'], + }, +} as const satisfies ModelMeta + +/** + * All supported Groq chat model identifiers. + */ +export const GROQ_CHAT_MODELS = [ + LLAMA_3_1_8B_INSTANT.name, + LLAMA_3_3_70B_VERSATILE.name, + LLAMA_4_MAVERICK_17B_128E_INSTRUCT.name, + LLAMA_4_SCOUT_17B_16E_INSTRUCT.name, + LLAMA_GUARD_4_12B.name, + LLAMA_PROMPT_GUARD_2_86M.name, + LLAMA_PROMPT_GUARD_2_22M.name, + GPT_OSS_20B.name, + GPT_OSS_120B.name, + GPT_OSS_SAFEGUARD_20B.name, + KIMI_K2_INSTRUCT_0905.name, + QWEN3_32B.name, +] as const + +/** + * Union type of all supported Groq chat model names. + */ +export type GroqChatModels = (typeof GROQ_CHAT_MODELS)[number] + +/** + * Type-only map from Groq chat model name to its supported input modalities. 
+ */ +export type GroqModelInputModalitiesByName = { + [LLAMA_3_1_8B_INSTANT.name]: typeof LLAMA_3_1_8B_INSTANT.supports.input + [LLAMA_3_3_70B_VERSATILE.name]: typeof LLAMA_3_3_70B_VERSATILE.supports.input + [LLAMA_4_MAVERICK_17B_128E_INSTRUCT.name]: typeof LLAMA_4_MAVERICK_17B_128E_INSTRUCT.supports.input + [LLAMA_4_SCOUT_17B_16E_INSTRUCT.name]: typeof LLAMA_4_SCOUT_17B_16E_INSTRUCT.supports.input + [LLAMA_GUARD_4_12B.name]: typeof LLAMA_GUARD_4_12B.supports.input + [LLAMA_PROMPT_GUARD_2_86M.name]: typeof LLAMA_PROMPT_GUARD_2_86M.supports.input + [LLAMA_PROMPT_GUARD_2_22M.name]: typeof LLAMA_PROMPT_GUARD_2_22M.supports.input + [GPT_OSS_20B.name]: typeof GPT_OSS_20B.supports.input + [GPT_OSS_120B.name]: typeof GPT_OSS_120B.supports.input + [GPT_OSS_SAFEGUARD_20B.name]: typeof GPT_OSS_SAFEGUARD_20B.supports.input + [KIMI_K2_INSTRUCT_0905.name]: typeof KIMI_K2_INSTRUCT_0905.supports.input + [QWEN3_32B.name]: typeof QWEN3_32B.supports.input +} + +/** + * Type-only map from Groq chat model name to its provider options type. + */ +export type GroqChatModelProviderOptionsByName = { + [K in (typeof GROQ_CHAT_MODELS)[number]]: GroqTextProviderOptions +} + +/** + * Resolves the provider options type for a specific Groq model. + * Falls back to generic GroqTextProviderOptions for unknown models. + */ +export type ResolveProviderOptions<TModel extends string> = + TModel extends keyof GroqChatModelProviderOptionsByName + ? GroqChatModelProviderOptionsByName[TModel] + : GroqTextProviderOptions + +/** + * Resolve input modalities for a specific model. + * If the model has explicit modalities in the map, use those; otherwise use text only. + */ +export type ResolveInputModalities<TModel extends string> = + TModel extends keyof GroqModelInputModalitiesByName + ?
GroqModelInputModalitiesByName[TModel] + : readonly ['text'] diff --git a/packages/typescript/ai-groq/src/text/text-provider-options.ts b/packages/typescript/ai-groq/src/text/text-provider-options.ts new file mode 100644 index 000000000..c3ee2309e --- /dev/null +++ b/packages/typescript/ai-groq/src/text/text-provider-options.ts @@ -0,0 +1,225 @@ +import type { + ChatCompletionMessageParam, + ChatCompletionTool, + ChatCompletionToolChoiceOption, + CompoundCustom, + Document, + ResponseFormatJsonObject, + ResponseFormatJsonSchema, + ResponseFormatText, + SearchSettings, +} from '../message-types' + +/** + * Groq-specific provider options for text/chat models. + * + * These options extend the standard Chat Completions API parameters + * with Groq-specific features like compound models and search settings. + * + * @see https://console.groq.com/docs/api-reference#chat + */ +export interface GroqTextProviderOptions { + /** + * Whether to enable citations in the response. When enabled, the model will + * include citations for information retrieved from provided documents or web + * searches. + */ + citation_options?: 'enabled' | 'disabled' | null + + /** Custom configuration of models and tools for Compound. */ + compound_custom?: CompoundCustom | null + + /** + * If set to true, Groq will return called tools without validating that the tool + * is present in request.tools. tool_choice=required/none will still be enforced, + * but the request cannot require a specific tool be used. + */ + disable_tool_validation?: boolean + + /** + * A list of documents to provide context for the conversation. Each document + * contains text that can be referenced by the model. + */ + documents?: Array<Document> | null + + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their + * existing frequency in the text so far, decreasing the model's likelihood to + * repeat the same line verbatim.
+ */ + frequency_penalty?: number | null + + /** + * Whether to include reasoning in the response. This field is mutually exclusive + * with `reasoning_format`. + */ + include_reasoning?: boolean | null + + /** Modify the likelihood of specified tokens appearing in the completion. */ + logit_bias?: { [key: string]: number } | null + + /** + * Whether to return log probabilities of the output tokens or not. If true, + * returns the log probabilities of each output token returned in the `content` + * of `message`. + */ + logprobs?: boolean | null + + /** + * The maximum number of tokens that can be generated in the chat completion. The + * total length of input tokens and generated tokens is limited by the model's + * context length. + */ + max_completion_tokens?: number | null + + /** Request metadata. */ + metadata?: { [key: string]: string } | null + + /** + * How many chat completion choices to generate for each input message. + * Currently only n=1 is supported. + */ + n?: number | null + + /** Whether to enable parallel function calling during tool use. */ + parallel_tool_calls?: boolean | null + + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on + * whether they appear in the text so far, increasing the model's likelihood to + * talk about new topics. + */ + presence_penalty?: number | null + + /** + * Controls reasoning effort for supported models. + * + * - qwen3 models: `'none'` to disable, `'default'` or null to enable + * - openai/gpt-oss models: `'low'`, `'medium'` (default), or `'high'` + */ + reasoning_effort?: 'none' | 'default' | 'low' | 'medium' | 'high' | null + + /** + * Specifies how to output reasoning tokens. + * This field is mutually exclusive with `include_reasoning`. + */ + reasoning_format?: 'hidden' | 'raw' | 'parsed' | null + + /** + * An object specifying the format that the model must output. 
+ * + * - `json_schema` — enables Structured Outputs (preferred) + * - `json_object` — enables the older JSON mode + * - `text` — plain text output (default) + * + * @see https://console.groq.com/docs/structured-outputs + */ + response_format?: + | ResponseFormatText + | ResponseFormatJsonSchema + | ResponseFormatJsonObject + | null + + /** Settings for web search functionality when the model uses a web search tool. */ + search_settings?: SearchSettings | null + + /** + * If specified, our system will make a best effort to sample deterministically, + * such that repeated requests with the same `seed` and parameters should return + * the same result. + */ + seed?: number | null + + /** + * The service tier to use for the request. + * + * - `auto` — automatically select the highest tier available + * - `flex` — uses the flex tier, which will succeed or fail quickly + */ + service_tier?: 'auto' | 'on_demand' | 'flex' | 'performance' | null + + /** + * Up to 4 sequences where the API will stop generating further tokens. + * The returned text will not contain the stop sequence. + */ + stop?: string | null | Array<string> + + /** Whether to store the request for future use. */ + store?: boolean | null + + /** + * Sampling temperature between 0 and 2. Higher values like 0.8 will make the + * output more random, while lower values like 0.2 will make it more focused + * and deterministic. We generally recommend altering this or top_p but not both. + */ + temperature?: number | null + + /** + * Controls which (if any) tool is called by the model. + * + * - `none` — never call tools + * - `auto` — model decides (default when tools are present) + * - `required` — model must call tools + * - Named choice — forces a specific tool + */ + tool_choice?: ChatCompletionToolChoiceOption | null + + /** + * An integer between 0 and 20 specifying the number of most likely tokens to + * return at each token position. `logprobs` must be set to `true` if this + * parameter is used.
+ */ + top_logprobs?: number | null + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + */ + top_p?: number | null + + /** + * A unique identifier representing your end-user, which can help monitor and + * detect abuse. + */ + user?: string | null +} + +/** + * Internal options interface used for validation within the adapter. + * Extends provider options with required fields for API requests. + */ +export interface InternalTextProviderOptions extends GroqTextProviderOptions { + /** An array of messages comprising the conversation. */ + messages: Array<ChatCompletionMessageParam> + + /** + * The model name (e.g. "llama-3.3-70b-versatile", "openai/gpt-oss-120b"). + * @see https://console.groq.com/docs/models + */ + model: string + + /** Whether to stream partial message deltas as server-sent events. */ + stream?: boolean | null + + /** + * Tools the model may call (functions, code_interpreter, etc). + * @see https://console.groq.com/docs/tool-use + */ + tools?: Array<ChatCompletionTool> +} + +/** + * External provider options (what users pass in) + */ +export type ExternalTextProviderOptions = GroqTextProviderOptions + +/** + * Validates text provider options. + * Basic validation stub — Groq API handles detailed validation.
+ */ +export function validateTextProviderOptions( + _options: InternalTextProviderOptions, +): void { + // Groq API handles detailed validation +} diff --git a/packages/typescript/ai-groq/src/tools/function-tool.ts b/packages/typescript/ai-groq/src/tools/function-tool.ts new file mode 100644 index 000000000..40a68400c --- /dev/null +++ b/packages/typescript/ai-groq/src/tools/function-tool.ts @@ -0,0 +1,44 @@ +import { makeGroqStructuredOutputCompatible } from '../utils/schema-converter' +import type { JSONSchema, Tool } from '@tanstack/ai' +import type { ChatCompletionTool } from '../message-types' + +export type FunctionTool = ChatCompletionTool + +/** + * Converts a standard Tool to Groq ChatCompletionTool format. + * + * Tool schemas are already converted to JSON Schema in the ai layer. + * We apply Groq-specific transformations for strict mode: + * - All properties in required array + * - Optional fields made nullable + * - additionalProperties: false + */ +export function convertFunctionToolToAdapterFormat(tool: Tool): FunctionTool { + const inputSchema = (tool.inputSchema ?? { + type: 'object', + properties: {}, + required: [], + }) as JSONSchema + + // Ensure object schemas always have properties (e.g. 
z.object({}) may produce { type: 'object' } without properties) + if (inputSchema.type === 'object' && !inputSchema.properties) { + inputSchema.properties = {} + } + + const jsonSchema = makeGroqStructuredOutputCompatible( + inputSchema, + inputSchema.required || [], + ) + + jsonSchema.additionalProperties = false + + return { + type: 'function', + function: { + name: tool.name, + description: tool.description, + parameters: jsonSchema, + strict: true, + }, + } satisfies FunctionTool +} diff --git a/packages/typescript/ai-groq/src/tools/index.ts b/packages/typescript/ai-groq/src/tools/index.ts new file mode 100644 index 000000000..c90334153 --- /dev/null +++ b/packages/typescript/ai-groq/src/tools/index.ts @@ -0,0 +1,5 @@ +export { + convertFunctionToolToAdapterFormat, + type FunctionTool, +} from './function-tool' +export { convertToolsToProviderFormat } from './tool-converter' diff --git a/packages/typescript/ai-groq/src/tools/tool-converter.ts b/packages/typescript/ai-groq/src/tools/tool-converter.ts new file mode 100644 index 000000000..08013bdb8 --- /dev/null +++ b/packages/typescript/ai-groq/src/tools/tool-converter.ts @@ -0,0 +1,15 @@ +import { convertFunctionToolToAdapterFormat } from './function-tool' +import type { FunctionTool } from './function-tool' +import type { Tool } from '@tanstack/ai' + +/** + * Converts an array of standard Tools to Groq-specific format. + * Groq uses an OpenAI-compatible API, so we primarily support function tools. 
+ */ +export function convertToolsToProviderFormat( + tools: Array<Tool>, +): Array<FunctionTool> { + return tools.map((tool) => { + return convertFunctionToolToAdapterFormat(tool) + }) +} diff --git a/packages/typescript/ai-groq/src/utils/client.ts b/packages/typescript/ai-groq/src/utils/client.ts new file mode 100644 index 000000000..f143193d2 --- /dev/null +++ b/packages/typescript/ai-groq/src/utils/client.ts @@ -0,0 +1,42 @@ +import Groq_SDK from 'groq-sdk' +import type { ClientOptions } from 'groq-sdk' + +export interface GroqClientConfig extends ClientOptions { + apiKey: string +} + +/** + * Creates a Groq SDK client instance + */ +export function createGroqClient(config: GroqClientConfig): Groq_SDK { + return new Groq_SDK(config) +} + +/** + * Gets Groq API key from environment variables + * @throws Error if GROQ_API_KEY is not found + */ +export function getGroqApiKeyFromEnv(): string { + const env = + typeof globalThis !== 'undefined' && (globalThis as any).window?.env + ? (globalThis as any).window.env + : typeof process !== 'undefined' + ? process.env + : undefined + const key = env?.GROQ_API_KEY + + if (!key) { + throw new Error( + 'GROQ_API_KEY is required.
Please set it in your environment variables or use the factory function with an explicit API key.', + ) + } + + return key +} + +/** + * Generates a unique ID with a prefix + */ +export function generateId(prefix: string): string { + return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}` +} diff --git a/packages/typescript/ai-groq/src/utils/index.ts b/packages/typescript/ai-groq/src/utils/index.ts new file mode 100644 index 000000000..17899f56a --- /dev/null +++ b/packages/typescript/ai-groq/src/utils/index.ts @@ -0,0 +1,10 @@ +export { + createGroqClient, + getGroqApiKeyFromEnv, + generateId, + type GroqClientConfig, +} from './client' +export { + makeGroqStructuredOutputCompatible, + transformNullsToUndefined, +} from './schema-converter' diff --git a/packages/typescript/ai-groq/src/utils/schema-converter.ts b/packages/typescript/ai-groq/src/utils/schema-converter.ts new file mode 100644 index 000000000..d0a57cf44 --- /dev/null +++ b/packages/typescript/ai-groq/src/utils/schema-converter.ts @@ -0,0 +1,110 @@ +/** + * Recursively transform null values to undefined in an object. + * + * This is needed because Groq's structured output requires all fields to be + * in the `required` array, with optional fields made nullable (type: ["string", "null"]). + * When Groq returns null for optional fields, we need to convert them back to + * undefined to match the original Zod schema expectations. 
+ * + * @param obj - Object to transform + * @returns Object with nulls converted to undefined + */ +export function transformNullsToUndefined<T>(obj: T): T { + if (obj === null) { + return undefined as unknown as T + } + + if (Array.isArray(obj)) { + return obj.map((item) => transformNullsToUndefined(item)) as unknown as T + } + + if (typeof obj === 'object') { + const result: Record<string, unknown> = {} + for (const [key, value] of Object.entries(obj as Record<string, unknown>)) { + const transformed = transformNullsToUndefined(value) + if (transformed !== undefined) { + result[key] = transformed + } + } + return result as T + } + + return obj +} + +/** + * Transform a JSON schema to be compatible with Groq's structured output requirements. + * + * Groq requires: + * - All properties must be in the `required` array + * - Optional fields should have null added to their type union + * - additionalProperties must be false for objects + * + * @param schema - JSON schema to transform + * @param originalRequired - Original required array (to know which fields were optional) + * @returns Transformed schema compatible with Groq structured output + */ +export function makeGroqStructuredOutputCompatible( + schema: Record<string, any>, + originalRequired: Array<string> = [], +): Record<string, any> { + const result = { ...schema } + + if (result.type === 'object') { + if (!result.properties) { + result.properties = {} + } + const properties = { ...result.properties } + const allPropertyNames = Object.keys(properties) + + for (const propName of allPropertyNames) { + const prop = properties[propName] + const wasOptional = !originalRequired.includes(propName) + + if (prop.type === 'object' && prop.properties) { + properties[propName] = makeGroqStructuredOutputCompatible( + prop, + prop.required || [], + ) + } else if (prop.type === 'array' && prop.items) { + properties[propName] = { + ...prop, + items: makeGroqStructuredOutputCompatible( + prop.items, + prop.items.required || [], + ), + } + } else if (wasOptional) { + if (prop.type &&
!Array.isArray(prop.type)) { + properties[propName] = { + ...prop, + type: [prop.type, 'null'], + } + } else if (Array.isArray(prop.type) && !prop.type.includes('null')) { + properties[propName] = { + ...prop, + type: [...prop.type, 'null'], + } + } + } + } + + result.properties = properties + // Groq rejects `required` when there are no properties, even if it's an empty array + if (allPropertyNames.length > 0) { + result.required = allPropertyNames + } else { + delete result.required + } + result.additionalProperties = false + } + + if (result.type === 'array' && result.items) { + result.items = makeGroqStructuredOutputCompatible( + result.items, + result.items.required || [], + ) + } + + return result +} diff --git a/packages/typescript/ai-groq/tests/groq-adapter.test.ts b/packages/typescript/ai-groq/tests/groq-adapter.test.ts new file mode 100644 index 000000000..1562b0623 --- /dev/null +++ b/packages/typescript/ai-groq/tests/groq-adapter.test.ts @@ -0,0 +1,577 @@ +import { + describe, + it, + expect, + vi, + afterEach, + beforeEach, + type Mock, +} from 'vitest' +import { createGroqText, groqText } from '../src/adapters/text' +import type { StreamChunk, Tool } from '@tanstack/ai' + +// Declare mockCreate at module level +let mockCreate: Mock<(...args: Array<any>) => unknown> + +// Mock the Groq SDK +vi.mock('groq-sdk', () => { + return { + default: class { + chat = { + completions: { + create: (...args: Array<any>) => mockCreate(...args), + }, + } + }, + } +}) + +// Helper to create async iterable from chunks +function createAsyncIterable<T>(chunks: Array<T>): AsyncIterable<T> { + return { + [Symbol.asyncIterator]() { + let index = 0 + return { + async next() { + if (index < chunks.length) { + return { value: chunks[index++]!, done: false } + } + return { value: undefined as T, done: true } + }, + } + }, + } +} + +// Helper to setup the mock SDK client for streaming responses +function setupMockSdkClient( + streamChunks: Array<Record<string, unknown>>, + nonStreamResponse?: Record<string, unknown>, +) { + mockCreate =
vi.fn().mockImplementation((params) => { + if (params.stream) { + return Promise.resolve(createAsyncIterable(streamChunks)) + } + return Promise.resolve(nonStreamResponse) + }) +} + +const weatherTool: Tool = { + name: 'lookup_weather', + description: 'Return the forecast for a location', +} + +describe('Groq adapters', () => { + afterEach(() => { + vi.unstubAllEnvs() + }) + + describe('Text adapter', () => { + it('creates a text adapter with explicit API key', () => { + const adapter = createGroqText('llama-3.3-70b-versatile', 'test-api-key') + + expect(adapter).toBeDefined() + expect(adapter.kind).toBe('text') + expect(adapter.name).toBe('groq') + expect(adapter.model).toBe('llama-3.3-70b-versatile') + }) + + it('creates a text adapter from environment variable', () => { + vi.stubEnv('GROQ_API_KEY', 'env-api-key') + + const adapter = groqText('llama-3.1-8b-instant') + + expect(adapter).toBeDefined() + expect(adapter.kind).toBe('text') + expect(adapter.model).toBe('llama-3.1-8b-instant') + }) + + it('throws if GROQ_API_KEY is not set when using groqText', () => { + vi.stubEnv('GROQ_API_KEY', '') + + expect(() => groqText('llama-3.3-70b-versatile')).toThrow( + 'GROQ_API_KEY is required', + ) + }) + + it('allows custom baseURL override', () => { + const adapter = createGroqText( + 'llama-3.3-70b-versatile', + 'test-api-key', + { + baseURL: 'https://custom.api.example.com/v1', + }, + ) + + expect(adapter).toBeDefined() + }) + }) +}) + +describe('Groq AG-UI event emission', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + afterEach(() => { + vi.unstubAllEnvs() + }) + + it('emits RUN_STARTED as the first event', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: { content: 'Hello' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + x_groq: { + usage: { + prompt_tokens: 
5, + completion_tokens: 1, + total_tokens: 6, + }, + }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = createGroqText('llama-3.3-70b-versatile', 'test-api-key') + const chunks: Array<StreamChunk> = [] + + for await (const chunk of adapter.chatStream({ + model: 'llama-3.3-70b-versatile', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + expect(chunks[0]?.type).toBe('RUN_STARTED') + if (chunks[0]?.type === 'RUN_STARTED') { + expect(chunks[0].runId).toBeDefined() + expect(chunks[0].model).toBe('llama-3.3-70b-versatile') + } + }) + + it('emits TEXT_MESSAGE_START before TEXT_MESSAGE_CONTENT', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: { content: 'Hello' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + x_groq: { + usage: { + prompt_tokens: 5, + completion_tokens: 1, + total_tokens: 6, + }, + }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = createGroqText('llama-3.3-70b-versatile', 'test-api-key') + const chunks: Array<StreamChunk> = [] + + for await (const chunk of adapter.chatStream({ + model: 'llama-3.3-70b-versatile', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const textStartIndex = chunks.findIndex( + (c) => c.type === 'TEXT_MESSAGE_START', + ) + const textContentIndex = chunks.findIndex( + (c) => c.type === 'TEXT_MESSAGE_CONTENT', + ) + + expect(textStartIndex).toBeGreaterThan(-1) + expect(textContentIndex).toBeGreaterThan(-1) + expect(textStartIndex).toBeLessThan(textContentIndex) + + const textStart = chunks[textStartIndex] + if (textStart?.type === 'TEXT_MESSAGE_START') { + expect(textStart.messageId).toBeDefined() + expect(textStart.role).toBe('assistant') + } + }) + + it('emits TEXT_MESSAGE_END and RUN_FINISHED at the end', async () => { + const streamChunks = [ + { + id:
'chatcmpl-123', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: { content: 'Hello' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + x_groq: { + usage: { + prompt_tokens: 5, + completion_tokens: 1, + total_tokens: 6, + }, + }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = createGroqText('llama-3.3-70b-versatile', 'test-api-key') + const chunks: Array<StreamChunk> = [] + + for await (const chunk of adapter.chatStream({ + model: 'llama-3.3-70b-versatile', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const textEndChunk = chunks.find((c) => c.type === 'TEXT_MESSAGE_END') + expect(textEndChunk).toBeDefined() + if (textEndChunk?.type === 'TEXT_MESSAGE_END') { + expect(textEndChunk.messageId).toBeDefined() + } + + const runFinishedChunk = chunks.find((c) => c.type === 'RUN_FINISHED') + expect(runFinishedChunk).toBeDefined() + if (runFinishedChunk?.type === 'RUN_FINISHED') { + expect(runFinishedChunk.runId).toBeDefined() + expect(runFinishedChunk.finishReason).toBe('stop') + expect(runFinishedChunk.usage).toMatchObject({ + promptTokens: 5, + completionTokens: 1, + totalTokens: 6, + }) + } + }) + + it('emits AG-UI tool call events', async () => { + const streamChunks = [ + { + id: 'chatcmpl-456', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: { + tool_calls: [ + { + index: 0, + id: 'call_abc123', + type: 'function', + function: { + name: 'lookup_weather', + arguments: '{"location":', + }, + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-456', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: { + tool_calls: [ + { + index: 0, + function: { + arguments: '"Berlin"}', + }, + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-456', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: {}, + finish_reason: 'tool_calls', + },
+ ], + x_groq: { + usage: { + prompt_tokens: 10, + completion_tokens: 5, + total_tokens: 15, + }, + }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = createGroqText('llama-3.3-70b-versatile', 'test-api-key') + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'llama-3.3-70b-versatile', + messages: [{ role: 'user', content: 'Weather in Berlin?' }], + tools: [weatherTool], + })) { + chunks.push(chunk) + } + + // Check AG-UI tool events + const toolStartChunk = chunks.find((c) => c.type === 'TOOL_CALL_START') + expect(toolStartChunk).toBeDefined() + if (toolStartChunk?.type === 'TOOL_CALL_START') { + expect(toolStartChunk.toolCallId).toBe('call_abc123') + expect(toolStartChunk.toolName).toBe('lookup_weather') + } + + const toolArgsChunks = chunks.filter((c) => c.type === 'TOOL_CALL_ARGS') + expect(toolArgsChunks.length).toBeGreaterThan(0) + + const toolEndChunk = chunks.find((c) => c.type === 'TOOL_CALL_END') + expect(toolEndChunk).toBeDefined() + if (toolEndChunk?.type === 'TOOL_CALL_END') { + expect(toolEndChunk.toolCallId).toBe('call_abc123') + expect(toolEndChunk.toolName).toBe('lookup_weather') + expect(toolEndChunk.input).toEqual({ location: 'Berlin' }) + } + + // Check finish reason + const runFinishedChunk = chunks.find((c) => c.type === 'RUN_FINISHED') + if (runFinishedChunk?.type === 'RUN_FINISHED') { + expect(runFinishedChunk.finishReason).toBe('tool_calls') + } + }) + + it('emits RUN_ERROR on stream error', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: { content: 'Hello' }, + finish_reason: null, + }, + ], + }, + ] + + // Create an async iterable that throws mid-stream + const errorIterable = { + [Symbol.asyncIterator]() { + let index = 0 + return { + async next() { + if (index < streamChunks.length) { + return { value: streamChunks[index++]!, done: false } + } + throw new Error('Stream interrupted') + }, + } + }, + } + + 
mockCreate = vi.fn().mockResolvedValue(errorIterable) + + const adapter = createGroqText('llama-3.3-70b-versatile', 'test-api-key') + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'llama-3.3-70b-versatile', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + // Should emit RUN_ERROR + const runErrorChunk = chunks.find((c) => c.type === 'RUN_ERROR') + expect(runErrorChunk).toBeDefined() + if (runErrorChunk?.type === 'RUN_ERROR') { + expect(runErrorChunk.error.message).toBe('Stream interrupted') + } + }) + + it('emits proper AG-UI event sequence', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: { content: 'Hello world' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + x_groq: { + usage: { + prompt_tokens: 5, + completion_tokens: 2, + total_tokens: 7, + }, + }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = createGroqText('llama-3.3-70b-versatile', 'test-api-key') + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'llama-3.3-70b-versatile', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + // Verify proper AG-UI event sequence + const eventTypes = chunks.map((c) => c.type) + + // Should start with RUN_STARTED + expect(eventTypes[0]).toBe('RUN_STARTED') + + // Should have TEXT_MESSAGE_START before TEXT_MESSAGE_CONTENT + const textStartIndex = eventTypes.indexOf('TEXT_MESSAGE_START') + const textContentIndex = eventTypes.indexOf('TEXT_MESSAGE_CONTENT') + expect(textStartIndex).toBeGreaterThan(-1) + expect(textContentIndex).toBeGreaterThan(textStartIndex) + + // Should have TEXT_MESSAGE_END before RUN_FINISHED + const textEndIndex = eventTypes.indexOf('TEXT_MESSAGE_END') + const runFinishedIndex = 
eventTypes.indexOf('RUN_FINISHED') + expect(textEndIndex).toBeGreaterThan(-1) + expect(runFinishedIndex).toBeGreaterThan(textEndIndex) + + // Verify RUN_FINISHED has proper data + const runFinishedChunk = chunks.find((c) => c.type === 'RUN_FINISHED') + if (runFinishedChunk?.type === 'RUN_FINISHED') { + expect(runFinishedChunk.finishReason).toBe('stop') + expect(runFinishedChunk.usage).toBeDefined() + } + }) + + it('streams content with correct accumulated values', async () => { + const streamChunks = [ + { + id: 'chatcmpl-stream', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: { content: 'Hello ' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-stream', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: { content: 'world' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-stream', + model: 'llama-3.3-70b-versatile', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + x_groq: { + usage: { + prompt_tokens: 5, + completion_tokens: 2, + total_tokens: 7, + }, + }, + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = createGroqText('llama-3.3-70b-versatile', 'test-api-key') + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'llama-3.3-70b-versatile', + messages: [{ role: 'user', content: 'Say hello' }], + })) { + chunks.push(chunk) + } + + // Check TEXT_MESSAGE_CONTENT events have correct accumulated content + const contentChunks = chunks.filter( + (c) => c.type === 'TEXT_MESSAGE_CONTENT', + ) + expect(contentChunks.length).toBe(2) + + const firstContent = contentChunks[0] + if (firstContent?.type === 'TEXT_MESSAGE_CONTENT') { + expect(firstContent.delta).toBe('Hello ') + expect(firstContent.content).toBe('Hello ') + } + + const secondContent = contentChunks[1] + if (secondContent?.type === 'TEXT_MESSAGE_CONTENT') { + expect(secondContent.delta).toBe('world') + expect(secondContent.content).toBe('Hello world') + } + }) +}) diff --git 
a/packages/typescript/ai-groq/tsconfig.json b/packages/typescript/ai-groq/tsconfig.json new file mode 100644 index 000000000..ea11c1096 --- /dev/null +++ b/packages/typescript/ai-groq/tsconfig.json @@ -0,0 +1,9 @@ +{ + "extends": "../../../tsconfig.json", + "compilerOptions": { + "outDir": "dist", + "rootDir": "src" + }, + "include": ["src/**/*.ts", "src/**/*.tsx"], + "exclude": ["node_modules", "dist", "**/*.config.ts"] +} diff --git a/packages/typescript/ai-groq/vite.config.ts b/packages/typescript/ai-groq/vite.config.ts new file mode 100644 index 000000000..77bcc2e60 --- /dev/null +++ b/packages/typescript/ai-groq/vite.config.ts @@ -0,0 +1,36 @@ +import { defineConfig, mergeConfig } from 'vitest/config' +import { tanstackViteConfig } from '@tanstack/vite-config' +import packageJson from './package.json' + +const config = defineConfig({ + test: { + name: packageJson.name, + dir: './', + watch: false, + globals: true, + environment: 'node', + include: ['tests/**/*.test.ts'], + coverage: { + provider: 'v8', + reporter: ['text', 'json', 'html', 'lcov'], + exclude: [ + 'node_modules/', + 'dist/', + 'tests/', + '**/*.test.ts', + '**/*.config.ts', + '**/types.ts', + ], + include: ['src/**/*.ts'], + }, + }, +}) + +export default mergeConfig( + config, + tanstackViteConfig({ + entry: ['./src/index.ts'], + srcDir: './src', + cjs: false, + }), +) diff --git a/packages/typescript/ai-groq/vitest.config.ts b/packages/typescript/ai-groq/vitest.config.ts new file mode 100644 index 000000000..fa2531743 --- /dev/null +++ b/packages/typescript/ai-groq/vitest.config.ts @@ -0,0 +1,22 @@ +import { defineConfig } from 'vitest/config' + +export default defineConfig({ + test: { + globals: true, + environment: 'node', + include: ['tests/**/*.test.ts'], + coverage: { + provider: 'v8', + reporter: ['text', 'json', 'html', 'lcov'], + exclude: [ + 'node_modules/', + 'dist/', + 'tests/', + '**/*.test.ts', + '**/*.config.ts', + '**/types.ts', + ], + include: ['src/**/*.ts'], + }, + }, +}) diff 
--git a/pnpm-lock.yaml b/pnpm-lock.yaml index a4234cade..e6c1ee218 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -207,6 +207,9 @@ importers: '@tanstack/ai-grok': specifier: workspace:* version: link:../../packages/typescript/ai-grok + '@tanstack/ai-groq': + specifier: workspace:* + version: link:../../packages/typescript/ai-groq '@tanstack/ai-ollama': specifier: workspace:* version: link:../../packages/typescript/ai-ollama @@ -803,6 +806,25 @@ importers: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + packages/typescript/ai-groq: + dependencies: + '@tanstack/ai': + specifier: workspace:^ + version: link:../ai + groq-sdk: + specifier: ^0.37.0 + version: 0.37.0 + zod: + specifier: ^4.0.0 + version: 4.3.6 + devDependencies: + '@vitest/coverage-v8': + specifier: 4.0.14 + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + vite: + specifier: ^7.2.7 + version: 7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + packages/typescript/ai-ollama: dependencies: ollama: @@ -4451,9 +4473,15 @@ packages: '@types/ms@2.1.0': resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==} + '@types/node-fetch@2.6.13': + resolution: {integrity: sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==} + '@types/node@12.20.55': resolution: {integrity: sha512-J8xLz7q2OFulZ2cyGTLE1TbbZcjpno7FaN6zdJNrgAdrJ+DZzh/uFR6YrTb4C+nXakvud8Q4+rbhoIWlYQbUFQ==} + '@types/node@18.19.130': + resolution: {integrity: sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==} + '@types/node@20.19.26': resolution: {integrity: sha512-0l6cjgF0XnihUpndDhk+nyD3exio3iKaYROSgvh/qSevPXax3L8p5DBRFjbvalnwatGgHEQn2R88y2fA3g4irg==} @@ 
-4866,6 +4894,10 @@ packages: resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} engines: {node: '>= 14'} + agentkeepalive@4.6.0: + resolution: {integrity: sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==} + engines: {node: '>= 8.0.0'} + ajv-draft-04@1.0.0: resolution: {integrity: sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw==} peerDependencies: @@ -6023,6 +6055,9 @@ packages: resolution: {integrity: sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==} engines: {node: '>=14'} + form-data-encoder@1.7.2: + resolution: {integrity: sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==} + form-data@4.0.5: resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==} engines: {node: '>= 6'} @@ -6032,6 +6067,10 @@ packages: engines: {node: '>=18.3.0'} hasBin: true + formdata-node@4.4.1: + resolution: {integrity: sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==} + engines: {node: '>= 12.20'} + formdata-polyfill@4.0.10: resolution: {integrity: sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==} engines: {node: '>=12.20.0'} @@ -6183,6 +6222,9 @@ packages: graceful-fs@4.2.11: resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} + groq-sdk@0.37.0: + resolution: {integrity: sha512-lT72pcT8b/X5XrzdKf+rWVzUGW1OQSKESmL8fFN5cTbsf02gq6oFam4SVeNtzELt9cYE2Pt3pdGgSImuTbHFDg==} + gtoken@8.0.0: resolution: {integrity: sha512-+CqsMbHPiSTdtSO14O51eMNlrp9N79gmeqmXeouJOhfucAedHw9noVe/n5uJk3tbKE6a+6ZCQg3RPhVhHByAIw==} engines: {node: '>=18'} @@ -6345,6 +6387,9 @@ packages: resolution: {integrity: 
sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==} engines: {node: '>=16.17.0'} + humanize-ms@1.2.1: + resolution: {integrity: sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==} + iconv-lite@0.6.3: resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} engines: {node: '>=0.10.0'} @@ -8656,6 +8701,9 @@ packages: unctx@2.5.0: resolution: {integrity: sha512-p+Rz9x0R7X+CYDkT+Xg8/GhpcShTlU8n+cf9OtOEf7zEQsNcCZO1dPKNRDqvUTaq+P32PMMkxWHwfrxkqfqAYg==} + undici-types@5.26.5: + resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} + undici-types@5.28.4: resolution: {integrity: sha512-3OeMF5Lyowe8VW0skf5qaIE7Or3yS9LS7fvMUI0gg4YxpIBVg0L8BxCmROw2CcYhSkpR68Epz7CGc8MPj94Uww==} @@ -9219,6 +9267,10 @@ packages: resolution: {integrity: sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==} engines: {node: '>= 8'} + web-streams-polyfill@4.0.0-beta.3: + resolution: {integrity: sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==} + engines: {node: '>= 14'} + web-vitals@5.1.0: resolution: {integrity: sha512-ArI3kx5jI0atlTtmV0fWU3fjpLmq/nD3Zr1iFFlJLaqa5wLBkUSzINwBPySCX/8jRyjlmy1Volw1kz1g9XE4Jg==} @@ -10332,7 +10384,7 @@ snapshots: https-proxy-agent: 7.0.6 node-fetch: 2.7.0 nopt: 8.1.0 - semver: 7.7.3 + semver: 7.7.4 tar: 7.5.2 transitivePeerDependencies: - encoding @@ -12873,8 +12925,17 @@ snapshots: '@types/ms@2.1.0': {} + '@types/node-fetch@2.6.13': + dependencies: + '@types/node': 24.10.3 + form-data: 4.0.5 + '@types/node@12.20.55': {} + '@types/node@18.19.130': + dependencies: + undici-types: 5.26.5 + '@types/node@20.19.26': dependencies: undici-types: 6.21.0 @@ -12978,7 +13039,7 @@ snapshots: '@typescript-eslint/visitor-keys': 8.49.0 debug: 4.4.3 minimatch: 9.0.5 - semver: 7.7.3 
+ semver: 7.7.4 tinyglobby: 0.2.15 ts-api-utils: 2.1.0(typescript@5.9.3) typescript: 5.9.3 @@ -13475,6 +13536,10 @@ snapshots: agent-base@7.1.4: {} + agentkeepalive@4.6.0: + dependencies: + humanize-ms: 1.2.1 + ajv-draft-04@1.0.0(ajv@8.13.0): optionalDependencies: ajv: 8.13.0 @@ -14227,7 +14292,7 @@ snapshots: '@one-ini/wasm': 0.1.1 commander: 10.0.1 minimatch: 9.0.1 - semver: 7.7.3 + semver: 7.7.4 ee-first@1.1.1: {} @@ -14437,7 +14502,7 @@ snapshots: eslint-compat-utils@0.5.1(eslint@9.39.1(jiti@2.6.1)): dependencies: eslint: 9.39.1(jiti@2.6.1) - semver: 7.7.3 + semver: 7.7.4 eslint-import-context@0.1.9(unrs-resolver@1.11.1): dependencies: @@ -14798,6 +14863,8 @@ snapshots: cross-spawn: 7.0.6 signal-exit: 4.1.0 + form-data-encoder@1.7.2: {} + form-data@4.0.5: dependencies: asynckit: 0.4.0 @@ -14810,6 +14877,11 @@ snapshots: dependencies: fd-package-json: 2.0.0 + formdata-node@4.4.1: + dependencies: + node-domexception: 1.0.0 + web-streams-polyfill: 4.0.0-beta.3 + formdata-polyfill@4.0.10: dependencies: fetch-blob: 3.2.0 @@ -14986,6 +15058,18 @@ snapshots: graceful-fs@4.2.11: {} + groq-sdk@0.37.0: + dependencies: + '@types/node': 18.19.130 + '@types/node-fetch': 2.6.13 + abort-controller: 3.0.0 + agentkeepalive: 4.6.0 + form-data-encoder: 1.7.2 + formdata-node: 4.4.1 + node-fetch: 2.7.0 + transitivePeerDependencies: + - encoding + gtoken@8.0.0: dependencies: gaxios: 7.1.3 @@ -15236,6 +15320,10 @@ snapshots: human-signals@5.0.0: {} + humanize-ms@1.2.1: + dependencies: + ms: 2.1.3 + iconv-lite@0.6.3: dependencies: safer-buffer: 2.1.2 @@ -16412,7 +16500,7 @@ snapshots: rollup: 4.57.1 rollup-plugin-visualizer: 6.0.5(rolldown@1.0.0-beta.53)(rollup@4.57.1) scule: 1.3.0 - semver: 7.7.3 + semver: 7.7.4 serve-placeholder: 2.0.2 serve-static: 2.2.0 source-map: 0.7.6 @@ -18117,6 +18205,8 @@ snapshots: magic-string: 0.30.21 unplugin: 2.3.11 + undici-types@5.26.5: {} + undici-types@5.28.4: {} undici-types@6.21.0: {} @@ -18808,6 +18898,8 @@ snapshots: web-streams-polyfill@3.3.3: 
{} + web-streams-polyfill@4.0.0-beta.3: {} + web-vitals@5.1.0: {} webidl-conversions@3.0.1: {}