From eb25e55215f0cbde8bacf2da7db60cec1b763fe0 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Tue, 3 Sep 2024 10:20:34 +0200 Subject: [PATCH 01/11] feat (docs): improve useObject error handling docs (#2879) --- .../05-ai-sdk-ui/08-object-generation.mdx | 98 ++++++++++++++++++- .../07-reference/ai-sdk-ui/03-use-object.mdx | 2 +- 2 files changed, 97 insertions(+), 3 deletions(-) diff --git a/content/docs/05-ai-sdk-ui/08-object-generation.mdx b/content/docs/05-ai-sdk-ui/08-object-generation.mdx index 98dd5472f6c8..c197a10fba06 100644 --- a/content/docs/05-ai-sdk-ui/08-object-generation.mdx +++ b/content/docs/05-ai-sdk-ui/08-object-generation.mdx @@ -53,7 +53,7 @@ export default function Page() { }); return ( -
+    <>
@@ -64,7 +64,7 @@ export default function Page() {
           <p>{notification?.message}</p>
         </div>
))} - + ); } ``` @@ -95,6 +95,100 @@ export async function POST(req: Request) { } ``` +## Customized UI + +`useObject` also provides ways to show loading and error states: + +### Loading State + +The `isLoading` state returned by the `useObject` hook can be used for several +purposes + +- To show a loading spinner while the object is generated. +- To show a "Stop" button to abort the current message. +- To disable the submit button. + +```tsx filename='app/page.tsx' highlight="6,13-20,24" +'use client'; + +import { useObject } from 'ai/react'; + +export default function Page() { + const { isLoading, stop, object, submit } = useObject({ + api: '/api/notifications', + schema: notificationSchema, + }); + + return ( + <> + {isLoading && ( +
+        <div>
+          <Spinner />
+          <button type="button" onClick={() => stop()}>
+            Stop
+          </button>
+        </div>
+ )} + + + + {object?.notifications?.map((notification, index) => ( +
+        <div key={index}>
+          <p>{notification?.name}</p>
+          <p>{notification?.message}</p>
+        </div>
+ ))} + + ); +} +``` + +### Error State + +Similarly, the `error` state reflects the error object thrown during the fetch request. +It can be used to display an error message, or to disable the submit button: + + + We recommend showing a generic error message to the user, such as "Something + went wrong." This is a good practice to avoid leaking information from the + server. + + +```tsx file="app/page.tsx" highlight="6,13" +'use client'; + +import { useObject } from 'ai/react'; + +export default function Page() { + const { error, object, submit } = useObject({ + api: '/api/notifications', + schema: notificationSchema, + }); + + return ( + <> + {error &&
<div>An error occurred.</div>
} + + + + {object?.notifications?.map((notification, index) => ( +
+        <div key={index}>
+          <p>{notification?.name}</p>
+          <p>{notification?.message}</p>
+        </div>
+ ))} + + ); +} +``` + ## Event Callbacks `useObject` provides optional event callbacks that you can use to handle life-cycle events. diff --git a/content/docs/07-reference/ai-sdk-ui/03-use-object.mdx b/content/docs/07-reference/ai-sdk-ui/03-use-object.mdx index 6d1303da8700..133902a04bd2 100644 --- a/content/docs/07-reference/ai-sdk-ui/03-use-object.mdx +++ b/content/docs/07-reference/ai-sdk-ui/03-use-object.mdx @@ -126,7 +126,7 @@ export default function Page() { }, { name: 'error', - type: 'undefined | unknown', + type: 'Error | unknown', description: 'The error object if the API call fails.', }, { From 28cbf2edcf987a7479e4c01fb1d8f705d51d422a Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Tue, 3 Sep 2024 12:11:11 +0200 Subject: [PATCH 02/11] fix (provider/openai): support tool call delta when arguments are sent in the first chunk (#2883) Co-authored-by: minpeter --- .changeset/real-beers-bake.md | 5 + .../src/openai-chat-language-model.test.ts | 128 ++++++++++++++++++ .../openai/src/openai-chat-language-model.ts | 43 +++--- 3 files changed, 156 insertions(+), 20 deletions(-) create mode 100644 .changeset/real-beers-bake.md diff --git a/.changeset/real-beers-bake.md b/.changeset/real-beers-bake.md new file mode 100644 index 000000000000..231d2beb5f3f --- /dev/null +++ b/.changeset/real-beers-bake.md @@ -0,0 +1,5 @@ +--- +'@ai-sdk/openai': patch +--- + +fix (provider/openai): support tool call deltas when arguments are sent in the first chunk diff --git a/packages/openai/src/openai-chat-language-model.test.ts b/packages/openai/src/openai-chat-language-model.test.ts index 306091bb7ffa..d1f991ff1a03 100644 --- a/packages/openai/src/openai-chat-language-model.test.ts +++ b/packages/openai/src/openai-chat-language-model.test.ts @@ -999,6 +999,134 @@ describe('doStream', () => { ]); }); + it('should stream tool call deltas when tool call arguments are passed in the first chunk', async () => { + server.responseChunks = [ + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"role":"assistant","content":null,` + + `"tool_calls":[{"index":0,"id":"call_O17Uplv4lJvD6DVdIvFFeRMw","type":"function","function":{"name":"test-tool","arguments":"{\\""}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"va"}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"lue"}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\\":\\""}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + 
`"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Spark"}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"le"}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" Day"}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\\"}"}}]},` + + `"logprobs":null,"finish_reason":null}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}\n\n`, + `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + + `"system_fingerprint":"fp_3bc1b5746c","choices":[],"usage":{"prompt_tokens":53,"completion_tokens":17,"total_tokens":70}}\n\n`, + 'data: [DONE]\n\n', + ]; + + const { stream } = await model.doStream({ + inputFormat: 'prompt', + mode: { + type: 'regular', + tools: [ + { + type: 'function', + name: 'test-tool', + parameters: { + type: 'object', + properties: { value: { type: 'string' } }, + required: ['value'], + additionalProperties: false, + $schema: 'http://json-schema.org/draft-07/schema#', + }, + }, + ], + }, + prompt: TEST_PROMPT, + }); + + expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { + type: 'tool-call-delta', + toolCallId: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + toolCallType: 'function', + toolName: 'test-tool', + argsTextDelta: '{"', + }, + { + type: 'tool-call-delta', + toolCallId: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + toolCallType: 'function', + toolName: 'test-tool', + argsTextDelta: 'va', + }, + { + type: 'tool-call-delta', + toolCallId: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + toolCallType: 'function', + toolName: 'test-tool', + argsTextDelta: 'lue', + }, + { + type: 'tool-call-delta', + toolCallId: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + toolCallType: 'function', + toolName: 'test-tool', + argsTextDelta: '":"', + }, + { + type: 'tool-call-delta', + toolCallId: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + toolCallType: 'function', + toolName: 'test-tool', + argsTextDelta: 'Spark', + }, + { + type: 'tool-call-delta', + toolCallId: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + toolCallType: 'function', + toolName: 'test-tool', + argsTextDelta: 'le', + }, + { + type: 'tool-call-delta', + toolCallId: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + toolCallType: 'function', + toolName: 'test-tool', + argsTextDelta: ' Day', + }, + { + type: 'tool-call-delta', + toolCallId: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + toolCallType: 'function', + toolName: 'test-tool', + argsTextDelta: '"}', + }, + { + type: 'tool-call', + toolCallId: 'call_O17Uplv4lJvD6DVdIvFFeRMw', + 
toolCallType: 'function', + toolName: 'test-tool', + args: '{"value":"Sparkle Day"}', + }, + { + type: 'finish', + finishReason: 'tool-calls', + logprobs: undefined, + usage: { promptTokens: 53, completionTokens: 17 }, + }, + ]); + }); + it('should stream tool call that is sent in one chunk', async () => { server.responseChunks = [ `data: {"id":"chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP","object":"chat.completion.chunk","created":1711357598,"model":"gpt-3.5-turbo-0125",` + diff --git a/packages/openai/src/openai-chat-language-model.ts b/packages/openai/src/openai-chat-language-model.ts index e3531225b350..2ec098b46299 100644 --- a/packages/openai/src/openai-chat-language-model.ts +++ b/packages/openai/src/openai-chat-language-model.ts @@ -453,29 +453,32 @@ export class OpenAIChatLanguageModel implements LanguageModelV1 { const toolCall = toolCalls[index]; - // check if tool call is complete (some providers send the full tool call in one chunk) if ( toolCall.function?.name != null && - toolCall.function?.arguments != null && - isParsableJson(toolCall.function.arguments) + toolCall.function?.arguments != null ) { - // send delta - controller.enqueue({ - type: 'tool-call-delta', - toolCallType: 'function', - toolCallId: toolCall.id, - toolName: toolCall.function.name, - argsTextDelta: toolCall.function.arguments, - }); - - // send tool call - controller.enqueue({ - type: 'tool-call', - toolCallType: 'function', - toolCallId: toolCall.id ?? generateId(), - toolName: toolCall.function.name, - args: toolCall.function.arguments, - }); + // send delta if the argument text has already started: + if (toolCall.function.arguments.length > 0) { + controller.enqueue({ + type: 'tool-call-delta', + toolCallType: 'function', + toolCallId: toolCall.id, + toolName: toolCall.function.name, + argsTextDelta: toolCall.function.arguments, + }); + } + + // check if tool call is complete + // (some providers send the full tool call in one chunk): + if (isParsableJson(toolCall.function.arguments)) { + controller.enqueue({ + type: 'tool-call', + toolCallType: 'function', + toolCallId: toolCall.id ?? generateId(), + toolName: toolCall.function.name, + args: toolCall.function.arguments, + }); + } } continue; From b26eb367b4aa304a3fadf7f05bf587da453e4682 Mon Sep 17 00:00:00 2001 From: Nico Albanese <49612682+nicoalbanese@users.noreply.github.com> Date: Tue, 3 Sep 2024 11:11:26 +0100 Subject: [PATCH 03/11] docs: add note to rag guide and troubleshooting page for maxToolRoundTrips (#2880) --- content/docs/02-guides/01-rag-chatbot.mdx | 5 +++ .../10-use-chat-tools-no-response.mdx | 34 +++++++++++++++++++ .../03-common-issues/index.mdx | 1 + 3 files changed, 40 insertions(+) create mode 100644 content/docs/08-troubleshooting/03-common-issues/10-use-chat-tools-no-response.mdx diff --git a/content/docs/02-guides/01-rag-chatbot.mdx b/content/docs/02-guides/01-rag-chatbot.mdx index b7076bd40271..c9d37d8f062f 100644 --- a/content/docs/02-guides/01-rag-chatbot.mdx +++ b/content/docs/02-guides/01-rag-chatbot.mdx @@ -532,6 +532,11 @@ export async function POST(req: Request) { } ``` + + Remember to pass the incoming messages through the `convertToCoreMessages` + function. + + In this code, you define a tool called `addResource`. This tool has three elements: - **description**: description of the tool that will influence when the tool is picked. 
diff --git a/content/docs/08-troubleshooting/03-common-issues/10-use-chat-tools-no-response.mdx b/content/docs/08-troubleshooting/03-common-issues/10-use-chat-tools-no-response.mdx new file mode 100644 index 000000000000..8c0567467e15 --- /dev/null +++ b/content/docs/08-troubleshooting/03-common-issues/10-use-chat-tools-no-response.mdx @@ -0,0 +1,34 @@ +--- +title: useChat No Response with maxToolsRoundtrips +description: Troubleshooting errors related to the Use Chat Failed to Parse Stream error. +--- + +# `useChat` No Response with maxToolsRoundtrips + +## Issue + +I am using [`useChat`](/docs/reference/ai-sdk-ui/use-chat) with [`maxToolRoundtrips`](/docs/reference/ai-sdk-ui/use-chat#max-tool-roundtrips). When I log the incoming messages on the server, I can see the tool call and the tool result, but the model does not respond with anything. + +## Background + +The `useChat` hook uses a message structure (`Message`) that pre-dates the AI SDK Core message structure (`CoreMessage`). + +## Solution + +To resolve this issue, convert the incoming messages to the `CoreMessage` format using the [`convertToCoreMessages`](/docs/reference/ai-sdk-ui/convert-to-core-messages) function. + +```tsx highlight="9" +import { openai } from '@ai-sdk/openai'; +import { convertToCoreMessages, streamText } from 'ai'; + +export async function POST(req: Request) { + const { messages } = await req.json(); + + const result = await streamText({ + model: openai('gpt-4o'), + messages: convertToCoreMessages(messages), + }); + + return result.toDataStreamResponse(); +} +``` diff --git a/content/docs/08-troubleshooting/03-common-issues/index.mdx b/content/docs/08-troubleshooting/03-common-issues/index.mdx index ddaf6d2bc82f..181d034fc8c5 100644 --- a/content/docs/08-troubleshooting/03-common-issues/index.mdx +++ b/content/docs/08-troubleshooting/03-common-issues/index.mdx @@ -17,3 +17,4 @@ description: Troubleshooting information for common issues encountered with the - [ useChat Failed to Parse Stream ](/docs/troubleshooting/common-issues/use-chat-failed-to-parse-stream) - [ NaN token counts when using streamText with OpenAI models](/docs/troubleshooting/common-issues/nan-token-counts-openai-streaming) - [ Model is not assignable to type "LanguageModelV1" ](/docs/troubleshooting/common-issues/model-is-not-assignable-to-type) +- [ useChat no response with `maxToolRoundTrips` ](/docs/troubleshooting/common-issues/use-chat-tools-no-response) From f0cf50f067e47b7681d39079422e2038197d6c47 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 12:43:09 +0200 Subject: [PATCH 04/11] Version Packages (#2884) Co-authored-by: github-actions[bot] --- .changeset/real-beers-bake.md | 5 - packages/azure/CHANGELOG.md | 7 + packages/azure/package.json | 4 +- packages/openai/CHANGELOG.md | 6 + packages/openai/package.json | 2 +- pnpm-lock.yaml | 243 +--------------------------------- 6 files changed, 18 insertions(+), 249 deletions(-) delete mode 100644 .changeset/real-beers-bake.md diff --git a/.changeset/real-beers-bake.md b/.changeset/real-beers-bake.md deleted file mode 100644 index 231d2beb5f3f..000000000000 --- a/.changeset/real-beers-bake.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -'@ai-sdk/openai': patch ---- - -fix (provider/openai): support tool call deltas when arguments are sent in the first chunk diff --git a/packages/azure/CHANGELOG.md b/packages/azure/CHANGELOG.md index f19faa0b9ef0..d5876d6cfa7e 100644 --- a/packages/azure/CHANGELOG.md +++ 
b/packages/azure/CHANGELOG.md @@ -1,5 +1,12 @@ # @ai-sdk/azure +## 0.0.33 + +### Patch Changes + +- Updated dependencies [28cbf2e] + - @ai-sdk/openai@0.0.55 + ## 0.0.32 ### Patch Changes diff --git a/packages/azure/package.json b/packages/azure/package.json index c7365afb001c..20835221084b 100644 --- a/packages/azure/package.json +++ b/packages/azure/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/azure", - "version": "0.0.32", + "version": "0.0.33", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,7 +30,7 @@ } }, "dependencies": { - "@ai-sdk/openai": "0.0.54", + "@ai-sdk/openai": "0.0.55", "@ai-sdk/provider": "0.0.22", "@ai-sdk/provider-utils": "1.0.17" }, diff --git a/packages/openai/CHANGELOG.md b/packages/openai/CHANGELOG.md index 560cfeb74eb2..dfc86c409dfb 100644 --- a/packages/openai/CHANGELOG.md +++ b/packages/openai/CHANGELOG.md @@ -1,5 +1,11 @@ # @ai-sdk/openai +## 0.0.55 + +### Patch Changes + +- 28cbf2e: fix (provider/openai): support tool call deltas when arguments are sent in the first chunk + ## 0.0.54 ### Patch Changes diff --git a/packages/openai/package.json b/packages/openai/package.json index 1d22892e0d89..ea39d3f2b6bc 100644 --- a/packages/openai/package.json +++ b/packages/openai/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/openai", - "version": "0.0.54", + "version": "0.0.55", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index dfc26cd98b22..b0b6330cc602 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1140,7 +1140,7 @@ importers: packages/azure: dependencies: '@ai-sdk/openai': - specifier: 0.0.54 + specifier: 0.0.55 version: link:../openai '@ai-sdk/provider': specifier: 0.0.22 @@ -5247,17 +5247,6 @@ packages: deprecated: Use @eslint/object-schema instead dev: false - /@img/sharp-darwin-arm64@0.33.4: - resolution: {integrity: sha512-p0suNqXufJs9t3RqLBO6vvrgr5OhgbWp76s5gTRvdmxmuv9E1rcaqGUsl3l4mKVmXPkTkTErXediAui4x+8PSA==} - engines: {glibc: '>=2.26', node: ^18.17.0 || ^20.3.0 || >=21.0.0, npm: '>=9.6.5', pnpm: '>=7.1.0', yarn: '>=3.2.0'} - cpu: [arm64] - os: [darwin] - requiresBuild: true - optionalDependencies: - '@img/sharp-libvips-darwin-arm64': 1.0.2 - dev: true - optional: true - /@img/sharp-darwin-arm64@0.33.5: resolution: {integrity: sha512-UT4p+iz/2H4twwAoLCqfA9UH5pI6DggwKEGuaPy7nCVQ8ZsiY5PIcrRvD1DzuY3qYL07NtIQcWnBSY/heikIFQ==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} @@ -5266,18 +5255,6 @@ packages: requiresBuild: true optionalDependencies: '@img/sharp-libvips-darwin-arm64': 1.0.4 - dev: false - optional: true - - /@img/sharp-darwin-x64@0.33.4: - resolution: {integrity: sha512-0l7yRObwtTi82Z6ebVI2PnHT8EB2NxBgpK2MiKJZJ7cz32R4lxd001ecMhzzsZig3Yv9oclvqqdV93jo9hy+Dw==} - engines: {glibc: '>=2.26', node: ^18.17.0 || ^20.3.0 || >=21.0.0, npm: '>=9.6.5', pnpm: '>=7.1.0', yarn: '>=3.2.0'} - cpu: [x64] - os: [darwin] - requiresBuild: true - optionalDependencies: - '@img/sharp-libvips-darwin-x64': 1.0.2 - dev: true optional: true /@img/sharp-darwin-x64@0.33.5: @@ -5288,16 +5265,6 @@ packages: requiresBuild: true optionalDependencies: '@img/sharp-libvips-darwin-x64': 1.0.4 - dev: false - optional: true - - /@img/sharp-libvips-darwin-arm64@1.0.2: - resolution: {integrity: sha512-tcK/41Rq8IKlSaKRCCAuuY3lDJjQnYIW1UXU1kxcEKrfL8WR7N6+rzNoOxoQRJWTAECuKwgAHnPvqXGN8XfkHA==} - engines: {macos: '>=11', npm: '>=9.6.5', pnpm: '>=7.1.0', yarn: '>=3.2.0'} - cpu: [arm64] - os: [darwin] - requiresBuild: true - dev: true optional: true 
/@img/sharp-libvips-darwin-arm64@1.0.4: @@ -5305,16 +5272,6 @@ packages: cpu: [arm64] os: [darwin] requiresBuild: true - dev: false - optional: true - - /@img/sharp-libvips-darwin-x64@1.0.2: - resolution: {integrity: sha512-Ofw+7oaWa0HiiMiKWqqaZbaYV3/UGL2wAPeLuJTx+9cXpCRdvQhCLG0IH8YGwM0yGWGLpsF4Su9vM1o6aer+Fw==} - engines: {macos: '>=10.13', npm: '>=9.6.5', pnpm: '>=7.1.0', yarn: '>=3.2.0'} - cpu: [x64] - os: [darwin] - requiresBuild: true - dev: true optional: true /@img/sharp-libvips-darwin-x64@1.0.4: @@ -5322,16 +5279,6 @@ packages: cpu: [x64] os: [darwin] requiresBuild: true - dev: false - optional: true - - /@img/sharp-libvips-linux-arm64@1.0.2: - resolution: {integrity: sha512-x7kCt3N00ofFmmkkdshwj3vGPCnmiDh7Gwnd4nUwZln2YjqPxV1NlTyZOvoDWdKQVDL911487HOueBvrpflagw==} - engines: {glibc: '>=2.26', npm: '>=9.6.5', pnpm: '>=7.1.0', yarn: '>=3.2.0'} - cpu: [arm64] - os: [linux] - requiresBuild: true - dev: true optional: true /@img/sharp-libvips-linux-arm64@1.0.4: @@ -5339,16 +5286,6 @@ packages: cpu: [arm64] os: [linux] requiresBuild: true - dev: false - optional: true - - /@img/sharp-libvips-linux-arm@1.0.2: - resolution: {integrity: sha512-iLWCvrKgeFoglQxdEwzu1eQV04o8YeYGFXtfWU26Zr2wWT3q3MTzC+QTCO3ZQfWd3doKHT4Pm2kRmLbupT+sZw==} - engines: {glibc: '>=2.28', npm: '>=9.6.5', pnpm: '>=7.1.0', yarn: '>=3.2.0'} - cpu: [arm] - os: [linux] - requiresBuild: true - dev: true optional: true /@img/sharp-libvips-linux-arm@1.0.5: @@ -5356,16 +5293,6 @@ packages: cpu: [arm] os: [linux] requiresBuild: true - dev: false - optional: true - - /@img/sharp-libvips-linux-s390x@1.0.2: - resolution: {integrity: sha512-cmhQ1J4qVhfmS6szYW7RT+gLJq9dH2i4maq+qyXayUSn9/3iY2ZeWpbAgSpSVbV2E1JUL2Gg7pwnYQ1h8rQIog==} - engines: {glibc: '>=2.28', npm: '>=9.6.5', pnpm: '>=7.1.0', yarn: '>=3.2.0'} - cpu: [s390x] - os: [linux] - requiresBuild: true - dev: true optional: true /@img/sharp-libvips-linux-s390x@1.0.4: @@ -5373,16 +5300,6 @@ packages: cpu: [s390x] os: [linux] requiresBuild: true - dev: false - optional: true - - /@img/sharp-libvips-linux-x64@1.0.2: - resolution: {integrity: sha512-E441q4Qdb+7yuyiADVi5J+44x8ctlrqn8XgkDTwr4qPJzWkaHwD489iZ4nGDgcuya4iMN3ULV6NwbhRZJ9Z7SQ==} - engines: {glibc: '>=2.26', npm: '>=9.6.5', pnpm: '>=7.1.0', yarn: '>=3.2.0'} - cpu: [x64] - os: [linux] - requiresBuild: true - dev: true optional: true /@img/sharp-libvips-linux-x64@1.0.4: @@ -5390,16 +5307,6 @@ packages: cpu: [x64] os: [linux] requiresBuild: true - dev: false - optional: true - - /@img/sharp-libvips-linuxmusl-arm64@1.0.2: - resolution: {integrity: sha512-3CAkndNpYUrlDqkCM5qhksfE+qSIREVpyoeHIU6jd48SJZViAmznoQQLAv4hVXF7xyUB9zf+G++e2v1ABjCbEQ==} - engines: {musl: '>=1.2.2', npm: '>=9.6.5', pnpm: '>=7.1.0', yarn: '>=3.2.0'} - cpu: [arm64] - os: [linux] - requiresBuild: true - dev: true optional: true /@img/sharp-libvips-linuxmusl-arm64@1.0.4: @@ -5407,16 +5314,6 @@ packages: cpu: [arm64] os: [linux] requiresBuild: true - dev: false - optional: true - - /@img/sharp-libvips-linuxmusl-x64@1.0.2: - resolution: {integrity: sha512-VI94Q6khIHqHWNOh6LLdm9s2Ry4zdjWJwH56WoiJU7NTeDwyApdZZ8c+SADC8OH98KWNQXnE01UdJ9CSfZvwZw==} - engines: {musl: '>=1.2.2', npm: '>=9.6.5', pnpm: '>=7.1.0', yarn: '>=3.2.0'} - cpu: [x64] - os: [linux] - requiresBuild: true - dev: true optional: true /@img/sharp-libvips-linuxmusl-x64@1.0.4: @@ -5424,18 +5321,6 @@ packages: cpu: [x64] os: [linux] requiresBuild: true - dev: false - optional: true - - /@img/sharp-linux-arm64@0.33.4: - resolution: {integrity: 
sha512-2800clwVg1ZQtxwSoTlHvtm9ObgAax7V6MTAB/hDT945Tfyy3hVkmiHpeLPCKYqYR1Gcmv1uDZ3a4OFwkdBL7Q==} - engines: {glibc: '>=2.26', node: ^18.17.0 || ^20.3.0 || >=21.0.0, npm: '>=9.6.5', pnpm: '>=7.1.0', yarn: '>=3.2.0'} - cpu: [arm64] - os: [linux] - requiresBuild: true - optionalDependencies: - '@img/sharp-libvips-linux-arm64': 1.0.2 - dev: true optional: true /@img/sharp-linux-arm64@0.33.5: @@ -5446,18 +5331,6 @@ packages: requiresBuild: true optionalDependencies: '@img/sharp-libvips-linux-arm64': 1.0.4 - dev: false - optional: true - - /@img/sharp-linux-arm@0.33.4: - resolution: {integrity: sha512-RUgBD1c0+gCYZGCCe6mMdTiOFS0Zc/XrN0fYd6hISIKcDUbAW5NtSQW9g/powkrXYm6Vzwd6y+fqmExDuCdHNQ==} - engines: {glibc: '>=2.28', node: ^18.17.0 || ^20.3.0 || >=21.0.0, npm: '>=9.6.5', pnpm: '>=7.1.0', yarn: '>=3.2.0'} - cpu: [arm] - os: [linux] - requiresBuild: true - optionalDependencies: - '@img/sharp-libvips-linux-arm': 1.0.2 - dev: true optional: true /@img/sharp-linux-arm@0.33.5: @@ -5468,18 +5341,6 @@ packages: requiresBuild: true optionalDependencies: '@img/sharp-libvips-linux-arm': 1.0.5 - dev: false - optional: true - - /@img/sharp-linux-s390x@0.33.4: - resolution: {integrity: sha512-h3RAL3siQoyzSoH36tUeS0PDmb5wINKGYzcLB5C6DIiAn2F3udeFAum+gj8IbA/82+8RGCTn7XW8WTFnqag4tQ==} - engines: {glibc: '>=2.31', node: ^18.17.0 || ^20.3.0 || >=21.0.0, npm: '>=9.6.5', pnpm: '>=7.1.0', yarn: '>=3.2.0'} - cpu: [s390x] - os: [linux] - requiresBuild: true - optionalDependencies: - '@img/sharp-libvips-linux-s390x': 1.0.2 - dev: true optional: true /@img/sharp-linux-s390x@0.33.5: @@ -5490,18 +5351,6 @@ packages: requiresBuild: true optionalDependencies: '@img/sharp-libvips-linux-s390x': 1.0.4 - dev: false - optional: true - - /@img/sharp-linux-x64@0.33.4: - resolution: {integrity: sha512-GoR++s0XW9DGVi8SUGQ/U4AeIzLdNjHka6jidVwapQ/JebGVQIpi52OdyxCNVRE++n1FCLzjDovJNozif7w/Aw==} - engines: {glibc: '>=2.26', node: ^18.17.0 || ^20.3.0 || >=21.0.0, npm: '>=9.6.5', pnpm: '>=7.1.0', yarn: '>=3.2.0'} - cpu: [x64] - os: [linux] - requiresBuild: true - optionalDependencies: - '@img/sharp-libvips-linux-x64': 1.0.2 - dev: true optional: true /@img/sharp-linux-x64@0.33.5: @@ -5512,18 +5361,6 @@ packages: requiresBuild: true optionalDependencies: '@img/sharp-libvips-linux-x64': 1.0.4 - dev: false - optional: true - - /@img/sharp-linuxmusl-arm64@0.33.4: - resolution: {integrity: sha512-nhr1yC3BlVrKDTl6cO12gTpXMl4ITBUZieehFvMntlCXFzH2bvKG76tBL2Y/OqhupZt81pR7R+Q5YhJxW0rGgQ==} - engines: {musl: '>=1.2.2', node: ^18.17.0 || ^20.3.0 || >=21.0.0, npm: '>=9.6.5', pnpm: '>=7.1.0', yarn: '>=3.2.0'} - cpu: [arm64] - os: [linux] - requiresBuild: true - optionalDependencies: - '@img/sharp-libvips-linuxmusl-arm64': 1.0.2 - dev: true optional: true /@img/sharp-linuxmusl-arm64@0.33.5: @@ -5534,18 +5371,6 @@ packages: requiresBuild: true optionalDependencies: '@img/sharp-libvips-linuxmusl-arm64': 1.0.4 - dev: false - optional: true - - /@img/sharp-linuxmusl-x64@0.33.4: - resolution: {integrity: sha512-uCPTku0zwqDmZEOi4ILyGdmW76tH7dm8kKlOIV1XC5cLyJ71ENAAqarOHQh0RLfpIpbV5KOpXzdU6XkJtS0daw==} - engines: {musl: '>=1.2.2', node: ^18.17.0 || ^20.3.0 || >=21.0.0, npm: '>=9.6.5', pnpm: '>=7.1.0', yarn: '>=3.2.0'} - cpu: [x64] - os: [linux] - requiresBuild: true - optionalDependencies: - '@img/sharp-libvips-linuxmusl-x64': 1.0.2 - dev: true optional: true /@img/sharp-linuxmusl-x64@0.33.5: @@ -5556,17 +5381,6 @@ packages: requiresBuild: true optionalDependencies: '@img/sharp-libvips-linuxmusl-x64': 1.0.4 - dev: false - optional: true - - 
/@img/sharp-wasm32@0.33.4: - resolution: {integrity: sha512-Bmmauh4sXUsUqkleQahpdNXKvo+wa1V9KhT2pDA4VJGKwnKMJXiSTGphn0gnJrlooda0QxCtXc6RX1XAU6hMnQ==} - engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0, npm: '>=9.6.5', pnpm: '>=7.1.0', yarn: '>=3.2.0'} - cpu: [wasm32] - requiresBuild: true - dependencies: - '@emnapi/runtime': 1.2.0 - dev: true optional: true /@img/sharp-wasm32@0.33.5: @@ -5576,16 +5390,6 @@ packages: requiresBuild: true dependencies: '@emnapi/runtime': 1.2.0 - dev: false - optional: true - - /@img/sharp-win32-ia32@0.33.4: - resolution: {integrity: sha512-99SJ91XzUhYHbx7uhK3+9Lf7+LjwMGQZMDlO/E/YVJ7Nc3lyDFZPGhjwiYdctoH2BOzW9+TnfqcaMKt0jHLdqw==} - engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0, npm: '>=9.6.5', pnpm: '>=7.1.0', yarn: '>=3.2.0'} - cpu: [ia32] - os: [win32] - requiresBuild: true - dev: true optional: true /@img/sharp-win32-ia32@0.33.5: @@ -5594,16 +5398,6 @@ packages: cpu: [ia32] os: [win32] requiresBuild: true - dev: false - optional: true - - /@img/sharp-win32-x64@0.33.4: - resolution: {integrity: sha512-3QLocdTRVIrFNye5YocZl+KKpYKP+fksi1QhmOArgx7GyhIbQp/WrJRu176jm8IxromS7RIkzMiMINVdBtC8Aw==} - engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0, npm: '>=9.6.5', pnpm: '>=7.1.0', yarn: '>=3.2.0'} - cpu: [x64] - os: [win32] - requiresBuild: true - dev: true optional: true /@img/sharp-win32-x64@0.33.5: @@ -5612,7 +5406,6 @@ packages: cpu: [x64] os: [win32] requiresBuild: true - dev: false optional: true /@inkeep/ai-api@0.1.8(zod@3.23.8): @@ -18128,7 +17921,7 @@ packages: '@next/swc-win32-arm64-msvc': 15.0.0-canary.23 '@next/swc-win32-ia32-msvc': 15.0.0-canary.23 '@next/swc-win32-x64-msvc': 15.0.0-canary.23 - sharp: 0.33.4 + sharp: 0.33.5 transitivePeerDependencies: - '@babel/core' - babel-plugin-macros @@ -20579,37 +20372,6 @@ packages: /setprototypeof@1.2.0: resolution: {integrity: sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==} - /sharp@0.33.4: - resolution: {integrity: sha512-7i/dt5kGl7qR4gwPRD2biwD2/SvBn3O04J77XKFgL2OnZtQw+AG9wnuS/csmu80nPRHLYE9E41fyEiG8nhH6/Q==} - engines: {libvips: '>=8.15.2', node: ^18.17.0 || ^20.3.0 || >=21.0.0} - requiresBuild: true - dependencies: - color: 4.2.3 - detect-libc: 2.0.3 - semver: 7.6.3 - optionalDependencies: - '@img/sharp-darwin-arm64': 0.33.4 - '@img/sharp-darwin-x64': 0.33.4 - '@img/sharp-libvips-darwin-arm64': 1.0.2 - '@img/sharp-libvips-darwin-x64': 1.0.2 - '@img/sharp-libvips-linux-arm': 1.0.2 - '@img/sharp-libvips-linux-arm64': 1.0.2 - '@img/sharp-libvips-linux-s390x': 1.0.2 - '@img/sharp-libvips-linux-x64': 1.0.2 - '@img/sharp-libvips-linuxmusl-arm64': 1.0.2 - '@img/sharp-libvips-linuxmusl-x64': 1.0.2 - '@img/sharp-linux-arm': 0.33.4 - '@img/sharp-linux-arm64': 0.33.4 - '@img/sharp-linux-s390x': 0.33.4 - '@img/sharp-linux-x64': 0.33.4 - '@img/sharp-linuxmusl-arm64': 0.33.4 - '@img/sharp-linuxmusl-x64': 0.33.4 - '@img/sharp-wasm32': 0.33.4 - '@img/sharp-win32-ia32': 0.33.4 - '@img/sharp-win32-x64': 0.33.4 - dev: true - optional: true - /sharp@0.33.5: resolution: {integrity: sha512-haPVm1EkS9pgvHrQ/F3Xy+hgcuMV0Wm9vfIBSiwZ05k+xgb0PkBQpGsAA/oWdDobNaZTH5ppvHtzCFbnSEwHVw==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} @@ -20638,7 +20400,6 @@ packages: '@img/sharp-wasm32': 0.33.5 '@img/sharp-win32-ia32': 0.33.5 '@img/sharp-win32-x64': 0.33.5 - dev: false optional: true /shebang-command@1.2.0: From 548698b560dd8cc94066bac42e86ca1e119586ec Mon Sep 17 00:00:00 2001 From: Nico Albanese <49612682+nicoalbanese@users.noreply.github.com> Date: Tue, 3 Sep 2024 15:25:56 +0100 Subject: 
[PATCH 05/11] docs: add workers provider page (#2885) --- .../11-cloudflare-workers-ai.mdx | 155 ++++++++++++++++++ 1 file changed, 155 insertions(+) create mode 100644 content/providers/03-community-providers/11-cloudflare-workers-ai.mdx diff --git a/content/providers/03-community-providers/11-cloudflare-workers-ai.mdx b/content/providers/03-community-providers/11-cloudflare-workers-ai.mdx new file mode 100644 index 000000000000..aebf19273e2e --- /dev/null +++ b/content/providers/03-community-providers/11-cloudflare-workers-ai.mdx @@ -0,0 +1,155 @@ +--- +title: Cloudflare Workers AI +description: Learn how to use the Cloudflare Workers AI provider with the Vercel AI SDK. +--- + +# Cloudflare Workers AI + +[threepointone/workers-ai-provider](https://github.com/threepointone/workers-ai-provider/) is a community provider that enables Cloudflare's [Workers AI](https://ai.cloudflare.com/) models for use the Vercel AI SDK. + +## Setup + +The Cloudflare Workers AI provider is available in the `workers-ai-provider` module. You can install it with: + + + + + + + + + + + + + +Then, setup an AI binding in your Cloudflare Workers project `wrangler.toml` file: + +```bash filename="wrangler.toml" +# ... +[ai] +binding = "AI" +# ... +``` + +## Provider Instance + +To create a `workersai` provider instance, use the `createWorkersAI` function, passing in the AI binding as an option: + +```typescript +import { createWorkersAI } from 'workers-ai-provider'; + +const workersai = createWorkersAI({ binding: env.AI }); +``` + +## Language Models + +To create a model instance, call the provider instance and specify the model you would like to use as the first argument. You can also pass additional settings in the second argument: + +```typescript highlight="4-7" +import { createWorkersAI } from 'workers-ai-provider'; + +const workersai = createWorkersAI({ binding: env.AI }); +const model = workersai('@cf/meta/llama-3.1-8b-instruct', { + // additional settings + safePrompt: true, +}); +``` + +You can use the following optional settings to customize: + +- **safePrompt** _boolean_ + + Whether to inject a safety prompt before all conversations. 
Defaults to `false` + +### Examples + +You can use Cloudflare Workers AI language models to generate text with the **`generateText`** or **`streamText`** function: + +#### `generateText` + +```typescript +import { createWorkersAI } from 'workers-ai-provider'; +import { generateText } from 'ai'; + +type Env = { + AI: Ai; +}; + +export default { + async fetch(_: Request, env: Env) { + const workersai = createWorkersAI({ binding: env.AI }); + const result = await generateText({ + model: workersai('@cf/meta/llama-2-7b-chat-int8'), + prompt: 'Write a 50-word essay about hello world.', + }); + + return new Response(result.text); + }, +}; +``` + +#### `streamText` + +```typescript +import { createWorkersAI } from 'workers-ai-provider'; +import { streamText } from 'ai'; + +type Env = { + AI: Ai; +}; + +export default { + async fetch(_: Request, env: Env) { + const workersai = createWorkersAI({ binding: env.AI }); + const result = await streamText({ + model: workersai('@cf/meta/llama-2-7b-chat-int8'), + prompt: 'Write a 50-word essay about hello world.', + }); + + return result.toTextStreamResponse({ + headers: { + // add these headers to ensure that the + // response is chunked and streamed + 'Content-Type': 'text/x-unknown', + 'content-encoding': 'identity', + 'transfer-encoding': 'chunked', + }, + }); + }, +}; +``` + +#### `generateObject` + +Some Cloudflare Workers AI language models can also be used with the `generateObject` function: + +```typescript +import { createWorkersAI } from 'workers-ai-provider'; +import { generateObject } from 'ai'; +import { z } from 'zod'; + +type Env = { + AI: Ai; +}; + +export default { + async fetch(_: Request, env: Env) { + const workersai = createWorkersAI({ binding: env.AI }); + const result = await generateObject({ + model: workersai('@cf/meta/llama-3.1-8b-instruct'), + prompt: 'Generate a Lasagna recipe', + schema: z.object({ + recipe: z.object({ + ingredients: z.array(z.string()), + description: z.string(), + }), + }), + }); + + return Response.json(result.object); + }, +}; +``` + +`tools` and `streamObject` are currently not supported. From 8b19cf0685e4a218d664b51eec3f5795708d24e5 Mon Sep 17 00:00:00 2001 From: Nico Albanese <49612682+nicoalbanese@users.noreply.github.com> Date: Tue, 3 Sep 2024 15:57:27 +0100 Subject: [PATCH 06/11] docs: fix cf provider page (#2886) --- .../03-community-providers/11-cloudflare-workers-ai.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/providers/03-community-providers/11-cloudflare-workers-ai.mdx b/content/providers/03-community-providers/11-cloudflare-workers-ai.mdx index aebf19273e2e..fe86c54dce4e 100644 --- a/content/providers/03-community-providers/11-cloudflare-workers-ai.mdx +++ b/content/providers/03-community-providers/11-cloudflare-workers-ai.mdx @@ -5,7 +5,7 @@ description: Learn how to use the Cloudflare Workers AI provider with the Vercel # Cloudflare Workers AI -[threepointone/workers-ai-provider](https://github.com/threepointone/workers-ai-provider/) is a community provider that enables Cloudflare's [Workers AI](https://ai.cloudflare.com/) models for use the Vercel AI SDK. +[threepointone/workers-ai-provider](https://github.com/threepointone/workers-ai-provider/) is a community provider that allows you to use Cloudflare's [Workers AI](https://ai.cloudflare.com/) models with the Vercel AI SDK. 
## Setup From 3be7c1cc734eb2a5c0ea16dc22d0f11b89170079 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Wed, 4 Sep 2024 09:32:58 +0200 Subject: [PATCH 07/11] fix (provider/anthropic): support prompt caching on assistant messages (#2890) --- .changeset/two-boxes-attend.md | 7 + .../core/generate-text/generate-text.test.ts | 11 +- .../ai/core/generate-text/stream-text.test.ts | 11 +- packages/ai/core/prompt/content-part.ts | 7 + .../convert-to-language-model-prompt.test.ts | 43 ++++++ .../convert-to-language-model-prompt.ts | 16 ++- .../src/anthropic-messages-prompt.ts | 1 + ...nvert-to-anthropic-messages-prompt.test.ts | 129 ++++++++++++++++++ .../convert-to-anthropic-messages-prompt.ts | 25 +++- .../v1/language-model-v1-call-options.ts | 8 ++ .../v1/language-model-v1-prompt.ts | 7 + 11 files changed, 248 insertions(+), 17 deletions(-) create mode 100644 .changeset/two-boxes-attend.md diff --git a/.changeset/two-boxes-attend.md b/.changeset/two-boxes-attend.md new file mode 100644 index 000000000000..84408377808c --- /dev/null +++ b/.changeset/two-boxes-attend.md @@ -0,0 +1,7 @@ +--- +'@ai-sdk/anthropic': patch +'@ai-sdk/provider': patch +'ai': patch +--- + +fix (provider/anthropic): support prompt caching on assistant messages diff --git a/packages/ai/core/generate-text/generate-text.test.ts b/packages/ai/core/generate-text/generate-text.test.ts index ed09f5312e3e..ec133fa2ca54 100644 --- a/packages/ai/core/generate-text/generate-text.test.ts +++ b/packages/ai/core/generate-text/generate-text.test.ts @@ -390,7 +390,7 @@ describe('result.responseMessages', () => { doGenerate: async ({ prompt, mode }) => { switch (responseCount++) { case 0: - assert.deepStrictEqual(mode, { + expect(mode).toStrictEqual({ type: 'regular', toolChoice: { type: 'auto' }, tools: [ @@ -408,7 +408,8 @@ describe('result.responseMessages', () => { }, ], }); - assert.deepStrictEqual(prompt, [ + + expect(prompt).toStrictEqual([ { role: 'user', content: [{ type: 'text', text: 'test-input' }], @@ -441,7 +442,7 @@ describe('result.responseMessages', () => { }, }; case 1: - assert.deepStrictEqual(mode, { + expect(mode).toStrictEqual({ type: 'regular', toolChoice: { type: 'auto' }, tools: [ @@ -459,7 +460,8 @@ describe('result.responseMessages', () => { }, ], }); - assert.deepStrictEqual(prompt, [ + + expect(prompt).toStrictEqual([ { role: 'user', content: [ @@ -477,6 +479,7 @@ describe('result.responseMessages', () => { toolCallId: 'call-1', toolName: 'tool1', args: { value: 'value' }, + providerMetadata: undefined, }, ], providerMetadata: undefined, diff --git a/packages/ai/core/generate-text/stream-text.test.ts b/packages/ai/core/generate-text/stream-text.test.ts index 6d009a29ce0c..bce829b2dfeb 100644 --- a/packages/ai/core/generate-text/stream-text.test.ts +++ b/packages/ai/core/generate-text/stream-text.test.ts @@ -1997,7 +1997,7 @@ describe('options.maxToolRoundtrips', () => { doStream: async ({ prompt, mode }) => { switch (responseCount++) { case 0: - assert.deepStrictEqual(mode, { + expect(mode).toStrictEqual({ type: 'regular', tools: [ { @@ -2015,7 +2015,8 @@ describe('options.maxToolRoundtrips', () => { ], toolChoice: { type: 'auto' }, }); - assert.deepStrictEqual(prompt, [ + + expect(prompt).toStrictEqual([ { role: 'user', content: [{ type: 'text', text: 'test-input' }], @@ -2041,7 +2042,7 @@ describe('options.maxToolRoundtrips', () => { rawCall: { rawPrompt: 'prompt', rawSettings: {} }, }; case 1: - assert.deepStrictEqual(mode, { + expect(mode).toStrictEqual({ type: 'regular', tools: [ { @@ -2059,7 +2060,8 @@ 
describe('options.maxToolRoundtrips', () => { ], toolChoice: { type: 'auto' }, }); - assert.deepStrictEqual(prompt, [ + + expect(prompt).toStrictEqual([ { role: 'user', content: [{ type: 'text', text: 'test-input' }], @@ -2072,6 +2074,7 @@ describe('options.maxToolRoundtrips', () => { toolCallId: 'call-1', toolName: 'tool1', args: { value: 'value' }, + providerMetadata: undefined, }, ], providerMetadata: undefined, diff --git a/packages/ai/core/prompt/content-part.ts b/packages/ai/core/prompt/content-part.ts index bc27c2024d3c..6f2bcfe971b9 100644 --- a/packages/ai/core/prompt/content-part.ts +++ b/packages/ai/core/prompt/content-part.ts @@ -84,6 +84,13 @@ Name of the tool that is being called. Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema. */ args: unknown; + + /** +Additional provider-specific metadata. They are passed through +to the provider from the AI SDK and enable provider-specific +functionality that can be fully encapsulated in the provider. + */ + experimental_providerMetadata?: ProviderMetadata; } export const toolCallPartSchema: z.ZodType = z.object({ diff --git a/packages/ai/core/prompt/convert-to-language-model-prompt.test.ts b/packages/ai/core/prompt/convert-to-language-model-prompt.test.ts index 46623d503c73..591846a9bee0 100644 --- a/packages/ai/core/prompt/convert-to-language-model-prompt.test.ts +++ b/packages/ai/core/prompt/convert-to-language-model-prompt.test.ts @@ -230,5 +230,48 @@ describe('convertToLanguageModelMessage', () => { }); }); }); + + describe('tool call parts', () => { + it('should pass through provider metadata', () => { + const result = convertToLanguageModelMessage( + { + role: 'assistant', + content: [ + { + type: 'tool-call', + toolName: 'toolName', + toolCallId: 'toolCallId', + args: {}, + experimental_providerMetadata: { + 'test-provider': { + 'key-a': 'test-value-1', + 'key-b': 'test-value-2', + }, + }, + }, + ], + }, + null, + ); + + expect(result).toEqual({ + role: 'assistant', + content: [ + { + type: 'tool-call', + args: {}, + toolCallId: 'toolCallId', + toolName: 'toolName', + providerMetadata: { + 'test-provider': { + 'key-a': 'test-value-1', + 'key-b': 'test-value-2', + }, + }, + }, + ], + }); + }); + }); }); }); diff --git a/packages/ai/core/prompt/convert-to-language-model-prompt.ts b/packages/ai/core/prompt/convert-to-language-model-prompt.ts index f613851a6ab0..5ae117f7e6b7 100644 --- a/packages/ai/core/prompt/convert-to-language-model-prompt.ts +++ b/packages/ai/core/prompt/convert-to-language-model-prompt.ts @@ -219,10 +219,18 @@ export function convertToLanguageModelMessage( return { role: 'assistant', - content: message.content.filter( - // remove empty text parts: - part => part.type !== 'text' || part.text !== '', - ), + content: message.content + .filter( + // remove empty text parts: + part => part.type !== 'text' || part.text !== '', + ) + .map(part => { + const { experimental_providerMetadata, ...rest } = part; + return { + ...rest, + providerMetadata: experimental_providerMetadata, + }; + }), providerMetadata: message.experimental_providerMetadata, }; } diff --git a/packages/anthropic/src/anthropic-messages-prompt.ts b/packages/anthropic/src/anthropic-messages-prompt.ts index 375d9ae91311..676a1060e986 100644 --- a/packages/anthropic/src/anthropic-messages-prompt.ts +++ b/packages/anthropic/src/anthropic-messages-prompt.ts @@ -40,6 +40,7 @@ export interface AnthropicToolCallContent { id: string; name: string; input: unknown; + cache_control?: AnthropicCacheControl; } 
export interface AnthropicToolResultContent { diff --git a/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts b/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts index 30bed9d5ea43..9604c23ded05 100644 --- a/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts +++ b/packages/anthropic/src/convert-to-anthropic-messages-prompt.test.ts @@ -405,6 +405,135 @@ describe('cache control', () => { }); }); + describe('assistant message', () => { + it('should set cache_control on assistant message text part with part cache control', async () => { + const result = convertToAnthropicMessagesPrompt({ + prompt: [ + { role: 'user', content: [{ type: 'text', text: 'user-content' }] }, + { + role: 'assistant', + content: [ + { + type: 'text', + text: 'test', + providerMetadata: { + anthropic: { + cacheControl: { type: 'ephemeral' }, + }, + }, + }, + ], + }, + ], + cacheControl: true, + }); + + expect(result).toEqual({ + messages: [ + { role: 'user', content: [{ type: 'text', text: 'user-content' }] }, + { + role: 'assistant', + content: [ + { + type: 'text', + text: 'test', + cache_control: { type: 'ephemeral' }, + }, + ], + }, + ], + system: undefined, + }); + }); + + it('should set cache_control on assistant tool call part with part cache control', async () => { + const result = convertToAnthropicMessagesPrompt({ + prompt: [ + { role: 'user', content: [{ type: 'text', text: 'user-content' }] }, + { + role: 'assistant', + content: [ + { + type: 'tool-call', + toolCallId: 'test-id', + toolName: 'test-tool', + args: { some: 'arg' }, + providerMetadata: { + anthropic: { + cacheControl: { type: 'ephemeral' }, + }, + }, + }, + ], + }, + ], + cacheControl: true, + }); + + expect(result).toEqual({ + messages: [ + { role: 'user', content: [{ type: 'text', text: 'user-content' }] }, + { + role: 'assistant', + content: [ + { + type: 'tool_use', + name: 'test-tool', + id: 'test-id', + input: { some: 'arg' }, + cache_control: { type: 'ephemeral' }, + }, + ], + }, + ], + system: undefined, + }); + }); + + it('should set cache_control on last assistant message part with message cache control', async () => { + const result = convertToAnthropicMessagesPrompt({ + prompt: [ + { role: 'user', content: [{ type: 'text', text: 'user-content' }] }, + { + role: 'assistant', + content: [ + { type: 'text', text: 'part1' }, + { type: 'text', text: 'part2' }, + ], + providerMetadata: { + anthropic: { + cacheControl: { type: 'ephemeral' }, + }, + }, + }, + ], + cacheControl: true, + }); + + expect(result).toEqual({ + messages: [ + { role: 'user', content: [{ type: 'text', text: 'user-content' }] }, + { + role: 'assistant', + content: [ + { + type: 'text', + text: 'part1', + cache_control: undefined, + }, + { + type: 'text', + text: 'part2', + cache_control: { type: 'ephemeral' }, + }, + ], + }, + ], + system: undefined, + }); + }); + }); + describe('tool message', () => { it('should set cache_control on tool result message part with part cache control', async () => { const result = convertToAnthropicMessagesPrompt({ diff --git a/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts b/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts index 4a28c9e8a231..a357d9abafc0 100644 --- a/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts +++ b/packages/anthropic/src/convert-to-anthropic-messages-prompt.ts @@ -72,13 +72,13 @@ export function convertToAnthropicMessagesPrompt({ const { role, content } = message; switch (role) { case 'user': { - for (let i = 0; i < 
content.length; i++) { - const part = content[i]; + for (let j = 0; j < content.length; j++) { + const part = content[j]; // cache control: first add cache control from part. // for the last part of a message, // check also if the message has cache control. - const isLastPart = i === content.length - 1; + const isLastPart = j === content.length - 1; const cacheControl = getCacheControl(part.providerMetadata) ?? @@ -162,9 +162,23 @@ export function convertToAnthropicMessagesPrompt({ // combines multiple assistant messages in this block into a single message: const anthropicContent: AnthropicAssistantMessage['content'] = []; - for (const { content } of block.messages) { + for (const message of block.messages) { + const { content } = message; + for (let j = 0; j < content.length; j++) { const part = content[j]; + + // cache control: first add cache control from part. + // for the last part of a message, + // check also if the message has cache control. + const isLastPart = j === content.length - 1; + + const cacheControl = + getCacheControl(part.providerMetadata) ?? + (isLastPart + ? getCacheControl(message.providerMetadata) + : undefined); + switch (part.type) { case 'text': { anthropicContent.push({ @@ -177,7 +191,7 @@ export function convertToAnthropicMessagesPrompt({ ? part.text.trim() : part.text, - cache_control: undefined, // not used in assistant messages + cache_control: cacheControl, }); break; } @@ -188,6 +202,7 @@ export function convertToAnthropicMessagesPrompt({ id: part.toolCallId, name: part.toolName, input: part.args, + cache_control: cacheControl, }); break; } diff --git a/packages/provider/src/language-model/v1/language-model-v1-call-options.ts b/packages/provider/src/language-model/v1/language-model-v1-call-options.ts index abe9da877e98..1b40674db947 100644 --- a/packages/provider/src/language-model/v1/language-model-v1-call-options.ts +++ b/packages/provider/src/language-model/v1/language-model-v1-call-options.ts @@ -2,6 +2,7 @@ import { JSONSchema7 } from 'json-schema'; import { LanguageModelV1CallSettings } from './language-model-v1-call-settings'; import { LanguageModelV1FunctionTool } from './language-model-v1-function-tool'; import { LanguageModelV1Prompt } from './language-model-v1-prompt'; +import { LanguageModelV1ProviderMetadata } from './language-model-v1-provider-metadata'; import { LanguageModelV1ToolChoice } from './language-model-v1-tool-choice'; export type LanguageModelV1CallOptions = LanguageModelV1CallSettings & { @@ -76,4 +77,11 @@ That approach allows us to evolve the user facing prompts without breaking the language model interface. */ prompt: LanguageModelV1Prompt; + + /** + * Additional provider-specific metadata. They are passed through + * to the provider from the AI SDK and enable provider-specific + * functionality that can be fully encapsulated in the provider. + */ + providerMetadata?: LanguageModelV1ProviderMetadata; }; diff --git a/packages/provider/src/language-model/v1/language-model-v1-prompt.ts b/packages/provider/src/language-model/v1/language-model-v1-prompt.ts index 2661821789c0..05f96344ee8f 100644 --- a/packages/provider/src/language-model/v1/language-model-v1-prompt.ts +++ b/packages/provider/src/language-model/v1/language-model-v1-prompt.ts @@ -104,6 +104,13 @@ Name of the tool that is being called. Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema. */ args: unknown; + + /** + * Additional provider-specific metadata. 
They are passed through + * to the provider from the AI SDK and enable provider-specific + * functionality that can be fully encapsulated in the provider. + */ + providerMetadata?: LanguageModelV1ProviderMetadata; } /** From b39c33ef57df51bc83715543d56041c235d0f6b1 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Wed, 4 Sep 2024 10:12:54 +0200 Subject: [PATCH 08/11] chore (ai/provider): language model v1 spec cleanups (#2892) --- .../src/language-model/v1/language-model-v1.ts | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/packages/provider/src/language-model/v1/language-model-v1.ts b/packages/provider/src/language-model/v1/language-model-v1.ts index 152884dda8a4..a5b51d163122 100644 --- a/packages/provider/src/language-model/v1/language-model-v1.ts +++ b/packages/provider/src/language-model/v1/language-model-v1.ts @@ -135,8 +135,10 @@ results that can be fully encapsulated in the provider. providerMetadata?: LanguageModelV1ProviderMetadata; /** - Logprobs for the completion. - `undefined` if the mode does not support logprobs or if was not enabled +Logprobs for the completion. +`undefined` if the mode does not support logprobs or if was not enabled + +@deprecated will be changed into a provider-specific extension in v2 */ logprobs?: LanguageModelV1LogProbs; }>; @@ -205,12 +207,12 @@ export type LanguageModelV1StreamPart = | { type: 'finish'; finishReason: LanguageModelV1FinishReason; - logprobs?: LanguageModelV1LogProbs; providerMetadata?: LanguageModelV1ProviderMetadata; usage: { promptTokens: number; completionTokens: number }; + + // @deprecated - will be changed into a provider-specific extension in v2 + logprobs?: LanguageModelV1LogProbs; } // error parts are streamed, allowing for multiple errors | { type: 'error'; error: unknown }; - -export type LanguageModelV1ResponseMetadata = {}; From b93f963462f52a72a59a77619ccff669244fcfe2 Mon Sep 17 00:00:00 2001 From: Walter Korman Date: Wed, 4 Sep 2024 23:05:20 -0700 Subject: [PATCH 09/11] fix (docs): Correct minor typo in `tool` reference (#2898) --- content/docs/07-reference/ai-sdk-core/20-tool.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/docs/07-reference/ai-sdk-core/20-tool.mdx b/content/docs/07-reference/ai-sdk-core/20-tool.mdx index ca8557928606..5434e8276822 100644 --- a/content/docs/07-reference/ai-sdk-core/20-tool.mdx +++ b/content/docs/07-reference/ai-sdk-core/20-tool.mdx @@ -5,7 +5,7 @@ description: Helper function for tool type inference # `tool()` -Tool is a helper function that infers the tool paramaters for its `execute` method. +Tool is a helper function that infers the tool parameters for its `execute` method. It does not have any runtime behavior, but it helps TypeScript infer the types of the parameters for the `execute` method. 
From 03313cd6255b395f6a13ca62af2b7b265fabb9ef Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Thu, 5 Sep 2024 13:57:16 +0200 Subject: [PATCH 10/11] feat (ai): expose response id, response model, response timestamp in telemetry and api (#2893) --- .changeset/calm-walls-sin.md | 11 + content/docs/03-ai-sdk-core/60-telemetry.mdx | 73 +- .../ai-sdk-core/01-generate-text.mdx | 56 +- .../ai-sdk-core/02-stream-text.mdx | 119 ++- .../ai-sdk-core/03-generate-object.mdx | 28 +- .../ai-sdk-core/04-stream-object.mdx | 88 ++- .../src/generate-object/openai-full-json.ts | 30 - .../src/generate-object/openai-full-result.ts | 24 + .../ai-core/src/generate-object/openai.ts | 4 +- .../generate-text/anthropic-full-result.ts | 14 + .../src/generate-text/mistral-full-result.ts | 14 + .../src/generate-text/openai-full-result.ts | 14 + .../src/stream-text/cohere-response.ts | 20 + .../src/stream-text/openai-fullstream-raw.ts | 16 + ...response-headers.ts => openai-response.ts} | 14 +- .../ai-core/src/telemetry/generate-object.ts | 48 ++ .../ai-core/src/telemetry/generate-text.ts | 2 +- .../ai-core/src/telemetry/stream-object.ts | 51 ++ examples/ai-core/src/telemetry/stream-text.ts | 11 +- packages/ai/core/embed/embed-many-result.ts | 4 +- packages/ai/core/embed/embed-result.ts | 4 +- .../generate-object.test.ts.snap | 20 + .../__snapshots__/stream-object.test.ts.snap | 147 ++++ .../generate-object/generate-object-result.ts | 20 +- .../generate-object/generate-object.test.ts | 95 +++ .../core/generate-object/generate-object.ts | 95 ++- packages/ai/core/generate-object/index.ts | 11 +- .../generate-object/stream-object-result.ts | 44 +- .../generate-object/stream-object.test.ts | 347 +++++---- .../ai/core/generate-object/stream-object.ts | 145 +++- .../__snapshots__/generate-text.test.ts.snap | 78 ++ .../__snapshots__/stream-text.test.ts.snap | 100 +++ .../generate-text/generate-text-result.ts | 37 +- .../core/generate-text/generate-text.test.ts | 117 +-- .../ai/core/generate-text/generate-text.ts | 63 +- packages/ai/core/generate-text/index.ts | 4 +- .../generate-text/run-tools-transformation.ts | 24 +- .../core/generate-text/stream-text-result.ts | 29 +- .../ai/core/generate-text/stream-text.test.ts | 702 ++++++++++++------ packages/ai/core/generate-text/stream-text.ts | 95 ++- .../ai/core/test/mock-embedding-model-v1.ts | 4 +- packages/ai/core/test/mock-id.ts | 4 + .../core/test/{mock-now.ts => mock-values.ts} | 2 +- packages/ai/core/types/index.ts | 23 +- packages/ai/core/types/language-model.ts | 24 + .../core/types/{token-usage.ts => usage.ts} | 22 +- packages/ai/rsc/stream-ui/stream-ui.tsx | 10 +- .../anthropic-messages-language-model.test.ts | 46 +- .../src/anthropic-messages-language-model.ts | 14 + .../src/cohere-chat-language-model.test.ts | 27 +- .../cohere/src/cohere-chat-language-model.ts | 16 +- packages/mistral/src/get-response-metadata.ts | 15 + .../src/mistral-chat-language-model.test.ts | 45 +- .../src/mistral-chat-language-model.ts | 24 +- packages/openai/src/get-response-metadata.ts | 15 + .../src/openai-chat-language-model.test.ts | 62 +- .../openai/src/openai-chat-language-model.ts | 18 + .../openai-completion-language-model.test.ts | 38 +- .../src/openai-completion-language-model.ts | 27 +- packages/provider-utils/src/generate-id.ts | 27 +- packages/provider-utils/src/index.ts | 2 +- .../language-model/v1/language-model-v1.ts | 32 +- 62 files changed, 2605 insertions(+), 710 deletions(-) create mode 100644 .changeset/calm-walls-sin.md delete mode 100644 
examples/ai-core/src/generate-object/openai-full-json.ts create mode 100644 examples/ai-core/src/generate-object/openai-full-result.ts create mode 100644 examples/ai-core/src/generate-text/anthropic-full-result.ts create mode 100644 examples/ai-core/src/generate-text/mistral-full-result.ts create mode 100644 examples/ai-core/src/generate-text/openai-full-result.ts create mode 100644 examples/ai-core/src/stream-text/cohere-response.ts create mode 100644 examples/ai-core/src/stream-text/openai-fullstream-raw.ts rename examples/ai-core/src/stream-text/{openai-response-headers.ts => openai-response.ts} (64%) create mode 100644 examples/ai-core/src/telemetry/generate-object.ts create mode 100644 examples/ai-core/src/telemetry/stream-object.ts create mode 100644 packages/ai/core/test/mock-id.ts rename packages/ai/core/test/{mock-now.ts => mock-values.ts} (59%) rename packages/ai/core/types/{token-usage.ts => usage.ts} (58%) create mode 100644 packages/mistral/src/get-response-metadata.ts create mode 100644 packages/openai/src/get-response-metadata.ts diff --git a/.changeset/calm-walls-sin.md b/.changeset/calm-walls-sin.md new file mode 100644 index 000000000000..62e5c1fc7da9 --- /dev/null +++ b/.changeset/calm-walls-sin.md @@ -0,0 +1,11 @@ +--- +'@ai-sdk/provider-utils': patch +'@ai-sdk/anthropic': patch +'@ai-sdk/provider': patch +'@ai-sdk/mistral': patch +'@ai-sdk/cohere': patch +'@ai-sdk/openai': patch +'ai': patch +--- + +feat (ai): expose response id, response model, response timestamp in telemetry and api diff --git a/content/docs/03-ai-sdk-core/60-telemetry.mdx b/content/docs/03-ai-sdk-core/60-telemetry.mdx index 3316eaf69569..35e502f0faff 100644 --- a/content/docs/03-ai-sdk-core/60-telemetry.mdx +++ b/content/docs/03-ai-sdk-core/60-telemetry.mdx @@ -61,8 +61,9 @@ const result = await generateText({ `generateText` records 3 types of spans: -- `ai.generateText`: the full length of the generateText call. It contains 1 or more `ai.generateText.doGenerate` spans. +- `ai.generateText` (span): the full length of the generateText call. It contains 1 or more `ai.generateText.doGenerate` spans. It contains the [basic LLM span information](#basic-llm-span-information) and the following attributes: + - `operation.name`: `ai.generateText` and the functionId that was set through `telemetry.functionId` - `ai.operationId`: `"ai.generateText"` - `ai.prompt`: the prompt that was used when calling `generateText` @@ -70,8 +71,10 @@ const result = await generateText({ - `ai.response.toolCalls`: the tool calls that were made as part of the generation (stringified JSON) - `ai.response.finishReason`: the reason why the generation finished - `ai.settings.maxToolRoundtrips`: the maximum number of tool roundtrips that were set -- `ai.generateText.doGenerate`: a provider doGenerate call. It can contain `ai.toolCall` spans. - It contains the [basic LLM span information](#basic-llm-span-information) and the following attributes: + +- `ai.generateText.doGenerate` (span): a provider doGenerate call. It can contain `ai.toolCall` spans. 
+ It contains the [call LLM span information](#call-llm-span-information) and the following attributes: + - `operation.name`: `ai.generateText.doGenerate` and the functionId that was set through `telemetry.functionId` - `ai.operationId`: `"ai.generateText.doGenerate"` - `ai.prompt.format`: the format of the prompt @@ -79,23 +82,26 @@ const result = await generateText({ - `ai.response.text`: the text that was generated - `ai.response.toolCalls`: the tool calls that were made as part of the generation (stringified JSON) - `ai.response.finishReason`: the reason why the generation finished -- `ai.toolCall`: a tool call that is made as part of the generateText call. See [Tool call spans](#tool-call-spans) for more details. + +- `ai.toolCall` (span): a tool call that is made as part of the generateText call. See [Tool call spans](#tool-call-spans) for more details. ### streamText function -`streamText` records 3 types of spans: +`streamText` records 3 types of spans and 2 types of events: -- `ai.streamText`: the full length of the streamText call. It contains a `ai.streamText.doStream` span. +- `ai.streamText` (span): the full length of the streamText call. It contains a `ai.streamText.doStream` span. It contains the [basic LLM span information](#basic-llm-span-information) and the following attributes: + - `operation.name`: `ai.streamText` and the functionId that was set through `telemetry.functionId` - `ai.operationId`: `"ai.streamText"` - `ai.prompt`: the prompt that was used when calling `streamText` - `ai.response.text`: the text that was generated - `ai.response.toolCalls`: the tool calls that were made as part of the generation (stringified JSON) - `ai.response.finishReason`: the reason why the generation finished -- `ai.streamText.doStream`: a provider doStream call. + +- `ai.streamText.doStream` (span): a provider doStream call. This span contains an `ai.stream.firstChunk` event and `ai.toolCall` spans. - It contains the [basic LLM span information](#basic-llm-span-information) and the following attributes: + It contains the [call LLM span information](#call-llm-span-information) and the following attributes: - `operation.name`: `ai.streamText.doStream` and the functionId that was set through `telemetry.functionId` - `ai.operationId`: `"ai.streamText.doStream"` @@ -108,10 +114,13 @@ const result = await generateText({ - `ai.response.avgCompletionTokensPerSecond`: the average number of completion tokens per second - `ai.response.finishReason`: the reason why the generation finished +- `ai.toolCall` (span): a tool call that is made as part of the generateText call. See [Tool call spans](#tool-call-spans) for more details. + - `ai.stream.firstChunk` (event): an event that is emitted when the first chunk of the stream is received. + - `ai.response.msToFirstChunk`: the time it took to receive the first chunk + - `ai.stream.finish` (event): an event that is emitted when the finish part of the LLM stream is received. -- `ai.toolCall`: a tool call that is made as part of the generateText call. See [Tool call spans](#tool-call-spans) for more details. It also records a `ai.stream.firstChunk` event when the first chunk of the stream is received. @@ -119,8 +128,9 @@ It also records a `ai.stream.firstChunk` event when the first chunk of the strea `generateObject` records 2 types of spans: -- `ai.generateObject`: the full length of the generateObject call. It contains 1 or more `ai.generateObject.doGenerate` spans. +- `ai.generateObject` (span): the full length of the generateObject call. 
It contains 1 or more `ai.generateObject.doGenerate` spans. It contains the [basic LLM span information](#basic-llm-span-information) and the following attributes: + - `operation.name`: `ai.generateObject` and the functionId that was set through `telemetry.functionId` - `ai.operationId`: `"ai.generateObject"` - `ai.prompt`: the prompt that was used when calling `generateObject` @@ -130,8 +140,10 @@ It also records a `ai.stream.firstChunk` event when the first chunk of the strea - `ai.response.object`: the object that was generated (stringified JSON) - `ai.settings.mode`: the object generation mode, e.g. `json` - `ai.settings.output`: the output type that was used, e.g. `object` or `no-schema` -- `ai.generateObject.doGenerate`: a provider doGenerate call. - It contains the [basic LLM span information](#basic-llm-span-information) and the following attributes: + +- `ai.generateObject.doGenerate` (span): a provider doGenerate call. + It contains the [call LLM span information](#call-llm-span-information) and the following attributes: + - `operation.name`: `ai.generateObject.doGenerate` and the functionId that was set through `telemetry.functionId` - `ai.operationId`: `"ai.generateObject.doGenerate"` - `ai.prompt.format`: the format of the prompt @@ -142,10 +154,11 @@ It also records a `ai.stream.firstChunk` event when the first chunk of the strea ### streamObject function -`streamObject` records 2 types of spans: +`streamObject` records 2 types of spans and 1 type of event: -- `ai.streamObject`: the full length of the streamObject call. It contains 1 or more `ai.streamObject.doStream` spans. +- `ai.streamObject` (span): the full length of the streamObject call. It contains 1 or more `ai.streamObject.doStream` spans. It contains the [basic LLM span information](#basic-llm-span-information) and the following attributes: + - `operation.name`: `ai.streamObject` and the functionId that was set through `telemetry.functionId` - `ai.operationId`: `"ai.streamObject"` - `ai.prompt`: the prompt that was used when calling `streamObject` @@ -155,9 +168,11 @@ It also records a `ai.stream.firstChunk` event when the first chunk of the strea - `ai.response.object`: the object that was generated (stringified JSON) - `ai.settings.mode`: the object generation mode, e.g. `json` - `ai.settings.output`: the output type that was used, e.g. `object` or `no-schema` -- `ai.streamObject.doStream`: a provider doStream call. + +- `ai.streamObject.doStream` (span): a provider doStream call. This span contains an `ai.stream.firstChunk` event. - It contains the [basic LLM span information](#basic-llm-span-information) and the following attributes: + It contains the [call LLM span information](#call-llm-span-information) and the following attributes: + - `operation.name`: `ai.streamObject.doStream` and the functionId that was set through `telemetry.functionId` - `ai.operationId`: `"ai.streamObject.doStream"` - `ai.prompt.format`: the format of the prompt @@ -166,6 +181,7 @@ It also records a `ai.stream.firstChunk` event when the first chunk of the strea - `ai.response.object`: the object that was generated (stringified JSON) - `ai.response.msToFirstChunk`: the time it took to receive the first chunk - `ai.response.finishReason`: the reason why the generation finished + - `ai.stream.firstChunk` (event): an event that is emitted when the first chunk of the stream is received. 
- `ai.response.msToFirstChunk`: the time it took to receive the first chunk @@ -173,14 +189,17 @@ It also records a `ai.stream.firstChunk` event when the first chunk of the strea `embed` records 2 types of spans: -- `ai.embed`: the full length of the embed call. It contains 1 `ai.embed.doEmbed` spans. +- `ai.embed` (span): the full length of the embed call. It contains 1 `ai.embed.doEmbed` spans. It contains the [basic embedding span information](#basic-embedding-span-information) and the following attributes: + - `operation.name`: `ai.embed` and the functionId that was set through `telemetry.functionId` - `ai.operationId`: `"ai.embed"` - `ai.value`: the value that was passed into the `embed` function - `ai.embedding`: a JSON-stringified embedding -- `ai.embed.doEmbed`: a provider doEmbed call. + +- `ai.embed.doEmbed` (span): a provider doEmbed call. It contains the [basic embedding span information](#basic-embedding-span-information) and the following attributes: + - `operation.name`: `ai.embed.doEmbed` and the functionId that was set through `telemetry.functionId` - `ai.operationId`: `"ai.embed.doEmbed"` - `ai.values`: the values that were passed into the provider (array) @@ -190,14 +209,17 @@ It also records a `ai.stream.firstChunk` event when the first chunk of the strea `embedMany` records 2 types of spans: -- `ai.embedMany`: the full length of the embedMany call. It contains 1 or more `ai.embedMany.doEmbed` spans. +- `ai.embedMany` (span): the full length of the embedMany call. It contains 1 or more `ai.embedMany.doEmbed` spans. It contains the [basic embedding span information](#basic-embedding-span-information) and the following attributes: + - `operation.name`: `ai.embedMany` and the functionId that was set through `telemetry.functionId` - `ai.operationId`: `"ai.embedMany"` - `ai.values`: the values that were passed into the `embedMany` function - `ai.embeddings`: an array of JSON-stringified embedding -- `ai.embedMany.doEmbed`: a provider doEmbed call. + +- `ai.embedMany.doEmbed` (span): a provider doEmbed call. It contains the [basic embedding span information](#basic-embedding-span-information) and the following attributes: + - `operation.name`: `ai.embedMany.doEmbed` and the functionId that was set through `telemetry.functionId` - `ai.operationId`: `"ai.embedMany.doEmbed"` - `ai.values`: the values that were sent to the provider @@ -219,6 +241,15 @@ Many spans that use LLMs (`ai.generateText`, `ai.generateText.doGenerate`, `ai.s - `ai.telemetry.metadata.*`: the metadata that was passed in through `telemetry.metadata` - `ai.usage.completionTokens`: the number of completion tokens that were used - `ai.usage.promptTokens`: the number of prompt tokens that were used + +### Call LLM span information + +Spans that correspond to individual LLM calls (`ai.generateText.doGenerate`, `ai.streamText.doStream`, `ai.generateObject.doGenerate`, `ai.streamObject.doStream`) contain +[basic LLM span information](#basic-llm-span-information) and the following attributes: + +- `ai.response.model`: the model that was used to generate the response. This can be different from the model that was requested if the provider supports aliases. +- `ai.response.id`: the id of the response. Uses the ID from the provider when available. +- `ai.response.timestamp`: the timestamp of the response. Uses the timestamp from the provider when available. 
- [Semantic Conventions for GenAI operations](https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/) - `gen_ai.system`: the provider that was used - `gen_ai.request.model`: the model that was requested @@ -230,6 +261,8 @@ Many spans that use LLMs (`ai.generateText`, `ai.generateText.doGenerate`, `ai.s - `gen_ai.request.top_p`: the topP parameter value that was set - `gen_ai.request.stop_sequences`: the stop sequences - `gen_ai.response.finish_reasons`: the finish reasons that were returned by the provider + - `gen_ai.response.model`: the model that was used to generate the response. This can be different from the model that was requested if the provider supports aliases. + - `gen_ai.response.id`: the id of the response. Uses the ID from the provider when available. - `gen_ai.usage.input_tokens`: the number of prompt tokens that were used - `gen_ai.usage.output_tokens`: the number of completion tokens that were used diff --git a/content/docs/07-reference/ai-sdk-core/01-generate-text.mdx b/content/docs/07-reference/ai-sdk-core/01-generate-text.mdx index bc3a66a91847..38da8f0914b5 100644 --- a/content/docs/07-reference/ai-sdk-core/01-generate-text.mdx +++ b/content/docs/07-reference/ai-sdk-core/01-generate-text.mdx @@ -457,19 +457,37 @@ To see `generateText` in action, check out [these examples](#examples). ], }, { - name: 'rawResponse', - type: 'RawResponse', + name: 'response', + type: 'Response', optional: true, - description: 'Optional raw response data.', + description: 'Response metadata.', properties: [ { - type: 'RawResponse', + type: 'Response', parameters: [ + { + name: 'id', + type: 'string', + description: + 'The response identifier. The AI SDK uses the ID from the provider response when available, and generates an ID otherwise.', + }, + { + name: 'model', + type: 'string', + description: + 'The model that was used to generate the response. The AI SDK uses the response model from the provider response when available, and the model from the function call otherwise.', + }, + { + name: 'timestamp', + type: 'Date', + description: + 'The timestamp of the response. The AI SDK uses the response timestamp from the provider response when available, and creates a timestamp otherwise.', + }, { name: 'headers', optional: true, type: 'Record', - description: 'Response headers.', + description: 'Optional response headers.', }, ], }, @@ -552,19 +570,37 @@ To see `generateText` in action, check out [these examples](#examples). ], }, { - name: 'rawResponse', - type: 'RawResponse', + name: 'response', + type: 'Response', optional: true, - description: 'Optional raw response data.', + description: 'Response metadata.', properties: [ { - type: 'RawResponse', + type: 'Response', parameters: [ + { + name: 'id', + type: 'string', + description: + 'The response identifier. The AI SDK uses the ID from the provider response when available, and generates an ID otherwise.', + }, + { + name: 'model', + type: 'string', + description: + 'The model that was used to generate the response. The AI SDK uses the response model from the provider response when available, and the model from the function call otherwise.', + }, + { + name: 'timestamp', + type: 'Date', + description: + 'The timestamp of the response. 
The AI SDK uses the response timestamp from the provider response when available, and creates a timestamp otherwise.', + }, { name: 'headers', optional: true, type: 'Record', - description: 'Response headers.', + description: 'Optional response headers.', }, ], }, diff --git a/content/docs/07-reference/ai-sdk-core/02-stream-text.mdx b/content/docs/07-reference/ai-sdk-core/02-stream-text.mdx index 40c69ae7a08f..70f106585c60 100644 --- a/content/docs/07-reference/ai-sdk-core/02-stream-text.mdx +++ b/content/docs/07-reference/ai-sdk-core/02-stream-text.mdx @@ -630,18 +630,37 @@ To see `streamText` in action, check out [these examples](#examples). 'Warnings from the model provider (e.g. unsupported settings).', }, { - name: 'rawResponse', - type: 'RawResponse', - description: 'Optional raw response data.', + name: 'response', + type: 'Response', + optional: true, + description: 'Response metadata.', properties: [ { - type: 'RawResponse', + type: 'Response', parameters: [ + { + name: 'id', + type: 'string', + description: + 'The response identifier. The AI SDK uses the ID from the provider response when available, and generates an ID otherwise.', + }, + { + name: 'model', + type: 'string', + description: + 'The model that was used to generate the response. The AI SDK uses the response model from the provider response when available, and the model from the function call otherwise.', + }, + { + name: 'timestamp', + type: 'Date', + description: + 'The timestamp of the response. The AI SDK uses the response timestamp from the provider response when available, and creates a timestamp otherwise.', + }, { name: 'headers', optional: true, type: 'Record', - description: 'Response headers.', + description: 'Optional response headers.', }, ], }, @@ -717,19 +736,37 @@ To see `streamText` in action, check out [these examples](#examples). 'The tool results that have been generated. Resolved when the all tool executions are finished.', }, { - name: 'rawResponse', - type: 'RawResponse', + name: 'response', + type: 'Promise', optional: true, - description: 'Optional raw response data.', + description: 'Response metadata. Resolved when the response is finished.', properties: [ { - type: 'RawResponse', + type: 'Response', parameters: [ + { + name: 'id', + type: 'string', + description: + 'The response identifier. The AI SDK uses the ID from the provider response when available, and generates an ID otherwise.', + }, + { + name: 'model', + type: 'string', + description: + 'The model that was used to generate the response. The AI SDK uses the response model from the provider response when available, and the model from the function call otherwise.', + }, + { + name: 'timestamp', + type: 'Date', + description: + 'The timestamp of the response. The AI SDK uses the response timestamp from the provider response when available, and creates a timestamp otherwise.', + }, { name: 'headers', optional: true, type: 'Record', - description: 'Response headers.', + description: 'Optional response headers.', }, ], }, @@ -936,6 +973,37 @@ To see `streamText` in action, check out [these examples](#examples). }, ], }, + { + name: 'response', + type: 'Response', + optional: true, + description: 'Response metadata.', + properties: [ + { + type: 'Response', + parameters: [ + { + name: 'id', + type: 'string', + description: + 'The response identifier. 
The AI SDK uses the ID from the provider response when available, and generates an ID otherwise.', + }, + { + name: 'model', + type: 'string', + description: + 'The model that was used to generate the response. The AI SDK uses the response model from the provider response when available, and the model from the function call otherwise.', + }, + { + name: 'timestamp', + type: 'Date', + description: + 'The timestamp of the response. The AI SDK uses the response timestamp from the provider response when available, and creates a timestamp otherwise.', + }, + ], + }, + ], + }, ], }, { @@ -979,6 +1047,37 @@ To see `streamText` in action, check out [these examples](#examples). }, ], }, + { + name: 'response', + type: 'Response', + optional: true, + description: 'Response metadata.', + properties: [ + { + type: 'Response', + parameters: [ + { + name: 'id', + type: 'string', + description: + 'The response identifier. The AI SDK uses the ID from the provider response when available, and generates an ID otherwise.', + }, + { + name: 'model', + type: 'string', + description: + 'The model that was used to generate the response. The AI SDK uses the response model from the provider response when available, and the model from the function call otherwise.', + }, + { + name: 'timestamp', + type: 'Date', + description: + 'The timestamp of the response. The AI SDK uses the response timestamp from the provider response when available, and creates a timestamp otherwise.', + }, + ], + }, + ], + }, ], }, ], diff --git a/content/docs/07-reference/ai-sdk-core/03-generate-object.mdx b/content/docs/07-reference/ai-sdk-core/03-generate-object.mdx index e9870a26ed34..a2deedcd1e1e 100644 --- a/content/docs/07-reference/ai-sdk-core/03-generate-object.mdx +++ b/content/docs/07-reference/ai-sdk-core/03-generate-object.mdx @@ -478,19 +478,37 @@ To see `generateObject` in action, check out the [additional examples](#more-exa ], }, { - name: 'rawResponse', - type: 'RawResponse', + name: 'response', + type: 'Response', optional: true, - description: 'Optional raw response data.', + description: 'Response metadata.', properties: [ { - type: 'RawResponse', + type: 'Response', parameters: [ + { + name: 'id', + type: 'string', + description: + 'The response identifier. The AI SDK uses the ID from the provider response when available, and generates an ID otherwise.', + }, + { + name: 'model', + type: 'string', + description: + 'The model that was used to generate the response. The AI SDK uses the response model from the provider response when available, and the model from the function call otherwise.', + }, + { + name: 'timestamp', + type: 'Date', + description: + 'The timestamp of the response. The AI SDK uses the response timestamp from the provider response when available, and creates a timestamp otherwise.', + }, { name: 'headers', optional: true, type: 'Record', - description: 'Response headers.', + description: 'Optional response headers.', }, ], }, diff --git a/content/docs/07-reference/ai-sdk-core/04-stream-object.mdx b/content/docs/07-reference/ai-sdk-core/04-stream-object.mdx index 5e974d97c60e..b47e8883d387 100644 --- a/content/docs/07-reference/ai-sdk-core/04-stream-object.mdx +++ b/content/docs/07-reference/ai-sdk-core/04-stream-object.mdx @@ -508,18 +508,37 @@ To see `streamObject` in action, check out the [additional examples](#more-examp 'Warnings from the model provider (e.g. 
unsupported settings).', }, { - name: 'rawResponse', - type: 'RawResponse', - description: 'Optional raw response data.', + name: 'response', + type: 'Response', + optional: true, + description: 'Response metadata.', properties: [ { - type: 'RawResponse', + type: 'Response', parameters: [ + { + name: 'id', + type: 'string', + description: + 'The response identifier. The AI SDK uses the ID from the provider response when available, and generates an ID otherwise.', + }, + { + name: 'model', + type: 'string', + description: + 'The model that was used to generate the response. The AI SDK uses the response model from the provider response when available, and the model from the function call otherwise.', + }, + { + name: 'timestamp', + type: 'Date', + description: + 'The timestamp of the response. The AI SDK uses the response timestamp from the provider response when available, and creates a timestamp otherwise.', + }, { name: 'headers', optional: true, type: 'Record', - description: 'Response headers.', + description: 'Optional response headers.', }, ], }, @@ -662,24 +681,73 @@ To see `streamObject` in action, check out the [additional examples](#more-examp type: 'Usage', description: 'Token usage.', }, + { + name: 'response', + type: 'Response', + optional: true, + description: 'Response metadata.', + properties: [ + { + type: 'Response', + parameters: [ + { + name: 'id', + type: 'string', + description: + 'The response identifier. The AI SDK uses the ID from the provider response when available, and generates an ID otherwise.', + }, + { + name: 'model', + type: 'string', + description: + 'The model that was used to generate the response. The AI SDK uses the response model from the provider response when available, and the model from the function call otherwise.', + }, + { + name: 'timestamp', + type: 'Date', + description: + 'The timestamp of the response. The AI SDK uses the response timestamp from the provider response when available, and creates a timestamp otherwise.', + }, + ], + }, + ], + }, ], }, ], }, { - name: 'rawResponse', - type: 'RawResponse', + name: 'response', + type: 'Promise', optional: true, - description: 'Optional raw response data.', + description: 'Response metadata. Resolved when the response is finished.', properties: [ { - type: 'RawResponse', + type: 'Response', parameters: [ + { + name: 'id', + type: 'string', + description: + 'The response identifier. The AI SDK uses the ID from the provider response when available, and generates an ID otherwise.', + }, + { + name: 'model', + type: 'string', + description: + 'The model that was used to generate the response. The AI SDK uses the response model from the provider response when available, and the model from the function call otherwise.', + }, + { + name: 'timestamp', + type: 'Date', + description: + 'The timestamp of the response. 
The AI SDK uses the response timestamp from the provider response when available, and creates a timestamp otherwise.', + }, { name: 'headers', optional: true, type: 'Record', - description: 'Response headers.', + description: 'Optional response headers.', }, ], }, diff --git a/examples/ai-core/src/generate-object/openai-full-json.ts b/examples/ai-core/src/generate-object/openai-full-json.ts deleted file mode 100644 index 75853dfecef9..000000000000 --- a/examples/ai-core/src/generate-object/openai-full-json.ts +++ /dev/null @@ -1,30 +0,0 @@ -import { openai } from '@ai-sdk/openai'; -import { generateObject } from 'ai'; -import dotenv from 'dotenv'; -import { z } from 'zod'; - -dotenv.config(); - -async function main() { - const result = await generateObject({ - model: openai('gpt-4-turbo', { logprobs: 2 }), - schema: z.object({ - characters: z.array( - z.object({ - name: z.string(), - class: z - .string() - .describe('Character class, e.g. warrior, mage, or thief.'), - description: z.string(), - }), - ), - }), - mode: 'json', - prompt: - 'Generate 3 character descriptions for a fantasy role playing game.', - }); - - console.log(result); -} - -main().catch(console.error); diff --git a/examples/ai-core/src/generate-object/openai-full-result.ts b/examples/ai-core/src/generate-object/openai-full-result.ts new file mode 100644 index 000000000000..4f9e7d2fce96 --- /dev/null +++ b/examples/ai-core/src/generate-object/openai-full-result.ts @@ -0,0 +1,24 @@ +import { openai } from '@ai-sdk/openai'; +import { generateObject } from 'ai'; +import 'dotenv/config'; +import { z } from 'zod'; + +async function main() { + const result = await generateObject({ + model: openai('gpt-4o-mini', { structuredOutputs: true }), + schema: z.object({ + recipe: z.object({ + name: z.string(), + ingredients: z.array( + z.object({ name: z.string(), amount: z.string() }), + ), + steps: z.array(z.string()), + }), + }), + prompt: 'Generate a lasagna recipe.', + }); + + console.log(JSON.stringify(result, null, 2)); +} + +main().catch(console.error); diff --git a/examples/ai-core/src/generate-object/openai.ts b/examples/ai-core/src/generate-object/openai.ts index cc62e8c14026..0fd3b628a633 100644 --- a/examples/ai-core/src/generate-object/openai.ts +++ b/examples/ai-core/src/generate-object/openai.ts @@ -7,9 +7,7 @@ dotenv.config(); async function main() { const result = await generateObject({ - model: openai('gpt-4o-2024-08-06', { - structuredOutputs: true, - }), + model: openai('gpt-4o-mini', { structuredOutputs: true }), schema: z.object({ recipe: z.object({ name: z.string(), diff --git a/examples/ai-core/src/generate-text/anthropic-full-result.ts b/examples/ai-core/src/generate-text/anthropic-full-result.ts new file mode 100644 index 000000000000..75fdd09823d7 --- /dev/null +++ b/examples/ai-core/src/generate-text/anthropic-full-result.ts @@ -0,0 +1,14 @@ +import { anthropic } from '@ai-sdk/anthropic'; +import { generateText } from 'ai'; +import 'dotenv/config'; + +async function main() { + const result = await generateText({ + model: anthropic('claude-3-5-sonnet-20240620'), + prompt: 'Invent a new holiday and describe its traditions.', + }); + + console.log(JSON.stringify(result, null, 2)); +} + +main().catch(console.error); diff --git a/examples/ai-core/src/generate-text/mistral-full-result.ts b/examples/ai-core/src/generate-text/mistral-full-result.ts new file mode 100644 index 000000000000..80f11ece1fc5 --- /dev/null +++ b/examples/ai-core/src/generate-text/mistral-full-result.ts @@ -0,0 +1,14 @@ +import { mistral } 
from '@ai-sdk/mistral'; +import { generateText } from 'ai'; +import 'dotenv/config'; + +async function main() { + const result = await generateText({ + model: mistral('open-mistral-7b'), + prompt: 'Invent a new holiday and describe its traditions.', + }); + + console.log(JSON.stringify(result, null, 2)); +} + +main().catch(console.error); diff --git a/examples/ai-core/src/generate-text/openai-full-result.ts b/examples/ai-core/src/generate-text/openai-full-result.ts new file mode 100644 index 000000000000..a5f68cf5a363 --- /dev/null +++ b/examples/ai-core/src/generate-text/openai-full-result.ts @@ -0,0 +1,14 @@ +import { openai } from '@ai-sdk/openai'; +import { generateText } from 'ai'; +import 'dotenv/config'; + +async function main() { + const result = await generateText({ + model: openai('gpt-4o-mini'), + prompt: 'Invent a new holiday and describe its traditions.', + }); + + console.log(JSON.stringify(result, null, 2)); +} + +main().catch(console.error); diff --git a/examples/ai-core/src/stream-text/cohere-response.ts b/examples/ai-core/src/stream-text/cohere-response.ts new file mode 100644 index 000000000000..45d0420ec65a --- /dev/null +++ b/examples/ai-core/src/stream-text/cohere-response.ts @@ -0,0 +1,20 @@ +import 'dotenv/config'; +import { cohere } from '@ai-sdk/cohere'; +import { streamText } from 'ai'; + +async function main() { + const result = await streamText({ + model: cohere('command-r-plus'), + maxTokens: 512, + prompt: 'Invent a new holiday and describe its traditions.', + }); + + for await (const textPart of result.textStream) { + process.stdout.write(textPart); + } + + console.log(); + console.log(JSON.stringify(await result.response, null, 2)); +} + +main().catch(console.error); diff --git a/examples/ai-core/src/stream-text/openai-fullstream-raw.ts b/examples/ai-core/src/stream-text/openai-fullstream-raw.ts new file mode 100644 index 000000000000..efa66a1b0479 --- /dev/null +++ b/examples/ai-core/src/stream-text/openai-fullstream-raw.ts @@ -0,0 +1,16 @@ +import { openai } from '@ai-sdk/openai'; +import { streamText } from 'ai'; +import 'dotenv/config'; + +async function main() { + const result = await streamText({ + model: openai('gpt-4o-mini'), + prompt: 'Invent a new holiday and describe its traditions.', + }); + + for await (const part of result.fullStream) { + console.log(JSON.stringify(part)); + } +} + +main().catch(console.error); diff --git a/examples/ai-core/src/stream-text/openai-response-headers.ts b/examples/ai-core/src/stream-text/openai-response.ts similarity index 64% rename from examples/ai-core/src/stream-text/openai-response-headers.ts rename to examples/ai-core/src/stream-text/openai-response.ts index 011c20131e72..2169c5a03cbf 100644 --- a/examples/ai-core/src/stream-text/openai-response-headers.ts +++ b/examples/ai-core/src/stream-text/openai-response.ts @@ -1,24 +1,20 @@ +import 'dotenv/config'; import { openai } from '@ai-sdk/openai'; import { streamText } from 'ai'; -import dotenv from 'dotenv'; - -dotenv.config(); async function main() { const result = await streamText({ - model: openai('gpt-3.5-turbo'), + model: openai('gpt-4o-mini'), maxTokens: 512, - temperature: 0.3, - maxRetries: 5, prompt: 'Invent a new holiday and describe its traditions.', }); - console.log(`Request ID: ${result.rawResponse?.headers?.['x-request-id']}`); - console.log(); - for await (const textPart of result.textStream) { process.stdout.write(textPart); } + + console.log(); + console.log(JSON.stringify(await result.response, null, 2)); } main().catch(console.error); diff 
--git a/examples/ai-core/src/telemetry/generate-object.ts b/examples/ai-core/src/telemetry/generate-object.ts new file mode 100644 index 000000000000..e617394d3400 --- /dev/null +++ b/examples/ai-core/src/telemetry/generate-object.ts @@ -0,0 +1,48 @@ +import 'dotenv/config'; + +import { openai } from '@ai-sdk/openai'; +import { getNodeAutoInstrumentations } from '@opentelemetry/auto-instrumentations-node'; +import { NodeSDK } from '@opentelemetry/sdk-node'; +import { ConsoleSpanExporter } from '@opentelemetry/sdk-trace-node'; +import { generateObject } from 'ai'; +import { z } from 'zod'; + +const sdk = new NodeSDK({ + traceExporter: new ConsoleSpanExporter(), + instrumentations: [getNodeAutoInstrumentations()], +}); + +sdk.start(); + +async function main() { + const result = await generateObject({ + model: openai('gpt-4o-mini', { structuredOutputs: true }), + schema: z.object({ + recipe: z.object({ + name: z.string(), + ingredients: z.array( + z.object({ + name: z.string(), + amount: z.string(), + }), + ), + steps: z.array(z.string()), + }), + }), + prompt: 'Generate a lasagna recipe.', + experimental_telemetry: { + isEnabled: true, + functionId: 'my-awesome-function', + metadata: { + something: 'custom', + someOtherThing: 'other-value', + }, + }, + }); + + console.log(JSON.stringify(result.object.recipe, null, 2)); + + await sdk.shutdown(); +} + +main().catch(console.error); diff --git a/examples/ai-core/src/telemetry/generate-text.ts b/examples/ai-core/src/telemetry/generate-text.ts index f820380a4b79..6a3f7efe9a4f 100644 --- a/examples/ai-core/src/telemetry/generate-text.ts +++ b/examples/ai-core/src/telemetry/generate-text.ts @@ -17,7 +17,7 @@ sdk.start(); async function main() { const result = await generateText({ - model: openai('gpt-3.5-turbo'), + model: openai('gpt-4o'), maxTokens: 50, prompt: 'Invent a new holiday and describe its traditions.', experimental_telemetry: { diff --git a/examples/ai-core/src/telemetry/stream-object.ts b/examples/ai-core/src/telemetry/stream-object.ts new file mode 100644 index 000000000000..e069a87bcdcd --- /dev/null +++ b/examples/ai-core/src/telemetry/stream-object.ts @@ -0,0 +1,51 @@ +import 'dotenv/config'; + +import { openai } from '@ai-sdk/openai'; +import { getNodeAutoInstrumentations } from '@opentelemetry/auto-instrumentations-node'; +import { NodeSDK } from '@opentelemetry/sdk-node'; +import { ConsoleSpanExporter } from '@opentelemetry/sdk-trace-node'; +import { streamObject } from 'ai'; +import { z } from 'zod'; + +const sdk = new NodeSDK({ + traceExporter: new ConsoleSpanExporter(), + instrumentations: [getNodeAutoInstrumentations()], +}); + +sdk.start(); + +async function main() { + const result = await streamObject({ + model: openai('gpt-4o-mini', { structuredOutputs: true }), + schema: z.object({ + recipe: z.object({ + name: z.string(), + ingredients: z.array( + z.object({ + name: z.string(), + amount: z.string(), + }), + ), + steps: z.array(z.string()), + }), + }), + prompt: 'Generate a lasagna recipe.', + experimental_telemetry: { + isEnabled: true, + functionId: 'my-awesome-function', + metadata: { + something: 'custom', + someOtherThing: 'other-value', + }, + }, + }); + + for await (const partialObject of result.partialObjectStream) { + console.clear(); + console.log(partialObject); + } + + await sdk.shutdown(); +} + +main().catch(console.error); diff --git a/examples/ai-core/src/telemetry/stream-text.ts b/examples/ai-core/src/telemetry/stream-text.ts index 6e6513d40de8..78fb4c06c1a1 100644 --- 
a/examples/ai-core/src/telemetry/stream-text.ts +++ b/examples/ai-core/src/telemetry/stream-text.ts @@ -1,12 +1,9 @@ -import { openai } from '@ai-sdk/openai'; -import { streamText } from 'ai'; -import dotenv from 'dotenv'; - -dotenv.config(); - +import { anthropic } from '@ai-sdk/anthropic'; import { getNodeAutoInstrumentations } from '@opentelemetry/auto-instrumentations-node'; import { NodeSDK } from '@opentelemetry/sdk-node'; import { ConsoleSpanExporter } from '@opentelemetry/sdk-trace-node'; +import { streamText } from 'ai'; +import 'dotenv/config'; const sdk = new NodeSDK({ traceExporter: new ConsoleSpanExporter(), @@ -17,7 +14,7 @@ sdk.start(); async function main() { const result = await streamText({ - model: openai('gpt-3.5-turbo'), + model: anthropic('claude-3-5-sonnet-20240620'), maxTokens: 50, prompt: 'Invent a new holiday and describe its traditions.', experimental_telemetry: { diff --git a/packages/ai/core/embed/embed-many-result.ts b/packages/ai/core/embed/embed-many-result.ts index d79039fd740c..ec08a686bc98 100644 --- a/packages/ai/core/embed/embed-many-result.ts +++ b/packages/ai/core/embed/embed-many-result.ts @@ -1,5 +1,5 @@ import { Embedding } from '../types'; -import { EmbeddingTokenUsage } from '../types/token-usage'; +import { EmbeddingModelUsage } from '../types/usage'; /** The result of a `embedMany` call. @@ -19,5 +19,5 @@ export interface EmbedManyResult { /** The embedding token usage. */ - readonly usage: EmbeddingTokenUsage; + readonly usage: EmbeddingModelUsage; } diff --git a/packages/ai/core/embed/embed-result.ts b/packages/ai/core/embed/embed-result.ts index e915ac197bab..8fe139af5ac6 100644 --- a/packages/ai/core/embed/embed-result.ts +++ b/packages/ai/core/embed/embed-result.ts @@ -1,5 +1,5 @@ import { Embedding } from '../types'; -import { EmbeddingTokenUsage } from '../types/token-usage'; +import { EmbeddingModelUsage } from '../types/usage'; /** The result of a `embed` call. @@ -19,7 +19,7 @@ export interface EmbedResult { /** The embedding token usage. */ - readonly usage: EmbeddingTokenUsage; + readonly usage: EmbeddingModelUsage; /** Optional raw response data. 
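To illustrate how the new response metadata introduced by this patch surfaces to callers, here is a minimal sketch in the spirit of the `*-full-result.ts` examples above. It is illustrative only and not part of the patch: the model name is an assumption, and the field names (`id`, `modelId`, `timestamp`, optional `headers`) follow the test expectations and snapshots in this series.

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';
import 'dotenv/config';

async function main() {
  const result = await generateText({
    model: openai('gpt-4o-mini'), // illustrative model choice
    prompt: 'Invent a new holiday and describe its traditions.',
  });

  // Response metadata added by this patch: the AI SDK uses provider-supplied
  // values when available and otherwise falls back to a generated id, the
  // current date, and the model id from the call.
  console.log(result.response.id);
  console.log(result.response.modelId);
  console.log(result.response.timestamp.toISOString());

  // Token usage (LanguageModelUsage, formerly CompletionTokenUsage).
  console.log(result.usage.totalTokens);
}

main().catch(console.error);
```

With `streamText` and `streamObject`, the equivalent metadata is exposed as a promise (`await result.response`) that resolves once the response has finished, as shown in the `openai-response.ts` and `cohere-response.ts` examples added above.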
diff --git a/packages/ai/core/generate-object/__snapshots__/generate-object.test.ts.snap b/packages/ai/core/generate-object/__snapshots__/generate-object.test.ts.snap index fffc663464d7..512c807b39de 100644 --- a/packages/ai/core/generate-object/__snapshots__/generate-object.test.ts.snap +++ b/packages/ai/core/generate-object/__snapshots__/generate-object.test.ts.snap @@ -25,6 +25,9 @@ exports[`telemetry > should not record telemetry inputs / outputs when disabled "ai.model.provider": "mock-provider", "ai.operationId": "ai.generateObject.doGenerate", "ai.response.finishReason": "stop", + "ai.response.id": "test-id-from-model", + "ai.response.model": "test-response-model-id", + "ai.response.timestamp": "1970-01-01T00:00:10.000Z", "ai.settings.mode": "json", "ai.usage.completionTokens": 20, "ai.usage.promptTokens": 10, @@ -32,6 +35,8 @@ exports[`telemetry > should not record telemetry inputs / outputs when disabled "gen_ai.response.finish_reasons": [ "stop", ], + "gen_ai.response.id": "test-id-from-model", + "gen_ai.response.model": "test-response-model-id", "gen_ai.system": "mock-provider", "gen_ai.usage.completion_tokens": 20, "gen_ai.usage.prompt_tokens": 10, @@ -68,6 +73,9 @@ exports[`telemetry > should not record telemetry inputs / outputs when disabled "ai.model.provider": "mock-provider", "ai.operationId": "ai.generateObject.doGenerate", "ai.response.finishReason": "stop", + "ai.response.id": "test-id-from-model", + "ai.response.model": "test-response-model-id", + "ai.response.timestamp": "1970-01-01T00:00:10.000Z", "ai.settings.mode": "tool", "ai.usage.completionTokens": 20, "ai.usage.promptTokens": 10, @@ -75,6 +83,8 @@ exports[`telemetry > should not record telemetry inputs / outputs when disabled "gen_ai.response.finish_reasons": [ "stop", ], + "gen_ai.response.id": "test-id-from-model", + "gen_ai.response.model": "test-response-model-id", "gen_ai.system": "mock-provider", "gen_ai.usage.input_tokens": 10, "gen_ai.usage.output_tokens": 20, @@ -132,7 +142,10 @@ exports[`telemetry > should record telemetry data when enabled with mode "json" "ai.request.headers.header1": "value1", "ai.request.headers.header2": "value2", "ai.response.finishReason": "stop", + "ai.response.id": "test-id-from-model", + "ai.response.model": "test-response-model-id", "ai.response.object": "{ "content": "Hello, world!" }", + "ai.response.timestamp": "1970-01-01T00:00:10.000Z", "ai.result.object": "{ "content": "Hello, world!" }", "ai.settings.frequencyPenalty": 0.3, "ai.settings.mode": "json", @@ -154,6 +167,8 @@ exports[`telemetry > should record telemetry data when enabled with mode "json" "gen_ai.response.finish_reasons": [ "stop", ], + "gen_ai.response.id": "test-id-from-model", + "gen_ai.response.model": "test-response-model-id", "gen_ai.system": "mock-provider", "gen_ai.usage.completion_tokens": 20, "gen_ai.usage.prompt_tokens": 10, @@ -212,7 +227,10 @@ exports[`telemetry > should record telemetry data when enabled with mode "tool" "ai.request.headers.header1": "value1", "ai.request.headers.header2": "value2", "ai.response.finishReason": "stop", + "ai.response.id": "test-id-from-model", + "ai.response.model": "test-response-model-id", "ai.response.object": "{ "content": "Hello, world!" }", + "ai.response.timestamp": "1970-01-01T00:00:10.000Z", "ai.result.object": "{ "content": "Hello, world!" 
}", "ai.settings.frequencyPenalty": 0.3, "ai.settings.mode": "tool", @@ -234,6 +252,8 @@ exports[`telemetry > should record telemetry data when enabled with mode "tool" "gen_ai.response.finish_reasons": [ "stop", ], + "gen_ai.response.id": "test-id-from-model", + "gen_ai.response.model": "test-response-model-id", "gen_ai.system": "mock-provider", "gen_ai.usage.input_tokens": 10, "gen_ai.usage.output_tokens": 20, diff --git a/packages/ai/core/generate-object/__snapshots__/stream-object.test.ts.snap b/packages/ai/core/generate-object/__snapshots__/stream-object.test.ts.snap index 9fbd25de7d45..1b26878fc276 100644 --- a/packages/ai/core/generate-object/__snapshots__/stream-object.test.ts.snap +++ b/packages/ai/core/generate-object/__snapshots__/stream-object.test.ts.snap @@ -1,5 +1,132 @@ // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html +exports[`output = "object" > options.onFinish > should be called when a valid object is generated 1`] = ` +{ + "error": undefined, + "experimental_providerMetadata": { + "testProvider": { + "testKey": "testValue", + }, + }, + "object": { + "content": "Hello, world!", + }, + "rawResponse": undefined, + "response": { + "headers": undefined, + "id": "id-0", + "modelId": "mock-model-id", + "timestamp": 1970-01-01T00:00:00.000Z, + }, + "usage": { + "completionTokens": 10, + "promptTokens": 3, + "totalTokens": 13, + }, + "warnings": undefined, +} +`; + +exports[`output = "object" > options.onFinish > should be called when object doesn't match the schema 1`] = ` +{ + "error": [AI_TypeValidationError: Type validation failed: Value: {"invalid":"Hello, world!"}. +Error message: [ + { + "code": "invalid_type", + "expected": "string", + "received": "undefined", + "path": [ + "content" + ], + "message": "Required" + } +]], + "experimental_providerMetadata": undefined, + "object": undefined, + "rawResponse": undefined, + "response": { + "headers": undefined, + "id": "id-0", + "modelId": "mock-model-id", + "timestamp": 1970-01-01T00:00:00.000Z, + }, + "usage": { + "completionTokens": 10, + "promptTokens": 3, + "totalTokens": 13, + }, + "warnings": undefined, +} +`; + +exports[`output = "object" > result.fullStream > should send full stream data 1`] = ` +[ + { + "object": {}, + "type": "object", + }, + { + "textDelta": "{ ", + "type": "text-delta", + }, + { + "object": { + "content": "Hello, ", + }, + "type": "object", + }, + { + "textDelta": ""content": "Hello, ", + "type": "text-delta", + }, + { + "object": { + "content": "Hello, world", + }, + "type": "object", + }, + { + "textDelta": "world", + "type": "text-delta", + }, + { + "object": { + "content": "Hello, world!", + }, + "type": "object", + }, + { + "textDelta": "!"", + "type": "text-delta", + }, + { + "textDelta": " }", + "type": "text-delta", + }, + { + "finishReason": "stop", + "logprobs": [ + { + "logprob": 1, + "token": "-", + "topLogprobs": [], + }, + ], + "response": { + "id": "id-0", + "modelId": "mock-model-id", + "timestamp": 1970-01-01T00:00:00.000Z, + }, + "type": "finish", + "usage": { + "completionTokens": 10, + "promptTokens": 2, + "totalTokens": 12, + }, + }, +] +`; + exports[`telemetry > should not record any telemetry data when not explicitly enabled 1`] = `[]`; exports[`telemetry > should not record telemetry inputs / outputs when disabled with mode "json" 1`] = ` @@ -25,6 +152,9 @@ exports[`telemetry > should not record telemetry inputs / outputs when disabled "ai.model.provider": "mock-provider", "ai.operationId": "ai.streamObject.doStream", "ai.response.finishReason": "stop", + 
"ai.response.id": "id-0", + "ai.response.model": "mock-model-id", + "ai.response.timestamp": "1970-01-01T00:00:00.000Z", "ai.settings.mode": "json", "ai.stream.msToFirstChunk": 0, "ai.usage.completionTokens": 10, @@ -33,6 +163,8 @@ exports[`telemetry > should not record telemetry inputs / outputs when disabled "gen_ai.response.finish_reasons": [ "stop", ], + "gen_ai.response.id": "id-0", + "gen_ai.response.model": "mock-model-id", "gen_ai.system": "mock-provider", "gen_ai.usage.input_tokens": 3, "gen_ai.usage.output_tokens": 10, @@ -74,6 +206,9 @@ exports[`telemetry > should not record telemetry inputs / outputs when disabled "ai.model.provider": "mock-provider", "ai.operationId": "ai.streamObject.doStream", "ai.response.finishReason": "stop", + "ai.response.id": "id-0", + "ai.response.model": "mock-model-id", + "ai.response.timestamp": "1970-01-01T00:00:00.000Z", "ai.settings.mode": "tool", "ai.stream.msToFirstChunk": 0, "ai.usage.completionTokens": 10, @@ -82,6 +217,8 @@ exports[`telemetry > should not record telemetry inputs / outputs when disabled "gen_ai.response.finish_reasons": [ "stop", ], + "gen_ai.response.id": "id-0", + "gen_ai.response.model": "mock-model-id", "gen_ai.system": "mock-provider", "gen_ai.usage.input_tokens": 3, "gen_ai.usage.output_tokens": 10, @@ -144,7 +281,10 @@ exports[`telemetry > should record telemetry data when enabled with mode "json" "ai.request.headers.header1": "value1", "ai.request.headers.header2": "value2", "ai.response.finishReason": "stop", + "ai.response.id": "id-0", + "ai.response.model": "mock-model-id", "ai.response.object": "{"content":"Hello, world!"}", + "ai.response.timestamp": "1970-01-01T00:00:00.000Z", "ai.result.object": "{"content":"Hello, world!"}", "ai.settings.frequencyPenalty": 0.3, "ai.settings.mode": "json", @@ -167,6 +307,8 @@ exports[`telemetry > should record telemetry data when enabled with mode "json" "gen_ai.response.finish_reasons": [ "stop", ], + "gen_ai.response.id": "id-0", + "gen_ai.response.model": "mock-model-id", "gen_ai.system": "mock-provider", "gen_ai.usage.input_tokens": 3, "gen_ai.usage.output_tokens": 10, @@ -230,7 +372,10 @@ exports[`telemetry > should record telemetry data when enabled with mode "tool" "ai.request.headers.header1": "value1", "ai.request.headers.header2": "value2", "ai.response.finishReason": "stop", + "ai.response.id": "id-0", + "ai.response.model": "mock-model-id", "ai.response.object": "{"content":"Hello, world!"}", + "ai.response.timestamp": "1970-01-01T00:00:00.000Z", "ai.result.object": "{"content":"Hello, world!"}", "ai.settings.frequencyPenalty": 0.3, "ai.settings.mode": "tool", @@ -253,6 +398,8 @@ exports[`telemetry > should record telemetry data when enabled with mode "tool" "gen_ai.response.finish_reasons": [ "stop", ], + "gen_ai.response.id": "id-0", + "gen_ai.response.model": "mock-model-id", "gen_ai.system": "mock-provider", "gen_ai.usage.input_tokens": 3, "gen_ai.usage.output_tokens": 10, diff --git a/packages/ai/core/generate-object/generate-object-result.ts b/packages/ai/core/generate-object/generate-object-result.ts index f77f53f6b780..a70c748518f4 100644 --- a/packages/ai/core/generate-object/generate-object-result.ts +++ b/packages/ai/core/generate-object/generate-object-result.ts @@ -1,10 +1,11 @@ import { CallWarning, FinishReason, + LanguageModelResponseMetadataWithHeaders, LogProbs, ProviderMetadata, } from '../types'; -import { CompletionTokenUsage } from '../types/token-usage'; +import { LanguageModelUsage } from '../types/usage'; /** The result of a 
`generateObject` call. @@ -23,7 +24,7 @@ export interface GenerateObjectResult { /** The token usage of the generated text. */ - readonly usage: CompletionTokenUsage; + readonly usage: LanguageModelUsage; /** Warnings from the model provider (e.g. unsupported settings) @@ -31,7 +32,9 @@ export interface GenerateObjectResult { readonly warnings: CallWarning[] | undefined; /** - Optional raw response data. + Optional raw response data. + +@deprecated Use `response.headers` instead. */ readonly rawResponse?: { /** @@ -41,8 +44,15 @@ export interface GenerateObjectResult { }; /** - Logprobs for the completion. - `undefined` if the mode does not support logprobs or if was not enabled +Additional response information. + */ + readonly response: LanguageModelResponseMetadataWithHeaders; + + /** + Logprobs for the completion. +`undefined` if the mode does not support logprobs or if was not enabled. + +@deprecated Will become a provider extension in the future. */ readonly logprobs: LogProbs | undefined; diff --git a/packages/ai/core/generate-object/generate-object.test.ts b/packages/ai/core/generate-object/generate-object.test.ts index e5ef35d74b04..5def6db7099c 100644 --- a/packages/ai/core/generate-object/generate-object.test.ts +++ b/packages/ai/core/generate-object/generate-object.test.ts @@ -222,6 +222,81 @@ describe('output = "object"', () => { }); }); + describe('result.response', () => { + it('should contain response information with json mode', async () => { + const result = await generateObject({ + model: new MockLanguageModelV1({ + doGenerate: async () => ({ + ...dummyResponseValues, + text: `{ "content": "Hello, world!" }`, + response: { + id: 'test-id-from-model', + timestamp: new Date(10000), + modelId: 'test-response-model-id', + }, + rawResponse: { + headers: { + 'custom-response-header': 'response-header-value', + }, + }, + }), + }), + schema: z.object({ content: z.string() }), + mode: 'json', + prompt: 'prompt', + }); + + expect(result.response).toStrictEqual({ + id: 'test-id-from-model', + timestamp: new Date(10000), + modelId: 'test-response-model-id', + headers: { + 'custom-response-header': 'response-header-value', + }, + }); + }); + + it('should contain response information with tool mode', async () => { + const result = await generateObject({ + model: new MockLanguageModelV1({ + doGenerate: async () => ({ + ...dummyResponseValues, + toolCalls: [ + { + toolCallType: 'function', + toolCallId: 'tool-call-1', + toolName: 'json', + args: `{ "content": "Hello, world!" }`, + }, + ], + response: { + id: 'test-id-from-model', + timestamp: new Date(10000), + modelId: 'test-response-model-id', + }, + rawResponse: { + headers: { + 'custom-response-header': 'response-header-value', + }, + }, + }), + }), + schema: z.object({ content: z.string() }), + mode: 'tool', + prompt: 'prompt', + }); + + expect(result.response).toStrictEqual({ + id: 'test-id-from-model', + timestamp: new Date(10000), + modelId: 'test-response-model-id', + headers: { + 'custom-response-header': 'response-header-value', + }, + }); + }); + }); + describe('zod schema', () => { it('should generate object when using zod transform', async () => { const result = await generateObject({ @@ -577,6 +652,11 @@ describe('telemetry', () => { doGenerate: async () => ({ ...dummyResponseValues, text: `{ "content": "Hello, world!" 
}`, + response: { + id: 'test-id-from-model', + timestamp: new Date(10000), + modelId: 'test-response-model-id', + }, }), }), schema: z.object({ content: z.string() }), @@ -619,6 +699,11 @@ describe('telemetry', () => { args: `{ "content": "Hello, world!" }`, }, ], + response: { + id: 'test-id-from-model', + timestamp: new Date(10000), + modelId: 'test-response-model-id', + }, }), }), schema: z.object({ content: z.string() }), @@ -654,6 +739,11 @@ describe('telemetry', () => { doGenerate: async () => ({ ...dummyResponseValues, text: `{ "content": "Hello, world!" }`, + response: { + id: 'test-id-from-model', + timestamp: new Date(10000), + modelId: 'test-response-model-id', + }, }), }), schema: z.object({ content: z.string() }), @@ -682,6 +772,11 @@ describe('telemetry', () => { args: `{ "content": "Hello, world!" }`, }, ], + response: { + id: 'test-id-from-model', + timestamp: new Date(10000), + modelId: 'test-response-model-id', + }, }), }), schema: z.object({ content: z.string() }), diff --git a/packages/ai/core/generate-object/generate-object.ts b/packages/ai/core/generate-object/generate-object.ts index 909e3332ede0..979f746a114c 100644 --- a/packages/ai/core/generate-object/generate-object.ts +++ b/packages/ai/core/generate-object/generate-object.ts @@ -1,5 +1,5 @@ import { JSONValue } from '@ai-sdk/provider'; -import { safeParseJSON } from '@ai-sdk/provider-utils'; +import { createIdGenerator, safeParseJSON } from '@ai-sdk/provider-utils'; import { Schema } from '@ai-sdk/ui-utils'; import { z } from 'zod'; import { retryWithExponentialBackoff } from '../../util/retry-with-exponential-backoff'; @@ -18,10 +18,11 @@ import { CallWarning, FinishReason, LanguageModel, + LanguageModelResponseMetadata, LogProbs, ProviderMetadata, } from '../types'; -import { calculateCompletionTokenUsage } from '../types/token-usage'; +import { calculateLanguageModelUsage } from '../types/usage'; import { prepareResponseHeaders } from '../util/prepare-response-headers'; import { GenerateObjectResult } from './generate-object-result'; import { injectJsonInstruction } from './inject-json-instruction'; @@ -29,6 +30,8 @@ import { NoObjectGeneratedError } from './no-object-generated-error'; import { getOutputStrategy } from './output-strategy'; import { validateObjectGenerationInput } from './validate-object-generation-input'; +const originalGenerateId = createIdGenerator({ prefix: 'aiobj-', length: 24 }); + /** Generate a structured, typed object for a given prompt and schema using a language model. @@ -86,6 +89,14 @@ Optional telemetry configuration (experimental). */ experimental_telemetry?: TelemetrySettings; + + /** + * Internal. For test use only. May change without notice. + */ + _internal?: { + generateId?: () => string; + currentDate?: () => Date; + }; }, ): Promise>; /** @@ -144,6 +155,14 @@ Default and recommended: 'auto' (best mode for the model). Optional telemetry configuration (experimental). */ experimental_telemetry?: TelemetrySettings; + + /** + * Internal. For test use only. May change without notice. + */ + _internal?: { + generateId?: () => string; + currentDate?: () => Date; + }; }, ): Promise>>; /** @@ -173,6 +192,14 @@ The mode to use for object generation. Must be "json" for no-schema output. Optional telemetry configuration (experimental). */ experimental_telemetry?: TelemetrySettings; + + /** + * Internal. For test use only. May change without notice. 
+ */ + _internal?: { + generateId?: () => string; + currentDate?: () => Date; + }; }, ): Promise>; export async function generateObject({ @@ -189,6 +216,10 @@ export async function generateObject({ abortSignal, headers, experimental_telemetry: telemetry, + _internal: { + generateId = originalGenerateId, + currentDate = () => new Date(), + } = {}, ...settings }: Omit & Prompt & { @@ -209,6 +240,14 @@ export async function generateObject({ schemaDescription?: string; mode?: 'auto' | 'json' | 'tool'; experimental_telemetry?: TelemetrySettings; + + /** + * Internal. For test use only. May change without notice. + */ + _internal?: { + generateId?: () => string; + currentDate?: () => Date; + }; }): Promise> { validateObjectGenerationInput({ output, @@ -268,9 +307,10 @@ export async function generateObject({ let result: string; let finishReason: FinishReason; - let usage: Parameters[0]; + let usage: Parameters[0]; let warnings: CallWarning[] | undefined; let rawResponse: { headers?: Record } | undefined; + let response: LanguageModelResponseMetadata; let logprobs: LogProbs | undefined; let providerMetadata: ProviderMetadata | undefined; @@ -347,6 +387,12 @@ export async function generateObject({ throw new NoObjectGeneratedError(); } + const responseData = { + id: result.response?.id ?? generateId(), + timestamp: result.response?.timestamp ?? currentDate(), + modelId: result.response?.modelId ?? model.modelId, + }; + // Add response information to the span: span.setAttributes( selectTelemetryAttributes({ @@ -354,6 +400,10 @@ export async function generateObject({ attributes: { 'ai.response.finishReason': result.finishReason, 'ai.response.object': { output: () => result.text }, + 'ai.response.id': responseData.id, + 'ai.response.model': responseData.modelId, + 'ai.response.timestamp': + responseData.timestamp.toISOString(), 'ai.usage.promptTokens': result.usage.promptTokens, 'ai.usage.completionTokens': @@ -365,6 +415,8 @@ export async function generateObject({ // standardized gen-ai llm span attributes: 'gen_ai.response.finish_reasons': [result.finishReason], + 'gen_ai.response.id': responseData.id, + 'gen_ai.response.model': responseData.modelId, 'gen_ai.usage.prompt_tokens': result.usage.promptTokens, 'gen_ai.usage.completion_tokens': result.usage.completionTokens, @@ -372,7 +424,7 @@ export async function generateObject({ }), ); - return { ...result, objectText: result.text }; + return { ...result, objectText: result.text, responseData }; }, }), ); @@ -384,6 +436,7 @@ export async function generateObject({ rawResponse = generateResult.rawResponse; logprobs = generateResult.logprobs; providerMetadata = generateResult.providerMetadata; + response = generateResult.responseData; break; } @@ -457,6 +510,12 @@ export async function generateObject({ throw new NoObjectGeneratedError(); } + const responseData = { + id: result.response?.id ?? generateId(), + timestamp: result.response?.timestamp ?? currentDate(), + modelId: result.response?.modelId ?? 
model.modelId, + }; + // Add response information to the span: span.setAttributes( selectTelemetryAttributes({ @@ -464,6 +523,10 @@ export async function generateObject({ attributes: { 'ai.response.finishReason': result.finishReason, 'ai.response.object': { output: () => objectText }, + 'ai.response.id': responseData.id, + 'ai.response.model': responseData.modelId, + 'ai.response.timestamp': + responseData.timestamp.toISOString(), 'ai.usage.promptTokens': result.usage.promptTokens, 'ai.usage.completionTokens': @@ -475,6 +538,8 @@ export async function generateObject({ // standardized gen-ai llm span attributes: 'gen_ai.response.finish_reasons': [result.finishReason], + 'gen_ai.response.id': responseData.id, + 'gen_ai.response.model': responseData.modelId, 'gen_ai.usage.input_tokens': result.usage.promptTokens, 'gen_ai.usage.output_tokens': result.usage.completionTokens, @@ -482,7 +547,7 @@ export async function generateObject({ }), ); - return { ...result, objectText }; + return { ...result, objectText, responseData }; }, }), ); @@ -494,6 +559,7 @@ export async function generateObject({ rawResponse = generateResult.rawResponse; logprobs = generateResult.logprobs; providerMetadata = generateResult.providerMetadata; + response = generateResult.responseData; break; } @@ -549,9 +615,12 @@ export async function generateObject({ return new DefaultGenerateObjectResult({ object: validationResult.value, finishReason, - usage: calculateCompletionTokenUsage(usage), + usage: calculateLanguageModelUsage(usage), warnings, - rawResponse, + response: { + ...response, + headers: rawResponse?.headers, + }, logprobs, providerMetadata, }); @@ -567,23 +636,29 @@ class DefaultGenerateObjectResult implements GenerateObjectResult { readonly rawResponse: GenerateObjectResult['rawResponse']; readonly logprobs: GenerateObjectResult['logprobs']; readonly experimental_providerMetadata: GenerateObjectResult['experimental_providerMetadata']; + readonly response: GenerateObjectResult['response']; constructor(options: { object: GenerateObjectResult['object']; finishReason: GenerateObjectResult['finishReason']; usage: GenerateObjectResult['usage']; warnings: GenerateObjectResult['warnings']; - rawResponse: GenerateObjectResult['rawResponse']; logprobs: GenerateObjectResult['logprobs']; providerMetadata: GenerateObjectResult['experimental_providerMetadata']; + response: GenerateObjectResult['response']; }) { this.object = options.object; this.finishReason = options.finishReason; this.usage = options.usage; this.warnings = options.warnings; - this.rawResponse = options.rawResponse; - this.logprobs = options.logprobs; this.experimental_providerMetadata = options.providerMetadata; + this.response = options.response; + + // deprecated: + this.rawResponse = { + headers: options.response.headers, + }; + this.logprobs = options.logprobs; } toJsonResponse(init?: ResponseInit): Response { diff --git a/packages/ai/core/generate-object/index.ts b/packages/ai/core/generate-object/index.ts index 32f84a574418..77a382f312c2 100644 --- a/packages/ai/core/generate-object/index.ts +++ b/packages/ai/core/generate-object/index.ts @@ -1,4 +1,7 @@ -export * from './generate-object'; -export * from './generate-object-result'; -export * from './stream-object'; -export * from './stream-object-result'; +export { generateObject, experimental_generateObject } from './generate-object'; +export type { GenerateObjectResult } from './generate-object-result'; +export { streamObject, experimental_streamObject } from './stream-object'; +export type { + 
ObjectStreamPart, + StreamObjectResult, +} from './stream-object-result'; diff --git a/packages/ai/core/generate-object/stream-object-result.ts b/packages/ai/core/generate-object/stream-object-result.ts index 698440a2456d..e8750796a45f 100644 --- a/packages/ai/core/generate-object/stream-object-result.ts +++ b/packages/ai/core/generate-object/stream-object-result.ts @@ -2,10 +2,12 @@ import { ServerResponse } from 'http'; import { CallWarning, FinishReason, + LanguageModelResponseMetadata, + LanguageModelResponseMetadataWithHeaders, LogProbs, ProviderMetadata, } from '../types'; -import { CompletionTokenUsage } from '../types/token-usage'; +import { LanguageModelUsage } from '../types/usage'; import { AsyncIterableStream } from '../util/async-iterable-stream'; /** @@ -20,7 +22,7 @@ export interface StreamObjectResult { /** The token usage of the generated response. Resolved when the response is finished. */ - readonly usage: Promise; + readonly usage: Promise; /** Additional provider-specific metadata. They are passed through @@ -30,8 +32,11 @@ results that can be fully encapsulated in the provider. readonly experimental_providerMetadata: Promise; /** - Optional raw response data. +Optional raw response data. + +@deprecated Use `response` instead. */ + // TODO removed in v4 readonly rawResponse?: { /** Response headers. @@ -39,6 +44,11 @@ results that can be fully encapsulated in the provider. headers?: Record; }; + /** +Additional response information. + */ + readonly response: Promise; + /** The generated object (typed according to the schema). Resolved when the response is finished. */ @@ -93,7 +103,15 @@ results that can be fully encapsulated in the provider. toTextStreamResponse(init?: ResponseInit): Response; } -export type ObjectStreamInputPart = +export type ObjectStreamPart = + | { + type: 'object'; + object: PARTIAL; + } + | { + type: 'text-delta'; + textDelta: string; + } | { type: 'error'; error: unknown; @@ -102,21 +120,7 @@ export type ObjectStreamInputPart = type: 'finish'; finishReason: FinishReason; logprobs?: LogProbs; - usage: { - promptTokens: number; - completionTokens: number; - totalTokens: number; - }; + usage: LanguageModelUsage; + response: LanguageModelResponseMetadata; providerMetadata?: ProviderMetadata; }; - -export type ObjectStreamPart = - | ObjectStreamInputPart - | { - type: 'object'; - object: PARTIAL; - } - | { - type: 'text-delta'; - textDelta: string; - }; diff --git a/packages/ai/core/generate-object/stream-object.test.ts b/packages/ai/core/generate-object/stream-object.test.ts index e18f9d1b803f..20c59b79c643 100644 --- a/packages/ai/core/generate-object/stream-object.test.ts +++ b/packages/ai/core/generate-object/stream-object.test.ts @@ -388,6 +388,12 @@ describe('output = "object"', () => { model: new MockLanguageModelV1({ doStream: async () => ({ stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, { type: 'text-delta', textDelta: '{ ' }, { type: 'text-delta', textDelta: '"content": ' }, { type: 'text-delta', textDelta: `"Hello, ` }, @@ -409,59 +415,9 @@ describe('output = "object"', () => { prompt: 'prompt', }); - assert.deepStrictEqual( + expect( await convertAsyncIterableToArray(result.fullStream), - [ - { - type: 'object', - object: {}, - }, - { - type: 'text-delta', - textDelta: '{ ', - }, - { - type: 'object', - object: { content: 'Hello, ' }, - }, - { - type: 'text-delta', - textDelta: '"content": "Hello, ', - }, - { - type: 'object', - object: { 
content: 'Hello, world' }, - }, - { - type: 'text-delta', - textDelta: 'world', - }, - { - type: 'object', - object: { content: 'Hello, world!' }, - }, - { - type: 'text-delta', - textDelta: '!"', - }, - { - type: 'text-delta', - textDelta: ' }', - }, - { - type: 'finish', - finishReason: 'stop', - usage: { promptTokens: 2, completionTokens: 10, totalTokens: 12 }, - logprobs: [ - { - token: '-', - logprob: 1, - topLogprobs: [], - }, - ], - }, - ], - ); + ).toMatchSnapshot(); }); }); @@ -667,6 +623,92 @@ describe('output = "object"', () => { }); }); + describe('result.response', () => { + it('should resolve with response information in json mode', async () => { + const result = await streamObject({ + model: new MockLanguageModelV1({ + doStream: async () => ({ + stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, + { type: 'text-delta', textDelta: '{"content": "Hello, world!"}' }, + { + type: 'finish', + finishReason: 'stop', + logprobs: undefined, + usage: { completionTokens: 10, promptTokens: 3 }, + }, + ]), + rawCall: { rawPrompt: 'prompt', rawSettings: {} }, + rawResponse: { headers: { call: '2' } }, + }), + }), + schema: z.object({ content: z.string() }), + mode: 'json', + prompt: 'prompt', + }); + + // consume stream (runs in parallel) + convertAsyncIterableToArray(result.partialObjectStream); + + expect(await result.response).toStrictEqual({ + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + headers: { call: '2' }, + }); + }); + + it('should resolve with response information in tool mode', async () => { + const result = await streamObject({ + model: new MockLanguageModelV1({ + doStream: async () => ({ + stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, + { + type: 'tool-call-delta', + toolCallType: 'function', + toolCallId: 'tool-call-1', + toolName: 'json', + argsTextDelta: '{"content": "Hello, world!"}', + }, + { + type: 'finish', + finishReason: 'stop', + logprobs: undefined, + usage: { completionTokens: 10, promptTokens: 3 }, + }, + ]), + rawCall: { rawPrompt: 'prompt', rawSettings: {} }, + rawResponse: { headers: { call: '2' } }, + }), + }), + schema: z.object({ content: z.string() }), + mode: 'tool', + prompt: 'prompt', + }); + + // consume stream (runs in parallel) + convertAsyncIterableToArray(result.partialObjectStream); + + expect(await result.response).toStrictEqual({ + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + headers: { call: '2' }, + }); + }); + }); + describe('result.object', () => { it('should resolve with typed object', async () => { const result = await streamObject({ @@ -771,127 +813,96 @@ describe('output = "object"', () => { }); describe('options.onFinish', () => { - describe('with successfully validated object', () => { + it('should be called when a valid object is generated', async () => { let result: Parameters< Required[0]>['onFinish'] >[0]; - beforeEach(async () => { - const { partialObjectStream } = await streamObject({ - model: new MockLanguageModelV1({ - doStream: async () => ({ - stream: convertArrayToReadableStream([ - { - type: 'text-delta', - textDelta: '{ "content": "Hello, world!" 
}', - }, - { - type: 'finish', - finishReason: 'stop', - usage: { completionTokens: 10, promptTokens: 3 }, - providerMetadata: { - testProvider: { testKey: 'testValue' }, - }, + const { partialObjectStream } = await streamObject({ + model: new MockLanguageModelV1({ + doStream: async () => ({ + stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, + { + type: 'text-delta', + textDelta: '{ "content": "Hello, world!" }', + }, + { + type: 'finish', + finishReason: 'stop', + usage: { completionTokens: 10, promptTokens: 3 }, + providerMetadata: { + testProvider: { testKey: 'testValue' }, }, - ]), - rawCall: { rawPrompt: 'prompt', rawSettings: {} }, - }), + }, + ]), + rawCall: { rawPrompt: 'prompt', rawSettings: {} }, }), - schema: z.object({ content: z.string() }), - mode: 'json', - prompt: 'prompt', - onFinish: async event => { - result = event as unknown as typeof result; - }, - }); - - // consume stream - await convertAsyncIterableToArray(partialObjectStream); - }); - - it('should contain token usage', async () => { - assert.deepStrictEqual(result.usage, { - completionTokens: 10, - promptTokens: 3, - totalTokens: 13, - }); - }); - - it('should contain the full object', async () => { - assert.deepStrictEqual(result.object, { - content: 'Hello, world!', - }); + }), + schema: z.object({ content: z.string() }), + mode: 'json', + prompt: 'prompt', + onFinish: async event => { + result = event as unknown as typeof result; + }, }); - it('should not contain an error object', async () => { - assert.deepStrictEqual(result.error, undefined); - }); + // consume stream + await convertAsyncIterableToArray(partialObjectStream); - it('should contain provider metadata', async () => { - assert.deepStrictEqual(result.experimental_providerMetadata, { - testProvider: { testKey: 'testValue' }, - }); - }); + expect(result!).toMatchSnapshot(); }); - describe("with object that doesn't match the schema", () => { + it("should be called when object doesn't match the schema", async () => { let result: Parameters< Required[0]>['onFinish'] >[0]; - beforeEach(async () => { - const { partialObjectStream, object } = await streamObject({ - model: new MockLanguageModelV1({ - doStream: async () => ({ - stream: convertArrayToReadableStream([ - { type: 'text-delta', textDelta: '{ ' }, - { type: 'text-delta', textDelta: '"invalid": ' }, - { type: 'text-delta', textDelta: `"Hello, ` }, - { type: 'text-delta', textDelta: `world` }, - { type: 'text-delta', textDelta: `!"` }, - { type: 'text-delta', textDelta: ' }' }, - { - type: 'finish', - finishReason: 'stop', - usage: { completionTokens: 10, promptTokens: 3 }, - }, - ]), - rawCall: { rawPrompt: 'prompt', rawSettings: {} }, - }), + const { partialObjectStream, object } = await streamObject({ + model: new MockLanguageModelV1({ + doStream: async () => ({ + stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, + { type: 'text-delta', textDelta: '{ ' }, + { type: 'text-delta', textDelta: '"invalid": ' }, + { type: 'text-delta', textDelta: `"Hello, ` }, + { type: 'text-delta', textDelta: `world` }, + { type: 'text-delta', textDelta: `!"` }, + { type: 'text-delta', textDelta: ' }' }, + { + type: 'finish', + finishReason: 'stop', + usage: { completionTokens: 10, promptTokens: 3 }, + }, + ]), + rawCall: { rawPrompt: 'prompt', rawSettings: {} }, }), - schema: z.object({ content: z.string() }), - mode: 'json', - 
prompt: 'prompt', - onFinish: async event => { - result = event as unknown as typeof result; - }, - }); - - // consume stream - await convertAsyncIterableToArray(partialObjectStream); - - // consume expected error rejection - await object.catch(() => {}); + }), + schema: z.object({ content: z.string() }), + mode: 'json', + prompt: 'prompt', + onFinish: async event => { + result = event as unknown as typeof result; + }, }); - it('should contain token usage', async () => { - assert.deepStrictEqual(result.usage, { - completionTokens: 10, - promptTokens: 3, - totalTokens: 13, - }); - }); + // consume stream + await convertAsyncIterableToArray(partialObjectStream); - it('should not contain a full object', async () => { - assert.deepStrictEqual(result.object, undefined); - }); + // consume expected error rejection + await object.catch(() => {}); - it('should contain an error object', async () => { - assert.deepStrictEqual( - TypeValidationError.isInstance(result.error), - true, - ); - }); + expect(result!).toMatchSnapshot(); }); }); @@ -1342,6 +1353,12 @@ describe('telemetry', () => { model: new MockLanguageModelV1({ doStream: async () => ({ stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, { type: 'text-delta', textDelta: '{ ' }, { type: 'text-delta', textDelta: '"content": ' }, { type: 'text-delta', textDelta: `"Hello, ` }, @@ -1374,6 +1391,12 @@ describe('telemetry', () => { model: new MockLanguageModelV1({ doStream: async () => ({ stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, { type: 'text-delta', textDelta: '{ ' }, { type: 'text-delta', textDelta: '"content": ' }, { type: 'text-delta', textDelta: `"Hello, ` }, @@ -1425,6 +1448,12 @@ describe('telemetry', () => { model: new MockLanguageModelV1({ doStream: async () => ({ stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, { type: 'tool-call-delta', toolCallType: 'function', @@ -1512,6 +1541,12 @@ describe('telemetry', () => { model: new MockLanguageModelV1({ doStream: async () => ({ stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, { type: 'text-delta', textDelta: '{ ' }, { type: 'text-delta', textDelta: '"content": ' }, { type: 'text-delta', textDelta: `"Hello, ` }, @@ -1549,6 +1584,12 @@ describe('telemetry', () => { model: new MockLanguageModelV1({ doStream: async () => ({ stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, { type: 'tool-call-delta', toolCallType: 'function', diff --git a/packages/ai/core/generate-object/stream-object.ts b/packages/ai/core/generate-object/stream-object.ts index 6b858d2f5391..5f4024e2acfe 100644 --- a/packages/ai/core/generate-object/stream-object.ts +++ b/packages/ai/core/generate-object/stream-object.ts @@ -27,11 +27,18 @@ import { getTracer } from '../telemetry/get-tracer'; import { recordSpan } from '../telemetry/record-span'; import { selectTelemetryAttributes } from '../telemetry/select-telemetry-attributes'; import { TelemetrySettings } from '../telemetry/telemetry-settings'; -import { CallWarning, LanguageModel, ProviderMetadata } from '../types'; import { - CompletionTokenUsage, - calculateCompletionTokenUsage, -} from 
'../types/token-usage'; + CallWarning, + FinishReason, + LanguageModel, + LanguageModelResponseMetadataWithHeaders, + LogProbs, + ProviderMetadata, +} from '../types'; +import { + LanguageModelUsage, + calculateLanguageModelUsage, +} from '../types/usage'; import { AsyncIterableStream, createAsyncIterableStream, @@ -40,18 +47,17 @@ import { now as originalNow } from '../util/now'; import { prepareResponseHeaders } from '../util/prepare-response-headers'; import { injectJsonInstruction } from './inject-json-instruction'; import { OutputStrategy, getOutputStrategy } from './output-strategy'; -import { - ObjectStreamInputPart, - ObjectStreamPart, - StreamObjectResult, -} from './stream-object-result'; +import { ObjectStreamPart, StreamObjectResult } from './stream-object-result'; import { validateObjectGenerationInput } from './validate-object-generation-input'; +import { createIdGenerator } from '@ai-sdk/provider-utils'; + +const originalGenerateId = createIdGenerator({ prefix: 'aiobj-', length: 24 }); type OnFinishCallback = (event: { /** The token usage of the generated response. */ - usage: CompletionTokenUsage; + usage: LanguageModelUsage; /** The generated object. Can be undefined if the final object does not match the schema. @@ -65,14 +71,21 @@ Optional error object. This is e.g. a TypeValidationError when the final object /** Optional raw response data. -*/ + +@deprecated Use `response` instead. + */ rawResponse?: { /** Response headers. -*/ + */ headers?: Record; }; + /** +Response metadata. + */ + response: LanguageModelResponseMetadataWithHeaders; + /** Warnings from the model provider (e.g. unsupported settings). */ @@ -152,6 +165,8 @@ Callback that is called when the LLM response and the final object validation ar * Internal. For test use only. May change without notice. */ _internal?: { + generateId?: () => string; + currentDate?: () => Date; now?: () => number; }; }, @@ -222,6 +237,8 @@ Callback that is called when the LLM response and the final object validation ar * Internal. For test use only. May change without notice. */ _internal?: { + generateId?: () => string; + currentDate?: () => Date; now?: () => number; }; }, @@ -269,6 +286,8 @@ Callback that is called when the LLM response and the final object validation ar * Internal. For test use only. May change without notice. 
*/ _internal?: { + generateId?: () => string; + currentDate?: () => Date; now?: () => number; }; }, @@ -288,7 +307,11 @@ export async function streamObject({ headers, experimental_telemetry: telemetry, onFinish, - _internal: { now = originalNow } = {}, + _internal: { + generateId = originalGenerateId, + currentDate = () => new Date(), + now = originalNow, + } = {}, ...settings }: Omit & Prompt & { @@ -309,17 +332,10 @@ export async function streamObject({ schemaDescription?: string; mode?: 'auto' | 'json' | 'tool'; experimental_telemetry?: TelemetrySettings; - onFinish?: (event: { - usage: CompletionTokenUsage; - object: RESULT | undefined; - error: unknown | undefined; - rawResponse?: { - headers?: Record; - }; - warnings?: CallWarning[]; - experimental_providerMetadata: ProviderMetadata | undefined; - }) => Promise | void; + onFinish?: OnFinishCallback; _internal?: { + generateId?: () => string; + currentDate?: () => Date; now?: () => number; }; }): Promise> { @@ -426,6 +442,7 @@ export async function streamObject({ case 'text-delta': controller.enqueue(chunk.textDelta); break; + case 'response-metadata': case 'finish': case 'error': controller.enqueue(chunk); @@ -470,6 +487,7 @@ export async function streamObject({ case 'tool-call-delta': controller.enqueue(chunk.argsTextDelta); break; + case 'response-metadata': case 'finish': case 'error': controller.enqueue(chunk); @@ -547,7 +565,10 @@ export async function streamObject({ doStreamSpan, telemetry, startTimestampMs, + modelId: model.modelId, now, + currentDate, + generateId, }); }, }); @@ -576,6 +597,11 @@ class DefaultStreamObjectResult ELEMENT_STREAM >['rawResponse']; readonly outputStrategy: OutputStrategy; + readonly response: StreamObjectResult< + PARTIAL, + RESULT, + ELEMENT_STREAM + >['response']; constructor({ stream, @@ -587,13 +613,16 @@ class DefaultStreamObjectResult doStreamSpan, telemetry, startTimestampMs, + modelId, now, + currentDate, + generateId, }: { stream: ReadableStream< string | Omit >; warnings: StreamObjectResult['warnings']; - rawResponse?: StreamObjectResult< + rawResponse: StreamObjectResult< PARTIAL, RESULT, ELEMENT_STREAM @@ -604,7 +633,10 @@ class DefaultStreamObjectResult doStreamSpan: Span; telemetry: TelemetrySettings | undefined; startTimestampMs: number; + modelId: string; now: () => number; + currentDate: () => Date; + generateId: () => string; }) { this.warnings = warnings; this.rawResponse = rawResponse; @@ -615,9 +647,14 @@ class DefaultStreamObjectResult // initialize usage promise const { resolve: resolveUsage, promise: usagePromise } = - createResolvablePromise(); + createResolvablePromise(); this.usage = usagePromise; + // initialize response promise + const { resolve: resolveResponse, promise: responsePromise } = + createResolvablePromise(); + this.response = responsePromise; + // initialize experimental_providerMetadata promise const { resolve: resolveProviderMetadata, @@ -626,7 +663,7 @@ class DefaultStreamObjectResult this.experimental_providerMetadata = providerMetadataPromise; // store information for onFinish callback: - let usage: CompletionTokenUsage | undefined; + let usage: LanguageModelUsage | undefined; let finishReason: LanguageModelV1FinishReason | undefined; let providerMetadata: ProviderMetadata | undefined; let object: RESULT | undefined; @@ -635,6 +672,15 @@ class DefaultStreamObjectResult // pipe chunks through a transformation stream that extracts metadata: let accumulatedText = ''; let textDelta = ''; + let response: { + id: string; + timestamp: Date; + modelId: string; 
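+      // Fallback response metadata (generated id, current date, the model's own id);
+      // overwritten below when the provider stream emits a 'response-metadata' chunk.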
+ } = { + id: generateId(), + timestamp: currentDate(), + modelId, + }; // Keep track of raw parse result before type validation, since e.g. Zod might // change the object by mapping properties. @@ -712,6 +758,15 @@ class DefaultStreamObjectResult } switch (chunk.type) { + case 'response-metadata': { + response = { + id: chunk.id ?? response.id, + timestamp: chunk.timestamp ?? response.timestamp, + modelId: chunk.modelId ?? response.modelId, + }; + break; + } + case 'finish': { // send final text delta: if (textDelta !== '') { @@ -722,14 +777,18 @@ class DefaultStreamObjectResult finishReason = chunk.finishReason; // store usage and metadata for promises and onFinish callback: - usage = calculateCompletionTokenUsage(chunk.usage); + usage = calculateLanguageModelUsage(chunk.usage); providerMetadata = chunk.providerMetadata; - controller.enqueue({ ...chunk, usage }); + controller.enqueue({ ...chunk, usage, response }); // resolve promises that can be resolved now: resolveUsage(usage); resolveProviderMetadata(providerMetadata); + resolveResponse({ + ...response, + headers: rawResponse?.headers, + }); // resolve the object promise with the latest object: const validationResult = @@ -770,6 +829,9 @@ class DefaultStreamObjectResult 'ai.response.object': { output: () => JSON.stringify(object), }, + 'ai.response.id': response.id, + 'ai.response.model': response.modelId, + 'ai.response.timestamp': response.timestamp.toISOString(), 'ai.usage.promptTokens': finalUsage.promptTokens, 'ai.usage.completionTokens': finalUsage.completionTokens, @@ -779,9 +841,11 @@ class DefaultStreamObjectResult 'ai.result.object': { output: () => JSON.stringify(object) }, // standardized gen-ai llm span attributes: + 'gen_ai.response.finish_reasons': [finishReason], + 'gen_ai.response.id': response.id, + 'gen_ai.response.model': response.modelId, 'gen_ai.usage.input_tokens': finalUsage.promptTokens, 'gen_ai.usage.output_tokens': finalUsage.completionTokens, - 'gen_ai.response.finish_reasons': [finishReason], }, }), ); @@ -812,6 +876,10 @@ class DefaultStreamObjectResult object, error, rawResponse, + response: { + ...response, + headers: rawResponse?.headers, + }, warnings, experimental_providerMetadata: providerMetadata, }); @@ -935,3 +1003,22 @@ class DefaultStreamObjectResult * @deprecated Use `streamObject` instead. 
*/ export const experimental_streamObject = streamObject; + +export type ObjectStreamInputPart = + | { + type: 'error'; + error: unknown; + } + | { + type: 'response-metadata'; + id?: string; + timestamp?: Date; + modelId?: string; + } + | { + type: 'finish'; + finishReason: FinishReason; + logprobs?: LogProbs; + usage: LanguageModelUsage; + providerMetadata?: ProviderMetadata; + }; diff --git a/packages/ai/core/generate-text/__snapshots__/generate-text.test.ts.snap b/packages/ai/core/generate-text/__snapshots__/generate-text.test.ts.snap index 512009e23803..8075733ff7f1 100644 --- a/packages/ai/core/generate-text/__snapshots__/generate-text.test.ts.snap +++ b/packages/ai/core/generate-text/__snapshots__/generate-text.test.ts.snap @@ -1,5 +1,68 @@ // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html +exports[`result.responseMessages > single roundtrip > should return information about all roundtrips 1`] = ` +[ + { + "finishReason": "tool-calls", + "logprobs": undefined, + "response": { + "headers": undefined, + "id": "test-id-1-from-model", + "modelId": "test-response-model-id", + "timestamp": 1970-01-01T00:00:00.000Z, + }, + "text": "", + "toolCalls": [ + { + "args": { + "value": "value", + }, + "toolCallId": "call-1", + "toolName": "tool1", + "type": "tool-call", + }, + ], + "toolResults": [ + { + "args": { + "value": "value", + }, + "result": "result1", + "toolCallId": "call-1", + "toolName": "tool1", + }, + ], + "usage": { + "completionTokens": 5, + "promptTokens": 10, + "totalTokens": 15, + }, + "warnings": undefined, + }, + { + "finishReason": "stop", + "logprobs": undefined, + "response": { + "headers": { + "custom-response-header": "response-header-value", + }, + "id": "test-id-2-from-model", + "modelId": "test-response-model-id", + "timestamp": 1970-01-01T00:00:10.000Z, + }, + "text": "Hello, world!", + "toolCalls": [], + "toolResults": [], + "usage": { + "completionTokens": 20, + "promptTokens": 10, + "totalTokens": 30, + }, + "warnings": undefined, + }, +] +`; + exports[`telemetry > should not record any telemetry data when not explicitly enabled 1`] = `[]`; exports[`telemetry > should not record telemetry inputs / outputs when disabled 1`] = ` @@ -26,12 +89,17 @@ exports[`telemetry > should not record telemetry inputs / outputs when disabled "ai.model.provider": "mock-provider", "ai.operationId": "ai.generateText.doGenerate", "ai.response.finishReason": "stop", + "ai.response.id": "test-id", + "ai.response.model": "mock-model-id", + "ai.response.timestamp": "1970-01-01T00:00:00.000Z", "ai.usage.completionTokens": 20, "ai.usage.promptTokens": 10, "gen_ai.request.model": "mock-model-id", "gen_ai.response.finish_reasons": [ "stop", ], + "gen_ai.response.id": "test-id", + "gen_ai.response.model": "mock-model-id", "gen_ai.system": "mock-provider", "gen_ai.usage.input_tokens": 10, "gen_ai.usage.output_tokens": 20, @@ -82,6 +150,9 @@ exports[`telemetry > should record successful tool call 1`] = ` "ai.prompt.format": "prompt", "ai.prompt.messages": "[{"role":"user","content":[{"type":"text","text":"test-input"}]}]", "ai.response.finishReason": "stop", + "ai.response.id": "test-id", + "ai.response.model": "mock-model-id", + "ai.response.timestamp": "1970-01-01T00:00:00.000Z", "ai.response.toolCalls": "[{"toolCallType":"function","toolCallId":"call-1","toolName":"tool1","args":"{ \\"value\\": \\"value\\" }"}]", "ai.result.toolCalls": "[{"toolCallType":"function","toolCallId":"call-1","toolName":"tool1","args":"{ \\"value\\": \\"value\\" }"}]", "ai.usage.completionTokens": 20, @@ 
-90,6 +161,8 @@ exports[`telemetry > should record successful tool call 1`] = ` "gen_ai.response.finish_reasons": [ "stop", ], + "gen_ai.response.id": "test-id", + "gen_ai.response.model": "mock-model-id", "gen_ai.system": "mock-provider", "gen_ai.usage.input_tokens": 10, "gen_ai.usage.output_tokens": 20, @@ -158,7 +231,10 @@ exports[`telemetry > should record telemetry data when enabled 1`] = ` "ai.request.headers.header1": "value1", "ai.request.headers.header2": "value2", "ai.response.finishReason": "stop", + "ai.response.id": "test-id-from-model", + "ai.response.model": "test-response-model-id", "ai.response.text": "Hello, world!", + "ai.response.timestamp": "1970-01-01T00:00:10.000Z", "ai.result.text": "Hello, world!", "ai.settings.frequencyPenalty": 0.3, "ai.settings.presencePenalty": 0.4, @@ -185,6 +261,8 @@ exports[`telemetry > should record telemetry data when enabled 1`] = ` "gen_ai.response.finish_reasons": [ "stop", ], + "gen_ai.response.id": "test-id-from-model", + "gen_ai.response.model": "test-response-model-id", "gen_ai.system": "mock-provider", "gen_ai.usage.input_tokens": 10, "gen_ai.usage.output_tokens": 20, diff --git a/packages/ai/core/generate-text/__snapshots__/stream-text.test.ts.snap b/packages/ai/core/generate-text/__snapshots__/stream-text.test.ts.snap index 605d889827d9..fedb7f2d1dad 100644 --- a/packages/ai/core/generate-text/__snapshots__/stream-text.test.ts.snap +++ b/packages/ai/core/generate-text/__snapshots__/stream-text.test.ts.snap @@ -1,5 +1,28 @@ // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html +exports[`options.maxToolRoundtrips > 2 roundtrips > onFinish should send correct information 1`] = ` +{ + "experimental_providerMetadata": undefined, + "finishReason": "stop", + "rawResponse": undefined, + "response": { + "headers": undefined, + "id": "id-1", + "modelId": "mock-model-id", + "timestamp": 1970-01-01T00:00:01.000Z, + }, + "text": "Hello, world!", + "toolCalls": [], + "toolResults": [], + "usage": { + "completionTokens": 15, + "promptTokens": 4, + "totalTokens": 19, + }, + "warnings": undefined, +} +`; + exports[`options.maxToolRoundtrips > 2 roundtrips > should record telemetry data for each roundtrip 1`] = ` [ { @@ -29,9 +52,12 @@ exports[`options.maxToolRoundtrips > 2 roundtrips > should record telemetry data "ai.prompt.messages": "[{"role":"user","content":[{"type":"text","text":"test-input"}]}]", "ai.response.avgCompletionTokensPerSecond": 20, "ai.response.finishReason": "tool-calls", + "ai.response.id": "id-0", + "ai.response.model": "mock-model-id", "ai.response.msToFinish": 500, "ai.response.msToFirstChunk": 100, "ai.response.text": "", + "ai.response.timestamp": "1970-01-01T00:00:00.000Z", "ai.response.toolCalls": "[{"type":"tool-call","toolCallId":"call-1","toolName":"tool1","args":{"value":"value"}}]", "ai.result.text": "", "ai.result.toolCalls": "[{"type":"tool-call","toolCallId":"call-1","toolName":"tool1","args":{"value":"value"}}]", @@ -42,6 +68,8 @@ exports[`options.maxToolRoundtrips > 2 roundtrips > should record telemetry data "gen_ai.response.finish_reasons": [ "tool-calls", ], + "gen_ai.response.id": "id-0", + "gen_ai.response.model": "mock-model-id", "gen_ai.system": "mock-provider", "gen_ai.usage.input_tokens": 3, "gen_ai.usage.output_tokens": 10, @@ -84,9 +112,12 @@ exports[`options.maxToolRoundtrips > 2 roundtrips > should record telemetry data "ai.prompt.messages": 
"[{"role":"user","content":[{"type":"text","text":"test-input"}]},{"role":"assistant","content":[{"type":"tool-call","toolCallId":"call-1","toolName":"tool1","args":{"value":"value"}}]},{"role":"tool","content":[{"type":"tool-result","toolCallId":"call-1","toolName":"tool1","result":"result1"}]}]", "ai.response.avgCompletionTokensPerSecond": 12.5, "ai.response.finishReason": "stop", + "ai.response.id": "id-1", + "ai.response.model": "mock-model-id", "ai.response.msToFinish": 400, "ai.response.msToFirstChunk": 400, "ai.response.text": "Hello, world!", + "ai.response.timestamp": "1970-01-01T00:00:01.000Z", "ai.result.text": "Hello, world!", "ai.stream.msToFirstChunk": 400, "ai.usage.completionTokens": 5, @@ -95,6 +126,8 @@ exports[`options.maxToolRoundtrips > 2 roundtrips > should record telemetry data "gen_ai.response.finish_reasons": [ "stop", ], + "gen_ai.response.id": "id-1", + "gen_ai.response.model": "mock-model-id", "gen_ai.system": "mock-provider", "gen_ai.usage.input_tokens": 1, "gen_ai.usage.output_tokens": 5, @@ -118,6 +151,58 @@ exports[`options.maxToolRoundtrips > 2 roundtrips > should record telemetry data ] `; +exports[`options.onFinish should send correct information 1`] = ` +{ + "experimental_providerMetadata": { + "testProvider": { + "testKey": "testValue", + }, + }, + "finishReason": "stop", + "rawResponse": { + "headers": { + "call": "2", + }, + }, + "response": { + "headers": { + "call": "2", + }, + "id": "id-0", + "modelId": "mock-model-id", + "timestamp": 1970-01-01T00:00:00.000Z, + }, + "text": "Hello, world!", + "toolCalls": [ + { + "args": { + "value": "value", + }, + "toolCallId": "call-1", + "toolName": "tool1", + "type": "tool-call", + }, + ], + "toolResults": [ + { + "args": { + "value": "value", + }, + "result": "value-result", + "toolCallId": "call-1", + "toolName": "tool1", + "type": "tool-result", + }, + ], + "usage": { + "completionTokens": 10, + "promptTokens": 3, + "totalTokens": 13, + }, + "warnings": undefined, +} +`; + exports[`telemetry > should not record any telemetry data when not explicitly enabled 1`] = `[]`; exports[`telemetry > should not record telemetry inputs / outputs when disabled 1`] = ` @@ -144,8 +229,11 @@ exports[`telemetry > should not record telemetry inputs / outputs when disabled "ai.operationId": "ai.streamText.doStream", "ai.response.avgCompletionTokensPerSecond": 40, "ai.response.finishReason": "stop", + "ai.response.id": "id-0", + "ai.response.model": "mock-model-id", "ai.response.msToFinish": 500, "ai.response.msToFirstChunk": 100, + "ai.response.timestamp": "1970-01-01T00:00:00.000Z", "ai.stream.msToFirstChunk": 100, "ai.usage.completionTokens": 20, "ai.usage.promptTokens": 10, @@ -153,6 +241,8 @@ exports[`telemetry > should not record telemetry inputs / outputs when disabled "gen_ai.response.finish_reasons": [ "stop", ], + "gen_ai.response.id": "id-0", + "gen_ai.response.model": "mock-model-id", "gen_ai.system": "mock-provider", "gen_ai.usage.input_tokens": 10, "gen_ai.usage.output_tokens": 20, @@ -217,9 +307,12 @@ exports[`telemetry > should record successful tool call 1`] = ` "ai.prompt.messages": "[{"role":"user","content":[{"type":"text","text":"test-input"}]}]", "ai.response.avgCompletionTokensPerSecond": 40, "ai.response.finishReason": "stop", + "ai.response.id": "id-0", + "ai.response.model": "mock-model-id", "ai.response.msToFinish": 500, "ai.response.msToFirstChunk": 100, "ai.response.text": "", + "ai.response.timestamp": "1970-01-01T00:00:00.000Z", "ai.response.toolCalls": 
"[{"type":"tool-call","toolCallId":"call-1","toolName":"tool1","args":{"value":"value"}}]", "ai.result.text": "", "ai.result.toolCalls": "[{"type":"tool-call","toolCallId":"call-1","toolName":"tool1","args":{"value":"value"}}]", @@ -230,6 +323,8 @@ exports[`telemetry > should record successful tool call 1`] = ` "gen_ai.response.finish_reasons": [ "stop", ], + "gen_ai.response.id": "id-0", + "gen_ai.response.model": "mock-model-id", "gen_ai.system": "mock-provider", "gen_ai.usage.input_tokens": 10, "gen_ai.usage.output_tokens": 20, @@ -310,9 +405,12 @@ exports[`telemetry > should record telemetry data when enabled 1`] = ` "ai.request.headers.header2": "value2", "ai.response.avgCompletionTokensPerSecond": 40, "ai.response.finishReason": "stop", + "ai.response.id": "id-0", + "ai.response.model": "mock-model-id", "ai.response.msToFinish": 500, "ai.response.msToFirstChunk": 100, "ai.response.text": "Hello, world!", + "ai.response.timestamp": "1970-01-01T00:00:00.000Z", "ai.result.text": "Hello, world!", "ai.settings.frequencyPenalty": 0.3, "ai.settings.presencePenalty": 0.4, @@ -340,6 +438,8 @@ exports[`telemetry > should record telemetry data when enabled 1`] = ` "gen_ai.response.finish_reasons": [ "stop", ], + "gen_ai.response.id": "id-0", + "gen_ai.response.model": "mock-model-id", "gen_ai.system": "mock-provider", "gen_ai.usage.input_tokens": 10, "gen_ai.usage.output_tokens": 20, diff --git a/packages/ai/core/generate-text/generate-text-result.ts b/packages/ai/core/generate-text/generate-text-result.ts index 79222ea29325..c5fd66d75458 100644 --- a/packages/ai/core/generate-text/generate-text-result.ts +++ b/packages/ai/core/generate-text/generate-text-result.ts @@ -3,10 +3,11 @@ import { CoreTool } from '../tool/tool'; import { CallWarning, FinishReason, + LanguageModelResponseMetadataWithHeaders, LogProbs, ProviderMetadata, } from '../types'; -import { CompletionTokenUsage } from '../types/token-usage'; +import { LanguageModelUsage } from '../types/usage'; import { ToToolCallArray } from './tool-call'; import { ToToolResultArray } from './tool-result'; @@ -38,7 +39,7 @@ export interface GenerateTextResult> { /** The token usage of the generated text. */ - readonly usage: CompletionTokenUsage; + readonly usage: LanguageModelUsage; /** Warnings from the model provider (e.g. unsupported settings) @@ -82,7 +83,7 @@ export interface GenerateTextResult> { /** The token usage of the generated text. */ - readonly usage: CompletionTokenUsage; + readonly usage: LanguageModelUsage; /** Warnings from the model provider (e.g. unsupported settings) @@ -96,18 +97,27 @@ export interface GenerateTextResult> { readonly logprobs: LogProbs | undefined; /** - Optional raw response data. - */ +Optional raw response data. + +@deprecated Use `response.headers` instead. + */ readonly rawResponse?: { /** - Response headers. - */ +Response headers. + */ readonly headers?: Record; }; + + /** +Additional response information. + */ + readonly response: LanguageModelResponseMetadataWithHeaders; }>; /** - Optional raw response data. +Optional raw response data. + +@deprecated Use `response.headers` instead. */ readonly rawResponse?: { /** @@ -117,8 +127,15 @@ export interface GenerateTextResult> { }; /** - Logprobs for the completion. - `undefined` if the mode does not support logprobs or if was not enabled. +Additional response information. + */ + readonly response: LanguageModelResponseMetadataWithHeaders; + + /** +Logprobs for the completion. +`undefined` if the mode does not support logprobs or if was not enabled. 
+ +@deprecated Will become a provider extension in the future. */ readonly logprobs: LogProbs | undefined; diff --git a/packages/ai/core/generate-text/generate-text.test.ts b/packages/ai/core/generate-text/generate-text.test.ts index ec133fa2ca54..dd75c86c0070 100644 --- a/packages/ai/core/generate-text/generate-text.test.ts +++ b/packages/ai/core/generate-text/generate-text.test.ts @@ -6,6 +6,8 @@ import { MockLanguageModelV1 } from '../test/mock-language-model-v1'; import { MockTracer } from '../test/mock-tracer'; import { generateText } from './generate-text'; import { GenerateTextResult } from './generate-text-result'; +import { mockId } from '../test/mock-id'; +import { mockValues } from '../test/mock-values'; const dummyResponseValues = { rawCall: { rawPrompt: 'prompt', rawSettings: {} }, @@ -440,6 +442,11 @@ describe('result.responseMessages', () => { promptTokens: 10, totalTokens: 15, }, + response: { + id: 'test-id-1-from-model', + timestamp: new Date(0), + modelId: 'test-response-model-id', + }, }; case 1: expect(mode).toStrictEqual({ @@ -501,6 +508,16 @@ describe('result.responseMessages', () => { return { ...dummyResponseValues, text: 'Hello, world!', + response: { + id: 'test-id-2-from-model', + timestamp: new Date(10000), + modelId: 'test-response-model-id', + }, + rawResponse: { + headers: { + 'custom-response-header': 'response-header-value', + }, + }, }; default: throw new Error(`Unexpected response count: ${responseCount}`); @@ -542,52 +559,51 @@ describe('result.responseMessages', () => { }); it('should return information about all roundtrips', () => { - assert.deepStrictEqual(result.roundtrips, [ - { - finishReason: 'tool-calls', - logprobs: undefined, - text: '', - toolCalls: [ - { - args: { - value: 'value', - }, - toolCallId: 'call-1', - toolName: 'tool1', - type: 'tool-call', + expect(result.roundtrips).toMatchSnapshot(); + }); + }); +}); + +describe('result.response', () => { + it('should contain response information', async () => { + const result = await generateText({ + model: new MockLanguageModelV1({ + doGenerate: async ({ prompt, mode }) => { + assert.deepStrictEqual(mode, { + type: 'regular', + tools: undefined, + toolChoice: undefined, + }); + assert.deepStrictEqual(prompt, [ + { role: 'user', content: [{ type: 'text', text: 'prompt' }] }, + ]); + + return { + ...dummyResponseValues, + text: `Hello, world!`, + response: { + id: 'test-id-from-model', + timestamp: new Date(10000), + modelId: 'test-response-model-id', }, - ], - toolResults: [ - { - args: { - value: 'value', + rawResponse: { + headers: { + 'custom-response-header': 'response-header-value', }, - result: 'result1', - toolCallId: 'call-1', - toolName: 'tool1', }, - ], - usage: { - completionTokens: 5, - promptTokens: 10, - totalTokens: 15, - }, - warnings: undefined, - }, - { - finishReason: 'stop', - logprobs: undefined, - text: 'Hello, world!', - toolCalls: [], - toolResults: [], - usage: { - completionTokens: 20, - promptTokens: 10, - totalTokens: 30, - }, - warnings: undefined, + }; }, - ]); + }), + prompt: 'prompt', + }); + + expect(result.response).toStrictEqual({ + id: 'test-id-from-model', + timestamp: new Date(10000), + modelId: 'test-response-model-id', + headers: { + 'custom-response-header': 'response-header-value', + }, }); }); }); @@ -647,6 +663,11 @@ describe('telemetry', () => { doGenerate: async ({}) => ({ ...dummyResponseValues, text: `Hello, world!`, + response: { + id: 'test-id-from-model', + timestamp: new Date(10000), + modelId: 'test-response-model-id', + }, }), }), prompt: 
'prompt', @@ -698,6 +719,10 @@ describe('telemetry', () => { experimental_telemetry: { isEnabled: true, }, + _internal: { + generateId: () => 'test-id', + currentDate: () => new Date(0), + }, }); expect(tracer.jsonSpans).toMatchSnapshot(); @@ -730,6 +755,10 @@ describe('telemetry', () => { recordInputs: false, recordOutputs: false, }, + _internal: { + generateId: () => 'test-id', + currentDate: () => new Date(0), + }, }); expect(tracer.jsonSpans).toMatchSnapshot(); @@ -807,6 +836,10 @@ describe('tools with custom schema', () => { }, toolChoice: 'required', prompt: 'test-input', + _internal: { + generateId: () => 'test-id', + currentDate: () => new Date(0), + }, }); // test type inference diff --git a/packages/ai/core/generate-text/generate-text.ts b/packages/ai/core/generate-text/generate-text.ts index 68553753375d..d6583a4040de 100644 --- a/packages/ai/core/generate-text/generate-text.ts +++ b/packages/ai/core/generate-text/generate-text.ts @@ -1,3 +1,4 @@ +import { createIdGenerator } from '@ai-sdk/provider-utils'; import { Tracer } from '@opentelemetry/api'; import { retryWithExponentialBackoff } from '../../util/retry-with-exponential-backoff'; import { CoreAssistantMessage, CoreToolMessage } from '../prompt'; @@ -19,14 +20,16 @@ import { TelemetrySettings } from '../telemetry/telemetry-settings'; import { CoreTool } from '../tool/tool'; import { CoreToolChoice, LanguageModel } from '../types'; import { - CompletionTokenUsage, - calculateCompletionTokenUsage, -} from '../types/token-usage'; + LanguageModelUsage, + calculateLanguageModelUsage, +} from '../types/usage'; import { GenerateTextResult } from './generate-text-result'; import { toResponseMessages } from './to-response-messages'; import { ToToolCallArray, parseToolCall } from './tool-call'; import { ToToolResultArray } from './tool-result'; +const originalGenerateId = createIdGenerator({ prefix: 'aitxt-', length: 24 }); + /** Generate a text and call tools for a given prompt using a language model. @@ -84,6 +87,10 @@ export async function generateText>({ maxAutomaticRoundtrips = 0, maxToolRoundtrips = maxAutomaticRoundtrips, experimental_telemetry: telemetry, + _internal: { + generateId = originalGenerateId, + currentDate = () => new Date(), + } = {}, ...settings }: CallSettings & Prompt & { @@ -125,6 +132,14 @@ By default, it's set to 0, which will disable the feature. * Optional telemetry configuration (experimental). */ experimental_telemetry?: TelemetrySettings; + + /** + * Internal. For test use only. May change without notice. + */ + _internal?: { + generateId?: () => string; + currentDate?: () => Date; + }; }): Promise> { const baseTelemetryAttributes = getBaseTelemetryAttributes({ model, @@ -172,14 +187,14 @@ By default, it's set to 0, which will disable the feature. let currentModelResponse: Awaited< ReturnType - >; + > & { response: { id: string; timestamp: Date; modelId: string } }; let currentToolCalls: ToToolCallArray = []; let currentToolResults: ToToolResultArray = []; let roundtripCount = 0; const responseMessages: Array = []; const roundtrips: GenerateTextResult['roundtrips'] = []; - const usage: CompletionTokenUsage = { + const usage: LanguageModelUsage = { completionTokens: 0, promptTokens: 0, totalTokens: 0, @@ -229,6 +244,13 @@ By default, it's set to 0, which will disable the feature. headers, }); + // Fill in default values: + const responseData = { + id: result.response?.id ?? generateId(), + timestamp: result.response?.timestamp ?? currentDate(), + modelId: result.response?.modelId ?? 
model.modelId, + }; + // Add response information to the span: span.setAttributes( selectTelemetryAttributes({ @@ -241,6 +263,10 @@ By default, it's set to 0, which will disable the feature. 'ai.response.toolCalls': { output: () => JSON.stringify(result.toolCalls), }, + 'ai.response.id': responseData.id, + 'ai.response.model': responseData.modelId, + 'ai.response.timestamp': + responseData.timestamp.toISOString(), 'ai.usage.promptTokens': result.usage.promptTokens, 'ai.usage.completionTokens': result.usage.completionTokens, @@ -256,13 +282,15 @@ By default, it's set to 0, which will disable the feature. // standardized gen-ai llm span attributes: 'gen_ai.response.finish_reasons': [result.finishReason], + 'gen_ai.response.id': responseData.id, + 'gen_ai.response.model': responseData.modelId, 'gen_ai.usage.input_tokens': result.usage.promptTokens, 'gen_ai.usage.output_tokens': result.usage.completionTokens, }, }), ); - return result; + return { ...result, response: responseData }; }, }), ); @@ -284,7 +312,7 @@ By default, it's set to 0, which will disable the feature. }); // token usage: - const currentUsage = calculateCompletionTokenUsage( + const currentUsage = calculateLanguageModelUsage( currentModelResponse.usage, ); usage.completionTokens += currentUsage.completionTokens; @@ -300,6 +328,10 @@ By default, it's set to 0, which will disable the feature. usage: currentUsage, warnings: currentModelResponse.warnings, logprobs: currentModelResponse.logprobs, + response: { + ...currentModelResponse.response, + headers: currentModelResponse.rawResponse?.headers, + }, }); // append to messages for potential next roundtrip: @@ -362,7 +394,10 @@ By default, it's set to 0, which will disable the feature. finishReason: currentModelResponse.finishReason, usage, warnings: currentModelResponse.warnings, - rawResponse: currentModelResponse.rawResponse, + response: { + ...currentModelResponse.response, + headers: currentModelResponse.rawResponse?.headers, + }, logprobs: currentModelResponse.logprobs, responseMessages, roundtrips, @@ -461,6 +496,7 @@ class DefaultGenerateTextResult> readonly rawResponse: GenerateTextResult['rawResponse']; readonly logprobs: GenerateTextResult['logprobs']; readonly experimental_providerMetadata: GenerateTextResult['experimental_providerMetadata']; + readonly response: GenerateTextResult['response']; constructor(options: { text: GenerateTextResult['text']; @@ -469,11 +505,11 @@ class DefaultGenerateTextResult> finishReason: GenerateTextResult['finishReason']; usage: GenerateTextResult['usage']; warnings: GenerateTextResult['warnings']; - rawResponse?: GenerateTextResult['rawResponse']; logprobs: GenerateTextResult['logprobs']; responseMessages: GenerateTextResult['responseMessages']; roundtrips: GenerateTextResult['roundtrips']; providerMetadata: GenerateTextResult['experimental_providerMetadata']; + response: GenerateTextResult['response']; }) { this.text = options.text; this.toolCalls = options.toolCalls; @@ -481,11 +517,16 @@ class DefaultGenerateTextResult> this.finishReason = options.finishReason; this.usage = options.usage; this.warnings = options.warnings; - this.rawResponse = options.rawResponse; - this.logprobs = options.logprobs; + this.response = options.response; this.responseMessages = options.responseMessages; this.roundtrips = options.roundtrips; this.experimental_providerMetadata = options.providerMetadata; + + // deprecated: + this.rawResponse = { + headers: options.response.headers, + }; + this.logprobs = options.logprobs; } } diff --git 
a/packages/ai/core/generate-text/index.ts b/packages/ai/core/generate-text/index.ts index c0cd1f5669f6..cbc96f2afa31 100644 --- a/packages/ai/core/generate-text/index.ts +++ b/packages/ai/core/generate-text/index.ts @@ -1,4 +1,4 @@ -export { generateText } from './generate-text'; +export { generateText, experimental_generateText } from './generate-text'; export type { GenerateTextResult } from './generate-text-result'; -export { streamText } from './stream-text'; +export { streamText, experimental_streamText } from './stream-text'; export type { StreamTextResult, TextStreamPart } from './stream-text-result'; diff --git a/packages/ai/core/generate-text/run-tools-transformation.ts b/packages/ai/core/generate-text/run-tools-transformation.ts index 89bfc719bd36..67f4169c7134 100644 --- a/packages/ai/core/generate-text/run-tools-transformation.ts +++ b/packages/ai/core/generate-text/run-tools-transformation.ts @@ -7,8 +7,13 @@ import { recordSpan } from '../telemetry/record-span'; import { selectTelemetryAttributes } from '../telemetry/select-telemetry-attributes'; import { TelemetrySettings } from '../telemetry/telemetry-settings'; import { CoreTool } from '../tool'; -import { FinishReason, LogProbs, ProviderMetadata } from '../types'; -import { calculateCompletionTokenUsage } from '../types/token-usage'; +import { + LanguageModelUsage, + FinishReason, + LogProbs, + ProviderMetadata, +} from '../types'; +import { calculateLanguageModelUsage } from '../types/usage'; import { parseToolCall, ToToolCall } from './tool-call'; import { ToToolResult } from './tool-result'; @@ -36,15 +41,17 @@ export type SingleRequestTextStreamPart< | ({ type: 'tool-result'; } & ToToolResult) + | { + type: 'response-metadata'; + id?: string; + timestamp?: Date; + modelId?: string; + } | { type: 'finish'; finishReason: FinishReason; logprobs?: LogProbs; - usage: { - promptTokens: number; - completionTokens: number; - totalTokens: number; - }; + usage: LanguageModelUsage; experimental_providerMetadata?: ProviderMetadata; } | { @@ -99,6 +106,7 @@ export function runToolsTransformation>({ switch (chunkType) { // forward: case 'text-delta': + case 'response-metadata': case 'error': { controller.enqueue(chunk); break; @@ -252,7 +260,7 @@ export function runToolsTransformation>({ type: 'finish', finishReason: chunk.finishReason, logprobs: chunk.logprobs, - usage: calculateCompletionTokenUsage(chunk.usage), + usage: calculateLanguageModelUsage(chunk.usage), experimental_providerMetadata: chunk.providerMetadata, }); break; diff --git a/packages/ai/core/generate-text/stream-text-result.ts b/packages/ai/core/generate-text/stream-text-result.ts index eeaf1a8fbc82..c112f4614156 100644 --- a/packages/ai/core/generate-text/stream-text-result.ts +++ b/packages/ai/core/generate-text/stream-text-result.ts @@ -4,10 +4,12 @@ import { CoreTool } from '../tool'; import { CallWarning, FinishReason, + LanguageModelResponseMetadata, + LanguageModelResponseMetadataWithHeaders, LogProbs, ProviderMetadata, } from '../types'; -import { CompletionTokenUsage } from '../types/token-usage'; +import { LanguageModelUsage } from '../types/usage'; import { AsyncIterableStream } from '../util/async-iterable-stream'; import { ToToolCall } from './tool-call'; import { ToToolResult } from './tool-result'; @@ -28,7 +30,7 @@ When there are multiple roundtrips, the usage is the sum of all roundtrip usages Resolved when the response is finished. */ - readonly usage: Promise; + readonly usage: Promise; /** The reason why the generation finished. 
Taken from the last roundtrip. @@ -67,8 +69,10 @@ Resolved when the all tool executions are finished. /** Optional raw response data. + +@deprecated Use `response` instead. */ - // TODO change to async in v4 and use value from last roundtrip + // TODO removed in v4 readonly rawResponse?: { /** Response headers. @@ -76,6 +80,11 @@ Optional raw response data. headers?: Record; }; + /** +Additional response information. + */ + readonly response: Promise; + /** A text stream that returns only the generated text deltas. You can use it as either an AsyncIterable or a ReadableStream. When an error occurs, the @@ -217,22 +226,16 @@ export type TextStreamPart> = type: 'roundtrip-finish'; finishReason: FinishReason; logprobs?: LogProbs; - usage: { - promptTokens: number; - completionTokens: number; - totalTokens: number; - }; + usage: LanguageModelUsage; + response: LanguageModelResponseMetadata; experimental_providerMetadata?: ProviderMetadata; } | { type: 'finish'; finishReason: FinishReason; logprobs?: LogProbs; - usage: { - promptTokens: number; - completionTokens: number; - totalTokens: number; - }; + usage: LanguageModelUsage; + response: LanguageModelResponseMetadata; experimental_providerMetadata?: ProviderMetadata; } | { diff --git a/packages/ai/core/generate-text/stream-text.test.ts b/packages/ai/core/generate-text/stream-text.test.ts index bce829b2dfeb..25aecdd75235 100644 --- a/packages/ai/core/generate-text/stream-text.test.ts +++ b/packages/ai/core/generate-text/stream-text.test.ts @@ -13,13 +13,13 @@ import { formatStreamPart, jsonSchema, } from '../../streams'; +import { delay } from '../../util/delay'; import { setTestTracer } from '../telemetry/get-tracer'; import { MockLanguageModelV1 } from '../test/mock-language-model-v1'; import { createMockServerResponse } from '../test/mock-server-response'; import { MockTracer } from '../test/mock-tracer'; +import { mockValues } from '../test/mock-values'; import { streamText } from './stream-text'; -import { delay } from '../../util/delay'; -import { mockNow } from '../test/mock-now'; describe('result.textStream', () => { it('should send text deltas', async () => { @@ -110,6 +110,12 @@ describe('result.fullStream', () => { return { stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'response-id', + modelId: 'response-model-id', + timestamp: new Date(5000), + }, { type: 'text-delta', textDelta: 'Hello' }, { type: 'text-delta', textDelta: ', ' }, { type: 'text-delta', textDelta: `world!` }, @@ -137,6 +143,11 @@ describe('result.fullStream', () => { type: 'roundtrip-finish', finishReason: 'stop', logprobs: undefined, + response: { + id: 'response-id', + modelId: 'response-model-id', + timestamp: new Date(5000), + }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, experimental_providerMetadata: undefined, }, @@ -144,6 +155,81 @@ describe('result.fullStream', () => { type: 'finish', finishReason: 'stop', logprobs: undefined, + response: { + id: 'response-id', + modelId: 'response-model-id', + timestamp: new Date(5000), + }, + usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + experimental_providerMetadata: undefined, + }, + ], + ); + }); + + it('should use fallback response metadata when response metadata is not provided', async () => { + const result = await streamText({ + model: new MockLanguageModelV1({ + doStream: async ({ prompt, mode }) => { + assert.deepStrictEqual(mode, { + type: 'regular', + tools: undefined, + toolChoice: undefined, + }); + assert.deepStrictEqual(prompt, [ 
+ { role: 'user', content: [{ type: 'text', text: 'test-input' }] }, + ]); + + return { + stream: convertArrayToReadableStream([ + { type: 'text-delta', textDelta: 'Hello' }, + { type: 'text-delta', textDelta: ', ' }, + { type: 'text-delta', textDelta: `world!` }, + { + type: 'finish', + finishReason: 'stop', + logprobs: undefined, + usage: { completionTokens: 10, promptTokens: 3 }, + }, + ]), + rawCall: { rawPrompt: 'prompt', rawSettings: {} }, + }; + }, + }), + prompt: 'test-input', + _internal: { + currentDate: mockValues(new Date(2000)), + generateId: mockValues('id-2000'), + }, + }); + + assert.deepStrictEqual( + await convertAsyncIterableToArray(result.fullStream), + [ + { type: 'text-delta', textDelta: 'Hello' }, + { type: 'text-delta', textDelta: ', ' }, + { type: 'text-delta', textDelta: 'world!' }, + { + type: 'roundtrip-finish', + finishReason: 'stop', + logprobs: undefined, + response: { + id: 'id-2000', + modelId: 'mock-model-id', + timestamp: new Date(2000), + }, + usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + experimental_providerMetadata: undefined, + }, + { + type: 'finish', + finishReason: 'stop', + logprobs: undefined, + response: { + id: 'id-2000', + modelId: 'mock-model-id', + timestamp: new Date(2000), + }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, experimental_providerMetadata: undefined, }, @@ -179,6 +265,12 @@ describe('result.fullStream', () => { return { stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, { type: 'tool-call', toolCallType: 'function', @@ -219,6 +311,11 @@ describe('result.fullStream', () => { type: 'roundtrip-finish', finishReason: 'stop', logprobs: undefined, + response: { + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, experimental_providerMetadata: undefined, }, @@ -226,6 +323,11 @@ describe('result.fullStream', () => { type: 'finish', finishReason: 'stop', logprobs: undefined, + response: { + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, experimental_providerMetadata: undefined, }, @@ -261,6 +363,12 @@ describe('result.fullStream', () => { return { stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, { type: 'tool-call-delta', toolCallId: 'call_O17Uplv4lJvD6DVdIvFFeRMw', @@ -350,6 +458,11 @@ describe('result.fullStream', () => { type: 'roundtrip-finish', finishReason: 'tool-calls', logprobs: undefined, + response: { + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, usage: { promptTokens: 53, completionTokens: 17, totalTokens: 70 }, experimental_providerMetadata: undefined, }, @@ -357,6 +470,11 @@ describe('result.fullStream', () => { type: 'finish', finishReason: 'tool-calls', logprobs: undefined, + response: { + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, usage: { promptTokens: 53, completionTokens: 17, totalTokens: 70 }, experimental_providerMetadata: undefined, }, @@ -393,6 +511,12 @@ describe('result.fullStream', () => { return { stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, { type: 'tool-call-delta', toolCallId: 'call_O17Uplv4lJvD6DVdIvFFeRMw', @@ -529,6 +653,11 @@ 
describe('result.fullStream', () => { type: 'roundtrip-finish', finishReason: 'tool-calls', logprobs: undefined, + response: { + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, usage: { promptTokens: 53, completionTokens: 17, totalTokens: 70 }, experimental_providerMetadata: undefined, }, @@ -536,6 +665,11 @@ describe('result.fullStream', () => { type: 'finish', finishReason: 'tool-calls', logprobs: undefined, + response: { + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, usage: { promptTokens: 53, completionTokens: 17, totalTokens: 70 }, experimental_providerMetadata: undefined, }, @@ -571,6 +705,12 @@ describe('result.fullStream', () => { return { stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, { type: 'tool-call', toolCallType: 'function', @@ -618,6 +758,11 @@ describe('result.fullStream', () => { type: 'roundtrip-finish', finishReason: 'stop', logprobs: undefined, + response: { + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, experimental_providerMetadata: undefined, }, @@ -625,6 +770,11 @@ describe('result.fullStream', () => { type: 'finish', finishReason: 'stop', logprobs: undefined, + response: { + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, experimental_providerMetadata: undefined, }, @@ -660,6 +810,12 @@ describe('result.fullStream', () => { return { stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, { type: 'tool-call', toolCallType: 'function', @@ -710,6 +866,11 @@ describe('result.fullStream', () => { type: 'roundtrip-finish', finishReason: 'stop', logprobs: undefined, + response: { + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, experimental_providerMetadata: undefined, }, @@ -717,6 +878,11 @@ describe('result.fullStream', () => { type: 'finish', finishReason: 'stop', logprobs: undefined, + response: { + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, experimental_providerMetadata: undefined, }, @@ -729,6 +895,12 @@ describe('result.fullStream', () => { model: new MockLanguageModelV1({ doStream: async () => ({ stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, { type: 'text-delta', textDelta: '' }, { type: 'text-delta', textDelta: 'Hello' }, { type: 'text-delta', textDelta: '' }, @@ -749,28 +921,35 @@ describe('result.fullStream', () => { prompt: 'test-input', }); - assert.deepStrictEqual( - await convertAsyncIterableToArray(result.fullStream), - [ - { type: 'text-delta', textDelta: 'Hello' }, - { type: 'text-delta', textDelta: ', ' }, - { type: 'text-delta', textDelta: 'world!' 
}, - { - type: 'roundtrip-finish', - finishReason: 'stop', - logprobs: undefined, - usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, - experimental_providerMetadata: undefined, - }, - { - type: 'finish', - finishReason: 'stop', - logprobs: undefined, - usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, - experimental_providerMetadata: undefined, - }, - ], - ); + expect(await convertAsyncIterableToArray(result.fullStream)).toStrictEqual([ + { type: 'text-delta', textDelta: 'Hello' }, + { type: 'text-delta', textDelta: ', ' }, + { type: 'text-delta', textDelta: 'world!' }, + { + type: 'roundtrip-finish', + finishReason: 'stop', + logprobs: undefined, + response: { + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, + usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + experimental_providerMetadata: undefined, + }, + { + type: 'finish', + finishReason: 'stop', + logprobs: undefined, + response: { + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, + usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + experimental_providerMetadata: undefined, + }, + ]); }); }); @@ -1409,6 +1588,12 @@ describe('multiple stream consumption', () => { doStream: async () => { return { stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, { type: 'text-delta', textDelta: 'Hello' }, { type: 'text-delta', textDelta: ', ' }, { type: 'text-delta', textDelta: 'world!' }, @@ -1450,28 +1635,35 @@ describe('multiple stream consumption', () => { ], ); - assert.deepStrictEqual( - await convertAsyncIterableToArray(result.fullStream), - [ - { type: 'text-delta', textDelta: 'Hello' }, - { type: 'text-delta', textDelta: ', ' }, - { type: 'text-delta', textDelta: 'world!' }, - { - type: 'roundtrip-finish', - finishReason: 'stop', - logprobs: undefined, - usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, - experimental_providerMetadata: undefined, - }, - { - type: 'finish', - finishReason: 'stop', - logprobs: undefined, - usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, - experimental_providerMetadata: undefined, - }, - ], - ); + expect(await convertAsyncIterableToArray(result.fullStream)).toStrictEqual([ + { type: 'text-delta', textDelta: 'Hello' }, + { type: 'text-delta', textDelta: ', ' }, + { type: 'text-delta', textDelta: 'world!' 
}, + { + type: 'roundtrip-finish', + finishReason: 'stop', + logprobs: undefined, + response: { + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, + usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + experimental_providerMetadata: undefined, + }, + { + type: 'finish', + finishReason: 'stop', + logprobs: undefined, + response: { + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, + usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + experimental_providerMetadata: undefined, + }, + ]); }); }); @@ -1567,6 +1759,45 @@ describe('result.providerMetadata', () => { }); }); +describe('result.response', () => { + it('should resolve with response information', async () => { + const result = await streamText({ + model: new MockLanguageModelV1({ + doStream: async () => ({ + stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, + { type: 'text-delta', textDelta: 'Hello' }, + { + type: 'finish', + finishReason: 'stop', + logprobs: undefined, + usage: { completionTokens: 10, promptTokens: 3 }, + }, + ]), + rawCall: { rawPrompt: 'prompt', rawSettings: {} }, + rawResponse: { headers: { call: '2' } }, + }), + }), + prompt: 'test-input', + }); + + // consume stream (runs in parallel) + convertAsyncIterableToArray(result.textStream); + + assert.deepStrictEqual(await result.response, { + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + headers: { call: '2' }, + }); + }); +}); + describe('result.text', () => { it('should resolve with full text', async () => { const result = await streamText({ @@ -1875,99 +2106,60 @@ describe('options.onChunk', () => { }); }); -describe('options.onFinish', () => { +it('options.onFinish should send correct information', async () => { let result: Parameters< Required[0]>['onFinish'] >[0]; - beforeEach(async () => { - const { textStream } = await streamText({ - model: new MockLanguageModelV1({ - doStream: async ({}) => ({ - stream: convertArrayToReadableStream([ - { type: 'text-delta', textDelta: 'Hello' }, - { type: 'text-delta', textDelta: ', ' }, - { - type: 'tool-call', - toolCallType: 'function', - toolCallId: 'call-1', - toolName: 'tool1', - args: `{ "value": "value" }`, - }, - { type: 'text-delta', textDelta: `world!` }, - { - type: 'finish', - finishReason: 'stop', - logprobs: undefined, - usage: { completionTokens: 10, promptTokens: 3 }, - providerMetadata: { - testProvider: { testKey: 'testValue' }, - }, + const { textStream } = await streamText({ + model: new MockLanguageModelV1({ + doStream: async ({}) => ({ + stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, + { type: 'text-delta', textDelta: 'Hello' }, + { type: 'text-delta', textDelta: ', ' }, + { + type: 'tool-call', + toolCallType: 'function', + toolCallId: 'call-1', + toolName: 'tool1', + args: `{ "value": "value" }`, + }, + { type: 'text-delta', textDelta: `world!` }, + { + type: 'finish', + finishReason: 'stop', + logprobs: undefined, + usage: { completionTokens: 10, promptTokens: 3 }, + providerMetadata: { + testProvider: { testKey: 'testValue' }, }, - ]), - rawCall: { rawPrompt: 'prompt', rawSettings: {} }, - }), + }, + ]), + rawCall: { rawPrompt: 'prompt', rawSettings: {} }, + rawResponse: { headers: { call: '2' } }, }), - tools: { - tool1: { - parameters: z.object({ value: z.string() }), - execute: async ({ value }) => 
`${value}-result`, - }, - }, - prompt: 'test-input', - onFinish: async event => { - result = event as unknown as typeof result; - }, - }); - - // consume stream - await convertAsyncIterableToArray(textStream); - }); - - it('should contain token usage', async () => { - assert.deepStrictEqual(result.usage, { - completionTokens: 10, - promptTokens: 3, - totalTokens: 13, - }); - }); - - it('should contain finish reason', async () => { - assert.strictEqual(result.finishReason, 'stop'); - }); - - it('should contain full text', async () => { - assert.strictEqual(result.text, 'Hello, world!'); - }); - - it('should contain tool calls', async () => { - assert.deepStrictEqual(result.toolCalls, [ - { - type: 'tool-call', - toolCallId: 'call-1', - toolName: 'tool1', - args: { value: 'value' }, + }), + tools: { + tool1: { + parameters: z.object({ value: z.string() }), + execute: async ({ value }) => `${value}-result`, }, - ]); + }, + prompt: 'test-input', + onFinish: async event => { + result = event as unknown as typeof result; + }, }); - it('should contain tool results', async () => { - assert.deepStrictEqual(result.toolResults, [ - { - type: 'tool-result', - toolCallId: 'call-1', - toolName: 'tool1', - args: { value: 'value' }, - result: 'value-result', - }, - ]); - }); + await convertAsyncIterableToArray(textStream); // consume stream - it('should contain provider metadata', async () => { - assert.deepStrictEqual(result.experimental_providerMetadata, { - testProvider: { testKey: 'testValue' }, - }); - }); + expect(result!).toMatchSnapshot(); }); describe('options.maxToolRoundtrips', () => { @@ -2025,6 +2217,12 @@ describe('options.maxToolRoundtrips', () => { return { stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, { type: 'tool-call', toolCallType: 'function', @@ -2096,6 +2294,12 @@ describe('options.maxToolRoundtrips', () => { return { stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-1', + modelId: 'mock-model-id', + timestamp: new Date(1000), + }, { type: 'text-delta', textDelta: 'Hello, ' }, { type: 'text-delta', textDelta: `world!` }, { @@ -2129,66 +2333,82 @@ describe('options.maxToolRoundtrips', () => { }, experimental_telemetry: { isEnabled: true }, maxToolRoundtrips: 2, - _internal: { now: mockNow([0, 100, 500, 600, 1000]) }, + _internal: { + now: mockValues(0, 100, 500, 600, 1000), + }, }); }); it('should contain assistant response message and tool message from all roundtrips', async () => { - assert.deepStrictEqual( + expect( await convertAsyncIterableToArray(result.fullStream), - [ - { - type: 'tool-call', - toolCallId: 'call-1', - toolName: 'tool1', - args: { value: 'value' }, - }, - { - type: 'tool-result', - toolCallId: 'call-1', - toolName: 'tool1', - args: { value: 'value' }, - result: 'result1', - }, - { - type: 'roundtrip-finish', - finishReason: 'tool-calls', - logprobs: undefined, - experimental_providerMetadata: undefined, - usage: { - completionTokens: 10, - promptTokens: 3, - totalTokens: 13, - }, + ).toStrictEqual([ + { + type: 'tool-call', + toolCallId: 'call-1', + toolName: 'tool1', + args: { value: 'value' }, + }, + { + type: 'tool-result', + toolCallId: 'call-1', + toolName: 'tool1', + args: { value: 'value' }, + result: 'result1', + }, + { + type: 'roundtrip-finish', + finishReason: 'tool-calls', + logprobs: undefined, + response: { + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), }, - { - type: 'text-delta', - textDelta: 
'Hello, ', + experimental_providerMetadata: undefined, + usage: { + completionTokens: 10, + promptTokens: 3, + totalTokens: 13, }, - { - type: 'text-delta', - textDelta: 'world!', + }, + { + type: 'text-delta', + textDelta: 'Hello, ', + }, + { + type: 'text-delta', + textDelta: 'world!', + }, + { + type: 'roundtrip-finish', + finishReason: 'stop', + logprobs: undefined, + experimental_providerMetadata: undefined, + response: { + id: 'id-1', + modelId: 'mock-model-id', + timestamp: new Date(1000), }, - { - type: 'roundtrip-finish', - finishReason: 'stop', - logprobs: undefined, - experimental_providerMetadata: undefined, - usage: { - completionTokens: 5, - promptTokens: 1, - totalTokens: 6, - }, + usage: { + completionTokens: 5, + promptTokens: 1, + totalTokens: 6, }, - { - type: 'finish', - finishReason: 'stop', - logprobs: undefined, - usage: { completionTokens: 15, promptTokens: 4, totalTokens: 19 }, - experimental_providerMetadata: undefined, + }, + { + type: 'finish', + finishReason: 'stop', + logprobs: undefined, + response: { + id: 'id-1', + modelId: 'mock-model-id', + timestamp: new Date(1000), }, - ], - ); + usage: { completionTokens: 15, promptTokens: 4, totalTokens: 19 }, + experimental_providerMetadata: undefined, + }, + ]); }); describe('rawSettings', () => { @@ -2201,26 +2421,9 @@ describe('options.maxToolRoundtrips', () => { }); }); - describe('onFinish', () => { - beforeEach(async () => { - await convertAsyncIterableToArray(result.fullStream); // consume stream - }); - - it('should contain total token usage', async () => { - assert.deepStrictEqual(onFinishResult.usage, { - completionTokens: 15, - promptTokens: 4, - totalTokens: 19, - }); - }); - - it('should contain finish reason from final roundtrip', async () => { - assert.strictEqual(onFinishResult.finishReason, 'stop'); - }); - - it('should contain text from final roundtrip', async () => { - assert.strictEqual(onFinishResult.text, 'Hello, world!'); - }); + it('onFinish should send correct information', async () => { + await convertAsyncIterableToArray(result.fullStream); // consume stream + expect(onFinishResult).toMatchSnapshot(); }); describe('value promises', () => { @@ -2305,6 +2508,12 @@ describe('telemetry', () => { model: new MockLanguageModelV1({ doStream: async ({}) => ({ stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, { type: 'text-delta', textDelta: 'Hello' }, { type: 'text-delta', textDelta: ', ' }, { type: 'text-delta', textDelta: `world!` }, @@ -2319,7 +2528,9 @@ describe('telemetry', () => { }), }), prompt: 'test-input', - _internal: { now: mockNow([0, 100, 500]) }, + _internal: { + now: mockValues(0, 100, 500), + }, }); // consume stream @@ -2333,6 +2544,12 @@ describe('telemetry', () => { model: new MockLanguageModelV1({ doStream: async ({}) => ({ stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, { type: 'text-delta', textDelta: 'Hello' }, { type: 'text-delta', textDelta: ', ' }, { type: 'text-delta', textDelta: `world!` }, @@ -2365,7 +2582,7 @@ describe('telemetry', () => { test2: false, }, }, - _internal: { now: mockNow([0, 100, 500]) }, + _internal: { now: mockValues(0, 100, 500) }, }); // consume stream @@ -2379,6 +2596,12 @@ describe('telemetry', () => { model: new MockLanguageModelV1({ doStream: async ({}) => ({ stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 
'mock-model-id', + timestamp: new Date(0), + }, { type: 'tool-call', toolCallType: 'function', @@ -2404,7 +2627,7 @@ describe('telemetry', () => { }, prompt: 'test-input', experimental_telemetry: { isEnabled: true }, - _internal: { now: mockNow([0, 100, 500]) }, + _internal: { now: mockValues(0, 100, 500) }, }); // consume stream @@ -2418,6 +2641,12 @@ describe('telemetry', () => { model: new MockLanguageModelV1({ doStream: async ({}) => ({ stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, { type: 'tool-call', toolCallType: 'function', @@ -2447,7 +2676,7 @@ describe('telemetry', () => { recordInputs: false, recordOutputs: false, }, - _internal: { now: mockNow([0, 100, 500]) }, + _internal: { now: mockValues(0, 100, 500) }, }); // consume stream @@ -2485,6 +2714,12 @@ describe('tools with custom schema', () => { return { stream: convertArrayToReadableStream([ + { + type: 'response-metadata', + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, { type: 'tool-call', toolCallType: 'function', @@ -2515,33 +2750,42 @@ describe('tools with custom schema', () => { }, toolChoice: 'required', prompt: 'test-input', - _internal: { now: mockNow([0, 100, 500]) }, + _internal: { + now: mockValues(0, 100, 500), + }, }); - assert.deepStrictEqual( - await convertAsyncIterableToArray(result.fullStream), - [ - { - type: 'tool-call', - toolCallId: 'call-1', - toolName: 'tool1', - args: { value: 'value' }, - }, - { - type: 'roundtrip-finish', - finishReason: 'stop', - logprobs: undefined, - usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, - experimental_providerMetadata: undefined, - }, - { - type: 'finish', - finishReason: 'stop', - logprobs: undefined, - usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, - experimental_providerMetadata: undefined, - }, - ], - ); + expect(await convertAsyncIterableToArray(result.fullStream)).toStrictEqual([ + { + type: 'tool-call', + toolCallId: 'call-1', + toolName: 'tool1', + args: { value: 'value' }, + }, + { + type: 'roundtrip-finish', + finishReason: 'stop', + logprobs: undefined, + response: { + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, + usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + experimental_providerMetadata: undefined, + }, + { + type: 'finish', + finishReason: 'stop', + logprobs: undefined, + response: { + id: 'id-0', + modelId: 'mock-model-id', + timestamp: new Date(0), + }, + usage: { completionTokens: 10, promptTokens: 3, totalTokens: 13 }, + experimental_providerMetadata: undefined, + }, + ]); }); }); diff --git a/packages/ai/core/generate-text/stream-text.ts b/packages/ai/core/generate-text/stream-text.ts index c6d149577c4f..e05bda2eca44 100644 --- a/packages/ai/core/generate-text/stream-text.ts +++ b/packages/ai/core/generate-text/stream-text.ts @@ -1,4 +1,5 @@ import { LanguageModelV1Prompt } from '@ai-sdk/provider'; +import { createIdGenerator } from '@ai-sdk/provider-utils'; import { Span } from '@opentelemetry/api'; import { ServerResponse } from 'node:http'; import { @@ -30,10 +31,11 @@ import { CoreToolChoice, FinishReason, LanguageModel, + LanguageModelResponseMetadataWithHeaders, LogProbs, ProviderMetadata, } from '../types'; -import { CompletionTokenUsage } from '../types/token-usage'; +import { LanguageModelUsage } from '../types/usage'; import { AsyncIterableStream, createAsyncIterableStream, @@ -51,6 +53,8 @@ import { toResponseMessages } from 
'./to-response-messages'; import { ToToolCall } from './tool-call'; import { ToToolResult } from './tool-result'; +const originalGenerateId = createIdGenerator({ prefix: 'aitxt-', length: 24 }); + /** Generate a text and call tools for a given prompt using a language model. @@ -112,7 +116,11 @@ export async function streamText>({ experimental_toolCallStreaming: toolCallStreaming = false, onChunk, onFinish, - _internal: { now = originalNow } = {}, + _internal: { + now = originalNow, + generateId = originalGenerateId, + currentDate = () => new Date(), + } = {}, ...settings }: CallSettings & Prompt & { @@ -185,7 +193,7 @@ The reason why the generation finished. /** The token usage of the generated response. */ - usage: CompletionTokenUsage; + usage: LanguageModelUsage; /** The full text that has been generated. @@ -204,6 +212,8 @@ The tool results that have been generated. /** Optional raw response data. + +@deprecated Use `response` instead. */ rawResponse?: { /** @@ -212,6 +222,11 @@ Response headers. headers?: Record; }; + /** +Response metadata. + */ + response: LanguageModelResponseMetadataWithHeaders; + /** Warnings from the model provider (e.g. unsupported settings). */ @@ -230,6 +245,8 @@ results that can be fully encapsulated in the provider. */ _internal?: { now?: () => number; + generateId?: () => string; + currentDate?: () => Date; }; }): Promise> { const baseTelemetryAttributes = getBaseTelemetryAttributes({ @@ -364,7 +381,10 @@ results that can be fully encapsulated in the provider. maxToolRoundtrips, startRoundtrip, promptMessages, + modelId: model.modelId, now, + currentDate, + generateId, }); }, }); @@ -402,6 +422,7 @@ class DefaultStreamTextResult> readonly text: StreamTextResult['text']; readonly toolCalls: StreamTextResult['toolCalls']; readonly toolResults: StreamTextResult['toolResults']; + readonly response: StreamTextResult['response']; constructor({ stream, @@ -416,7 +437,10 @@ class DefaultStreamTextResult> maxToolRoundtrips, startRoundtrip, promptMessages, + modelId, now, + currentDate, + generateId, }: { stream: ReadableStream>; warnings: StreamTextResult['warnings']; @@ -430,14 +454,17 @@ class DefaultStreamTextResult> maxToolRoundtrips: number; startRoundtrip: StartRoundtripFunction; promptMessages: LanguageModelV1Prompt; + modelId: string; now: () => number; + currentDate: () => Date; + generateId: () => string; }) { this.warnings = warnings; this.rawResponse = rawResponse; // initialize usage promise const { resolve: resolveUsage, promise: usagePromise } = - createResolvablePromise(); + createResolvablePromise(); this.usage = usagePromise; // initialize finish reason promise @@ -467,6 +494,11 @@ class DefaultStreamTextResult> } = createResolvablePromise(); this.experimental_providerMetadata = providerMetadataPromise; + // initialize response promise + const { resolve: resolveResponse, promise: responsePromise } = + createResolvablePromise['response']>>(); + this.response = responsePromise; + // create a stitchable stream to send roundtrips in a single response stream const { stream: stitchableStream, @@ -491,17 +523,17 @@ class DefaultStreamTextResult> totalTokens: 0, }, }: { - stream: ReadableStream>; + stream: ReadableStream>; startTimestamp: number; doStreamSpan: Span; currentToolRoundtrip: number; promptMessages: LanguageModelV1Prompt; - usage: CompletionTokenUsage | undefined; + usage: LanguageModelUsage | undefined; }) { const roundtripToolCalls: ToToolCall[] = []; const roundtripToolResults: ToToolResult[] = []; let roundtripFinishReason: FinishReason = 
'unknown'; - let roundtripUsage: CompletionTokenUsage = { + let roundtripUsage: LanguageModelUsage = { promptTokens: 0, completionTokens: 0, totalTokens: 0, @@ -510,6 +542,15 @@ class DefaultStreamTextResult> let roundtripFirstChunk = true; let roundtripText = ''; let roundtripLogProbs: LogProbs | undefined; + let roundtripResponse: { + id: string; + timestamp: Date; + modelId: string; + } = { + id: generateId(), + timestamp: currentDate(), + modelId, + }; addStream( stream.pipeThrough( @@ -546,29 +587,41 @@ class DefaultStreamTextResult> const chunkType = chunk.type; switch (chunkType) { - case 'text-delta': + case 'text-delta': { controller.enqueue(chunk); // create the full text from text deltas (for onFinish callback and text promise): roundtripText += chunk.textDelta; await onChunk?.({ chunk }); break; + } - case 'tool-call': + case 'tool-call': { controller.enqueue(chunk); // store tool calls for onFinish callback and toolCalls promise: roundtripToolCalls.push(chunk); await onChunk?.({ chunk }); break; + } - case 'tool-result': + case 'tool-result': { controller.enqueue(chunk); // store tool results for onFinish callback and toolResults promise: roundtripToolResults.push(chunk); // as any needed, bc type inferences mixed up tool-result with tool-call await onChunk?.({ chunk: chunk as any }); break; + } + + case 'response-metadata': { + roundtripResponse = { + id: chunk.id ?? roundtripResponse.id, + timestamp: chunk.timestamp ?? roundtripResponse.timestamp, + modelId: chunk.modelId ?? roundtripResponse.modelId, + }; + break; + } - case 'finish': + case 'finish': { // Note: tool executions might not be finished yet when the finish event is emitted. // store usage and finish reason for promises and onFinish callback: roundtripUsage = chunk.usage; @@ -588,6 +641,7 @@ class DefaultStreamTextResult> }); break; + } case 'tool-call-streaming-start': case 'tool-call-delta': { @@ -596,10 +650,11 @@ class DefaultStreamTextResult> break; } - case 'error': + case 'error': { controller.enqueue(chunk); roundtripFinishReason = 'error'; break; + } default: { const exhaustiveCheck: never = chunkType; @@ -616,6 +671,7 @@ class DefaultStreamTextResult> usage: roundtripUsage, experimental_providerMetadata: roundtripProviderMetadata, logprobs: roundtripLogProbs, + response: roundtripResponse, }); const telemetryToolCalls = @@ -633,6 +689,10 @@ class DefaultStreamTextResult> 'ai.response.toolCalls': { output: () => telemetryToolCalls, }, + 'ai.response.id': roundtripResponse.id, + 'ai.response.model': roundtripResponse.modelId, + 'ai.response.timestamp': + roundtripResponse.timestamp.toISOString(), 'ai.usage.promptTokens': roundtripUsage.promptTokens, 'ai.usage.completionTokens': @@ -647,6 +707,8 @@ class DefaultStreamTextResult> // standardized gen-ai llm span attributes: 'gen_ai.response.finish_reasons': [roundtripFinishReason], + 'gen_ai.response.id': roundtripResponse.id, + 'gen_ai.response.model': roundtripResponse.modelId, 'gen_ai.usage.input_tokens': roundtripUsage.promptTokens, 'gen_ai.usage.output_tokens': roundtripUsage.completionTokens, @@ -722,6 +784,7 @@ class DefaultStreamTextResult> usage: combinedUsage, experimental_providerMetadata: roundtripProviderMetadata, logprobs: roundtripLogProbs, + response: roundtripResponse, }); // close the stitchable stream @@ -759,6 +822,10 @@ class DefaultStreamTextResult> resolveToolCalls(roundtripToolCalls); resolveProviderMetadata(roundtripProviderMetadata); resolveToolResults(roundtripToolResults); + resolveResponse({ + ...roundtripResponse, + headers: 
rawResponse?.headers, + }); // call onFinish callback: await onFinish?.({ @@ -772,6 +839,10 @@ class DefaultStreamTextResult> // The type exposed to the users will be correctly inferred. toolResults: roundtripToolResults as any, rawResponse, + response: { + ...roundtripResponse, + headers: rawResponse?.headers, + }, warnings, experimental_providerMetadata: roundtripProviderMetadata, }); diff --git a/packages/ai/core/test/mock-embedding-model-v1.ts b/packages/ai/core/test/mock-embedding-model-v1.ts index c9606dfa7c40..a77b465d682f 100644 --- a/packages/ai/core/test/mock-embedding-model-v1.ts +++ b/packages/ai/core/test/mock-embedding-model-v1.ts @@ -1,6 +1,6 @@ import { EmbeddingModelV1 } from '@ai-sdk/provider'; import { Embedding } from '../types'; -import { EmbeddingTokenUsage } from '../types/token-usage'; +import { EmbeddingModelUsage } from '../types/usage'; export class MockEmbeddingModelV1 implements EmbeddingModelV1 { readonly specificationVersion = 'v1'; @@ -38,7 +38,7 @@ export class MockEmbeddingModelV1 implements EmbeddingModelV1 { export function mockEmbed( expectedValues: Array, embeddings: Array, - usage?: EmbeddingTokenUsage, + usage?: EmbeddingModelUsage, ): EmbeddingModelV1['doEmbed'] { return async ({ values }) => { assert.deepStrictEqual(expectedValues, values); diff --git a/packages/ai/core/test/mock-id.ts b/packages/ai/core/test/mock-id.ts new file mode 100644 index 000000000000..e8ae98e347f9 --- /dev/null +++ b/packages/ai/core/test/mock-id.ts @@ -0,0 +1,4 @@ +export function mockId(): () => string { + let counter = 0; + return () => `id-${counter++}`; +} diff --git a/packages/ai/core/test/mock-now.ts b/packages/ai/core/test/mock-values.ts similarity index 59% rename from packages/ai/core/test/mock-now.ts rename to packages/ai/core/test/mock-values.ts index 9f9684ace4b4..bcf5f14da356 100644 --- a/packages/ai/core/test/mock-now.ts +++ b/packages/ai/core/test/mock-values.ts @@ -1,4 +1,4 @@ -export function mockNow(values: number[]): () => number { +export function mockValues(...values: T[]): () => T { let counter = 0; return () => values[counter++] ?? values[values.length - 1]; } diff --git a/packages/ai/core/types/index.ts b/packages/ai/core/types/index.ts index b244aff221b9..c61c686370d9 100644 --- a/packages/ai/core/types/index.ts +++ b/packages/ai/core/types/index.ts @@ -1,12 +1,25 @@ -import type { CompletionTokenUsage as CompletionTokenUsageOriginal } from './token-usage'; +import type { + LanguageModelUsage as LanguageModelUsageOriginal, + EmbeddingModelUsage as EmbeddingModelUsageOriginal, +} from './usage'; export * from './embedding-model'; export * from './language-model'; export type { Provider } from './provider'; export type { ProviderMetadata } from './provider-metadata'; -export type { EmbeddingTokenUsage } from './token-usage'; + +/** + * @deprecated Use LanguageModelUsage instead. + */ +export type TokenUsage = LanguageModelUsageOriginal; +/** + * @deprecated Use LanguageModelUsage instead. + */ +export type CompletionTokenUsage = LanguageModelUsageOriginal; +export type LanguageModelUsage = LanguageModelUsageOriginal; + /** - * @deprecated Use CompletionTokenUsage instead. + * @deprecated Use EmbeddingModelUsage instead. 
*/ -export type TokenUsage = CompletionTokenUsageOriginal; -export type CompletionTokenUsage = CompletionTokenUsageOriginal; +export type EmbeddingTokenUsage = EmbeddingModelUsageOriginal; +export type EmbeddingModelUsage = EmbeddingModelUsageOriginal; diff --git a/packages/ai/core/types/language-model.ts b/packages/ai/core/types/language-model.ts index a7d490a0beb5..56ba75701dd1 100644 --- a/packages/ai/core/types/language-model.ts +++ b/packages/ai/core/types/language-model.ts @@ -25,6 +25,8 @@ export type FinishReason = LanguageModelV1FinishReason; /** Log probabilities for each token and its top log probabilities. + +@deprecated Will become a provider extension in the future. */ export type LogProbs = LanguageModelV1LogProbs; @@ -48,3 +50,25 @@ export type CoreToolChoice> = | 'none' | 'required' | { type: 'tool'; toolName: keyof TOOLS }; + +export type LanguageModelResponseMetadata = { + /** +ID for the generated response. + */ + id: string; + + /** +Timestamp for the start of the generated response. +*/ + timestamp: Date; + + /** +The ID of the response model that was used to generate the response. +*/ + modelId: string; +}; + +export type LanguageModelResponseMetadataWithHeaders = + LanguageModelResponseMetadata & { + headers?: Record; + }; diff --git a/packages/ai/core/types/token-usage.ts b/packages/ai/core/types/usage.ts similarity index 58% rename from packages/ai/core/types/token-usage.ts rename to packages/ai/core/types/usage.ts index 73166a0a56b8..022f80f091da 100644 --- a/packages/ai/core/types/token-usage.ts +++ b/packages/ai/core/types/usage.ts @@ -1,7 +1,7 @@ /** Represents the number of tokens used in a prompt and completion. */ -export type CompletionTokenUsage = { +export type LanguageModelUsage = { /** The number of tokens used in the prompt. */ @@ -18,20 +18,34 @@ The total number of tokens used (promptTokens + completionTokens). totalTokens: number; }; +/** +Represents the number of tokens used in a prompt and completion. + +@deprecated Use `LanguageModelUsage` instead. + */ +export type CompletionTokenUsage = LanguageModelUsage; + /** Represents the number of tokens used in an embedding. */ -export type EmbeddingTokenUsage = { +export type EmbeddingModelUsage = { /** The number of tokens used in the embedding. */ tokens: number; }; -export function calculateCompletionTokenUsage(usage: { +/** +Represents the number of tokens used in an embedding. + +@deprecated Use `EmbeddingModelUsage` instead. 
+ */ +export type EmbeddingTokenUsage = EmbeddingModelUsage; + +export function calculateLanguageModelUsage(usage: { promptTokens: number; completionTokens: number; -}): CompletionTokenUsage { +}): LanguageModelUsage { return { promptTokens: usage.promptTokens, completionTokens: usage.completionTokens, diff --git a/packages/ai/rsc/stream-ui/stream-ui.tsx b/packages/ai/rsc/stream-ui/stream-ui.tsx index a093d9a8ab52..b0631051cb18 100644 --- a/packages/ai/rsc/stream-ui/stream-ui.tsx +++ b/packages/ai/rsc/stream-ui/stream-ui.tsx @@ -10,9 +10,9 @@ import { Prompt } from '../../core/prompt/prompt'; import { validatePrompt } from '../../core/prompt/validate-prompt'; import { CallWarning, CoreToolChoice, FinishReason } from '../../core/types'; import { - CompletionTokenUsage, - calculateCompletionTokenUsage, -} from '../../core/types/token-usage'; + LanguageModelUsage, + calculateLanguageModelUsage, +} from '../../core/types/usage'; import { InvalidToolArgumentsError } from '../../errors/invalid-tool-arguments-error'; import { NoSuchToolError } from '../../errors/no-such-tool-error'; import { createResolvablePromise } from '../../util/create-resolvable-promise'; @@ -124,7 +124,7 @@ export async function streamUI< /** * The token usage of the generated response. */ - usage: CompletionTokenUsage; + usage: LanguageModelUsage; /** * The final ui node that was generated. */ @@ -327,7 +327,7 @@ export async function streamUI< case 'finish': { onFinish?.({ finishReason: value.finishReason, - usage: calculateCompletionTokenUsage(value.usage), + usage: calculateLanguageModelUsage(value.usage), value: ui.value, warnings: result.warnings, rawResponse: result.rawResponse, diff --git a/packages/anthropic/src/anthropic-messages-language-model.test.ts b/packages/anthropic/src/anthropic-messages-language-model.test.ts index 9386616a43da..f0a9a049c900 100644 --- a/packages/anthropic/src/anthropic-messages-language-model.test.ts +++ b/packages/anthropic/src/anthropic-messages-language-model.test.ts @@ -26,6 +26,8 @@ describe('doGenerate', () => { output_tokens: 30, }, stopReason = 'end_turn', + id = 'msg_017TfcQ4AgGxKyBduUpqYPZn', + model = 'claude-3-haiku-20240307', }: { content?: AnthropicAssistantMessage['content']; usage?: { @@ -35,13 +37,15 @@ describe('doGenerate', () => { cache_read_input_tokens?: number; }; stopReason?: string; + id?: string; + model?: string; }) { server.responseBodyJson = { - id: 'msg_017TfcQ4AgGxKyBduUpqYPZn', + id, type: 'message', role: 'assistant', content, - model: 'claude-3-haiku-20240307', + model, stop_reason: stopReason, stop_sequence: null, usage, @@ -196,6 +200,24 @@ describe('doGenerate', () => { }); }); + it('should send additional response information', async () => { + prepareJsonResponse({ + id: 'test-id', + model: 'test-model', + }); + + const { response } = await model.doGenerate({ + inputFormat: 'prompt', + mode: { type: 'regular' }, + prompt: TEST_PROMPT, + }); + + expect(response).toStrictEqual({ + id: 'test-id', + modelId: 'test-model', + }); + }); + it('should expose the raw response headers', async () => { prepareJsonResponse({}); @@ -404,6 +426,11 @@ describe('doStream', () => { // note: space moved to last chunk bc of trimming expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { + type: 'response-metadata', + id: 'msg_01KfpJoAEabmH2iHRRFjQMAG', + modelId: 'claude-3-haiku-20240307', + }, { type: 'text-delta', textDelta: 'Hello' }, { type: 'text-delta', textDelta: ', ' }, { type: 'text-delta', textDelta: 'World!' 
}, @@ -458,6 +485,11 @@ describe('doStream', () => { }); expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { + type: 'response-metadata', + id: 'msg_01GouTqNCGXzrj5LQ5jEkw67', + modelId: 'claude-3-haiku-20240307', + }, { type: 'text-delta', textDelta: 'Okay', @@ -539,6 +571,11 @@ describe('doStream', () => { }); expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { + type: 'response-metadata', + id: 'msg_01KfpJoAEabmH2iHRRFjQMAG', + modelId: 'claude-3-haiku-20240307', + }, { type: 'error', error: { type: 'error', message: 'test error' } }, ]); }); @@ -640,6 +677,11 @@ describe('doStream', () => { // note: space moved to last chunk bc of trimming expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { + type: 'response-metadata', + id: 'msg_01KfpJoAEabmH2iHRRFjQMAG', + modelId: 'claude-3-haiku-20240307', + }, { type: 'text-delta', textDelta: 'Hello' }, { type: 'finish', diff --git a/packages/anthropic/src/anthropic-messages-language-model.ts b/packages/anthropic/src/anthropic-messages-language-model.ts index b12d381836db..64f652f0a18b 100644 --- a/packages/anthropic/src/anthropic-messages-language-model.ts +++ b/packages/anthropic/src/anthropic-messages-language-model.ts @@ -223,6 +223,10 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV1 { }, rawCall: { rawPrompt, rawSettings }, rawResponse: { headers: responseHeaders }, + response: { + id: response.id ?? undefined, + modelId: response.model ?? undefined, + }, warnings, providerMetadata: this.settings.cacheControl === true @@ -392,6 +396,12 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV1 { }; } + controller.enqueue({ + type: 'response-metadata', + id: value.message.id ?? undefined, + modelId: value.message.model ?? 
undefined, + }); + return; } @@ -435,6 +445,8 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV1 { // this approach limits breakages when the API changes and increases efficiency const anthropicMessagesResponseSchema = z.object({ type: z.literal('message'), + id: z.string().nullish(), + model: z.string().nullish(), content: z.array( z.discriminatedUnion('type', [ z.object({ @@ -464,6 +476,8 @@ const anthropicMessagesChunkSchema = z.discriminatedUnion('type', [ z.object({ type: z.literal('message_start'), message: z.object({ + id: z.string().nullish(), + model: z.string().nullish(), usage: z.object({ input_tokens: z.number(), output_tokens: z.number(), diff --git a/packages/cohere/src/cohere-chat-language-model.test.ts b/packages/cohere/src/cohere-chat-language-model.test.ts index 25c54778d298..c2e936748065 100644 --- a/packages/cohere/src/cohere-chat-language-model.test.ts +++ b/packages/cohere/src/cohere-chat-language-model.test.ts @@ -38,6 +38,7 @@ describe('doGenerate', () => { input_tokens: 4, output_tokens: 30, }, + generation_id = 'dad0c7cd-7982-42a7-acfb-706ccf598291', }: { input?: string; text?: string; @@ -47,11 +48,12 @@ describe('doGenerate', () => { input_tokens: number; output_tokens: number; }; + generation_id?: string; }) { server.responseBodyJson = { response_id: '0cf61ae0-1f60-4c18-9802-be7be809e712', text, - generation_id: 'dad0c7cd-7982-42a7-acfb-706ccf598291', + generation_id, chat_history: [ { role: 'USER', message: input }, { role: 'CHATBOT', message: text }, @@ -139,6 +141,22 @@ describe('doGenerate', () => { }); }); + it('should send additional response information', async () => { + prepareJsonResponse({ + generation_id: 'test-id', + }); + + const { response } = await model.doGenerate({ + inputFormat: 'prompt', + mode: { type: 'regular' }, + prompt: TEST_PROMPT, + }); + + expect(response).toStrictEqual({ + id: 'test-id', + }); + }); + it('should extract finish reason', async () => { prepareJsonResponse({ finish_reason: 'MAX_TOKENS', @@ -470,6 +488,7 @@ describe('doStream', () => { // note: space moved to last chunk bc of trimming expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { type: 'response-metadata', id: '586ac33f-9c64-452c-8f8d-e5890e73b6fb' }, { type: 'text-delta', textDelta: 'Hello' }, { type: 'text-delta', textDelta: ', ' }, { type: 'text-delta', textDelta: 'World!' 
}, @@ -483,7 +502,7 @@ describe('doStream', () => { it('should stream tool deltas', async () => { server.responseChunks = [ - `{"event_type":"stream-start"}\n\n`, + `{"event_type":"stream-start","generation_id":"29f14a5a-11de-4cae-9800-25e4747408ea"}\n\n`, `{"event_type":"tool-calls-chunk","text":"I"}\n\n`, `{"event_type":"tool-calls-chunk","text":" will"}\n\n`, `{"event_type":"tool-calls-chunk","text":" use"}\n\n`, @@ -539,6 +558,7 @@ describe('doStream', () => { const responseArray = await convertReadableStreamToArray(stream); expect(responseArray).toStrictEqual([ + { type: 'response-metadata', id: '29f14a5a-11de-4cae-9800-25e4747408ea' }, { type: 'tool-call-delta', toolCallType: 'function', @@ -645,7 +665,7 @@ describe('doStream', () => { it('should handle out of order tool deltas', async () => { server.responseChunks = [ - `{"event_type":"stream-start"}\n\n`, + `{"event_type":"stream-start","generation_id":"29f14a5a-11de-4cae-9800-25e4747408ea"}\n\n`, `{"event_type":"tool-calls-chunk","tool_call_delta":{"index":0,"name":"test-tool-a"}}\n\n`, `{"event_type":"tool-calls-chunk","tool_call_delta":{"index":1,"name":"test-tool-b"}}\n\n`, `{"event_type":"tool-calls-chunk","tool_call_delta":{"index":0,"parameters":"{\\n \\""}}\n\n`, @@ -707,6 +727,7 @@ describe('doStream', () => { const responseArray = await convertReadableStreamToArray(stream); expect(responseArray).toStrictEqual([ + { type: 'response-metadata', id: '29f14a5a-11de-4cae-9800-25e4747408ea' }, { type: 'tool-call-delta', toolCallType: 'function', diff --git a/packages/cohere/src/cohere-chat-language-model.ts b/packages/cohere/src/cohere-chat-language-model.ts index 78f054296be0..be4559f0b7c2 100644 --- a/packages/cohere/src/cohere-chat-language-model.ts +++ b/packages/cohere/src/cohere-chat-language-model.ts @@ -174,6 +174,9 @@ export class CohereChatLanguageModel implements LanguageModelV1 { }, rawSettings, }, + response: { + id: response.generation_id ?? undefined, + }, rawResponse: { headers: responseHeaders }, warnings: undefined, }; @@ -291,6 +294,15 @@ export class CohereChatLanguageModel implements LanguageModelV1 { return; } + case 'stream-start': { + controller.enqueue({ + type: 'response-metadata', + id: value.generation_id ?? 
undefined, + }); + + return; + } + case 'stream-end': { finishReason = mapCohereFinishReason(value.finish_reason); const tokens = value.response.meta.tokens; @@ -332,6 +344,7 @@ export class CohereChatLanguageModel implements LanguageModelV1 { // limited version of the schema, focussed on what is needed for the implementation // this approach limits breakages when the API changes and increases efficiency const cohereChatResponseSchema = z.object({ + generation_id: z.string().nullish(), text: z.string(), tool_calls: z .array( @@ -340,7 +353,7 @@ const cohereChatResponseSchema = z.object({ parameters: z.unknown({}), }), ) - .optional(), + .nullish(), finish_reason: z.string(), meta: z.object({ tokens: z.object({ @@ -355,6 +368,7 @@ const cohereChatResponseSchema = z.object({ const cohereChatChunkSchema = z.discriminatedUnion('event_type', [ z.object({ event_type: z.literal('stream-start'), + generation_id: z.string().nullish(), }), z.object({ event_type: z.literal('search-queries-generation'), diff --git a/packages/mistral/src/get-response-metadata.ts b/packages/mistral/src/get-response-metadata.ts new file mode 100644 index 000000000000..bd358b23f704 --- /dev/null +++ b/packages/mistral/src/get-response-metadata.ts @@ -0,0 +1,15 @@ +export function getResponseMetadata({ + id, + model, + created, +}: { + id?: string | undefined | null; + created?: number | undefined | null; + model?: string | undefined | null; +}) { + return { + id: id ?? undefined, + modelId: model ?? undefined, + timestamp: created != null ? new Date(created * 1000) : undefined, + }; +} diff --git a/packages/mistral/src/mistral-chat-language-model.test.ts b/packages/mistral/src/mistral-chat-language-model.test.ts index 5c0b54ee159b..d9718e3a3ef5 100644 --- a/packages/mistral/src/mistral-chat-language-model.test.ts +++ b/packages/mistral/src/mistral-chat-language-model.test.ts @@ -27,6 +27,9 @@ describe('doGenerate', () => { total_tokens: 34, completion_tokens: 30, }, + id = '16362f24e60340d0994dd205c267a43a', + created = 1711113008, + model = 'mistral-small-latest', }: { content?: string; usage?: { @@ -34,12 +37,15 @@ describe('doGenerate', () => { total_tokens: number; completion_tokens: number; }; + id?: string; + created?: number; + model?: string; }) { server.responseBodyJson = { - id: '16362f24e60340d0994dd205c267a43a', object: 'chat.completion', - created: 1711113008, - model: 'mistral-small-latest', + id, + created, + model, choices: [ { index: 0, @@ -131,6 +137,26 @@ describe('doGenerate', () => { }); }); + it('should send additional response information', async () => { + prepareJsonResponse({ + id: 'test-id', + created: 123, + model: 'test-model', + }); + + const { response } = await model.doGenerate({ + inputFormat: 'prompt', + mode: { type: 'regular' }, + prompt: TEST_PROMPT, + }); + + expect(response).toStrictEqual({ + id: 'test-id', + timestamp: new Date(123 * 1000), + modelId: 'test-model', + }); + }); + it('should expose the raw response headers', async () => { prepareJsonResponse({ content: '' }); @@ -286,6 +312,12 @@ describe('doStream', () => { }); expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { + type: 'response-metadata', + id: '6e2cd91750904b7092f49bdca9083de1', + timestamp: new Date(1711097175 * 1000), + modelId: 'mistral-small-latest', + }, { type: 'text-delta', textDelta: '' }, { type: 'text-delta', textDelta: 'Hello' }, { type: 'text-delta', textDelta: ', ' }, @@ -337,9 +369,12 @@ describe('doStream', () => { expect(await convertReadableStreamToArray(stream)).toStrictEqual([ 
{ - type: 'text-delta', - textDelta: '', + type: 'response-metadata', + id: 'ad6f7ce6543c4d0890280ae184fe4dd8', + timestamp: new Date(1711365023 * 1000), + modelId: 'mistral-large-latest', }, + { type: 'text-delta', textDelta: '' }, { type: 'tool-call-delta', toolCallId: 'yfBEybNYi', diff --git a/packages/mistral/src/mistral-chat-language-model.ts b/packages/mistral/src/mistral-chat-language-model.ts index a3bfcb885489..041112b2e664 100644 --- a/packages/mistral/src/mistral-chat-language-model.ts +++ b/packages/mistral/src/mistral-chat-language-model.ts @@ -20,6 +20,7 @@ import { MistralChatSettings, } from './mistral-chat-settings'; import { mistralFailedResponseHandler } from './mistral-error'; +import { getResponseMetadata } from './get-response-metadata'; type MistralChatConfig = { provider: string; @@ -200,6 +201,7 @@ export class MistralChatLanguageModel implements LanguageModelV1 { }, rawCall: { rawPrompt, rawSettings }, rawResponse: { headers: responseHeaders }, + response: getResponseMetadata(response), warnings, }; } @@ -212,10 +214,7 @@ export class MistralChatLanguageModel implements LanguageModelV1 { const { responseHeaders, value: response } = await postJsonToApi({ url: `${this.config.baseURL}/chat/completions`, headers: combineHeaders(this.config.headers(), options.headers), - body: { - ...args, - stream: true, - }, + body: { ...args, stream: true }, failedResponseHandler: mistralFailedResponseHandler, successfulResponseHandler: createEventSourceResponseHandler( mistralChatChunkSchema, @@ -231,6 +230,7 @@ export class MistralChatLanguageModel implements LanguageModelV1 { promptTokens: Number.NaN, completionTokens: Number.NaN, }; + let isFirstChunk = true; return { stream: response.pipeThrough( @@ -246,6 +246,15 @@ export class MistralChatLanguageModel implements LanguageModelV1 { const value = chunk.value; + if (isFirstChunk) { + isFirstChunk = false; + + controller.enqueue({ + type: 'response-metadata', + ...getResponseMetadata(value), + }); + } + if (value.usage != null) { usage = { promptTokens: value.usage.prompt_tokens, @@ -308,6 +317,9 @@ export class MistralChatLanguageModel implements LanguageModelV1 { // limited version of the schema, focussed on what is needed for the implementation // this approach limits breakages when the API changes and increases efficiency const mistralChatResponseSchema = z.object({ + id: z.string().nullish(), + created: z.number().nullish(), + model: z.string().nullish(), choices: z.array( z.object({ message: z.object({ @@ -336,7 +348,9 @@ const mistralChatResponseSchema = z.object({ // limited version of the schema, focussed on what is needed for the implementation // this approach limits breakages when the API changes and increases efficiency const mistralChatChunkSchema = z.object({ - object: z.literal('chat.completion.chunk'), + id: z.string().nullish(), + created: z.number().nullish(), + model: z.string().nullish(), choices: z.array( z.object({ delta: z.object({ diff --git a/packages/openai/src/get-response-metadata.ts b/packages/openai/src/get-response-metadata.ts new file mode 100644 index 000000000000..bd358b23f704 --- /dev/null +++ b/packages/openai/src/get-response-metadata.ts @@ -0,0 +1,15 @@ +export function getResponseMetadata({ + id, + model, + created, +}: { + id?: string | undefined | null; + created?: number | undefined | null; + model?: string | undefined | null; +}) { + return { + id: id ?? undefined, + modelId: model ?? undefined, + timestamp: created != null ? 
new Date(created * 1000) : undefined, + }; +} diff --git a/packages/openai/src/openai-chat-language-model.test.ts b/packages/openai/src/openai-chat-language-model.test.ts index d1f991ff1a03..39ea7819b384 100644 --- a/packages/openai/src/openai-chat-language-model.test.ts +++ b/packages/openai/src/openai-chat-language-model.test.ts @@ -131,6 +131,9 @@ describe('doGenerate', () => { }, logprobs = null, finish_reason = 'stop', + id = 'chatcmpl-95ZTZkhr0mHNKqerQfiwkuox3PHAd', + created = 1711115037, + model = 'gpt-3.5-turbo-0125', }: { content?: string; tool_calls?: Array<{ @@ -160,12 +163,15 @@ describe('doGenerate', () => { | null; } | null; finish_reason?: string; + created?: number; + id?: string; + model?: string; } = {}) { server.responseBodyJson = { - id: 'chatcmpl-95ZTZkhr0mHNKqerQfiwkuox3PHAd', + id, object: 'chat.completion', - created: 1711115037, - model: 'gpt-3.5-turbo-0125', + created, + model, choices: [ { index: 0, @@ -214,6 +220,26 @@ describe('doGenerate', () => { }); }); + it('should send additional response information', async () => { + prepareJsonResponse({ + id: 'test-id', + created: 123, + model: 'test-model', + }); + + const { response } = await model.doGenerate({ + inputFormat: 'prompt', + mode: { type: 'regular' }, + prompt: TEST_PROMPT, + }); + + expect(response).toStrictEqual({ + id: 'test-id', + timestamp: new Date(123 * 1000), + modelId: 'test-model', + }); + }); + it('should support partial usage', async () => { prepareJsonResponse({ content: '', @@ -865,6 +891,12 @@ describe('doStream', () => { // note: space moved to last chunk bc of trimming expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + modelId: 'gpt-3.5-turbo-0613', + timestamp: new Date('2023-12-15T16:17:00.000Z'), + }, { type: 'text-delta', textDelta: '' }, { type: 'text-delta', textDelta: 'Hello' }, { type: 'text-delta', textDelta: ', ' }, @@ -934,6 +966,12 @@ describe('doStream', () => { }); expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + modelId: 'gpt-3.5-turbo-0125', + timestamp: new Date('2024-03-25T09:06:38.000Z'), + }, { type: 'tool-call-delta', toolCallId: 'call_O17Uplv4lJvD6DVdIvFFeRMw', @@ -1055,6 +1093,12 @@ describe('doStream', () => { }); expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + modelId: 'gpt-3.5-turbo-0125', + timestamp: new Date('2024-03-25T09:06:38.000Z'), + }, { type: 'tool-call-delta', toolCallId: 'call_O17Uplv4lJvD6DVdIvFFeRMw', @@ -1162,6 +1206,12 @@ describe('doStream', () => { }); expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { + type: 'response-metadata', + id: 'chatcmpl-96aZqmeDpA9IPD6tACY8djkMsJCMP', + modelId: 'gpt-3.5-turbo-0125', + timestamp: new Date('2024-03-25T09:06:38.000Z'), + }, { type: 'tool-call-delta', toolCallId: 'call_O17Uplv4lJvD6DVdIvFFeRMw', @@ -1225,6 +1275,12 @@ describe('doStream', () => { }); expect(await convertReadableStreamToArray(stream)).toStrictEqual([ + { + type: 'response-metadata', + id: 'chatcmpl-9o4RjdXk92In6yOzgND3bJxtedhS2', + modelId: 'gpt-4-turbo-2024-04-09', + timestamp: new Date('2024-07-23T07:41:59.000Z'), + }, { type: 'tool-call-delta', toolCallId: expect.any(String), diff --git a/packages/openai/src/openai-chat-language-model.ts b/packages/openai/src/openai-chat-language-model.ts index 
2ec098b46299..789e110251c7 100644 --- a/packages/openai/src/openai-chat-language-model.ts +++ b/packages/openai/src/openai-chat-language-model.ts @@ -26,6 +26,7 @@ import { openAIErrorDataSchema, openaiFailedResponseHandler, } from './openai-error'; +import { getResponseMetadata } from './get-response-metadata'; type OpenAIChatConfig = { provider: string; @@ -289,6 +290,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV1 { }, rawCall: { rawPrompt, rawSettings }, rawResponse: { headers: responseHeaders }, + response: getResponseMetadata(response), warnings, logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs), }; @@ -343,6 +345,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV1 { completionTokens: undefined, }; let logprobs: LanguageModelV1LogProbs; + let isFirstChunk = true; const { useLegacyFunctionCalling } = this.settings; @@ -369,6 +372,15 @@ export class OpenAIChatLanguageModel implements LanguageModelV1 { return; } + if (isFirstChunk) { + isFirstChunk = false; + + controller.enqueue({ + type: 'response-metadata', + ...getResponseMetadata(value), + }); + } + if (value.usage != null) { usage = { promptTokens: value.usage.prompt_tokens ?? undefined, @@ -549,6 +561,9 @@ const openAITokenUsageSchema = z // limited version of the schema, focussed on what is needed for the implementation // this approach limits breakages when the API changes and increases efficiency const openAIChatResponseSchema = z.object({ + id: z.string().nullish(), + created: z.number().nullish(), + model: z.string().nullish(), choices: z.array( z.object({ message: z.object({ @@ -602,6 +617,9 @@ const openAIChatResponseSchema = z.object({ // this approach limits breakages when the API changes and increases efficiency const openaiChatChunkSchema = z.union([ z.object({ + id: z.string().nullish(), + created: z.number().nullish(), + model: z.string().nullish(), choices: z.array( z.object({ delta: z diff --git a/packages/openai/src/openai-completion-language-model.test.ts b/packages/openai/src/openai-completion-language-model.test.ts index a9fa91820f1f..d55ea067ef9e 100644 --- a/packages/openai/src/openai-completion-language-model.test.ts +++ b/packages/openai/src/openai-completion-language-model.test.ts @@ -59,6 +59,9 @@ describe('doGenerate', () => { }, logprobs = null, finish_reason = 'stop', + id = 'cmpl-96cAM1v77r4jXa4qb2NSmRREV5oWB', + created = 1711363706, + model = 'gpt-3.5-turbo-instruct', }: { content?: string; usage?: { @@ -72,12 +75,15 @@ describe('doGenerate', () => { top_logprobs: Record[]; } | null; finish_reason?: string; + id?: string; + created?: number; + model?: string; }) { server.responseBodyJson = { - id: 'cmpl-96cAM1v77r4jXa4qb2NSmRREV5oWB', + id, object: 'text_completion', - created: 1711363706, - model: 'gpt-3.5-turbo-instruct', + created, + model, choices: [ { text: content, @@ -120,6 +126,26 @@ describe('doGenerate', () => { }); }); + it('should send additional response information', async () => { + prepareJsonResponse({ + id: 'test-id', + created: 123, + model: 'test-model', + }); + + const { response } = await model.doGenerate({ + inputFormat: 'prompt', + mode: { type: 'regular' }, + prompt: TEST_PROMPT, + }); + + expect(response).toStrictEqual({ + id: 'test-id', + timestamp: new Date(123 * 1000), + modelId: 'test-model', + }); + }); + it('should extract logprobs', async () => { prepareJsonResponse({ logprobs: TEST_LOGPROBS }); @@ -312,6 +338,12 @@ describe('doStream', () => { // note: space moved to last chunk bc of trimming expect(await 
convertReadableStreamToArray(stream)).toStrictEqual([ + { + id: 'cmpl-96c64EdfhOw8pjFFgVpLuT8k2MtdT', + modelId: 'gpt-3.5-turbo-instruct', + timestamp: new Date('2024-03-25T10:44:00.000Z'), + type: 'response-metadata', + }, { type: 'text-delta', textDelta: 'Hello' }, { type: 'text-delta', textDelta: ', ' }, { type: 'text-delta', textDelta: 'World!' }, diff --git a/packages/openai/src/openai-completion-language-model.ts b/packages/openai/src/openai-completion-language-model.ts index 590004d9b758..e7e5bdd12061 100644 --- a/packages/openai/src/openai-completion-language-model.ts +++ b/packages/openai/src/openai-completion-language-model.ts @@ -26,6 +26,7 @@ import { openAIErrorDataSchema, openaiFailedResponseHandler, } from './openai-error'; +import { getResponseMetadata } from './get-response-metadata'; type OpenAICompletionConfig = { provider: string; @@ -198,6 +199,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV1 { logprobs: mapOpenAICompletionLogProbs(choice.logprobs), rawCall: { rawPrompt, rawSettings }, rawResponse: { headers: responseHeaders }, + response: getResponseMetadata(response), warnings, }; } @@ -239,6 +241,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV1 { completionTokens: Number.NaN, }; let logprobs: LanguageModelV1LogProbs; + let isFirstChunk = true; return { stream: response.pipeThrough( @@ -263,6 +266,15 @@ export class OpenAICompletionLanguageModel implements LanguageModelV1 { return; } + if (isFirstChunk) { + isFirstChunk = false; + + controller.enqueue({ + type: 'response-metadata', + ...getResponseMetadata(value), + }); + } + if (value.usage != null) { usage = { promptTokens: value.usage.prompt_tokens, @@ -312,6 +324,9 @@ export class OpenAICompletionLanguageModel implements LanguageModelV1 { // limited version of the schema, focussed on what is needed for the implementation // this approach limits breakages when the API changes and increases efficiency const openAICompletionResponseSchema = z.object({ + id: z.string().nullish(), + created: z.number().nullish(), + model: z.string().nullish(), choices: z.array( z.object({ text: z.string(), @@ -322,8 +337,7 @@ const openAICompletionResponseSchema = z.object({ token_logprobs: z.array(z.number()), top_logprobs: z.array(z.record(z.string(), z.number())).nullable(), }) - .nullable() - .optional(), + .nullish(), }), ), usage: z.object({ @@ -336,6 +350,9 @@ const openAICompletionResponseSchema = z.object({ // this approach limits breakages when the API changes and increases efficiency const openaiCompletionChunkSchema = z.union([ z.object({ + id: z.string().nullish(), + created: z.number().nullish(), + model: z.string().nullish(), choices: z.array( z.object({ text: z.string(), @@ -347,8 +364,7 @@ const openaiCompletionChunkSchema = z.union([ token_logprobs: z.array(z.number()), top_logprobs: z.array(z.record(z.string(), z.number())).nullable(), }) - .nullable() - .optional(), + .nullish(), }), ), usage: z @@ -356,8 +372,7 @@ const openaiCompletionChunkSchema = z.union([ prompt_tokens: z.number(), completion_tokens: z.number(), }) - .optional() - .nullable(), + .nullish(), }), openAIErrorDataSchema, ]); diff --git a/packages/provider-utils/src/generate-id.ts b/packages/provider-utils/src/generate-id.ts index 71f52c60199d..b3b95609ed6d 100644 --- a/packages/provider-utils/src/generate-id.ts +++ b/packages/provider-utils/src/generate-id.ts @@ -1,9 +1,28 @@ import { customAlphabet } from 'nanoid/non-secure'; +/** + * Creates an ID generator that uses an alphabet of digits, 
diff --git a/packages/provider-utils/src/generate-id.ts b/packages/provider-utils/src/generate-id.ts
index 71f52c60199d..b3b95609ed6d 100644
--- a/packages/provider-utils/src/generate-id.ts
+++ b/packages/provider-utils/src/generate-id.ts
@@ -1,9 +1,28 @@
 import { customAlphabet } from 'nanoid/non-secure';
 
+/**
+ * Creates an ID generator that uses an alphabet of digits, uppercase and lowercase letters.
+ *
+ * @param alphabet - The alphabet to use for the ID. Default: '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'.
+ * @param prefix - The prefix of the ID to generate. Default: ''.
+ * @param length - The length of the random part of the ID to generate. Default: 7.
+ */
+// TODO change length to 16 in 4.0
+export const createIdGenerator = ({
+  prefix = '',
+  length = 7,
+  alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz',
+}: {
+  prefix?: string;
+  length?: number;
+  alphabet?: string;
+} = {}): (() => string) => {
+  const generator = customAlphabet(alphabet, length);
+  return () => `${prefix}${generator()}`;
+};
+
 /**
  * Generates a 7-character random string to use for IDs. Not secure.
  */
-export const generateId = customAlphabet(
-  '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz',
-  7,
-);
+//TODO change length to 16 in 4.0
+export const generateId = createIdGenerator();
diff --git a/packages/provider-utils/src/index.ts b/packages/provider-utils/src/index.ts
index 81e6e4edc3a9..bcf019c317a1 100644
--- a/packages/provider-utils/src/index.ts
+++ b/packages/provider-utils/src/index.ts
@@ -2,7 +2,7 @@ export * from './combine-headers';
 export * from './convert-async-generator-to-readable-stream';
 export * from './extract-response-headers';
 export * from './fetch-function';
-export * from './generate-id';
+export { createIdGenerator, generateId } from './generate-id';
 export * from './get-error-message';
 export * from './is-abort-error';
 export * from './load-api-key';
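
`createIdGenerator` generalizes the existing `generateId` helper: the default export keeps the current 7-character behavior, while callers can opt into a prefix, a different length, or a custom alphabet. A quick usage sketch of the API defined above (the `'msg-'` prefix and the length of 16 are arbitrary example values, and the sample outputs in the comments are illustrative):

```ts
import { createIdGenerator, generateId } from '@ai-sdk/provider-utils';

// Default generator: 7 random characters from the digits + letters alphabet.
const id = generateId(); // e.g. 'aZ3kQ9x'

// Custom generator: prefixed IDs with a longer random part.
const createMessageId = createIdGenerator({ prefix: 'msg-', length: 16 });
const messageId = createMessageId(); // e.g. 'msg-0aB1cD2eF3gH4iJ5'
```
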
diff --git a/packages/provider/src/language-model/v1/language-model-v1.ts b/packages/provider/src/language-model/v1/language-model-v1.ts
index a5b51d163122..84fdfced6fae 100644
--- a/packages/provider/src/language-model/v1/language-model-v1.ts
+++ b/packages/provider/src/language-model/v1/language-model-v1.ts
@@ -61,9 +61,8 @@ regardless of this flag, but might send different prompts and use further
 optimizations if this flag is set to `true`.
 
 Defaults to `false`.
-
-TODO rename to supportsGrammarGuidedGeneration in v2
 */
+  // TODO rename to supportsGrammarGuidedGeneration in v2
   readonly supportsStructuredOutputs?: boolean;
 
   /**
@@ -116,8 +115,9 @@ settings.
   };
 
   /**
-Optional raw response information for debugging purposes.
+Optional response information for telemetry and debugging purposes.
 */
+  // TODO rename to `response` in v2
   rawResponse?: {
     /**
 Response headers.
     */
     headers?: Record;
   };
 
+  response?: {
+    /**
+ID for the generated response, if the provider sends one.
+     */
+    id?: string;
+
+    /**
+Timestamp for the start of the generated response, if the provider sends one.
+     */
+    timestamp?: Date;
+
+    /**
+The ID of the response model that was used to generate the response, if the provider sends one.
+     */
+    modelId?: string;
+  };
+
   warnings?: LanguageModelV1CallWarning[];
 
   /**
@@ -202,6 +219,15 @@ export type LanguageModelV1StreamPart =
       argsTextDelta: string;
     }
 
+  // metadata for the response.
+  // separate stream part so it can be sent once it is available.
+  | {
+      type: 'response-metadata';
+      id?: string;
+      timestamp?: Date;
+      modelId?: string;
+    }
+
   // the usage stats, finish reason and logprobs should be the last part of the
   // stream:
   | {

From 2be4fb5e9c99d1eaec2cd03991b8baa90c0e2a13 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Thu, 5 Sep 2024 14:01:24 +0200
Subject: [PATCH 11/11] Version Packages (#2891)

Co-authored-by: github-actions[bot]
---
 .changeset/calm-walls-sin.md         |  11 -
 .changeset/two-boxes-attend.md       |   7 -
 packages/ai/CHANGELOG.md             |  16 ++
 packages/ai/package.json             |  16 +-
 packages/amazon-bedrock/CHANGELOG.md |   9 +
 packages/amazon-bedrock/package.json |   6 +-
 packages/anthropic/CHANGELOG.md      |  11 +
 packages/anthropic/package.json      |   6 +-
 packages/azure/CHANGELOG.md          |  10 +
 packages/azure/package.json          |   8 +-
 packages/cohere/CHANGELOG.md         |  10 +
 packages/cohere/package.json         |   6 +-
 packages/google-vertex/CHANGELOG.md  |   9 +
 packages/google-vertex/package.json  |   6 +-
 packages/google/CHANGELOG.md         |   9 +
 packages/google/package.json         |   6 +-
 packages/mistral/CHANGELOG.md        |  10 +
 packages/mistral/package.json        |   6 +-
 packages/openai/CHANGELOG.md         |  10 +
 packages/openai/package.json         |   6 +-
 packages/provider-utils/CHANGELOG.md |   9 +
 packages/provider-utils/package.json |   4 +-
 packages/provider/CHANGELOG.md       |   7 +
 packages/provider/package.json       |   2 +-
 packages/react/CHANGELOG.md          |   8 +
 packages/react/package.json          |   6 +-
 packages/solid/CHANGELOG.md          |   8 +
 packages/solid/package.json          |   6 +-
 packages/svelte/CHANGELOG.md         |   8 +
 packages/svelte/package.json         |   6 +-
 packages/ui-utils/CHANGELOG.md       |   9 +
 packages/ui-utils/package.json       |   6 +-
 packages/vue/CHANGELOG.md            |   8 +
 packages/vue/package.json            |   6 +-
 pnpm-lock.yaml                       | 346 +++++++++++++--------------
 35 files changed, 375 insertions(+), 242 deletions(-)
 delete mode 100644 .changeset/calm-walls-sin.md
 delete mode 100644 .changeset/two-boxes-attend.md

diff --git a/.changeset/calm-walls-sin.md b/.changeset/calm-walls-sin.md
deleted file mode 100644
index 62e5c1fc7da9..000000000000
--- a/.changeset/calm-walls-sin.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-'@ai-sdk/provider-utils': patch
-'@ai-sdk/anthropic': patch
-'@ai-sdk/provider': patch
-'@ai-sdk/mistral': patch
-'@ai-sdk/cohere': patch
-'@ai-sdk/openai': patch
-'ai': patch
----
-
-feat (ai): expose response id, response model, response timestamp in telemetry and api
diff --git a/.changeset/two-boxes-attend.md b/.changeset/two-boxes-attend.md
deleted file mode 100644
index 84408377808c..000000000000
--- a/.changeset/two-boxes-attend.md
+++ /dev/null
@@ -1,7 +0,0 @@
----
-'@ai-sdk/anthropic': patch
-'@ai-sdk/provider': patch
-'ai': patch
----
-
-fix (provider/anthropic): support prompt caching on assistant messages
diff --git a/packages/ai/CHANGELOG.md b/packages/ai/CHANGELOG.md
index 3547f8a81c6d..71f38a240a22 100644
--- a/packages/ai/CHANGELOG.md
+++ b/packages/ai/CHANGELOG.md
@@ -1,5 +1,21 @@
 # ai
 
+## 3.3.27
+
+### Patch Changes
+
+- 03313cd: feat (ai): expose response id, response model, response timestamp in telemetry and api
+- 3be7c1c: fix (provider/anthropic): support prompt caching on assistant messages
+- Updated dependencies [03313cd]
+- Updated dependencies [3be7c1c]
+  - @ai-sdk/provider-utils@1.0.18
+  - @ai-sdk/provider@0.0.23
+  - @ai-sdk/react@0.0.55
+  - @ai-sdk/solid@0.0.44
+  - @ai-sdk/svelte@0.0.46
+  - @ai-sdk/ui-utils@0.0.41
+  - @ai-sdk/vue@0.0.46
+
 ## 3.3.26
 
 ### Patch Changes
diff --git a/packages/ai/package.json b/packages/ai/package.json
index 0c7b8257e735..4008fe9e33b8 100644
--- a/packages/ai/package.json +++ b/packages/ai/package.json @@ -1,6 +1,6 @@ { "name": "ai", - "version": "3.3.26", + "version": "3.3.27", "description": "Vercel AI SDK - The AI Toolkit for TypeScript and JavaScript", "license": "Apache-2.0", "sideEffects": false, @@ -78,13 +78,13 @@ } }, "dependencies": { - "@ai-sdk/provider": "0.0.22", - "@ai-sdk/provider-utils": "1.0.17", - "@ai-sdk/react": "0.0.54", - "@ai-sdk/solid": "0.0.43", - "@ai-sdk/svelte": "0.0.45", - "@ai-sdk/ui-utils": "0.0.40", - "@ai-sdk/vue": "0.0.45", + "@ai-sdk/provider": "0.0.23", + "@ai-sdk/provider-utils": "1.0.18", + "@ai-sdk/react": "0.0.55", + "@ai-sdk/solid": "0.0.44", + "@ai-sdk/svelte": "0.0.46", + "@ai-sdk/ui-utils": "0.0.41", + "@ai-sdk/vue": "0.0.46", "@opentelemetry/api": "1.9.0", "eventsource-parser": "1.1.2", "jsondiffpatch": "0.6.0", diff --git a/packages/amazon-bedrock/CHANGELOG.md b/packages/amazon-bedrock/CHANGELOG.md index 78232ef689e2..8b0e68c8686f 100644 --- a/packages/amazon-bedrock/CHANGELOG.md +++ b/packages/amazon-bedrock/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/amazon-bedrock +## 0.0.23 + +### Patch Changes + +- Updated dependencies [03313cd] +- Updated dependencies [3be7c1c] + - @ai-sdk/provider-utils@1.0.18 + - @ai-sdk/provider@0.0.23 + ## 0.0.22 ### Patch Changes diff --git a/packages/amazon-bedrock/package.json b/packages/amazon-bedrock/package.json index d86e80ceb1bc..6aeeecdf4572 100644 --- a/packages/amazon-bedrock/package.json +++ b/packages/amazon-bedrock/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/amazon-bedrock", - "version": "0.0.22", + "version": "0.0.23", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "0.0.22", - "@ai-sdk/provider-utils": "1.0.17", + "@ai-sdk/provider": "0.0.23", + "@ai-sdk/provider-utils": "1.0.18", "@aws-sdk/client-bedrock-runtime": "3.602.0" }, "devDependencies": { diff --git a/packages/anthropic/CHANGELOG.md b/packages/anthropic/CHANGELOG.md index 202ac6d75ba7..63d2a0a5e1a8 100644 --- a/packages/anthropic/CHANGELOG.md +++ b/packages/anthropic/CHANGELOG.md @@ -1,5 +1,16 @@ # @ai-sdk/anthropic +## 0.0.49 + +### Patch Changes + +- 03313cd: feat (ai): expose response id, response model, response timestamp in telemetry and api +- 3be7c1c: fix (provider/anthropic): support prompt caching on assistant messages +- Updated dependencies [03313cd] +- Updated dependencies [3be7c1c] + - @ai-sdk/provider-utils@1.0.18 + - @ai-sdk/provider@0.0.23 + ## 0.0.48 ### Patch Changes diff --git a/packages/anthropic/package.json b/packages/anthropic/package.json index 0e215f8921be..50ca1d6b51c1 100644 --- a/packages/anthropic/package.json +++ b/packages/anthropic/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/anthropic", - "version": "0.0.48", + "version": "0.0.49", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "0.0.22", - "@ai-sdk/provider-utils": "1.0.17" + "@ai-sdk/provider": "0.0.23", + "@ai-sdk/provider-utils": "1.0.18" }, "devDependencies": { "@types/node": "^18", diff --git a/packages/azure/CHANGELOG.md b/packages/azure/CHANGELOG.md index d5876d6cfa7e..282bb0c7584a 100644 --- a/packages/azure/CHANGELOG.md +++ b/packages/azure/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/azure +## 0.0.34 + +### Patch Changes + +- Updated dependencies [03313cd] +- Updated dependencies [3be7c1c] + - @ai-sdk/provider-utils@1.0.18 + - @ai-sdk/provider@0.0.23 + - @ai-sdk/openai@0.0.56 + ## 0.0.33 ### Patch Changes diff 
--git a/packages/azure/package.json b/packages/azure/package.json index 20835221084b..2940f2b692ed 100644 --- a/packages/azure/package.json +++ b/packages/azure/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/azure", - "version": "0.0.33", + "version": "0.0.34", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,9 +30,9 @@ } }, "dependencies": { - "@ai-sdk/openai": "0.0.55", - "@ai-sdk/provider": "0.0.22", - "@ai-sdk/provider-utils": "1.0.17" + "@ai-sdk/openai": "0.0.56", + "@ai-sdk/provider": "0.0.23", + "@ai-sdk/provider-utils": "1.0.18" }, "devDependencies": { "@types/node": "^18", diff --git a/packages/cohere/CHANGELOG.md b/packages/cohere/CHANGELOG.md index 7d97baea53f2..09c43a4a3f30 100644 --- a/packages/cohere/CHANGELOG.md +++ b/packages/cohere/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/cohere +## 0.0.23 + +### Patch Changes + +- 03313cd: feat (ai): expose response id, response model, response timestamp in telemetry and api +- Updated dependencies [03313cd] +- Updated dependencies [3be7c1c] + - @ai-sdk/provider-utils@1.0.18 + - @ai-sdk/provider@0.0.23 + ## 0.0.22 ### Patch Changes diff --git a/packages/cohere/package.json b/packages/cohere/package.json index 361a4795c5f8..83778c54b001 100644 --- a/packages/cohere/package.json +++ b/packages/cohere/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/cohere", - "version": "0.0.22", + "version": "0.0.23", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "0.0.22", - "@ai-sdk/provider-utils": "1.0.17" + "@ai-sdk/provider": "0.0.23", + "@ai-sdk/provider-utils": "1.0.18" }, "devDependencies": { "@types/node": "^18", diff --git a/packages/google-vertex/CHANGELOG.md b/packages/google-vertex/CHANGELOG.md index 9322b4ddee0c..6dfee6a97840 100644 --- a/packages/google-vertex/CHANGELOG.md +++ b/packages/google-vertex/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/google-vertex +## 0.0.36 + +### Patch Changes + +- Updated dependencies [03313cd] +- Updated dependencies [3be7c1c] + - @ai-sdk/provider-utils@1.0.18 + - @ai-sdk/provider@0.0.23 + ## 0.0.35 ### Patch Changes diff --git a/packages/google-vertex/package.json b/packages/google-vertex/package.json index 78998066efa9..c5d7c322190e 100644 --- a/packages/google-vertex/package.json +++ b/packages/google-vertex/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/google-vertex", - "version": "0.0.35", + "version": "0.0.36", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "0.0.22", - "@ai-sdk/provider-utils": "1.0.17", + "@ai-sdk/provider": "0.0.23", + "@ai-sdk/provider-utils": "1.0.18", "json-schema": "0.4.0" }, "devDependencies": { diff --git a/packages/google/CHANGELOG.md b/packages/google/CHANGELOG.md index 37de0184e284..aff45fcc2a7b 100644 --- a/packages/google/CHANGELOG.md +++ b/packages/google/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/google +## 0.0.47 + +### Patch Changes + +- Updated dependencies [03313cd] +- Updated dependencies [3be7c1c] + - @ai-sdk/provider-utils@1.0.18 + - @ai-sdk/provider@0.0.23 + ## 0.0.46 ### Patch Changes diff --git a/packages/google/package.json b/packages/google/package.json index c6c5b6eb7182..f28b90436993 100644 --- a/packages/google/package.json +++ b/packages/google/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/google", - "version": "0.0.46", + "version": "0.0.47", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, 
"dependencies": { - "@ai-sdk/provider": "0.0.22", - "@ai-sdk/provider-utils": "1.0.17", + "@ai-sdk/provider": "0.0.23", + "@ai-sdk/provider-utils": "1.0.18", "json-schema": "0.4.0" }, "devDependencies": { diff --git a/packages/mistral/CHANGELOG.md b/packages/mistral/CHANGELOG.md index bc9c04aedde6..dffc9257456c 100644 --- a/packages/mistral/CHANGELOG.md +++ b/packages/mistral/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/mistral +## 0.0.39 + +### Patch Changes + +- 03313cd: feat (ai): expose response id, response model, response timestamp in telemetry and api +- Updated dependencies [03313cd] +- Updated dependencies [3be7c1c] + - @ai-sdk/provider-utils@1.0.18 + - @ai-sdk/provider@0.0.23 + ## 0.0.38 ### Patch Changes diff --git a/packages/mistral/package.json b/packages/mistral/package.json index 3b173cec1ff2..5974e7371353 100644 --- a/packages/mistral/package.json +++ b/packages/mistral/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/mistral", - "version": "0.0.38", + "version": "0.0.39", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -30,8 +30,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "0.0.22", - "@ai-sdk/provider-utils": "1.0.17" + "@ai-sdk/provider": "0.0.23", + "@ai-sdk/provider-utils": "1.0.18" }, "devDependencies": { "@types/node": "^18", diff --git a/packages/openai/CHANGELOG.md b/packages/openai/CHANGELOG.md index dfc86c409dfb..765422d147e7 100644 --- a/packages/openai/CHANGELOG.md +++ b/packages/openai/CHANGELOG.md @@ -1,5 +1,15 @@ # @ai-sdk/openai +## 0.0.56 + +### Patch Changes + +- 03313cd: feat (ai): expose response id, response model, response timestamp in telemetry and api +- Updated dependencies [03313cd] +- Updated dependencies [3be7c1c] + - @ai-sdk/provider-utils@1.0.18 + - @ai-sdk/provider@0.0.23 + ## 0.0.55 ### Patch Changes diff --git a/packages/openai/package.json b/packages/openai/package.json index ea39d3f2b6bc..b54e87916764 100644 --- a/packages/openai/package.json +++ b/packages/openai/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/openai", - "version": "0.0.55", + "version": "0.0.56", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -37,8 +37,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "0.0.22", - "@ai-sdk/provider-utils": "1.0.17" + "@ai-sdk/provider": "0.0.23", + "@ai-sdk/provider-utils": "1.0.18" }, "devDependencies": { "@types/node": "^18", diff --git a/packages/provider-utils/CHANGELOG.md b/packages/provider-utils/CHANGELOG.md index 76a4864419c6..87867ce5ee49 100644 --- a/packages/provider-utils/CHANGELOG.md +++ b/packages/provider-utils/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/provider-utils +## 1.0.18 + +### Patch Changes + +- 03313cd: feat (ai): expose response id, response model, response timestamp in telemetry and api +- Updated dependencies [03313cd] +- Updated dependencies [3be7c1c] + - @ai-sdk/provider@0.0.23 + ## 1.0.17 ### Patch Changes diff --git a/packages/provider-utils/package.json b/packages/provider-utils/package.json index 4ee53626cf37..bccf3a706ab1 100644 --- a/packages/provider-utils/package.json +++ b/packages/provider-utils/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/provider-utils", - "version": "1.0.17", + "version": "1.0.18", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -37,7 +37,7 @@ } }, "dependencies": { - "@ai-sdk/provider": "0.0.22", + "@ai-sdk/provider": "0.0.23", "eventsource-parser": "1.1.2", "nanoid": "3.3.6", "secure-json-parse": "2.7.0" diff --git a/packages/provider/CHANGELOG.md b/packages/provider/CHANGELOG.md index 
45f093dd371d..45de1c7780df 100644 --- a/packages/provider/CHANGELOG.md +++ b/packages/provider/CHANGELOG.md @@ -1,5 +1,12 @@ # @ai-sdk/provider +## 0.0.23 + +### Patch Changes + +- 03313cd: feat (ai): expose response id, response model, response timestamp in telemetry and api +- 3be7c1c: fix (provider/anthropic): support prompt caching on assistant messages + ## 0.0.22 ### Patch Changes diff --git a/packages/provider/package.json b/packages/provider/package.json index d7cc71f3b8d4..7b3a9ebd0ddf 100644 --- a/packages/provider/package.json +++ b/packages/provider/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/provider", - "version": "0.0.22", + "version": "0.0.23", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", diff --git a/packages/react/CHANGELOG.md b/packages/react/CHANGELOG.md index 3a1df30674c3..f3d699ed4f29 100644 --- a/packages/react/CHANGELOG.md +++ b/packages/react/CHANGELOG.md @@ -1,5 +1,13 @@ # @ai-sdk/react +## 0.0.55 + +### Patch Changes + +- Updated dependencies [03313cd] + - @ai-sdk/provider-utils@1.0.18 + - @ai-sdk/ui-utils@0.0.41 + ## 0.0.54 ### Patch Changes diff --git a/packages/react/package.json b/packages/react/package.json index f5dda6aa54cc..f805201957ae 100644 --- a/packages/react/package.json +++ b/packages/react/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/react", - "version": "0.0.54", + "version": "0.0.55", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -29,8 +29,8 @@ "CHANGELOG.md" ], "dependencies": { - "@ai-sdk/provider-utils": "1.0.17", - "@ai-sdk/ui-utils": "0.0.40", + "@ai-sdk/provider-utils": "1.0.18", + "@ai-sdk/ui-utils": "0.0.41", "swr": "2.2.5" }, "devDependencies": { diff --git a/packages/solid/CHANGELOG.md b/packages/solid/CHANGELOG.md index 696adc87689c..ad3cdda15db2 100644 --- a/packages/solid/CHANGELOG.md +++ b/packages/solid/CHANGELOG.md @@ -1,5 +1,13 @@ # @ai-sdk/solid +## 0.0.44 + +### Patch Changes + +- Updated dependencies [03313cd] + - @ai-sdk/provider-utils@1.0.18 + - @ai-sdk/ui-utils@0.0.41 + ## 0.0.43 ### Patch Changes diff --git a/packages/solid/package.json b/packages/solid/package.json index 3db082c1f681..23705c38c1e0 100644 --- a/packages/solid/package.json +++ b/packages/solid/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/solid", - "version": "0.0.43", + "version": "0.0.44", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -29,8 +29,8 @@ "CHANGELOG.md" ], "dependencies": { - "@ai-sdk/provider-utils": "1.0.17", - "@ai-sdk/ui-utils": "0.0.40" + "@ai-sdk/provider-utils": "1.0.18", + "@ai-sdk/ui-utils": "0.0.41" }, "devDependencies": { "@testing-library/jest-dom": "^6.4.5", diff --git a/packages/svelte/CHANGELOG.md b/packages/svelte/CHANGELOG.md index 12cdf9ef93b7..0bc1fb14bea9 100644 --- a/packages/svelte/CHANGELOG.md +++ b/packages/svelte/CHANGELOG.md @@ -1,5 +1,13 @@ # @ai-sdk/svelte +## 0.0.46 + +### Patch Changes + +- Updated dependencies [03313cd] + - @ai-sdk/provider-utils@1.0.18 + - @ai-sdk/ui-utils@0.0.41 + ## 0.0.45 ### Patch Changes diff --git a/packages/svelte/package.json b/packages/svelte/package.json index 04a0511f7c16..6fe0630299dc 100644 --- a/packages/svelte/package.json +++ b/packages/svelte/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/svelte", - "version": "0.0.45", + "version": "0.0.46", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -27,8 +27,8 @@ "CHANGELOG.md" ], "dependencies": { - "@ai-sdk/provider-utils": "1.0.17", - "@ai-sdk/ui-utils": "0.0.40", + "@ai-sdk/provider-utils": "1.0.18", + 
"@ai-sdk/ui-utils": "0.0.41", "sswr": "2.1.0" }, "devDependencies": { diff --git a/packages/ui-utils/CHANGELOG.md b/packages/ui-utils/CHANGELOG.md index 07f9331cda02..0ff237e91347 100644 --- a/packages/ui-utils/CHANGELOG.md +++ b/packages/ui-utils/CHANGELOG.md @@ -1,5 +1,14 @@ # @ai-sdk/ui-utils +## 0.0.41 + +### Patch Changes + +- Updated dependencies [03313cd] +- Updated dependencies [3be7c1c] + - @ai-sdk/provider-utils@1.0.18 + - @ai-sdk/provider@0.0.23 + ## 0.0.40 ### Patch Changes diff --git a/packages/ui-utils/package.json b/packages/ui-utils/package.json index 054dbf560073..e284d8115104 100644 --- a/packages/ui-utils/package.json +++ b/packages/ui-utils/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/ui-utils", - "version": "0.0.40", + "version": "0.0.41", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -37,8 +37,8 @@ } }, "dependencies": { - "@ai-sdk/provider": "0.0.22", - "@ai-sdk/provider-utils": "1.0.17", + "@ai-sdk/provider": "0.0.23", + "@ai-sdk/provider-utils": "1.0.18", "json-schema": "0.4.0", "secure-json-parse": "2.7.0", "zod-to-json-schema": "3.23.2" diff --git a/packages/vue/CHANGELOG.md b/packages/vue/CHANGELOG.md index 1d0deca7da53..493ff6a40db3 100644 --- a/packages/vue/CHANGELOG.md +++ b/packages/vue/CHANGELOG.md @@ -1,5 +1,13 @@ # @ai-sdk/vue +## 0.0.46 + +### Patch Changes + +- Updated dependencies [03313cd] + - @ai-sdk/provider-utils@1.0.18 + - @ai-sdk/ui-utils@0.0.41 + ## 0.0.45 ### Patch Changes diff --git a/packages/vue/package.json b/packages/vue/package.json index c65455925a24..ed39d803fc40 100644 --- a/packages/vue/package.json +++ b/packages/vue/package.json @@ -1,6 +1,6 @@ { "name": "@ai-sdk/vue", - "version": "0.0.45", + "version": "0.0.46", "license": "Apache-2.0", "sideEffects": false, "main": "./dist/index.js", @@ -29,8 +29,8 @@ "CHANGELOG.md" ], "dependencies": { - "@ai-sdk/provider-utils": "1.0.17", - "@ai-sdk/ui-utils": "0.0.40", + "@ai-sdk/provider-utils": "1.0.18", + "@ai-sdk/ui-utils": "0.0.41", "swrv": "1.0.4" }, "devDependencies": { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index b0b6330cc602..bd48d2f2e403 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -125,10 +125,10 @@ importers: version: link:../../packages/ai geist: specifier: ^1.3.1 - version: 1.3.1(next@14.2.7) + version: 1.3.1(next@14.2.8) next: specifier: latest - version: 14.2.7(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) + version: 14.2.8(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) react: specifier: ^18 version: 18.2.0 @@ -180,7 +180,7 @@ importers: version: link:../../packages/ai next: specifier: latest - version: 14.2.7(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) + version: 14.2.8(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) react: specifier: ^18 version: 18.2.0 @@ -229,7 +229,7 @@ importers: version: link:../../packages/ai next: specifier: latest - version: 14.2.7(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) + version: 14.2.8(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) react: specifier: ^18 version: 18.2.0 @@ -284,7 +284,7 @@ importers: version: 0.1.36(playwright@1.46.0) next: specifier: latest - version: 14.2.7(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) + version: 14.2.8(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) react: specifier: ^18 
version: 18.2.0 @@ -339,7 +339,7 @@ importers: version: link:../../packages/ai next: specifier: latest - version: 14.2.7(@playwright/test@1.46.0)(react-dom@18.3.1)(react@18.3.1) + version: 14.2.8(@playwright/test@1.46.0)(react-dom@18.3.1)(react@18.3.1) openai: specifier: 4.52.6 version: 4.52.6 @@ -397,7 +397,7 @@ importers: version: link:../../packages/ai next: specifier: latest - version: 14.2.7(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) + version: 14.2.8(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) openai: specifier: 4.52.6 version: 4.52.6 @@ -452,7 +452,7 @@ importers: version: link:../../packages/ai next: specifier: latest - version: 14.2.7(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) + version: 14.2.8(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) openai: specifier: 4.52.6 version: 4.52.6 @@ -510,7 +510,7 @@ importers: version: link:../../packages/ai next: specifier: latest - version: 14.2.7(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) + version: 14.2.8(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) openai: specifier: 4.52.6 version: 4.52.6 @@ -577,7 +577,7 @@ importers: version: link:../../packages/ai next: specifier: latest - version: 14.2.7(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) + version: 14.2.8(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) openai: specifier: 4.52.6 version: 4.52.6 @@ -638,7 +638,7 @@ importers: version: 0.52.1(@opentelemetry/api@1.9.0) '@sentry/nextjs': specifier: ^8.22.0 - version: 8.22.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@1.25.1)(@opentelemetry/instrumentation@0.52.1)(@opentelemetry/sdk-trace-base@1.25.1)(next@14.2.7)(react@18.2.0)(webpack@5.93.0) + version: 8.22.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@1.25.1)(@opentelemetry/instrumentation@0.52.1)(@opentelemetry/sdk-trace-base@1.25.1)(next@14.2.8)(react@18.2.0)(webpack@5.93.0) '@sentry/opentelemetry': specifier: 8.22.0 version: 8.22.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@1.25.1)(@opentelemetry/instrumentation@0.52.1)(@opentelemetry/sdk-trace-base@1.25.1)(@opentelemetry/semantic-conventions@1.25.1) @@ -650,7 +650,7 @@ importers: version: link:../../packages/ai next: specifier: latest - version: 14.2.7(@babel/core@7.24.7)(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) + version: 14.2.8(@babel/core@7.24.7)(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) openai: specifier: 4.52.6 version: 4.52.6 @@ -702,7 +702,7 @@ importers: version: link:../../packages/ai next: specifier: latest - version: 14.2.7(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) + version: 14.2.8(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) react: specifier: ^18 version: 18.2.0 @@ -930,25 +930,25 @@ importers: packages/ai: dependencies: '@ai-sdk/provider': - specifier: 0.0.22 + specifier: 0.0.23 version: link:../provider '@ai-sdk/provider-utils': - specifier: 1.0.17 + specifier: 1.0.18 version: link:../provider-utils '@ai-sdk/react': - specifier: 0.0.54 + specifier: 0.0.55 version: link:../react '@ai-sdk/solid': - specifier: 0.0.43 + specifier: 0.0.44 version: link:../solid '@ai-sdk/svelte': - specifier: 0.0.45 + specifier: 0.0.46 version: link:../svelte '@ai-sdk/ui-utils': - 
specifier: 0.0.40 + specifier: 0.0.41 version: link:../ui-utils '@ai-sdk/vue': - specifier: 0.0.45 + specifier: 0.0.46 version: link:../vue '@opentelemetry/api': specifier: 1.9.0 @@ -1070,21 +1070,21 @@ importers: version: link:../../.. next: specifier: canary - version: 15.0.0-canary.139(@playwright/test@1.46.0)(react-dom@19.0.0-rc-4f604941-20240830)(react@19.0.0-rc-4f604941-20240830) + version: 15.0.0-canary.141(@playwright/test@1.46.0)(react-dom@19.0.0-rc-d1afcb43-20240903)(react@19.0.0-rc-d1afcb43-20240903) react: specifier: rc - version: 19.0.0-rc-4f604941-20240830 + version: 19.0.0-rc-d1afcb43-20240903 react-dom: specifier: rc - version: 19.0.0-rc-4f604941-20240830(react@19.0.0-rc-4f604941-20240830) + version: 19.0.0-rc-d1afcb43-20240903(react@19.0.0-rc-d1afcb43-20240903) packages/amazon-bedrock: dependencies: '@ai-sdk/provider': - specifier: 0.0.22 + specifier: 0.0.23 version: link:../provider '@ai-sdk/provider-utils': - specifier: 1.0.17 + specifier: 1.0.18 version: link:../provider-utils '@aws-sdk/client-bedrock-runtime': specifier: 3.602.0 @@ -1115,10 +1115,10 @@ importers: packages/anthropic: dependencies: '@ai-sdk/provider': - specifier: 0.0.22 + specifier: 0.0.23 version: link:../provider '@ai-sdk/provider-utils': - specifier: 1.0.17 + specifier: 1.0.18 version: link:../provider-utils devDependencies: '@types/node': @@ -1140,13 +1140,13 @@ importers: packages/azure: dependencies: '@ai-sdk/openai': - specifier: 0.0.55 + specifier: 0.0.56 version: link:../openai '@ai-sdk/provider': - specifier: 0.0.22 + specifier: 0.0.23 version: link:../provider '@ai-sdk/provider-utils': - specifier: 1.0.17 + specifier: 1.0.18 version: link:../provider-utils devDependencies: '@types/node': @@ -1168,10 +1168,10 @@ importers: packages/cohere: dependencies: '@ai-sdk/provider': - specifier: 0.0.22 + specifier: 0.0.23 version: link:../provider '@ai-sdk/provider-utils': - specifier: 1.0.17 + specifier: 1.0.18 version: link:../provider-utils devDependencies: '@types/node': @@ -1193,10 +1193,10 @@ importers: packages/google: dependencies: '@ai-sdk/provider': - specifier: 0.0.22 + specifier: 0.0.23 version: link:../provider '@ai-sdk/provider-utils': - specifier: 1.0.17 + specifier: 1.0.18 version: link:../provider-utils json-schema: specifier: 0.4.0 @@ -1224,10 +1224,10 @@ importers: packages/google-vertex: dependencies: '@ai-sdk/provider': - specifier: 0.0.22 + specifier: 0.0.23 version: link:../provider '@ai-sdk/provider-utils': - specifier: 1.0.17 + specifier: 1.0.18 version: link:../provider-utils json-schema: specifier: 0.4.0 @@ -1255,10 +1255,10 @@ importers: packages/mistral: dependencies: '@ai-sdk/provider': - specifier: 0.0.22 + specifier: 0.0.23 version: link:../provider '@ai-sdk/provider-utils': - specifier: 1.0.17 + specifier: 1.0.18 version: link:../provider-utils devDependencies: '@types/node': @@ -1280,10 +1280,10 @@ importers: packages/openai: dependencies: '@ai-sdk/provider': - specifier: 0.0.22 + specifier: 0.0.23 version: link:../provider '@ai-sdk/provider-utils': - specifier: 1.0.17 + specifier: 1.0.18 version: link:../provider-utils devDependencies: '@types/node': @@ -1327,7 +1327,7 @@ importers: packages/provider-utils: dependencies: '@ai-sdk/provider': - specifier: 0.0.22 + specifier: 0.0.23 version: link:../provider eventsource-parser: specifier: 1.1.2 @@ -1361,10 +1361,10 @@ importers: packages/react: dependencies: '@ai-sdk/provider-utils': - specifier: 1.0.17 + specifier: 1.0.18 version: link:../provider-utils '@ai-sdk/ui-utils': - specifier: 0.0.40 + specifier: 0.0.41 version: 
link:../ui-utils react: specifier: ^18 || ^19 @@ -1422,10 +1422,10 @@ importers: packages/solid: dependencies: '@ai-sdk/provider-utils': - specifier: 1.0.17 + specifier: 1.0.18 version: link:../provider-utils '@ai-sdk/ui-utils': - specifier: 0.0.40 + specifier: 0.0.41 version: link:../ui-utils solid-js: specifier: ^1.7.7 @@ -1474,10 +1474,10 @@ importers: packages/svelte: dependencies: '@ai-sdk/provider-utils': - specifier: 1.0.17 + specifier: 1.0.18 version: link:../provider-utils '@ai-sdk/ui-utils': - specifier: 0.0.40 + specifier: 0.0.41 version: link:../ui-utils sswr: specifier: 2.1.0 @@ -1508,10 +1508,10 @@ importers: packages/ui-utils: dependencies: '@ai-sdk/provider': - specifier: 0.0.22 + specifier: 0.0.23 version: link:../provider '@ai-sdk/provider-utils': - specifier: 1.0.17 + specifier: 1.0.18 version: link:../provider-utils json-schema: specifier: 0.4.0 @@ -1548,10 +1548,10 @@ importers: packages/vue: dependencies: '@ai-sdk/provider-utils': - specifier: 1.0.17 + specifier: 1.0.18 version: link:../provider-utils '@ai-sdk/ui-utils': - specifier: 0.0.40 + specifier: 0.0.41 version: link:../ui-utils swrv: specifier: 1.0.4 @@ -5987,12 +5987,12 @@ packages: - '@opentelemetry/api' - supports-color - /@next/env@14.2.7: - resolution: {integrity: sha512-OTx9y6I3xE/eih+qtthppwLytmpJVPM5PPoJxChFsbjIEFXIayG0h/xLzefHGJviAa3Q5+Fd+9uYojKkHDKxoQ==} + /@next/env@14.2.8: + resolution: {integrity: sha512-L44a+ynqkolyNBnYfF8VoCiSrjSZWgEHYKkKLGcs/a80qh7AkfVUD/MduVPgdsWZ31tgROR+yJRA0PZjSVBXWQ==} dev: false - /@next/env@15.0.0-canary.139: - resolution: {integrity: sha512-OiMN4DTsyfHreRTANJLMGWNa1jK1G46D+IfavYUBu0NkNpmm7LkEnsE6AaAFLK4V/oafL37/vN4nzccOnVhvmg==} + /@next/env@15.0.0-canary.141: + resolution: {integrity: sha512-JZSWuaEU/vwYheetDQG4tQpNCZr6jJwd7NGI4CvmClGk9ZGM63PIN3WmhsXgqG1bgTEzgmiKj9sNoN+qhpO8cA==} dev: false /@next/env@15.0.0-canary.23: @@ -6004,8 +6004,8 @@ packages: dependencies: glob: 10.3.10 - /@next/swc-darwin-arm64@14.2.7: - resolution: {integrity: sha512-UhZGcOyI9LE/tZL3h9rs/2wMZaaJKwnpAyegUVDGZqwsla6hMfeSj9ssBWQS9yA4UXun3pPhrFLVnw5KXZs3vw==} + /@next/swc-darwin-arm64@14.2.8: + resolution: {integrity: sha512-1VrQlG8OzdyvvGZhGJFnaNE2P10Jjy/2FopnqbY0nSa/gr8If3iINxvOEW3cmVeoAYkmW0RsBazQecA2dBFOSw==} engines: {node: '>= 10'} cpu: [arm64] os: [darwin] @@ -6013,8 +6013,8 @@ packages: dev: false optional: true - /@next/swc-darwin-arm64@15.0.0-canary.139: - resolution: {integrity: sha512-XTFcbQjsy8DUH6TFIF6C52G3gJX8DBc29qguyf8sGxCpR88D3tkgj7Ph2LXoAIy6c01TX40e6jhp8CVis5Wh4g==} + /@next/swc-darwin-arm64@15.0.0-canary.141: + resolution: {integrity: sha512-A0MID270+ivW8e8/yuJ+CWQmvUK0YRz0eJhq53JDdEJXbzzjRrGUKIS9RxtJGacV6KjptTAtAR+ZWMSR+quzAA==} engines: {node: '>= 10'} cpu: [arm64] os: [darwin] @@ -6031,8 +6031,8 @@ packages: dev: true optional: true - /@next/swc-darwin-x64@14.2.7: - resolution: {integrity: sha512-ys2cUgZYRc+CbyDeLAaAdZgS7N1Kpyy+wo0b/gAj+SeOeaj0Lw/q+G1hp+DuDiDAVyxLBCJXEY/AkhDmtihUTA==} + /@next/swc-darwin-x64@14.2.8: + resolution: {integrity: sha512-87t3I86rNRSOJB1gXIUzaQWWSWrkWPDyZGsR0Z7JAPtLeX3uUOW2fHxl7dNWD2BZvbvftctTQjgtfpp7nMtmWg==} engines: {node: '>= 10'} cpu: [x64] os: [darwin] @@ -6040,8 +6040,8 @@ packages: dev: false optional: true - /@next/swc-darwin-x64@15.0.0-canary.139: - resolution: {integrity: sha512-DArn6tL67GILz3mANmwhD/ytIuJ6DBHyzR6iZ8rNYYR8CUczXfzJ1QU3CBndwFQwVCiLNLV7rMpjcyvLaZGBgQ==} + /@next/swc-darwin-x64@15.0.0-canary.141: + resolution: {integrity: sha512-yMlESZ/xgblBuHNp8Jn1pUO5mPLtKJTq2/oMEMFq7X7vyIYqO/rhGLG2qU444KacVMl8Qnd0N73qxwsbxlFYwQ==} 
engines: {node: '>= 10'} cpu: [x64] os: [darwin] @@ -6058,8 +6058,8 @@ packages: dev: true optional: true - /@next/swc-linux-arm64-gnu@14.2.7: - resolution: {integrity: sha512-2xoWtE13sUJ3qrC1lwE/HjbDPm+kBQYFkkiVECJWctRASAHQ+NwjMzgrfqqMYHfMxFb5Wws3w9PqzZJqKFdWcQ==} + /@next/swc-linux-arm64-gnu@14.2.8: + resolution: {integrity: sha512-ta2sfVzbOpTbgBrF9HM5m+U58dv6QPuwU4n5EX4LLyCJGKc433Z0D9h9gay/HSOjLEXJ2fJYrMP5JYYbHdxhtw==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] @@ -6067,8 +6067,8 @@ packages: dev: false optional: true - /@next/swc-linux-arm64-gnu@15.0.0-canary.139: - resolution: {integrity: sha512-LJ8H0L0ufsIP3yXFP6C9dFwhMBHGcpmAVh/fHNlH/H7Xku1sRaiThLgWJ4hW0joxOJHUsPVAh8JepbM1ywK61Q==} + /@next/swc-linux-arm64-gnu@15.0.0-canary.141: + resolution: {integrity: sha512-76w9K0xymid+TL1PB9c6FxacGAzOYRl5hJNMK268prpQNcLxsUx6IgMfYu8CgeDfXSOWVTgo6fw6Lu/6W2VP8Q==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] @@ -6085,8 +6085,8 @@ packages: dev: true optional: true - /@next/swc-linux-arm64-musl@14.2.7: - resolution: {integrity: sha512-+zJ1gJdl35BSAGpkCbfyiY6iRTaPrt3KTl4SF/B1NyELkqqnrNX6cp4IjjjxKpd64/7enI0kf6b9O1Uf3cL0pw==} + /@next/swc-linux-arm64-musl@14.2.8: + resolution: {integrity: sha512-+IoLTPK6Z5uIgDhgeWnQF5/o5GBN7+zyUNrs4Bes1W3g9++YELb8y0unFybS8s87ntAKMDl6jeQ+mD7oNwp/Ng==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] @@ -6094,8 +6094,8 @@ packages: dev: false optional: true - /@next/swc-linux-arm64-musl@15.0.0-canary.139: - resolution: {integrity: sha512-ZD7AK7LxLyDXzdGYEMvkyxS+PdUhHmrH9opNyS5A6VB9F0hmchNmZVpEY/VygTxeEIpTOJxAKCK3fxLHaEt4kg==} + /@next/swc-linux-arm64-musl@15.0.0-canary.141: + resolution: {integrity: sha512-DNP7NkksefhPLMYgIFk75Y6GPfFeKS3e+sIMW8z5Tjll4BoQjWieiuTH0Fl0W7spwsXbniyn5y2d9GqoPyYWQg==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] @@ -6112,8 +6112,8 @@ packages: dev: true optional: true - /@next/swc-linux-x64-gnu@14.2.7: - resolution: {integrity: sha512-m6EBqrskeMUzykBrv0fDX/28lWIBGhMzOYaStp0ihkjzIYJiKUOzVYD1gULHc8XDf5EMSqoH/0/TRAgXqpQwmw==} + /@next/swc-linux-x64-gnu@14.2.8: + resolution: {integrity: sha512-pO+hVXC+mvzUOQJJRG4RX4wJsRJ5BkURSf6dD6EjUXAX4Ml9es1WsEfkaZ4lcpmFzFvY47IkDaffks/GdCn9ag==} engines: {node: '>= 10'} cpu: [x64] os: [linux] @@ -6121,8 +6121,8 @@ packages: dev: false optional: true - /@next/swc-linux-x64-gnu@15.0.0-canary.139: - resolution: {integrity: sha512-ZRHABIrLOzNDxY6DbjTY0QIM6eS0R78qlojZ4vlIIGJFjLLEuPG7kny5vEtl2vCbxQPaJHgG7T8kCNakDsYYbQ==} + /@next/swc-linux-x64-gnu@15.0.0-canary.141: + resolution: {integrity: sha512-UFhJQNZWJKRaS+y7yNt90xIosCFRgIzuK0gkiTTt/pGLlkwRlRrgpJ214MGQ9ii2z7gr4qA64DLFB8ikIXfBlA==} engines: {node: '>= 10'} cpu: [x64] os: [linux] @@ -6139,8 +6139,8 @@ packages: dev: true optional: true - /@next/swc-linux-x64-musl@14.2.7: - resolution: {integrity: sha512-gUu0viOMvMlzFRz1r1eQ7Ql4OE+hPOmA7smfZAhn8vC4+0swMZaZxa9CSIozTYavi+bJNDZ3tgiSdMjmMzRJlQ==} + /@next/swc-linux-x64-musl@14.2.8: + resolution: {integrity: sha512-bCat9izctychCtf3uL1nqHq31N5e1VxvdyNcBQflkudPMLbxVnlrw45Vi87K+lt1CwrtVayHqzo4ie0Szcpwzg==} engines: {node: '>= 10'} cpu: [x64] os: [linux] @@ -6148,8 +6148,8 @@ packages: dev: false optional: true - /@next/swc-linux-x64-musl@15.0.0-canary.139: - resolution: {integrity: sha512-yeLzz6apJLu/6Lh6KPDkvmk50quiERkZCIJpM03Q+20YKtN/68Yo5HpsX/GAWqnhiHvBRGBO+ukhYPa8pEQgsQ==} + /@next/swc-linux-x64-musl@15.0.0-canary.141: + resolution: {integrity: sha512-Kx3KcISWILAIPEIgszDfgbI9f8ZpV+5Of0BwomyiQzWpDKcWws+8JjLenGQhPaElFG3OrjKihCUkUizHwXo9aw==} engines: {node: '>= 10'} cpu: [x64] os: [linux] @@ -6166,8 
+6166,8 @@ packages: dev: true optional: true - /@next/swc-win32-arm64-msvc@14.2.7: - resolution: {integrity: sha512-PGbONHIVIuzWlYmLvuFKcj+8jXnLbx4WrlESYlVnEzDsa3+Q2hI1YHoXaSmbq0k4ZwZ7J6sWNV4UZfx1OeOlbQ==} + /@next/swc-win32-arm64-msvc@14.2.8: + resolution: {integrity: sha512-gbxfUaSPV7EyUobpavida2Hwi62GhSJaSg7iBjmBWoxkxlmETOD7U4tWt763cGIsyE6jM7IoNavq0BXqwdW2QA==} engines: {node: '>= 10'} cpu: [arm64] os: [win32] @@ -6175,8 +6175,8 @@ packages: dev: false optional: true - /@next/swc-win32-arm64-msvc@15.0.0-canary.139: - resolution: {integrity: sha512-/pad4W35P/HyFJ7YgS7vamBoHRNVVZh3Gb2y9LEmAv5cdBbY9TMGLPL8nxFLglKG5+aw8rvy8IC+n9f3hSoAjg==} + /@next/swc-win32-arm64-msvc@15.0.0-canary.141: + resolution: {integrity: sha512-jikgy9gGzhGAjI6f8PO9Oue74Dk6w/pw449d91RPY9MjohnC6Q43Q/K8+ogJ9panAYya6BzgXpDJN0fuxPKZOQ==} engines: {node: '>= 10'} cpu: [arm64] os: [win32] @@ -6193,8 +6193,8 @@ packages: dev: true optional: true - /@next/swc-win32-ia32-msvc@14.2.7: - resolution: {integrity: sha512-BiSY5umlx9ed5RQDoHcdbuKTUkuFORDqzYKPHlLeS+STUWQKWziVOn3Ic41LuTBvqE0TRJPKpio9GSIblNR+0w==} + /@next/swc-win32-ia32-msvc@14.2.8: + resolution: {integrity: sha512-PUXzEzjTTlUh3b5VAn1nlpwvujTnuCMMwbiCnaTazoVlN1nA3kWjlmp42IfURA2N/nyrlVEw7pURa/o4Qxj1cw==} engines: {node: '>= 10'} cpu: [ia32] os: [win32] @@ -6202,8 +6202,8 @@ packages: dev: false optional: true - /@next/swc-win32-ia32-msvc@15.0.0-canary.139: - resolution: {integrity: sha512-P3CJXnhhg1zkYeNtPYRS2R0LGnZNpcvJYYCu8Kptmp1a44khDtWICHnnplbqXmr+WoT2On2vwRAwtSDRSfnBPw==} + /@next/swc-win32-ia32-msvc@15.0.0-canary.141: + resolution: {integrity: sha512-HTrdiKGcY15zHYjdiimJoSCwz3PiILiUDj/LfzSVptcmFAkNFPb5VPtFZOyrsnisHfoKp+E1qVaNLf/2STJTog==} engines: {node: '>= 10'} cpu: [ia32] os: [win32] @@ -6220,8 +6220,8 @@ packages: dev: true optional: true - /@next/swc-win32-x64-msvc@14.2.7: - resolution: {integrity: sha512-pxsI23gKWRt/SPHFkDEsP+w+Nd7gK37Hpv0ngc5HpWy2e7cKx9zR/+Q2ptAUqICNTecAaGWvmhway7pj/JLEWA==} + /@next/swc-win32-x64-msvc@14.2.8: + resolution: {integrity: sha512-EnPKv0ttq02E9/1KZ/8Dn7kuutv6hy1CKc0HlNcvzOQcm4/SQtvfws5gY0zrG9tuupd3HfC2L/zcTrnBhpjTuQ==} engines: {node: '>= 10'} cpu: [x64] os: [win32] @@ -6229,8 +6229,8 @@ packages: dev: false optional: true - /@next/swc-win32-x64-msvc@15.0.0-canary.139: - resolution: {integrity: sha512-GoNrf8ATwlZXv1o94RFXJSf9wMIltJ7l11nvv+d3DY5nRTT23OUlc/Z41jyMvAq4yWbLDbOeLXXn3Fx4jlCB7Q==} + /@next/swc-win32-x64-msvc@15.0.0-canary.141: + resolution: {integrity: sha512-zt4Ly7XV//wwX6v4YU/wP55U9LOdmGQ9xArbKGJPPYdTAFgD1JcK1OqzrZxJzf+xqZs2erP7cHu/JyHvpEpq4w==} engines: {node: '>= 10'} cpu: [x64] os: [win32] @@ -8730,7 +8730,7 @@ packages: '@sentry/utils': 8.22.0 dev: false - /@sentry/nextjs@8.22.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@1.25.1)(@opentelemetry/instrumentation@0.52.1)(@opentelemetry/sdk-trace-base@1.25.1)(next@14.2.7)(react@18.2.0)(webpack@5.93.0): + /@sentry/nextjs@8.22.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@1.25.1)(@opentelemetry/instrumentation@0.52.1)(@opentelemetry/sdk-trace-base@1.25.1)(next@14.2.8)(react@18.2.0)(webpack@5.93.0): resolution: {integrity: sha512-XYb/3ocQLhZmdqqTgI7xce7AiRpHn3L6Sj3RVTBwNb4nb+XOfQ8o0LKF7v7yo6LGoQin+IWpWPACnNc8zH7fBA==} engines: {node: '>=14.18'} peerDependencies: @@ -8752,7 +8752,7 @@ packages: '@sentry/vercel-edge': 8.22.0 '@sentry/webpack-plugin': 2.20.1(webpack@5.93.0) chalk: 3.0.0 - next: 14.2.7(@babel/core@7.24.7)(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) + next: 
14.2.8(@babel/core@7.24.7)(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) resolve: 1.22.8 rollup: 3.29.4 stacktrace-parser: 0.1.10 @@ -10258,8 +10258,8 @@ packages: tslib: 2.6.3 dev: true - /@swc/helpers@0.5.12: - resolution: {integrity: sha512-KMZNXiGibsW9kvZAO1Pam2JPTDBm+KSHMMHWdsyI/1DbIZjT2A6Gy3hblVXUMEDvUAKq+e0vL0X0o54owWji7g==} + /@swc/helpers@0.5.13: + resolution: {integrity: sha512-UoKGxQ3r5kYI9dALKJapMmuK+1zWM/H17Z1+iwnNmzcJRnfFuevZs375TA5rW31pu4BS4NoSy1fRsexDXfWn5w==} dependencies: tslib: 2.6.3 dev: false @@ -15048,12 +15048,12 @@ packages: - encoding - supports-color - /geist@1.3.1(next@14.2.7): + /geist@1.3.1(next@14.2.8): resolution: {integrity: sha512-Q4gC1pBVPN+D579pBaz0TRRnGA4p9UK6elDY/xizXdFk/g4EKR5g0I+4p/Kj6gM0SajDBZ/0FvDV9ey9ud7BWw==} peerDependencies: next: '>=13.2.0' dependencies: - next: 14.2.7(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) + next: 14.2.8(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0) dev: false /gensync@1.0.0-beta.2: @@ -17701,8 +17701,8 @@ packages: /neo-async@2.6.2: resolution: {integrity: sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==} - /next@14.2.7(@babel/core@7.24.7)(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-4Qy2aK0LwH4eQiSvQWyKuC7JXE13bIopEQesWE0c/P3uuNRnZCQanI0vsrMLmUQJLAto+A+/8+sve2hd+BQuOQ==} + /next@14.2.8(@babel/core@7.24.7)(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-EyEyJZ89r8C5FPlS/401AiF3O8jeMtHIE+bLom9MwcdWJJFBgRl+MR/2VgO0v5bI6tQORNY0a0DR5sjpFNrjbg==} engines: {node: '>=18.17.0'} hasBin: true peerDependencies: @@ -17719,7 +17719,7 @@ packages: sass: optional: true dependencies: - '@next/env': 14.2.7 + '@next/env': 14.2.8 '@opentelemetry/api': 1.9.0 '@playwright/test': 1.46.0 '@swc/helpers': 0.5.5 @@ -17731,22 +17731,22 @@ packages: react-dom: 18.2.0(react@18.2.0) styled-jsx: 5.1.1(@babel/core@7.24.7)(react@18.2.0) optionalDependencies: - '@next/swc-darwin-arm64': 14.2.7 - '@next/swc-darwin-x64': 14.2.7 - '@next/swc-linux-arm64-gnu': 14.2.7 - '@next/swc-linux-arm64-musl': 14.2.7 - '@next/swc-linux-x64-gnu': 14.2.7 - '@next/swc-linux-x64-musl': 14.2.7 - '@next/swc-win32-arm64-msvc': 14.2.7 - '@next/swc-win32-ia32-msvc': 14.2.7 - '@next/swc-win32-x64-msvc': 14.2.7 + '@next/swc-darwin-arm64': 14.2.8 + '@next/swc-darwin-x64': 14.2.8 + '@next/swc-linux-arm64-gnu': 14.2.8 + '@next/swc-linux-arm64-musl': 14.2.8 + '@next/swc-linux-x64-gnu': 14.2.8 + '@next/swc-linux-x64-musl': 14.2.8 + '@next/swc-win32-arm64-msvc': 14.2.8 + '@next/swc-win32-ia32-msvc': 14.2.8 + '@next/swc-win32-x64-msvc': 14.2.8 transitivePeerDependencies: - '@babel/core' - babel-plugin-macros dev: false - /next@14.2.7(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-4Qy2aK0LwH4eQiSvQWyKuC7JXE13bIopEQesWE0c/P3uuNRnZCQanI0vsrMLmUQJLAto+A+/8+sve2hd+BQuOQ==} + /next@14.2.8(@opentelemetry/api@1.9.0)(@playwright/test@1.46.0)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-EyEyJZ89r8C5FPlS/401AiF3O8jeMtHIE+bLom9MwcdWJJFBgRl+MR/2VgO0v5bI6tQORNY0a0DR5sjpFNrjbg==} engines: {node: '>=18.17.0'} hasBin: true peerDependencies: @@ -17763,7 +17763,7 @@ packages: sass: optional: true dependencies: - '@next/env': 14.2.7 + '@next/env': 14.2.8 '@opentelemetry/api': 1.9.0 '@playwright/test': 1.46.0 
'@swc/helpers': 0.5.5 @@ -17775,22 +17775,22 @@ packages: react-dom: 18.2.0(react@18.2.0) styled-jsx: 5.1.1(@babel/core@7.24.7)(react@18.2.0) optionalDependencies: - '@next/swc-darwin-arm64': 14.2.7 - '@next/swc-darwin-x64': 14.2.7 - '@next/swc-linux-arm64-gnu': 14.2.7 - '@next/swc-linux-arm64-musl': 14.2.7 - '@next/swc-linux-x64-gnu': 14.2.7 - '@next/swc-linux-x64-musl': 14.2.7 - '@next/swc-win32-arm64-msvc': 14.2.7 - '@next/swc-win32-ia32-msvc': 14.2.7 - '@next/swc-win32-x64-msvc': 14.2.7 + '@next/swc-darwin-arm64': 14.2.8 + '@next/swc-darwin-x64': 14.2.8 + '@next/swc-linux-arm64-gnu': 14.2.8 + '@next/swc-linux-arm64-musl': 14.2.8 + '@next/swc-linux-x64-gnu': 14.2.8 + '@next/swc-linux-x64-musl': 14.2.8 + '@next/swc-win32-arm64-msvc': 14.2.8 + '@next/swc-win32-ia32-msvc': 14.2.8 + '@next/swc-win32-x64-msvc': 14.2.8 transitivePeerDependencies: - '@babel/core' - babel-plugin-macros dev: false - /next@14.2.7(@playwright/test@1.46.0)(react-dom@18.3.1)(react@18.3.1): - resolution: {integrity: sha512-4Qy2aK0LwH4eQiSvQWyKuC7JXE13bIopEQesWE0c/P3uuNRnZCQanI0vsrMLmUQJLAto+A+/8+sve2hd+BQuOQ==} + /next@14.2.8(@playwright/test@1.46.0)(react-dom@18.3.1)(react@18.3.1): + resolution: {integrity: sha512-EyEyJZ89r8C5FPlS/401AiF3O8jeMtHIE+bLom9MwcdWJJFBgRl+MR/2VgO0v5bI6tQORNY0a0DR5sjpFNrjbg==} engines: {node: '>=18.17.0'} hasBin: true peerDependencies: @@ -17807,7 +17807,7 @@ packages: sass: optional: true dependencies: - '@next/env': 14.2.7 + '@next/env': 14.2.8 '@playwright/test': 1.46.0 '@swc/helpers': 0.5.5 busboy: 1.6.0 @@ -17818,22 +17818,22 @@ packages: react-dom: 18.3.1(react@18.3.1) styled-jsx: 5.1.1(react@18.3.1) optionalDependencies: - '@next/swc-darwin-arm64': 14.2.7 - '@next/swc-darwin-x64': 14.2.7 - '@next/swc-linux-arm64-gnu': 14.2.7 - '@next/swc-linux-arm64-musl': 14.2.7 - '@next/swc-linux-x64-gnu': 14.2.7 - '@next/swc-linux-x64-musl': 14.2.7 - '@next/swc-win32-arm64-msvc': 14.2.7 - '@next/swc-win32-ia32-msvc': 14.2.7 - '@next/swc-win32-x64-msvc': 14.2.7 + '@next/swc-darwin-arm64': 14.2.8 + '@next/swc-darwin-x64': 14.2.8 + '@next/swc-linux-arm64-gnu': 14.2.8 + '@next/swc-linux-arm64-musl': 14.2.8 + '@next/swc-linux-x64-gnu': 14.2.8 + '@next/swc-linux-x64-musl': 14.2.8 + '@next/swc-win32-arm64-msvc': 14.2.8 + '@next/swc-win32-ia32-msvc': 14.2.8 + '@next/swc-win32-x64-msvc': 14.2.8 transitivePeerDependencies: - '@babel/core' - babel-plugin-macros dev: false - /next@15.0.0-canary.139(@playwright/test@1.46.0)(react-dom@19.0.0-rc-4f604941-20240830)(react@19.0.0-rc-4f604941-20240830): - resolution: {integrity: sha512-Ytzb2lBXtXVake51QpdVy/ChlLL7cEhAyCQnlcyJXn92PHi/HBDlXQlf9Bwu8lOO9h5Q9+ERnUpmQavoFcPTDg==} + /next@15.0.0-canary.141(@playwright/test@1.46.0)(react-dom@19.0.0-rc-d1afcb43-20240903)(react@19.0.0-rc-d1afcb43-20240903): + resolution: {integrity: sha512-T5EzpyzpTQo1XQ5L3MLzSmAq9aqQNaF32yo990yiQH6fQXcms3pXXMdrDzMl9l+u0xjaD2aGsfv6+LFC0zh7Dw==} engines: {node: '>=18.18.0'} hasBin: true peerDependencies: @@ -17853,27 +17853,27 @@ packages: sass: optional: true dependencies: - '@next/env': 15.0.0-canary.139 + '@next/env': 15.0.0-canary.141 '@playwright/test': 1.46.0 '@swc/counter': 0.1.3 - '@swc/helpers': 0.5.12 + '@swc/helpers': 0.5.13 busboy: 1.6.0 caniuse-lite: 1.0.30001649 graceful-fs: 4.2.11 postcss: 8.4.31 - react: 19.0.0-rc-4f604941-20240830 - react-dom: 19.0.0-rc-4f604941-20240830(react@19.0.0-rc-4f604941-20240830) - styled-jsx: 5.1.6(react@19.0.0-rc-4f604941-20240830) + react: 19.0.0-rc-d1afcb43-20240903 + react-dom: 19.0.0-rc-d1afcb43-20240903(react@19.0.0-rc-d1afcb43-20240903) + 
styled-jsx: 5.1.6(react@19.0.0-rc-d1afcb43-20240903) optionalDependencies: - '@next/swc-darwin-arm64': 15.0.0-canary.139 - '@next/swc-darwin-x64': 15.0.0-canary.139 - '@next/swc-linux-arm64-gnu': 15.0.0-canary.139 - '@next/swc-linux-arm64-musl': 15.0.0-canary.139 - '@next/swc-linux-x64-gnu': 15.0.0-canary.139 - '@next/swc-linux-x64-musl': 15.0.0-canary.139 - '@next/swc-win32-arm64-msvc': 15.0.0-canary.139 - '@next/swc-win32-ia32-msvc': 15.0.0-canary.139 - '@next/swc-win32-x64-msvc': 15.0.0-canary.139 + '@next/swc-darwin-arm64': 15.0.0-canary.141 + '@next/swc-darwin-x64': 15.0.0-canary.141 + '@next/swc-linux-arm64-gnu': 15.0.0-canary.141 + '@next/swc-linux-arm64-musl': 15.0.0-canary.141 + '@next/swc-linux-x64-gnu': 15.0.0-canary.141 + '@next/swc-linux-x64-musl': 15.0.0-canary.141 + '@next/swc-win32-arm64-msvc': 15.0.0-canary.141 + '@next/swc-win32-ia32-msvc': 15.0.0-canary.141 + '@next/swc-win32-x64-msvc': 15.0.0-canary.141 sharp: 0.33.5 transitivePeerDependencies: - '@babel/core' @@ -19735,15 +19735,6 @@ packages: react: 18.3.1 scheduler: 0.23.2 - /react-dom@19.0.0-rc-4f604941-20240830(react@19.0.0-rc-4f604941-20240830): - resolution: {integrity: sha512-dFNDFiwOyx0qjJwlcxeFi0Vj7jdKOahca5L0xd/ZBdN3xvsv2ZXy8k/PKabJ1ZhHnaMX0g9ovrWjZxKOP+2ybw==} - peerDependencies: - react: 19.0.0-rc-4f604941-20240830 - dependencies: - react: 19.0.0-rc-4f604941-20240830 - scheduler: 0.25.0-rc-4f604941-20240830 - dev: false - /react-dom@19.0.0-rc-cc1ec60d0d-20240607(react@19.0.0-rc-cc1ec60d0d-20240607): resolution: {integrity: sha512-paspD9kAfKKuURVwKWJ0/g3qYw1DGi9h1k9xQV2iQN9cSVZ4JAOD727yjVLyp1zdzsoygjFfLMtSBdZ+oERYvA==} peerDependencies: @@ -19753,6 +19744,15 @@ packages: scheduler: 0.25.0-rc-cc1ec60d0d-20240607 dev: true + /react-dom@19.0.0-rc-d1afcb43-20240903(react@19.0.0-rc-d1afcb43-20240903): + resolution: {integrity: sha512-2c1WEFLt4iODSlnuQv+H2U0GvglEKn21hWboP489758A/1olwMKUwC8IOox6Hgh1g+Zu92fUgsWU+wJGIK6Tzg==} + peerDependencies: + react: 19.0.0-rc-d1afcb43-20240903 + dependencies: + react: 19.0.0-rc-d1afcb43-20240903 + scheduler: 0.25.0-rc-d1afcb43-20240903 + dev: false + /react-is@16.13.1: resolution: {integrity: sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==} @@ -19796,16 +19796,16 @@ packages: dependencies: loose-envify: 1.4.0 - /react@19.0.0-rc-4f604941-20240830: - resolution: {integrity: sha512-ajjx5aGye+DCwLFyOi7zH+sTX0BEf939FBTFtCSwvSyE73Im+CwY+hgh+Jys4T6tXgxCwj/QF6CmrJz04uw0sA==} - engines: {node: '>=0.10.0'} - dev: false - /react@19.0.0-rc-cc1ec60d0d-20240607: resolution: {integrity: sha512-q8A0/IdJ2wdHsjDNO1igFcSSFIMqSKmO7oJZtAjxIA9g0klK45Lxt15NQJ7z7cBvgD1r3xRTtQ/MAqnmwYHs1Q==} engines: {node: '>=0.10.0'} dev: true + /react@19.0.0-rc-d1afcb43-20240903: + resolution: {integrity: sha512-fCQZwZOveZEXThFJDK164B7L4++upA9x3SuJYEt9aVbFgTVIguo4ku01G/kQcxlymGUR3AnpZ/jWGNf77d0h2Q==} + engines: {node: '>=0.10.0'} + dev: false + /read-cache@1.0.0: resolution: {integrity: sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==} dependencies: @@ -20217,14 +20217,14 @@ packages: dependencies: loose-envify: 1.4.0 - /scheduler@0.25.0-rc-4f604941-20240830: - resolution: {integrity: sha512-PrK50bsJWH/whOy2t1PX303p1w+9lxKOYlR34IM/CrTK0/p4W+fyN564jdR1m3ecWjDgVAbf4SR7Iwmkyyw5bw==} - dev: false - /scheduler@0.25.0-rc-cc1ec60d0d-20240607: resolution: {integrity: sha512-yFVKy6SDJkN2bOJSeH6gNo4+1MTygTZXnLRY5IHvEB6P9+O6WYRWz9PkELLjnl64lQwRgiigwzWQRSMNEboOGQ==} dev: true + /scheduler@0.25.0-rc-d1afcb43-20240903: + resolution: 
{integrity: sha512-yAxUDr230qpOdxSDPCDgl6/S9OwK2Hb5IG751dt1gsPQGsElbrPu5jPgkw6cH5u/pvJVTFDvTHVvjPTepQuryA==} + dev: false + /schema-utils@3.3.0: resolution: {integrity: sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==} engines: {node: '>= 10.13.0'} @@ -20926,7 +20926,7 @@ packages: react: 18.3.1 dev: false - /styled-jsx@5.1.6(react@19.0.0-rc-4f604941-20240830): + /styled-jsx@5.1.6(react@19.0.0-rc-cc1ec60d0d-20240607): resolution: {integrity: sha512-qSVyDTeMotdvQYoHWLNGwRFJHC+i+ZvdBRYosOFgC+Wg1vx4frN2/RG/NA7SYqqvKNLf39P2LSRA2pu6n0XYZA==} engines: {node: '>= 12.0.0'} peerDependencies: @@ -20940,10 +20940,10 @@ packages: optional: true dependencies: client-only: 0.0.1 - react: 19.0.0-rc-4f604941-20240830 - dev: false + react: 19.0.0-rc-cc1ec60d0d-20240607 + dev: true - /styled-jsx@5.1.6(react@19.0.0-rc-cc1ec60d0d-20240607): + /styled-jsx@5.1.6(react@19.0.0-rc-d1afcb43-20240903): resolution: {integrity: sha512-qSVyDTeMotdvQYoHWLNGwRFJHC+i+ZvdBRYosOFgC+Wg1vx4frN2/RG/NA7SYqqvKNLf39P2LSRA2pu6n0XYZA==} engines: {node: '>= 12.0.0'} peerDependencies: @@ -20957,8 +20957,8 @@ packages: optional: true dependencies: client-only: 0.0.1 - react: 19.0.0-rc-cc1ec60d0d-20240607 - dev: true + react: 19.0.0-rc-d1afcb43-20240903 + dev: false /stylehacks@7.0.2(postcss@8.4.41): resolution: {integrity: sha512-HdkWZS9b4gbgYTdMg4gJLmm7biAUug1qTqXjS+u8X+/pUd+9Px1E+520GnOW3rST9MNsVOVpsJG+mPHNosxjOQ==}