Generates text and calls tools for a given prompt using a language model.
It is ideal for non-interactive use cases, such as automation tasks where you need to write text (e.g. drafting emails or summarizing web pages), and for agents that use tools.
```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { text } = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Invent a new holiday and describe its traditions.',
});

console.log(text);
```
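Instead of a string prompt, you can pass a system prompt together with an array of messages (see the `system` and `messages` parameters below). A minimal sketch of that form, with illustrative conversation content:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { text } = await generateText({
  model: openai('gpt-4o'),
  system: 'You are a friendly assistant.',
  // Illustrative conversation; prior turns can be replayed this way.
  messages: [
    { role: 'user', content: 'Suggest a name for a new holiday.' },
    { role: 'assistant', content: 'How about "Festival of Small Wins"?' },
    { role: 'user', content: 'Describe its traditions.' },
  ],
});

console.log(text);
```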
To see generateText in action, check out these examples.
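For agents, you can pass tools that the model is allowed to call (see the `tools`, `stopWhen`, and related parameters below). The following is a minimal sketch, assuming a hypothetical `getWeather` tool with a made-up implementation:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText, tool, stepCountIs } from 'ai';
import { z } from 'zod';

const { text, toolCalls, toolResults } = await generateText({
  model: openai('gpt-4o'),
  tools: {
    // Hypothetical tool, for illustration only.
    getWeather: tool({
      description: 'Get the current weather for a city.',
      inputSchema: z.object({
        city: z.string().describe('The city to get the weather for'),
      }),
      execute: async ({ city }) => {
        // Replace with a real weather lookup.
        return { city, temperature: 21, condition: 'sunny' };
      },
    }),
  },
  // Allow multiple steps so the model can call the tool and then answer.
  stopWhen: stepCountIs(5),
  prompt: 'What is the weather in San Francisco?',
});

console.log(toolCalls, toolResults, text);
```

Setting `stopWhen` to `stepCountIs(5)` lets the call continue past the default single step (`stepCountIs(1)`), so the model can incorporate the tool results into its final text answer.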
Import
<Snippet text={`import { generateText } from "ai"`} prompt={false} />
API Signature
Parameters
<PropertiesTable
content={[
{
name: 'model',
type: 'LanguageModel',
description: "The language model to use. Example: openai('gpt-4o')",
},
{
name: 'system',
type: 'string',
description:
'The system prompt to use that specifies the behavior of the model.',
},
{
name: 'prompt',
type: 'string | Array<SystemModelMessage | UserModelMessage | AssistantModelMessage | ToolModelMessage>',
description: 'The input prompt to generate text from.',
},
{
name: 'messages',
type: 'Array<SystemModelMessage | UserModelMessage | AssistantModelMessage | ToolModelMessage>',
description:
'A list of messages that represent a conversation. Automatically converts UI messages from the useChat hook.',
properties: [
{
type: 'SystemModelMessage',
parameters: [
{
name: 'role',
type: "'system'",
description: 'The role for the system message.',
},
{
name: 'content',
type: 'string',
description: 'The content of the message.',
},
],
},
{
type: 'UserModelMessage',
parameters: [
{
name: 'role',
type: "'user'",
description: 'The role for the user message.',
},
{
name: 'content',
type: 'string | Array<TextPart | ImagePart | FilePart>',
description: 'The content of the message.',
properties: [
{
type: 'TextPart',
parameters: [
{
name: 'type',
type: "'text'",
description: 'The type of the message part.',
},
{
name: 'text',
type: 'string',
description: 'The text content of the message part.',
},
],
},
{
type: 'ImagePart',
parameters: [
{
name: 'type',
type: "'image'",
description: 'The type of the message part.',
},
{
name: 'image',
type: 'string | Uint8Array | Buffer | ArrayBuffer | URL',
description:
'The image content of the message part. Strings are either base64-encoded content, base64 data URLs, or http(s) URLs.',
},
{
name: 'mediaType',
type: 'string',
description:
'The IANA media type of the image. Optional.',
isOptional: true,
},
],
},
{
type: 'FilePart',
parameters: [
{
name: 'type',
type: "'file'",
description: 'The type of the message part.',
},
{
name: 'data',
type: 'string | Uint8Array | Buffer | ArrayBuffer | URL',
description:
'The file content of the message part. Strings are either base64-encoded content, base64 data URLs, or http(s) URLs.',
},
{
name: 'mediaType',
type: 'string',
description: 'The IANA media type of the file.',
},
],
},
],
},
],
},
{
type: 'AssistantModelMessage',
parameters: [
{
name: 'role',
type: "'assistant'",
description: 'The role for the assistant message.',
},
{
name: 'content',
type: 'string | Array<TextPart | FilePart | ReasoningPart | ToolCallPart>',
description: 'The content of the message.',
properties: [
{
type: 'TextPart',
parameters: [
{
name: 'type',
type: "'text'",
description: 'The type of the message part.',
},
{
name: 'text',
type: 'string',
description: 'The text content of the message part.',
},
],
},
{
type: 'ReasoningPart',
parameters: [
{
name: 'type',
type: "'reasoning'",
description: 'The type of the message part.',
},
{
name: 'text',
type: 'string',
description: 'The reasoning text.',
},
],
},
{
type: 'FilePart',
parameters: [
{
name: 'type',
type: "'file'",
description: 'The type of the message part.',
},
{
name: 'data',
type: 'string | Uint8Array | Buffer | ArrayBuffer | URL',
description:
'The file content of the message part. Strings are either base64-encoded content, base64 data URLs, or http(s) URLs.',
},
{
name: 'mediaType',
type: 'string',
description: 'The IANA media type of the file.',
},
{
name: 'filename',
type: 'string',
description: 'The name of the file.',
isOptional: true,
},
],
},
{
type: 'ToolCallPart',
parameters: [
{
name: 'type',
type: "'tool-call'",
description: 'The type of the message part.',
},
{
name: 'toolCallId',
type: 'string',
description: 'The id of the tool call.',
},
{
name: 'toolName',
type: 'string',
description:
'The name of the tool, which typically would be the name of the function.',
},
{
name: 'input',
type: 'object based on Zod schema',
description:
'Input (parameters) generated by the model to be used by the tool.',
},
],
},
],
},
],
},
{
type: 'ToolModelMessage',
parameters: [
{
name: 'role',
type: "'tool'",
description: 'The role for the tool message.',
},
{
name: 'content',
type: 'Array<ToolResultPart>',
description: 'The content of the message.',
properties: [
{
type: 'ToolResultPart',
parameters: [
{
name: 'type',
type: "'tool-result'",
description: 'The type of the message part.',
},
{
name: 'toolCallId',
type: 'string',
description:
'The id of the tool call the result corresponds to.',
},
{
name: 'toolName',
type: 'string',
description:
'The name of the tool the result corresponds to.',
},
{
name: 'output',
type: 'unknown',
description:
'The result returned by the tool after execution.',
},
{
name: 'isError',
type: 'boolean',
isOptional: true,
description:
'Whether the result is an error or contains an error message.',
},
],
},
],
},
],
},
],
},
{
name: 'tools',
type: 'ToolSet',
description:
'Tools that are accessible to and can be called by the model. The model needs to support calling tools.',
properties: [
{
type: 'Tool',
parameters: [
{
name: 'description',
isOptional: true,
type: 'string',
description:
'Information about the purpose of the tool including details on how and when it can be used by the model.',
},
{
name: 'inputSchema',
type: 'Zod Schema | JSON Schema',
description:
'The schema of the input that the tool expects. The language model will use this to generate the input. It is also used to validate the output of the language model. Use descriptions to make the input understandable for the language model. You can either pass in a Zod schema or a JSON schema (using the jsonSchema function).',
},
{
name: 'execute',
isOptional: true,
type: 'async (parameters: T, options: ToolExecutionOptions) => RESULT',
description:
'An async function that is called with the arguments from the tool call and produces a result. If not provided, the tool will not be executed automatically.',
properties: [
{
type: 'ToolExecutionOptions',
parameters: [
{
name: 'toolCallId',
type: 'string',
description:
'The ID of the tool call. You can use it e.g. when sending tool-call related information with stream data.',
},
{
name: 'messages',
type: 'ModelMessage[]',
description:
'Messages that were sent to the language model to initiate the response that contained the tool call. The messages do not include the system prompt or the assistant response that contained the tool call.',
},
{
name: 'abortSignal',
type: 'AbortSignal',
description:
'An optional abort signal that indicates that the overall operation should be aborted.',
},
],
},
],
},
],
},
],
},
{
name: 'toolChoice',
isOptional: true,
type: '"auto" | "none" | "required" | { "type": "tool", "toolName": string }',
description:
'The tool choice setting. It specifies how tools are selected by the model. The default is "auto" (the model decides whether and which tools to call). "none" disables tool calls. "required" forces the model to call a tool. { "type": "tool", "toolName": string } forces the model to call the specified tool.',
},
{
name: 'maxOutputTokens',
type: 'number',
isOptional: true,
description: 'Maximum number of tokens to generate.',
},
{
name: 'temperature',
type: 'number',
isOptional: true,
description:
'Temperature setting. The value is passed through to the provider. The range depends on the provider and model. It is recommended to set either temperature or topP, but not both.',
},
{
name: 'topP',
type: 'number',
isOptional: true,
description:
'Nucleus sampling. The value is passed through to the provider. The range depends on the provider and model. It is recommended to set either temperature or topP, but not both.',
},
{
name: 'topK',
type: 'number',
isOptional: true,
description:
'Only sample from the top K options for each subsequent token. Used to remove "long tail" low probability responses. Recommended for advanced use cases only. You usually only need to use temperature.',
},
{
name: 'presencePenalty',
type: 'number',
isOptional: true,
description:
'Presence penalty setting. It affects the likelihood of the model to repeat information that is already in the prompt. The value is passed through to the provider. The range depends on the provider and model.',
},
{
name: 'frequencyPenalty',
type: 'number',
isOptional: true,
description:
'Frequency penalty setting. It affects the likelihood of the model to repeatedly use the same words or phrases. The value is passed through to the provider. The range depends on the provider and model.',
},
{
name: 'stopSequences',
type: 'string[]',
isOptional: true,
description:
'Sequences that will stop the generation of the text. If the model generates any of these sequences, it will stop generating further text.',
},
{
name: 'seed',
type: 'number',
isOptional: true,
description:
'The seed (integer) to use for random sampling. If set and supported by the model, calls will generate deterministic results.',
},
{
name: 'maxRetries',
type: 'number',
isOptional: true,
description:
'Maximum number of retries. Set to 0 to disable retries. Default: 2.',
},
{
name: 'abortSignal',
type: 'AbortSignal',
isOptional: true,
description:
'An optional abort signal that can be used to cancel the call.',
},
{
name: 'headers',
type: 'Record<string, string>',
isOptional: true,
description:
'Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.',
},
{
name: 'experimental_telemetry',
type: 'TelemetrySettings',
isOptional: true,
description: 'Telemetry configuration. Experimental feature.',
properties: [
{
type: 'TelemetrySettings',
parameters: [
{
name: 'isEnabled',
type: 'boolean',
isOptional: true,
description:
'Enable or disable telemetry. Disabled by default while experimental.',
},
{
name: 'recordInputs',
type: 'boolean',
isOptional: true,
description:
'Enable or disable input recording. Enabled by default.',
},
{
name: 'recordOutputs',
type: 'boolean',
isOptional: true,
description:
'Enable or disable output recording. Enabled by default.',
},
{
name: 'functionId',
type: 'string',
isOptional: true,
description:
'Identifier for this function. Used to group telemetry data by function.',
},
{
name: 'metadata',
isOptional: true,
type: 'Record<string, string | number | boolean | Array<null | undefined | string> | Array<null | undefined | number> | Array<null | undefined | boolean>>',
description:
'Additional information to include in the telemetry data.',
},
],
},
],
},
{
name: 'providerOptions',
type: 'Record<string,Record<string,JSONValue>> | undefined',
isOptional: true,
description:
'Provider-specific options. The outer key is the provider name. The inner values are the metadata. Details depend on the provider.',
},
{
name: 'activeTools',
type: 'Array',
isOptional: true,
description:
'Limits the tools that are available for the model to call without changing the tool call and result types in the result. All tools are active by default.',
},
{
name: 'stopWhen',
type: 'StopCondition | Array<StopCondition>',
isOptional: true,
description:
'Condition for stopping the generation when there are tool results in the last step. When the condition is an array, any of the conditions can be met to stop the generation. Default: stepCountIs(1).',
},
{
name: 'prepareStep',
type: '(options: PrepareStepOptions) => PrepareStepResult | Promise<PrepareStepResult>',
isOptional: true,
description:
'Optional function that you can use to provide different settings for a step. You can modify the model, tool choices, active tools, system prompt, and input messages for each step.',
properties: [
{
type: 'PrepareStepFunction',
parameters: [
{
name: 'options',
type: 'object',
description: 'The options for the step.',
properties: [
{
type: 'PrepareStepOptions',
parameters: [
{
name: 'steps',
type: 'Array<StepResult>',
description: 'The steps that have been executed so far.',
},
{
name: 'stepNumber',
type: 'number',
description:
'The number of the step that is being executed.',
},
{
name: 'model',
type: 'LanguageModel',
description: 'The model that is being used.',
},
{
name: 'messages',
type: 'Array',
description:
'The messages that will be sent to the model for the current step.',
},
],
},
],
},
],
},
{
type: 'PrepareStepResult',
description:
'Return value that can modify settings for the current step.',
parameters: [
{
name: 'model',
type: 'LanguageModel',
isOptional: true,
description: 'Change the model for this step.',
},
{
name: 'toolChoice',
type: 'ToolChoice',
isOptional: true,
description: 'Change the tool choice strategy for this step.',
},
{
name: 'activeTools',
type: 'Array',
isOptional: true,
description: 'Change which tools are active for this step.',
},
{
name: 'system',
type: 'string',
isOptional: true,
description: 'Change the system prompt for this step.',
},
{
name: 'messages',
type: 'Array',
isOptional: true,
description: 'Modify the input messages for this step.',
},
],
},
],
},
{
name: 'experimental_context',
type: 'unknown',
isOptional: true,
description:
'Context that is passed into tool execution. Experimental (can break in patch releases).',
},
{
name: 'experimental_download',
type: '(requestedDownloads: Array<{ url: URL; isUrlSupportedByModel: boolean }>) => Promise<Array<null | { data: Uint8Array; mediaType?: string }>>',
isOptional: true,
description:
'Custom download function to control how URLs are fetched when they appear in prompts. By default, files are downloaded if the model does not support the URL for the given media type. Experimental feature. Return null to pass the URL directly to the model (when supported), or return downloaded content with data and media type.',
},
{
name: 'experimental_repairToolCall',
type: '(options: ToolCallRepairOptions) => Promise<LanguageModelV2ToolCall | null>',
isOptional: true,
description:
'A function that attempts to repair a tool call that failed to parse. Return either a repaired tool call or null if the tool call cannot be repaired.',
properties: [
{
type: 'ToolCallRepairOptions',
parameters: [
{
name: 'system',
type: 'string | undefined',
description: 'The system prompt.',
},
{
name: 'messages',
type: 'ModelMessage[]',
description: 'The messages in the current generation step.',
},
{
name: 'toolCall',
type: 'LanguageModelV2ToolCall',
description: 'The tool call that failed to parse.',
},
{
name: 'tools',
type: 'TOOLS',
description: 'The tools that are available.',
},
{
name: 'parameterSchema',
type: '(options: { toolName: string }) => JSONSchema7',
description:
'A function that returns the JSON Schema for a tool.',
},
{
name: 'error',
type: 'NoSuchToolError | InvalidToolInputError',
description:
'The error that occurred while parsing the tool call.',
},
],
},
],
},
{
name: 'experimental_output',
type: 'Output',
isOptional: true,
description: 'Experimental setting for generating structured outputs.',
properties: [
{
type: 'Output',
parameters: [
{
name: 'Output.text()',
type: 'Output',
description: 'Forward text output.',
},
{
name: 'Output.object()',
type: 'Output',
description: 'Generate a JSON object of type OBJECT.',
properties: [
{
type: 'Options',
parameters: [
{
name: 'schema',
type: 'Schema