mirror of
https://github.com/tvytlx/ai-agent-deep-dive.git
synced 2026-04-12 03:54:48 +08:00
Add extracted source directory and README navigation
This commit is contained in:
149
extracted-source/node_modules/@anthropic-ai/sdk/resources/messages/batches.mjs
generated
vendored
Normal file
149
extracted-source/node_modules/@anthropic-ai/sdk/resources/messages/batches.mjs
generated
vendored
Normal file
@@ -0,0 +1,149 @@
|
||||
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
import { APIResource } from "../../core/resource.mjs";
|
||||
import { Page } from "../../core/pagination.mjs";
|
||||
import { buildHeaders } from "../../internal/headers.mjs";
|
||||
import { JSONLDecoder } from "../../internal/decoders/jsonl.mjs";
|
||||
import { AnthropicError } from "../../error.mjs";
|
||||
import { path } from "../../internal/utils/path.mjs";
|
||||
export class Batches extends APIResource {
    /**
     * Submit a batch of Message creation requests for asynchronous processing.
     *
     * A newly created Message Batch starts processing immediately and may take
     * up to 24 hours to finish. See the
     * [user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)
     * for details.
     *
     * @param body - The batch payload: a `requests` array of `{ custom_id, params }` entries.
     * @param options - Optional per-request overrides passed through to the client.
     * @returns A promise resolving to the created Message Batch.
     *
     * @example
     * ```ts
     * const messageBatch = await client.messages.batches.create({
     *   requests: [
     *     {
     *       custom_id: 'my-custom-id-1',
     *       params: {
     *         max_tokens: 1024,
     *         messages: [{ content: 'Hello, world', role: 'user' }],
     *         model: 'claude-opus-4-6',
     *       },
     *     },
     *   ],
     * });
     * ```
     */
    create(body, options) {
        const payload = { body, ...options };
        return this._client.post('/v1/messages/batches', payload);
    }
    /**
     * Fetch a single Message Batch by ID.
     *
     * Idempotent, so it is safe to poll for completion; once finished, download
     * the results from the batch's `results_url`. See the
     * [user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing).
     *
     * @param messageBatchID - ID of the Message Batch to fetch.
     * @param options - Optional per-request overrides passed through to the client.
     * @returns A promise resolving to the Message Batch.
     *
     * @example
     * ```ts
     * const messageBatch = await client.messages.batches.retrieve(
     *   'message_batch_id',
     * );
     * ```
     */
    retrieve(messageBatchID, options) {
        const endpoint = path`/v1/messages/batches/${messageBatchID}`;
        return this._client.get(endpoint, options);
    }
    /**
     * List the Message Batches in the current Workspace, newest first.
     *
     * See the
     * [user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing).
     *
     * @param query - Optional pagination parameters (defaults to `{}`).
     * @param options - Optional per-request overrides passed through to the client.
     * @returns An auto-paginating list of Message Batches.
     *
     * @example
     * ```ts
     * // Automatically fetches more pages as needed.
     * for await (const messageBatch of client.messages.batches.list()) {
     *   // ...
     * }
     * ```
     */
    list(query = {}, options) {
        return this._client.getAPIList('/v1/messages/batches', Page, { query, ...options });
    }
    /**
     * Delete a Message Batch.
     *
     * Only batches that have finished processing can be deleted; cancel an
     * in-progress batch first. See the
     * [user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing).
     *
     * @param messageBatchID - ID of the Message Batch to delete.
     * @param options - Optional per-request overrides passed through to the client.
     * @returns A promise resolving to the deletion confirmation.
     *
     * @example
     * ```ts
     * const deletedMessageBatch =
     *   await client.messages.batches.delete('message_batch_id');
     * ```
     */
    delete(messageBatchID, options) {
        const endpoint = path`/v1/messages/batches/${messageBatchID}`;
        return this._client.delete(endpoint, options);
    }
    /**
     * Cancel an in-progress Message Batch.
     *
     * The batch enters a `canceling` state while the system finishes any
     * non-interruptible in-flight requests. Inspect `request_counts` (and the
     * individual results) to see how many requests were actually canceled —
     * possibly none, if all were non-interruptible. See the
     * [user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing).
     *
     * @param messageBatchID - ID of the Message Batch to cancel.
     * @param options - Optional per-request overrides passed through to the client.
     * @returns A promise resolving to the updated Message Batch.
     *
     * @example
     * ```ts
     * const messageBatch = await client.messages.batches.cancel(
     *   'message_batch_id',
     * );
     * ```
     */
    cancel(messageBatchID, options) {
        const endpoint = path`/v1/messages/batches/${messageBatchID}/cancel`;
        return this._client.post(endpoint, options);
    }
    /**
     * Stream the results of a finished Message Batch as decoded `.jsonl` records.
     *
     * Each record holds the outcome of one request in the batch; ordering is not
     * guaranteed, so correlate via `custom_id`. Throws an {@link AnthropicError}
     * if the batch has no `results_url` yet (i.e. it is still processing). See the
     * [user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing).
     *
     * @param messageBatchID - ID of the Message Batch whose results to stream.
     * @param options - Optional per-request overrides passed through to the client.
     * @returns A promise resolving to a JSONL decoder over the individual results.
     *
     * @example
     * ```ts
     * const messageBatchIndividualResponse =
     *   await client.messages.batches.results('message_batch_id');
     * ```
     */
    async results(messageBatchID, options) {
        const batch = await this.retrieve(messageBatchID);
        if (!batch.results_url) {
            throw new AnthropicError(`No batch \`results_url\`; Has it finished processing? ${batch.processing_status} - ${batch.id}`);
        }
        // Request the raw binary stream and unwrap it into a JSONL decoder.
        const requestOptions = {
            ...options,
            headers: buildHeaders([{ Accept: 'application/binary' }, options?.headers]),
            stream: true,
            __binaryResponse: true,
        };
        return this._client
            .get(batch.results_url, requestOptions)
            ._thenUnwrap((_, props) => JSONLDecoder.fromResponse(props.response, props.controller));
    }
}
|
||||
//# sourceMappingURL=batches.mjs.map
|
||||
123
extracted-source/node_modules/@anthropic-ai/sdk/resources/messages/messages.mjs
generated
vendored
Normal file
123
extracted-source/node_modules/@anthropic-ai/sdk/resources/messages/messages.mjs
generated
vendored
Normal file
@@ -0,0 +1,123 @@
|
||||
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
import { APIResource } from "../../core/resource.mjs";
|
||||
import { buildHeaders } from "../../internal/headers.mjs";
|
||||
import { stainlessHelperHeader } from "../../lib/stainless-helper-header.mjs";
|
||||
import { MessageStream } from "../../lib/MessageStream.mjs";
|
||||
import { parseMessage, } from "../../lib/parser.mjs";
|
||||
import * as BatchesAPI from "./batches.mjs";
|
||||
import { Batches, } from "./batches.mjs";
|
||||
import { MODEL_NONSTREAMING_TOKENS } from "../../internal/constants.mjs";
|
||||
export class Messages extends APIResource {
    constructor() {
        super(...arguments);
        // Sub-resource for the Message Batches API (client.messages.batches.*).
        this.batches = new BatchesAPI.Batches(this._client);
    }
    /**
     * Create a Message.
     *
     * Emits a console warning when the requested model is deprecated, and when
     * `thinking.type === 'enabled'` is used with a model for which 'adaptive'
     * is recommended instead. For non-streaming requests with no configured
     * client timeout, derives a timeout from `max_tokens` (falling back to
     * 600000 ms).
     *
     * @param body - The Message creation payload (model, messages, max_tokens, ...).
     * @param options - Optional per-request overrides passed through to the client.
     * @returns A promise resolving to the created Message (or a stream when `body.stream` is true).
     */
    create(body, options) {
        if (body.model in DEPRECATED_MODELS) {
            console.warn(`The model '${body.model}' is deprecated and will reach end-of-life on ${DEPRECATED_MODELS[body.model]}\nPlease migrate to a newer model. Visit https://docs.anthropic.com/en/docs/resources/model-deprecations for more information.`);
        }
        // BUG FIX: the original tested `body.model in MODELS_TO_WARN_WITH_THINKING_ENABLED`,
        // but that constant is an array, so `in` checked array *keys* ('0', 'length', ...)
        // rather than membership — the warning could never fire. Use `.includes` instead.
        if (MODELS_TO_WARN_WITH_THINKING_ENABLED.includes(body.model) &&
            body.thinking &&
            body.thinking.type === 'enabled') {
            console.warn(`Using Claude with ${body.model} and 'thinking.type=enabled' is deprecated. Use 'thinking.type=adaptive' instead which results in better model performance in our testing: https://platform.claude.com/docs/en/build-with-claude/adaptive-thinking`);
        }
        let timeout = this._client._options.timeout;
        if (!body.stream && timeout == null) {
            // Non-streaming requests get a timeout scaled to the token budget.
            const maxNonstreamingTokens = MODEL_NONSTREAMING_TOKENS[body.model] ?? undefined;
            timeout = this._client.calculateNonstreamingTimeout(body.max_tokens, maxNonstreamingTokens);
        }
        // Collect helper info from tools and messages
        const helperHeader = stainlessHelperHeader(body.tools, body.messages);
        return this._client.post('/v1/messages', {
            body,
            timeout: timeout ?? 600000,
            ...options,
            headers: buildHeaders([helperHeader, options?.headers]),
            stream: body.stream ?? false,
        });
    }
    /**
     * Send a structured list of input messages with text and/or image content, along with an expected `output_config.format` and
     * the response will be automatically parsed and available in the `parsed_output` property of the message.
     *
     * @param params - The Message creation payload, including `output_config.format`.
     * @param options - Optional per-request overrides passed through to the client.
     * @returns A promise resolving to the Message with `parsed_output` populated.
     *
     * @example
     * ```ts
     * const message = await client.messages.parse({
     *   model: 'claude-sonnet-4-5-20250929',
     *   max_tokens: 1024,
     *   messages: [{ role: 'user', content: 'What is 2+2?' }],
     *   output_config: {
     *     format: zodOutputFormat(z.object({ answer: z.number() })),
     *   },
     * });
     *
     * console.log(message.parsed_output?.answer); // 4
     * ```
     */
    parse(params, options) {
        return this.create(params, options).then((message) => parseMessage(message, params, { logger: this._client.logger ?? console }));
    }
    /**
     * Create a Message stream.
     *
     * If `output_config.format` is provided with a parseable format (like `zodOutputFormat()`),
     * the final message will include a `parsed_output` property with the parsed content.
     *
     * @param body - The Message creation payload.
     * @param options - Optional per-request overrides passed through to the client.
     * @returns A {@link MessageStream} for consuming streamed events.
     *
     * @example
     * ```ts
     * const stream = client.messages.stream({
     *   model: 'claude-sonnet-4-5-20250929',
     *   max_tokens: 1024,
     *   messages: [{ role: 'user', content: 'What is 2+2?' }],
     *   output_config: {
     *     format: zodOutputFormat(z.object({ answer: z.number() })),
     *   },
     * });
     *
     * const message = await stream.finalMessage();
     * console.log(message.parsed_output?.answer); // 4
     * ```
     */
    stream(body, options) {
        return MessageStream.createMessage(this, body, options, { logger: this._client.logger ?? console });
    }
    /**
     * Count the number of tokens in a Message.
     *
     * The Token Count API can be used to count the number of tokens in a Message,
     * including tools, images, and documents, without creating it.
     *
     * Learn more about token counting in our
     * [user guide](https://docs.claude.com/en/docs/build-with-claude/token-counting)
     *
     * @param body - The payload to count tokens for (model, messages, tools, ...).
     * @param options - Optional per-request overrides passed through to the client.
     * @returns A promise resolving to the token count.
     *
     * @example
     * ```ts
     * const messageTokensCount =
     *   await client.messages.countTokens({
     *     messages: [{ content: 'string', role: 'user' }],
     *     model: 'claude-opus-4-6',
     *   });
     * ```
     */
    countTokens(body, options) {
        return this._client.post('/v1/messages/count_tokens', { body, ...options });
    }
}
|
||||
// Map of deprecated model IDs to their announced end-of-life dates.
// `Messages.create` consults this map and logs a console warning when a
// request targets one of these models.
const DEPRECATED_MODELS = {
    'claude-1.3': 'November 6th, 2024',
    'claude-1.3-100k': 'November 6th, 2024',
    'claude-instant-1.1': 'November 6th, 2024',
    'claude-instant-1.1-100k': 'November 6th, 2024',
    'claude-instant-1.2': 'November 6th, 2024',
    'claude-3-sonnet-20240229': 'July 21st, 2025',
    'claude-3-opus-20240229': 'January 5th, 2026',
    'claude-2.1': 'July 21st, 2025',
    'claude-2.0': 'July 21st, 2025',
    'claude-3-7-sonnet-latest': 'February 19th, 2026',
    'claude-3-7-sonnet-20250219': 'February 19th, 2026',
    'claude-3-5-haiku-latest': 'February 19th, 2026',
    'claude-3-5-haiku-20241022': 'February 19th, 2026',
};
// Models whose use with 'thinking.type=enabled' warrants a deprecation
// warning in `Messages.create` (adaptive thinking is recommended instead).
const MODELS_TO_WARN_WITH_THINKING_ENABLED = ['claude-opus-4-6'];
// Expose the Batches sub-resource class as a static property for
// convenient access via `Messages.Batches`.
Messages.Batches = Batches;
//# sourceMappingURL=messages.mjs.map
|
||||
Reference in New Issue
Block a user