chore: 添加虚拟环境到仓库
- 添加 backend_service/venv 虚拟环境 - 包含所有Python依赖包 - 注意:虚拟环境约393MB,包含12655个文件
This commit is contained in:
@@ -0,0 +1,119 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
import typing as _t
|
||||
|
||||
from . import types
|
||||
from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes, omit, not_given
|
||||
from ._utils import file_from_path
|
||||
from ._client import (
|
||||
Client,
|
||||
Stream,
|
||||
Timeout,
|
||||
Anthropic,
|
||||
Transport,
|
||||
AsyncClient,
|
||||
AsyncStream,
|
||||
AsyncAnthropic,
|
||||
RequestOptions,
|
||||
)
|
||||
from ._models import BaseModel
|
||||
from ._version import __title__, __version__
|
||||
from ._response import APIResponse as APIResponse, AsyncAPIResponse as AsyncAPIResponse
|
||||
from ._constants import (
|
||||
AI_PROMPT as AI_PROMPT,
|
||||
HUMAN_PROMPT as HUMAN_PROMPT,
|
||||
DEFAULT_TIMEOUT,
|
||||
DEFAULT_MAX_RETRIES,
|
||||
DEFAULT_CONNECTION_LIMITS,
|
||||
)
|
||||
from ._exceptions import (
|
||||
APIError,
|
||||
ConflictError,
|
||||
NotFoundError,
|
||||
AnthropicError,
|
||||
APIStatusError,
|
||||
RateLimitError,
|
||||
APITimeoutError,
|
||||
BadRequestError,
|
||||
APIConnectionError,
|
||||
AuthenticationError,
|
||||
InternalServerError,
|
||||
PermissionDeniedError,
|
||||
UnprocessableEntityError,
|
||||
APIResponseValidationError,
|
||||
)
|
||||
from ._base_client import DefaultHttpxClient, DefaultAioHttpClient, DefaultAsyncHttpxClient
|
||||
from ._utils._logs import setup_logging as _setup_logging
|
||||
from .lib._parse._transform import transform_schema
|
||||
|
||||
# Explicit public API of the `anthropic` package. The loop at the bottom of
# this module iterates this list to rewrite each symbol's `__module__` to
# "anthropic" so error messages reference the public import path.
__all__ = [
    "types",
    "__version__",
    "__title__",
    "NoneType",
    "Transport",
    "ProxiesTypes",
    "NotGiven",
    "NOT_GIVEN",
    "not_given",
    "Omit",
    "omit",
    "AnthropicError",
    "APIError",
    "APIStatusError",
    "APITimeoutError",
    "APIConnectionError",
    "APIResponseValidationError",
    "BadRequestError",
    "AuthenticationError",
    "PermissionDeniedError",
    "NotFoundError",
    "ConflictError",
    "UnprocessableEntityError",
    "RateLimitError",
    "InternalServerError",
    "Timeout",
    "RequestOptions",
    "Client",
    "AsyncClient",
    "Stream",
    "AsyncStream",
    "Anthropic",
    "AsyncAnthropic",
    "file_from_path",
    "BaseModel",
    "DEFAULT_TIMEOUT",
    "DEFAULT_MAX_RETRIES",
    "DEFAULT_CONNECTION_LIMITS",
    "DefaultHttpxClient",
    "DefaultAsyncHttpxClient",
    "DefaultAioHttpClient",
    "HUMAN_PROMPT",
    "AI_PROMPT",
    "beta_tool",
    "beta_async_tool",
    "transform_schema",
]
|
||||
|
||||
# Runtime-only re-export of `resources` through a proxy object; hidden from
# type checkers. NOTE(review): presumably the proxy defers importing the
# resource modules until first attribute access — confirm in
# `_utils/_resources_proxy.py`.
if not _t.TYPE_CHECKING:
    from ._utils._resources_proxy import resources as resources

from .lib.tools import beta_tool, beta_async_tool
from .lib.vertex import *
from .lib.bedrock import *
from .lib.streaming import *

# Install the SDK's logging configuration as an import side effect.
_setup_logging()

# Update the __module__ attribute for exported symbols so that
# error messages point to this module instead of the module
# it was originally defined in, e.g.
# anthropic._exceptions.NotFoundError -> anthropic.NotFoundError
__locals = locals()
for __name in __all__:
    if not __name.startswith("__"):
        try:
            __locals[__name].__module__ = "anthropic"
        except (TypeError, AttributeError):
            # Some of our exported symbols are builtins which we can't set attributes for.
            pass
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,653 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import TYPE_CHECKING, Any, Mapping
|
||||
from typing_extensions import Self, override
|
||||
|
||||
import httpx
|
||||
|
||||
from . import _constants, _exceptions
|
||||
from ._qs import Querystring
|
||||
from ._types import (
|
||||
Omit,
|
||||
Headers,
|
||||
Timeout,
|
||||
NotGiven,
|
||||
Transport,
|
||||
ProxiesTypes,
|
||||
RequestOptions,
|
||||
not_given,
|
||||
)
|
||||
from ._utils import is_given, get_async_library
|
||||
from ._compat import cached_property
|
||||
from ._version import __version__
|
||||
from ._streaming import Stream as Stream, AsyncStream as AsyncStream
|
||||
from ._exceptions import APIStatusError
|
||||
from ._base_client import (
|
||||
DEFAULT_MAX_RETRIES,
|
||||
SyncAPIClient,
|
||||
AsyncAPIClient,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .resources import beta, models, messages, completions
|
||||
from .resources.models import Models, AsyncModels
|
||||
from .resources.beta.beta import Beta, AsyncBeta
|
||||
from .resources.completions import Completions, AsyncCompletions
|
||||
from .resources.messages.messages import Messages, AsyncMessages
|
||||
|
||||
# Names this client module re-exports (clients, their aliases and the
# request-option types callers need to configure them).
__all__ = [
    "Timeout",
    "Transport",
    "ProxiesTypes",
    "RequestOptions",
    "Anthropic",
    "AsyncAnthropic",
    "Client",
    "AsyncClient",
]
|
||||
|
||||
|
||||
class Anthropic(SyncAPIClient):
    """Synchronous client for the Anthropic API.

    Holds credentials and connection options and exposes the API through the
    `completions`, `messages`, `models` and `beta` resource properties.
    """

    # client options
    api_key: str | None
    auth_token: str | None

    # constants
    HUMAN_PROMPT = _constants.HUMAN_PROMPT
    AI_PROMPT = _constants.AI_PROMPT

    def __init__(
        self,
        *,
        api_key: str | None = None,
        auth_token: str | None = None,
        base_url: str | httpx.URL | None = None,
        timeout: float | Timeout | None | NotGiven = not_given,
        max_retries: int = DEFAULT_MAX_RETRIES,
        default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        # Configure a custom httpx client.
        # We provide a `DefaultHttpxClient` class that you can pass to retain the default values we use for `limits`, `timeout` & `follow_redirects`.
        # See the [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
        http_client: httpx.Client | None = None,
        # Enable or disable schema validation for data returned by the API.
        # When enabled an error APIResponseValidationError is raised
        # if the API responds with invalid data for the expected schema.
        #
        # This parameter may be removed or changed in the future.
        # If you rely on this feature, please open a GitHub issue
        # outlining your use-case to help us decide if it should be
        # part of our public interface in the future.
        _strict_response_validation: bool = False,
    ) -> None:
        """Construct a new synchronous Anthropic client instance.

        This automatically infers the following arguments from their corresponding environment variables if they are not provided:
        - `api_key` from `ANTHROPIC_API_KEY`
        - `auth_token` from `ANTHROPIC_AUTH_TOKEN`
        """
        if api_key is None:
            api_key = os.environ.get("ANTHROPIC_API_KEY")
        self.api_key = api_key

        if auth_token is None:
            auth_token = os.environ.get("ANTHROPIC_AUTH_TOKEN")
        self.auth_token = auth_token

        if base_url is None:
            base_url = os.environ.get("ANTHROPIC_BASE_URL")
        if base_url is None:
            # fix: plain string literal — previous code used an f-string with
            # no placeholders (ruff F541)
            base_url = "https://api.anthropic.com"

        super().__init__(
            version=__version__,
            base_url=base_url,
            max_retries=max_retries,
            timeout=timeout,
            http_client=http_client,
            custom_headers=default_headers,
            custom_query=default_query,
            _strict_response_validation=_strict_response_validation,
        )

        self._default_stream_cls = Stream

    @cached_property
    def completions(self) -> Completions:
        # Resources are imported lazily inside each property to avoid import
        # cycles between this module and the resources package.
        from .resources.completions import Completions

        return Completions(self)

    @cached_property
    def messages(self) -> Messages:
        from .resources.messages import Messages

        return Messages(self)

    @cached_property
    def models(self) -> Models:
        from .resources.models import Models

        return Models(self)

    @cached_property
    def beta(self) -> Beta:
        from .resources.beta import Beta

        return Beta(self)

    @cached_property
    def with_raw_response(self) -> AnthropicWithRawResponse:
        return AnthropicWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AnthropicWithStreamedResponse:
        return AnthropicWithStreamedResponse(self)

    @property
    @override
    def qs(self) -> Querystring:
        # Array query params are serialized comma-separated.
        return Querystring(array_format="comma")

    @property
    @override
    def auth_headers(self) -> dict[str, str]:
        # Either (or both) of the two auth schemes below may contribute headers.
        return {**self._api_key_auth, **self._bearer_auth}

    @property
    def _api_key_auth(self) -> dict[str, str]:
        api_key = self.api_key
        if api_key is None:
            return {}
        return {"X-Api-Key": api_key}

    @property
    def _bearer_auth(self) -> dict[str, str]:
        auth_token = self.auth_token
        if auth_token is None:
            return {}
        return {"Authorization": f"Bearer {auth_token}"}

    @property
    @override
    def default_headers(self) -> dict[str, str | Omit]:
        # Custom headers are spread last so callers can override the defaults.
        return {
            **super().default_headers,
            "X-Stainless-Async": "false",
            "anthropic-version": "2023-06-01",
            **self._custom_headers,
        }

    @override
    def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None:
        """Raise TypeError unless some auth credential or an explicit opt-out is present."""
        if self.api_key and headers.get("X-Api-Key"):
            return
        if isinstance(custom_headers.get("X-Api-Key"), Omit):
            return

        if self.auth_token and headers.get("Authorization"):
            return
        if isinstance(custom_headers.get("Authorization"), Omit):
            return

        raise TypeError(
            '"Could not resolve authentication method. Expected either api_key or auth_token to be set. Or for one of the `X-Api-Key` or `Authorization` headers to be explicitly omitted"'
        )

    def copy(
        self,
        *,
        api_key: str | None = None,
        auth_token: str | None = None,
        base_url: str | httpx.URL | None = None,
        timeout: float | Timeout | None | NotGiven = not_given,
        http_client: httpx.Client | None = None,
        max_retries: int | NotGiven = not_given,
        default_headers: Mapping[str, str] | None = None,
        set_default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        set_default_query: Mapping[str, object] | None = None,
        # mutable default is safe here: it is only unpacked, never mutated
        _extra_kwargs: Mapping[str, Any] = {},
    ) -> Self:
        """
        Create a new client instance re-using the same options given to the current client with optional overriding.
        """
        if default_headers is not None and set_default_headers is not None:
            raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")

        if default_query is not None and set_default_query is not None:
            raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")

        # `default_*` merges with the current values; `set_default_*` replaces them.
        headers = self._custom_headers
        if default_headers is not None:
            headers = {**headers, **default_headers}
        elif set_default_headers is not None:
            headers = set_default_headers

        params = self._custom_query
        if default_query is not None:
            params = {**params, **default_query}
        elif set_default_query is not None:
            params = set_default_query

        http_client = http_client or self._client
        return self.__class__(
            api_key=api_key or self.api_key,
            auth_token=auth_token or self.auth_token,
            base_url=base_url or self.base_url,
            timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
            http_client=http_client,
            max_retries=max_retries if is_given(max_retries) else self.max_retries,
            default_headers=headers,
            default_query=params,
            **_extra_kwargs,
        )

    # Alias for `copy` for nicer inline usage, e.g.
    # client.with_options(timeout=10).foo.create(...)
    with_options = copy

    @override
    def _make_status_error(
        self,
        err_msg: str,
        *,
        body: object,
        response: httpx.Response,
    ) -> APIStatusError:
        """Map an HTTP status code to the matching typed exception."""
        if response.status_code == 400:
            return _exceptions.BadRequestError(err_msg, response=response, body=body)

        if response.status_code == 401:
            return _exceptions.AuthenticationError(err_msg, response=response, body=body)

        if response.status_code == 403:
            return _exceptions.PermissionDeniedError(err_msg, response=response, body=body)

        if response.status_code == 404:
            return _exceptions.NotFoundError(err_msg, response=response, body=body)

        if response.status_code == 409:
            return _exceptions.ConflictError(err_msg, response=response, body=body)

        if response.status_code == 413:
            return _exceptions.RequestTooLargeError(err_msg, response=response, body=body)

        if response.status_code == 422:
            return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body)

        if response.status_code == 429:
            return _exceptions.RateLimitError(err_msg, response=response, body=body)

        # 529 is Anthropic's "overloaded" status; check before the generic 5xx bucket.
        if response.status_code == 529:
            return _exceptions.OverloadedError(err_msg, response=response, body=body)

        if response.status_code >= 500:
            return _exceptions.InternalServerError(err_msg, response=response, body=body)
        return APIStatusError(err_msg, response=response, body=body)
||||
|
||||
|
||||
class AsyncAnthropic(AsyncAPIClient):
    """Asynchronous client for the Anthropic API.

    Mirrors `Anthropic` but uses `httpx.AsyncClient` and async resource classes.
    """

    # client options
    api_key: str | None
    auth_token: str | None

    # constants
    HUMAN_PROMPT = _constants.HUMAN_PROMPT
    AI_PROMPT = _constants.AI_PROMPT

    def __init__(
        self,
        *,
        api_key: str | None = None,
        auth_token: str | None = None,
        base_url: str | httpx.URL | None = None,
        timeout: float | Timeout | None | NotGiven = not_given,
        max_retries: int = DEFAULT_MAX_RETRIES,
        default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        # Configure a custom httpx client.
        # We provide a `DefaultAsyncHttpxClient` class that you can pass to retain the default values we use for `limits`, `timeout` & `follow_redirects`.
        # See the [httpx documentation](https://www.python-httpx.org/api/#asyncclient) for more details.
        http_client: httpx.AsyncClient | None = None,
        # Enable or disable schema validation for data returned by the API.
        # When enabled an error APIResponseValidationError is raised
        # if the API responds with invalid data for the expected schema.
        #
        # This parameter may be removed or changed in the future.
        # If you rely on this feature, please open a GitHub issue
        # outlining your use-case to help us decide if it should be
        # part of our public interface in the future.
        _strict_response_validation: bool = False,
    ) -> None:
        """Construct a new async AsyncAnthropic client instance.

        This automatically infers the following arguments from their corresponding environment variables if they are not provided:
        - `api_key` from `ANTHROPIC_API_KEY`
        - `auth_token` from `ANTHROPIC_AUTH_TOKEN`
        """
        if api_key is None:
            api_key = os.environ.get("ANTHROPIC_API_KEY")
        self.api_key = api_key

        if auth_token is None:
            auth_token = os.environ.get("ANTHROPIC_AUTH_TOKEN")
        self.auth_token = auth_token

        if base_url is None:
            base_url = os.environ.get("ANTHROPIC_BASE_URL")
        if base_url is None:
            # fix: plain string literal — previous code used an f-string with
            # no placeholders (ruff F541)
            base_url = "https://api.anthropic.com"

        super().__init__(
            version=__version__,
            base_url=base_url,
            max_retries=max_retries,
            timeout=timeout,
            http_client=http_client,
            custom_headers=default_headers,
            custom_query=default_query,
            _strict_response_validation=_strict_response_validation,
        )

        self._default_stream_cls = AsyncStream

    @cached_property
    def completions(self) -> AsyncCompletions:
        # Resources are imported lazily inside each property to avoid import
        # cycles between this module and the resources package.
        from .resources.completions import AsyncCompletions

        return AsyncCompletions(self)

    @cached_property
    def messages(self) -> AsyncMessages:
        from .resources.messages import AsyncMessages

        return AsyncMessages(self)

    @cached_property
    def models(self) -> AsyncModels:
        from .resources.models import AsyncModels

        return AsyncModels(self)

    @cached_property
    def beta(self) -> AsyncBeta:
        from .resources.beta import AsyncBeta

        return AsyncBeta(self)

    @cached_property
    def with_raw_response(self) -> AsyncAnthropicWithRawResponse:
        return AsyncAnthropicWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncAnthropicWithStreamedResponse:
        return AsyncAnthropicWithStreamedResponse(self)

    @property
    @override
    def qs(self) -> Querystring:
        # Array query params are serialized comma-separated.
        return Querystring(array_format="comma")

    @property
    @override
    def auth_headers(self) -> dict[str, str]:
        # Either (or both) of the two auth schemes below may contribute headers.
        return {**self._api_key_auth, **self._bearer_auth}

    @property
    def _api_key_auth(self) -> dict[str, str]:
        api_key = self.api_key
        if api_key is None:
            return {}
        return {"X-Api-Key": api_key}

    @property
    def _bearer_auth(self) -> dict[str, str]:
        auth_token = self.auth_token
        if auth_token is None:
            return {}
        return {"Authorization": f"Bearer {auth_token}"}

    @property
    @override
    def default_headers(self) -> dict[str, str | Omit]:
        # Custom headers are spread last so callers can override the defaults.
        return {
            **super().default_headers,
            "X-Stainless-Async": f"async:{get_async_library()}",
            "anthropic-version": "2023-06-01",
            **self._custom_headers,
        }

    @override
    def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None:
        """Raise TypeError unless some auth credential or an explicit opt-out is present."""
        if self.api_key and headers.get("X-Api-Key"):
            return
        if isinstance(custom_headers.get("X-Api-Key"), Omit):
            return

        if self.auth_token and headers.get("Authorization"):
            return
        if isinstance(custom_headers.get("Authorization"), Omit):
            return

        raise TypeError(
            '"Could not resolve authentication method. Expected either api_key or auth_token to be set. Or for one of the `X-Api-Key` or `Authorization` headers to be explicitly omitted"'
        )

    def copy(
        self,
        *,
        api_key: str | None = None,
        auth_token: str | None = None,
        base_url: str | httpx.URL | None = None,
        timeout: float | Timeout | None | NotGiven = not_given,
        http_client: httpx.AsyncClient | None = None,
        max_retries: int | NotGiven = not_given,
        default_headers: Mapping[str, str] | None = None,
        set_default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        set_default_query: Mapping[str, object] | None = None,
        # mutable default is safe here: it is only unpacked, never mutated
        _extra_kwargs: Mapping[str, Any] = {},
    ) -> Self:
        """
        Create a new client instance re-using the same options given to the current client with optional overriding.
        """
        if default_headers is not None and set_default_headers is not None:
            raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")

        if default_query is not None and set_default_query is not None:
            raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")

        # `default_*` merges with the current values; `set_default_*` replaces them.
        headers = self._custom_headers
        if default_headers is not None:
            headers = {**headers, **default_headers}
        elif set_default_headers is not None:
            headers = set_default_headers

        params = self._custom_query
        if default_query is not None:
            params = {**params, **default_query}
        elif set_default_query is not None:
            params = set_default_query

        http_client = http_client or self._client
        return self.__class__(
            api_key=api_key or self.api_key,
            auth_token=auth_token or self.auth_token,
            base_url=base_url or self.base_url,
            timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
            http_client=http_client,
            max_retries=max_retries if is_given(max_retries) else self.max_retries,
            default_headers=headers,
            default_query=params,
            **_extra_kwargs,
        )

    # Alias for `copy` for nicer inline usage, e.g.
    # client.with_options(timeout=10).foo.create(...)
    with_options = copy

    @override
    def _make_status_error(
        self,
        err_msg: str,
        *,
        body: object,
        response: httpx.Response,
    ) -> APIStatusError:
        """Map an HTTP status code to the matching typed exception.

        fix: added the 413 (RequestTooLargeError) and 529 (OverloadedError)
        branches that the synchronous client already has — previously these
        statuses fell through to the generic buckets, so sync and async
        callers saw different exception types for the same response.
        """
        if response.status_code == 400:
            return _exceptions.BadRequestError(err_msg, response=response, body=body)

        if response.status_code == 401:
            return _exceptions.AuthenticationError(err_msg, response=response, body=body)

        if response.status_code == 403:
            return _exceptions.PermissionDeniedError(err_msg, response=response, body=body)

        if response.status_code == 404:
            return _exceptions.NotFoundError(err_msg, response=response, body=body)

        if response.status_code == 409:
            return _exceptions.ConflictError(err_msg, response=response, body=body)

        if response.status_code == 413:
            return _exceptions.RequestTooLargeError(err_msg, response=response, body=body)

        if response.status_code == 422:
            return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body)

        if response.status_code == 429:
            return _exceptions.RateLimitError(err_msg, response=response, body=body)

        # 529 is Anthropic's "overloaded" status; check before the generic 5xx bucket.
        if response.status_code == 529:
            return _exceptions.OverloadedError(err_msg, response=response, body=body)

        if response.status_code >= 500:
            return _exceptions.InternalServerError(err_msg, response=response, body=body)
        return APIStatusError(err_msg, response=response, body=body)
|
||||
|
||||
|
||||
class AnthropicWithRawResponse:
    """View over an `Anthropic` client whose resource accessors return
    raw-response wrappers instead of the plain resources."""

    _client: Anthropic

    def __init__(self, client: Anthropic) -> None:
        self._client = client

    @cached_property
    def completions(self) -> completions.CompletionsWithRawResponse:
        # Imported lazily; aliased to keep the wrapping one-liner readable.
        from .resources.completions import CompletionsWithRawResponse as _Wrapper

        return _Wrapper(self._client.completions)

    @cached_property
    def messages(self) -> messages.MessagesWithRawResponse:
        from .resources.messages import MessagesWithRawResponse as _Wrapper

        return _Wrapper(self._client.messages)

    @cached_property
    def models(self) -> models.ModelsWithRawResponse:
        from .resources.models import ModelsWithRawResponse as _Wrapper

        return _Wrapper(self._client.models)

    @cached_property
    def beta(self) -> beta.BetaWithRawResponse:
        from .resources.beta import BetaWithRawResponse as _Wrapper

        return _Wrapper(self._client.beta)
|
||||
|
||||
|
||||
class AsyncAnthropicWithRawResponse:
    """View over an `AsyncAnthropic` client whose resource accessors return
    raw-response wrappers instead of the plain resources."""

    _client: AsyncAnthropic

    def __init__(self, client: AsyncAnthropic) -> None:
        self._client = client

    @cached_property
    def completions(self) -> completions.AsyncCompletionsWithRawResponse:
        # Imported lazily; aliased to keep the wrapping one-liner readable.
        from .resources.completions import AsyncCompletionsWithRawResponse as _Wrapper

        return _Wrapper(self._client.completions)

    @cached_property
    def messages(self) -> messages.AsyncMessagesWithRawResponse:
        from .resources.messages import AsyncMessagesWithRawResponse as _Wrapper

        return _Wrapper(self._client.messages)

    @cached_property
    def models(self) -> models.AsyncModelsWithRawResponse:
        from .resources.models import AsyncModelsWithRawResponse as _Wrapper

        return _Wrapper(self._client.models)

    @cached_property
    def beta(self) -> beta.AsyncBetaWithRawResponse:
        from .resources.beta import AsyncBetaWithRawResponse as _Wrapper

        return _Wrapper(self._client.beta)
|
||||
|
||||
|
||||
class AnthropicWithStreamedResponse:
    """View over an `Anthropic` client whose resource accessors return
    streaming-response wrappers instead of the plain resources."""

    _client: Anthropic

    def __init__(self, client: Anthropic) -> None:
        self._client = client

    @cached_property
    def completions(self) -> completions.CompletionsWithStreamingResponse:
        # Imported lazily; aliased to keep the wrapping one-liner readable.
        from .resources.completions import CompletionsWithStreamingResponse as _Wrapper

        return _Wrapper(self._client.completions)

    @cached_property
    def messages(self) -> messages.MessagesWithStreamingResponse:
        from .resources.messages import MessagesWithStreamingResponse as _Wrapper

        return _Wrapper(self._client.messages)

    @cached_property
    def models(self) -> models.ModelsWithStreamingResponse:
        from .resources.models import ModelsWithStreamingResponse as _Wrapper

        return _Wrapper(self._client.models)

    @cached_property
    def beta(self) -> beta.BetaWithStreamingResponse:
        from .resources.beta import BetaWithStreamingResponse as _Wrapper

        return _Wrapper(self._client.beta)
|
||||
|
||||
|
||||
class AsyncAnthropicWithStreamedResponse:
    """View over an `AsyncAnthropic` client whose resource accessors return
    streaming-response wrappers instead of the plain resources."""

    _client: AsyncAnthropic

    def __init__(self, client: AsyncAnthropic) -> None:
        self._client = client

    @cached_property
    def completions(self) -> completions.AsyncCompletionsWithStreamingResponse:
        # Imported lazily; aliased to keep the wrapping one-liner readable.
        from .resources.completions import AsyncCompletionsWithStreamingResponse as _Wrapper

        return _Wrapper(self._client.completions)

    @cached_property
    def messages(self) -> messages.AsyncMessagesWithStreamingResponse:
        from .resources.messages import AsyncMessagesWithStreamingResponse as _Wrapper

        return _Wrapper(self._client.messages)

    @cached_property
    def models(self) -> models.AsyncModelsWithStreamingResponse:
        from .resources.models import AsyncModelsWithStreamingResponse as _Wrapper

        return _Wrapper(self._client.models)

    @cached_property
    def beta(self) -> beta.AsyncBetaWithStreamingResponse:
        from .resources.beta import AsyncBetaWithStreamingResponse as _Wrapper

        return _Wrapper(self._client.beta)
|
||||
|
||||
|
||||
# Generic aliases for the clients; both names are part of the public API
# (see `__all__` above).
Client = Anthropic

AsyncClient = AsyncAnthropic
|
||||
@@ -0,0 +1,225 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, cast, overload
|
||||
from datetime import date, datetime
|
||||
from typing_extensions import Self, Literal
|
||||
|
||||
import pydantic
|
||||
from pydantic.fields import FieldInfo
|
||||
|
||||
from ._types import IncEx, StrBytesIntFloat
|
||||
|
||||
_T = TypeVar("_T")
|
||||
_ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel)
|
||||
|
||||
# --------------- Pydantic v2, v3 compatibility ---------------
|
||||
|
||||
# Pyright incorrectly reports some of our functions as overriding a method when they don't
|
||||
# pyright: reportIncompatibleMethodOverride=false
|
||||
|
||||
# True when the installed pydantic is the legacy 1.x line; the helpers in this
# module branch on this flag to present one consistent interface.
PYDANTIC_V1 = pydantic.VERSION.startswith("1.")

if TYPE_CHECKING:
    # Type-checking stubs only: give the version-dependent re-exports below a
    # stable signature regardless of which pydantic version is installed.

    def parse_date(value: date | StrBytesIntFloat) -> date:  # noqa: ARG001
        ...

    def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:  # noqa: ARG001
        ...

    def get_args(t: type[Any]) -> tuple[Any, ...]:  # noqa: ARG001
        ...

    def is_union(tp: type[Any] | None) -> bool:  # noqa: ARG001
        ...

    def get_origin(t: type[Any]) -> type[Any] | None:  # noqa: ARG001
        ...

    def is_literal_type(type_: type[Any]) -> bool:  # noqa: ARG001
        ...

    def is_typeddict(type_: type[Any]) -> bool:  # noqa: ARG001
        ...

else:
    # v1 re-exports
    if PYDANTIC_V1:
        from pydantic.typing import (
            get_args as get_args,
            is_union as is_union,
            get_origin as get_origin,
            is_typeddict as is_typeddict,
            is_literal_type as is_literal_type,
        )
        from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
    else:
        # Newer pydantic: fall back to this SDK's own implementations.
        from ._utils import (
            get_args as get_args,
            is_union as is_union,
            get_origin as get_origin,
            parse_date as parse_date,
            is_typeddict as is_typeddict,
            parse_datetime as parse_datetime,
            is_literal_type as is_literal_type,
        )
|
||||
|
||||
|
||||
# refactored config
|
||||
# refactored config
# `ConfigDict` only exists in pydantic v2+; under v1 the name is exported as
# None so attempting to use it fails loudly (with no message — see TODO).
if TYPE_CHECKING:
    from pydantic import ConfigDict as ConfigDict
else:
    if PYDANTIC_V1:
        # TODO: provide an error message here?
        ConfigDict = None
    else:
        from pydantic import ConfigDict as ConfigDict
|
||||
|
||||
|
||||
# renamed methods / properties
|
||||
# renamed methods / properties
def parse_obj(model: type[_ModelT], value: object) -> _ModelT:
    """Validate *value* into an instance of *model*, across pydantic versions."""
    if not PYDANTIC_V1:
        return model.model_validate(value)
    # `parse_obj` is the pydantic-v1 spelling (deprecated in later versions).
    return cast(_ModelT, model.parse_obj(value))  # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
|
||||
|
||||
|
||||
def field_is_required(field: FieldInfo) -> bool:
    """Whether the field must be supplied by the caller."""
    if not PYDANTIC_V1:
        return field.is_required()
    return field.required  # type: ignore
|
||||
|
||||
|
||||
def field_get_default(field: FieldInfo) -> Any:
    """Return the field's default value.

    Under pydantic v2+, the `PydanticUndefined` sentinel is normalised to None.
    """
    default = field.get_default()
    if PYDANTIC_V1:
        return default

    from pydantic_core import PydanticUndefined

    # `==` (not `is`) matches the original comparison semantics.
    return None if default == PydanticUndefined else default
|
||||
|
||||
|
||||
def field_outer_type(field: FieldInfo) -> Any:
    """Return the declared (outer) type of the field."""
    if not PYDANTIC_V1:
        return field.annotation
    return field.outer_type_  # type: ignore
|
||||
|
||||
|
||||
def get_model_config(model: type[pydantic.BaseModel]) -> Any:
    """Return the model's configuration object for the installed pydantic version."""
    if not PYDANTIC_V1:
        return model.model_config
    return model.__config__  # type: ignore
|
||||
|
||||
|
||||
def get_model_fields(model: type[pydantic.BaseModel]) -> dict[str, FieldInfo]:
    """Return the mapping of field name -> FieldInfo for *model*."""
    if not PYDANTIC_V1:
        return model.model_fields
    return model.__fields__  # type: ignore
|
||||
|
||||
|
||||
def model_copy(model: _ModelT, *, deep: bool = False) -> _ModelT:
    """Copy *model*; `deep=True` also copies nested values."""
    if not PYDANTIC_V1:
        return model.model_copy(deep=deep)
    return model.copy(deep=deep)  # type: ignore
|
||||
|
||||
|
||||
def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:
    """Serialize *model* to a JSON string."""
    if not PYDANTIC_V1:
        return model.model_dump_json(indent=indent)
    return model.json(indent=indent)  # type: ignore
|
||||
|
||||
|
||||
def model_parse_json(model: type[_ModelT], data: str | bytes) -> _ModelT:
    """Parse a JSON payload (str or bytes) into an instance of *model*."""
    if not PYDANTIC_V1:
        return model.model_validate_json(data)
    return model.parse_raw(data)  # pyright: ignore[reportDeprecated]
|
||||
|
||||
|
||||
def model_dump(
    model: pydantic.BaseModel,
    *,
    exclude: IncEx | None = None,
    exclude_unset: bool = False,
    exclude_defaults: bool = False,
    warnings: bool = True,
    mode: Literal["json", "python"] = "python",
) -> dict[str, Any]:
    """Dump a model to a plain dict, bridging the v1/v2 API differences.

    On Pydantic v1 (when the instance has no `model_dump` shim) falls back to
    `.dict()`, which supports neither `mode` nor `warnings`.
    """
    use_v1_api = PYDANTIC_V1 and not hasattr(model, "model_dump")
    if use_v1_api:
        return cast(
            "dict[str, Any]",
            model.dict(  # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
                exclude=exclude,
                exclude_unset=exclude_unset,
                exclude_defaults=exclude_defaults,
            ),
        )
    return model.model_dump(
        mode=mode,
        exclude=exclude,
        exclude_unset=exclude_unset,
        exclude_defaults=exclude_defaults,
        # warnings are not supported in Pydantic v1
        warnings=True if PYDANTIC_V1 else warnings,
    )
|
||||
|
||||
|
||||
def model_parse(model: type[_ModelT], data: Any) -> _ModelT:
    """Validate arbitrary data into a model instance."""
    if not PYDANTIC_V1:
        return model.model_validate(data)
    return model.parse_obj(data)  # pyright: ignore[reportDeprecated]
|
||||
|
||||
|
||||
# generic models
|
||||
if TYPE_CHECKING:
|
||||
|
||||
class GenericModel(pydantic.BaseModel): ...
|
||||
|
||||
else:
|
||||
if PYDANTIC_V1:
|
||||
import pydantic.generics
|
||||
|
||||
class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ...
|
||||
else:
|
||||
# there no longer needs to be a distinction in v2 but
|
||||
# we still have to create our own subclass to avoid
|
||||
# inconsistent MRO ordering errors
|
||||
class GenericModel(pydantic.BaseModel): ...
|
||||
|
||||
|
||||
# cached properties
|
||||
if TYPE_CHECKING:
|
||||
cached_property = property
|
||||
|
||||
# we define a separate type (copied from typeshed)
|
||||
# that represents that `cached_property` is `set`able
|
||||
# at runtime, which differs from `@property`.
|
||||
#
|
||||
# this is a separate type as editors likely special case
|
||||
# `@property` and we don't want to cause issues just to have
|
||||
# more helpful internal types.
|
||||
|
||||
class typed_cached_property(Generic[_T]):
|
||||
func: Callable[[Any], _T]
|
||||
attrname: str | None
|
||||
|
||||
def __init__(self, func: Callable[[Any], _T]) -> None: ...
|
||||
|
||||
@overload
|
||||
def __get__(self, instance: None, owner: type[Any] | None = None) -> Self: ...
|
||||
|
||||
@overload
|
||||
def __get__(self, instance: object, owner: type[Any] | None = None) -> _T: ...
|
||||
|
||||
def __get__(self, instance: object, owner: type[Any] | None = None) -> _T | Self:
|
||||
raise NotImplementedError()
|
||||
|
||||
def __set_name__(self, owner: type[Any], name: str) -> None: ...
|
||||
|
||||
# __set__ is not defined at runtime, but @cached_property is designed to be settable
|
||||
def __set__(self, instance: object, value: _T) -> None: ...
|
||||
else:
|
||||
from functools import cached_property as cached_property
|
||||
|
||||
typed_cached_property = cached_property
|
||||
@@ -0,0 +1,29 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

import httpx

# Internal header names used to smuggle per-request behaviour through kwargs.
RAW_RESPONSE_HEADER = "X-Stainless-Raw-Response"
OVERRIDE_CAST_TO_HEADER = "____stainless_override_cast_to"

# default timeout is 10 minutes
DEFAULT_TIMEOUT = httpx.Timeout(timeout=10 * 60, connect=5.0)
DEFAULT_MAX_RETRIES = 2
DEFAULT_CONNECTION_LIMITS = httpx.Limits(max_connections=1000, max_keepalive_connections=100)
|
||||
|
||||
# Bounds (in seconds) for the backoff between request retries.
INITIAL_RETRY_DELAY = 0.5
MAX_RETRY_DELAY = 8.0

# Prompt markers for the legacy Text Completions API.
HUMAN_PROMPT = "\n\nHuman:"

AI_PROMPT = "\n\nAssistant:"

# Largest `max_tokens` for which a non-streaming request is allowed, keyed by
# model alias (plain name, `:0`-suffixed and `@`-dated variants).
# Consistency fix: all entries now use the `8_192` digit-grouped literal
# (the table previously mixed `8192` and `8_192`).
MODEL_NONSTREAMING_TOKENS = {
    "claude-opus-4-20250514": 8_192,
    "claude-opus-4-0": 8_192,
    "claude-4-opus-20250514": 8_192,
    "anthropic.claude-opus-4-20250514-v1:0": 8_192,
    "claude-opus-4@20250514": 8_192,
    "claude-opus-4-1-20250805": 8_192,
    "anthropic.claude-opus-4-1-20250805-v1:0": 8_192,
    "claude-opus-4-1@20250805": 8_192,
}
|
||||
@@ -0,0 +1,123 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing_extensions import Generic, TypeVar, Iterator, AsyncIterator
|
||||
|
||||
import httpx
|
||||
|
||||
from .._models import construct_type_unchecked
|
||||
|
||||
_T = TypeVar("_T")
|
||||
|
||||
|
||||
class JSONLDecoder(Generic[_T]):
    """A decoder for [JSON Lines](https://jsonlines.org) format.

    This class provides an iterator over a byte-iterator that parses each JSON Line
    into a given type.
    """

    http_response: httpx.Response
    """The HTTP response this decoder was constructed from"""

    def __init__(
        self,
        *,
        raw_iterator: Iterator[bytes],
        line_type: type[_T],
        http_response: httpx.Response,
    ) -> None:
        super().__init__()
        self.http_response = http_response
        self._raw_iterator = raw_iterator
        self._line_type = line_type
        # decoding is lazy: nothing is read from `raw_iterator` until iteration starts
        self._iterator = self.__decode__()

    def close(self) -> None:
        """Close the response body stream.

        This is called automatically if you consume the entire stream.
        """
        self.http_response.close()

    def __decode__(self) -> Iterator[_T]:
        """Accumulate raw bytes into newline-terminated lines and decode each one."""
        buf = b""
        for chunk in self._raw_iterator:
            for line in chunk.splitlines(keepends=True):
                buf += line
                # only decode once a full line terminator has been seen;
                # a chunk may end mid-line, so the tail is carried over
                if buf.endswith((b"\r", b"\n", b"\r\n")):
                    yield construct_type_unchecked(
                        value=json.loads(buf),
                        type_=self._line_type,
                    )
                    buf = b""

        # flush
        if buf:
            yield construct_type_unchecked(
                value=json.loads(buf),
                type_=self._line_type,
            )

    def __next__(self) -> _T:
        return self._iterator.__next__()

    def __iter__(self) -> Iterator[_T]:
        for item in self._iterator:
            yield item
|
||||
|
||||
|
||||
class AsyncJSONLDecoder(Generic[_T]):
    """A decoder for [JSON Lines](https://jsonlines.org) format.

    This class provides an async iterator over a byte-iterator that parses each JSON Line
    into a given type.
    """

    # the HTTP response this decoder was constructed from
    http_response: httpx.Response

    def __init__(
        self,
        *,
        raw_iterator: AsyncIterator[bytes],
        line_type: type[_T],
        http_response: httpx.Response,
    ) -> None:
        super().__init__()
        self.http_response = http_response
        self._raw_iterator = raw_iterator
        self._line_type = line_type
        # decoding is lazy: nothing is read from `raw_iterator` until iteration starts
        self._iterator = self.__decode__()

    async def close(self) -> None:
        """Close the response body stream.

        This is called automatically if you consume the entire stream.
        """
        await self.http_response.aclose()

    async def __decode__(self) -> AsyncIterator[_T]:
        """Accumulate raw bytes into newline-terminated lines and decode each one."""
        buf = b""
        async for chunk in self._raw_iterator:
            for line in chunk.splitlines(keepends=True):
                buf += line
                # only decode once a full line terminator has been seen;
                # a chunk may end mid-line, so the tail is carried over
                if buf.endswith((b"\r", b"\n", b"\r\n")):
                    yield construct_type_unchecked(
                        value=json.loads(buf),
                        type_=self._line_type,
                    )
                    buf = b""

        # flush
        if buf:
            yield construct_type_unchecked(
                value=json.loads(buf),
                type_=self._line_type,
            )

    async def __anext__(self) -> _T:
        return await self._iterator.__anext__()

    async def __aiter__(self) -> AsyncIterator[_T]:
        async for item in self._iterator:
            yield item
|
||||
@@ -0,0 +1,129 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing_extensions import Literal
|
||||
|
||||
import httpx
|
||||
|
||||
# NOTE(review): only the per-status-code subclasses are listed here; base
# classes such as `AnthropicError` / `APIError` / `APIStatusError` appear to be
# re-exported explicitly by the package `__init__` instead — confirm before
# extending this list.
__all__ = [
    "BadRequestError",
    "AuthenticationError",
    "PermissionDeniedError",
    "NotFoundError",
    "ConflictError",
    "UnprocessableEntityError",
    "RateLimitError",
    "InternalServerError",
]
|
||||
|
||||
|
||||
class AnthropicError(Exception):
    """Root of the exception hierarchy for this SDK."""

    pass
|
||||
|
||||
|
||||
class APIError(AnthropicError):
    """Base class for errors that involve an HTTP request (with or without a response)."""

    message: str
    request: httpx.Request

    body: object | None
    """The API response body.

    If the API responded with a valid JSON structure then this property will be the
    decoded result.

    If it isn't a valid JSON structure then this will be the raw response.

    If there was no response associated with this error then it will be `None`.
    """

    def __init__(self, message: str, request: httpx.Request, *, body: object | None) -> None:  # noqa: ARG002
        super().__init__(message)
        self.request = request
        self.message = message
        self.body = body
|
||||
|
||||
|
||||
class APIResponseValidationError(APIError):
    """Raised when data returned by the API is invalid for the expected schema."""

    response: httpx.Response
    status_code: int

    def __init__(self, response: httpx.Response, body: object | None, *, message: str | None = None) -> None:
        # fall back to a generic message when the caller doesn't provide one
        super().__init__(message or "Data returned by API invalid for expected schema.", response.request, body=body)
        self.response = response
        self.status_code = response.status_code
|
||||
|
||||
|
||||
class APIStatusError(APIError):
    """Raised when an API response has a status code of 4xx or 5xx."""

    response: httpx.Response
    status_code: int
    request_id: str | None

    def __init__(self, message: str, *, response: httpx.Response, body: object | None) -> None:
        super().__init__(message, response.request, body=body)
        self.response = response
        self.status_code = response.status_code
        # captured from the response headers for debugging/support purposes
        self.request_id = response.headers.get("request-id")
|
||||
|
||||
|
||||
class APIConnectionError(APIError):
    """Raised when the request could not be completed due to a connection problem."""

    def __init__(self, *, message: str = "Connection error.", request: httpx.Request) -> None:
        # there is no response in this case, hence body=None
        super().__init__(message, request, body=None)
|
||||
|
||||
|
||||
class APITimeoutError(APIConnectionError):
    """Connection error raised specifically when a request times out or is interrupted."""

    def __init__(self, request: httpx.Request) -> None:
        super().__init__(
            message="Request timed out or interrupted. This could be due to a network timeout, dropped connection, or request cancellation. See https://docs.anthropic.com/en/api/errors#long-requests for more details.",
            request=request,
        )
|
||||
|
||||
|
||||
# Concrete per-status-code error classes. Each pins `status_code` with a
# `Literal` so type checkers can narrow on the specific code.


class BadRequestError(APIStatusError):
    status_code: Literal[400] = 400  # pyright: ignore[reportIncompatibleVariableOverride]


class AuthenticationError(APIStatusError):
    status_code: Literal[401] = 401  # pyright: ignore[reportIncompatibleVariableOverride]


class PermissionDeniedError(APIStatusError):
    status_code: Literal[403] = 403  # pyright: ignore[reportIncompatibleVariableOverride]


class NotFoundError(APIStatusError):
    status_code: Literal[404] = 404  # pyright: ignore[reportIncompatibleVariableOverride]


class ConflictError(APIStatusError):
    status_code: Literal[409] = 409  # pyright: ignore[reportIncompatibleVariableOverride]


class RequestTooLargeError(APIStatusError):
    status_code: Literal[413] = 413  # pyright: ignore[reportIncompatibleVariableOverride]


class UnprocessableEntityError(APIStatusError):
    status_code: Literal[422] = 422  # pyright: ignore[reportIncompatibleVariableOverride]


class RateLimitError(APIStatusError):
    status_code: Literal[429] = 429  # pyright: ignore[reportIncompatibleVariableOverride]


class ServiceUnavailableError(APIStatusError):
    status_code: Literal[503] = 503  # pyright: ignore[reportIncompatibleVariableOverride]


class OverloadedError(APIStatusError):
    status_code: Literal[529] = 529  # pyright: ignore[reportIncompatibleVariableOverride]


class DeadlineExceededError(APIStatusError):
    status_code: Literal[504] = 504  # pyright: ignore[reportIncompatibleVariableOverride]


class InternalServerError(APIStatusError):
    # catch-all for 5xx responses without a dedicated subclass above
    pass
|
||||
@@ -0,0 +1,123 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
import os
|
||||
import pathlib
|
||||
from typing import overload
|
||||
from typing_extensions import TypeGuard
|
||||
|
||||
import anyio
|
||||
|
||||
from ._types import (
|
||||
FileTypes,
|
||||
FileContent,
|
||||
RequestFiles,
|
||||
HttpxFileTypes,
|
||||
Base64FileInput,
|
||||
HttpxFileContent,
|
||||
HttpxRequestFiles,
|
||||
)
|
||||
from ._utils import is_tuple_t, is_mapping_t, is_sequence_t
|
||||
|
||||
|
||||
def is_base64_file_input(obj: object) -> TypeGuard[Base64FileInput]:
    """True when *obj* is a readable file object or a filesystem path."""
    return isinstance(obj, (io.IOBase, os.PathLike))
|
||||
|
||||
|
||||
def is_file_content(obj: object) -> TypeGuard[FileContent]:
    """True when *obj* is an acceptable raw file payload: bytes, a tuple, a file object or a path."""
    return isinstance(obj, (bytes, tuple, io.IOBase, os.PathLike))
|
||||
|
||||
|
||||
def assert_is_file_content(obj: object, *, key: str | None = None) -> None:
    """Raise `RuntimeError` when *obj* is not a valid file payload.

    *key* names the entry being validated so error messages can point at it.
    """
    if is_file_content(obj):
        return
    prefix = f"Expected file input `{obj!r}`" if key is None else f"Expected entry at `{key}`"
    raise RuntimeError(
        f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead. See https://github.com/anthropics/anthropic-sdk-python/tree/main#file-uploads"
    ) from None
|
||||
|
||||
|
||||
@overload
def to_httpx_files(files: None) -> None: ...


@overload
def to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ...


def to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None:
    """Convert our `RequestFiles` input into the structure httpx expects.

    Raises:
        TypeError: when *files* is neither a mapping nor a sequence.
    """
    if files is None:
        return None

    if is_mapping_t(files):
        return {name: _transform_file(contents) for name, contents in files.items()}
    if is_sequence_t(files):
        return [(name, _transform_file(contents)) for name, contents in files]
    raise TypeError(f"Unexpected file type input {type(files)}, expected mapping or sequence")
|
||||
|
||||
|
||||
def _transform_file(file: FileTypes) -> HttpxFileTypes:
    """Normalise a single file entry into the form httpx accepts.

    Paths are read eagerly into ``(filename, bytes)``; tuple entries get their
    content element resolved; other `FileContent` values pass through as-is.

    Raises:
        TypeError: when *file* is neither file content nor a tuple.
    """
    if is_file_content(file):
        if isinstance(file, os.PathLike):
            path = pathlib.Path(file)
            return (path.name, path.read_bytes())

        return file

    if is_tuple_t(file):
        # resolve the content element (index 1); the filename and any extra
        # entries (content type, headers) are preserved untouched
        return (file[0], read_file_content(file[1]), *file[2:])

    # fix: dropped a pointless `f` prefix — the message has no placeholders (F541)
    raise TypeError("Expected file types input to be a FileContent type or to be a tuple")
|
||||
|
||||
|
||||
def read_file_content(file: FileContent) -> HttpxFileContent:
    """Resolve a path-like value to its bytes; return anything else unchanged."""
    if not isinstance(file, os.PathLike):
        return file
    return pathlib.Path(file).read_bytes()
|
||||
|
||||
|
||||
@overload
async def async_to_httpx_files(files: None) -> None: ...


@overload
async def async_to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ...


async def async_to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None:
    """Async counterpart of `to_httpx_files`.

    Raises:
        TypeError: when *files* is neither a mapping nor a sequence.
    """
    if files is None:
        return None

    if is_mapping_t(files):
        files = {key: await _async_transform_file(file) for key, file in files.items()}
    elif is_sequence_t(files):
        files = [(key, await _async_transform_file(file)) for key, file in files]
    else:
        # bug fix: this message was missing its `f` prefix, so callers saw the
        # literal text "{type(files)}" instead of the offending type
        raise TypeError(f"Unexpected file type input {type(files)}, expected mapping or sequence")

    return files
|
||||
|
||||
|
||||
async def _async_transform_file(file: FileTypes) -> HttpxFileTypes:
    """Async variant of `_transform_file`; path contents are read via `anyio`.

    Raises:
        TypeError: when *file* is neither file content nor a tuple.
    """
    if is_file_content(file):
        if isinstance(file, os.PathLike):
            path = anyio.Path(file)
            return (path.name, await path.read_bytes())

        return file

    if is_tuple_t(file):
        # resolve the content element (index 1); filename and any extra
        # entries are preserved untouched
        return (file[0], await async_read_file_content(file[1]), *file[2:])

    # fix: dropped a pointless `f` prefix — the message has no placeholders (F541)
    raise TypeError("Expected file types input to be a FileContent type or to be a tuple")
|
||||
|
||||
|
||||
async def async_read_file_content(file: FileContent) -> HttpxFileContent:
    """Asynchronously resolve a path-like value to its bytes; return anything else unchanged."""
    if not isinstance(file, os.PathLike):
        return file
    return await anyio.Path(file).read_bytes()
|
||||
@@ -0,0 +1,511 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import inspect
|
||||
import logging
|
||||
import datetime
|
||||
import functools
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Union,
|
||||
Generic,
|
||||
TypeVar,
|
||||
Callable,
|
||||
Iterator,
|
||||
AsyncIterator,
|
||||
cast,
|
||||
overload,
|
||||
)
|
||||
from typing_extensions import Awaitable, ParamSpec, override, deprecated, get_origin
|
||||
|
||||
import anyio
|
||||
import httpx
|
||||
import pydantic
|
||||
|
||||
from ._types import NoneType
|
||||
from ._utils import is_given, extract_type_arg, is_annotated_type, is_type_alias_type
|
||||
from ._models import BaseModel, is_basemodel, add_request_id
|
||||
from ._constants import RAW_RESPONSE_HEADER
|
||||
from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type
|
||||
from ._exceptions import APIResponseValidationError
|
||||
from ._decoders.jsonl import JSONLDecoder, AsyncJSONLDecoder
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ._models import FinalRequestOptions
|
||||
from ._base_client import BaseClient
|
||||
|
||||
|
||||
P = ParamSpec("P")
|
||||
R = TypeVar("R")
|
||||
_T = TypeVar("_T")
|
||||
_T_co = TypeVar("_T_co", covariant=True)
|
||||
|
||||
log: logging.Logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LegacyAPIResponse(Generic[R]):
|
||||
"""This is a legacy class as it will be replaced by `APIResponse`
|
||||
and `AsyncAPIResponse` in the `_response.py` file in the next major
|
||||
release.
|
||||
|
||||
For the sync client this will mostly be the same with the exception
|
||||
of `content` & `text` will be methods instead of properties. In the
|
||||
async client, all methods will be async.
|
||||
|
||||
A migration script will be provided & the migration in general should
|
||||
be smooth.
|
||||
"""
|
||||
|
||||
_cast_to: type[R]
|
||||
_client: BaseClient[Any, Any]
|
||||
_parsed_by_type: dict[type[Any], Any]
|
||||
_stream: bool
|
||||
_stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None
|
||||
_options: FinalRequestOptions
|
||||
|
||||
http_response: httpx.Response
|
||||
|
||||
retries_taken: int
|
||||
"""The number of retries made. If no retries happened this will be `0`"""
|
||||
|
||||
def __init__(
    self,
    *,
    raw: httpx.Response,
    cast_to: type[R],
    client: BaseClient[Any, Any],
    stream: bool,
    stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
    options: FinalRequestOptions,
    retries_taken: int = 0,
) -> None:
    """Wrap a raw httpx response together with its parsing/streaming context.

    `cast_to` is the type the request asked for; parse results are cached
    per target type in `_parsed_by_type`.
    """
    self._cast_to = cast_to
    self._client = client
    self._parsed_by_type = {}
    self._stream = stream
    self._stream_cls = stream_cls
    self._options = options
    self.http_response = raw
    self.retries_taken = retries_taken
|
||||
|
||||
@property
def request_id(self) -> str | None:
    """The `request-id` response header, if present."""
    return self.http_response.headers.get("request-id")  # type: ignore[no-any-return]
|
||||
|
||||
@overload
def parse(self, *, to: type[_T]) -> _T: ...


@overload
def parse(self) -> R: ...


def parse(self, *, to: type[_T] | None = None) -> R | _T:
    """Returns the rich python representation of this response's data.

    NOTE: For the async client: this will become a coroutine in the next major version.

    For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`.

    You can customise the type that the response is parsed into through
    the `to` argument, e.g.

    ```py
    from anthropic import BaseModel


    class MyModel(BaseModel):
        foo: str


    obj = response.parse(to=MyModel)
    print(obj.foo)
    ```

    We support parsing:
      - `BaseModel`
      - `dict`
      - `list`
      - `Union`
      - `str`
      - `int`
      - `float`
      - `httpx.Response`
    """
    # results are memoised per requested target type, so repeat calls are free
    cache_key = to if to is not None else self._cast_to
    cached = self._parsed_by_type.get(cache_key)
    if cached is not None:
        return cached  # type: ignore[no-any-return]

    parsed = self._parse(to=to)
    if is_given(self._options.post_parser):
        parsed = self._options.post_parser(parsed)

    # attach the request id to parsed models for easier debugging
    if isinstance(parsed, BaseModel):
        add_request_id(parsed, self.request_id)

    self._parsed_by_type[cache_key] = parsed
    return cast(R, parsed)
|
||||
|
||||
@property
def headers(self) -> httpx.Headers:
    """The response's HTTP headers."""
    return self.http_response.headers


@property
def http_request(self) -> httpx.Request:
    """The request that produced this response."""
    return self.http_response.request


@property
def status_code(self) -> int:
    """The HTTP status code of the response."""
    return self.http_response.status_code


@property
def url(self) -> httpx.URL:
    """The URL of the underlying request."""
    return self.http_response.url


@property
def method(self) -> str:
    """The HTTP method of the underlying request."""
    return self.http_request.method


@property
def content(self) -> bytes:
    """Return the binary response content.

    NOTE: this will be removed in favour of `.read()` in the
    next major version.
    """
    return self.http_response.content


@property
def text(self) -> str:
    """Return the decoded response content.

    NOTE: this will be turned into a method in the next major version.
    """
    return self.http_response.text


@property
def http_version(self) -> str:
    """The HTTP protocol version used for the response (as reported by httpx)."""
    return self.http_response.http_version


@property
def is_closed(self) -> bool:
    """Whether the underlying response has been closed."""
    return self.http_response.is_closed


@property
def elapsed(self) -> datetime.timedelta:
    """The time taken for the complete request/response cycle to complete."""
    return self.http_response.elapsed
|
||||
|
||||
def _parse(self, *, to: type[_T] | None = None) -> R | _T:
    """Parse the raw response into the requested type.

    Dispatch order: JSONL decoders → streaming classes → primitive casts →
    httpx.Response passthrough → JSON-decoded model/container types.
    """
    cast_to = to if to is not None else self._cast_to

    # unwrap `TypeAlias('Name', T)` -> `T`
    if is_type_alias_type(cast_to):
        cast_to = cast_to.__value__  # type: ignore[unreachable]

    # unwrap `Annotated[T, ...]` -> `T`
    if cast_to and is_annotated_type(cast_to):
        cast_to = extract_type_arg(cast_to, 0)

    origin = get_origin(cast_to) or cast_to

    if inspect.isclass(origin):
        # JSON Lines targets wrap the byte stream rather than reading the body
        if issubclass(cast(Any, origin), JSONLDecoder):
            return cast(
                R,
                cast("type[JSONLDecoder[Any]]", cast_to)(
                    raw_iterator=self.http_response.iter_bytes(chunk_size=64),
                    line_type=extract_type_arg(cast_to, 0),
                    http_response=self.http_response,
                ),
            )

        if issubclass(cast(Any, origin), AsyncJSONLDecoder):
            return cast(
                R,
                cast("type[AsyncJSONLDecoder[Any]]", cast_to)(
                    raw_iterator=self.http_response.aiter_bytes(chunk_size=64),
                    line_type=extract_type_arg(cast_to, 0),
                    http_response=self.http_response,
                ),
            )

    if self._stream:
        # a custom `to=` stream type takes precedence over the configured one
        if to:
            if not is_stream_class_type(to):
                raise TypeError(f"Expected custom parse type to be a subclass of {Stream} or {AsyncStream}")

            return cast(
                _T,
                to(
                    cast_to=extract_stream_chunk_type(
                        to,
                        failure_message="Expected custom stream type to be passed with a type argument, e.g. Stream[ChunkType]",
                    ),
                    response=self.http_response,
                    client=cast(Any, self._client),
                ),
            )

        if self._stream_cls:
            return cast(
                R,
                self._stream_cls(
                    cast_to=extract_stream_chunk_type(self._stream_cls),
                    response=self.http_response,
                    client=cast(Any, self._client),
                ),
            )

        # fall back to the client-wide default stream class
        stream_cls = cast("type[Stream[Any]] | type[AsyncStream[Any]] | None", self._client._default_stream_cls)
        if stream_cls is None:
            raise MissingStreamClassError()

        return cast(
            R,
            stream_cls(
                cast_to=cast_to,
                response=self.http_response,
                client=cast(Any, self._client),
            ),
        )

    if cast_to is NoneType:
        return cast(R, None)

    # primitive targets are parsed straight from the response text
    response = self.http_response
    if cast_to == str:
        return cast(R, response.text)

    if cast_to == int:
        return cast(R, int(response.text))

    if cast_to == float:
        return cast(R, float(response.text))

    if cast_to == bool:
        return cast(R, response.text.lower() == "true")

    if inspect.isclass(origin) and issubclass(origin, HttpxBinaryResponseContent):
        return cast(R, cast_to(response))  # type: ignore

    if origin == LegacyAPIResponse:
        raise RuntimeError("Unexpected state - cast_to is `APIResponse`")

    if inspect.isclass(
        origin  # pyright: ignore[reportUnknownArgumentType]
    ) and issubclass(origin, httpx.Response):
        # Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response
        # and pass that class to our request functions. We cannot change the variance to be either
        # covariant or contravariant as that makes our usage of ResponseT illegal. We could construct
        # the response class ourselves but that is something that should be supported directly in httpx
        # as it would be easy to incorrectly construct the Response object due to the multitude of arguments.
        if cast_to != httpx.Response:
            raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`")
        return cast(R, response)

    if (
        inspect.isclass(
            origin  # pyright: ignore[reportUnknownArgumentType]
        )
        and not issubclass(origin, BaseModel)
        and issubclass(origin, pydantic.BaseModel)
    ):
        raise TypeError("Pydantic models must subclass our base model type, e.g. `from anthropic import BaseModel`")

    if (
        cast_to is not object
        and not origin is list
        and not origin is dict
        and not origin is Union
        and not issubclass(origin, BaseModel)
    ):
        raise RuntimeError(
            f"Unsupported type, expected {cast_to} to be a subclass of {BaseModel}, {dict}, {list}, {Union}, {NoneType}, {str} or {httpx.Response}."
        )

    # split is required to handle cases where additional information is included
    # in the response, e.g. application/json; charset=utf-8
    content_type, *_ = response.headers.get("content-type", "*").split(";")
    if not content_type.endswith("json"):
        # best-effort: the body may still be JSON despite the content type
        if is_basemodel(cast_to):
            try:
                data = response.json()
            except Exception as exc:
                log.debug("Could not read JSON from response data due to %s - %s", type(exc), exc)
            else:
                return self._client._process_response_data(
                    data=data,
                    cast_to=cast_to,  # type: ignore
                    response=response,
                )

        if self._client._strict_response_validation:
            raise APIResponseValidationError(
                response=response,
                message=f"Expected Content-Type response header to be `application/json` but received `{content_type}` instead.",
                body=response.text,
            )

        # If the API responds with content that isn't JSON then we just return
        # the (decoded) text without performing any parsing so that you can still
        # handle the response however you need to.
        return response.text  # type: ignore

    data = response.json()

    return self._client._process_response_data(
        data=data,
        cast_to=cast_to,  # type: ignore
        response=response,
    )
|
||||
|
||||
@override
def __repr__(self) -> str:
    # includes the parse target type so mis-parses are easier to spot in logs
    return f"<APIResponse [{self.status_code} {self.http_response.reason_phrase}] type={self._cast_to}>"
|
||||
|
||||
|
||||
class MissingStreamClassError(TypeError):
    """Raised when a streaming response is requested but no stream class is available."""

    def __init__(self) -> None:
        super().__init__(
            "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `anthropic._streaming` for reference",
        )
|
||||
|
||||
|
||||
def to_raw_response_wrapper(func: Callable[P, R]) -> Callable[P, LegacyAPIResponse[R]]:
    """Wrap a bound API method so it returns the raw `APIResponse` object directly.

    Works by injecting the internal raw-response marker header into the
    request's extra headers before delegating to the wrapped method.
    """

    @functools.wraps(func)
    def wrapped(*args: P.args, **kwargs: P.kwargs) -> LegacyAPIResponse[R]:
        headers: dict[str, str] = dict(cast(Any, kwargs.get("extra_headers")) or {})
        headers[RAW_RESPONSE_HEADER] = "true"
        kwargs["extra_headers"] = headers
        return cast(LegacyAPIResponse[R], func(*args, **kwargs))

    return wrapped
|
||||
|
||||
|
||||
def async_to_raw_response_wrapper(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[LegacyAPIResponse[R]]]:
    """Async counterpart of `to_raw_response_wrapper`.

    Injects the internal raw-response marker header, then awaits the wrapped
    method and returns its result as a raw `APIResponse`.
    """

    @functools.wraps(func)
    async def wrapped(*args: P.args, **kwargs: P.kwargs) -> LegacyAPIResponse[R]:
        headers: dict[str, str] = dict(cast(Any, kwargs.get("extra_headers")) or {})
        headers[RAW_RESPONSE_HEADER] = "true"
        kwargs["extra_headers"] = headers
        return cast(LegacyAPIResponse[R], await func(*args, **kwargs))

    return wrapped
|
||||
|
||||
|
||||
class HttpxBinaryResponseContent:
    """Thin wrapper over an `httpx.Response` exposing its binary content.

    Every accessor delegates directly to the wrapped response; sync and async
    variants mirror each other.
    """

    response: httpx.Response

    def __init__(self, response: httpx.Response) -> None:
        self.response = response

    @property
    def content(self) -> bytes:
        return self.response.content

    @property
    def text(self) -> str:
        return self.response.text

    @property
    def encoding(self) -> str | None:
        return self.response.encoding

    @property
    def charset_encoding(self) -> str | None:
        return self.response.charset_encoding

    def json(self, **kwargs: Any) -> Any:
        return self.response.json(**kwargs)

    def read(self) -> bytes:
        return self.response.read()

    def iter_bytes(self, chunk_size: int | None = None) -> Iterator[bytes]:
        return self.response.iter_bytes(chunk_size)

    def iter_text(self, chunk_size: int | None = None) -> Iterator[str]:
        return self.response.iter_text(chunk_size)

    def iter_lines(self) -> Iterator[str]:
        return self.response.iter_lines()

    def iter_raw(self, chunk_size: int | None = None) -> Iterator[bytes]:
        return self.response.iter_raw(chunk_size)

    def write_to_file(
        self,
        file: str | os.PathLike[str],
    ) -> None:
        """Write the output to the given file.

        Accepts a filename or any path-like object, e.g. pathlib.Path

        Note: if you want to stream the data to the file instead of writing
        all at once then you should use `.with_streaming_response` when making
        the API request, e.g. `client.with_streaming_response.foo().stream_to_file('my_filename.txt')`
        """
        with open(file, mode="wb") as f:
            for data in self.response.iter_bytes():
                f.write(data)

    @deprecated(
        "Due to a bug, this method doesn't actually stream the response content, `.with_streaming_response.method()` should be used instead"
    )
    def stream_to_file(
        self,
        file: str | os.PathLike[str],
        *,
        chunk_size: int | None = None,
    ) -> None:
        # deprecated: iterates an already-read body, see decorator message
        with open(file, mode="wb") as f:
            for data in self.response.iter_bytes(chunk_size):
                f.write(data)

    def close(self) -> None:
        return self.response.close()

    async def aread(self) -> bytes:
        return await self.response.aread()

    async def aiter_bytes(self, chunk_size: int | None = None) -> AsyncIterator[bytes]:
        return self.response.aiter_bytes(chunk_size)

    async def aiter_text(self, chunk_size: int | None = None) -> AsyncIterator[str]:
        return self.response.aiter_text(chunk_size)

    async def aiter_lines(self) -> AsyncIterator[str]:
        return self.response.aiter_lines()

    async def aiter_raw(self, chunk_size: int | None = None) -> AsyncIterator[bytes]:
        return self.response.aiter_raw(chunk_size)

    @deprecated(
        "Due to a bug, this method doesn't actually stream the response content, `.with_streaming_response.method()` should be used instead"
    )
    async def astream_to_file(
        self,
        file: str | os.PathLike[str],
        *,
        chunk_size: int | None = None,
    ) -> None:
        # deprecated: iterates an already-read body, see decorator message
        path = anyio.Path(file)
        async with await path.open(mode="wb") as f:
            async for data in self.response.aiter_bytes(chunk_size):
                await f.write(data)

    async def aclose(self) -> None:
        return await self.response.aclose()
|
||||
@@ -0,0 +1,903 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import inspect
|
||||
import weakref
|
||||
from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, Optional, cast
|
||||
from datetime import date, datetime
|
||||
from typing_extensions import (
|
||||
List,
|
||||
Unpack,
|
||||
Literal,
|
||||
ClassVar,
|
||||
Protocol,
|
||||
Required,
|
||||
ParamSpec,
|
||||
TypedDict,
|
||||
TypeGuard,
|
||||
final,
|
||||
override,
|
||||
runtime_checkable,
|
||||
)
|
||||
|
||||
import pydantic
|
||||
from pydantic.fields import FieldInfo
|
||||
|
||||
from ._types import (
|
||||
Body,
|
||||
IncEx,
|
||||
Query,
|
||||
ModelT,
|
||||
Headers,
|
||||
Timeout,
|
||||
NotGiven,
|
||||
AnyMapping,
|
||||
HttpxRequestFiles,
|
||||
)
|
||||
from ._utils import (
|
||||
PropertyInfo,
|
||||
is_list,
|
||||
is_given,
|
||||
json_safe,
|
||||
lru_cache,
|
||||
is_mapping,
|
||||
parse_date,
|
||||
coerce_boolean,
|
||||
parse_datetime,
|
||||
strip_not_given,
|
||||
extract_type_arg,
|
||||
is_annotated_type,
|
||||
is_type_alias_type,
|
||||
strip_annotated_type,
|
||||
)
|
||||
from ._compat import (
|
||||
PYDANTIC_V1,
|
||||
ConfigDict,
|
||||
GenericModel as BaseGenericModel,
|
||||
get_args,
|
||||
is_union,
|
||||
parse_obj,
|
||||
get_origin,
|
||||
is_literal_type,
|
||||
get_model_config,
|
||||
get_model_fields,
|
||||
field_get_default,
|
||||
)
|
||||
from ._constants import RAW_RESPONSE_HEADER
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pydantic_core.core_schema import ModelField, ModelSchema, LiteralSchema, ModelFieldsSchema
|
||||
|
||||
__all__ = ["BaseModel", "GenericModel"]
|
||||
|
||||
# Generic type variables used throughout this module.
_T = TypeVar("_T")
_BaseModelT = TypeVar("_BaseModelT", bound="BaseModel")

# Parameter specification capturing a model constructor's signature (see `build()`).
P = ParamSpec("P")
||||
@runtime_checkable
class _ConfigProtocol(Protocol):
    """Structural type for a Pydantic v1 style config object (checked via `isinstance` in `BaseModel.construct`)."""

    allow_population_by_field_name: bool
||||
# NOTE: this class provides compatibility shims so the same public surface works
# on both Pydantic v1 and v2 — see the `PYDANTIC_V1` branches throughout.
class BaseModel(pydantic.BaseModel):
    if PYDANTIC_V1:

        @property
        @override
        def model_fields_set(self) -> set[str]:
            # a forwards-compat shim for pydantic v2
            return self.__fields_set__  # type: ignore

        class Config(pydantic.BaseConfig):  # pyright: ignore[reportDeprecated]
            extra: Any = pydantic.Extra.allow  # type: ignore
    else:
        # extra="allow" keeps unknown API fields instead of raising;
        # defer_build postpones schema building unless disabled via env var.
        model_config: ClassVar[ConfigDict] = ConfigDict(
            extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true"))
        )

    if TYPE_CHECKING:
        _request_id: Optional[str] = None
        """The ID of the request, returned via the `request-id` header. Useful for debugging requests and reporting issues to Anthropic.
        This will **only** be set for the top-level response object, it will not be defined for nested objects. For example:

        ```py
        message = await client.messages.create(...)
        message._request_id  # req_xxx
        message.usage._request_id  # raises `AttributeError`
        ```

        Note: unlike other properties that use an `_` prefix, this property
        *is* public. Unless documented otherwise, all other `_` prefix properties,
        methods and modules are *private*.
        """

    def to_dict(
        self,
        *,
        mode: Literal["json", "python"] = "python",
        use_api_names: bool = True,
        exclude_unset: bool = True,
        exclude_defaults: bool = False,
        exclude_none: bool = False,
        warnings: bool = True,
    ) -> dict[str, object]:
        """Recursively generate a dictionary representation of the model, optionally specifying which fields to include or exclude.

        By default, fields that were not set by the API will not be included,
        and keys will match the API response, *not* the property names from the model.

        For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property,
        the output will use the `"fooBar"` key (unless `use_api_names=False` is passed).

        Args:
            mode:
                If mode is 'json', the dictionary will only contain JSON serializable types. e.g. `datetime` will be turned into a string, `"2024-3-22T18:11:19.117000Z"`.
                If mode is 'python', the dictionary may contain any Python objects. e.g. `datetime(2024, 3, 22)`

            use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`.
            exclude_unset: Whether to exclude fields that have not been explicitly set.
            exclude_defaults: Whether to exclude fields that are set to their default value from the output.
            exclude_none: Whether to exclude fields that have a value of `None` from the output.
            warnings: Whether to log warnings when invalid fields are encountered. This is only supported in Pydantic v2.
        """
        return self.model_dump(
            mode=mode,
            by_alias=use_api_names,
            exclude_unset=exclude_unset,
            exclude_defaults=exclude_defaults,
            exclude_none=exclude_none,
            warnings=warnings,
        )

    def to_json(
        self,
        *,
        indent: int | None = 2,
        use_api_names: bool = True,
        exclude_unset: bool = True,
        exclude_defaults: bool = False,
        exclude_none: bool = False,
        warnings: bool = True,
    ) -> str:
        """Generates a JSON string representing this model as it would be received from or sent to the API (but with indentation).

        By default, fields that were not set by the API will not be included,
        and keys will match the API response, *not* the property names from the model.

        For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property,
        the output will use the `"fooBar"` key (unless `use_api_names=False` is passed).

        Args:
            indent: Indentation to use in the JSON output. If `None` is passed, the output will be compact. Defaults to `2`
            use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`.
            exclude_unset: Whether to exclude fields that have not been explicitly set.
            exclude_defaults: Whether to exclude fields that have the default value.
            exclude_none: Whether to exclude fields that have a value of `None`.
            warnings: Whether to show any warnings that occurred during serialization. This is only supported in Pydantic v2.
        """
        return self.model_dump_json(
            indent=indent,
            by_alias=use_api_names,
            exclude_unset=exclude_unset,
            exclude_defaults=exclude_defaults,
            exclude_none=exclude_none,
            warnings=warnings,
        )

    @override
    def __str__(self) -> str:
        # mypy complains about an invalid self arg
        return f"{self.__repr_name__()}({self.__repr_str__(', ')})"  # type: ignore[misc]

    # Override the 'construct' method in a way that supports recursive parsing without validation.
    # Based on https://github.com/samuelcolvin/pydantic/issues/1168#issuecomment-817742836.
    @classmethod
    @override
    def construct(  # pyright: ignore[reportIncompatibleMethodOverride]
        __cls: Type[ModelT],
        _fields_set: set[str] | None = None,
        **values: object,
    ) -> ModelT:
        m = __cls.__new__(__cls)
        fields_values: dict[str, object] = {}

        config = get_model_config(__cls)
        # v1-style config objects expose `allow_population_by_field_name`;
        # v2 dict-style configs use the "populate_by_name" key instead.
        populate_by_name = (
            config.allow_population_by_field_name
            if isinstance(config, _ConfigProtocol)
            else config.get("populate_by_name")
        )

        if _fields_set is None:
            _fields_set = set()

        model_fields = get_model_fields(__cls)
        for name, field in model_fields.items():
            # prefer the API alias as the lookup key, falling back to the
            # property name when allowed or when no alias is defined
            key = field.alias
            if key is None or (key not in values and populate_by_name):
                key = name

            if key in values:
                fields_values[name] = _construct_field(value=values[key], field=field, key=key)
                _fields_set.add(name)
            else:
                fields_values[name] = field_get_default(field)

        extra_field_type = _get_extra_fields_type(__cls)

        # collect values that don't correspond to any declared field
        _extra = {}
        for key, value in values.items():
            if key not in model_fields:
                parsed = construct_type(value=value, type_=extra_field_type) if extra_field_type is not None else value

                if PYDANTIC_V1:
                    _fields_set.add(key)
                    fields_values[key] = parsed
                else:
                    _extra[key] = parsed

        object.__setattr__(m, "__dict__", fields_values)

        if PYDANTIC_V1:
            # init_private_attributes() does not exist in v2
            m._init_private_attributes()  # type: ignore

            # copied from Pydantic v1's `construct()` method
            object.__setattr__(m, "__fields_set__", _fields_set)
        else:
            # these properties are copied from Pydantic's `model_construct()` method
            object.__setattr__(m, "__pydantic_private__", None)
            object.__setattr__(m, "__pydantic_extra__", _extra)
            object.__setattr__(m, "__pydantic_fields_set__", _fields_set)

        return m

    if not TYPE_CHECKING:
        # type checkers incorrectly complain about this assignment
        # because the type signatures are technically different
        # although not in practice
        model_construct = construct

    if PYDANTIC_V1:
        # we define aliases for some of the new pydantic v2 methods so
        # that we can just document these methods without having to specify
        # a specific pydantic version as some users may not know which
        # pydantic version they are currently using

        @override
        def model_dump(
            self,
            *,
            mode: Literal["json", "python"] | str = "python",
            include: IncEx | None = None,
            exclude: IncEx | None = None,
            context: Any | None = None,
            by_alias: bool | None = None,
            exclude_unset: bool = False,
            exclude_defaults: bool = False,
            exclude_none: bool = False,
            exclude_computed_fields: bool = False,
            round_trip: bool = False,
            warnings: bool | Literal["none", "warn", "error"] = True,
            fallback: Callable[[Any], Any] | None = None,
            serialize_as_any: bool = False,
        ) -> dict[str, Any]:
            """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump

            Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.

            Args:
                mode: The mode in which `to_python` should run.
                    If mode is 'json', the output will only contain JSON serializable types.
                    If mode is 'python', the output may contain non-JSON-serializable Python objects.
                include: A set of fields to include in the output.
                exclude: A set of fields to exclude from the output.
                context: Additional context to pass to the serializer.
                by_alias: Whether to use the field's alias in the dictionary key if defined.
                exclude_unset: Whether to exclude fields that have not been explicitly set.
                exclude_defaults: Whether to exclude fields that are set to their default value.
                exclude_none: Whether to exclude fields that have a value of `None`.
                exclude_computed_fields: Whether to exclude computed fields.
                    While this can be useful for round-tripping, it is usually recommended to use the dedicated
                    `round_trip` parameter instead.
                round_trip: If True, dumped values should be valid as input for non-idempotent types such as Json[T].
                warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors,
                    "error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError].
                fallback: A function to call when an unknown value is encountered. If not provided,
                    a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError] error is raised.
                serialize_as_any: Whether to serialize fields with duck-typing serialization behavior.

            Returns:
                A dictionary representation of the model.
            """
            # v2-only options are rejected explicitly rather than silently ignored
            if mode not in {"json", "python"}:
                raise ValueError("mode must be either 'json' or 'python'")
            if round_trip != False:
                raise ValueError("round_trip is only supported in Pydantic v2")
            if warnings != True:
                raise ValueError("warnings is only supported in Pydantic v2")
            if context is not None:
                raise ValueError("context is only supported in Pydantic v2")
            if serialize_as_any != False:
                raise ValueError("serialize_as_any is only supported in Pydantic v2")
            if fallback is not None:
                raise ValueError("fallback is only supported in Pydantic v2")
            if exclude_computed_fields != False:
                raise ValueError("exclude_computed_fields is only supported in Pydantic v2")
            dumped = super().dict(  # pyright: ignore[reportDeprecated]
                include=include,
                exclude=exclude,
                by_alias=by_alias if by_alias is not None else False,
                exclude_unset=exclude_unset,
                exclude_defaults=exclude_defaults,
                exclude_none=exclude_none,
            )

            return cast("dict[str, Any]", json_safe(dumped)) if mode == "json" else dumped

        @override
        def model_dump_json(
            self,
            *,
            indent: int | None = None,
            ensure_ascii: bool = False,
            include: IncEx | None = None,
            exclude: IncEx | None = None,
            context: Any | None = None,
            by_alias: bool | None = None,
            exclude_unset: bool = False,
            exclude_defaults: bool = False,
            exclude_none: bool = False,
            exclude_computed_fields: bool = False,
            round_trip: bool = False,
            warnings: bool | Literal["none", "warn", "error"] = True,
            fallback: Callable[[Any], Any] | None = None,
            serialize_as_any: bool = False,
        ) -> str:
            """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump_json

            Generates a JSON representation of the model using Pydantic's `to_json` method.

            Args:
                indent: Indentation to use in the JSON output. If None is passed, the output will be compact.
                include: Field(s) to include in the JSON output. Can take either a string or set of strings.
                exclude: Field(s) to exclude from the JSON output. Can take either a string or set of strings.
                by_alias: Whether to serialize using field aliases.
                exclude_unset: Whether to exclude fields that have not been explicitly set.
                exclude_defaults: Whether to exclude fields that have the default value.
                exclude_none: Whether to exclude fields that have a value of `None`.
                round_trip: Whether to use serialization/deserialization between JSON and class instance.
                warnings: Whether to show any warnings that occurred during serialization.

            Returns:
                A JSON string representation of the model.
            """
            # v2-only options are rejected explicitly rather than silently ignored
            if round_trip != False:
                raise ValueError("round_trip is only supported in Pydantic v2")
            if warnings != True:
                raise ValueError("warnings is only supported in Pydantic v2")
            if context is not None:
                raise ValueError("context is only supported in Pydantic v2")
            if serialize_as_any != False:
                raise ValueError("serialize_as_any is only supported in Pydantic v2")
            if fallback is not None:
                raise ValueError("fallback is only supported in Pydantic v2")
            if ensure_ascii != False:
                raise ValueError("ensure_ascii is only supported in Pydantic v2")
            if exclude_computed_fields != False:
                raise ValueError("exclude_computed_fields is only supported in Pydantic v2")
            return super().json(  # type: ignore[reportDeprecated]
                indent=indent,
                include=include,
                exclude=exclude,
                by_alias=by_alias if by_alias is not None else False,
                exclude_unset=exclude_unset,
                exclude_defaults=exclude_defaults,
                exclude_none=exclude_none,
            )
||||
def _construct_field(value: object, field: FieldInfo, key: str) -> object:
    """Build a single model field value without validation.

    A `None` value falls back to the field's default; otherwise the value is
    recursively coerced toward the field's declared type.
    """
    if value is None:
        return field_get_default(field)

    # Pydantic v1 stores the full type on `outer_type_`; v2 exposes `annotation`.
    field_type = cast(type, field.outer_type_) if PYDANTIC_V1 else field.annotation  # type: ignore
    if field_type is None:
        raise RuntimeError(f"Unexpected field type is None for {key}")

    return construct_type(value=value, type_=field_type, metadata=getattr(field, "metadata", None))
||||
def _get_extra_fields_type(cls: type[pydantic.BaseModel]) -> type | None:
    """Return the declared type for extra (undeclared) fields on `cls`, if any.

    Only implemented for Pydantic v2; always returns `None` on v1.
    """
    if PYDANTIC_V1:
        # TODO
        return None

    schema = cls.__pydantic_core_schema__
    if schema["type"] != "model":
        return None

    fields = schema["schema"]
    if fields["type"] != "model-fields":
        return None

    extras = fields.get("extras_schema")
    if not extras or "cls" not in extras:
        return None

    # mypy can't narrow the type
    return extras["cls"]  # type: ignore[no-any-return]
||||
def is_basemodel(type_: type) -> bool:
    """Returns whether or not the given type is either a `BaseModel` or a union of `BaseModel`"""
    if is_union(type_):
        # a union qualifies as soon as any of its variants does
        return any(is_basemodel(variant) for variant in get_args(type_))

    return is_basemodel_type(type_)
||||
def is_basemodel_type(type_: type) -> TypeGuard[type[BaseModel] | type[GenericModel]]:
    """Type guard: True when `type_` (or its generic origin) subclasses one of our model bases."""
    origin = get_origin(type_) or type_
    return inspect.isclass(origin) and issubclass(origin, (BaseModel, GenericModel))
||||
def build(
    base_model_cls: Callable[P, _BaseModelT],
    *args: P.args,
    **kwargs: P.kwargs,
) -> _BaseModelT:
    """Instantiate a `BaseModel` subclass without running validation.

    Unlike the lower-level `construct_type()` helper, the `ParamSpec`-typed
    signature gives type-safe keyword arguments, e.g.

    ```py
    build(MyModel, my_field_a="foo", my_field_b=123)
    ```

    Raises:
        TypeError: if any positional arguments are supplied.
    """
    if len(args) > 0:
        raise TypeError(
            "Received positional arguments which are not supported; Keyword arguments must be used instead",
        )

    return cast(_BaseModelT, construct_type(value=kwargs, type_=base_model_cls))
||||
def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T:
    """Loosely coerce `value` toward `type_`, constructing nested values.

    Warning: the result is cast to `_T` but is not actually guaranteed to be
    an instance of the given type.
    """
    constructed = construct_type(value=value, type_=type_)
    return cast(_T, constructed)
||||
def construct_type(*, value: object, type_: object, metadata: Optional[List[Any]] = None) -> object:
    """Loose coercion to the expected type with construction of nested values.

    If the given value does not match the expected type then it is returned as-is.
    """

    # store a reference to the original type we were given before we extract any inner
    # types so that we can properly resolve forward references in `TypeAliasType` annotations
    original_type = None

    # we allow `object` as the input type because otherwise, passing things like
    # `Literal['value']` will be reported as a type error by type checkers
    type_ = cast("type[object]", type_)
    if is_type_alias_type(type_):
        original_type = type_  # type: ignore[unreachable]
        type_ = type_.__value__  # type: ignore[unreachable]

    # unwrap `Annotated[T, ...]` -> `T`
    # explicit metadata (if provided) takes precedence over annotation metadata
    if metadata is not None and len(metadata) > 0:
        meta: tuple[Any, ...] = tuple(metadata)
    elif is_annotated_type(type_):
        meta = get_args(type_)[1:]
        type_ = extract_type_arg(type_, 0)
    else:
        meta = tuple()

    # we need to use the origin class for any types that are subscripted generics
    # e.g. Dict[str, object]
    origin = get_origin(type_) or type_
    args = get_args(type_)

    if is_union(origin):
        # first attempt strict validation of the whole union
        try:
            return validate_type(type_=cast("type[object]", original_type or type_), value=value)
        except Exception:
            pass

        # if the type is a discriminated union then we want to construct the right variant
        # in the union, even if the data doesn't match exactly, otherwise we'd break code
        # that relies on the constructed class types, e.g.
        #
        # class FooType:
        #   kind: Literal['foo']
        #   value: str
        #
        # class BarType:
        #   kind: Literal['bar']
        #   value: int
        #
        # without this block, if the data we get is something like `{'kind': 'bar', 'value': 'foo'}` then
        # we'd end up constructing `FooType` when it should be `BarType`.
        discriminator = _build_discriminated_union_meta(union=type_, meta_annotations=meta)
        if discriminator and is_mapping(value):
            variant_value = value.get(discriminator.field_alias_from or discriminator.field_name)
            if variant_value and isinstance(variant_value, str):
                variant_type = discriminator.mapping.get(variant_value)
                if variant_type:
                    return construct_type(type_=variant_type, value=value)

        # if the data is not valid, use the first variant that doesn't fail while deserializing
        for variant in args:
            try:
                return construct_type(value=value, type_=variant)
            except Exception:
                continue

        raise RuntimeError(f"Could not convert data into a valid instance of {type_}")

    if origin == dict:
        if not is_mapping(value):
            return value

        _, items_type = get_args(type_)  # Dict[_, items_type]
        return {key: construct_type(value=item, type_=items_type) for key, item in value.items()}

    if (
        not is_literal_type(type_)
        and inspect.isclass(origin)
        and (issubclass(origin, BaseModel) or issubclass(origin, GenericModel))
    ):
        if is_list(value):
            # construct each mapping entry as a model, passing other entries through
            return [cast(Any, type_).construct(**entry) if is_mapping(entry) else entry for entry in value]

        if is_mapping(value):
            if issubclass(type_, BaseModel):
                return type_.construct(**value)  # type: ignore[arg-type]

            return cast(Any, type_).construct(**value)

    if origin == list:
        if not is_list(value):
            return value

        inner_type = args[0]  # List[inner_type]
        return [construct_type(value=entry, type_=inner_type) for entry in value]

    if origin == float:
        if isinstance(value, int):
            # only coerce ints that convert to float losslessly
            coerced = float(value)
            if coerced != value:
                return value
            return coerced

        return value

    # date/datetime coercion is best-effort; unparseable values pass through
    if type_ == datetime:
        try:
            return parse_datetime(value)  # type: ignore
        except Exception:
            return value

    if type_ == date:
        try:
            return parse_date(value)  # type: ignore
        except Exception:
            return value

    return value
||||
@runtime_checkable
class CachedDiscriminatorType(Protocol):
    """Structural type for classes carrying precomputed `DiscriminatorDetails`."""

    __discriminator__: DiscriminatorDetails
||||
# Cache of computed discriminator details keyed by union type, so repeated
# `construct_type()` calls don't rebuild them; weak keys avoid keeping the
# union types alive.
DISCRIMINATOR_CACHE: weakref.WeakKeyDictionary[type, DiscriminatorDetails] = weakref.WeakKeyDictionary()
||||
class DiscriminatorDetails:
    """Resolved metadata describing how to select a variant out of a discriminated union."""

    field_name: str
    """The name of the discriminator field in the variant class, e.g.

    ```py
    class Foo(BaseModel):
        type: Literal['foo']
    ```

    Will result in field_name='type'
    """

    field_alias_from: str | None
    """The name of the discriminator field in the API response, e.g.

    ```py
    class Foo(BaseModel):
        type: Literal['foo'] = Field(alias='type_from_api')
    ```

    Will result in field_alias_from='type_from_api'
    """

    mapping: dict[str, type]
    """Mapping of discriminator value to variant type, e.g.

    {'foo': FooVariant, 'bar': BarVariant}
    """

    def __init__(
        self,
        *,
        mapping: dict[str, type],
        discriminator_field: str,
        discriminator_alias: str | None,
    ) -> None:
        self.mapping = mapping
        self.field_name = discriminator_field
        self.field_alias_from = discriminator_alias
||||
def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, ...]) -> DiscriminatorDetails | None:
    """Resolve (and cache) discriminator metadata for the given union type.

    Returns `None` when no `PropertyInfo(discriminator=...)` annotation is
    present, or when no variant maps a string literal onto the discriminator
    field. Results are memoised in `DISCRIMINATOR_CACHE`.
    """
    cached = DISCRIMINATOR_CACHE.get(union)
    if cached is not None:
        return cached

    discriminator_field_name: str | None = None

    # the discriminator field name comes from a PropertyInfo annotation on the union
    for annotation in meta_annotations:
        if isinstance(annotation, PropertyInfo) and annotation.discriminator is not None:
            discriminator_field_name = annotation.discriminator
            break

    if not discriminator_field_name:
        return None

    mapping: dict[str, type] = {}
    discriminator_alias: str | None = None

    for variant in get_args(union):
        variant = strip_annotated_type(variant)
        if is_basemodel_type(variant):
            if PYDANTIC_V1:
                field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name)  # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
                if not field_info:
                    continue

                # Note: if one variant defines an alias then they all should
                discriminator_alias = field_info.alias

                if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation):
                    for entry in get_args(annotation):
                        if isinstance(entry, str):
                            mapping[entry] = variant
            else:
                field = _extract_field_schema_pv2(variant, discriminator_field_name)
                if not field:
                    continue

                # Note: if one variant defines an alias then they all should
                discriminator_alias = field.get("serialization_alias")

                field_schema = field["schema"]

                if field_schema["type"] == "literal":
                    for entry in cast("LiteralSchema", field_schema)["expected"]:
                        if isinstance(entry, str):
                            mapping[entry] = variant

    if not mapping:
        return None

    details = DiscriminatorDetails(
        mapping=mapping,
        discriminator_field=discriminator_field_name,
        discriminator_alias=discriminator_alias,
    )
    DISCRIMINATOR_CACHE.setdefault(union, details)
    return details
||||
def _extract_field_schema_pv2(model: type[BaseModel], field_name: str) -> ModelField | None:
    """Dig the core-schema entry for `field_name` out of a Pydantic v2 model, if present."""
    schema = model.__pydantic_core_schema__
    if schema["type"] == "definitions":
        # unwrap the definitions wrapper to reach the inner schema
        schema = schema["schema"]

    if schema["type"] != "model":
        return None

    model_schema = cast("ModelSchema", schema)
    inner = model_schema["schema"]
    if inner["type"] != "model-fields":
        return None

    field = cast("ModelFieldsSchema", inner)["fields"].get(field_name)
    return cast("ModelField", field) if field else None  # pyright: ignore[reportUnnecessaryCast]
||||
def validate_type(*, type_: type[_T], value: object) -> _T:
    """Strict validation that the given value matches the expected type"""
    is_model = inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel)
    if is_model:
        return cast(_T, parse_obj(type_, value))

    return cast(_T, _validate_non_model_type(type_=type_, value=value))
||||
def set_pydantic_config(typ: Any, config: pydantic.ConfigDict) -> None:
    """Attach a pydantic config to the given type via `__pydantic_config__`.

    Note: this is a no-op on Pydantic v1.
    """
    typ.__pydantic_config__ = config
||||
def add_request_id(obj: BaseModel, request_id: str | None) -> None:
    """Attach the request's `request-id` header value to `obj` as `_request_id`."""
    obj._request_id = request_id

    # in Pydantic v1, using setattr like we do above causes the attribute
    # to be included when serializing the model which we don't want in this
    # case so we need to explicitly exclude it
    if PYDANTIC_V1:
        try:
            exclude_fields = obj.__exclude_fields__  # type: ignore
        except AttributeError:
            cast(Any, obj).__exclude_fields__ = {"_request_id", "__exclude_fields__"}
        else:
            # preserve any exclusions the model already declared
            cast(Any, obj).__exclude_fields__ = {*(exclude_fields or {}), "_request_id", "__exclude_fields__"}
||||
# our use of subclassing here causes weirdness for type checkers,
# so we just pretend that we don't subclass
if TYPE_CHECKING:
    GenericModel = BaseModel
else:

    class GenericModel(BaseGenericModel, BaseModel):
        # runtime-only class combining the generic-model base with our BaseModel
        pass
||||
# Version-dependent helpers: on Pydantic v2 we use (and cache) TypeAdapter;
# on v1 we emulate it with a RootModel-style wrapper and stub out v2-only APIs.
if not PYDANTIC_V1:
    from pydantic import TypeAdapter as _TypeAdapter, computed_field as computed_field

    # cache adapters so repeated lookups for the same type reuse one instance
    _CachedTypeAdapter = cast("TypeAdapter[object]", lru_cache(maxsize=None)(_TypeAdapter))

    if TYPE_CHECKING:
        from pydantic import TypeAdapter
    else:
        TypeAdapter = _CachedTypeAdapter

    def _validate_non_model_type(*, type_: type[_T], value: object) -> _T:
        # strict validation of non-model types via pydantic's TypeAdapter
        return TypeAdapter(type_).validate_python(value)

elif not TYPE_CHECKING:  # TODO: condition is weird

    class RootModel(GenericModel, Generic[_T]):
        """Used as a placeholder to easily convert runtime types to a Pydantic format
        to provide validation.

        For example:
        ```py
        validated = RootModel[int](__root__="5").__root__
        # validated: 5
        ```
        """

        __root__: _T

    def _validate_non_model_type(*, type_: type[_T], value: object) -> _T:
        # wrap the value in a RootModel so Pydantic v1 can validate it
        model = _create_pydantic_model(type_).validate(value)
        return cast(_T, model.__root__)

    def _create_pydantic_model(type_: _T) -> Type[RootModel[_T]]:
        return RootModel[type_]  # type: ignore

    def TypeAdapter(*_args: Any, **_kwargs: Any) -> Any:
        # v2-only API: fail loudly if reached on v1
        raise RuntimeError("attempted to use TypeAdapter in pydantic v1")

    def computed_field(func: Any | None = None, /, **__: Any) -> Any:
        # v2-only decorator: accepts both bare and parameterised usage but
        # always produces a function that raises when called
        def _exc_func(*_: Any, **__: Any) -> Any:
            raise RuntimeError("attempted to use computed_field in pydantic v1")

        def _dec(*_: Any, **__: Any) -> Any:
            return _exc_func

        if func is not None:
            return _dec(func)
        else:
            return _dec
||||
class FinalRequestOptionsInput(TypedDict, total=False):
    """Keyword-argument shape accepted by `FinalRequestOptions.construct()`.

    Only `method` and `url` are required; everything else is optional.
    """

    method: Required[str]
    url: Required[str]
    params: Query
    headers: Headers
    max_retries: int
    timeout: float | Timeout | None
    files: HttpxRequestFiles | None
    idempotency_key: str
    json_data: Body
    extra_json: AnyMapping
    follow_redirects: bool
|
||||
|
||||
|
||||
@final
class FinalRequestOptions(pydantic.BaseModel):
    """Fully-resolved options for a single HTTP request.

    Instances are built via `construct()` (no runtime validation) and carry
    everything the base client needs to issue the request.
    """

    method: str
    url: str
    params: Query = {}
    headers: Union[Headers, NotGiven] = NotGiven()
    max_retries: Union[int, NotGiven] = NotGiven()
    timeout: Union[float, Timeout, None, NotGiven] = NotGiven()
    files: Union[HttpxRequestFiles, None] = None
    idempotency_key: Union[str, None] = None
    post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven()
    follow_redirects: Union[bool, None] = None

    # It should be noted that we cannot use `json` here as that would override
    # a BaseModel method in an incompatible fashion.
    json_data: Union[Body, None] = None
    extra_json: Union[AnyMapping, None] = None

    if PYDANTIC_V1:

        class Config(pydantic.BaseConfig):  # pyright: ignore[reportDeprecated]
            arbitrary_types_allowed: bool = True
    else:
        model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True)

    def get_max_retries(self, max_retries: int) -> int:
        """Return the per-request retry override, falling back to `max_retries`."""
        if isinstance(self.max_retries, NotGiven):
            return max_retries
        return self.max_retries

    def _strip_raw_response_header(self) -> None:
        """Remove the internal raw-response marker header before sending."""
        if not is_given(self.headers):
            return

        if self.headers.get(RAW_RESPONSE_HEADER):
            # copy first so we never mutate a headers mapping shared with the caller
            self.headers = {**self.headers}
            self.headers.pop(RAW_RESPONSE_HEADER)

    # override the `construct` method so that we can run custom transformations.
    # this is necessary as we don't want to do any actual runtime type checking
    # (which means we can't use validators) but we do want to ensure that `NotGiven`
    # values are not present
    #
    # type ignore required because we're adding explicit types to `**values`
    @classmethod
    def construct(  # type: ignore
        cls,
        _fields_set: set[str] | None = None,
        **values: Unpack[FinalRequestOptionsInput],
    ) -> FinalRequestOptions:
        kwargs: dict[str, Any] = {
            # we unconditionally call `strip_not_given` on any value
            # as it will just ignore any non-mapping types
            key: strip_not_given(value)
            for key, value in values.items()
        }
        if PYDANTIC_V1:
            return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs))  # pyright: ignore[reportDeprecated]
        return super().model_construct(_fields_set, **kwargs)

    if not TYPE_CHECKING:
        # type checkers incorrectly complain about this assignment
        model_construct = construct
|
||||
@@ -0,0 +1,150 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, List, Tuple, Union, Mapping, TypeVar
|
||||
from urllib.parse import parse_qs, urlencode
|
||||
from typing_extensions import Literal, get_args
|
||||
|
||||
from ._types import NotGiven, not_given
|
||||
from ._utils import flatten
|
||||
|
||||
_T = TypeVar("_T")
|
||||
|
||||
|
||||
ArrayFormat = Literal["comma", "repeat", "indices", "brackets"]
|
||||
NestedFormat = Literal["dots", "brackets"]
|
||||
|
||||
PrimitiveData = Union[str, int, float, bool, None]
|
||||
# this should be Data = Union[PrimitiveData, "List[Data]", "Tuple[Data]", "Mapping[str, Data]"]
|
||||
# https://github.com/microsoft/pyright/issues/3555
|
||||
Data = Union[PrimitiveData, List[Any], Tuple[Any], "Mapping[str, Any]"]
|
||||
Params = Mapping[str, Data]
|
||||
|
||||
|
||||
class Querystring:
    """Serializes nested parameter mappings into URL query strings.

    `array_format` controls how list values are rendered and `nested_format`
    controls how nested mapping keys are rendered.
    """

    array_format: ArrayFormat
    nested_format: NestedFormat

    def __init__(
        self,
        *,
        array_format: ArrayFormat = "repeat",
        nested_format: NestedFormat = "brackets",
    ) -> None:
        self.array_format = array_format
        self.nested_format = nested_format

    def parse(self, query: str) -> Mapping[str, object]:
        """Parse a query string into a mapping of key -> list of values."""
        # Note: custom format syntax is not supported yet
        return parse_qs(query)

    def stringify(
        self,
        params: Params,
        *,
        array_format: ArrayFormat | NotGiven = not_given,
        nested_format: NestedFormat | NotGiven = not_given,
    ) -> str:
        """Serialize `params` into a URL-encoded query string."""
        return urlencode(
            self.stringify_items(
                params,
                array_format=array_format,
                nested_format=nested_format,
            )
        )

    def stringify_items(
        self,
        params: Params,
        *,
        array_format: ArrayFormat | NotGiven = not_given,
        nested_format: NestedFormat | NotGiven = not_given,
    ) -> list[tuple[str, str]]:
        """Serialize `params` into a flat list of `(key, value)` string pairs."""
        opts = Options(
            qs=self,
            array_format=array_format,
            nested_format=nested_format,
        )
        return flatten([self._stringify_item(key, value, opts) for key, value in params.items()])

    def _stringify_item(
        self,
        key: str,
        value: Data,
        opts: Options,
    ) -> list[tuple[str, str]]:
        """Recursively serialize a single key/value pair.

        Mappings recurse with the key rewritten per `opts.nested_format`;
        lists/tuples are expanded per `opts.array_format`; primitives become
        a single pair (or nothing, when they serialize to the empty string).
        """
        if isinstance(value, Mapping):
            items: list[tuple[str, str]] = []
            nested_format = opts.nested_format
            for subkey, subvalue in value.items():
                items.extend(
                    self._stringify_item(
                        # TODO: error if unknown format
                        f"{key}.{subkey}" if nested_format == "dots" else f"{key}[{subkey}]",
                        subvalue,
                        opts,
                    )
                )
            return items

        if isinstance(value, (list, tuple)):
            array_format = opts.array_format
            if array_format == "comma":
                return [
                    (
                        key,
                        ",".join(self._primitive_value_to_str(item) for item in value if item is not None),
                    ),
                ]
            elif array_format == "repeat":
                items = []
                for item in value:
                    items.extend(self._stringify_item(key, item, opts))
                return items
            elif array_format == "indices":
                raise NotImplementedError("The array indices format is not supported yet")
            elif array_format == "brackets":
                items = []
                key = key + "[]"
                for item in value:
                    items.extend(self._stringify_item(key, item, opts))
                return items
            else:
                raise NotImplementedError(
                    f"Unknown array_format value: {array_format}, choose from {', '.join(get_args(ArrayFormat))}"
                )

        serialised = self._primitive_value_to_str(value)
        if not serialised:
            # empty string / None serializes to nothing rather than `key=`
            return []
        return [(key, serialised)]

    def _primitive_value_to_str(self, value: PrimitiveData) -> str:
        """Render a primitive as its query-string representation."""
        # copied from httpx
        if value is True:
            return "true"
        elif value is False:
            return "false"
        elif value is None:
            return ""
        return str(value)
|
||||
|
||||
|
||||
# Module-level default instance plus convenience aliases so callers can use
# `qs.stringify(...)` without constructing a `Querystring` themselves.
_qs = Querystring()
parse = _qs.parse
stringify = _qs.stringify
stringify_items = _qs.stringify_items
|
||||
|
||||
|
||||
class Options:
    """Resolved per-call formatting options.

    Explicit arguments win; anything not given falls back to the owning
    `Querystring` instance's defaults.
    """

    array_format: ArrayFormat
    nested_format: NestedFormat

    def __init__(
        self,
        qs: Querystring = _qs,
        *,
        array_format: ArrayFormat | NotGiven = not_given,
        nested_format: NestedFormat | NotGiven = not_given,
    ) -> None:
        self.array_format = qs.array_format if isinstance(array_format, NotGiven) else array_format
        self.nested_format = qs.nested_format if isinstance(nested_format, NotGiven) else nested_format
|
||||
@@ -0,0 +1,41 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
|
||||
import anyio
|
||||
|
||||
from ._base_client import SyncAPIClient, AsyncAPIClient
|
||||
|
||||
|
||||
class SyncAPIResource:
    """Base class for synchronous API resource namespaces.

    Binds the client's HTTP verb helpers onto the resource for convenient
    internal use (`self._get(...)`, `self._post(...)`, etc.).
    """

    _client: SyncAPIClient

    def __init__(self, client: SyncAPIClient) -> None:
        self._client = client
        self._get = client.get
        self._post = client.post
        self._patch = client.patch
        self._put = client.put
        self._delete = client.delete
        self._get_api_list = client.get_api_list

    def _sleep(self, seconds: float) -> None:
        """Block the current thread for `seconds` (overridable in tests)."""
        time.sleep(seconds)
|
||||
|
||||
|
||||
class AsyncAPIResource:
    """Base class for asynchronous API resource namespaces.

    Binds the client's HTTP verb helpers onto the resource for convenient
    internal use (`self._get(...)`, `self._post(...)`, etc.).
    """

    _client: AsyncAPIClient

    def __init__(self, client: AsyncAPIClient) -> None:
        self._client = client
        self._get = client.get
        self._post = client.post
        self._patch = client.patch
        self._put = client.put
        self._delete = client.delete
        self._get_api_list = client.get_api_list

    async def _sleep(self, seconds: float) -> None:
        """Asynchronously sleep for `seconds` without blocking the event loop."""
        await anyio.sleep(seconds)
|
||||
@@ -0,0 +1,872 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import inspect
|
||||
import logging
|
||||
import datetime
|
||||
import functools
|
||||
from types import TracebackType
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Union,
|
||||
Generic,
|
||||
TypeVar,
|
||||
Callable,
|
||||
Iterator,
|
||||
AsyncIterator,
|
||||
cast,
|
||||
overload,
|
||||
)
|
||||
from typing_extensions import Awaitable, ParamSpec, override, get_origin
|
||||
|
||||
import anyio
|
||||
import httpx
|
||||
import pydantic
|
||||
|
||||
from ._types import NoneType
|
||||
from ._utils import is_given, extract_type_arg, is_annotated_type, is_type_alias_type, extract_type_var_from_base
|
||||
from ._models import BaseModel, is_basemodel, add_request_id
|
||||
from ._constants import RAW_RESPONSE_HEADER, OVERRIDE_CAST_TO_HEADER
|
||||
from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type
|
||||
from ._exceptions import AnthropicError, APIResponseValidationError
|
||||
from ._decoders.jsonl import JSONLDecoder, AsyncJSONLDecoder
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ._models import FinalRequestOptions
|
||||
from ._base_client import BaseClient
|
||||
|
||||
|
||||
P = ParamSpec("P")
|
||||
R = TypeVar("R")
|
||||
_T = TypeVar("_T")
|
||||
_APIResponseT = TypeVar("_APIResponseT", bound="APIResponse[Any]")
|
||||
_AsyncAPIResponseT = TypeVar("_AsyncAPIResponseT", bound="AsyncAPIResponse[Any]")
|
||||
|
||||
log: logging.Logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BaseAPIResponse(Generic[R]):
    """Shared state and parsing logic for sync and async API responses.

    Wraps the underlying `httpx.Response` and converts its payload into the
    type requested by the caller (`_cast_to`) on demand.
    """

    _cast_to: type[R]
    _client: BaseClient[Any, Any]
    # cache of parsed results keyed by target type, so repeated `.parse()` calls are cheap
    _parsed_by_type: dict[type[Any], Any]
    _is_sse_stream: bool
    _stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None
    _options: FinalRequestOptions

    http_response: httpx.Response

    retries_taken: int
    """The number of retries made. If no retries happened this will be `0`"""

    def __init__(
        self,
        *,
        raw: httpx.Response,
        cast_to: type[R],
        client: BaseClient[Any, Any],
        stream: bool,
        stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
        options: FinalRequestOptions,
        retries_taken: int = 0,
    ) -> None:
        self._cast_to = cast_to
        self._client = client
        self._parsed_by_type = {}
        self._is_sse_stream = stream
        self._stream_cls = stream_cls
        self._options = options
        self.http_response = raw
        self.retries_taken = retries_taken

    @property
    def headers(self) -> httpx.Headers:
        """The response headers."""
        return self.http_response.headers

    @property
    def http_request(self) -> httpx.Request:
        """Returns the httpx Request instance associated with the current response."""
        return self.http_response.request

    @property
    def status_code(self) -> int:
        """The HTTP status code of the response."""
        return self.http_response.status_code

    @property
    def url(self) -> httpx.URL:
        """Returns the URL for which the request was made."""
        return self.http_response.url

    @property
    def method(self) -> str:
        """The HTTP method of the originating request."""
        return self.http_request.method

    @property
    def http_version(self) -> str:
        """The HTTP protocol version used, e.g. "HTTP/1.1"."""
        return self.http_response.http_version

    @property
    def elapsed(self) -> datetime.timedelta:
        """The time taken for the complete request/response cycle to complete."""
        return self.http_response.elapsed

    @property
    def is_closed(self) -> bool:
        """Whether or not the response body has been closed.

        If this is False then there is response data that has not been read yet.
        You must either fully consume the response body or call `.close()`
        before discarding the response to prevent resource leaks.
        """
        return self.http_response.is_closed

    @override
    def __repr__(self) -> str:
        return (
            f"<{self.__class__.__name__} [{self.status_code} {self.http_response.reason_phrase}] type={self._cast_to}>"
        )

    def _parse(self, *, to: type[_T] | None = None) -> R | _T:
        """Convert the raw response into `to` (or the stored `_cast_to`).

        Handles, in order: JSONL decoders, SSE streams, primitive casts
        (`None`/`str`/`bytes`/`int`/`float`/`bool`), raw `httpx.Response`
        pass-through, and finally JSON -> model parsing via the client.

        Raises:
            TypeError: for stream types without a chunk type argument, or
                pydantic models that don't subclass our `BaseModel`.
            ValueError: when an `httpx.Response` *subclass* is requested.
            RuntimeError: for unsupported target types.
            APIResponseValidationError: non-JSON content under strict validation.
        """
        cast_to = to if to is not None else self._cast_to

        # unwrap `TypeAlias('Name', T)` -> `T`
        if is_type_alias_type(cast_to):
            cast_to = cast_to.__value__  # type: ignore[unreachable]

        # unwrap `Annotated[T, ...]` -> `T`
        if cast_to and is_annotated_type(cast_to):
            cast_to = extract_type_arg(cast_to, 0)

        origin = get_origin(cast_to) or cast_to

        if inspect.isclass(origin):
            if issubclass(cast(Any, origin), JSONLDecoder):
                return cast(
                    R,
                    cast("type[JSONLDecoder[Any]]", cast_to)(
                        raw_iterator=self.http_response.iter_bytes(chunk_size=64),
                        line_type=extract_type_arg(cast_to, 0),
                        http_response=self.http_response,
                    ),
                )

            if issubclass(cast(Any, origin), AsyncJSONLDecoder):
                return cast(
                    R,
                    cast("type[AsyncJSONLDecoder[Any]]", cast_to)(
                        raw_iterator=self.http_response.aiter_bytes(chunk_size=64),
                        line_type=extract_type_arg(cast_to, 0),
                        http_response=self.http_response,
                    ),
                )

        if self._is_sse_stream:
            if to:
                if not is_stream_class_type(to):
                    raise TypeError(f"Expected custom parse type to be a subclass of {Stream} or {AsyncStream}")

                return cast(
                    _T,
                    to(
                        cast_to=extract_stream_chunk_type(
                            to,
                            failure_message="Expected custom stream type to be passed with a type argument, e.g. Stream[ChunkType]",
                        ),
                        response=self.http_response,
                        client=cast(Any, self._client),
                    ),
                )

            if self._stream_cls:
                return cast(
                    R,
                    self._stream_cls(
                        cast_to=extract_stream_chunk_type(self._stream_cls),
                        response=self.http_response,
                        client=cast(Any, self._client),
                    ),
                )

            stream_cls = cast("type[Stream[Any]] | type[AsyncStream[Any]] | None", self._client._default_stream_cls)
            if stream_cls is None:
                raise MissingStreamClassError()

            return cast(
                R,
                stream_cls(
                    cast_to=cast_to,
                    response=self.http_response,
                    client=cast(Any, self._client),
                ),
            )

        if cast_to is NoneType:
            return cast(R, None)

        response = self.http_response
        if cast_to == str:
            return cast(R, response.text)

        if cast_to == bytes:
            return cast(R, response.content)

        if cast_to == int:
            return cast(R, int(response.text))

        if cast_to == float:
            return cast(R, float(response.text))

        if cast_to == bool:
            return cast(R, response.text.lower() == "true")

        # handle the legacy binary response case
        if inspect.isclass(cast_to) and cast_to.__name__ == "HttpxBinaryResponseContent":
            return cast(R, cast_to(response))  # type: ignore

        if origin == APIResponse:
            raise RuntimeError("Unexpected state - cast_to is `APIResponse`")

        if inspect.isclass(
            origin  # pyright: ignore[reportUnknownArgumentType]
        ) and issubclass(origin, httpx.Response):
            # Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response
            # and pass that class to our request functions. We cannot change the variance to be either
            # covariant or contravariant as that makes our usage of ResponseT illegal. We could construct
            # the response class ourselves but that is something that should be supported directly in httpx
            # as it would be easy to incorrectly construct the Response object due to the multitude of arguments.
            if cast_to != httpx.Response:
                raise ValueError("Subclasses of httpx.Response cannot be passed to `cast_to`")
            return cast(R, response)

        if (
            inspect.isclass(
                origin  # pyright: ignore[reportUnknownArgumentType]
            )
            and not issubclass(origin, BaseModel)
            and issubclass(origin, pydantic.BaseModel)
        ):
            raise TypeError("Pydantic models must subclass our base model type, e.g. `from anthropic import BaseModel`")

        if (
            cast_to is not object
            and origin is not list
            and origin is not dict
            and origin is not Union
            and not issubclass(origin, BaseModel)
        ):
            raise RuntimeError(
                f"Unsupported type, expected {cast_to} to be a subclass of {BaseModel}, {dict}, {list}, {Union}, {NoneType}, {str} or {httpx.Response}."
            )

        # split is required to handle cases where additional information is included
        # in the response, e.g. application/json; charset=utf-8
        content_type, *_ = response.headers.get("content-type", "*").split(";")
        if not content_type.endswith("json"):
            if is_basemodel(cast_to):
                try:
                    data = response.json()
                except Exception as exc:
                    log.debug("Could not read JSON from response data due to %s - %s", type(exc), exc)
                else:
                    return self._client._process_response_data(
                        data=data,
                        cast_to=cast_to,  # type: ignore
                        response=response,
                    )

            if self._client._strict_response_validation:
                raise APIResponseValidationError(
                    response=response,
                    message=f"Expected Content-Type response header to be `application/json` but received `{content_type}` instead.",
                    body=response.text,
                )

            # If the API responds with content that isn't JSON then we just return
            # the (decoded) text without performing any parsing so that you can still
            # handle the response however you need to.
            return response.text  # type: ignore

        data = response.json()

        return self._client._process_response_data(
            data=data,
            cast_to=cast_to,  # type: ignore
            response=response,
        )
|
||||
|
||||
|
||||
class APIResponse(BaseAPIResponse[R]):
    """Synchronous API response; see `BaseAPIResponse` for shared behaviour."""

    @property
    def request_id(self) -> str | None:
        """The `request-id` header returned by the API, if present."""
        return self.http_response.headers.get("request-id")  # type: ignore[no-any-return]

    @overload
    def parse(self, *, to: type[_T]) -> _T: ...

    @overload
    def parse(self) -> R: ...

    def parse(self, *, to: type[_T] | None = None) -> R | _T:
        """Returns the rich python representation of this response's data.

        For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`.

        You can customise the type that the response is parsed into through
        the `to` argument, e.g.

        ```py
        from anthropic import BaseModel


        class MyModel(BaseModel):
            foo: str


        obj = response.parse(to=MyModel)
        print(obj.foo)
        ```

        We support parsing:
        - `BaseModel`
        - `dict`
        - `list`
        - `Union`
        - `str`
        - `int`
        - `float`
        - `httpx.Response`
        """
        cache_key = to if to is not None else self._cast_to
        cached = self._parsed_by_type.get(cache_key)
        if cached is not None:
            return cached  # type: ignore[no-any-return]

        # non-streaming responses must be fully read before parsing
        if not self._is_sse_stream:
            self.read()

        parsed = self._parse(to=to)
        if is_given(self._options.post_parser):
            parsed = self._options.post_parser(parsed)

        if isinstance(parsed, BaseModel):
            add_request_id(parsed, self.request_id)

        self._parsed_by_type[cache_key] = parsed
        return cast(R, parsed)

    def read(self) -> bytes:
        """Read and return the binary response content."""
        try:
            return self.http_response.read()
        except httpx.StreamConsumed as exc:
            # The default error raised by httpx isn't very
            # helpful in our case so we re-raise it with
            # a different error message.
            raise StreamAlreadyConsumed() from exc

    def text(self) -> str:
        """Read and decode the response content into a string."""
        self.read()
        return self.http_response.text

    def json(self) -> object:
        """Read and decode the JSON response content."""
        self.read()
        return self.http_response.json()

    def close(self) -> None:
        """Close the response and release the connection.

        Automatically called if the response body is read to completion.
        """
        self.http_response.close()

    def iter_bytes(self, chunk_size: int | None = None) -> Iterator[bytes]:
        """
        A byte-iterator over the decoded response content.

        This automatically handles gzip, deflate and brotli encoded responses.
        """
        for chunk in self.http_response.iter_bytes(chunk_size):
            yield chunk

    def iter_text(self, chunk_size: int | None = None) -> Iterator[str]:
        """A str-iterator over the decoded response content
        that handles both gzip, deflate, etc but also detects the content's
        string encoding.
        """
        for chunk in self.http_response.iter_text(chunk_size):
            yield chunk

    def iter_lines(self) -> Iterator[str]:
        """Like `iter_text()` but will only yield chunks for each line"""
        for chunk in self.http_response.iter_lines():
            yield chunk
|
||||
|
||||
|
||||
class AsyncAPIResponse(BaseAPIResponse[R]):
    """Asynchronous API response; see `BaseAPIResponse` for shared behaviour."""

    @property
    def request_id(self) -> str | None:
        """The `request-id` header returned by the API, if present."""
        return self.http_response.headers.get("request-id")  # type: ignore[no-any-return]

    @overload
    async def parse(self, *, to: type[_T]) -> _T: ...

    @overload
    async def parse(self) -> R: ...

    async def parse(self, *, to: type[_T] | None = None) -> R | _T:
        """Returns the rich python representation of this response's data.

        For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`.

        You can customise the type that the response is parsed into through
        the `to` argument, e.g.

        ```py
        from anthropic import BaseModel


        class MyModel(BaseModel):
            foo: str


        obj = response.parse(to=MyModel)
        print(obj.foo)
        ```

        We support parsing:
        - `BaseModel`
        - `dict`
        - `list`
        - `Union`
        - `str`
        - `httpx.Response`
        """
        cache_key = to if to is not None else self._cast_to
        cached = self._parsed_by_type.get(cache_key)
        if cached is not None:
            return cached  # type: ignore[no-any-return]

        # non-streaming responses must be fully read before parsing
        if not self._is_sse_stream:
            await self.read()

        parsed = self._parse(to=to)
        if is_given(self._options.post_parser):
            parsed = self._options.post_parser(parsed)

        if isinstance(parsed, BaseModel):
            add_request_id(parsed, self.request_id)

        self._parsed_by_type[cache_key] = parsed
        return cast(R, parsed)

    async def read(self) -> bytes:
        """Read and return the binary response content."""
        try:
            return await self.http_response.aread()
        except httpx.StreamConsumed as exc:
            # the default error raised by httpx isn't very
            # helpful in our case so we re-raise it with
            # a different error message
            raise StreamAlreadyConsumed() from exc

    async def text(self) -> str:
        """Read and decode the response content into a string."""
        await self.read()
        return self.http_response.text

    async def json(self) -> object:
        """Read and decode the JSON response content."""
        await self.read()
        return self.http_response.json()

    async def close(self) -> None:
        """Close the response and release the connection.

        Automatically called if the response body is read to completion.
        """
        await self.http_response.aclose()

    async def iter_bytes(self, chunk_size: int | None = None) -> AsyncIterator[bytes]:
        """
        A byte-iterator over the decoded response content.

        This automatically handles gzip, deflate and brotli encoded responses.
        """
        async for chunk in self.http_response.aiter_bytes(chunk_size):
            yield chunk

    async def iter_text(self, chunk_size: int | None = None) -> AsyncIterator[str]:
        """A str-iterator over the decoded response content
        that handles both gzip, deflate, etc but also detects the content's
        string encoding.
        """
        async for chunk in self.http_response.aiter_text(chunk_size):
            yield chunk

    async def iter_lines(self) -> AsyncIterator[str]:
        """Like `iter_text()` but will only yield chunks for each line"""
        async for chunk in self.http_response.aiter_lines():
            yield chunk
|
||||
|
||||
|
||||
class BinaryAPIResponse(APIResponse[bytes]):
    """Subclass of APIResponse providing helpers for dealing with binary data.

    Note: If you want to stream the response data instead of eagerly reading it
    all at once then you should use `.with_streaming_response` when making
    the API request, e.g. `.with_streaming_response.get_binary_response()`
    """

    def write_to_file(
        self,
        file: str | os.PathLike[str],
    ) -> None:
        """Write the output to the given file.

        Accepts a filename or any path-like object, e.g. pathlib.Path

        Note: if you want to stream the data to the file instead of writing
        all at once then you should use `.with_streaming_response` when making
        the API request, e.g. `.with_streaming_response.get_binary_response()`
        """
        with open(file, mode="wb") as f:
            for data in self.iter_bytes():
                f.write(data)
|
||||
|
||||
|
||||
class AsyncBinaryAPIResponse(AsyncAPIResponse[bytes]):
    """Subclass of APIResponse providing helpers for dealing with binary data.

    Note: If you want to stream the response data instead of eagerly reading it
    all at once then you should use `.with_streaming_response` when making
    the API request, e.g. `.with_streaming_response.get_binary_response()`
    """

    async def write_to_file(
        self,
        file: str | os.PathLike[str],
    ) -> None:
        """Write the output to the given file.

        Accepts a filename or any path-like object, e.g. pathlib.Path

        Note: if you want to stream the data to the file instead of writing
        all at once then you should use `.with_streaming_response` when making
        the API request, e.g. `.with_streaming_response.get_binary_response()`
        """
        path = anyio.Path(file)
        async with await path.open(mode="wb") as f:
            async for data in self.iter_bytes():
                await f.write(data)
|
||||
|
||||
|
||||
class StreamedBinaryAPIResponse(APIResponse[bytes]):
    """Binary response intended to be consumed as a stream of chunks."""

    def stream_to_file(
        self,
        file: str | os.PathLike[str],
        *,
        chunk_size: int | None = None,
    ) -> None:
        """Streams the output to the given file.

        Accepts a filename or any path-like object, e.g. pathlib.Path
        """
        with open(file, mode="wb") as f:
            for data in self.iter_bytes(chunk_size):
                f.write(data)
|
||||
|
||||
|
||||
class AsyncStreamedBinaryAPIResponse(AsyncAPIResponse[bytes]):
    """Binary response intended to be consumed as an async stream of chunks."""

    async def stream_to_file(
        self,
        file: str | os.PathLike[str],
        *,
        chunk_size: int | None = None,
    ) -> None:
        """Streams the output to the given file.

        Accepts a filename or any path-like object, e.g. pathlib.Path
        """
        path = anyio.Path(file)
        async with await path.open(mode="wb") as f:
            async for data in self.iter_bytes(chunk_size):
                await f.write(data)
|
||||
|
||||
|
||||
class MissingStreamClassError(TypeError):
    """Raised when `stream=True` is requested without a stream class to construct."""

    def __init__(self) -> None:
        super().__init__(
            "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `anthropic._streaming` for reference",
        )
|
||||
|
||||
|
||||
class StreamAlreadyConsumed(AnthropicError):
    """
    Attempted to read or stream content, but the content has already
    been streamed.

    This can happen if you use a method like `.iter_lines()` and then attempt
    to read th entire response body afterwards, e.g.

    ```py
    response = await client.post(...)
    async for line in response.iter_lines():
        ...  # do something with `line`

    content = await response.read()
    # ^ error
    ```

    If you want this behaviour you'll need to either manually accumulate the response
    content or call `await response.read()` before iterating over the stream.
    """

    def __init__(self) -> None:
        message = (
            "Attempted to read or stream some content, but the content has "
            "already been streamed. "
            "This could be due to attempting to stream the response "
            "content more than once."
            "\n\n"
            "You can fix this by manually accumulating the response content while streaming "
            "or by calling `.read()` before starting to stream."
        )
        super().__init__(message)
|
||||
|
||||
|
||||
class ResponseContextManager(Generic[_APIResponseT]):
    """Context manager for ensuring that a request is not made
    until it is entered and that the response will always be closed
    when the context manager exits
    """

    def __init__(self, request_func: Callable[[], _APIResponseT]) -> None:
        # the request is deferred: nothing is sent until `__enter__` runs
        self._request_func = request_func
        self.__response: _APIResponseT | None = None

    def __enter__(self) -> _APIResponseT:
        self.__response = self._request_func()
        return self.__response

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        if self.__response is not None:
            self.__response.close()
|
||||
|
||||
|
||||
class AsyncResponseContextManager(Generic[_AsyncAPIResponseT]):
    """Defer awaiting an API request until the `async with` block is entered,
    and guarantee that the resulting response is closed when the block exits.
    """

    def __init__(self, api_request: Awaitable[_AsyncAPIResponseT]) -> None:
        self._api_request = api_request
        self.__response: _AsyncAPIResponseT | None = None

    async def __aenter__(self) -> _AsyncAPIResponseT:
        # The pending request is only awaited here, not at construction time.
        response = await self._api_request
        self.__response = response
        return response

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        response = self.__response
        if response is not None:
            await response.close()
|
||||
|
||||
|
||||
def to_streamed_response_wrapper(func: Callable[P, R]) -> Callable[P, ResponseContextManager[APIResponse[R]]]:
    """Wrap a bound API method so that calling it yields a context manager
    which streams the raw `APIResponse` instead of the parsed model.
    """

    @functools.wraps(func)
    def wrapped(*args: P.args, **kwargs: P.kwargs) -> ResponseContextManager[APIResponse[R]]:
        # Request streamed raw-response handling from the client via a header.
        headers: dict[str, str] = dict(cast(Any, kwargs.get("extra_headers")) or {})
        headers[RAW_RESPONSE_HEADER] = "stream"
        kwargs["extra_headers"] = headers

        # Defer the actual request until the context manager is entered.
        request = functools.partial(func, *args, **kwargs)
        return ResponseContextManager(cast(Callable[[], APIResponse[R]], request))

    return wrapped
|
||||
|
||||
|
||||
def async_to_streamed_response_wrapper(
    func: Callable[P, Awaitable[R]],
) -> Callable[P, AsyncResponseContextManager[AsyncAPIResponse[R]]]:
    """Wrap a bound async API method so that calling it yields an async
    context manager which streams the raw `AsyncAPIResponse` instead of
    the parsed model.
    """

    @functools.wraps(func)
    def wrapped(*args: P.args, **kwargs: P.kwargs) -> AsyncResponseContextManager[AsyncAPIResponse[R]]:
        # Request streamed raw-response handling from the client via a header.
        headers: dict[str, str] = dict(cast(Any, kwargs.get("extra_headers")) or {})
        headers[RAW_RESPONSE_HEADER] = "stream"
        kwargs["extra_headers"] = headers

        # The coroutine is created now but only awaited in `__aenter__`.
        pending = func(*args, **kwargs)
        return AsyncResponseContextManager(cast(Awaitable[AsyncAPIResponse[R]], pending))

    return wrapped
|
||||
|
||||
|
||||
def to_custom_streamed_response_wrapper(
    func: Callable[P, object],
    response_cls: type[_APIResponseT],
) -> Callable[P, ResponseContextManager[_APIResponseT]]:
    """Wrap a bound API method to stream the response wrapped in the given
    `APIResponse` subclass.

    Note: the given `response_cls` *must* be concrete, e.g. `class BinaryAPIResponse(APIResponse[bytes])`
    """

    @functools.wraps(func)
    def wrapped(*args: P.args, **kwargs: P.kwargs) -> ResponseContextManager[_APIResponseT]:
        # Ask the client (via headers) for streaming and for the custom
        # response class to wrap the raw response with.
        headers: dict[str, Any] = dict(cast(Any, kwargs.get("extra_headers")) or {})
        headers[RAW_RESPONSE_HEADER] = "stream"
        headers[OVERRIDE_CAST_TO_HEADER] = response_cls
        kwargs["extra_headers"] = headers

        # Defer the actual request until the context manager is entered.
        request = functools.partial(func, *args, **kwargs)
        return ResponseContextManager(cast(Callable[[], _APIResponseT], request))

    return wrapped
|
||||
|
||||
|
||||
def async_to_custom_streamed_response_wrapper(
    func: Callable[P, Awaitable[object]],
    response_cls: type[_AsyncAPIResponseT],
) -> Callable[P, AsyncResponseContextManager[_AsyncAPIResponseT]]:
    """Wrap a bound async API method to stream the response wrapped in the
    given `APIResponse` subclass.

    Note: the given `response_cls` *must* be concrete, e.g. `class BinaryAPIResponse(APIResponse[bytes])`
    """

    @functools.wraps(func)
    def wrapped(*args: P.args, **kwargs: P.kwargs) -> AsyncResponseContextManager[_AsyncAPIResponseT]:
        # Ask the client (via headers) for streaming and for the custom
        # response class to wrap the raw response with.
        headers: dict[str, Any] = dict(cast(Any, kwargs.get("extra_headers")) or {})
        headers[RAW_RESPONSE_HEADER] = "stream"
        headers[OVERRIDE_CAST_TO_HEADER] = response_cls
        kwargs["extra_headers"] = headers

        # The coroutine is created now but only awaited in `__aenter__`.
        pending = func(*args, **kwargs)
        return AsyncResponseContextManager(cast(Awaitable[_AsyncAPIResponseT], pending))

    return wrapped
|
||||
|
||||
|
||||
def to_raw_response_wrapper(func: Callable[P, R]) -> Callable[P, APIResponse[R]]:
    """Wrap a bound API method so that it returns the raw `APIResponse`
    object directly instead of the parsed model.
    """

    @functools.wraps(func)
    def wrapped(*args: P.args, **kwargs: P.kwargs) -> APIResponse[R]:
        # Request raw-response handling from the client via a header.
        headers: dict[str, str] = dict(cast(Any, kwargs.get("extra_headers")) or {})
        headers[RAW_RESPONSE_HEADER] = "raw"
        kwargs["extra_headers"] = headers
        return cast(APIResponse[R], func(*args, **kwargs))

    return wrapped
|
||||
|
||||
|
||||
def async_to_raw_response_wrapper(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[AsyncAPIResponse[R]]]:
    """Wrap a bound async API method so that it returns the raw
    `AsyncAPIResponse` object directly instead of the parsed model.
    """

    @functools.wraps(func)
    async def wrapped(*args: P.args, **kwargs: P.kwargs) -> AsyncAPIResponse[R]:
        # Request raw-response handling from the client via a header.
        headers: dict[str, str] = dict(cast(Any, kwargs.get("extra_headers")) or {})
        headers[RAW_RESPONSE_HEADER] = "raw"
        kwargs["extra_headers"] = headers
        return cast(AsyncAPIResponse[R], await func(*args, **kwargs))

    return wrapped
|
||||
|
||||
|
||||
def to_custom_raw_response_wrapper(
    func: Callable[P, object],
    response_cls: type[_APIResponseT],
) -> Callable[P, _APIResponseT]:
    """Wrap a bound API method to return the response wrapped in the given
    `APIResponse` subclass.

    Note: the given `response_cls` *must* be concrete, e.g. `class BinaryAPIResponse(APIResponse[bytes])`
    """

    @functools.wraps(func)
    def wrapped(*args: P.args, **kwargs: P.kwargs) -> _APIResponseT:
        # Ask the client (via headers) for raw handling and for the custom
        # response class to wrap the raw response with.
        headers: dict[str, Any] = dict(cast(Any, kwargs.get("extra_headers")) or {})
        headers[RAW_RESPONSE_HEADER] = "raw"
        headers[OVERRIDE_CAST_TO_HEADER] = response_cls
        kwargs["extra_headers"] = headers
        return cast(_APIResponseT, func(*args, **kwargs))

    return wrapped
|
||||
|
||||
|
||||
def async_to_custom_raw_response_wrapper(
    func: Callable[P, Awaitable[object]],
    response_cls: type[_AsyncAPIResponseT],
) -> Callable[P, Awaitable[_AsyncAPIResponseT]]:
    """Wrap a bound async API method to return the response wrapped in the
    given `APIResponse` subclass.

    Note: the given `response_cls` *must* be concrete, e.g. `class BinaryAPIResponse(APIResponse[bytes])`
    """

    @functools.wraps(func)
    def wrapped(*args: P.args, **kwargs: P.kwargs) -> Awaitable[_AsyncAPIResponseT]:
        # Ask the client (via headers) for raw handling and for the custom
        # response class to wrap the raw response with.
        headers: dict[str, Any] = dict(cast(Any, kwargs.get("extra_headers")) or {})
        headers[RAW_RESPONSE_HEADER] = "raw"
        headers[OVERRIDE_CAST_TO_HEADER] = response_cls
        kwargs["extra_headers"] = headers

        # Note: not an `async def` — the coroutine from `func` is returned
        # directly for the caller to await, matching the declared return type.
        return cast(Awaitable[_AsyncAPIResponseT], func(*args, **kwargs))

    return wrapped
|
||||
|
||||
|
||||
def extract_response_type(typ: type[BaseAPIResponse[Any]]) -> type:
    """Given a type like `APIResponse[T]`, returns the generic type variable `T`.

    This also handles the case where a concrete subclass is given, e.g.
    ```py
    class MyResponse(APIResponse[bytes]):
        ...

    extract_response_type(MyResponse) -> bytes
    ```
    """
    # Delegate to the shared helper; `index=0` selects the first (and only)
    # type parameter of whichever listed generic base the type inherits from.
    return extract_type_var_from_base(
        typ,
        generic_bases=cast("tuple[type, ...]", (BaseAPIResponse, APIResponse, AsyncAPIResponse)),
        index=0,
    )
|
||||
# --- file boundary: residual diff hunk marker from extraction (start of next file) ---
|
||||
# Note: initially copied from https://github.com/florimondmanca/httpx-sse/blob/master/src/httpx_sse/_decoders.py
|
||||
from __future__ import annotations
|
||||
|
||||
import abc
|
||||
import json
|
||||
import inspect
|
||||
import warnings
|
||||
from types import TracebackType
|
||||
from typing import TYPE_CHECKING, Any, Generic, TypeVar, Iterator, AsyncIterator, cast
|
||||
from typing_extensions import Self, Protocol, TypeGuard, override, get_origin, runtime_checkable
|
||||
|
||||
import httpx
|
||||
|
||||
from ._utils import is_dict, extract_type_var_from_base
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ._client import Anthropic, AsyncAnthropic
|
||||
|
||||
|
||||
_T = TypeVar("_T")
|
||||
|
||||
|
||||
class _SyncStreamMeta(abc.ABCMeta):
    """Metaclass for `Stream` that patches `isinstance()` checks.

    A previous SDK version had `MessageStream` inherit from `Stream`; the
    override below keeps `isinstance(message_stream, Stream)` returning True
    (with a deprecation warning) so that removing that inheritance is not a
    breaking change.
    """

    @override
    def __instancecheck__(self, instance: Any) -> bool:
        # we override the `isinstance()` check for `Stream`
        # as a previous version of the `MessageStream` class
        # inherited from `Stream` & without this workaround,
        # changing it to not inherit would be a breaking change.

        # Imported lazily to avoid a circular import at module load time.
        from .lib.streaming import MessageStream

        if isinstance(instance, MessageStream):
            warnings.warn(
                "Using `isinstance()` to check if a `MessageStream` object is an instance of `Stream` is deprecated & will be removed in the next major version",
                DeprecationWarning,
                stacklevel=2,
            )
            return True

        # Bug fix: fall back to the default ABCMeta check so that genuine
        # `Stream` (and subclass) instances are still recognised. Returning
        # False unconditionally here made `isinstance(Stream(...), Stream)`
        # evaluate to False.
        return super().__instancecheck__(instance)
|
||||
|
||||
|
||||
class Stream(Generic[_T], metaclass=_SyncStreamMeta):
    """Provides the core interface to iterate over a synchronous stream response."""

    # The underlying HTTP response whose body is consumed as server-sent events.
    response: httpx.Response

    # Decoder that turns raw response bytes into `ServerSentEvent` objects.
    _decoder: SSEBytesDecoder

    def __init__(
        self,
        *,
        cast_to: type[_T],
        response: httpx.Response,
        client: Anthropic,
    ) -> None:
        self.response = response
        self._cast_to = cast_to
        self._client = client
        self._decoder = client._make_sse_decoder()
        # The generator is created eagerly so that `__next__` and `__iter__`
        # share a single iteration state.
        self._iterator = self.__stream__()

    def __next__(self) -> _T:
        return self._iterator.__next__()

    def __iter__(self) -> Iterator[_T]:
        for item in self._iterator:
            yield item

    def _iter_events(self) -> Iterator[ServerSentEvent]:
        # Feed the raw byte stream through the SSE decoder.
        yield from self._decoder.iter_bytes(self.response.iter_bytes())

    def __stream__(self) -> Iterator[_T]:
        """Generator mapping decoded SSE events to parsed `cast_to` objects.

        Raises the client's mapped status error when an `error` event is seen.
        """
        cast_to = cast(Any, self._cast_to)
        response = self.response
        process_data = self._client._process_response_data
        iterator = self._iter_events()

        for sse in iterator:
            # Legacy text-completions API event.
            if sse.event == "completion":
                yield process_data(data=sse.json(), cast_to=cast_to, response=response)

            if (
                sse.event == "message_start"
                or sse.event == "message_delta"
                or sse.event == "message_stop"
                or sse.event == "content_block_start"
                or sse.event == "content_block_delta"
                or sse.event == "content_block_stop"
            ):
                data = sse.json()
                if is_dict(data) and "type" not in data:
                    # Some payloads omit the discriminator field; fill it in
                    # from the SSE event name so the union can be parsed.
                    data["type"] = sse.event

                yield process_data(data=data, cast_to=cast_to, response=response)

            if sse.event == "ping":
                continue

            if sse.event == "error":
                body = sse.data

                try:
                    # Prefer the structured JSON error body when it parses.
                    body = sse.json()
                    err_msg = f"{body}"
                except Exception:
                    err_msg = sse.data or f"Error code: {response.status_code}"

                raise self._client._make_status_error(
                    err_msg,
                    body=body,
                    response=self.response,
                )

        # As we might not fully consume the response stream, we need to close it explicitly
        response.close()

    def __enter__(self) -> Self:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.close()

    def close(self) -> None:
        """
        Close the response and release the connection.

        Automatically called if the response body is read to completion.
        """
        self.response.close()
|
||||
|
||||
|
||||
class _AsyncStreamMeta(abc.ABCMeta):
    """Metaclass for `AsyncStream` that patches `isinstance()` checks.

    A previous SDK version had `AsyncMessageStream` inherit from
    `AsyncStream`; the override below keeps
    `isinstance(async_message_stream, AsyncStream)` returning True (with a
    deprecation warning) so that removing that inheritance is not a breaking
    change.
    """

    @override
    def __instancecheck__(self, instance: Any) -> bool:
        # we override the `isinstance()` check for `AsyncStream`
        # as a previous version of the `AsyncMessageStream` class
        # inherited from `AsyncStream` & without this workaround,
        # changing it to not inherit would be a breaking change.

        # Imported lazily to avoid a circular import at module load time.
        from .lib.streaming import AsyncMessageStream

        if isinstance(instance, AsyncMessageStream):
            warnings.warn(
                "Using `isinstance()` to check if a `AsyncMessageStream` object is an instance of `AsyncStream` is deprecated & will be removed in the next major version",
                DeprecationWarning,
                stacklevel=2,
            )
            return True

        # Bug fix: fall back to the default ABCMeta check so that genuine
        # `AsyncStream` (and subclass) instances are still recognised.
        # Returning False unconditionally here made
        # `isinstance(AsyncStream(...), AsyncStream)` evaluate to False.
        return super().__instancecheck__(instance)
|
||||
|
||||
|
||||
class AsyncStream(Generic[_T], metaclass=_AsyncStreamMeta):
    """Provides the core interface to iterate over an asynchronous stream response."""

    # The underlying HTTP response whose body is consumed as server-sent events.
    response: httpx.Response

    # Decoder that turns raw response bytes into `ServerSentEvent` objects.
    _decoder: SSEDecoder | SSEBytesDecoder

    def __init__(
        self,
        *,
        cast_to: type[_T],
        response: httpx.Response,
        client: AsyncAnthropic,
    ) -> None:
        self.response = response
        self._cast_to = cast_to
        self._client = client
        self._decoder = client._make_sse_decoder()
        # The async generator is created eagerly so that `__anext__` and
        # `__aiter__` share a single iteration state.
        self._iterator = self.__stream__()

    async def __anext__(self) -> _T:
        return await self._iterator.__anext__()

    async def __aiter__(self) -> AsyncIterator[_T]:
        # Defined with `async def` + `yield`: calling it returns an async
        # generator, which satisfies the async-iterator protocol.
        async for item in self._iterator:
            yield item

    async def _iter_events(self) -> AsyncIterator[ServerSentEvent]:
        # Feed the raw byte stream through the SSE decoder.
        async for sse in self._decoder.aiter_bytes(self.response.aiter_bytes()):
            yield sse

    async def __stream__(self) -> AsyncIterator[_T]:
        """Async generator mapping decoded SSE events to parsed `cast_to` objects.

        Raises the client's mapped status error when an `error` event is seen.
        """
        cast_to = cast(Any, self._cast_to)
        response = self.response
        process_data = self._client._process_response_data
        iterator = self._iter_events()

        async for sse in iterator:
            # Legacy text-completions API event.
            if sse.event == "completion":
                yield process_data(data=sse.json(), cast_to=cast_to, response=response)

            if (
                sse.event == "message_start"
                or sse.event == "message_delta"
                or sse.event == "message_stop"
                or sse.event == "content_block_start"
                or sse.event == "content_block_delta"
                or sse.event == "content_block_stop"
            ):
                data = sse.json()
                if is_dict(data) and "type" not in data:
                    # Some payloads omit the discriminator field; fill it in
                    # from the SSE event name so the union can be parsed.
                    data["type"] = sse.event

                yield process_data(data=data, cast_to=cast_to, response=response)

            if sse.event == "ping":
                continue

            if sse.event == "error":
                body = sse.data

                try:
                    # Prefer the structured JSON error body when it parses.
                    body = sse.json()
                    err_msg = f"{body}"
                except Exception:
                    err_msg = sse.data or f"Error code: {response.status_code}"

                raise self._client._make_status_error(
                    err_msg,
                    body=body,
                    response=self.response,
                )

        # As we might not fully consume the response stream, we need to close it explicitly
        await response.aclose()

    async def __aenter__(self) -> Self:
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        await self.close()

    async def close(self) -> None:
        """
        Close the response and release the connection.

        Automatically called if the response body is read to completion.
        """
        await self.response.aclose()
|
||||
|
||||
|
||||
class ServerSentEvent:
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
event: str | None = None,
|
||||
data: str | None = None,
|
||||
id: str | None = None,
|
||||
retry: int | None = None,
|
||||
) -> None:
|
||||
if data is None:
|
||||
data = ""
|
||||
|
||||
self._id = id
|
||||
self._data = data
|
||||
self._event = event or None
|
||||
self._retry = retry
|
||||
|
||||
@property
|
||||
def event(self) -> str | None:
|
||||
return self._event
|
||||
|
||||
@property
|
||||
def id(self) -> str | None:
|
||||
return self._id
|
||||
|
||||
@property
|
||||
def retry(self) -> int | None:
|
||||
return self._retry
|
||||
|
||||
@property
|
||||
def data(self) -> str:
|
||||
return self._data
|
||||
|
||||
def json(self) -> Any:
|
||||
return json.loads(self.data)
|
||||
|
||||
@override
|
||||
def __repr__(self) -> str:
|
||||
return f"ServerSentEvent(event={self.event}, data={self.data}, id={self.id}, retry={self.retry})"
|
||||
|
||||
|
||||
class SSEDecoder:
    """Incremental decoder for the server-sent-events wire format."""

    _data: list[str]  # accumulated `data:` field lines for the in-progress event
    _event: str | None  # `event:` field of the in-progress event, if any
    _retry: int | None  # `retry:` field of the in-progress event, if any
    _last_event_id: str | None  # last seen `id:` field (persists across events)

    def __init__(self) -> None:
        self._event = None
        self._data = []
        self._last_event_id = None
        self._retry = None

    def iter_bytes(self, iterator: Iterator[bytes]) -> Iterator[ServerSentEvent]:
        """Given an iterator that yields raw binary data, iterate over it & yield every event encountered"""
        for chunk in self._iter_chunks(iterator):
            # Split before decoding so splitlines() only uses \r and \n
            for raw_line in chunk.splitlines():
                line = raw_line.decode("utf-8")
                sse = self.decode(line)
                if sse:
                    yield sse

    def _iter_chunks(self, iterator: Iterator[bytes]) -> Iterator[bytes]:
        """Given an iterator that yields raw binary data, iterate over it and yield individual SSE chunks"""
        data = b""
        for chunk in iterator:
            for line in chunk.splitlines(keepends=True):
                data += line
                # A blank line (any of the SSE line-ending styles doubled)
                # terminates an event, so emit the buffered chunk.
                if data.endswith((b"\r\r", b"\n\n", b"\r\n\r\n")):
                    yield data
                    data = b""
        if data:
            # Flush any trailing, unterminated event data.
            yield data

    async def aiter_bytes(self, iterator: AsyncIterator[bytes]) -> AsyncIterator[ServerSentEvent]:
        """Given an iterator that yields raw binary data, iterate over it & yield every event encountered"""
        async for chunk in self._aiter_chunks(iterator):
            # Split before decoding so splitlines() only uses \r and \n
            for raw_line in chunk.splitlines():
                line = raw_line.decode("utf-8")
                sse = self.decode(line)
                if sse:
                    yield sse

    async def _aiter_chunks(self, iterator: AsyncIterator[bytes]) -> AsyncIterator[bytes]:
        """Given an iterator that yields raw binary data, iterate over it and yield individual SSE chunks"""
        data = b""
        async for chunk in iterator:
            for line in chunk.splitlines(keepends=True):
                data += line
                # A blank line (any of the SSE line-ending styles doubled)
                # terminates an event, so emit the buffered chunk.
                if data.endswith((b"\r\r", b"\n\n", b"\r\n\r\n")):
                    yield data
                    data = b""
        if data:
            # Flush any trailing, unterminated event data.
            yield data

    def decode(self, line: str) -> ServerSentEvent | None:
        """Feed one decoded line to the state machine.

        Returns a completed `ServerSentEvent` when a blank line terminates an
        event, otherwise accumulates state and returns None.
        """
        # See: https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation  # noqa: E501

        if not line:
            if not self._event and not self._data and not self._last_event_id and self._retry is None:
                # Blank line with no accumulated state: nothing to dispatch.
                return None

            sse = ServerSentEvent(
                event=self._event,
                data="\n".join(self._data),
                id=self._last_event_id,
                retry=self._retry,
            )

            # NOTE: as per the SSE spec, do not reset last_event_id.
            self._event = None
            self._data = []
            self._retry = None

            return sse

        if line.startswith(":"):
            # Comment line per the SSE spec; ignored.
            return None

        fieldname, _, value = line.partition(":")

        if value.startswith(" "):
            # A single leading space after the colon is not part of the value.
            value = value[1:]

        if fieldname == "event":
            self._event = value
        elif fieldname == "data":
            self._data.append(value)
        elif fieldname == "id":
            if "\0" in value:
                # Spec: ids containing NUL are ignored.
                pass
            else:
                self._last_event_id = value
        elif fieldname == "retry":
            try:
                self._retry = int(value)
            except (TypeError, ValueError):
                # Spec: non-integer retry values are ignored.
                pass
        else:
            pass  # Field is ignored.

        return None
|
||||
|
||||
|
||||
@runtime_checkable
class SSEBytesDecoder(Protocol):
    """Structural interface for anything that decodes raw bytes into `ServerSentEvent`s."""

    def iter_bytes(self, iterator: Iterator[bytes]) -> Iterator[ServerSentEvent]:
        """Given an iterator that yields raw binary data, iterate over it & yield every event encountered"""
        ...

    def aiter_bytes(self, iterator: AsyncIterator[bytes]) -> AsyncIterator[ServerSentEvent]:
        """Given an async iterator that yields raw binary data, iterate over it & yield every event encountered"""
        ...
|
||||
|
||||
|
||||
def is_stream_class_type(typ: type) -> TypeGuard[type[Stream[object]] | type[AsyncStream[object]]]:
    """TypeGuard for determining whether or not the given type is a subclass of `Stream` / `AsyncStream`"""
    # Unwrap parameterised generics, e.g. `Stream[Foo]` -> `Stream`.
    base = get_origin(typ) or typ
    if not inspect.isclass(base):
        return False
    return issubclass(base, (Stream, AsyncStream))
|
||||
|
||||
|
||||
def extract_stream_chunk_type(
    stream_cls: type,
    *,
    failure_message: str | None = None,
) -> type:
    """Given a type like `Stream[T]`, returns the generic type variable `T`.

    This also handles the case where a concrete subclass is given, e.g.
    ```py
    class MyStream(Stream[bytes]):
        ...

    extract_stream_chunk_type(MyStream) -> bytes
    ```
    """
    # Imported locally to avoid a circular import with the base client module.
    from ._base_client import Stream, AsyncStream

    # `index=0` selects the first (and only) type parameter of whichever
    # stream base class `stream_cls` inherits from.
    return extract_type_var_from_base(
        stream_cls,
        index=0,
        generic_bases=cast("tuple[type, ...]", (Stream, AsyncStream)),
        failure_message=failure_message,
    )
|
||||
# --- file boundary: residual diff hunk marker from extraction (start of next file) ---
|
||||
from __future__ import annotations
|
||||
|
||||
from os import PathLike
|
||||
from typing import (
|
||||
IO,
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Dict,
|
||||
List,
|
||||
Type,
|
||||
Tuple,
|
||||
Union,
|
||||
Mapping,
|
||||
TypeVar,
|
||||
Callable,
|
||||
Iterator,
|
||||
Optional,
|
||||
Sequence,
|
||||
)
|
||||
from typing_extensions import (
|
||||
Set,
|
||||
Literal,
|
||||
Protocol,
|
||||
TypeAlias,
|
||||
TypedDict,
|
||||
SupportsIndex,
|
||||
overload,
|
||||
override,
|
||||
runtime_checkable,
|
||||
)
|
||||
|
||||
import httpx
|
||||
import pydantic
|
||||
from httpx import URL, Proxy, Timeout, Response, BaseTransport, AsyncBaseTransport
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ._models import BaseModel
|
||||
from ._response import APIResponse, AsyncAPIResponse
|
||||
from ._legacy_response import HttpxBinaryResponseContent
|
||||
|
||||
# Re-exported aliases for httpx's transport base classes.
Transport = BaseTransport
AsyncTransport = AsyncBaseTransport

# Query-string parameters and request body payloads.
Query = Mapping[str, object]
Body = object
AnyMapping = Mapping[str, object]

# Type variable bound to any pydantic model.
ModelT = TypeVar("ModelT", bound=pydantic.BaseModel)
_T = TypeVar("_T")


# Approximates httpx internal ProxiesTypes and RequestFiles types
# while adding support for `PathLike` instances
ProxiesDict = Dict["str | URL", Union[None, str, URL, Proxy]]
ProxiesTypes = Union[str, Proxy, ProxiesDict]
|
||||
# File-upload input types. These are broader than httpx's own, adding
# `PathLike` support which our file-handling helpers convert.
if TYPE_CHECKING:
    Base64FileInput = Union[IO[bytes], PathLike[str]]
    FileContent = Union[IO[bytes], bytes, PathLike[str]]
else:
    Base64FileInput = Union[IO[bytes], PathLike]
    FileContent = Union[IO[bytes], bytes, PathLike]  # PathLike is not subscriptable in Python 3.8.
FileTypes = Union[
    # file (or bytes)
    FileContent,
    # (filename, file (or bytes))
    Tuple[Optional[str], FileContent],
    # (filename, file (or bytes), content_type)
    Tuple[Optional[str], FileContent, Optional[str]],
    # (filename, file (or bytes), content_type, headers)
    Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]],
]
RequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]

# duplicate of the above but without our custom file support
HttpxFileContent = Union[IO[bytes], bytes]
HttpxFileTypes = Union[
    # file (or bytes)
    HttpxFileContent,
    # (filename, file (or bytes))
    Tuple[Optional[str], HttpxFileContent],
    # (filename, file (or bytes), content_type)
    Tuple[Optional[str], HttpxFileContent, Optional[str]],
    # (filename, file (or bytes), content_type, headers)
    Tuple[Optional[str], HttpxFileContent, Optional[str], Mapping[str, str]],
]
HttpxRequestFiles = Union[Mapping[str, HttpxFileTypes], Sequence[Tuple[str, HttpxFileTypes]]]
|
||||
|
||||
# Workaround to support (cast_to: Type[ResponseT]) -> ResponseT
# where ResponseT includes `None`. In order to support directly
# passing `None`, overloads would have to be defined for every
# method that uses `ResponseT` which would lead to an unacceptable
# amount of code duplication and make it unreadable. See _base_client.py
# for example usage.
#
# This unfortunately means that you will either have
# to import this type and pass it explicitly:
#
# from anthropic import NoneType
# client.get('/foo', cast_to=NoneType)
#
# or build it yourself:
#
# client.get('/foo', cast_to=type(None))
if TYPE_CHECKING:
    # Statically this is just `type(None)`; declared (not assigned) so type
    # checkers treat it as a proper type annotation target.
    NoneType: Type[None]
else:
    NoneType = type(None)
|
||||
|
||||
|
||||
class RequestOptions(TypedDict, total=False):
    """Per-request overrides accepted by the client's low-level request methods.

    `total=False` makes every key optional.
    """

    headers: Headers
    max_retries: int
    timeout: float | Timeout | None
    params: Query
    extra_json: AnyMapping
    idempotency_key: str
    follow_redirects: bool
|
||||
|
||||
|
||||
# Sentinel class used until PEP 0661 is accepted
class NotGiven:
    """
    For parameters with a meaningful None value, we need to distinguish between
    the user explicitly passing None, and the user not passing the parameter at
    all.

    User code shouldn't need to use not_given directly.

    For example:

    ```py
    def create(timeout: Timeout | None | NotGiven = not_given): ...


    create(timeout=1)  # 1s timeout
    create(timeout=None)  # No timeout
    create()  # Default timeout behavior
    ```
    """

    def __bool__(self) -> Literal[False]:
        # Falsy so `if timeout:`-style checks treat "not given" like absent.
        return False

    @override
    def __repr__(self) -> str:
        return "NOT_GIVEN"


# Module-level sentinel instances; compare with `is` / `is_given()`.
not_given = NotGiven()
# for backwards compatibility:
NOT_GIVEN = NotGiven()
|
||||
|
||||
|
||||
class Omit:
    """Sentinel used to strip a default value from a request entirely.

    Unlike `None`, passing `omit` means "do not send this at all":

    ```py
    # as the default `Content-Type` header is `application/json` that will be sent
    client.post("/upload/files", files={"file": b"my raw file content"})

    # you can't explicitly override the header as it has to be dynamically generated
    # to look something like: 'multipart/form-data; boundary=0d8382fcf5f8c3be01ca2e11002d2983'
    client.post(..., headers={"Content-Type": "multipart/form-data"})

    # instead you can remove the default `application/json` header by passing omit
    client.post(..., headers={"Content-Type": omit})
    ```
    """

    def __bool__(self) -> Literal[False]:
        # Always falsy, mirroring `NotGiven`.
        return False


# Module-level sentinel instance.
omit = Omit()
|
||||
|
||||
|
||||
@runtime_checkable
class ModelBuilderProtocol(Protocol):
    """Structural type for models that construct themselves from a raw HTTP
    response plus its parsed data via a `build` classmethod."""

    @classmethod
    def build(
        cls: type[_T],
        *,
        response: Response,
        data: object,
    ) -> _T: ...
|
||||
|
||||
|
||||
# Header values may be the `Omit` sentinel to strip a default header.
Headers = Mapping[str, Union[str, Omit]]


class HeadersLikeProtocol(Protocol):
    """Anything with a `.get(key) -> str | None` method (e.g. `httpx.Headers`)."""

    def get(self, __key: str) -> str | None: ...


HeadersLike = Union[Headers, HeadersLikeProtocol]
|
||||
|
||||
# Any type a response can be deserialized into; used as the bound for the
# `cast_to` parameter throughout the client.
ResponseT = TypeVar(
    "ResponseT",
    bound=Union[
        object,
        str,
        None,
        "BaseModel",
        List[Any],
        Dict[str, Any],
        Response,
        ModelBuilderProtocol,
        "APIResponse[Any]",
        "AsyncAPIResponse[Any]",
        "HttpxBinaryResponseContent",
    ],
)

StrBytesIntFloat = Union[str, bytes, int, float]

# Note: copied from Pydantic
# https://github.com/pydantic/pydantic/blob/6f31f8f68ef011f84357330186f603ff295312fd/pydantic/main.py#L79
IncEx: TypeAlias = Union[Set[int], Set[str], Mapping[int, Union["IncEx", bool]], Mapping[str, Union["IncEx", bool]]]

# Callable applied to a parsed response before it is returned to the caller.
PostParser = Callable[[Any], Any]
|
||||
|
||||
|
||||
@runtime_checkable
class InheritsGeneric(Protocol):
    """Represents a type that has inherited from `Generic`

    The `__orig_bases__` property can be used to determine the resolved
    type variable for a given base class.
    """

    __orig_bases__: tuple[_GenericAlias]


class _GenericAlias(Protocol):
    """Structural stand-in for `typing`'s private `_GenericAlias` objects."""

    __origin__: type[object]
|
||||
|
||||
|
||||
class HttpxSendArgs(TypedDict, total=False):
    """Keyword arguments forwarded directly to `httpx`'s `send()`."""

    auth: httpx.Auth
    follow_redirects: bool


# Covariant type variable for read-only sequence protocols below.
_T_co = TypeVar("_T_co", covariant=True)
|
||||
|
||||
|
||||
if TYPE_CHECKING:
    # This works because str.__contains__ does not accept object (either in typeshed or at runtime)
    # https://github.com/hauntsaninja/useful_types/blob/5e9710f3875107d068e7679fd7fec9cfab0eff3b/useful_types/__init__.py#L285
    class SequenceNotStr(Protocol[_T_co]):
        """A read-only `Sequence` that is statically known not to be a `str`."""

        @overload
        def __getitem__(self, index: SupportsIndex, /) -> _T_co: ...
        @overload
        def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ...
        def __contains__(self, value: object, /) -> bool: ...
        def __len__(self) -> int: ...
        def __iter__(self) -> Iterator[_T_co]: ...
        def index(self, value: Any, start: int = 0, stop: int = ..., /) -> int: ...
        def count(self, value: Any, /) -> int: ...
        def __reversed__(self) -> Iterator[_T_co]: ...
else:
    # just point this to a normal `Sequence` at runtime to avoid having to special case
    # deserializing our custom sequence type
    SequenceNotStr = Sequence
|
||||
# --- file boundary: residual diff hunk marker from extraction (start of next file) ---
|
||||
from ._sync import asyncify as asyncify
|
||||
from ._proxy import LazyProxy as LazyProxy
|
||||
from ._utils import (
|
||||
flatten as flatten,
|
||||
is_dict as is_dict,
|
||||
is_list as is_list,
|
||||
is_given as is_given,
|
||||
is_tuple as is_tuple,
|
||||
json_safe as json_safe,
|
||||
lru_cache as lru_cache,
|
||||
is_mapping as is_mapping,
|
||||
is_tuple_t as is_tuple_t,
|
||||
is_iterable as is_iterable,
|
||||
is_sequence as is_sequence,
|
||||
coerce_float as coerce_float,
|
||||
is_mapping_t as is_mapping_t,
|
||||
removeprefix as removeprefix,
|
||||
removesuffix as removesuffix,
|
||||
extract_files as extract_files,
|
||||
is_sequence_t as is_sequence_t,
|
||||
required_args as required_args,
|
||||
coerce_boolean as coerce_boolean,
|
||||
coerce_integer as coerce_integer,
|
||||
file_from_path as file_from_path,
|
||||
strip_not_given as strip_not_given,
|
||||
deepcopy_minimal as deepcopy_minimal,
|
||||
get_async_library as get_async_library,
|
||||
maybe_coerce_float as maybe_coerce_float,
|
||||
get_required_header as get_required_header,
|
||||
maybe_coerce_boolean as maybe_coerce_boolean,
|
||||
maybe_coerce_integer as maybe_coerce_integer,
|
||||
)
|
||||
from ._compat import (
|
||||
get_args as get_args,
|
||||
is_union as is_union,
|
||||
get_origin as get_origin,
|
||||
is_typeddict as is_typeddict,
|
||||
is_literal_type as is_literal_type,
|
||||
)
|
||||
from ._typing import (
|
||||
is_list_type as is_list_type,
|
||||
is_union_type as is_union_type,
|
||||
extract_type_arg as extract_type_arg,
|
||||
is_iterable_type as is_iterable_type,
|
||||
is_required_type as is_required_type,
|
||||
is_sequence_type as is_sequence_type,
|
||||
is_annotated_type as is_annotated_type,
|
||||
is_type_alias_type as is_type_alias_type,
|
||||
strip_annotated_type as strip_annotated_type,
|
||||
extract_type_var_from_base as extract_type_var_from_base,
|
||||
)
|
||||
from ._streams import consume_sync_iterator as consume_sync_iterator, consume_async_iterator as consume_async_iterator
|
||||
from ._transform import (
|
||||
PropertyInfo as PropertyInfo,
|
||||
transform as transform,
|
||||
async_transform as async_transform,
|
||||
maybe_transform as maybe_transform,
|
||||
async_maybe_transform as async_maybe_transform,
|
||||
)
|
||||
from ._reflection import (
|
||||
function_has_argument as function_has_argument,
|
||||
assert_signatures_in_sync as assert_signatures_in_sync,
|
||||
)
|
||||
from ._datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
|
||||
@@ -0,0 +1,45 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
import typing_extensions
|
||||
from typing import Any, Type, Union, Literal, Optional
|
||||
from datetime import date, datetime
|
||||
from typing_extensions import get_args as _get_args, get_origin as _get_origin
|
||||
|
||||
from .._types import StrBytesIntFloat
|
||||
from ._datetime_parse import parse_date as _parse_date, parse_datetime as _parse_datetime
|
||||
|
||||
_LITERAL_TYPES = {Literal, typing_extensions.Literal}
|
||||
|
||||
|
||||
def get_args(tp: type[Any]) -> tuple[Any, ...]:
    """Thin wrapper around ``typing_extensions.get_args``."""
    args = _get_args(tp)
    return args
|
||||
|
||||
|
||||
def get_origin(tp: type[Any]) -> type[Any] | None:
    """Thin wrapper around ``typing_extensions.get_origin``."""
    origin = _get_origin(tp)
    return origin
|
||||
|
||||
|
||||
def is_union(tp: Optional[Type[Any]]) -> bool:
    """Whether *tp* is the ``Union`` special form (or ``types.UnionType`` on 3.10+)."""
    if sys.version_info >= (3, 10):
        import types

        return tp is Union or tp is types.UnionType
    return tp is Union  # type: ignore[comparison-overlap]
|
||||
|
||||
|
||||
def is_typeddict(tp: Type[Any]) -> bool:
|
||||
return typing_extensions.is_typeddict(tp)
|
||||
|
||||
|
||||
def is_literal_type(tp: Type[Any]) -> bool:
    """Whether *tp* is a ``Literal[...]`` type (typing or typing_extensions flavour)."""
    origin = get_origin(tp)
    return origin in _LITERAL_TYPES
|
||||
|
||||
|
||||
def parse_date(value: Union[date, StrBytesIntFloat]) -> date:
    """Parse *value* into a ``datetime.date``; delegates to ``_datetime_parse.parse_date``."""
    return _parse_date(value)
|
||||
|
||||
|
||||
def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:
    """Parse *value* into a ``datetime.datetime``; delegates to ``_datetime_parse.parse_datetime``."""
    return _parse_datetime(value)
|
||||
@@ -0,0 +1,136 @@
|
||||
"""
|
||||
This file contains code from https://github.com/pydantic/pydantic/blob/main/pydantic/v1/datetime_parse.py
|
||||
without the Pydantic v1 specific errors.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
from typing import Dict, Union, Optional
|
||||
from datetime import date, datetime, timezone, timedelta
|
||||
|
||||
from .._types import StrBytesIntFloat
|
||||
|
||||
# Regex fragments for ISO-8601-style dates and times (adapted from pydantic v1's
# datetime_parse; see the module docstring).
date_expr = r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})"
time_expr = (
    r"(?P<hour>\d{1,2}):(?P<minute>\d{1,2})"
    r"(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?"
    r"(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$"
)

date_re = re.compile(f"{date_expr}$")
# a datetime accepts either "T" or a single space between the date and time parts
datetime_re = re.compile(f"{date_expr}[T ]{time_expr}")


# naive Unix epoch; a tzinfo is attached later in _from_unix_seconds
EPOCH = datetime(1970, 1, 1)
# if greater than this, the number is in ms, if less than or equal it's in seconds
# (in seconds this is 11th October 2603, in ms it's 20th August 1970)
MS_WATERSHED = int(2e10)
# slightly more than datetime.max in ns - (datetime.max - EPOCH).total_seconds() * 1e9
MAX_NUMBER = int(3e20)
|
||||
|
||||
|
||||
def _get_numeric(value: StrBytesIntFloat, native_expected_type: str) -> Union[None, int, float]:
    """Coerce *value* to a number if possible.

    Returns the value unchanged for ints/floats, a float for numeric
    strings/bytes, and ``None`` for non-numeric strings. Raises ``TypeError``
    for types that cannot be converted at all.
    """
    if isinstance(value, (int, float)):
        return value

    try:
        return float(value)
    except ValueError:
        # a well-formed string that simply isn't numeric
        return None
    except TypeError:
        raise TypeError(f"invalid type; expected {native_expected_type}, string, bytes, int or float") from None
|
||||
|
||||
|
||||
def _from_unix_seconds(seconds: Union[int, float]) -> datetime:
    """Convert a Unix timestamp to an aware UTC datetime.

    Magnitudes above MS_WATERSHED are repeatedly divided by 1000 (ms/us/ns
    inputs); values beyond +/-MAX_NUMBER clamp to datetime.max/min.
    """
    if seconds > MAX_NUMBER:
        return datetime.max
    if seconds < -MAX_NUMBER:
        return datetime.min

    while abs(seconds) > MS_WATERSHED:
        seconds /= 1000

    return (EPOCH + timedelta(seconds=seconds)).replace(tzinfo=timezone.utc)
|
||||
|
||||
|
||||
def _parse_timezone(value: Optional[str]) -> Union[None, int, timezone]:
    """Convert a tz suffix ("Z", "+HH:MM", "-HHMM", "+HH") to a ``timezone``, or None."""
    if value is None:
        return None
    if value == "Z":
        return timezone.utc

    hours = int(value[1:3])
    # a bare "+HH"/"-HH" (3 chars) has no minutes component
    minutes = int(value[-2:]) if len(value) > 3 else 0
    total = 60 * hours + minutes
    if value[0] == "-":
        total = -total
    return timezone(timedelta(minutes=total))
|
||||
|
||||
|
||||
def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:
    """
    Parse a datetime/int/float/string and return a datetime.datetime.

    This function supports time zone offsets. When the input contains one,
    the output uses a timezone with a fixed offset from UTC.

    Raise ValueError if the input is well formatted but not a valid datetime.
    Raise ValueError if the input isn't well formatted.
    """
    # datetimes pass through untouched
    if isinstance(value, datetime):
        return value

    # numeric input (or a numeric string) is treated as a Unix timestamp
    number = _get_numeric(value, "datetime")
    if number is not None:
        return _from_unix_seconds(number)

    if isinstance(value, bytes):
        value = value.decode()

    assert not isinstance(value, (float, int))

    match = datetime_re.match(value)
    if match is None:
        raise ValueError("invalid datetime format")

    kw = match.groupdict()
    if kw["microsecond"]:
        # right-pad so e.g. ".5" means 500000us rather than 5us
        kw["microsecond"] = kw["microsecond"].ljust(6, "0")

    tzinfo = _parse_timezone(kw.pop("tzinfo"))
    kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}
    kw_["tzinfo"] = tzinfo

    # may raise ValueError for well-formed but out-of-range fields (e.g. month 13)
    return datetime(**kw_)  # type: ignore
|
||||
|
||||
|
||||
def parse_date(value: Union[date, StrBytesIntFloat]) -> date:
    """
    Parse a date/int/float/string and return a datetime.date.

    Raise ValueError if the input is well formatted but not a valid date.
    Raise ValueError if the input isn't well formatted.
    """
    if isinstance(value, date):
        if isinstance(value, datetime):
            # drop the time component of datetimes
            return value.date()
        else:
            return value

    # numeric input (or a numeric string) is treated as a Unix timestamp
    number = _get_numeric(value, "date")
    if number is not None:
        return _from_unix_seconds(number).date()

    if isinstance(value, bytes):
        value = value.decode()

    assert not isinstance(value, (float, int))
    match = date_re.match(value)
    if match is None:
        raise ValueError("invalid date format")

    kw = {k: int(v) for k, v in match.groupdict().items()}

    try:
        return date(**kw)
    except ValueError:
        # well-formed but out of range, e.g. 2020-13-45
        raise ValueError("invalid date format") from None
|
||||
@@ -0,0 +1,64 @@
|
||||
"""
|
||||
This file includes code adapted from HTTPX's utility module
|
||||
(https://github.com/encode/httpx/blob/336204f0121a9aefdebac5cacd81f912bafe8057/httpx/_utils.py).
|
||||
We implement custom proxy handling to support configurations like `socket_options`,
|
||||
which are not currently configurable through the HTTPX client.
|
||||
For more context, see: https://github.com/encode/httpx/discussions/3514
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import ipaddress
|
||||
from typing import Mapping
|
||||
from urllib.request import getproxies
|
||||
|
||||
|
||||
def is_ipv4_hostname(hostname: str) -> bool:
    """True if the part of *hostname* before any "/" parses as an IPv4 address."""
    candidate = hostname.split("/")[0]
    try:
        ipaddress.IPv4Address(candidate)
    except Exception:
        return False
    return True
|
||||
|
||||
|
||||
def is_ipv6_hostname(hostname: str) -> bool:
    """True if the part of *hostname* before any "/" parses as an IPv6 address."""
    candidate = hostname.split("/")[0]
    try:
        ipaddress.IPv6Address(candidate)
    except Exception:
        return False
    return True
|
||||
|
||||
|
||||
def get_environment_proxies() -> Mapping[str, str | None]:
|
||||
"""
|
||||
Gets the proxy mappings based on environment variables.
|
||||
We use our own logic to parse these variables, as HTTPX
|
||||
doesn’t allow full configuration of the underlying
|
||||
transport when proxies are set via environment variables.
|
||||
"""
|
||||
|
||||
proxy_info = getproxies()
|
||||
mounts: dict[str, str | None] = {}
|
||||
|
||||
for scheme in ("http", "https", "all"):
|
||||
if proxy_info.get(scheme):
|
||||
hostname = proxy_info[scheme]
|
||||
mounts[f"{scheme}://"] = hostname if "://" in hostname else f"http://{hostname}"
|
||||
|
||||
no_proxy_hosts = [host.strip() for host in proxy_info.get("no", "").split(",")]
|
||||
for hostname in no_proxy_hosts:
|
||||
if hostname == "*":
|
||||
return {}
|
||||
elif hostname:
|
||||
if "://" in hostname:
|
||||
mounts[hostname] = None
|
||||
elif is_ipv4_hostname(hostname):
|
||||
mounts[f"all://{hostname}"] = None
|
||||
elif is_ipv6_hostname(hostname):
|
||||
mounts[f"all://[{hostname}]"] = None
|
||||
elif hostname.lower() == "localhost":
|
||||
mounts[f"all://{hostname}"] = None
|
||||
else:
|
||||
mounts[f"all://*{hostname}"] = None
|
||||
|
||||
return mounts
|
||||
@@ -0,0 +1,25 @@
|
||||
import os
|
||||
import logging
|
||||
|
||||
# SDK-wide logger plus the underlying HTTP client's logger; both are
# level-adjusted together by setup_logging().
logger: logging.Logger = logging.getLogger("anthropic")
httpx_logger: logging.Logger = logging.getLogger("httpx")
|
||||
|
||||
|
||||
def _basic_config() -> None:
    """Install the SDK's standard root logging format.

    Example record:
    [2023-10-05 14:12:26 - anthropic._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK"
    """
    logging.basicConfig(
        format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
|
||||
|
||||
|
||||
def setup_logging() -> None:
    """Configure SDK logging from the ANTHROPIC_LOG environment variable.

    "debug" and "info" enable the corresponding level on both the SDK and
    httpx loggers; any other value leaves logging untouched.
    """
    level_by_env = {"debug": logging.DEBUG, "info": logging.INFO}
    level = level_by_env.get(os.environ.get("ANTHROPIC_LOG", ""))
    if level is None:
        return

    _basic_config()
    logger.setLevel(level)
    httpx_logger.setLevel(level)
|
||||
@@ -0,0 +1,65 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Generic, TypeVar, Iterable, cast
|
||||
from typing_extensions import override
|
||||
|
||||
T = TypeVar("T")
|
||||
|
||||
|
||||
class LazyProxy(Generic[T], ABC):
    """Implements data methods to pretend that an instance is another instance.

    This includes forwarding attribute access and other methods.

    Subclasses implement `__load__()` to produce the object being proxied;
    every dunder below re-resolves the target via `__get_proxied__()` on
    each call.
    """

    # Note: we have to special case proxies that themselves return proxies
    # to support using a proxy as a catch-all for any random access, e.g. `proxy.foo.bar.baz`

    def __getattr__(self, attr: str) -> object:
        # Forward unknown attributes to the proxied object.
        proxied = self.__get_proxied__()
        if isinstance(proxied, LazyProxy):
            return proxied  # pyright: ignore
        return getattr(proxied, attr)

    @override
    def __repr__(self) -> str:
        proxied = self.__get_proxied__()
        if isinstance(proxied, LazyProxy):
            return proxied.__class__.__name__
        return repr(self.__get_proxied__())

    @override
    def __str__(self) -> str:
        proxied = self.__get_proxied__()
        if isinstance(proxied, LazyProxy):
            return proxied.__class__.__name__
        return str(proxied)

    @override
    def __dir__(self) -> Iterable[str]:
        proxied = self.__get_proxied__()
        if isinstance(proxied, LazyProxy):
            return []
        return proxied.__dir__()

    # Reporting the proxied object's class makes `isinstance(proxy, Target)`
    # checks succeed, since isinstance consults `__class__`.
    @property  # type: ignore
    @override
    def __class__(self) -> type:  # pyright: ignore
        try:
            proxied = self.__get_proxied__()
        except Exception:
            # if loading fails, fall back to the proxy's own type
            return type(self)
        if issubclass(type(proxied), LazyProxy):
            return type(proxied)
        return proxied.__class__

    def __get_proxied__(self) -> T:
        return self.__load__()

    def __as_proxied__(self) -> T:
        """Helper method that returns the current proxy, typed as the loaded object"""
        return cast(T, self)

    @abstractmethod
    def __load__(self) -> T: ...
|
||||
@@ -0,0 +1,42 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import inspect
|
||||
from typing import Any, Callable
|
||||
|
||||
|
||||
def function_has_argument(func: Callable[..., Any], arg_name: str) -> bool:
    """Report whether *func* declares a parameter named *arg_name*."""
    return arg_name in inspect.signature(func).parameters
|
||||
|
||||
|
||||
def assert_signatures_in_sync(
    source_func: Callable[..., Any],
    check_func: Callable[..., Any],
    *,
    exclude_params: set[str] = set(),  # read-only; the shared default is never mutated
) -> None:
    """Ensure that the signature of the second function matches the first.

    Every parameter of ``source_func`` (except those in *exclude_params*)
    must exist on ``check_func`` with an identical annotation.

    Raises:
        AssertionError: listing every missing or differently-annotated param.
    """
    check_sig = inspect.signature(check_func)
    source_sig = inspect.signature(source_func)

    errors: list[str] = []

    for name, source_param in source_sig.parameters.items():
        if name in exclude_params:
            continue

        custom_param = check_sig.parameters.get(name)
        if not custom_param:
            errors.append(f"the `{name}` param is missing")
            continue

        if custom_param.annotation != source_param.annotation:
            # fixed grammar: message previously read "are do not match"
            errors.append(
                f"types for the `{name}` param do not match; source={repr(source_param.annotation)} checking={repr(custom_param.annotation)}"
            )

    if errors:
        raise AssertionError(f"{len(errors)} errors encountered when comparing signatures:\n\n" + "\n\n".join(errors))
|
||||
@@ -0,0 +1,24 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
from typing_extensions import override
|
||||
|
||||
from ._proxy import LazyProxy
|
||||
|
||||
|
||||
class ResourcesProxy(LazyProxy[Any]):
    """A lazy stand-in for the `anthropic.resources` module.

    Lets users reference `anthropic.resources` right after `import anthropic`
    while deferring the actual module import until first use.
    """

    @override
    def __load__(self) -> Any:
        import importlib

        return importlib.import_module("anthropic.resources")
|
||||
|
||||
|
||||
# module-level lazy alias: resolves to `anthropic.resources` on first access
resources = ResourcesProxy().__as_proxied__()
|
||||
@@ -0,0 +1,12 @@
|
||||
from typing import Any
|
||||
from typing_extensions import Iterator, AsyncIterator
|
||||
|
||||
|
||||
def consume_sync_iterator(iterator: Iterator[Any]) -> None:
    """Exhaust *iterator*, discarding every item."""
    for _item in iterator:
        pass
|
||||
|
||||
|
||||
async def consume_async_iterator(iterator: AsyncIterator[Any]) -> None:
    """Exhaust the async *iterator*, discarding every item."""
    async for _item in iterator:
        pass
|
||||
@@ -0,0 +1,58 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import functools
|
||||
from typing import TypeVar, Callable, Awaitable
|
||||
from typing_extensions import ParamSpec
|
||||
|
||||
import anyio
|
||||
import sniffio
|
||||
import anyio.to_thread
|
||||
|
||||
T_Retval = TypeVar("T_Retval")
|
||||
T_ParamSpec = ParamSpec("T_ParamSpec")
|
||||
|
||||
|
||||
async def to_thread(
    func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs
) -> T_Retval:
    """Run *func* in a worker thread and await its result.

    Dispatches to ``asyncio.to_thread`` under asyncio and to
    ``anyio.to_thread.run_sync`` under any other async library (e.g. trio).
    """
    if sniffio.current_async_library() != "asyncio":
        return await anyio.to_thread.run_sync(
            functools.partial(func, *args, **kwargs),
        )

    return await asyncio.to_thread(func, *args, **kwargs)
|
||||
|
||||
|
||||
# inspired by `asyncer`, https://github.com/tiangolo/asyncer
|
||||
def asyncify(function: Callable[T_ParamSpec, T_Retval]) -> Callable[T_ParamSpec, Awaitable[T_Retval]]:
    """
    Take a blocking function and create an async one that receives the same
    positional and keyword arguments.

    Usage:

    ```python
    def blocking_func(arg1, arg2, kwarg1=None):
        # blocking code
        return result


    result = asyncify(blocking_function)(arg1, arg2, kwarg1=value1)
    ```

    ## Arguments

    `function`: a blocking regular callable (e.g. a function)

    ## Return

    An async function that takes the same positional and keyword arguments as the
    original one, that when called runs the same original function in a thread worker
    and returns the result.
    """

    # functools.wraps preserves the wrapped callable's metadata (__name__,
    # __doc__, __module__, ...) so introspection/debugging see the original.
    @functools.wraps(function)
    async def wrapper(*args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs) -> T_Retval:
        return await to_thread(function, *args, **kwargs)

    return wrapper
|
||||
@@ -0,0 +1,457 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
import base64
|
||||
import pathlib
|
||||
from typing import Any, Mapping, TypeVar, cast
|
||||
from datetime import date, datetime
|
||||
from typing_extensions import Literal, get_args, override, get_type_hints as _get_type_hints
|
||||
|
||||
import anyio
|
||||
import pydantic
|
||||
|
||||
from ._utils import (
|
||||
is_list,
|
||||
is_given,
|
||||
lru_cache,
|
||||
is_mapping,
|
||||
is_iterable,
|
||||
is_sequence,
|
||||
)
|
||||
from .._files import is_base64_file_input
|
||||
from ._compat import get_origin, is_typeddict
|
||||
from ._typing import (
|
||||
is_list_type,
|
||||
is_union_type,
|
||||
extract_type_arg,
|
||||
is_iterable_type,
|
||||
is_required_type,
|
||||
is_sequence_type,
|
||||
is_annotated_type,
|
||||
strip_annotated_type,
|
||||
)
|
||||
|
||||
# generic return type preserved by transform()/async_transform()
_T = TypeVar("_T")


# TODO: support for drilling globals() and locals()
# TODO: ensure works correctly with forward references in all cases

# allowed values for `PropertyInfo.format`
PropertyFormat = Literal["iso8601", "base64", "custom"]
|
||||
|
||||
|
||||
class PropertyInfo:
|
||||
"""Metadata class to be used in Annotated types to provide information about a given type.
|
||||
|
||||
For example:
|
||||
|
||||
class MyParams(TypedDict):
|
||||
account_holder_name: Annotated[str, PropertyInfo(alias='accountHolderName')]
|
||||
|
||||
This means that {'account_holder_name': 'Robert'} will be transformed to {'accountHolderName': 'Robert'} before being sent to the API.
|
||||
"""
|
||||
|
||||
alias: str | None
|
||||
format: PropertyFormat | None
|
||||
format_template: str | None
|
||||
discriminator: str | None
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
alias: str | None = None,
|
||||
format: PropertyFormat | None = None,
|
||||
format_template: str | None = None,
|
||||
discriminator: str | None = None,
|
||||
) -> None:
|
||||
self.alias = alias
|
||||
self.format = format
|
||||
self.format_template = format_template
|
||||
self.discriminator = discriminator
|
||||
|
||||
@override
|
||||
def __repr__(self) -> str:
|
||||
return f"{self.__class__.__name__}(alias='{self.alias}', format={self.format}, format_template='{self.format_template}', discriminator='{self.discriminator}')"
|
||||
|
||||
|
||||
def maybe_transform(
    data: object,
    expected_type: object,
) -> Any | None:
    """`None`-tolerant wrapper over `transform()`.

    See `transform()` for more details.
    """
    return None if data is None else transform(data, expected_type)
|
||||
|
||||
|
||||
# Wrapper over _transform_recursive providing fake types
|
||||
# Wrapper over _transform_recursive providing fake types
def transform(
    data: _T,
    expected_type: object,
) -> _T:
    """Recursively rewrite *data* according to the annotations on *expected_type*.

    For example, given

        class Params(TypedDict, total=False):
            card_id: Required[Annotated[str, PropertyInfo(alias="cardID")]]

    ``transform({"card_id": "<my card ID>"}, Params)`` yields
    ``{'cardID': '<my card ID>'}``.

    Keys/values without type information are included as is. Note that the
    transformations performed here are not represented in the type system.
    """
    return cast(_T, _transform_recursive(data, annotation=cast(type, expected_type)))
|
||||
|
||||
|
||||
@lru_cache(maxsize=8096)
def _get_annotated_type(type_: type) -> type | None:
    """Return *type_* as an `Annotated[...]` type if it is one, else ``None``.

    `Required[Annotated[T, ...]]` is unwrapped to the inner `Annotated` first.
    """
    if is_required_type(type_):
        type_ = get_args(type_)[0]

    return type_ if is_annotated_type(type_) else None
|
||||
|
||||
|
||||
def _maybe_transform_key(key: str, type_: type) -> str:
    """Return the wire alias for *key* if *type_* carries a `PropertyInfo(alias=...)`.

    Only `Annotated` types containing `PropertyInfo` metadata are considered;
    otherwise the key is returned unchanged.
    """
    annotated_type = _get_annotated_type(type_)
    if annotated_type is None:
        # no `Annotated` definition for this type, nothing to rename
        return key

    # skip the first arg (the underlying type); the rest is metadata
    metadata = get_args(annotated_type)[1:]
    aliases = (m.alias for m in metadata if isinstance(m, PropertyInfo) and m.alias is not None)
    return next(aliases, key)
|
||||
|
||||
|
||||
def _no_transform_needed(annotation: type) -> bool:
    """Primitive numeric annotations never require transformation."""
    return annotation in (float, int)
|
||||
|
||||
|
||||
def _transform_recursive(
    data: object,
    *,
    annotation: type,
    inner_type: type | None = None,
) -> object:
    """Transform the given data against the expected type.

    Args:
        annotation: The direct type annotation given to the particular piece of data.
            This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc

        inner_type: If applicable, this is the "inside" type. This is useful in certain cases where the outside type
            is a container type such as `List[T]`. In that case `inner_type` should be set to `T` so that each entry in
            the list can be transformed using the metadata from the container type.

            Defaults to the same value as the `annotation` argument.
    """
    # local import, presumably to avoid an import cycle with .._compat — confirm
    from .._compat import model_dump

    if inner_type is None:
        inner_type = annotation

    stripped_type = strip_annotated_type(inner_type)
    origin = get_origin(stripped_type) or stripped_type
    if is_typeddict(stripped_type) and is_mapping(data):
        return _transform_typeddict(data, stripped_type)

    if origin == dict and is_mapping(data):
        # transform each value against the dict's value type argument
        items_type = get_args(stripped_type)[1]
        return {key: _transform_recursive(value, annotation=items_type) for key, value in data.items()}

    if (
        # List[T]
        (is_list_type(stripped_type) and is_list(data))
        # Iterable[T]
        or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str))
        # Sequence[T]
        or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str))
    ):
        # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually
        # intended as an iterable, so we don't transform it.
        if isinstance(data, dict):
            return cast(object, data)

        inner_type = extract_type_arg(stripped_type, 0)
        if _no_transform_needed(inner_type):
            # for some types there is no need to transform anything, so we can get a small
            # perf boost from skipping that work.
            #
            # but we still need to convert to a list to ensure the data is json-serializable
            if is_list(data):
                return data
            return list(data)

        return [_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data]

    if is_union_type(stripped_type):
        # For union types we run the transformation against all subtypes to ensure that everything is transformed.
        #
        # TODO: there may be edge cases where the same normalized field name will transform to two different names
        # in different subtypes.
        for subtype in get_args(stripped_type):
            data = _transform_recursive(data, annotation=annotation, inner_type=subtype)
        return data

    if isinstance(data, pydantic.BaseModel):
        # models serialize themselves; `__api_exclude__` lets a model opt
        # specific fields out of the request body
        return model_dump(data, exclude_unset=True, mode="json", exclude=getattr(data, "__api_exclude__", None))

    annotated_type = _get_annotated_type(annotation)
    if annotated_type is None:
        return data

    # ignore the first argument as it is the actual type
    annotations = get_args(annotated_type)[1:]
    for annotation in annotations:
        if isinstance(annotation, PropertyInfo) and annotation.format is not None:
            return _format_data(data, annotation.format, annotation.format_template)

    return data
|
||||
|
||||
|
||||
def _format_data(data: object, format_: PropertyFormat, format_template: str | None) -> object:
    """Serialize *data* according to a `PropertyInfo.format` directive.

    - "iso8601": date/datetime -> ISO-8601 string
    - "custom":  date/datetime -> ``strftime(format_template)``
    - "base64":  file path / readable object -> base64 string

    Anything else is returned unchanged.
    """
    if isinstance(data, (date, datetime)):
        if format_ == "iso8601":
            return data.isoformat()
        if format_ == "custom" and format_template is not None:
            return data.strftime(format_template)

    if format_ == "base64" and is_base64_file_input(data):
        binary: str | bytes | None = None

        if isinstance(data, pathlib.Path):
            binary = data.read_bytes()
        elif isinstance(data, io.IOBase):
            binary = data.read()

        if isinstance(binary, str):  # type: ignore[unreachable]
            # text-mode reads come back as str; encode before base64
            binary = binary.encode()

        if not isinstance(binary, bytes):
            raise RuntimeError(f"Could not read bytes from {data}; Received {type(binary)}")

        return base64.b64encode(binary).decode("ascii")

    return data
|
||||
|
||||
|
||||
def _transform_typeddict(
    data: Mapping[str, object],
    expected_type: type,
) -> Mapping[str, object]:
    """Transform the entries of *data* using the TypedDict *expected_type*'s annotations."""
    annotations = get_type_hints(expected_type, include_extras=True)
    result: dict[str, object] = {}
    for key, value in data.items():
        if not is_given(value):
            # we don't need to include omitted values here as they'll
            # be stripped out before the request is sent anyway
            continue

        type_ = annotations.get(key)
        if type_ is None:
            # untyped key: pass through untouched
            result[key] = value
            continue

        result[_maybe_transform_key(key, type_)] = _transform_recursive(value, annotation=type_)
    return result
|
||||
|
||||
|
||||
async def async_maybe_transform(
    data: object,
    expected_type: object,
) -> Any | None:
    """`None`-tolerant wrapper over `async_transform()`.

    See `async_transform()` for more details.
    """
    return None if data is None else await async_transform(data, expected_type)
|
||||
|
||||
|
||||
async def async_transform(
    data: _T,
    expected_type: object,
) -> _T:
    """Async twin of `transform()`: rewrite *data* using *expected_type*'s annotations.

    For example, given

        class Params(TypedDict, total=False):
            card_id: Required[Annotated[str, PropertyInfo(alias="cardID")]]

    transforming ``{"card_id": "<my card ID>"}`` yields ``{'cardID': '<my card ID>'}``.

    Keys/values without type information are included as is. Note that the
    transformations performed here are not represented in the type system.
    """
    return cast(_T, await _async_transform_recursive(data, annotation=cast(type, expected_type)))
|
||||
|
||||
|
||||
async def _async_transform_recursive(
    data: object,
    *,
    annotation: type,
    inner_type: type | None = None,
) -> object:
    """Transform the given data against the expected type (async variant).

    Args:
        annotation: The direct type annotation given to the particular piece of data.
            This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc

        inner_type: If applicable, this is the "inside" type. This is useful in certain cases where the outside type
            is a container type such as `List[T]`. In that case `inner_type` should be set to `T` so that each entry in
            the list can be transformed using the metadata from the container type.

            Defaults to the same value as the `annotation` argument.
    """
    from .._compat import model_dump

    if inner_type is None:
        inner_type = annotation

    stripped_type = strip_annotated_type(inner_type)
    origin = get_origin(stripped_type) or stripped_type
    if is_typeddict(stripped_type) and is_mapping(data):
        return await _async_transform_typeddict(data, stripped_type)

    if origin == dict and is_mapping(data):
        items_type = get_args(stripped_type)[1]
        # consistency fix: recurse through the async variant (previously went
        # through the sync `_transform_recursive`, bypassing async file reads
        # for nested `base64`-formatted fields)
        return {key: await _async_transform_recursive(value, annotation=items_type) for key, value in data.items()}

    if (
        # List[T]
        (is_list_type(stripped_type) and is_list(data))
        # Iterable[T]
        or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str))
        # Sequence[T]
        or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str))
    ):
        # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually
        # intended as an iterable, so we don't transform it.
        if isinstance(data, dict):
            return cast(object, data)

        inner_type = extract_type_arg(stripped_type, 0)
        if _no_transform_needed(inner_type):
            # for some types there is no need to transform anything, so we can get a small
            # perf boost from skipping that work.
            #
            # but we still need to convert to a list to ensure the data is json-serializable
            if is_list(data):
                return data
            return list(data)

        return [await _async_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data]

    if is_union_type(stripped_type):
        # For union types we run the transformation against all subtypes to ensure that everything is transformed.
        #
        # TODO: there may be edge cases where the same normalized field name will transform to two different names
        # in different subtypes.
        for subtype in get_args(stripped_type):
            data = await _async_transform_recursive(data, annotation=annotation, inner_type=subtype)
        return data

    if isinstance(data, pydantic.BaseModel):
        # consistency fix: honour `__api_exclude__` exactly like the sync
        # `_transform_recursive` does
        return model_dump(data, exclude_unset=True, mode="json", exclude=getattr(data, "__api_exclude__", None))

    annotated_type = _get_annotated_type(annotation)
    if annotated_type is None:
        return data

    # ignore the first argument as it is the actual type
    annotations = get_args(annotated_type)[1:]
    for annotation in annotations:
        if isinstance(annotation, PropertyInfo) and annotation.format is not None:
            return await _async_format_data(data, annotation.format, annotation.format_template)

    return data
|
||||
|
||||
|
||||
async def _async_format_data(data: object, format_: PropertyFormat, format_template: str | None) -> object:
|
||||
if isinstance(data, (date, datetime)):
|
||||
if format_ == "iso8601":
|
||||
return data.isoformat()
|
||||
|
||||
if format_ == "custom" and format_template is not None:
|
||||
return data.strftime(format_template)
|
||||
|
||||
if format_ == "base64" and is_base64_file_input(data):
|
||||
binary: str | bytes | None = None
|
||||
|
||||
if isinstance(data, pathlib.Path):
|
||||
binary = await anyio.Path(data).read_bytes()
|
||||
elif isinstance(data, io.IOBase):
|
||||
binary = data.read()
|
||||
|
||||
if isinstance(binary, str): # type: ignore[unreachable]
|
||||
binary = binary.encode()
|
||||
|
||||
if not isinstance(binary, bytes):
|
||||
raise RuntimeError(f"Could not read bytes from {data}; Received {type(binary)}")
|
||||
|
||||
return base64.b64encode(binary).decode("ascii")
|
||||
|
||||
return data
|
||||
|
||||
|
||||
async def _async_transform_typeddict(
    data: Mapping[str, object],
    expected_type: type,
) -> Mapping[str, object]:
    """Transform each entry of ``data`` using the annotations declared on the
    given TypedDict class, renaming keys through ``_maybe_transform_key``.

    Entries whose value is an omitted/not-given sentinel are skipped.
    """
    result: dict[str, object] = {}
    annotations = get_type_hints(expected_type, include_extras=True)
    for key, value in data.items():
        if not is_given(value):
            # we don't need to include omitted values here as they'll
            # be stripped out before the request is sent anyway
            continue

        type_ = annotations.get(key)
        if type_ is None:
            # we do not have a type annotation for this field, leave it as is
            result[key] = value
        else:
            result[_maybe_transform_key(key, type_)] = await _async_transform_recursive(value, annotation=type_)
    return result
@lru_cache(maxsize=8096)
def get_type_hints(
    obj: Any,
    globalns: dict[str, Any] | None = None,
    localns: Mapping[str, Any] | None = None,
    include_extras: bool = False,
) -> dict[str, Any]:
    """Cached wrapper around the stdlib ``get_type_hints``.

    Resolving type hints is relatively expensive and the same types are
    inspected repeatedly during request transformation, hence the LRU cache.
    NOTE(review): ``lru_cache`` requires all arguments to be hashable, so a
    ``localns`` mapping passed here must be hashable.
    """
    return _get_type_hints(obj, globalns=globalns, localns=localns, include_extras=include_extras)
@@ -0,0 +1,156 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
import typing
|
||||
import typing_extensions
|
||||
from typing import Any, TypeVar, Iterable, cast
|
||||
from collections import abc as _c_abc
|
||||
from typing_extensions import (
|
||||
TypeIs,
|
||||
Required,
|
||||
Annotated,
|
||||
get_args,
|
||||
get_origin,
|
||||
)
|
||||
|
||||
from ._utils import lru_cache
|
||||
from .._types import InheritsGeneric
|
||||
from ._compat import is_union as _is_union
|
||||
|
||||
|
||||
def is_annotated_type(typ: type) -> bool:
    """Whether ``typ`` is an ``Annotated[...]`` form."""
    origin = get_origin(typ)
    return origin == Annotated
def is_list_type(typ: type) -> bool:
    """Whether ``typ`` is ``list`` itself or a parameterised ``list[...]``."""
    origin = get_origin(typ) or typ
    return origin == list
def is_sequence_type(typ: type) -> bool:
    """Whether ``typ`` is any spelling of ``Sequence`` — the
    ``typing_extensions``, ``typing`` or ``collections.abc`` variant,
    bare or parameterised."""
    origin = get_origin(typ) or typ
    return origin == typing_extensions.Sequence or origin == typing.Sequence or origin == _c_abc.Sequence
def is_iterable_type(typ: type) -> bool:
    """If the given type is `typing.Iterable[T]` (or the bare
    ``collections.abc.Iterable`` / ``typing.Iterable``)."""
    origin = get_origin(typ) or typ
    return origin in (Iterable, _c_abc.Iterable)
def is_union_type(typ: type) -> bool:
    """Whether ``typ`` is a union (``Union[...]`` or PEP 604 ``X | Y``),
    as determined by the version-compat helper ``_is_union``."""
    return _is_union(get_origin(typ))
def is_required_type(typ: type) -> bool:
    """Whether ``typ`` is a TypedDict ``Required[...]`` field marker."""
    return get_origin(typ) == Required
def is_typevar(typ: type) -> bool:
    """Whether ``typ`` is itself a ``TypeVar`` instance (exact type match)."""
    # type ignore is required because type checkers
    # think this expression will always return False
    return type(typ) == TypeVar  # type: ignore
# Runtime classes representing `type X = ...` aliases: always the
# typing_extensions backport, plus the native class on Python 3.12+.
_TYPE_ALIAS_TYPES: tuple[type[typing_extensions.TypeAliasType], ...] = (typing_extensions.TypeAliasType,)
if sys.version_info >= (3, 12):
    _TYPE_ALIAS_TYPES = (*_TYPE_ALIAS_TYPES, typing.TypeAliasType)
def is_type_alias_type(tp: Any, /) -> TypeIs[typing_extensions.TypeAliasType]:
    """Return whether the provided argument is an instance of `TypeAliasType`.

    ```python
    type Int = int
    is_type_alias_type(Int)
    # > True
    Str = TypeAliasType("Str", str)
    is_type_alias_type(Str)
    # > True
    ```
    """
    # checks against both the typing_extensions backport and, on 3.12+,
    # the native `typing.TypeAliasType` (see `_TYPE_ALIAS_TYPES`)
    return isinstance(tp, _TYPE_ALIAS_TYPES)
# Extracts T from Annotated[T, ...] or from Required[Annotated[T, ...]]
@lru_cache(maxsize=8096)
def strip_annotated_type(typ: type) -> type:
    """Recursively unwrap ``Required[...]`` / ``Annotated[...]`` layers,
    returning the innermost plain type."""
    if is_required_type(typ) or is_annotated_type(typ):
        return strip_annotated_type(cast(type, get_args(typ)[0]))

    return typ
def extract_type_arg(typ: type, index: int) -> type:
    """Return the type argument of ``typ`` at position ``index``.

    Raises:
        RuntimeError: if ``typ`` has no type argument at that position.
    """
    type_args = get_args(typ)
    try:
        return cast(type, type_args[index])
    except IndexError as err:
        raise RuntimeError(f"Expected type {typ} to have a type argument at index {index} but it did not") from err
def extract_type_var_from_base(
    typ: type,
    *,
    generic_bases: tuple[type, ...],
    index: int,
    failure_message: str | None = None,
) -> type:
    """Given a type like `Foo[T]`, returns the generic type variable `T`.

    This also handles the case where a concrete subclass is given, e.g.
    ```py
    class MyResponse(Foo[bytes]):
        ...

    extract_type_var(MyResponse, bases=(Foo,), index=0) -> bytes
    ```

    And where a generic subclass is given:
    ```py
    _T = TypeVar('_T')
    class MyResponse(Foo[_T]):
        ...

    extract_type_var(MyResponse[bytes], bases=(Foo,), index=0) -> bytes
    ```
    """
    cls = cast(object, get_origin(typ) or typ)
    if cls in generic_bases:  # pyright: ignore[reportUnnecessaryContains]
        # we're given the class directly
        return extract_type_arg(typ, index)

    # if a subclass is given
    # ---
    # this is needed as __orig_bases__ is not present in the typeshed stubs
    # because it is intended to be for internal use only, however there does
    # not seem to be a way to resolve generic TypeVars for inherited subclasses
    # without using it.
    if isinstance(cls, InheritsGeneric):
        target_base_class: Any | None = None
        # find which of the requested generic bases this class actually inherits from
        for base in cls.__orig_bases__:
            if base.__origin__ in generic_bases:
                target_base_class = base
                break

        if target_base_class is None:
            raise RuntimeError(
                "Could not find the generic base class;\n"
                "This should never happen;\n"
                f"Does {cls} inherit from one of {generic_bases} ?"
            )

        extracted = extract_type_arg(target_base_class, index)
        if is_typevar(extracted):
            # If the extracted type argument is itself a type variable
            # then that means the subclass itself is generic, so we have
            # to resolve the type argument from the class itself, not
            # the base class.
            #
            # Note: if there is more than 1 type argument, the subclass could
            # change the ordering of the type arguments, this is not currently
            # supported.
            return extract_type_arg(typ, index)

        return extracted

    raise RuntimeError(failure_message or f"Could not resolve inner type variable at index {index} for {typ}")
@@ -0,0 +1,421 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import re
|
||||
import inspect
|
||||
import functools
|
||||
from typing import (
|
||||
Any,
|
||||
Tuple,
|
||||
Mapping,
|
||||
TypeVar,
|
||||
Callable,
|
||||
Iterable,
|
||||
Sequence,
|
||||
cast,
|
||||
overload,
|
||||
)
|
||||
from pathlib import Path
|
||||
from datetime import date, datetime
|
||||
from typing_extensions import TypeGuard
|
||||
|
||||
import sniffio
|
||||
|
||||
from .._types import Omit, NotGiven, FileTypes, HeadersLike
|
||||
|
||||
_T = TypeVar("_T")
|
||||
_TupleT = TypeVar("_TupleT", bound=Tuple[object, ...])
|
||||
_MappingT = TypeVar("_MappingT", bound=Mapping[str, object])
|
||||
_SequenceT = TypeVar("_SequenceT", bound=Sequence[object])
|
||||
CallableT = TypeVar("CallableT", bound=Callable[..., Any])
|
||||
|
||||
|
||||
def flatten(t: Iterable[Iterable[_T]]) -> list[_T]:
    """Concatenate an iterable of iterables into one flat list."""
    flattened: list[_T] = []
    for group in t:
        flattened.extend(group)
    return flattened
def extract_files(
    # TODO: this needs to take Dict but variance issues.....
    # create protocol type ?
    query: Mapping[str, object],
    *,
    paths: Sequence[Sequence[str]],
) -> list[tuple[str, FileTypes]]:
    """Recursively extract files from the given dictionary based on specified paths.

    A path may look like this ['foo', 'files', '<array>', 'data'].

    Note: this mutates the given dictionary.
    """
    return flatten([_extract_items(query, path, index=0, flattened_key=None) for path in paths])
def _extract_items(
    obj: object,
    path: Sequence[str],
    *,
    index: int,
    flattened_key: str | None,
) -> list[tuple[str, FileTypes]]:
    """Recursive worker for `extract_files`.

    Walks ``path`` one segment per call (``index`` is the current segment);
    ``flattened_key`` accumulates the multipart field name, e.g.
    ``foo[files][]``. Dict entries at the final path segment are popped out
    of ``obj`` (mutation), and ``"<array>"`` segments fan out over list items.
    """
    try:
        key = path[index]
    except IndexError:
        if not is_given(obj):
            # no value was provided - we can safely ignore
            return []

        # cyclical import
        from .._files import assert_is_file_content

        # We have exhausted the path, return the entry we found.
        assert flattened_key is not None

        if is_list(obj):
            files: list[tuple[str, FileTypes]] = []
            for entry in obj:
                assert_is_file_content(entry, key=flattened_key + "[]" if flattened_key else "")
                files.append((flattened_key + "[]", cast(FileTypes, entry)))
            return files

        assert_is_file_content(obj, key=flattened_key)
        return [(flattened_key, cast(FileTypes, obj))]

    index += 1
    if is_dict(obj):
        try:
            # We are at the last entry in the path so we must remove the field
            if (len(path)) == index:
                item = obj.pop(key)
            else:
                item = obj[key]
        except KeyError:
            # Key was not present in the dictionary, this is not indicative of an error
            # as the given path may not point to a required field. We also do not want
            # to enforce required fields as the API may differ from the spec in some cases.
            return []
        if flattened_key is None:
            flattened_key = key
        else:
            flattened_key += f"[{key}]"
        return _extract_items(
            item,
            path,
            index=index,
            flattened_key=flattened_key,
        )
    elif is_list(obj):
        if key != "<array>":
            return []

        return flatten(
            [
                _extract_items(
                    item,
                    path,
                    index=index,
                    flattened_key=flattened_key + "[]" if flattened_key is not None else "[]",
                )
                for item in obj
            ]
        )

    # Something unexpected was passed, just ignore it.
    return []
def is_given(obj: _T | NotGiven | Omit) -> TypeGuard[_T]:
    """Whether ``obj`` is an actual value rather than one of the
    `NotGiven` / `Omit` sentinels."""
    return not isinstance(obj, (NotGiven, Omit))
# Type safe methods for narrowing types with TypeVars.
|
||||
# The default narrowing for isinstance(obj, dict) is dict[unknown, unknown],
|
||||
# however this cause Pyright to rightfully report errors. As we know we don't
|
||||
# care about the contained types we can safely use `object` in its place.
|
||||
#
|
||||
# There are two separate functions defined, `is_*` and `is_*_t` for different use cases.
|
||||
# `is_*` is for when you're dealing with an unknown input
|
||||
# `is_*_t` is for when you're narrowing a known union type to a specific subset
|
||||
|
||||
|
||||
def is_tuple(obj: object) -> TypeGuard[tuple[object, ...]]:
    """Narrow an unknown ``object`` to ``tuple[object, ...]``."""
    return isinstance(obj, tuple)
def is_tuple_t(obj: _TupleT | object) -> TypeGuard[_TupleT]:
    """Narrow a known union down to its specific tuple member."""
    return isinstance(obj, tuple)
def is_sequence(obj: object) -> TypeGuard[Sequence[object]]:
    """Narrow an unknown ``object`` to ``Sequence[object]``."""
    return isinstance(obj, Sequence)
def is_sequence_t(obj: _SequenceT | object) -> TypeGuard[_SequenceT]:
    """Narrow a known union down to its specific sequence member."""
    return isinstance(obj, Sequence)
def is_mapping(obj: object) -> TypeGuard[Mapping[str, object]]:
    """Narrow an unknown ``object`` to a mapping (keys assumed ``str``
    by the TypeGuard; the runtime check is only ``isinstance``)."""
    return isinstance(obj, Mapping)
def is_mapping_t(obj: _MappingT | object) -> TypeGuard[_MappingT]:
    """Narrow a known union down to its specific mapping member."""
    return isinstance(obj, Mapping)
def is_dict(obj: object) -> TypeGuard[dict[object, object]]:
    """Narrow an unknown ``object`` to ``dict[object, object]``."""
    return isinstance(obj, dict)
def is_list(obj: object) -> TypeGuard[list[object]]:
    """Narrow an unknown ``object`` to ``list[object]``."""
    return isinstance(obj, list)
def is_iterable(obj: object) -> TypeGuard[Iterable[object]]:
    """Narrow an unknown ``object`` to ``Iterable[object]``."""
    return isinstance(obj, Iterable)
def deepcopy_minimal(item: _T) -> _T:
    """Minimal reimplementation of copy.deepcopy() that will only copy certain object types:

    - mappings, e.g. `dict`
    - list

    This is done for performance reasons.
    """
    if is_mapping(item):
        return cast("_T", {key: deepcopy_minimal(value) for key, value in item.items()})
    if is_list(item):
        return cast("_T", [deepcopy_minimal(element) for element in item])
    return item
# copied from https://github.com/Rapptz/RoboDanny
def human_join(seq: Sequence[str], *, delim: str = ", ", final: str = "or") -> str:
    """Join strings for display: "", "a", "a or b", "a, b or c"."""
    count = len(seq)
    if count == 0:
        return ""
    if count == 1:
        return seq[0]
    if count == 2:
        return f"{seq[0]} {final} {seq[1]}"
    return delim.join(seq[:-1]) + f" {final} {seq[-1]}"
def quote(string: str) -> str:
    """Add single quotation marks around the given string. Does *not* do any escaping."""
    return "'" + string + "'"
def required_args(*variants: Sequence[str]) -> Callable[[CallableT], CallableT]:
    """Decorator to enforce a given set of arguments or variants of arguments are passed to the decorated function.

    Useful for enforcing runtime validation of overloaded functions.

    Example usage:
    ```py
    @overload
    def foo(*, a: str) -> str: ...


    @overload
    def foo(*, b: bool) -> str: ...


    # This enforces the same constraints that a static type checker would
    # i.e. that either a or b must be passed to the function
    @required_args(["a"], ["b"])
    def foo(*, a: str | None = None, b: bool | None = None) -> str: ...
    ```
    """

    def inner(func: CallableT) -> CallableT:
        params = inspect.signature(func).parameters
        # names of parameters that can be supplied positionally, in order,
        # so positional call arguments can be mapped back to their names
        positional = [
            name
            for name, param in params.items()
            if param.kind
            in {
                param.POSITIONAL_ONLY,
                param.POSITIONAL_OR_KEYWORD,
            }
        ]

        @functools.wraps(func)
        def wrapper(*args: object, **kwargs: object) -> object:
            # collect the names of every argument actually supplied
            given_params: set[str] = set()
            for i, _ in enumerate(args):
                try:
                    given_params.add(positional[i])
                except IndexError:
                    raise TypeError(
                        f"{func.__name__}() takes {len(positional)} argument(s) but {len(args)} were given"
                    ) from None

            for key in kwargs.keys():
                given_params.add(key)

            # at least one variant must be fully satisfied by the given params
            for variant in variants:
                matches = all((param in given_params for param in variant))
                if matches:
                    break
            else:  # no break
                if len(variants) > 1:
                    variations = human_join(
                        ["(" + human_join([quote(arg) for arg in variant], final="and") + ")" for variant in variants]
                    )
                    msg = f"Missing required arguments; Expected either {variations} arguments to be given"
                else:
                    assert len(variants) > 0

                    # TODO: this error message is not deterministic
                    missing = list(set(variants[0]) - given_params)
                    if len(missing) > 1:
                        msg = f"Missing required arguments: {human_join([quote(arg) for arg in missing])}"
                    else:
                        msg = f"Missing required argument: {quote(missing[0])}"
                raise TypeError(msg)
            return func(*args, **kwargs)

        return wrapper  # type: ignore

    return inner
_K = TypeVar("_K")
_V = TypeVar("_V")


@overload
def strip_not_given(obj: None) -> None: ...


@overload
def strip_not_given(obj: Mapping[_K, _V | NotGiven]) -> dict[_K, _V]: ...


@overload
def strip_not_given(obj: object) -> object: ...


def strip_not_given(obj: object | None) -> object:
    """Remove all top-level keys where their values are instances of `NotGiven`"""
    if obj is None:
        return None

    # non-mappings are passed through untouched
    if not is_mapping(obj):
        return obj

    return {key: value for key, value in obj.items() if not isinstance(value, NotGiven)}
def coerce_integer(val: str) -> int:
    """Parse a decimal string into an ``int`` (always base 10)."""
    return int(val, 10)
def coerce_float(val: str) -> float:
    """Parse a string into a ``float``."""
    return float(val)
def coerce_boolean(val: str) -> bool:
    """Interpret env-var style strings: only "true", "1" or "on" are truthy."""
    return val in ("true", "1", "on")
def maybe_coerce_integer(val: str | None) -> int | None:
    """`coerce_integer` that passes ``None`` through unchanged."""
    return None if val is None else coerce_integer(val)
def maybe_coerce_float(val: str | None) -> float | None:
    """`coerce_float` that passes ``None`` through unchanged."""
    return None if val is None else coerce_float(val)
def maybe_coerce_boolean(val: str | None) -> bool | None:
    """`coerce_boolean` that passes ``None`` through unchanged."""
    return None if val is None else coerce_boolean(val)
def removeprefix(string: str, prefix: str) -> str:
    """Remove a prefix from a string.

    Backport of `str.removeprefix` for Python < 3.9
    """
    return string[len(prefix):] if string.startswith(prefix) else string
def removesuffix(string: str, suffix: str) -> str:
    """Remove a suffix from a string.

    Backport of `str.removesuffix` for Python < 3.9

    The `suffix` truthiness check is required: without it an empty suffix
    would take the `endswith` branch and `string[:-0]` (== `string[:0]`)
    would wrongly return the empty string instead of `string` unchanged.
    """
    if suffix and string.endswith(suffix):
        return string[: -len(suffix)]
    return string
def file_from_path(path: str) -> FileTypes:
    """Read the file at ``path`` and return a ``(filename, contents)`` tuple."""
    return (os.path.basename(path), Path(path).read_bytes())
def get_required_header(headers: HeadersLike, header: str) -> str:
    """Look up ``header`` case-insensitively in ``headers``.

    Falls back to a few normalised spellings (lowercase, uppercase and
    inter-caps like ``Stainless-Event-Id``) via ``headers.get``.

    Raises:
        ValueError: if no matching header with a truthy string value exists.
    """
    lower_header = header.lower()
    if is_mapping_t(headers):
        # mypy doesn't understand the type narrowing here
        for k, v in headers.items():  # type: ignore
            if k.lower() == lower_header and isinstance(v, str):
                return v

    # to deal with the case where the header looks like Stainless-Event-Id
    intercaps_header = re.sub(r"([^\w])(\w)", lambda pat: pat.group(1) + pat.group(2).upper(), header.capitalize())

    for normalized_header in [header, lower_header, header.upper(), intercaps_header]:
        value = headers.get(normalized_header)
        if value:
            return value

    raise ValueError(f"Could not find {header} header")
def get_async_library() -> str:
    """Name of the currently-running async library per sniffio, or the
    literal string "false" when detection fails for any reason."""
    try:
        library = sniffio.current_async_library()
    except Exception:
        return "false"
    return library
def lru_cache(*, maxsize: int | None = 128) -> Callable[[CallableT], CallableT]:
    """A version of functools.lru_cache that retains the type signature
    for the wrapped function arguments.
    """
    decorator = functools.lru_cache(  # noqa: TID251
        maxsize=maxsize,
    )
    return cast(Any, decorator)  # type: ignore[no-any-return]
def json_safe(data: object) -> object:
    """Translates a mapping / sequence recursively in the same fashion
    as `pydantic` v2's `model_dump(mode="json")`.
    """
    # dates/datetimes are neither mappings nor (non-string) iterables,
    # so checking them first is equivalent to the original ordering
    if isinstance(data, (datetime, date)):
        return data.isoformat()

    if is_mapping(data):
        return {json_safe(k): json_safe(v) for k, v in data.items()}

    if is_iterable(data) and not isinstance(data, (str, bytes, bytearray)):
        return [json_safe(element) for element in data]

    return data
@@ -0,0 +1,4 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
__title__ = "anthropic"
|
||||
__version__ = "0.73.0" # x-release-please-version
|
||||
@@ -0,0 +1,4 @@
|
||||
File generated from our OpenAPI spec by Stainless.
|
||||
|
||||
This directory can be used to store custom files to expand the SDK.
|
||||
It is ignored by Stainless code generation and its content (other than this keep file) won't be touched.
|
||||
@@ -0,0 +1 @@
|
||||
from ._files import files_from_dir as files_from_dir, async_files_from_dir as async_files_from_dir
|
||||
@@ -0,0 +1 @@
|
||||
from ._google_auth import google_auth as google_auth
|
||||
@@ -0,0 +1,13 @@
|
||||
from ..._exceptions import AnthropicError
|
||||
|
||||
INSTRUCTIONS = """
|
||||
|
||||
Anthropic error: missing required dependency `{library}`.
|
||||
|
||||
$ pip install anthropic[{extra}]
|
||||
"""
|
||||
|
||||
|
||||
class MissingDependencyError(AnthropicError):
    """Raised when an optional dependency is required at runtime but not
    installed; the message includes a ``pip install anthropic[extra]`` hint
    built from the INSTRUCTIONS template."""

    def __init__(self, *, library: str, extra: str) -> None:
        super().__init__(INSTRUCTIONS.format(library=library, extra=extra))
@@ -0,0 +1,29 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Any
|
||||
from typing_extensions import ClassVar, override
|
||||
|
||||
from ._common import MissingDependencyError
|
||||
from ..._utils import LazyProxy
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import google.auth # type: ignore
|
||||
|
||||
google_auth = google.auth
|
||||
|
||||
|
||||
class GoogleAuthProxy(LazyProxy[Any]):
    """Lazily imports ``google.auth`` on first attribute access, raising a
    MissingDependencyError (suggesting the ``vertex`` extra) if absent."""

    # cache the loaded module so the import only happens once
    should_cache: ClassVar[bool] = True

    @override
    def __load__(self) -> Any:
        try:
            import google.auth  # type: ignore
        except ImportError as err:
            raise MissingDependencyError(extra="vertex", library="google-auth") from err

        return google.auth
if not TYPE_CHECKING:
|
||||
google_auth = GoogleAuthProxy()
|
||||
@@ -0,0 +1,42 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import anyio
|
||||
|
||||
from .._types import FileTypes
|
||||
|
||||
|
||||
def files_from_dir(directory: str | os.PathLike[str]) -> list[FileTypes]:
    """Recursively read every file under ``directory`` into
    ``(relative_path, contents)`` tuples; paths are made relative to the
    directory's parent."""
    root = Path(directory)

    collected: list[FileTypes] = []
    _collect_files(root, root.parent, collected)
    return collected
def _collect_files(directory: Path, relative_to: Path, files: list[FileTypes]) -> None:
|
||||
for path in directory.iterdir():
|
||||
if path.is_dir():
|
||||
_collect_files(path, relative_to, files)
|
||||
continue
|
||||
|
||||
files.append((str(path.relative_to(relative_to)), path.read_bytes()))
|
||||
|
||||
|
||||
async def async_files_from_dir(directory: str | os.PathLike[str]) -> list[FileTypes]:
    """Async variant of `files_from_dir`: recursively read every file under
    ``directory`` into ``(relative_path, contents)`` tuples using anyio."""
    path = anyio.Path(directory)

    files: list[FileTypes] = []
    await _async_collect_files(path, path.parent, files)
    return files
async def _async_collect_files(directory: anyio.Path, relative_to: anyio.Path, files: list[FileTypes]) -> None:
    """Depth-first async walk appending ``(path relative to *relative_to*,
    bytes)`` for every regular file under *directory*."""
    async for path in directory.iterdir():
        if await path.is_dir():
            await _async_collect_files(path, relative_to, files)
            continue

        files.append((str(path.relative_to(relative_to)), await path.read_bytes()))
@@ -0,0 +1,44 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing_extensions import TypeVar
|
||||
|
||||
from ..._types import NotGiven
|
||||
from ..._models import TypeAdapter, construct_type_unchecked
|
||||
from ..._utils._utils import is_given
|
||||
from ...types.beta.beta_message import BetaMessage
|
||||
from ...types.beta.parsed_beta_message import ParsedBetaMessage, ParsedBetaTextBlock, ParsedBetaContentBlock
|
||||
|
||||
ResponseFormatT = TypeVar("ResponseFormatT", default=None)
|
||||
|
||||
|
||||
def parse_text(text: str, output_format: ResponseFormatT | NotGiven) -> ResponseFormatT | None:
    """Validate JSON ``text`` against ``output_format`` using pydantic's
    ``TypeAdapter``; returns ``None`` when no output format was given."""
    if is_given(output_format):
        adapted_type: TypeAdapter[ResponseFormatT] = TypeAdapter(output_format)
        return adapted_type.validate_json(text)
    return None
def parse_response(
    *,
    output_format: ResponseFormatT | NotGiven,
    response: BetaMessage,
) -> ParsedBetaMessage[ResponseFormatT]:
    """Re-wrap a ``BetaMessage`` as a ``ParsedBetaMessage``.

    Each text content block gains a ``parsed_output`` field (its text
    validated against ``output_format`` via `parse_text`); non-text blocks
    are passed through unchanged.
    """
    content_list: list[ParsedBetaContentBlock[ResponseFormatT]] = []
    for content in response.content:
        if content.type == "text":
            content_list.append(
                construct_type_unchecked(
                    type_=ParsedBetaTextBlock[ResponseFormatT],
                    value={**content.to_dict(), "parsed_output": parse_text(content.text, output_format)},
                )
            )
        else:
            content_list.append(content)  # type: ignore

    return construct_type_unchecked(
        type_=ParsedBetaMessage[ResponseFormatT],
        value={
            **response.to_dict(),
            "content": content_list,
        },
    )
@@ -0,0 +1,167 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import inspect
|
||||
from typing import Any, Literal, Optional, cast
|
||||
from typing_extensions import assert_never
|
||||
|
||||
import pydantic
|
||||
|
||||
from ..._utils import is_list
|
||||
|
||||
SupportedTypes = Literal[
|
||||
"object",
|
||||
"array",
|
||||
"string",
|
||||
"integer",
|
||||
"number",
|
||||
"boolean",
|
||||
"null",
|
||||
]
|
||||
|
||||
SupportedStringFormats = {
|
||||
"date-time",
|
||||
"time",
|
||||
"date",
|
||||
"duration",
|
||||
"email",
|
||||
"hostname",
|
||||
"uri",
|
||||
"ipv4",
|
||||
"ipv6",
|
||||
"uuid",
|
||||
}
|
||||
|
||||
|
||||
def get_transformed_string(
    schema: dict[str, Any],
) -> dict[str, Any]:
    """Transforms a JSON schema of type string to ensure it conforms to the API's expectations.

    Specifically, it ensures that if the schema is of type "string" and does not already
    specify a "format", it sets the format to "text".

    Args:
        schema: The original JSON schema.

    Returns:
        The transformed JSON schema.
    """
    is_plain_string = schema.get("type") == "string" and "format" not in schema
    if is_plain_string:
        schema["format"] = "text"
    return schema
def transform_schema(
    json_schema: type[pydantic.BaseModel] | dict[str, Any],
) -> dict[str, Any]:
    """
    Transforms a JSON schema to ensure it conforms to the API's expectations.

    Args:
        json_schema (Dict[str, Any]): The original JSON schema.

    Returns:
        The transformed JSON schema.

    Examples:
        >>> transform_schema(
        ...     {
        ...         "type": "integer",
        ...         "minimum": 1,
        ...         "maximum": 10,
        ...         "description": "A number",
        ...     }
        ... )
        {'type': 'integer', 'description': 'A number\n\n{minimum: 1, maximum: 10}'}
    """
    if inspect.isclass(json_schema) and issubclass(json_schema, pydantic.BaseModel):  # pyright: ignore[reportUnnecessaryIsInstance]
        json_schema = json_schema.model_json_schema()

    strict_schema: dict[str, Any] = {}
    # shallow copy so the pops below don't mutate the caller's schema
    json_schema = {**json_schema}

    ref = json_schema.pop("$ref", None)
    if ref is not None:
        strict_schema["$ref"] = ref
        return strict_schema

    defs = json_schema.pop("$defs", None)
    if defs is not None:
        strict_defs: dict[str, Any] = {}
        strict_schema["$defs"] = strict_defs

        for name, schema in defs.items():
            strict_defs[name] = transform_schema(schema)

    type_: Optional[SupportedTypes] = json_schema.pop("type", None)
    any_of = json_schema.pop("anyOf", None)
    one_of = json_schema.pop("oneOf", None)
    all_of = json_schema.pop("allOf", None)

    if is_list(any_of):
        strict_schema["anyOf"] = [transform_schema(cast("dict[str, Any]", variant)) for variant in any_of]
    elif is_list(one_of):
        # `oneOf` is normalised to `anyOf`, which is what the API supports
        strict_schema["anyOf"] = [transform_schema(cast("dict[str, Any]", variant)) for variant in one_of]
    elif is_list(all_of):
        strict_schema["allOf"] = [transform_schema(cast("dict[str, Any]", variant)) for variant in all_of]
    else:
        if type_ is None:
            raise ValueError("Schema must have a 'type', 'anyOf', 'oneOf', or 'allOf' field.")

        strict_schema["type"] = type_

    description = json_schema.pop("description", None)
    if description is not None:
        strict_schema["description"] = description

    title = json_schema.pop("title", None)
    if title is not None:
        strict_schema["title"] = title

    if type_ == "object":
        strict_schema["properties"] = {
            key: transform_schema(prop_schema) for key, prop_schema in json_schema.pop("properties", {}).items()
        }
        json_schema.pop("additionalProperties", None)
        strict_schema["additionalProperties"] = False

        required = json_schema.pop("required", None)
        if required is not None:
            strict_schema["required"] = required

    elif type_ == "string":
        format = json_schema.pop("format", None)
        if format and format in SupportedStringFormats:
            strict_schema["format"] = format
        elif format:
            # add it back so its treated as an extra property and appended to the description
            json_schema["format"] = format
    elif type_ == "array":
        items = json_schema.pop("items", None)
        if items is not None:
            strict_schema["items"] = transform_schema(items)

        min_items = json_schema.pop("minItems", None)
        # Only 0 and 1 are supported values. The previous condition
        # `min_items is not None and min_items == 0 or min_items == 1`
        # relied on operator precedence and only behaved correctly by
        # accident (`None == 1` happens to be falsy); this is explicit.
        if min_items is not None and min_items in (0, 1):
            strict_schema["minItems"] = min_items
        elif min_items is not None:
            # add it back so its treated as an extra property and appended to the description
            json_schema["minItems"] = min_items

    elif type_ == "boolean" or type_ == "integer" or type_ == "number" or type_ == "null" or type_ is None:
        pass
    else:
        assert_never(type_)

    # if there are any propes leftover then they aren't supported, so we add them to the description
    # so that the model *might* follow them.
    if json_schema:
        description = strict_schema.get("description")
        strict_schema["description"] = (
            (description + "\n\n" if description is not None else "")
            + "{"
            + ", ".join(f"{key}: {value}" for key, value in json_schema.items())
            + "}"
        )

    return strict_schema
@@ -0,0 +1 @@
|
||||
from ._client import AnthropicBedrock as AnthropicBedrock, AsyncAnthropicBedrock as AsyncAnthropicBedrock
|
||||
@@ -0,0 +1,72 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import httpx
|
||||
|
||||
from ..._utils import lru_cache
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import boto3
|
||||
|
||||
|
||||
@lru_cache(maxsize=512)
def _get_session(
    *,
    aws_access_key: str | None,
    aws_secret_key: str | None,
    aws_session_token: str | None,
    region: str | None,
    profile: str | None,
) -> boto3.Session:
    """Build (and memoise) a ``boto3.Session`` for the given credential tuple.

    boto3 is imported lazily inside the function so the SDK works without it
    unless Bedrock is actually used; the LRU cache keys on the full keyword
    tuple so identical credentials reuse the same session.
    """
    import boto3

    return boto3.Session(
        profile_name=profile,
        region_name=region,
        aws_access_key_id=aws_access_key,
        aws_secret_access_key=aws_secret_key,
        aws_session_token=aws_session_token,
    )
def get_auth_headers(
    *,
    method: str,
    url: str,
    headers: httpx.Headers,
    aws_access_key: str | None,
    aws_secret_key: str | None,
    aws_session_token: str | None,
    region: str | None,
    profile: str | None,
    data: str | None,
) -> dict[str, str]:
    """SigV4-sign an outgoing Bedrock request and return the auth headers.

    Args:
        method: HTTP method; upper-cased before signing.
        url: full request URL.
        headers: request headers to include in the signature.
        aws_access_key / aws_secret_key / aws_session_token / region / profile:
            credential configuration forwarded to the cached boto3 session.
        data: request body as text, included in the signature.

    Returns:
        The headers produced by the signer (e.g. ``Authorization``,
        ``X-Amz-Date``), with ``None`` values filtered out.

    Raises:
        RuntimeError: if the boto3 session cannot resolve any credentials.
    """
    from botocore.auth import SigV4Auth
    from botocore.awsrequest import AWSRequest

    session = _get_session(
        profile=profile,
        region=region,
        aws_access_key=aws_access_key,
        aws_secret_key=aws_secret_key,
        aws_session_token=aws_session_token,
    )

    # The connection header may be stripped by a proxy somewhere, so the receiver
    # of this message may not see this header, so we remove it from the set of headers
    # that are signed.
    headers = headers.copy()
    # Fix: a bare `del headers["connection"]` raises KeyError when the header
    # is absent (httpx.Headers.__delitem__ raises on a missing key), e.g. with
    # a custom client that does not send `Connection`. Guard the removal.
    if "connection" in headers:
        del headers["connection"]

    request = AWSRequest(method=method.upper(), url=url, headers=headers, data=data)
    credentials = session.get_credentials()
    if not credentials:
        raise RuntimeError("could not resolve credentials from session")

    signer = SigV4Auth(credentials, "bedrock", session.region_name)
    signer.add_auth(request)

    prepped = request.prepare()

    return {key: value for key, value in dict(prepped.headers).items() if value is not None}
|
||||
@@ -0,0 +1,102 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from ..._compat import cached_property
|
||||
from ..._resource import SyncAPIResource, AsyncAPIResource
|
||||
from ._beta_messages import (
|
||||
Messages,
|
||||
AsyncMessages,
|
||||
MessagesWithRawResponse,
|
||||
AsyncMessagesWithRawResponse,
|
||||
MessagesWithStreamingResponse,
|
||||
AsyncMessagesWithStreamingResponse,
|
||||
)
|
||||
|
||||
__all__ = ["Beta", "AsyncBeta"]
|
||||
|
||||
|
||||
class Beta(SyncAPIResource):
    """Synchronous entry point for the beta Bedrock APIs."""

    @cached_property
    def messages(self) -> Messages:
        return Messages(self._client)

    @cached_property
    def with_raw_response(self) -> BetaWithRawResponse:
        """Prefix any HTTP method call with this to get the raw response
        object back instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        view = BetaWithRawResponse(self)
        return view

    @cached_property
    def with_streaming_response(self) -> BetaWithStreamingResponse:
        """Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        view = BetaWithStreamingResponse(self)
        return view
|
||||
|
||||
|
||||
class AsyncBeta(AsyncAPIResource):
    """Asynchronous entry point for the beta Bedrock APIs."""

    @cached_property
    def messages(self) -> AsyncMessages:
        return AsyncMessages(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncBetaWithRawResponse:
        """Prefix any HTTP method call with this to get the raw response
        object back instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        view = AsyncBetaWithRawResponse(self)
        return view

    @cached_property
    def with_streaming_response(self) -> AsyncBetaWithStreamingResponse:
        """Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        view = AsyncBetaWithStreamingResponse(self)
        return view
|
||||
|
||||
|
||||
class BetaWithRawResponse:
    """Raw-response view over a :class:`Beta` resource."""

    def __init__(self, beta: Beta) -> None:
        self._beta = beta

    @cached_property
    def messages(self) -> MessagesWithRawResponse:
        wrapped = MessagesWithRawResponse(self._beta.messages)
        return wrapped
|
||||
|
||||
|
||||
class AsyncBetaWithRawResponse:
    """Raw-response view over an :class:`AsyncBeta` resource."""

    def __init__(self, beta: AsyncBeta) -> None:
        self._beta = beta

    @cached_property
    def messages(self) -> AsyncMessagesWithRawResponse:
        wrapped = AsyncMessagesWithRawResponse(self._beta.messages)
        return wrapped
|
||||
|
||||
|
||||
class BetaWithStreamingResponse:
    """Streaming-response view over a :class:`Beta` resource."""

    def __init__(self, beta: Beta) -> None:
        self._beta = beta

    @cached_property
    def messages(self) -> MessagesWithStreamingResponse:
        wrapped = MessagesWithStreamingResponse(self._beta.messages)
        return wrapped
|
||||
|
||||
|
||||
class AsyncBetaWithStreamingResponse:
    """Streaming-response view over an :class:`AsyncBeta` resource."""

    def __init__(self, beta: AsyncBeta) -> None:
        self._beta = beta

    @cached_property
    def messages(self) -> AsyncMessagesWithStreamingResponse:
        wrapped = AsyncMessagesWithStreamingResponse(self._beta.messages)
        return wrapped
|
||||
@@ -0,0 +1,93 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from ... import _legacy_response
|
||||
from ..._compat import cached_property
|
||||
from ..._resource import SyncAPIResource, AsyncAPIResource
|
||||
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
|
||||
from ...resources.beta import Messages as FirstPartyMessagesAPI, AsyncMessages as FirstPartyAsyncMessagesAPI
|
||||
|
||||
__all__ = ["Messages", "AsyncMessages"]
|
||||
|
||||
|
||||
class Messages(SyncAPIResource):
    """Beta messages resource for Bedrock.

    ``create`` is re-used directly from the first-party Messages API
    implementation; only the response-view accessors are defined here.
    """

    create = FirstPartyMessagesAPI.create

    @cached_property
    def with_raw_response(self) -> MessagesWithRawResponse:
        """Prefix any HTTP method call with this to get the raw response
        object back instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        view = MessagesWithRawResponse(self)
        return view

    @cached_property
    def with_streaming_response(self) -> MessagesWithStreamingResponse:
        """Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        view = MessagesWithStreamingResponse(self)
        return view
|
||||
|
||||
|
||||
class AsyncMessages(AsyncAPIResource):
    """Async beta messages resource for Bedrock.

    ``create`` is re-used directly from the first-party async Messages API
    implementation; only the response-view accessors are defined here.
    """

    create = FirstPartyAsyncMessagesAPI.create

    @cached_property
    def with_raw_response(self) -> AsyncMessagesWithRawResponse:
        """Prefix any HTTP method call with this to get the raw response
        object back instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        view = AsyncMessagesWithRawResponse(self)
        return view

    @cached_property
    def with_streaming_response(self) -> AsyncMessagesWithStreamingResponse:
        """Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        view = AsyncMessagesWithStreamingResponse(self)
        return view
|
||||
|
||||
|
||||
class MessagesWithRawResponse:
    """Wraps :class:`Messages` so each method returns the raw response."""

    def __init__(self, messages: Messages) -> None:
        self._messages = messages
        self.create = _legacy_response.to_raw_response_wrapper(messages.create)
|
||||
|
||||
|
||||
class AsyncMessagesWithRawResponse:
    """Wraps :class:`AsyncMessages` so each method returns the raw response."""

    def __init__(self, messages: AsyncMessages) -> None:
        self._messages = messages
        self.create = _legacy_response.async_to_raw_response_wrapper(messages.create)
|
||||
|
||||
|
||||
class MessagesWithStreamingResponse:
    """Wraps :class:`Messages` so each method returns a streamed response."""

    def __init__(self, messages: Messages) -> None:
        self._messages = messages
        self.create = to_streamed_response_wrapper(messages.create)
|
||||
|
||||
|
||||
class AsyncMessagesWithStreamingResponse:
    """Wraps :class:`AsyncMessages` so each method returns a streamed response."""

    def __init__(self, messages: AsyncMessages) -> None:
        self._messages = messages
        self.create = async_to_streamed_response_wrapper(messages.create)
|
||||
@@ -0,0 +1,412 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import logging
|
||||
import urllib.parse
|
||||
from typing import Any, Union, Mapping, TypeVar
|
||||
from typing_extensions import Self, override
|
||||
|
||||
import httpx
|
||||
|
||||
from ... import _exceptions
|
||||
from ._beta import Beta, AsyncBeta
|
||||
from ..._types import NOT_GIVEN, Timeout, NotGiven
|
||||
from ..._utils import is_dict, is_given
|
||||
from ..._compat import model_copy
|
||||
from ..._version import __version__
|
||||
from ..._streaming import Stream, AsyncStream
|
||||
from ..._exceptions import AnthropicError, APIStatusError
|
||||
from ..._base_client import (
|
||||
DEFAULT_MAX_RETRIES,
|
||||
BaseClient,
|
||||
SyncAPIClient,
|
||||
AsyncAPIClient,
|
||||
FinalRequestOptions,
|
||||
)
|
||||
from ._stream_decoder import AWSEventStreamDecoder
|
||||
from ...resources.messages import Messages, AsyncMessages
|
||||
from ...resources.completions import Completions, AsyncCompletions
|
||||
|
||||
log: logging.Logger = logging.getLogger(__name__)
|
||||
|
||||
DEFAULT_VERSION = "bedrock-2023-05-31"
|
||||
|
||||
_HttpxClientT = TypeVar("_HttpxClientT", bound=Union[httpx.Client, httpx.AsyncClient])
|
||||
_DefaultStreamT = TypeVar("_DefaultStreamT", bound=Union[Stream[Any], AsyncStream[Any]])
|
||||
|
||||
|
||||
def _prepare_options(input_options: FinalRequestOptions) -> FinalRequestOptions:
    """Rewrite first-party API request options into Bedrock-runtime form.

    Works on a deep copy so the caller's options are never mutated:
    injects the ``anthropic_version`` body field, moves ``anthropic-beta``
    headers into the body, and maps `/v1/complete` / `/v1/messages` POSTs
    onto Bedrock's per-model ``invoke`` endpoints.

    Raises:
        RuntimeError: if a messages/completions POST has a non-dict body.
        AnthropicError: for endpoints Bedrock does not support (batches,
            token counting).
    """
    options = model_copy(input_options, deep=True)

    if is_dict(options.json_data):
        # Bedrock requires the API version in the request body rather than
        # in a header.
        options.json_data.setdefault("anthropic_version", DEFAULT_VERSION)

        # NOTE(review): the betas block is nested under the is_dict guard —
        # a top-level placement would crash on non-dict bodies; original
        # indentation was lost in this dump, so confirm against upstream.
        if is_given(options.headers):
            betas = options.headers.get("anthropic-beta")
            if betas:
                # Beta flags move from the comma-separated header into a
                # JSON list in the body.
                options.json_data.setdefault("anthropic_beta", betas.split(","))

    if options.url in {"/v1/complete", "/v1/messages", "/v1/messages?beta=true"} and options.method == "post":
        if not is_dict(options.json_data):
            raise RuntimeError("Expected dictionary json_data for post /completions endpoint")

        # The model moves out of the body and into the URL path; quote it
        # but keep ":" (used by Bedrock model identifiers) unescaped.
        model = options.json_data.pop("model", None)
        model = urllib.parse.quote(str(model), safe=":")
        # Streaming is expressed via a different endpoint instead of a
        # `stream` body field.
        stream = options.json_data.pop("stream", False)
        if stream:
            options.url = f"/model/{model}/invoke-with-response-stream"
        else:
            options.url = f"/model/{model}/invoke"

    if options.url.startswith("/v1/messages/batches"):
        raise AnthropicError("The Batch API is not supported in Bedrock yet")

    if options.url == "/v1/messages/count_tokens":
        raise AnthropicError("Token counting is not supported in Bedrock yet")

    return options
|
||||
|
||||
|
||||
def _infer_region() -> str:
|
||||
"""
|
||||
Infer the AWS region from the environment variables or
|
||||
from the boto3 session if available.
|
||||
"""
|
||||
aws_region = os.environ.get("AWS_REGION")
|
||||
if aws_region is None:
|
||||
try:
|
||||
import boto3
|
||||
|
||||
session = boto3.Session()
|
||||
if session.region_name:
|
||||
aws_region = session.region_name
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
if aws_region is None:
|
||||
log.warning("No AWS region specified, defaulting to us-east-1")
|
||||
aws_region = "us-east-1" # fall back to legacy behavior
|
||||
|
||||
return aws_region
|
||||
|
||||
|
||||
class BaseBedrockClient(BaseClient[_HttpxClientT, _DefaultStreamT]):
    """Behaviour shared by the sync and async Bedrock clients."""

    @override
    def _make_status_error(
        self,
        err_msg: str,
        *,
        body: object,
        response: httpx.Response,
    ) -> APIStatusError:
        """Translate an HTTP error status into the matching SDK exception.

        Known 4xx codes map to their dedicated exception classes, any 5xx
        code maps to ``InternalServerError``, and everything else falls back
        to the generic ``APIStatusError``.
        """
        if response.status_code == 400:
            return _exceptions.BadRequestError(err_msg, response=response, body=body)

        if response.status_code == 401:
            return _exceptions.AuthenticationError(err_msg, response=response, body=body)

        if response.status_code == 403:
            return _exceptions.PermissionDeniedError(err_msg, response=response, body=body)

        if response.status_code == 404:
            return _exceptions.NotFoundError(err_msg, response=response, body=body)

        if response.status_code == 409:
            return _exceptions.ConflictError(err_msg, response=response, body=body)

        if response.status_code == 422:
            return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body)

        if response.status_code == 429:
            return _exceptions.RateLimitError(err_msg, response=response, body=body)

        # Fix: the previous 503 special case referenced
        # `_exceptions.ServiceUnavailableError`, which this package does not
        # define/export, so any 503 response raised AttributeError instead of
        # an API error. 503 is now handled by the generic >= 500 branch.
        if response.status_code >= 500:
            return _exceptions.InternalServerError(err_msg, response=response, body=body)
        return APIStatusError(err_msg, response=response, body=body)
|
||||
|
||||
|
||||
class AnthropicBedrock(BaseBedrockClient[httpx.Client, Stream[Any]], SyncAPIClient):
    """Synchronous Anthropic client backed by the AWS Bedrock runtime.

    Requests are SigV4-signed with AWS credentials (explicit keys, a named
    profile, or whatever boto3 can resolve) instead of an Anthropic API key.
    """

    messages: Messages
    completions: Completions
    beta: Beta

    def __init__(
        self,
        aws_secret_key: str | None = None,
        aws_access_key: str | None = None,
        aws_region: str | None = None,
        aws_profile: str | None = None,
        aws_session_token: str | None = None,
        base_url: str | httpx.URL | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
        max_retries: int = DEFAULT_MAX_RETRIES,
        default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        # Configure a custom httpx client. See the [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
        http_client: httpx.Client | None = None,
        # Enable or disable schema validation for data returned by the API.
        # When enabled an error APIResponseValidationError is raised
        # if the API responds with invalid data for the expected schema.
        #
        # This parameter may be removed or changed in the future.
        # If you rely on this feature, please open a GitHub issue
        # outlining your use-case to help us decide if it should be
        # part of our public interface in the future.
        _strict_response_validation: bool = False,
    ) -> None:
        """Construct a Bedrock client.

        When ``aws_region`` is omitted it is inferred from the environment /
        boto3; when ``base_url`` is omitted it is read from
        ``ANTHROPIC_BEDROCK_BASE_URL`` or derived from the region.
        """
        self.aws_secret_key = aws_secret_key

        self.aws_access_key = aws_access_key

        self.aws_region = _infer_region() if aws_region is None else aws_region
        self.aws_profile = aws_profile

        self.aws_session_token = aws_session_token

        if base_url is None:
            base_url = os.environ.get("ANTHROPIC_BEDROCK_BASE_URL")
        if base_url is None:
            base_url = f"https://bedrock-runtime.{self.aws_region}.amazonaws.com"

        super().__init__(
            version=__version__,
            base_url=base_url,
            timeout=timeout,
            max_retries=max_retries,
            custom_headers=default_headers,
            custom_query=default_query,
            http_client=http_client,
            _strict_response_validation=_strict_response_validation,
        )

        self.beta = Beta(self)
        self.messages = Messages(self)
        self.completions = Completions(self)

    @override
    def _make_sse_decoder(self) -> AWSEventStreamDecoder:
        # Bedrock responses use AWS's binary event-stream framing, not SSE.
        return AWSEventStreamDecoder()

    @override
    def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions:
        return _prepare_options(options)

    @override
    def _prepare_request(self, request: httpx.Request) -> None:
        """SigV4-sign the fully-built request just before it is sent."""
        from ._auth import get_auth_headers

        data = request.read().decode()

        headers = get_auth_headers(
            method=request.method,
            url=str(request.url),
            headers=request.headers,
            aws_access_key=self.aws_access_key,
            aws_secret_key=self.aws_secret_key,
            aws_session_token=self.aws_session_token,
            region=self.aws_region or "us-east-1",
            profile=self.aws_profile,
            data=data,
        )
        request.headers.update(headers)

    def copy(
        self,
        *,
        aws_secret_key: str | None = None,
        aws_access_key: str | None = None,
        aws_region: str | None = None,
        # Fix: `aws_profile` was previously not accepted nor forwarded, so a
        # copied client silently lost profile-based authentication.
        aws_profile: str | None = None,
        aws_session_token: str | None = None,
        base_url: str | httpx.URL | None = None,
        timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
        http_client: httpx.Client | None = None,
        max_retries: int | NotGiven = NOT_GIVEN,
        default_headers: Mapping[str, str] | None = None,
        set_default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        set_default_query: Mapping[str, object] | None = None,
        _extra_kwargs: Mapping[str, Any] = {},
    ) -> Self:
        """
        Create a new client instance re-using the same options given to the current client with optional overriding.

        ``default_headers``/``set_default_headers`` (and the query
        equivalents) are mutually exclusive: the former merges with the
        existing values, the latter replaces them wholesale.
        """
        if default_headers is not None and set_default_headers is not None:
            raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")

        if default_query is not None and set_default_query is not None:
            raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")

        headers = self._custom_headers
        if default_headers is not None:
            headers = {**headers, **default_headers}
        elif set_default_headers is not None:
            headers = set_default_headers

        params = self._custom_query
        if default_query is not None:
            params = {**params, **default_query}
        elif set_default_query is not None:
            params = set_default_query

        return self.__class__(
            aws_secret_key=aws_secret_key or self.aws_secret_key,
            aws_access_key=aws_access_key or self.aws_access_key,
            aws_region=aws_region or self.aws_region,
            aws_profile=aws_profile or self.aws_profile,
            aws_session_token=aws_session_token or self.aws_session_token,
            base_url=base_url or self.base_url,
            timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
            http_client=http_client,
            max_retries=max_retries if is_given(max_retries) else self.max_retries,
            default_headers=headers,
            default_query=params,
            **_extra_kwargs,
        )

    # Alias for `copy` for nicer inline usage, e.g.
    # client.with_options(timeout=10).foo.create(...)
    with_options = copy
|
||||
|
||||
|
||||
class AsyncAnthropicBedrock(BaseBedrockClient[httpx.AsyncClient, AsyncStream[Any]], AsyncAPIClient):
    """Asynchronous Anthropic client backed by the AWS Bedrock runtime.

    Requests are SigV4-signed with AWS credentials (explicit keys, a named
    profile, or whatever boto3 can resolve) instead of an Anthropic API key.
    """

    messages: AsyncMessages
    completions: AsyncCompletions
    beta: AsyncBeta

    def __init__(
        self,
        aws_secret_key: str | None = None,
        aws_access_key: str | None = None,
        aws_region: str | None = None,
        aws_profile: str | None = None,
        aws_session_token: str | None = None,
        base_url: str | httpx.URL | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
        max_retries: int = DEFAULT_MAX_RETRIES,
        default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        # Configure a custom httpx client. See the [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
        http_client: httpx.AsyncClient | None = None,
        # Enable or disable schema validation for data returned by the API.
        # When enabled an error APIResponseValidationError is raised
        # if the API responds with invalid data for the expected schema.
        #
        # This parameter may be removed or changed in the future.
        # If you rely on this feature, please open a GitHub issue
        # outlining your use-case to help us decide if it should be
        # part of our public interface in the future.
        _strict_response_validation: bool = False,
    ) -> None:
        """Construct an async Bedrock client.

        When ``aws_region`` is omitted it is inferred from the environment /
        boto3; when ``base_url`` is omitted it is read from
        ``ANTHROPIC_BEDROCK_BASE_URL`` or derived from the region.
        """
        self.aws_secret_key = aws_secret_key

        self.aws_access_key = aws_access_key

        self.aws_region = _infer_region() if aws_region is None else aws_region
        self.aws_profile = aws_profile

        self.aws_session_token = aws_session_token

        if base_url is None:
            base_url = os.environ.get("ANTHROPIC_BEDROCK_BASE_URL")
        if base_url is None:
            base_url = f"https://bedrock-runtime.{self.aws_region}.amazonaws.com"

        super().__init__(
            version=__version__,
            base_url=base_url,
            timeout=timeout,
            max_retries=max_retries,
            custom_headers=default_headers,
            custom_query=default_query,
            http_client=http_client,
            _strict_response_validation=_strict_response_validation,
        )

        self.messages = AsyncMessages(self)
        self.completions = AsyncCompletions(self)
        self.beta = AsyncBeta(self)

    @override
    def _make_sse_decoder(self) -> AWSEventStreamDecoder:
        # Bedrock responses use AWS's binary event-stream framing, not SSE.
        return AWSEventStreamDecoder()

    @override
    async def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions:
        return _prepare_options(options)

    @override
    async def _prepare_request(self, request: httpx.Request) -> None:
        """SigV4-sign the fully-built request just before it is sent."""
        from ._auth import get_auth_headers

        data = request.read().decode()

        headers = get_auth_headers(
            method=request.method,
            url=str(request.url),
            headers=request.headers,
            aws_access_key=self.aws_access_key,
            aws_secret_key=self.aws_secret_key,
            aws_session_token=self.aws_session_token,
            region=self.aws_region or "us-east-1",
            profile=self.aws_profile,
            data=data,
        )
        request.headers.update(headers)

    def copy(
        self,
        *,
        aws_secret_key: str | None = None,
        aws_access_key: str | None = None,
        aws_region: str | None = None,
        # Fix: `aws_profile` was previously not accepted nor forwarded, so a
        # copied client silently lost profile-based authentication.
        aws_profile: str | None = None,
        aws_session_token: str | None = None,
        base_url: str | httpx.URL | None = None,
        timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
        http_client: httpx.AsyncClient | None = None,
        max_retries: int | NotGiven = NOT_GIVEN,
        default_headers: Mapping[str, str] | None = None,
        set_default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        set_default_query: Mapping[str, object] | None = None,
        _extra_kwargs: Mapping[str, Any] = {},
    ) -> Self:
        """
        Create a new client instance re-using the same options given to the current client with optional overriding.

        ``default_headers``/``set_default_headers`` (and the query
        equivalents) are mutually exclusive: the former merges with the
        existing values, the latter replaces them wholesale.
        """
        if default_headers is not None and set_default_headers is not None:
            raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")

        if default_query is not None and set_default_query is not None:
            raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")

        headers = self._custom_headers
        if default_headers is not None:
            headers = {**headers, **default_headers}
        elif set_default_headers is not None:
            headers = set_default_headers

        params = self._custom_query
        if default_query is not None:
            params = {**params, **default_query}
        elif set_default_query is not None:
            params = set_default_query

        return self.__class__(
            aws_secret_key=aws_secret_key or self.aws_secret_key,
            aws_access_key=aws_access_key or self.aws_access_key,
            aws_region=aws_region or self.aws_region,
            aws_profile=aws_profile or self.aws_profile,
            aws_session_token=aws_session_token or self.aws_session_token,
            base_url=base_url or self.base_url,
            timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
            http_client=http_client,
            max_retries=max_retries if is_given(max_retries) else self.max_retries,
            default_headers=headers,
            default_query=params,
            **_extra_kwargs,
        )

    # Alias for `copy` for nicer inline usage, e.g.
    # client.with_options(timeout=10).foo.create(...)
    with_options = copy
|
||||
@@ -0,0 +1,37 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TypeVar
|
||||
|
||||
import httpx
|
||||
|
||||
from ..._client import Anthropic, AsyncAnthropic
|
||||
from ..._streaming import Stream, AsyncStream
|
||||
from ._stream_decoder import AWSEventStreamDecoder
|
||||
|
||||
_T = TypeVar("_T")
|
||||
|
||||
|
||||
class BedrockStream(Stream[_T]):
    """Synchronous stream of events from a Bedrock response.

    Identical to the base ``Stream`` except that it decodes AWS
    event-stream frames rather than standard SSE.
    """

    def __init__(
        self,
        *,
        cast_to: type[_T],
        response: httpx.Response,
        client: Anthropic,
    ) -> None:
        super().__init__(cast_to=cast_to, response=response, client=client)

        # Assigned after super().__init__() so this decoder takes precedence
        # over whatever the base class configured.
        self._decoder = AWSEventStreamDecoder()
|
||||
|
||||
|
||||
class AsyncBedrockStream(AsyncStream[_T]):
    """Asynchronous stream of events from a Bedrock response.

    Identical to the base ``AsyncStream`` except that it decodes AWS
    event-stream frames rather than standard SSE.
    """

    def __init__(
        self,
        *,
        cast_to: type[_T],
        response: httpx.Response,
        client: AsyncAnthropic,
    ) -> None:
        super().__init__(cast_to=cast_to, response=response, client=client)

        # Assigned after super().__init__() so this decoder takes precedence
        # over whatever the base class configured.
        self._decoder = AWSEventStreamDecoder()
|
||||
@@ -0,0 +1,64 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Iterator, AsyncIterator
|
||||
|
||||
from ..._utils import lru_cache
|
||||
from ..._streaming import ServerSentEvent
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from botocore.model import Shape
|
||||
from botocore.eventstream import EventStreamMessage
|
||||
|
||||
|
||||
@lru_cache(maxsize=None)
def get_response_stream_shape() -> Shape:
    """Load botocore's ``ResponseStream`` shape for the Bedrock runtime.

    The shape is needed by the event-stream parser; loading the service
    model from disk is relatively expensive, so the result is cached for
    the lifetime of the process (the key space is a single value, so the
    unbounded cache cannot grow).
    """
    # botocore is imported lazily so it stays an optional dependency.
    from botocore.model import ServiceModel
    from botocore.loaders import Loader

    loader = Loader()
    bedrock_service_dict = loader.load_service_model("bedrock-runtime", "service-2")
    bedrock_service_model = ServiceModel(bedrock_service_dict)
    return bedrock_service_model.shape_for("ResponseStream")
|
||||
|
||||
|
||||
class AWSEventStreamDecoder:
    """Decode AWS event-stream bytes into ``ServerSentEvent`` objects.

    Bedrock streams responses in AWS's binary event-stream framing; this
    class buffers incoming bytes, extracts complete events, and re-emits
    each JSON payload as a ``completion`` SSE so the rest of the SDK's
    streaming machinery can stay protocol-agnostic.
    """

    def __init__(self) -> None:
        # botocore is imported lazily so it stays an optional dependency.
        from botocore.parsers import EventStreamJSONParser

        self.parser = EventStreamJSONParser()

    def iter_bytes(self, iterator: Iterator[bytes]) -> Iterator[ServerSentEvent]:
        """Given an iterator that yields lines, iterate over it & yield every event encountered"""
        from botocore.eventstream import EventStreamBuffer

        event_stream_buffer = EventStreamBuffer()
        for chunk in iterator:
            # The buffer accumulates partial frames across chunks; iterating
            # it yields only fully-received events.
            event_stream_buffer.add_data(chunk)
            for event in event_stream_buffer:
                message = self._parse_message_from_event(event)
                if message:
                    yield ServerSentEvent(data=message, event="completion")

    async def aiter_bytes(self, iterator: AsyncIterator[bytes]) -> AsyncIterator[ServerSentEvent]:
        """Given an async iterator that yields lines, iterate over it & yield every event encountered"""
        from botocore.eventstream import EventStreamBuffer

        event_stream_buffer = EventStreamBuffer()
        async for chunk in iterator:
            event_stream_buffer.add_data(chunk)
            for event in event_stream_buffer:
                message = self._parse_message_from_event(event)
                if message:
                    yield ServerSentEvent(data=message, event="completion")

    def _parse_message_from_event(self, event: EventStreamMessage) -> str | None:
        """Extract the JSON payload text from one event-stream message.

        Returns ``None`` for events that carry no ``chunk`` payload.

        Raises:
            ValueError: if the embedded status code is not 200.
        """
        response_dict = event.to_response_dict()
        parsed_response = self.parser.parse(response_dict, get_response_stream_shape())
        if response_dict["status_code"] != 200:
            raise ValueError(f"Bad response code, expected 200: {response_dict}")

        # NOTE(review): assumes a successful event always has `bytes` under
        # `chunk` — `chunk.get("bytes")` would raise AttributeError on None;
        # confirm against botocore's parsed output.
        chunk = parsed_response.get("chunk")
        if not chunk:
            return None

        return chunk.get("bytes").decode()  # type: ignore[no-any-return]
|
||||
@@ -0,0 +1,36 @@
|
||||
from typing_extensions import TypeAlias
|
||||
|
||||
from ._types import (
|
||||
TextEvent as TextEvent,
|
||||
InputJsonEvent as InputJsonEvent,
|
||||
MessageStopEvent as MessageStopEvent,
|
||||
MessageStreamEvent as MessageStreamEvent,
|
||||
ContentBlockStopEvent as ContentBlockStopEvent,
|
||||
)
|
||||
from ._messages import (
|
||||
MessageStream as MessageStream,
|
||||
AsyncMessageStream as AsyncMessageStream,
|
||||
MessageStreamManager as MessageStreamManager,
|
||||
AsyncMessageStreamManager as AsyncMessageStreamManager,
|
||||
)
|
||||
from ._beta_types import (
|
||||
BetaInputJsonEvent as BetaInputJsonEvent,
|
||||
ParsedBetaTextEvent as ParsedBetaTextEvent,
|
||||
ParsedBetaMessageStopEvent as ParsedBetaMessageStopEvent,
|
||||
ParsedBetaMessageStreamEvent as ParsedBetaMessageStreamEvent,
|
||||
ParsedBetaContentBlockStopEvent as ParsedBetaContentBlockStopEvent,
|
||||
)
|
||||
|
||||
# For backwards compatibility
# These names pre-date the Parsed* event types; they are kept as aliases so
# existing `from ...streaming import BetaTextEvent` style imports keep working.
BetaTextEvent: TypeAlias = ParsedBetaTextEvent
BetaMessageStopEvent: TypeAlias = ParsedBetaMessageStopEvent[object]
BetaMessageStreamEvent: TypeAlias = ParsedBetaMessageStreamEvent
BetaContentBlockStopEvent: TypeAlias = ParsedBetaContentBlockStopEvent[object]
|
||||
|
||||
|
||||
from ._beta_messages import (
|
||||
BetaMessageStream as BetaMessageStream,
|
||||
BetaAsyncMessageStream as BetaAsyncMessageStream,
|
||||
BetaMessageStreamManager as BetaMessageStreamManager,
|
||||
BetaAsyncMessageStreamManager as BetaAsyncMessageStreamManager,
|
||||
)
|
||||
@@ -0,0 +1,540 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import builtins
|
||||
from types import TracebackType
|
||||
from typing import TYPE_CHECKING, Any, Type, Generic, Callable, cast
|
||||
from typing_extensions import Self, Iterator, Awaitable, AsyncIterator, assert_never
|
||||
|
||||
import httpx
|
||||
from pydantic import BaseModel
|
||||
|
||||
from anthropic.types.beta.beta_tool_use_block import BetaToolUseBlock
|
||||
from anthropic.types.beta.beta_mcp_tool_use_block import BetaMCPToolUseBlock
|
||||
from anthropic.types.beta.beta_server_tool_use_block import BetaServerToolUseBlock
|
||||
|
||||
from ..._types import NOT_GIVEN, NotGiven
|
||||
from ..._utils import consume_sync_iterator, consume_async_iterator
|
||||
from ..._models import build, construct_type, construct_type_unchecked
|
||||
from ._beta_types import (
|
||||
BetaCitationEvent,
|
||||
BetaThinkingEvent,
|
||||
BetaInputJsonEvent,
|
||||
BetaSignatureEvent,
|
||||
ParsedBetaTextEvent,
|
||||
ParsedBetaMessageStopEvent,
|
||||
ParsedBetaMessageStreamEvent,
|
||||
ParsedBetaContentBlockStopEvent,
|
||||
)
|
||||
from ..._streaming import Stream, AsyncStream
|
||||
from ...types.beta import BetaRawMessageStreamEvent
|
||||
from ..._utils._utils import is_given
|
||||
from .._parse._response import ResponseFormatT, parse_text
|
||||
from ...types.beta.parsed_beta_message import ParsedBetaMessage, ParsedBetaContentBlock
|
||||
|
||||
|
||||
class BetaMessageStream(Generic[ResponseFormatT]):
    """Synchronous wrapper over a raw SSE message stream.

    Folds every raw event into an accumulated `ParsedBetaMessage` snapshot
    while re-emitting higher-level parsed events to the consumer.
    """

    text_stream: Iterator[str]
    """Iterator over just the text deltas in the stream.

    ```py
    for text in stream.text_stream:
        print(text, end="", flush=True)
    print()
    ```
    """

    def __init__(
        self,
        raw_stream: Stream[BetaRawMessageStreamEvent],
        output_format: ResponseFormatT | NotGiven,
    ) -> None:
        self._raw_stream = raw_stream
        self.text_stream = self.__stream_text__()
        self._iterator = self.__stream__()
        self.__final_message_snapshot: ParsedBetaMessage[ResponseFormatT] | None = None
        self.__output_format = output_format

    @property
    def response(self) -> httpx.Response:
        """The underlying HTTP response for this stream."""
        return self._raw_stream.response

    @property
    def request_id(self) -> str | None:
        """The `request-id` response header, if the server sent one."""
        return self.response.headers.get("request-id")  # type: ignore[no-any-return]

    def __next__(self) -> ParsedBetaMessageStreamEvent[ResponseFormatT]:
        return next(self._iterator)

    def __iter__(self) -> Iterator[ParsedBetaMessageStreamEvent[ResponseFormatT]]:
        yield from self._iterator

    def __enter__(self) -> Self:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.close()

    def close(self) -> None:
        """
        Close the response and release the connection.

        Automatically called if the response body is read to completion.
        """
        self._raw_stream.close()

    def get_final_message(self) -> ParsedBetaMessage[ResponseFormatT]:
        """Waits until the stream has been read to completion and returns
        the accumulated `Message` object.
        """
        self.until_done()
        assert self.__final_message_snapshot is not None
        return self.__final_message_snapshot

    def get_final_text(self) -> str:
        """Returns all `text` content blocks concatenated together.

        > [!NOTE]
        > Currently the API will only respond with a single content block.

        Will raise an error if no `text` content blocks were returned.
        """
        message = self.get_final_message()
        text_blocks = [block.text for block in message.content if block.type == "text"]

        if not text_blocks:
            raise RuntimeError(
                f".get_final_text() can only be called when the API returns a `text` content block.\nThe API returned {','.join([b.type for b in message.content])} content block type(s) that you can access by calling get_final_message().content"
            )

        return "".join(text_blocks)

    def until_done(self) -> None:
        """Blocks until the stream has been consumed"""
        consume_sync_iterator(self)

    # properties
    @property
    def current_message_snapshot(self) -> ParsedBetaMessage[ResponseFormatT]:
        """The message accumulated from all events consumed so far."""
        assert self.__final_message_snapshot is not None
        return self.__final_message_snapshot

    def __stream__(self) -> Iterator[ParsedBetaMessageStreamEvent[ResponseFormatT]]:
        # Fold each raw SSE event into the snapshot first, then fan it out
        # into zero or more higher-level events for the consumer.
        for sse_event in self._raw_stream:
            self.__final_message_snapshot = accumulate_event(
                event=sse_event,
                current_snapshot=self.__final_message_snapshot,
                request_headers=self.response.request.headers,
                output_format=self.__output_format,
            )

            yield from build_events(event=sse_event, message_snapshot=self.current_message_snapshot)

    def __stream_text__(self) -> Iterator[str]:
        for chunk in self:
            if chunk.type == "content_block_delta" and chunk.delta.type == "text_delta":
                yield chunk.delta.text
|
||||
|
||||
|
||||
class BetaMessageStreamManager(Generic[ResponseFormatT]):
    """Wrapper over MessageStream that is returned by `.stream()`.

    ```py
    with client.beta.messages.stream(...) as stream:
        for chunk in stream:
            ...
    ```
    """

    def __init__(
        self,
        api_request: Callable[[], Stream[BetaRawMessageStreamEvent]],
        *,
        output_format: ResponseFormatT | NotGiven,
    ) -> None:
        # The request callable is deferred until the context manager is entered.
        self.__stream: BetaMessageStream[ResponseFormatT] | None = None
        self.__api_request = api_request
        self.__output_format = output_format

    def __enter__(self) -> BetaMessageStream[ResponseFormatT]:
        self.__stream = BetaMessageStream(self.__api_request(), output_format=self.__output_format)
        return self.__stream

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        if self.__stream is not None:
            self.__stream.close()
|
||||
|
||||
|
||||
class BetaAsyncMessageStream(Generic[ResponseFormatT]):
    """Asynchronous wrapper over a raw SSE message stream.

    Folds every raw event into an accumulated `ParsedBetaMessage` snapshot
    while re-emitting higher-level parsed events to the consumer.
    """

    text_stream: AsyncIterator[str]
    """Async iterator over just the text deltas in the stream.

    ```py
    async for text in stream.text_stream:
        print(text, end="", flush=True)
    print()
    ```
    """

    def __init__(
        self,
        raw_stream: AsyncStream[BetaRawMessageStreamEvent],
        output_format: ResponseFormatT | NotGiven,
    ) -> None:
        self._raw_stream = raw_stream
        self.text_stream = self.__stream_text__()
        self._iterator = self.__stream__()
        self.__final_message_snapshot: ParsedBetaMessage[ResponseFormatT] | None = None
        self.__output_format = output_format

    @property
    def response(self) -> httpx.Response:
        """The underlying HTTP response for this stream."""
        return self._raw_stream.response

    @property
    def request_id(self) -> str | None:
        """The `request-id` response header, if the server sent one."""
        return self.response.headers.get("request-id")  # type: ignore[no-any-return]

    async def __anext__(self) -> ParsedBetaMessageStreamEvent[ResponseFormatT]:
        return await self._iterator.__anext__()

    async def __aiter__(self) -> AsyncIterator[ParsedBetaMessageStreamEvent[ResponseFormatT]]:
        async for item in self._iterator:
            yield item

    async def __aenter__(self) -> Self:
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        await self.close()

    async def close(self) -> None:
        """
        Close the response and release the connection.

        Automatically called if the response body is read to completion.
        """
        await self._raw_stream.close()

    async def get_final_message(self) -> ParsedBetaMessage[ResponseFormatT]:
        """Waits until the stream has been read to completion and returns
        the accumulated `Message` object.
        """
        await self.until_done()
        assert self.__final_message_snapshot is not None
        return self.__final_message_snapshot

    async def get_final_text(self) -> str:
        """Returns all `text` content blocks concatenated together.

        > [!NOTE]
        > Currently the API will only respond with a single content block.

        Will raise an error if no `text` content blocks were returned.
        """
        message = await self.get_final_message()
        text_blocks = [block.text for block in message.content if block.type == "text"]

        if not text_blocks:
            raise RuntimeError(
                f".get_final_text() can only be called when the API returns a `text` content block.\nThe API returned {','.join([b.type for b in message.content])} content block type(s) that you can access by calling get_final_message().content"
            )

        return "".join(text_blocks)

    async def until_done(self) -> None:
        """Waits until the stream has been consumed"""
        await consume_async_iterator(self)

    # properties
    @property
    def current_message_snapshot(self) -> ParsedBetaMessage[ResponseFormatT]:
        """The message accumulated from all events consumed so far."""
        assert self.__final_message_snapshot is not None
        return self.__final_message_snapshot

    async def __stream__(self) -> AsyncIterator[ParsedBetaMessageStreamEvent[ResponseFormatT]]:
        # Fold each raw SSE event into the snapshot first, then fan it out
        # into zero or more higher-level events for the consumer.
        async for sse_event in self._raw_stream:
            self.__final_message_snapshot = accumulate_event(
                event=sse_event,
                current_snapshot=self.__final_message_snapshot,
                request_headers=self.response.request.headers,
                output_format=self.__output_format,
            )

            for event in build_events(event=sse_event, message_snapshot=self.current_message_snapshot):
                yield event

    async def __stream_text__(self) -> AsyncIterator[str]:
        async for chunk in self:
            if chunk.type == "content_block_delta" and chunk.delta.type == "text_delta":
                yield chunk.delta.text
|
||||
|
||||
|
||||
class BetaAsyncMessageStreamManager(Generic[ResponseFormatT]):
    """Wrapper over BetaAsyncMessageStream that is returned by `.stream()`
    so that an async context manager can be used without `await`ing the
    original client call.

    ```py
    async with client.beta.messages.stream(...) as stream:
        async for chunk in stream:
            ...
    ```
    """

    def __init__(
        self,
        api_request: Awaitable[AsyncStream[BetaRawMessageStreamEvent]],
        *,
        output_format: ResponseFormatT | NotGiven = NOT_GIVEN,
    ) -> None:
        # The awaitable is not awaited until the context manager is entered.
        self.__stream: BetaAsyncMessageStream[ResponseFormatT] | None = None
        self.__api_request = api_request
        self.__output_format = output_format

    async def __aenter__(self) -> BetaAsyncMessageStream[ResponseFormatT]:
        self.__stream = BetaAsyncMessageStream(await self.__api_request, output_format=self.__output_format)
        return self.__stream

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        if self.__stream is not None:
            await self.__stream.close()
|
||||
|
||||
|
||||
def build_events(
    *,
    event: BetaRawMessageStreamEvent,
    message_snapshot: ParsedBetaMessage[ResponseFormatT],
) -> list[ParsedBetaMessageStreamEvent[ResponseFormatT]]:
    """Map a raw SSE event to the list of events to fire for the consumer.

    Raw events are re-emitted as-is; `content_block_delta` events additionally
    produce an enriched event carrying the accumulated snapshot state, and
    `message_stop`/`content_block_stop` are replaced by parsed variants that
    attach the accumulated message / content block.
    """
    events_to_fire: list[ParsedBetaMessageStreamEvent[ResponseFormatT]] = []

    if event.type == "message_start":
        events_to_fire.append(event)
    elif event.type == "message_delta":
        events_to_fire.append(event)
    elif event.type == "message_stop":
        events_to_fire.append(
            build(ParsedBetaMessageStopEvent[ResponseFormatT], type="message_stop", message=message_snapshot)
        )
    elif event.type == "content_block_start":
        events_to_fire.append(event)
    elif event.type == "content_block_delta":
        events_to_fire.append(event)

        content_block = message_snapshot.content[event.index]
        if event.delta.type == "text_delta":
            if content_block.type == "text":
                events_to_fire.append(
                    build(
                        ParsedBetaTextEvent,
                        type="text",
                        text=event.delta.text,
                        snapshot=content_block.text,
                    )
                )
        elif event.delta.type == "input_json_delta":
            # NOTE(review): `server_tool_use` blocks also accumulate JSON input
            # (see TRACKS_TOOL_INPUT in accumulate_event) but never fire an
            # input_json event here — confirm whether that is intentional.
            if content_block.type == "tool_use" or content_block.type == "mcp_tool_use":
                events_to_fire.append(
                    build(
                        BetaInputJsonEvent,
                        type="input_json",
                        partial_json=event.delta.partial_json,
                        snapshot=content_block.input,
                    )
                )
        elif event.delta.type == "citations_delta":
            if content_block.type == "text":
                events_to_fire.append(
                    build(
                        BetaCitationEvent,
                        type="citation",
                        citation=event.delta.citation,
                        snapshot=content_block.citations or [],
                    )
                )
        elif event.delta.type == "thinking_delta":
            if content_block.type == "thinking":
                events_to_fire.append(
                    build(
                        BetaThinkingEvent,
                        type="thinking",
                        thinking=event.delta.thinking,
                        snapshot=content_block.thinking,
                    )
                )
        elif event.delta.type == "signature_delta":
            if content_block.type == "thinking":
                events_to_fire.append(
                    build(
                        BetaSignatureEvent,
                        type="signature",
                        signature=content_block.signature,
                    )
                )
            # removed a stray dead `pass` statement that followed this branch
        else:
            # we only want exhaustive checking for linters, not at runtime
            if TYPE_CHECKING:  # type: ignore[unreachable]
                assert_never(event.delta)
    elif event.type == "content_block_stop":
        content_block = message_snapshot.content[event.index]

        events_to_fire.append(
            build(
                ParsedBetaContentBlockStopEvent,
                type="content_block_stop",
                index=event.index,
                content_block=content_block,
            )
        )
    else:
        # we only want exhaustive checking for linters, not at runtime
        if TYPE_CHECKING:  # type: ignore[unreachable]
            assert_never(event)

    return events_to_fire
|
||||
|
||||
|
||||
# Name of the untyped attribute used to stash the raw accumulated JSON
# buffer on a content-block snapshot between deltas.
JSON_BUF_PROPERTY = "__json_buf"

# Content-block model classes whose `input` field is built up from
# `input_json_delta` events.
TRACKS_TOOL_INPUT = (
    BetaToolUseBlock,
    BetaServerToolUseBlock,
    BetaMCPToolUseBlock,
)
|
||||
|
||||
|
||||
def accumulate_event(
    *,
    event: BetaRawMessageStreamEvent,
    current_snapshot: ParsedBetaMessage[ResponseFormatT] | None,
    request_headers: httpx.Headers,
    output_format: ResponseFormatT | NotGiven = NOT_GIVEN,
) -> ParsedBetaMessage[ResponseFormatT]:
    """Fold a single raw stream event into the running message snapshot.

    The first event must be `message_start`, which creates the snapshot;
    subsequent events mutate it in place (appending content blocks,
    concatenating deltas, updating usage) and the snapshot is returned.

    Raises:
        TypeError: if the event cannot be coerced into a model instance.
        RuntimeError: if any event arrives before `message_start`.
        ValueError: if accumulated tool-input JSON cannot be parsed.
    """
    if not isinstance(cast(Any, event), BaseModel):
        # The event may arrive as a plain value (e.g. a dict); coerce it into
        # the typed model before accumulating.
        event = cast(  # pyright: ignore[reportUnnecessaryCast]
            BetaRawMessageStreamEvent,
            construct_type_unchecked(
                type_=cast(Type[BetaRawMessageStreamEvent], BetaRawMessageStreamEvent),
                value=event,
            ),
        )
        if not isinstance(cast(Any, event), BaseModel):
            # Fixed: the original used `builtins.type(event)` but no
            # `import builtins` is visible in this module, which would have
            # raised NameError on this error path; the `type` builtin
            # produces the identical message.
            raise TypeError(
                f"Unexpected event runtime type, after deserialising twice - {event} - {type(event)}"
            )

    if current_snapshot is None:
        if event.type == "message_start":
            return cast(
                ParsedBetaMessage[ResponseFormatT], ParsedBetaMessage.construct(**cast(Any, event.message.to_dict()))
            )

        raise RuntimeError(f'Unexpected event order, got {event.type} before "message_start"')

    if event.type == "content_block_start":
        # TODO: check index
        current_snapshot.content.append(
            cast(
                Any,  # Pydantic does not support generic unions at runtime
                construct_type(type_=ParsedBetaContentBlock, value=event.content_block.model_dump()),
            ),
        )
    elif event.type == "content_block_delta":
        content = current_snapshot.content[event.index]
        if event.delta.type == "text_delta":
            if content.type == "text":
                content.text += event.delta.text
        elif event.delta.type == "input_json_delta":
            if isinstance(content, TRACKS_TOOL_INPUT):
                from jiter import from_json

                # we need to keep track of the raw JSON string as well so that we can
                # re-parse it for each delta, for now we just store it as an untyped
                # property on the snapshot
                json_buf = cast(bytes, getattr(content, JSON_BUF_PROPERTY, b""))
                json_buf += bytes(event.delta.partial_json, "utf-8")

                if json_buf:
                    try:
                        anthropic_beta = request_headers.get("anthropic-beta", "") if request_headers else ""

                        if "fine-grained-tool-streaming-2025-05-14" in anthropic_beta:
                            # Fine-grained streaming may cut JSON mid-string, so
                            # keep partial trailing strings when parsing.
                            content.input = from_json(json_buf, partial_mode="trailing-strings")
                        else:
                            content.input = from_json(json_buf, partial_mode=True)
                    except ValueError as e:
                        raise ValueError(
                            f"Unable to parse tool parameter JSON from model. Please retry your request or adjust your prompt. Error: {e}. JSON: {json_buf.decode('utf-8')}"
                        ) from e

                setattr(content, JSON_BUF_PROPERTY, json_buf)
        elif event.delta.type == "citations_delta":
            if content.type == "text":
                if not content.citations:
                    content.citations = [event.delta.citation]
                else:
                    content.citations.append(event.delta.citation)
        elif event.delta.type == "thinking_delta":
            if content.type == "thinking":
                content.thinking += event.delta.thinking
        elif event.delta.type == "signature_delta":
            if content.type == "thinking":
                content.signature = event.delta.signature
        else:
            # we only want exhaustive checking for linters, not at runtime
            if TYPE_CHECKING:  # type: ignore[unreachable]
                assert_never(event.delta)
    elif event.type == "content_block_stop":
        content_block = current_snapshot.content[event.index]
        # Only text blocks are parsed into the structured output format.
        if content_block.type == "text" and is_given(output_format):
            content_block.parsed_output = parse_text(content_block.text, output_format)
    elif event.type == "message_delta":
        current_snapshot.container = event.delta.container
        current_snapshot.stop_reason = event.delta.stop_reason
        current_snapshot.stop_sequence = event.delta.stop_sequence
        current_snapshot.usage.output_tokens = event.usage.output_tokens
        current_snapshot.context_management = event.context_management

        # Update other usage fields if they exist in the event
        if event.usage.input_tokens is not None:
            current_snapshot.usage.input_tokens = event.usage.input_tokens
        if event.usage.cache_creation_input_tokens is not None:
            current_snapshot.usage.cache_creation_input_tokens = event.usage.cache_creation_input_tokens
        if event.usage.cache_read_input_tokens is not None:
            current_snapshot.usage.cache_read_input_tokens = event.usage.cache_read_input_tokens
        if event.usage.server_tool_use is not None:
            current_snapshot.usage.server_tool_use = event.usage.server_tool_use

    return current_snapshot
|
||||
@@ -0,0 +1,108 @@
|
||||
from typing import TYPE_CHECKING, Any, Dict, Union, Generic, cast
|
||||
from typing_extensions import List, Literal, Annotated
|
||||
|
||||
import jiter
|
||||
|
||||
from ..._models import BaseModel, GenericModel
|
||||
from ...types.beta import (
|
||||
BetaRawMessageStopEvent,
|
||||
BetaRawMessageDeltaEvent,
|
||||
BetaRawMessageStartEvent,
|
||||
BetaRawContentBlockStopEvent,
|
||||
BetaRawContentBlockDeltaEvent,
|
||||
BetaRawContentBlockStartEvent,
|
||||
)
|
||||
from .._parse._response import ResponseFormatT
|
||||
from ..._utils._transform import PropertyInfo
|
||||
from ...types.beta.parsed_beta_message import ParsedBetaMessage, ParsedBetaContentBlock
|
||||
from ...types.beta.beta_citations_delta import Citation
|
||||
|
||||
|
||||
class ParsedBetaTextEvent(BaseModel):
    """Fired for each text delta, with the accumulated text so far."""

    type: Literal["text"]

    text: str
    """The text delta"""

    snapshot: str
    """The entire accumulated text"""

    def parsed_snapshot(self) -> Dict[str, Any]:
        """Parse the accumulated text as (possibly partial) JSON."""
        raw = self.snapshot.encode("utf-8")
        return cast(Dict[str, Any], jiter.from_json(raw, partial_mode="trailing-strings"))
|
||||
|
||||
|
||||
class BetaCitationEvent(BaseModel):
    """Fired for each citation delta, with all citations so far."""

    type: Literal["citation"]

    citation: Citation
    """The new citation"""

    snapshot: List[Citation]
    """All of the accumulated citations"""
|
||||
|
||||
|
||||
class BetaThinkingEvent(BaseModel):
    """Fired for each thinking delta, with the accumulated thinking so far."""

    type: Literal["thinking"]

    thinking: str
    """The thinking delta"""

    snapshot: str
    """The accumulated thinking so far"""
|
||||
|
||||
|
||||
class BetaSignatureEvent(BaseModel):
    """Fired when a thinking block's signature delta arrives."""

    type: Literal["signature"]

    signature: str
    """The signature of the thinking block"""
|
||||
|
||||
|
||||
class BetaInputJsonEvent(BaseModel):
    """Fired for each tool-input JSON delta, with the parsed object so far."""

    type: Literal["input_json"]

    partial_json: str
    """A partial JSON string delta

    e.g. `'"San Francisco,'`
    """

    snapshot: object
    """The currently accumulated parsed object.


    e.g. `{'location': 'San Francisco, CA'}`
    """
|
||||
|
||||
|
||||
class ParsedBetaMessageStopEvent(BetaRawMessageStopEvent, GenericModel, Generic[ResponseFormatT]):
    """A `message_stop` event enriched with the fully accumulated message."""

    type: Literal["message_stop"]

    message: ParsedBetaMessage[ResponseFormatT]
|
||||
|
||||
|
||||
class ParsedBetaContentBlockStopEvent(BetaRawContentBlockStopEvent, GenericModel, Generic[ResponseFormatT]):
    """A `content_block_stop` event enriched with the accumulated block."""

    type: Literal["content_block_stop"]

    # Pydantic cannot handle generic unions at runtime, so the generic
    # parameter is only applied when type checking.
    if TYPE_CHECKING:
        content_block: ParsedBetaContentBlock[ResponseFormatT]
    else:
        content_block: ParsedBetaContentBlock
|
||||
|
||||
|
||||
# Discriminated union of every event a parsed beta message stream can yield,
# dispatched on the `type` field.
ParsedBetaMessageStreamEvent = Annotated[
    Union[
        ParsedBetaTextEvent,
        BetaCitationEvent,
        BetaThinkingEvent,
        BetaSignatureEvent,
        BetaInputJsonEvent,
        BetaRawMessageStartEvent,
        BetaRawMessageDeltaEvent,
        ParsedBetaMessageStopEvent[ResponseFormatT],
        BetaRawContentBlockStartEvent,
        BetaRawContentBlockDeltaEvent,
        ParsedBetaContentBlockStopEvent[ResponseFormatT],
    ],
    PropertyInfo(discriminator="type"),
]
|
||||
@@ -0,0 +1,484 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from types import TracebackType
|
||||
from typing import TYPE_CHECKING, Any, Type, Callable, cast
|
||||
from typing_extensions import Self, Iterator, Awaitable, AsyncIterator, assert_never
|
||||
|
||||
import httpx
|
||||
from pydantic import BaseModel
|
||||
|
||||
from anthropic.types.tool_use_block import ToolUseBlock
|
||||
from anthropic.types.server_tool_use_block import ServerToolUseBlock
|
||||
|
||||
from ._types import (
|
||||
TextEvent,
|
||||
CitationEvent,
|
||||
ThinkingEvent,
|
||||
InputJsonEvent,
|
||||
SignatureEvent,
|
||||
MessageStopEvent,
|
||||
MessageStreamEvent,
|
||||
ContentBlockStopEvent,
|
||||
)
|
||||
from ...types import Message, ContentBlock, RawMessageStreamEvent
|
||||
from ..._utils import consume_sync_iterator, consume_async_iterator
|
||||
from ..._models import build, construct_type, construct_type_unchecked
|
||||
from ..._streaming import Stream, AsyncStream
|
||||
|
||||
|
||||
class MessageStream:
    """Synchronous wrapper over a raw SSE message stream.

    Folds every raw event into an accumulated `Message` snapshot while
    re-emitting the stream events to the consumer.
    """

    text_stream: Iterator[str]
    """Iterator over just the text deltas in the stream.

    ```py
    for text in stream.text_stream:
        print(text, end="", flush=True)
    print()
    ```
    """

    def __init__(self, raw_stream: Stream[RawMessageStreamEvent]) -> None:
        self._raw_stream = raw_stream
        self.text_stream = self.__stream_text__()
        self._iterator = self.__stream__()
        self.__final_message_snapshot: Message | None = None

    @property
    def response(self) -> httpx.Response:
        """The underlying HTTP response for this stream."""
        return self._raw_stream.response

    @property
    def request_id(self) -> str | None:
        """The `request-id` response header, if the server sent one."""
        return self.response.headers.get("request-id")  # type: ignore[no-any-return]

    def __next__(self) -> MessageStreamEvent:
        return next(self._iterator)

    def __iter__(self) -> Iterator[MessageStreamEvent]:
        yield from self._iterator

    def __enter__(self) -> Self:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.close()

    def close(self) -> None:
        """
        Close the response and release the connection.

        Automatically called if the response body is read to completion.
        """
        self._raw_stream.close()

    def get_final_message(self) -> Message:
        """Waits until the stream has been read to completion and returns
        the accumulated `Message` object.
        """
        self.until_done()
        assert self.__final_message_snapshot is not None
        return self.__final_message_snapshot

    def get_final_text(self) -> str:
        """Returns all `text` content blocks concatenated together.

        > [!NOTE]
        > Currently the API will only respond with a single content block.

        Will raise an error if no `text` content blocks were returned.
        """
        message = self.get_final_message()
        text_blocks = [block.text for block in message.content if block.type == "text"]

        if not text_blocks:
            raise RuntimeError(
                f".get_final_text() can only be called when the API returns a `text` content block.\nThe API returned {','.join([b.type for b in message.content])} content block type(s) that you can access by calling get_final_message().content"
            )

        return "".join(text_blocks)

    def until_done(self) -> None:
        """Blocks until the stream has been consumed"""
        consume_sync_iterator(self)

    # properties
    @property
    def current_message_snapshot(self) -> Message:
        """The message accumulated from all events consumed so far."""
        assert self.__final_message_snapshot is not None
        return self.__final_message_snapshot

    def __stream__(self) -> Iterator[MessageStreamEvent]:
        # Fold each raw SSE event into the snapshot first, then fan it out
        # into zero or more events for the consumer.
        for sse_event in self._raw_stream:
            self.__final_message_snapshot = accumulate_event(
                event=sse_event,
                current_snapshot=self.__final_message_snapshot,
            )

            yield from build_events(event=sse_event, message_snapshot=self.current_message_snapshot)

    def __stream_text__(self) -> Iterator[str]:
        for chunk in self:
            if chunk.type == "content_block_delta" and chunk.delta.type == "text_delta":
                yield chunk.delta.text
|
||||
|
||||
|
||||
class MessageStreamManager:
    """Wrapper over MessageStream that is returned by `.stream()`.

    ```py
    with client.messages.stream(...) as stream:
        for chunk in stream:
            ...
    ```
    """

    def __init__(
        self,
        api_request: Callable[[], Stream[RawMessageStreamEvent]],
    ) -> None:
        # The request callable is deferred until the context manager is entered.
        self.__stream: MessageStream | None = None
        self.__api_request = api_request

    def __enter__(self) -> MessageStream:
        self.__stream = MessageStream(self.__api_request())
        return self.__stream

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        if self.__stream is not None:
            self.__stream.close()
|
||||
|
||||
|
||||
class AsyncMessageStream:
    """Asynchronous wrapper over a raw SSE message stream.

    Folds every raw event into an accumulated `Message` snapshot while
    re-emitting the stream events to the consumer.
    """

    text_stream: AsyncIterator[str]
    """Async iterator over just the text deltas in the stream.

    ```py
    async for text in stream.text_stream:
        print(text, end="", flush=True)
    print()
    ```
    """

    def __init__(self, raw_stream: AsyncStream[RawMessageStreamEvent]) -> None:
        self._raw_stream = raw_stream
        self.text_stream = self.__stream_text__()
        self._iterator = self.__stream__()
        self.__final_message_snapshot: Message | None = None

    @property
    def response(self) -> httpx.Response:
        """The underlying HTTP response for this stream."""
        return self._raw_stream.response

    @property
    def request_id(self) -> str | None:
        """The `request-id` response header, if the server sent one."""
        return self.response.headers.get("request-id")  # type: ignore[no-any-return]

    async def __anext__(self) -> MessageStreamEvent:
        return await self._iterator.__anext__()

    async def __aiter__(self) -> AsyncIterator[MessageStreamEvent]:
        async for item in self._iterator:
            yield item

    async def __aenter__(self) -> Self:
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        await self.close()

    async def close(self) -> None:
        """
        Close the response and release the connection.

        Automatically called if the response body is read to completion.
        """
        await self._raw_stream.close()

    async def get_final_message(self) -> Message:
        """Waits until the stream has been read to completion and returns
        the accumulated `Message` object.
        """
        await self.until_done()
        assert self.__final_message_snapshot is not None
        return self.__final_message_snapshot

    async def get_final_text(self) -> str:
        """Returns all `text` content blocks concatenated together.

        > [!NOTE]
        > Currently the API will only respond with a single content block.

        Will raise an error if no `text` content blocks were returned.
        """
        message = await self.get_final_message()
        text_blocks = [block.text for block in message.content if block.type == "text"]

        if not text_blocks:
            raise RuntimeError(
                f".get_final_text() can only be called when the API returns a `text` content block.\nThe API returned {','.join([b.type for b in message.content])} content block type(s) that you can access by calling get_final_message().content"
            )

        return "".join(text_blocks)

    async def until_done(self) -> None:
        """Waits until the stream has been consumed"""
        await consume_async_iterator(self)

    # properties
    @property
    def current_message_snapshot(self) -> Message:
        """The message accumulated from all events consumed so far."""
        assert self.__final_message_snapshot is not None
        return self.__final_message_snapshot

    async def __stream__(self) -> AsyncIterator[MessageStreamEvent]:
        # Fold each raw SSE event into the snapshot first, then fan it out
        # into zero or more events for the consumer.
        async for sse_event in self._raw_stream:
            self.__final_message_snapshot = accumulate_event(
                event=sse_event,
                current_snapshot=self.__final_message_snapshot,
            )

            for event in build_events(event=sse_event, message_snapshot=self.current_message_snapshot):
                yield event

    async def __stream_text__(self) -> AsyncIterator[str]:
        async for chunk in self:
            if chunk.type == "content_block_delta" and chunk.delta.type == "text_delta":
                yield chunk.delta.text
|
||||
|
||||
|
||||
class AsyncMessageStreamManager:
    """Wrapper over AsyncMessageStream that is returned by `.stream()`
    so that an async context manager can be used without `await`ing the
    original client call.

    ```py
    async with client.messages.stream(...) as stream:
        async for chunk in stream:
            ...
    ```
    """

    def __init__(
        self,
        api_request: Awaitable[AsyncStream[RawMessageStreamEvent]],
    ) -> None:
        # The awaitable is not awaited until the context manager is entered.
        self.__stream: AsyncMessageStream | None = None
        self.__api_request = api_request

    async def __aenter__(self) -> AsyncMessageStream:
        self.__stream = AsyncMessageStream(await self.__api_request)
        return self.__stream

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        if self.__stream is not None:
            await self.__stream.close()
|
||||
|
||||
|
||||
def build_events(
    *,
    event: RawMessageStreamEvent,
    message_snapshot: Message,
) -> list[MessageStreamEvent]:
    """Map a raw SSE event to the list of stream events that should be fired.

    Raw events are forwarded as-is; delta and stop events additionally
    produce typed convenience events (`TextEvent`, `InputJsonEvent`,
    `MessageStopEvent`, ...) that carry both the delta and the accumulated
    data from *message_snapshot*.

    Args:
        event: the raw event received from the SSE stream.
        message_snapshot: the `Message` accumulated so far (must already
            include this event — see `accumulate_event`).

    Returns:
        The events to yield to the consumer, in order.
    """
    events_to_fire: list[MessageStreamEvent] = []

    if event.type == "message_start":
        events_to_fire.append(event)
    elif event.type == "message_delta":
        events_to_fire.append(event)
    elif event.type == "message_stop":
        # Replace the raw stop event with one carrying the full message.
        events_to_fire.append(build(MessageStopEvent, type="message_stop", message=message_snapshot))
    elif event.type == "content_block_start":
        events_to_fire.append(event)
    elif event.type == "content_block_delta":
        events_to_fire.append(event)

        # The inner type checks guard against a delta arriving for a block
        # of a mismatched type; in that case only the raw event is fired.
        content_block = message_snapshot.content[event.index]
        if event.delta.type == "text_delta":
            if content_block.type == "text":
                events_to_fire.append(
                    build(
                        TextEvent,
                        type="text",
                        text=event.delta.text,
                        snapshot=content_block.text,
                    )
                )
        elif event.delta.type == "input_json_delta":
            if content_block.type == "tool_use":
                events_to_fire.append(
                    build(
                        InputJsonEvent,
                        type="input_json",
                        partial_json=event.delta.partial_json,
                        snapshot=content_block.input,
                    )
                )
        elif event.delta.type == "citations_delta":
            if content_block.type == "text":
                events_to_fire.append(
                    build(
                        CitationEvent,
                        type="citation",
                        citation=event.delta.citation,
                        snapshot=content_block.citations or [],
                    )
                )
        elif event.delta.type == "thinking_delta":
            if content_block.type == "thinking":
                events_to_fire.append(
                    build(
                        ThinkingEvent,
                        type="thinking",
                        thinking=event.delta.thinking,
                        snapshot=content_block.thinking,
                    )
                )
        elif event.delta.type == "signature_delta":
            if content_block.type == "thinking":
                events_to_fire.append(
                    build(
                        SignatureEvent,
                        type="signature",
                        signature=content_block.signature,
                    )
                )
        else:
            # we only want exhaustive checking for linters, not at runtime
            if TYPE_CHECKING:  # type: ignore[unreachable]
                assert_never(event.delta)
    elif event.type == "content_block_stop":
        content_block = message_snapshot.content[event.index]

        events_to_fire.append(
            build(ContentBlockStopEvent, type="content_block_stop", index=event.index, content_block=content_block),
        )
    else:
        # we only want exhaustive checking for linters, not at runtime
        if TYPE_CHECKING:  # type: ignore[unreachable]
            assert_never(event)

    return events_to_fire
|
||||
|
||||
|
||||
# Name of the untyped attribute used to stash the raw JSON byte buffer on a
# tool-use content block, so the partial JSON can be re-parsed on each delta.
JSON_BUF_PROPERTY = "__json_buf"

# Content block model classes whose `input` is accumulated from
# `input_json_delta` events.
TRACKS_TOOL_INPUT = (
    ToolUseBlock,
    ServerToolUseBlock,
)
|
||||
|
||||
|
||||
def accumulate_event(
    *,
    event: RawMessageStreamEvent,
    current_snapshot: Message | None,
) -> Message:
    """Fold a raw stream event into the accumulated `Message` snapshot.

    Args:
        event: the next raw event from the SSE stream; plain dicts are
            coerced into the typed model first.
        current_snapshot: the snapshot built so far, or `None` before the
            initial `message_start` event has been received.

    Returns:
        The updated snapshot — a fresh `Message` on `message_start`,
        otherwise `current_snapshot` mutated in place.

    Raises:
        RuntimeError: if an event arrives before `message_start`.
        TypeError: if `event` cannot be deserialised into a known model.
    """
    if not isinstance(cast(Any, event), BaseModel):
        # The event may be a raw dict (e.g. a manually constructed stream);
        # coerce it into the typed event model before inspecting it.
        event = cast(  # pyright: ignore[reportUnnecessaryCast]
            RawMessageStreamEvent,
            construct_type_unchecked(
                type_=cast(Type[RawMessageStreamEvent], RawMessageStreamEvent),
                value=event,
            ),
        )
        if not isinstance(cast(Any, event), BaseModel):
            raise TypeError(f"Unexpected event runtime type, after deserialising twice - {event} - {type(event)}")

    if current_snapshot is None:
        if event.type == "message_start":
            return Message.construct(**cast(Any, event.message.to_dict()))

        raise RuntimeError(f'Unexpected event order, got {event.type} before "message_start"')

    if event.type == "content_block_start":
        # TODO: check index
        current_snapshot.content.append(
            cast(
                ContentBlock,
                construct_type(type_=ContentBlock, value=event.content_block.model_dump()),
            ),
        )
    elif event.type == "content_block_delta":
        content = current_snapshot.content[event.index]
        if event.delta.type == "text_delta":
            if content.type == "text":
                content.text += event.delta.text
        elif event.delta.type == "input_json_delta":
            if isinstance(content, TRACKS_TOOL_INPUT):
                from jiter import from_json

                # we need to keep track of the raw JSON string as well so that we can
                # re-parse it for each delta, for now we just store it as an untyped
                # property on the snapshot
                json_buf = cast(bytes, getattr(content, JSON_BUF_PROPERTY, b""))
                json_buf += bytes(event.delta.partial_json, "utf-8")

                if json_buf:
                    # `partial_mode=True` lets jiter parse incomplete JSON.
                    content.input = from_json(json_buf, partial_mode=True)

                setattr(content, JSON_BUF_PROPERTY, json_buf)
        elif event.delta.type == "citations_delta":
            if content.type == "text":
                # `citations` may be None initially; create the list lazily.
                if not content.citations:
                    content.citations = [event.delta.citation]
                else:
                    content.citations.append(event.delta.citation)
        elif event.delta.type == "thinking_delta":
            if content.type == "thinking":
                content.thinking += event.delta.thinking
        elif event.delta.type == "signature_delta":
            if content.type == "thinking":
                content.signature = event.delta.signature
        else:
            # we only want exhaustive checking for linters, not at runtime
            if TYPE_CHECKING:  # type: ignore[unreachable]
                assert_never(event.delta)
    elif event.type == "message_delta":
        current_snapshot.stop_reason = event.delta.stop_reason
        current_snapshot.stop_sequence = event.delta.stop_sequence
        current_snapshot.usage.output_tokens = event.usage.output_tokens

        # Update other usage fields if they exist in the event
        if event.usage.input_tokens is not None:
            current_snapshot.usage.input_tokens = event.usage.input_tokens
        if event.usage.cache_creation_input_tokens is not None:
            current_snapshot.usage.cache_creation_input_tokens = event.usage.cache_creation_input_tokens
        if event.usage.cache_read_input_tokens is not None:
            current_snapshot.usage.cache_read_input_tokens = event.usage.cache_read_input_tokens
        if event.usage.server_tool_use is not None:
            current_snapshot.usage.server_tool_use = event.usage.server_tool_use

    return current_snapshot
|
||||
@@ -0,0 +1,100 @@
|
||||
from typing import Union
|
||||
from typing_extensions import List, Literal, Annotated
|
||||
|
||||
from ...types import (
|
||||
Message,
|
||||
ContentBlock,
|
||||
MessageDeltaEvent as RawMessageDeltaEvent,
|
||||
MessageStartEvent as RawMessageStartEvent,
|
||||
RawMessageStopEvent,
|
||||
ContentBlockDeltaEvent as RawContentBlockDeltaEvent,
|
||||
ContentBlockStartEvent as RawContentBlockStartEvent,
|
||||
RawContentBlockStopEvent,
|
||||
)
|
||||
from ..._models import BaseModel
|
||||
from ..._utils._transform import PropertyInfo
|
||||
from ...types.citations_delta import Citation
|
||||
|
||||
|
||||
class TextEvent(BaseModel):
    """Fired for each `text_delta`, pairing the delta with the accumulated text."""

    type: Literal["text"]

    text: str
    """The text delta"""

    snapshot: str
    """The entire accumulated text"""
|
||||
|
||||
|
||||
class CitationEvent(BaseModel):
    """Fired for each `citations_delta`, pairing the new citation with all accumulated citations."""

    type: Literal["citation"]

    citation: Citation
    """The new citation"""

    snapshot: List[Citation]
    """All of the accumulated citations"""
|
||||
|
||||
|
||||
class ThinkingEvent(BaseModel):
    """Fired for each `thinking_delta`, pairing the delta with the accumulated thinking text."""

    type: Literal["thinking"]

    thinking: str
    """The thinking delta"""

    snapshot: str
    """The accumulated thinking so far"""
|
||||
|
||||
|
||||
class SignatureEvent(BaseModel):
    """Fired for a `signature_delta` on a thinking block."""

    type: Literal["signature"]

    signature: str
    """The signature of the thinking block"""
|
||||
|
||||
|
||||
class InputJsonEvent(BaseModel):
    """Fired for each `input_json_delta` on a tool-use block."""

    type: Literal["input_json"]

    partial_json: str
    """A partial JSON string delta

    e.g. `'"San Francisco,'`
    """

    snapshot: object
    """The currently accumulated parsed object.


    e.g. `{'location': 'San Francisco, CA'}`
    """
|
||||
|
||||
|
||||
class MessageStopEvent(RawMessageStopEvent):
    """Raw `message_stop` event extended with the fully accumulated message."""

    type: Literal["message_stop"]

    message: Message
|
||||
|
||||
|
||||
class ContentBlockStopEvent(RawContentBlockStopEvent):
    """Raw `content_block_stop` event extended with the fully accumulated content block."""

    type: Literal["content_block_stop"]

    content_block: ContentBlock
|
||||
|
||||
|
||||
# Discriminated union of every event yielded by the higher-level message
# stream; the `type` field selects the concrete variant.
MessageStreamEvent = Annotated[
    Union[
        TextEvent,
        CitationEvent,
        ThinkingEvent,
        SignatureEvent,
        InputJsonEvent,
        RawMessageStartEvent,
        RawMessageDeltaEvent,
        MessageStopEvent,
        RawContentBlockStartEvent,
        RawContentBlockDeltaEvent,
        ContentBlockStopEvent,
    ],
    PropertyInfo(discriminator="type"),
]
|
||||
@@ -0,0 +1,27 @@
|
||||
from ._beta_runner import BetaToolRunner, BetaAsyncToolRunner, BetaStreamingToolRunner, BetaAsyncStreamingToolRunner
|
||||
from ._beta_functions import (
|
||||
BetaFunctionTool,
|
||||
BetaAsyncFunctionTool,
|
||||
BetaBuiltinFunctionTool,
|
||||
BetaFunctionToolResultType,
|
||||
BetaAsyncBuiltinFunctionTool,
|
||||
beta_tool,
|
||||
beta_async_tool,
|
||||
)
|
||||
from ._beta_builtin_memory_tool import BetaAbstractMemoryTool, BetaAsyncAbstractMemoryTool
|
||||
|
||||
# Explicit public API of the tools package.
__all__ = [
    "beta_tool",
    "beta_async_tool",
    "BetaFunctionTool",
    "BetaAsyncFunctionTool",
    "BetaBuiltinFunctionTool",
    "BetaAsyncBuiltinFunctionTool",
    "BetaToolRunner",
    "BetaAsyncStreamingToolRunner",
    "BetaStreamingToolRunner",
    "BetaAsyncToolRunner",
    "BetaFunctionToolResultType",
    "BetaAbstractMemoryTool",
    "BetaAsyncAbstractMemoryTool",
]
|
||||
@@ -0,0 +1,245 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from abc import abstractmethod
|
||||
from typing import TYPE_CHECKING, Any, cast
|
||||
from typing_extensions import override, assert_never
|
||||
|
||||
from ..._models import construct_type_unchecked
|
||||
from ...types.beta import (
|
||||
BetaMemoryTool20250818Param,
|
||||
BetaMemoryTool20250818Command,
|
||||
BetaCacheControlEphemeralParam,
|
||||
BetaMemoryTool20250818ViewCommand,
|
||||
BetaMemoryTool20250818CreateCommand,
|
||||
BetaMemoryTool20250818DeleteCommand,
|
||||
BetaMemoryTool20250818InsertCommand,
|
||||
BetaMemoryTool20250818RenameCommand,
|
||||
BetaMemoryTool20250818StrReplaceCommand,
|
||||
)
|
||||
from ._beta_functions import BetaBuiltinFunctionTool, BetaFunctionToolResultType, BetaAsyncBuiltinFunctionTool
|
||||
|
||||
|
||||
class BetaAbstractMemoryTool(BetaBuiltinFunctionTool):
    """Abstract base class for memory tool implementations.

    This class provides the interface for implementing a custom memory backend for Claude.

    Subclass this to create your own memory storage solution (e.g., database, cloud storage, encrypted files, etc.).

    Example usage:

    ```py
    class MyMemoryTool(BetaAbstractMemoryTool):
        def view(self, command: BetaMemoryTool20250818ViewCommand) -> BetaFunctionToolResultType:
            ...
            return "view result"

        def create(self, command: BetaMemoryTool20250818CreateCommand) -> BetaFunctionToolResultType:
            ...
            return "created successfully"

        # ... implement other abstract methods


    client = Anthropic()
    memory_tool = MyMemoryTool()
    message = client.beta.messages.run_tools(
        model="claude-sonnet-4-5",
        messages=[{"role": "user", "content": "Remember that I like coffee"}],
        tools=[memory_tool],
    ).until_done()
    ```
    """

    def __init__(self, *, cache_control: BetaCacheControlEphemeralParam | None = None) -> None:
        # Optional prompt-caching control attached to the tool param.
        super().__init__()
        self._cache_control = cache_control

    @override
    def to_dict(self) -> BetaMemoryTool20250818Param:
        """Return the wire representation of the builtin memory tool."""
        param: BetaMemoryTool20250818Param = {"type": "memory_20250818", "name": "memory"}

        if self._cache_control is not None:
            param["cache_control"] = self._cache_control

        return param

    @override
    def call(self, input: object) -> BetaFunctionToolResultType:
        """Deserialise raw tool input into a typed command and execute it."""
        command = cast(
            BetaMemoryTool20250818Command,
            construct_type_unchecked(value=input, type_=cast(Any, BetaMemoryTool20250818Command)),
        )
        return self.execute(command)

    def execute(self, command: BetaMemoryTool20250818Command) -> BetaFunctionToolResultType:
        """Execute a memory command and return the result.

        This method dispatches to the appropriate handler method based on the
        command type (view, create, str_replace, insert, delete, rename).

        You typically don't need to override this method.
        """
        if command.command == "view":
            return self.view(command)
        elif command.command == "create":
            return self.create(command)
        elif command.command == "str_replace":
            return self.str_replace(command)
        elif command.command == "insert":
            return self.insert(command)
        elif command.command == "delete":
            return self.delete(command)
        elif command.command == "rename":
            return self.rename(command)
        elif TYPE_CHECKING:  # type: ignore[unreachable]
            # exhaustiveness check for linters only; never taken at runtime
            assert_never(command)
        else:
            raise NotImplementedError(f"Unknown command: {command.command}")

    @abstractmethod
    def view(self, command: BetaMemoryTool20250818ViewCommand) -> BetaFunctionToolResultType:
        """View the contents of a memory path."""
        pass

    @abstractmethod
    def create(self, command: BetaMemoryTool20250818CreateCommand) -> BetaFunctionToolResultType:
        """Create a new memory file with the specified content."""
        pass

    @abstractmethod
    def str_replace(self, command: BetaMemoryTool20250818StrReplaceCommand) -> BetaFunctionToolResultType:
        """Replace text in a memory file."""
        pass

    @abstractmethod
    def insert(self, command: BetaMemoryTool20250818InsertCommand) -> BetaFunctionToolResultType:
        """Insert text at a specific line number in a memory file."""
        pass

    @abstractmethod
    def delete(self, command: BetaMemoryTool20250818DeleteCommand) -> BetaFunctionToolResultType:
        """Delete a memory file or directory."""
        pass

    @abstractmethod
    def rename(self, command: BetaMemoryTool20250818RenameCommand) -> BetaFunctionToolResultType:
        """Rename or move a memory file or directory."""
        pass

    def clear_all_memory(self) -> BetaFunctionToolResultType:
        """Clear all memory data. Optional override; not required by `execute`."""
        raise NotImplementedError("clear_all_memory not implemented")
|
||||
|
||||
|
||||
class BetaAsyncAbstractMemoryTool(BetaAsyncBuiltinFunctionTool):
    """Abstract base class for async memory tool implementations.

    This class provides the interface for implementing a custom memory backend for Claude.

    Subclass this to create your own memory storage solution (e.g., database, cloud storage, encrypted files, etc.).

    Example usage:

    ```py
    class MyMemoryTool(BetaAsyncAbstractMemoryTool):
        async def view(self, command: BetaMemoryTool20250818ViewCommand) -> BetaFunctionToolResultType:
            ...
            return "view result"

        async def create(self, command: BetaMemoryTool20250818CreateCommand) -> BetaFunctionToolResultType:
            ...
            return "created successfully"

        # ... implement other abstract methods


    client = AsyncAnthropic()
    memory_tool = MyMemoryTool()
    message = await client.beta.messages.run_tools(
        model="claude-sonnet-4-5",
        messages=[{"role": "user", "content": "Remember that I like coffee"}],
        tools=[memory_tool],
    ).until_done()
    ```
    """

    def __init__(self, *, cache_control: BetaCacheControlEphemeralParam | None = None) -> None:
        # Optional prompt-caching control attached to the tool param.
        super().__init__()
        self._cache_control = cache_control

    @override
    def to_dict(self) -> BetaMemoryTool20250818Param:
        """Return the wire representation of the builtin memory tool."""
        param: BetaMemoryTool20250818Param = {"type": "memory_20250818", "name": "memory"}

        if self._cache_control is not None:
            param["cache_control"] = self._cache_control

        return param

    @override
    async def call(self, input: object) -> BetaFunctionToolResultType:
        """Deserialise raw tool input into a typed command and execute it."""
        command = cast(
            BetaMemoryTool20250818Command,
            construct_type_unchecked(value=input, type_=cast(Any, BetaMemoryTool20250818Command)),
        )
        return await self.execute(command)

    async def execute(self, command: BetaMemoryTool20250818Command) -> BetaFunctionToolResultType:
        """Execute a memory command and return the result.

        This method dispatches to the appropriate handler method based on the
        command type (view, create, str_replace, insert, delete, rename).

        You typically don't need to override this method.
        """
        if command.command == "view":
            return await self.view(command)
        elif command.command == "create":
            return await self.create(command)
        elif command.command == "str_replace":
            return await self.str_replace(command)
        elif command.command == "insert":
            return await self.insert(command)
        elif command.command == "delete":
            return await self.delete(command)
        elif command.command == "rename":
            return await self.rename(command)
        elif TYPE_CHECKING:  # type: ignore[unreachable]
            # exhaustiveness check for linters only; never taken at runtime
            assert_never(command)
        else:
            raise NotImplementedError(f"Unknown command: {command.command}")

    @abstractmethod
    async def view(self, command: BetaMemoryTool20250818ViewCommand) -> BetaFunctionToolResultType:
        """View the contents of a memory path."""
        pass

    @abstractmethod
    async def create(self, command: BetaMemoryTool20250818CreateCommand) -> BetaFunctionToolResultType:
        """Create a new memory file with the specified content."""
        pass

    @abstractmethod
    async def str_replace(self, command: BetaMemoryTool20250818StrReplaceCommand) -> BetaFunctionToolResultType:
        """Replace text in a memory file."""
        pass

    @abstractmethod
    async def insert(self, command: BetaMemoryTool20250818InsertCommand) -> BetaFunctionToolResultType:
        """Insert text at a specific line number in a memory file."""
        pass

    @abstractmethod
    async def delete(self, command: BetaMemoryTool20250818DeleteCommand) -> BetaFunctionToolResultType:
        """Delete a memory file or directory."""
        pass

    @abstractmethod
    async def rename(self, command: BetaMemoryTool20250818RenameCommand) -> BetaFunctionToolResultType:
        """Rename or move a memory file or directory."""
        pass

    async def clear_all_memory(self) -> BetaFunctionToolResultType:
        """Clear all memory data. Optional override; not required by `execute`."""
        raise NotImplementedError("clear_all_memory not implemented")
|
||||
@@ -0,0 +1,319 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Any, Union, Generic, TypeVar, Callable, Iterable, Coroutine, cast, overload
|
||||
from inspect import iscoroutinefunction
|
||||
from typing_extensions import TypeAlias, override
|
||||
|
||||
import pydantic
|
||||
import docstring_parser
|
||||
from pydantic import BaseModel
|
||||
|
||||
from ... import _compat
|
||||
from ..._utils import is_dict
|
||||
from ..._compat import cached_property
|
||||
from ..._models import TypeAdapter
|
||||
from ...types.beta import BetaToolUnionParam
|
||||
from ..._utils._utils import CallableT
|
||||
from ...types.tool_param import ToolParam, InputSchema
|
||||
from ...types.beta.beta_tool_result_block_param import Content as BetaContent
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
BetaFunctionToolResultType: TypeAlias = Union[str, Iterable[BetaContent]]
|
||||
|
||||
Function = Callable[..., BetaFunctionToolResultType]
|
||||
FunctionT = TypeVar("FunctionT", bound=Function)
|
||||
|
||||
AsyncFunction = Callable[..., Coroutine[Any, Any, BetaFunctionToolResultType]]
|
||||
AsyncFunctionT = TypeVar("AsyncFunctionT", bound=AsyncFunction)
|
||||
|
||||
|
||||
class BetaBuiltinFunctionTool(ABC):
    """Base class for synchronous builtin tools.

    Subclasses provide the wire representation via `to_dict` and handle
    invocations via `call`.
    """

    @abstractmethod
    def to_dict(self) -> BetaToolUnionParam: ...

    @abstractmethod
    def call(self, input: object) -> BetaFunctionToolResultType: ...

    @property
    def name(self) -> str:
        # The tool name is defined by the param dict returned by `to_dict`.
        return self.to_dict()["name"]
|
||||
|
||||
|
||||
class BetaAsyncBuiltinFunctionTool(ABC):
    """Base class for asynchronous builtin tools.

    Same contract as `BetaBuiltinFunctionTool`, but `call` is a coroutine.
    """

    @abstractmethod
    def to_dict(self) -> BetaToolUnionParam: ...

    @abstractmethod
    async def call(self, input: object) -> BetaFunctionToolResultType: ...

    @property
    def name(self) -> str:
        # The tool name is defined by the param dict returned by `to_dict`.
        return self.to_dict()["name"]
|
||||
|
||||
|
||||
class BaseFunctionTool(Generic[CallableT]):
    """Shared implementation for function-backed tools.

    Wraps a Python callable and derives the tool name, description and JSON
    input schema from its signature and docstring, unless overridden.
    Requires Pydantic v2.
    """

    func: CallableT
    """The function this tool is wrapping"""

    name: str
    """The name of the tool that will be sent to the API"""

    # Human-readable description sent to the API; defaults to the docstring.
    description: str

    # JSON schema describing the tool's input object.
    input_schema: InputSchema

    def __init__(
        self,
        func: CallableT,
        *,
        name: str | None = None,
        description: str | None = None,
        input_schema: InputSchema | type[BaseModel] | None = None,
    ) -> None:
        if _compat.PYDANTIC_V1:
            raise RuntimeError("Tool functions are only supported with Pydantic v2")

        self.func = func
        # `validate_call` validates/coerces keyword arguments against the
        # function's annotations before invocation.
        self._func_with_validate = pydantic.validate_call(func)
        self.name = name or func.__name__

        self.description = description or self._get_description_from_docstring()

        if input_schema is not None:
            if isinstance(input_schema, type):
                # A pydantic model class was given; use its JSON schema.
                self.input_schema: InputSchema = input_schema.model_json_schema()
            else:
                self.input_schema = input_schema
        else:
            self.input_schema = self._create_schema_from_function()

    @property
    def __call__(self) -> CallableT:
        # Expose the wrapped function so the tool instance is itself callable
        # with the original signature.
        return self.func

    def to_dict(self) -> ToolParam:
        """Return the tool definition sent to the API."""
        return {
            "name": self.name,
            "description": self.description,
            "input_schema": self.input_schema,
        }

    @cached_property
    def _parsed_docstring(self) -> docstring_parser.Docstring:
        # Parsed once and cached; used for both description and param docs.
        return docstring_parser.parse(self.func.__doc__ or "")

    def _get_description_from_docstring(self) -> str:
        """Extract description from parsed docstring."""
        if self._parsed_docstring.short_description:
            description = self._parsed_docstring.short_description
            if self._parsed_docstring.long_description:
                description += f"\n\n{self._parsed_docstring.long_description}"
            return description
        return ""

    def _create_schema_from_function(self) -> InputSchema:
        """Create JSON schema from function signature using pydantic."""

        from pydantic_core import CoreSchema
        from pydantic.json_schema import JsonSchemaValue, GenerateJsonSchema
        from pydantic_core.core_schema import ArgumentsParameter

        class CustomGenerateJsonSchema(GenerateJsonSchema):
            # Augments pydantic's generated schema with per-parameter
            # descriptions pulled from the wrapped function's docstring.
            def __init__(self, *, func: Callable[..., Any], parsed_docstring: Any) -> None:
                super().__init__()
                self._func = func
                self._parsed_docstring = parsed_docstring

            def __call__(self, *_args: Any, **_kwds: Any) -> "CustomGenerateJsonSchema":  # noqa: ARG002
                # The schema generator is normally instantiated by the
                # caller; returning `self` reuses this configured instance.
                return self

            @override
            def kw_arguments_schema(
                self,
                arguments: "list[ArgumentsParameter]",
                var_kwargs_schema: CoreSchema | None,
            ) -> JsonSchemaValue:
                schema = super().kw_arguments_schema(arguments, var_kwargs_schema)
                if schema.get("type") != "object":
                    return schema

                properties = schema.get("properties")
                if not properties or not is_dict(properties):
                    return schema

                # Add parameter descriptions from docstring
                for param in self._parsed_docstring.params:
                    prop_schema = properties.get(param.arg_name)
                    if not prop_schema or not is_dict(prop_schema):
                        continue

                    # Explicit descriptions (e.g. from Field) take priority.
                    if param.description and "description" not in prop_schema:
                        prop_schema["description"] = param.description

                return schema

        schema_generator = CustomGenerateJsonSchema(func=self.func, parsed_docstring=self._parsed_docstring)
        return self._adapter.json_schema(schema_generator=schema_generator)  # type: ignore

    @cached_property
    def _adapter(self) -> TypeAdapter[Any]:
        # TypeAdapter over the validate_call wrapper yields the arguments
        # schema used for JSON-schema generation.
        return TypeAdapter(self._func_with_validate)
|
||||
|
||||
|
||||
class BetaFunctionTool(BaseFunctionTool[FunctionT]):
    """Synchronous function tool.

    Validates tool input against the wrapped function's signature, then
    invokes the function with the input as keyword arguments.
    """

    def call(self, input: object) -> BetaFunctionToolResultType:
        """Invoke the wrapped function with `input` as keyword arguments."""
        # Guard against accidentally wrapping a coroutine function.
        if iscoroutinefunction(self.func):
            raise RuntimeError("Cannot call a coroutine function synchronously. Use `@async_tool` instead.")

        if not is_dict(input):
            raise TypeError(f"Input must be a dictionary, got {type(input).__name__}")

        kwargs = cast(Any, input)
        try:
            return self._func_with_validate(**kwargs)
        except pydantic.ValidationError as e:
            raise ValueError(f"Invalid arguments for function {self.name}") from e
|
||||
|
||||
|
||||
class BetaAsyncFunctionTool(BaseFunctionTool[AsyncFunctionT]):
    """Asynchronous function tool.

    Validates tool input against the wrapped coroutine function's signature,
    then awaits the function with the input as keyword arguments.
    """

    async def call(self, input: object) -> BetaFunctionToolResultType:
        """Await the wrapped coroutine function with `input` as keyword arguments."""
        # Guard against accidentally wrapping a plain (non-async) function.
        if not iscoroutinefunction(self.func):
            raise RuntimeError("Cannot call a synchronous function asynchronously. Use `@tool` instead.")

        if not is_dict(input):
            raise TypeError(f"Input must be a dictionary, got {type(input).__name__}")

        kwargs = cast(Any, input)
        try:
            return await self._func_with_validate(**kwargs)
        except pydantic.ValidationError as e:
            raise ValueError(f"Invalid arguments for function {self.name}") from e
|
||||
|
||||
|
||||
@overload
def beta_tool(func: FunctionT) -> BetaFunctionTool[FunctionT]: ...


@overload
def beta_tool(
    func: FunctionT,
    *,
    name: str | None = None,
    description: str | None = None,
    input_schema: InputSchema | type[BaseModel] | None = None,
) -> BetaFunctionTool[FunctionT]: ...


@overload
def beta_tool(
    *,
    name: str | None = None,
    description: str | None = None,
    input_schema: InputSchema | type[BaseModel] | None = None,
) -> Callable[[FunctionT], BetaFunctionTool[FunctionT]]: ...


def beta_tool(
    func: FunctionT | None = None,
    *,
    name: str | None = None,
    description: str | None = None,
    input_schema: InputSchema | type[BaseModel] | None = None,
) -> BetaFunctionTool[FunctionT] | Callable[[FunctionT], BetaFunctionTool[FunctionT]]:
    """Create a FunctionTool from a function with automatic schema inference.

    Can be used as a decorator with or without parentheses:

        @beta_tool
        def my_func(x: int) -> str: ...

        @beta_tool()
        def my_func(x: int) -> str: ...

        @beta_tool(name="custom_name")
        def my_func(x: int) -> str: ...

    Args:
        func: the function to wrap (when used without parentheses).
        name: override for the tool name (defaults to the function name).
        description: override for the description (defaults to the docstring).
        input_schema: explicit JSON schema or a pydantic model class;
            inferred from the signature when omitted.

    Raises:
        RuntimeError: if running under Pydantic v1.
    """
    if _compat.PYDANTIC_V1:
        raise RuntimeError("Tool functions are only supported with Pydantic v2")

    if func is not None:
        # @beta_tool called without parentheses
        return BetaFunctionTool(func=func, name=name, description=description, input_schema=input_schema)

    # @beta_tool()
    def decorator(func: FunctionT) -> BetaFunctionTool[FunctionT]:
        return BetaFunctionTool(func=func, name=name, description=description, input_schema=input_schema)

    return decorator
|
||||
|
||||
|
||||
@overload
def beta_async_tool(func: AsyncFunctionT) -> BetaAsyncFunctionTool[AsyncFunctionT]: ...


@overload
def beta_async_tool(
    func: AsyncFunctionT,
    *,
    name: str | None = None,
    description: str | None = None,
    input_schema: InputSchema | type[BaseModel] | None = None,
) -> BetaAsyncFunctionTool[AsyncFunctionT]: ...


@overload
def beta_async_tool(
    *,
    name: str | None = None,
    description: str | None = None,
    input_schema: InputSchema | type[BaseModel] | None = None,
) -> Callable[[AsyncFunctionT], BetaAsyncFunctionTool[AsyncFunctionT]]: ...


def beta_async_tool(
    func: AsyncFunctionT | None = None,
    *,
    name: str | None = None,
    description: str | None = None,
    input_schema: InputSchema | type[BaseModel] | None = None,
) -> BetaAsyncFunctionTool[AsyncFunctionT] | Callable[[AsyncFunctionT], BetaAsyncFunctionTool[AsyncFunctionT]]:
    """Create an AsyncFunctionTool from a function with automatic schema inference.

    Can be used as a decorator with or without parentheses:

        @beta_async_tool
        async def my_func(x: int) -> str: ...

        @beta_async_tool()
        async def my_func(x: int) -> str: ...

        @beta_async_tool(name="custom_name")
        async def my_func(x: int) -> str: ...

    Args:
        func: the coroutine function to wrap (when used without parentheses).
        name: override for the tool name (defaults to the function name).
        description: override for the description (defaults to the docstring).
        input_schema: explicit JSON schema or a pydantic model class;
            inferred from the signature when omitted.

    Raises:
        RuntimeError: if running under Pydantic v1.
    """
    if _compat.PYDANTIC_V1:
        raise RuntimeError("Tool functions are only supported with Pydantic v2")

    if func is not None:
        # @beta_async_tool called without parentheses
        return BetaAsyncFunctionTool(
            func=func,
            name=name,
            description=description,
            input_schema=input_schema,
        )

    # @beta_async_tool()
    def decorator(func: AsyncFunctionT) -> BetaAsyncFunctionTool[AsyncFunctionT]:
        return BetaAsyncFunctionTool(
            func=func,
            name=name,
            description=description,
            input_schema=input_schema,
        )

    return decorator
|
||||
|
||||
|
||||
# Union types accepted by the tool runner: decorated function tools plus
# builtin tool implementations, in sync and async flavours.
BetaRunnableTool = Union[BetaFunctionTool[Any], BetaBuiltinFunctionTool]
BetaAsyncRunnableTool = Union[BetaAsyncFunctionTool[Any], BetaAsyncBuiltinFunctionTool]
|
||||
@@ -0,0 +1,417 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
List,
|
||||
Union,
|
||||
Generic,
|
||||
TypeVar,
|
||||
Callable,
|
||||
Iterable,
|
||||
Iterator,
|
||||
Coroutine,
|
||||
AsyncIterator,
|
||||
)
|
||||
from contextlib import contextmanager, asynccontextmanager
|
||||
from typing_extensions import TypedDict, override
|
||||
|
||||
import httpx
|
||||
|
||||
from ..._types import Body, Query, Headers, NotGiven
|
||||
from ..._utils import consume_sync_iterator, consume_async_iterator
|
||||
from ...types.beta import BetaMessage, BetaMessageParam
|
||||
from ._beta_functions import (
|
||||
BetaFunctionTool,
|
||||
BetaRunnableTool,
|
||||
BetaAsyncFunctionTool,
|
||||
BetaAsyncRunnableTool,
|
||||
BetaBuiltinFunctionTool,
|
||||
BetaAsyncBuiltinFunctionTool,
|
||||
)
|
||||
from ..streaming._beta_messages import BetaMessageStream, BetaAsyncMessageStream
|
||||
from ...types.beta.parsed_beta_message import ResponseFormatT, ParsedBetaMessage, ParsedBetaContentBlock
|
||||
from ...types.beta.message_create_params import ParseMessageCreateParamsBase
|
||||
from ...types.beta.beta_tool_result_block_param import BetaToolResultBlockParam
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ..._client import Anthropic, AsyncAnthropic
|
||||
|
||||
|
||||
# Bound to any runnable tool variant (sync/async, user-defined or builtin) so
# the shared base runner can be generic over all of them.
AnyFunctionToolT = TypeVar(
    "AnyFunctionToolT",
    bound=Union[
        BetaFunctionTool[Any], BetaAsyncFunctionTool[Any], BetaBuiltinFunctionTool, BetaAsyncBuiltinFunctionTool
    ],
)
# Item type yielded by a concrete runner (a parsed message or a message stream).
RunnerItemT = TypeVar("RunnerItemT")

log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RequestOptions(TypedDict, total=False):
    """Per-request overrides forwarded verbatim to each messages API call."""

    extra_headers: Headers | None
    extra_query: Query | None
    extra_body: Body | None
    timeout: float | httpx.Timeout | None | NotGiven
|
||||
|
||||
|
||||
class BaseToolRunner(Generic[AnyFunctionToolT, ResponseFormatT]):
    """State and helpers shared by the sync and async tool-runner loops.

    Holds the request params, the tools indexed by name, and the loop
    bookkeeping (iteration counter, cached tool-call response).
    """

    def __init__(
        self,
        *,
        params: ParseMessageCreateParamsBase[ResponseFormatT],
        options: RequestOptions,
        tools: Iterable[AnyFunctionToolT],
        max_iterations: int | None = None,
    ) -> None:
        # Index tools by name for O(1) lookup when resolving tool_use blocks.
        self._tools_by_name = {tool.name: tool for tool in tools}
        # Shallow-copy the messages list so appends never mutate the caller's input.
        self._params: ParseMessageCreateParamsBase[ResponseFormatT] = {
            **params,
            "messages": [message for message in params["messages"]],
        }
        self._options = options
        # Set when the user mutates the conversation between iterations.
        self._messages_modified = False
        # Memoised result of generate_tool_call_response() for the current turn.
        self._cached_tool_call_response: BetaMessageParam | None = None
        # None means no iteration limit.
        self._max_iterations = max_iterations
        self._iteration_count = 0

    def set_messages_params(
        self,
        params: ParseMessageCreateParamsBase[ResponseFormatT]
        | Callable[[ParseMessageCreateParamsBase[ResponseFormatT]], ParseMessageCreateParamsBase[ResponseFormatT]],
    ) -> None:
        """
        Update the parameters for the next API call. This invalidates any cached tool responses.

        Args:
            params (ParseMessageCreateParamsBase[ResponseFormatT] | Callable): Either new parameters or a function to mutate existing parameters
        """
        if callable(params):
            params = params(self._params)
        self._params = params

    def append_messages(self, *messages: BetaMessageParam | ParsedBetaMessage[ResponseFormatT]) -> None:
        """Add one or more messages to the conversation history.

        This invalidates the cached tool response, i.e. if tools were already called, then they will
        be called again on the next loop iteration.
        """
        # Normalise BetaMessage objects into plain message params.
        message_params: List[BetaMessageParam] = [
            {"role": message.role, "content": message.content} if isinstance(message, BetaMessage) else message
            for message in messages
        ]
        self._messages_modified = True
        self.set_messages_params(lambda params: {**params, "messages": [*self._params["messages"], *message_params]})
        self._cached_tool_call_response = None

    def _should_stop(self) -> bool:
        # Stop once the configured iteration budget (if any) is exhausted.
        if self._max_iterations is not None and self._iteration_count >= self._max_iterations:
            return True
        return False
|
||||
|
||||
|
||||
class BaseSyncToolRunner(BaseToolRunner[BetaRunnableTool, ResponseFormatT], Generic[RunnerItemT, ResponseFormatT], ABC):
    """Synchronous tool-runner loop: call the API, run requested tools, repeat."""

    def __init__(
        self,
        *,
        params: ParseMessageCreateParamsBase[ResponseFormatT],
        options: RequestOptions,
        tools: Iterable[BetaRunnableTool],
        client: Anthropic,
        max_iterations: int | None = None,
    ) -> None:
        super().__init__(params=params, options=options, tools=tools, max_iterations=max_iterations)
        self._client = client
        # The generator driving the loop; __iter__/__next__ delegate to it.
        self._iterator = self.__run__()
        # Either the last message itself or a zero-arg callable producing it
        # (streaming runners store `stream.get_final_message` here).
        self._last_message: (
            Callable[[], ParsedBetaMessage[ResponseFormatT]] | ParsedBetaMessage[ResponseFormatT] | None
        ) = None

    def __next__(self) -> RunnerItemT:
        return self._iterator.__next__()

    def __iter__(self) -> Iterator[RunnerItemT]:
        for item in self._iterator:
            yield item

    @abstractmethod
    @contextmanager
    def _handle_request(self) -> Iterator[RunnerItemT]:
        """Perform one API call, record the last message, and yield the item."""
        raise NotImplementedError()
        yield  # type: ignore[unreachable]

    def __run__(self) -> Iterator[RunnerItemT]:
        # First request happens unconditionally; the loop below only runs when
        # the model actually asked for tool calls.
        with self._handle_request() as item:
            yield item
        message = self._get_last_message()
        assert message is not None
        self._iteration_count += 1

        while not self._should_stop():
            response = self.generate_tool_call_response()
            if response is None:
                log.debug("Tool call was not requested, exiting from tool runner loop.")
                return

            # If the user already appended/replaced messages, don't double-append
            # the assistant message and tool results.
            if not self._messages_modified:
                self.append_messages(message, response)

            self._iteration_count += 1
            self._messages_modified = False
            self._cached_tool_call_response = None

            with self._handle_request() as item:
                yield item
            message = self._get_last_message()
            assert message is not None

    def until_done(self) -> ParsedBetaMessage[ResponseFormatT]:
        """
        Consumes the tool runner stream and returns the last message if it has not been consumed yet.
        If it has, it simply returns the last message.
        """
        consume_sync_iterator(self)
        last_message = self._get_last_message()
        assert last_message is not None
        return last_message

    def generate_tool_call_response(self) -> BetaMessageParam | None:
        """Generate a MessageParam by calling tool functions with any tool use blocks from the last message.

        Note the tool call response is cached, repeated calls to this method will return the same response.

        None can be returned if no tool call was applicable.
        """
        if self._cached_tool_call_response is not None:
            log.debug("Returning cached tool call response.")
            return self._cached_tool_call_response
        response = self._generate_tool_call_response()
        self._cached_tool_call_response = response
        return response

    def _generate_tool_call_response(self) -> BetaMessageParam | None:
        content = self._get_last_assistant_message_content()
        if not content:
            return None

        tool_use_blocks = [block for block in content if block.type == "tool_use"]
        if not tool_use_blocks:
            return None

        results: list[BetaToolResultBlockParam] = []

        for tool_use in tool_use_blocks:
            tool = self._tools_by_name.get(tool_use.name)
            if tool is None:
                # Unknown tool: report it to the model instead of raising.
                results.append(
                    {
                        "type": "tool_result",
                        "tool_use_id": tool_use.id,
                        "content": f"Error: Tool '{tool_use.name}' not found",
                        "is_error": True,
                    }
                )
                continue

            try:
                result = tool.call(tool_use.input)
                results.append({"type": "tool_result", "tool_use_id": tool_use.id, "content": result})
            except Exception as exc:
                # Tool failures are surfaced to the model as error results, not raised.
                log.exception(f"Error occurred while calling tool: {tool.name}", exc_info=exc)
                results.append(
                    {
                        "type": "tool_result",
                        "tool_use_id": tool_use.id,
                        "content": repr(exc),
                        "is_error": True,
                    }
                )

        return {"role": "user", "content": results}

    def _get_last_message(self) -> ParsedBetaMessage[ResponseFormatT] | None:
        # Streaming runners store a callable; resolve it lazily here.
        if callable(self._last_message):
            return self._last_message()
        return self._last_message

    def _get_last_assistant_message_content(self) -> list[ParsedBetaContentBlock[ResponseFormatT]] | None:
        last_message = self._get_last_message()
        if last_message is None or last_message.role != "assistant" or not last_message.content:
            return None

        return last_message.content
|
||||
|
||||
|
||||
class BetaToolRunner(BaseSyncToolRunner[ParsedBetaMessage[ResponseFormatT], ResponseFormatT]):
    """Tool runner that performs one blocking `.parse()` call per iteration."""

    @override
    @contextmanager
    def _handle_request(self) -> Iterator[ParsedBetaMessage[ResponseFormatT]]:
        # The parsed response is yielded to the caller and also recorded as
        # the "last message" consulted by the tool-call loop.
        parsed = self._client.beta.messages.parse(**self._params, **self._options)
        self._last_message = parsed
        yield parsed
|
||||
|
||||
|
||||
class BetaStreamingToolRunner(BaseSyncToolRunner[BetaMessageStream[ResponseFormatT], ResponseFormatT]):
    """Tool runner that opens a streaming request per iteration."""

    @override
    @contextmanager
    def _handle_request(self) -> Iterator[BetaMessageStream[ResponseFormatT]]:
        with self._client.beta.messages.stream(**self._params, **self._options) as event_stream:
            # Store the accessor, not the message: the final message only
            # exists once the stream has been fully consumed.
            self._last_message = event_stream.get_final_message
            yield event_stream
|
||||
|
||||
|
||||
class BaseAsyncToolRunner(
    BaseToolRunner[BetaAsyncRunnableTool, ResponseFormatT], Generic[RunnerItemT, ResponseFormatT], ABC
):
    """Asynchronous tool-runner loop: call the API, await tools, repeat."""

    def __init__(
        self,
        *,
        params: ParseMessageCreateParamsBase[ResponseFormatT],
        options: RequestOptions,
        tools: Iterable[BetaAsyncRunnableTool],
        client: AsyncAnthropic,
        max_iterations: int | None = None,
    ) -> None:
        super().__init__(params=params, options=options, tools=tools, max_iterations=max_iterations)
        self._client = client
        # The async generator driving the loop; __aiter__/__anext__ delegate to it.
        self._iterator = self.__run__()
        # Either the last message itself or a zero-arg coroutine factory
        # producing it (streaming runners store `stream.get_final_message`).
        self._last_message: (
            Callable[[], Coroutine[None, None, ParsedBetaMessage[ResponseFormatT]]]
            | ParsedBetaMessage[ResponseFormatT]
            | None
        ) = None

    async def __anext__(self) -> RunnerItemT:
        return await self._iterator.__anext__()

    async def __aiter__(self) -> AsyncIterator[RunnerItemT]:
        async for item in self._iterator:
            yield item

    @abstractmethod
    @asynccontextmanager
    async def _handle_request(self) -> AsyncIterator[RunnerItemT]:
        """Perform one API call, record the last message, and yield the item."""
        raise NotImplementedError()
        yield  # type: ignore[unreachable]

    async def __run__(self) -> AsyncIterator[RunnerItemT]:
        # First request happens unconditionally; the loop below only runs when
        # the model actually asked for tool calls.
        async with self._handle_request() as item:
            yield item
        message = await self._get_last_message()
        assert message is not None
        self._iteration_count += 1

        while not self._should_stop():
            response = await self.generate_tool_call_response()
            if response is None:
                log.debug("Tool call was not requested, exiting from tool runner loop.")
                return

            # If the user already appended/replaced messages, don't double-append
            # the assistant message and tool results.
            if not self._messages_modified:
                self.append_messages(message, response)

            self._iteration_count += 1
            self._messages_modified = False
            self._cached_tool_call_response = None

            async with self._handle_request() as item:
                yield item
            message = await self._get_last_message()
            assert message is not None

    async def until_done(self) -> ParsedBetaMessage[ResponseFormatT]:
        """
        Consumes the tool runner stream and returns the last message if it has not been consumed yet.
        If it has, it simply returns the last message.
        """
        await consume_async_iterator(self)
        last_message = await self._get_last_message()
        assert last_message is not None
        return last_message

    async def generate_tool_call_response(self) -> BetaMessageParam | None:
        """Generate a MessageParam by calling tool functions with any tool use blocks from the last message.

        Note the tool call response is cached, repeated calls to this method will return the same response.

        None can be returned if no tool call was applicable.
        """
        if self._cached_tool_call_response is not None:
            log.debug("Returning cached tool call response.")
            return self._cached_tool_call_response

        response = await self._generate_tool_call_response()
        self._cached_tool_call_response = response
        return response

    async def _get_last_message(self) -> ParsedBetaMessage[ResponseFormatT] | None:
        # Streaming runners store a coroutine factory; await it lazily here.
        if callable(self._last_message):
            return await self._last_message()
        return self._last_message

    async def _get_last_assistant_message_content(self) -> list[ParsedBetaContentBlock[ResponseFormatT]] | None:
        last_message = await self._get_last_message()
        if last_message is None or last_message.role != "assistant" or not last_message.content:
            return None

        return last_message.content

    async def _generate_tool_call_response(self) -> BetaMessageParam | None:
        content = await self._get_last_assistant_message_content()
        if not content:
            return None

        tool_use_blocks = [block for block in content if block.type == "tool_use"]
        if not tool_use_blocks:
            return None

        results: list[BetaToolResultBlockParam] = []

        for tool_use in tool_use_blocks:
            tool = self._tools_by_name.get(tool_use.name)
            if tool is None:
                # Unknown tool: report it to the model instead of raising.
                results.append(
                    {
                        "type": "tool_result",
                        "tool_use_id": tool_use.id,
                        "content": f"Error: Tool '{tool_use.name}' not found",
                        "is_error": True,
                    }
                )
                continue

            try:
                result = await tool.call(tool_use.input)
                results.append({"type": "tool_result", "tool_use_id": tool_use.id, "content": result})
            except Exception as exc:
                # Tool failures are surfaced to the model as error results, not raised.
                log.exception(f"Error occurred while calling tool: {tool.name}", exc_info=exc)
                results.append(
                    {
                        "type": "tool_result",
                        "tool_use_id": tool_use.id,
                        "content": repr(exc),
                        "is_error": True,
                    }
                )

        return {"role": "user", "content": results}
|
||||
|
||||
|
||||
class BetaAsyncToolRunner(BaseAsyncToolRunner[ParsedBetaMessage[ResponseFormatT], ResponseFormatT]):
    """Async tool runner that performs one awaited `.parse()` call per iteration."""

    @override
    @asynccontextmanager
    async def _handle_request(self) -> AsyncIterator[ParsedBetaMessage[ResponseFormatT]]:
        # The parsed response is yielded to the caller and also recorded as
        # the "last message" consulted by the tool-call loop.
        parsed = await self._client.beta.messages.parse(**self._params, **self._options)
        self._last_message = parsed
        yield parsed
|
||||
|
||||
|
||||
class BetaAsyncStreamingToolRunner(BaseAsyncToolRunner[BetaAsyncMessageStream[ResponseFormatT], ResponseFormatT]):
    """Async tool runner that opens a streaming request per iteration."""

    @override
    @asynccontextmanager
    async def _handle_request(self) -> AsyncIterator[BetaAsyncMessageStream[ResponseFormatT]]:
        async with self._client.beta.messages.stream(**self._params, **self._options) as event_stream:
            # Store the accessor, not the message: the final message only
            # exists once the stream has been fully consumed.
            self._last_message = event_stream.get_final_message
            yield event_stream
|
||||
@@ -0,0 +1 @@
|
||||
from ._client import AnthropicVertex as AnthropicVertex, AsyncAnthropicVertex as AsyncAnthropicVertex
|
||||
@@ -0,0 +1,47 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Any, cast
|
||||
|
||||
from .._extras import google_auth
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from google.auth.credentials import Credentials # type: ignore[import-untyped]
|
||||
|
||||
# pyright: reportMissingTypeStubs=false, reportUnknownVariableType=false, reportUnknownMemberType=false, reportUnknownArgumentType=false
|
||||
# google libraries don't provide types :/
|
||||
|
||||
# Note: these functions are blocking as they make HTTP requests, the async
|
||||
# client runs these functions in a separate thread to ensure they do not
|
||||
# cause synchronous blocking issues.
|
||||
|
||||
|
||||
def load_auth(*, project_id: str | None) -> tuple[Credentials, str]:
|
||||
try:
|
||||
from google.auth.transport.requests import Request # type: ignore[import-untyped]
|
||||
except ModuleNotFoundError as err:
|
||||
raise RuntimeError(
|
||||
f"Could not import google.auth, you need to install the SDK with `pip install anthropic[vertex]`"
|
||||
) from err
|
||||
|
||||
credentials, loaded_project_id = google_auth.default(
|
||||
scopes=["https://www.googleapis.com/auth/cloud-platform"],
|
||||
)
|
||||
credentials = cast(Any, credentials)
|
||||
credentials.refresh(Request())
|
||||
|
||||
if not project_id:
|
||||
project_id = loaded_project_id
|
||||
|
||||
if not project_id:
|
||||
raise ValueError("Could not resolve project_id")
|
||||
|
||||
if not isinstance(project_id, str):
|
||||
raise TypeError(f"Expected project_id to be a str but got {type(project_id)}")
|
||||
|
||||
return credentials, project_id
|
||||
|
||||
|
||||
def refresh_auth(credentials: Credentials) -> None:
|
||||
from google.auth.transport.requests import Request # type: ignore[import-untyped]
|
||||
|
||||
credentials.refresh(Request())
|
||||
@@ -0,0 +1,102 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from ..._compat import cached_property
|
||||
from ..._resource import SyncAPIResource, AsyncAPIResource
|
||||
from ._beta_messages import (
|
||||
Messages,
|
||||
AsyncMessages,
|
||||
MessagesWithRawResponse,
|
||||
AsyncMessagesWithRawResponse,
|
||||
MessagesWithStreamingResponse,
|
||||
AsyncMessagesWithStreamingResponse,
|
||||
)
|
||||
|
||||
__all__ = ["Beta", "AsyncBeta"]
|
||||
|
||||
|
||||
class Beta(SyncAPIResource):
    """Synchronous access to the beta API surface."""

    @cached_property
    def messages(self) -> Messages:
        return Messages(self._client)

    @cached_property
    def with_raw_response(self) -> BetaWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        return BetaWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> BetaWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        return BetaWithStreamingResponse(self)
|
||||
|
||||
|
||||
class AsyncBeta(AsyncAPIResource):
    """Asynchronous access to the beta API surface."""

    @cached_property
    def messages(self) -> AsyncMessages:
        return AsyncMessages(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncBetaWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        return AsyncBetaWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncBetaWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        return AsyncBetaWithStreamingResponse(self)
|
||||
|
||||
|
||||
class BetaWithRawResponse:
    """Raw-response view over a `Beta` resource."""

    def __init__(self, beta: Beta) -> None:
        self._beta = beta

    @cached_property
    def messages(self) -> MessagesWithRawResponse:
        # Wrap lazily; cached_property ensures a single wrapper per instance.
        wrapped = MessagesWithRawResponse(self._beta.messages)
        return wrapped
|
||||
|
||||
|
||||
class AsyncBetaWithRawResponse:
    """Raw-response view over an `AsyncBeta` resource."""

    def __init__(self, beta: AsyncBeta) -> None:
        self._beta = beta

    @cached_property
    def messages(self) -> AsyncMessagesWithRawResponse:
        # Wrap lazily; cached_property ensures a single wrapper per instance.
        wrapped = AsyncMessagesWithRawResponse(self._beta.messages)
        return wrapped
|
||||
|
||||
|
||||
class BetaWithStreamingResponse:
    """Streaming-response view over a `Beta` resource."""

    def __init__(self, beta: Beta) -> None:
        self._beta = beta

    @cached_property
    def messages(self) -> MessagesWithStreamingResponse:
        # Wrap lazily; cached_property ensures a single wrapper per instance.
        wrapped = MessagesWithStreamingResponse(self._beta.messages)
        return wrapped
|
||||
|
||||
|
||||
class AsyncBetaWithStreamingResponse:
    """Streaming-response view over an `AsyncBeta` resource."""

    def __init__(self, beta: AsyncBeta) -> None:
        self._beta = beta

    @cached_property
    def messages(self) -> AsyncMessagesWithStreamingResponse:
        # Wrap lazily; cached_property ensures a single wrapper per instance.
        wrapped = AsyncMessagesWithStreamingResponse(self._beta.messages)
        return wrapped
|
||||
@@ -0,0 +1,97 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from ... import _legacy_response
|
||||
from ..._compat import cached_property
|
||||
from ..._resource import SyncAPIResource, AsyncAPIResource
|
||||
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
|
||||
from ...resources.beta import Messages as FirstPartyMessagesAPI, AsyncMessages as FirstPartyAsyncMessagesAPI
|
||||
|
||||
__all__ = ["Messages", "AsyncMessages"]
|
||||
|
||||
|
||||
class Messages(SyncAPIResource):
    """Messages resource that delegates to the first-party messages API."""

    # Re-exported unbound so they run against this resource's client.
    create = FirstPartyMessagesAPI.create
    stream = FirstPartyMessagesAPI.stream
    count_tokens = FirstPartyMessagesAPI.count_tokens

    @cached_property
    def with_raw_response(self) -> MessagesWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        return MessagesWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> MessagesWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        return MessagesWithStreamingResponse(self)
|
||||
|
||||
|
||||
class AsyncMessages(AsyncAPIResource):
    """Async messages resource that delegates to the first-party messages API."""

    # Re-exported unbound so they run against this resource's client.
    create = FirstPartyAsyncMessagesAPI.create
    stream = FirstPartyAsyncMessagesAPI.stream
    count_tokens = FirstPartyAsyncMessagesAPI.count_tokens

    @cached_property
    def with_raw_response(self) -> AsyncMessagesWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        return AsyncMessagesWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncMessagesWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        return AsyncMessagesWithStreamingResponse(self)
|
||||
|
||||
|
||||
class MessagesWithRawResponse:
    """Raw-response view over a `Messages` resource."""

    def __init__(self, messages: Messages) -> None:
        self._messages = messages
        # Only `create` has a raw-response wrapper here.
        self.create = _legacy_response.to_raw_response_wrapper(messages.create)
|
||||
|
||||
|
||||
class AsyncMessagesWithRawResponse:
    """Raw-response view over an `AsyncMessages` resource."""

    def __init__(self, messages: AsyncMessages) -> None:
        self._messages = messages
        # Only `create` has a raw-response wrapper here.
        self.create = _legacy_response.async_to_raw_response_wrapper(messages.create)
|
||||
|
||||
|
||||
class MessagesWithStreamingResponse:
    """Streaming-response view over a `Messages` resource."""

    def __init__(self, messages: Messages) -> None:
        self._messages = messages
        # Only `create` has a streamed-response wrapper here.
        self.create = to_streamed_response_wrapper(messages.create)
|
||||
|
||||
|
||||
class AsyncMessagesWithStreamingResponse:
    """Streaming-response view over an `AsyncMessages` resource."""

    def __init__(self, messages: AsyncMessages) -> None:
        self._messages = messages
        # Only `create` has a streamed-response wrapper here.
        self.create = async_to_streamed_response_wrapper(messages.create)
|
||||
@@ -0,0 +1,412 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import TYPE_CHECKING, Any, Union, Mapping, TypeVar
|
||||
from typing_extensions import Self, override
|
||||
|
||||
import httpx
|
||||
|
||||
from ... import _exceptions
|
||||
from ._auth import load_auth, refresh_auth
|
||||
from ._beta import Beta, AsyncBeta
|
||||
from ..._types import NOT_GIVEN, NotGiven
|
||||
from ..._utils import is_dict, asyncify, is_given
|
||||
from ..._compat import model_copy, typed_cached_property
|
||||
from ..._models import FinalRequestOptions
|
||||
from ..._version import __version__
|
||||
from ..._streaming import Stream, AsyncStream
|
||||
from ..._exceptions import AnthropicError, APIStatusError
|
||||
from ..._base_client import (
|
||||
DEFAULT_MAX_RETRIES,
|
||||
BaseClient,
|
||||
SyncAPIClient,
|
||||
AsyncAPIClient,
|
||||
)
|
||||
from ...resources.messages import Messages, AsyncMessages
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from google.auth.credentials import Credentials as GoogleCredentials # type: ignore
|
||||
|
||||
|
||||
# anthropic_version value expected by Vertex AI model endpoints.
DEFAULT_VERSION = "vertex-2023-10-16"

# Parameterise the base client over the sync/async httpx client and stream types.
_HttpxClientT = TypeVar("_HttpxClientT", bound=Union[httpx.Client, httpx.AsyncClient])
_DefaultStreamT = TypeVar("_DefaultStreamT", bound=Union[Stream[Any], AsyncStream[Any]])
|
||||
|
||||
|
||||
class BaseVertexClient(BaseClient[_HttpxClientT, _DefaultStreamT]):
    """Shared behaviour for the sync and async Vertex clients."""

    @typed_cached_property
    def region(self) -> str:
        # Concrete clients assign this in __init__; reading it unset is a bug.
        raise RuntimeError("region not set")

    @typed_cached_property
    def project_id(self) -> str | None:
        # Environment fallback; an empty value counts as "not set".
        env_project_id = os.environ.get("ANTHROPIC_VERTEX_PROJECT_ID")
        return env_project_id or None

    @override
    def _make_status_error(
        self,
        err_msg: str,
        *,
        body: object,
        response: httpx.Response,
    ) -> APIStatusError:
        """Map an HTTP status code onto the matching SDK exception type."""
        specific_errors: dict[int, type[APIStatusError]] = {
            400: _exceptions.BadRequestError,
            401: _exceptions.AuthenticationError,
            403: _exceptions.PermissionDeniedError,
            404: _exceptions.NotFoundError,
            409: _exceptions.ConflictError,
            422: _exceptions.UnprocessableEntityError,
            429: _exceptions.RateLimitError,
            503: _exceptions.ServiceUnavailableError,
            504: _exceptions.DeadlineExceededError,
        }
        error_cls = specific_errors.get(response.status_code)
        if error_cls is None and response.status_code >= 500:
            # Any other 5xx falls back to the generic server-error type.
            error_cls = _exceptions.InternalServerError
        if error_cls is None:
            return APIStatusError(err_msg, response=response, body=body)
        return error_cls(err_msg, response=response, body=body)
|
||||
|
||||
|
||||
class AnthropicVertex(BaseVertexClient[httpx.Client, Stream[Any]], SyncAPIClient):
    """Synchronous client for Anthropic models served through Google Vertex AI."""

    messages: Messages
    beta: Beta

    def __init__(
        self,
        *,
        region: str | NotGiven = NOT_GIVEN,
        project_id: str | NotGiven = NOT_GIVEN,
        access_token: str | None = None,
        credentials: GoogleCredentials | None = None,
        base_url: str | httpx.URL | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
        max_retries: int = DEFAULT_MAX_RETRIES,
        default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        # Configure a custom httpx client. See the [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
        http_client: httpx.Client | None = None,
        _strict_response_validation: bool = False,
    ) -> None:
        # Region resolution: explicit arg > CLOUD_ML_REGION env var > error.
        if not is_given(region):
            region = os.environ.get("CLOUD_ML_REGION", NOT_GIVEN)
        if not is_given(region):
            raise ValueError(
                "No region was given. The client should be instantiated with the `region` argument or the `CLOUD_ML_REGION` environment variable should be set."
            )

        # Base URL resolution: explicit arg > env var > region-derived default.
        if base_url is None:
            base_url = os.environ.get("ANTHROPIC_VERTEX_BASE_URL")
        if base_url is None:
            if region == "global":
                # The global endpoint has no region prefix.
                base_url = "https://aiplatform.googleapis.com/v1"
            else:
                base_url = f"https://{region}-aiplatform.googleapis.com/v1"

        super().__init__(
            version=__version__,
            base_url=base_url,
            timeout=timeout,
            max_retries=max_retries,
            custom_headers=default_headers,
            custom_query=default_query,
            http_client=http_client,
            _strict_response_validation=_strict_response_validation,
        )

        # Only override the cached env-based project_id when explicitly given.
        if is_given(project_id):
            self.project_id = project_id

        self.region = region
        self.access_token = access_token
        self.credentials = credentials

        self.messages = Messages(self)
        self.beta = Beta(self)

    @override
    def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions:
        # Delegates to the module-level helper that rewrites the request for Vertex.
        return _prepare_options(options, project_id=self.project_id, region=self.region)

    @override
    def _prepare_request(self, request: httpx.Request) -> None:
        if request.headers.get("Authorization"):
            # already authenticated, nothing for us to do
            return

        request.headers["Authorization"] = f"Bearer {self._ensure_access_token()}"

    def _ensure_access_token(self) -> str:
        """Return a valid bearer token, loading/refreshing credentials as needed."""
        # A user-supplied token always wins and is never refreshed.
        if self.access_token is not None:
            return self.access_token

        if not self.credentials:
            self.credentials, project_id = load_auth(project_id=self.project_id)
            # Adopt the project id resolved during auth if we had none.
            if not self.project_id:
                self.project_id = project_id

        if self.credentials.expired or not self.credentials.token:
            refresh_auth(self.credentials)

        if not self.credentials.token:
            raise RuntimeError("Could not resolve API token from the environment")

        assert isinstance(self.credentials.token, str)
        return self.credentials.token

    def copy(
        self,
        *,
        region: str | NotGiven = NOT_GIVEN,
        project_id: str | NotGiven = NOT_GIVEN,
        access_token: str | None = None,
        credentials: GoogleCredentials | None = None,
        base_url: str | httpx.URL | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
        http_client: httpx.Client | None = None,
        max_retries: int | NotGiven = NOT_GIVEN,
        default_headers: Mapping[str, str] | None = None,
        set_default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        set_default_query: Mapping[str, object] | None = None,
        # Read-only (only unpacked below), so the shared {} default is safe.
        _extra_kwargs: Mapping[str, Any] = {},
    ) -> Self:
        """
        Create a new client instance re-using the same options given to the current client with optional overriding.
        """
        if default_headers is not None and set_default_headers is not None:
            raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")

        if default_query is not None and set_default_query is not None:
            raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")

        # `default_headers` merges over the existing headers; `set_default_headers` replaces them.
        headers = self._custom_headers
        if default_headers is not None:
            headers = {**headers, **default_headers}
        elif set_default_headers is not None:
            headers = set_default_headers

        # Same merge-vs-replace semantics for query params.
        params = self._custom_query
        if default_query is not None:
            params = {**params, **default_query}
        elif set_default_query is not None:
            params = set_default_query

        http_client = http_client or self._client

        return self.__class__(
            region=region if is_given(region) else self.region,
            project_id=project_id if is_given(project_id) else self.project_id or NOT_GIVEN,
            access_token=access_token or self.access_token,
            credentials=credentials or self.credentials,
            base_url=base_url or self.base_url,
            timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
            http_client=http_client,
            max_retries=max_retries if is_given(max_retries) else self.max_retries,
            default_headers=headers,
            default_query=params,
            **_extra_kwargs,
        )

    # Alias for `copy` for nicer inline usage, e.g.
    # client.with_options(timeout=10).foo.create(...)
    with_options = copy
|
||||
|
||||
|
||||
class AsyncAnthropicVertex(BaseVertexClient[httpx.AsyncClient, AsyncStream[Any]], AsyncAPIClient):
    """Asynchronous client for Anthropic models hosted on Google Vertex AI.

    Resolves the target region and base URL from arguments or environment
    variables and authenticates outgoing requests using either an explicit
    access token or Google application-default credentials.
    """

    # Async resource accessors, constructed in __init__.
    messages: AsyncMessages
    beta: AsyncBeta

    def __init__(
        self,
        *,
        region: str | NotGiven = NOT_GIVEN,
        project_id: str | NotGiven = NOT_GIVEN,
        access_token: str | None = None,
        credentials: GoogleCredentials | None = None,
        base_url: str | httpx.URL | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
        max_retries: int = DEFAULT_MAX_RETRIES,
        default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        # Configure a custom httpx client. See the [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
        http_client: httpx.AsyncClient | None = None,
        _strict_response_validation: bool = False,
    ) -> None:
        """Construct a new asynchronous Vertex client.

        Raises:
            ValueError: if no region is supplied and the `CLOUD_ML_REGION`
                environment variable is not set.
        """
        # Fall back to the CLOUD_ML_REGION env var when no region was passed.
        if not is_given(region):
            region = os.environ.get("CLOUD_ML_REGION", NOT_GIVEN)
        if not is_given(region):
            raise ValueError(
                "No region was given. The client should be instantiated with the `region` argument or the `CLOUD_ML_REGION` environment variable should be set."
            )

        # Resolve the base URL: explicit argument > env var > region-derived default.
        if base_url is None:
            base_url = os.environ.get("ANTHROPIC_VERTEX_BASE_URL")
            if base_url is None:
                # The special "global" region uses the region-less endpoint host.
                if region == "global":
                    base_url = "https://aiplatform.googleapis.com/v1"
                else:
                    base_url = f"https://{region}-aiplatform.googleapis.com/v1"

        super().__init__(
            version=__version__,
            base_url=base_url,
            timeout=timeout,
            max_retries=max_retries,
            custom_headers=default_headers,
            custom_query=default_query,
            http_client=http_client,
            _strict_response_validation=_strict_response_validation,
        )

        # Only assign project_id when explicitly given; otherwise it may later
        # be resolved from loaded Google credentials (see _ensure_access_token).
        if is_given(project_id):
            self.project_id = project_id

        self.region = region
        self.access_token = access_token
        self.credentials = credentials

        self.messages = AsyncMessages(self)
        self.beta = AsyncBeta(self)

    @override
    async def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions:
        # Delegate to the module-level helper that rewrites generic Anthropic
        # request options into their Vertex-specific URL/payload form.
        return _prepare_options(options, project_id=self.project_id, region=self.region)

    @override
    async def _prepare_request(self, request: httpx.Request) -> None:
        """Attach a Bearer token to the outgoing request unless one is already set."""
        if request.headers.get("Authorization"):
            # already authenticated, nothing for us to do
            return

        request.headers["Authorization"] = f"Bearer {await self._ensure_access_token()}"

    async def _ensure_access_token(self) -> str:
        """Return a valid access token, loading/refreshing Google credentials as needed.

        Raises:
            RuntimeError: if credentials could be loaded but no token could be
                resolved from them.
        """
        # An explicitly supplied token always wins and is never refreshed.
        if self.access_token is not None:
            return self.access_token

        if not self.credentials:
            # load_auth is a blocking call, so run it in a worker thread.
            self.credentials, project_id = await asyncify(load_auth)(project_id=self.project_id)
            # Adopt the project id resolved from credentials if we have none yet.
            if not self.project_id:
                self.project_id = project_id

        if self.credentials.expired or not self.credentials.token:
            # refresh_auth is blocking as well; see note above.
            await asyncify(refresh_auth)(self.credentials)

        if not self.credentials.token:
            raise RuntimeError("Could not resolve API token from the environment")

        assert isinstance(self.credentials.token, str)
        return self.credentials.token

    def copy(
        self,
        *,
        region: str | NotGiven = NOT_GIVEN,
        project_id: str | NotGiven = NOT_GIVEN,
        access_token: str | None = None,
        credentials: GoogleCredentials | None = None,
        base_url: str | httpx.URL | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
        http_client: httpx.AsyncClient | None = None,
        max_retries: int | NotGiven = NOT_GIVEN,
        default_headers: Mapping[str, str] | None = None,
        set_default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        set_default_query: Mapping[str, object] | None = None,
        _extra_kwargs: Mapping[str, Any] = {},
    ) -> Self:
        """
        Create a new client instance re-using the same options given to the current client with optional overriding.
        """
        # `default_*` merges with the current values while `set_default_*`
        # replaces them wholesale, so accepting both at once would be ambiguous.
        if default_headers is not None and set_default_headers is not None:
            raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")

        if default_query is not None and set_default_query is not None:
            raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")

        headers = self._custom_headers
        if default_headers is not None:
            # Merge: new entries override existing keys.
            headers = {**headers, **default_headers}
        elif set_default_headers is not None:
            headers = set_default_headers

        params = self._custom_query
        if default_query is not None:
            # Merge: new entries override existing keys.
            params = {**params, **default_query}
        elif set_default_query is not None:
            params = set_default_query

        http_client = http_client or self._client

        return self.__class__(
            region=region if is_given(region) else self.region,
            project_id=project_id if is_given(project_id) else self.project_id or NOT_GIVEN,
            access_token=access_token or self.access_token,
            credentials=credentials or self.credentials,
            base_url=base_url or self.base_url,
            # NotGiven means "keep the current timeout"; None is a valid override.
            timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
            http_client=http_client,
            max_retries=max_retries if is_given(max_retries) else self.max_retries,
            default_headers=headers,
            default_query=params,
            **_extra_kwargs,
        )

    # Alias for `copy` for nicer inline usage, e.g.
    # client.with_options(timeout=10).foo.create(...)
    with_options = copy
|
||||
|
||||
|
||||
def _prepare_options(input_options: FinalRequestOptions, *, project_id: str | None, region: str) -> FinalRequestOptions:
    """Return a deep copy of *input_options* rewritten for Vertex AI routing.

    Anthropic-style URLs (`/v1/messages`, `/v1/messages/count_tokens`) are
    translated into the corresponding Vertex publisher-model `rawPredict` /
    `streamRawPredict` endpoints; the batches API is rejected as unsupported.
    """
    # Never mutate the caller's options object.
    opts = model_copy(input_options, deep=True)

    if is_dict(opts.json_data):
        # Vertex payloads carry the API version in the body, not a header.
        opts.json_data.setdefault("anthropic_version", DEFAULT_VERSION)

    is_post = opts.method == "post"

    if is_post and opts.url in {"/v1/messages", "/v1/messages?beta=true"}:
        if project_id is None:
            raise RuntimeError(
                "No project_id was given and it could not be resolved from credentials. The client should be instantiated with the `project_id` argument or the `ANTHROPIC_VERTEX_PROJECT_ID` environment variable should be set."
            )

        if not is_dict(opts.json_data):
            raise RuntimeError("Expected json data to be a dictionary for post /v1/messages")

        # The model is addressed via the URL on Vertex, so remove it from the body.
        model_name = opts.json_data.pop("model")
        endpoint = "streamRawPredict" if opts.json_data.get("stream", False) else "rawPredict"
        opts.url = f"/projects/{project_id}/locations/{region}/publishers/anthropic/models/{model_name}:{endpoint}"

    if is_post and opts.url in {"/v1/messages/count_tokens", "/v1/messages/count_tokens?beta=true"}:
        if project_id is None:
            raise RuntimeError(
                "No project_id was given and it could not be resolved from credentials. The client should be instantiated with the `project_id` argument or the `ANTHROPIC_VERTEX_PROJECT_ID` environment variable should be set."
            )

        opts.url = f"/projects/{project_id}/locations/{region}/publishers/anthropic/models/count-tokens:rawPredict"

    if opts.url.startswith("/v1/messages/batches"):
        raise AnthropicError("The Batch API is not supported in the Vertex client yet")

    return opts
|
||||
@@ -0,0 +1,200 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from typing import List, Generic, TypeVar, Optional
|
||||
from typing_extensions import override
|
||||
|
||||
from ._base_client import BasePage, PageInfo, BaseSyncPage, BaseAsyncPage
|
||||
|
||||
__all__ = ["SyncPage", "AsyncPage", "SyncTokenPage", "AsyncTokenPage", "SyncPageCursor", "AsyncPageCursor"]
|
||||
|
||||
_T = TypeVar("_T")
|
||||
|
||||
|
||||
class SyncPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
    """Synchronous page of results paginated with `first_id`/`last_id` cursors."""

    data: List[_T]
    has_more: Optional[bool] = None
    first_id: Optional[str] = None
    last_id: Optional[str] = None

    @override
    def _get_page_items(self) -> List[_T]:
        # Normalise a missing/empty payload to an empty list.
        return self.data or []

    @override
    def has_next_page(self) -> bool:
        # An explicit `has_more=False` from the API is authoritative.
        if self.has_more is False:
            return False
        return super().has_next_page()

    @override
    def next_page_info(self) -> Optional[PageInfo]:
        # When paginating backwards, keep following the `before_id` cursor.
        if self._options.params.get("before_id"):
            cursor = self.first_id
            return PageInfo(params={"before_id": cursor}) if cursor else None

        cursor = self.last_id
        return PageInfo(params={"after_id": cursor}) if cursor else None
|
||||
|
||||
|
||||
class AsyncPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
    """Asynchronous page of results paginated with `first_id`/`last_id` cursors."""

    data: List[_T]
    has_more: Optional[bool] = None
    first_id: Optional[str] = None
    last_id: Optional[str] = None

    @override
    def _get_page_items(self) -> List[_T]:
        # Normalise a missing/empty payload to an empty list.
        return self.data or []

    @override
    def has_next_page(self) -> bool:
        # An explicit `has_more=False` from the API is authoritative.
        if self.has_more is False:
            return False
        return super().has_next_page()

    @override
    def next_page_info(self) -> Optional[PageInfo]:
        # When paginating backwards, keep following the `before_id` cursor.
        if self._options.params.get("before_id"):
            cursor = self.first_id
            return PageInfo(params={"before_id": cursor}) if cursor else None

        cursor = self.last_id
        return PageInfo(params={"after_id": cursor}) if cursor else None
|
||||
|
||||
|
||||
class SyncTokenPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
    """Synchronous page of results paginated with an opaque `page_token`."""

    data: List[_T]
    has_more: Optional[bool] = None
    next_page: Optional[str] = None

    @override
    def _get_page_items(self) -> List[_T]:
        # Normalise a missing/empty payload to an empty list.
        return self.data or []

    @override
    def has_next_page(self) -> bool:
        # An explicit `has_more=False` from the API is authoritative.
        if self.has_more is False:
            return False
        return super().has_next_page()

    @override
    def next_page_info(self) -> Optional[PageInfo]:
        token = self.next_page
        return PageInfo(params={"page_token": token}) if token else None
|
||||
|
||||
|
||||
class AsyncTokenPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
    """Asynchronous page of results paginated with an opaque `page_token`."""

    data: List[_T]
    has_more: Optional[bool] = None
    next_page: Optional[str] = None

    @override
    def _get_page_items(self) -> List[_T]:
        # Normalise a missing/empty payload to an empty list.
        return self.data or []

    @override
    def has_next_page(self) -> bool:
        # An explicit `has_more=False` from the API is authoritative.
        if self.has_more is False:
            return False
        return super().has_next_page()

    @override
    def next_page_info(self) -> Optional[PageInfo]:
        token = self.next_page
        return PageInfo(params={"page_token": token}) if token else None
|
||||
|
||||
|
||||
class SyncPageCursor(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
    """Synchronous page of results paginated with a `page` cursor parameter."""

    data: List[_T]
    has_more: Optional[bool] = None
    next_page: Optional[str] = None

    @override
    def _get_page_items(self) -> List[_T]:
        # Normalise a missing/empty payload to an empty list.
        return self.data or []

    @override
    def has_next_page(self) -> bool:
        # An explicit `has_more=False` from the API is authoritative.
        if self.has_more is False:
            return False
        return super().has_next_page()

    @override
    def next_page_info(self) -> Optional[PageInfo]:
        cursor = self.next_page
        return PageInfo(params={"page": cursor}) if cursor else None
|
||||
|
||||
|
||||
class AsyncPageCursor(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
    """Asynchronous page of results paginated with a `page` cursor parameter."""

    data: List[_T]
    has_more: Optional[bool] = None
    next_page: Optional[str] = None

    @override
    def _get_page_items(self) -> List[_T]:
        # Normalise a missing/empty payload to an empty list.
        return self.data or []

    @override
    def has_next_page(self) -> bool:
        # An explicit `has_more=False` from the API is authoritative.
        if self.has_more is False:
            return False
        return super().has_next_page()

    @override
    def next_page_info(self) -> Optional[PageInfo]:
        cursor = self.next_page
        return PageInfo(params={"page": cursor}) if cursor else None
|
||||
@@ -0,0 +1,61 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from .beta import (
|
||||
Beta,
|
||||
AsyncBeta,
|
||||
BetaWithRawResponse,
|
||||
AsyncBetaWithRawResponse,
|
||||
BetaWithStreamingResponse,
|
||||
AsyncBetaWithStreamingResponse,
|
||||
)
|
||||
from .models import (
|
||||
Models,
|
||||
AsyncModels,
|
||||
ModelsWithRawResponse,
|
||||
AsyncModelsWithRawResponse,
|
||||
ModelsWithStreamingResponse,
|
||||
AsyncModelsWithStreamingResponse,
|
||||
)
|
||||
from .messages import (
|
||||
Messages,
|
||||
AsyncMessages,
|
||||
MessagesWithRawResponse,
|
||||
AsyncMessagesWithRawResponse,
|
||||
MessagesWithStreamingResponse,
|
||||
AsyncMessagesWithStreamingResponse,
|
||||
)
|
||||
from .completions import (
|
||||
Completions,
|
||||
AsyncCompletions,
|
||||
CompletionsWithRawResponse,
|
||||
AsyncCompletionsWithRawResponse,
|
||||
CompletionsWithStreamingResponse,
|
||||
AsyncCompletionsWithStreamingResponse,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"Completions",
|
||||
"AsyncCompletions",
|
||||
"CompletionsWithRawResponse",
|
||||
"AsyncCompletionsWithRawResponse",
|
||||
"CompletionsWithStreamingResponse",
|
||||
"AsyncCompletionsWithStreamingResponse",
|
||||
"Messages",
|
||||
"AsyncMessages",
|
||||
"MessagesWithRawResponse",
|
||||
"AsyncMessagesWithRawResponse",
|
||||
"MessagesWithStreamingResponse",
|
||||
"AsyncMessagesWithStreamingResponse",
|
||||
"Models",
|
||||
"AsyncModels",
|
||||
"ModelsWithRawResponse",
|
||||
"AsyncModelsWithRawResponse",
|
||||
"ModelsWithStreamingResponse",
|
||||
"AsyncModelsWithStreamingResponse",
|
||||
"Beta",
|
||||
"AsyncBeta",
|
||||
"BetaWithRawResponse",
|
||||
"AsyncBetaWithRawResponse",
|
||||
"BetaWithStreamingResponse",
|
||||
"AsyncBetaWithStreamingResponse",
|
||||
]
|
||||
@@ -0,0 +1,75 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from .beta import (
|
||||
Beta,
|
||||
AsyncBeta,
|
||||
BetaWithRawResponse,
|
||||
AsyncBetaWithRawResponse,
|
||||
BetaWithStreamingResponse,
|
||||
AsyncBetaWithStreamingResponse,
|
||||
)
|
||||
from .files import (
|
||||
Files,
|
||||
AsyncFiles,
|
||||
FilesWithRawResponse,
|
||||
AsyncFilesWithRawResponse,
|
||||
FilesWithStreamingResponse,
|
||||
AsyncFilesWithStreamingResponse,
|
||||
)
|
||||
from .models import (
|
||||
Models,
|
||||
AsyncModels,
|
||||
ModelsWithRawResponse,
|
||||
AsyncModelsWithRawResponse,
|
||||
ModelsWithStreamingResponse,
|
||||
AsyncModelsWithStreamingResponse,
|
||||
)
|
||||
from .skills import (
|
||||
Skills,
|
||||
AsyncSkills,
|
||||
SkillsWithRawResponse,
|
||||
AsyncSkillsWithRawResponse,
|
||||
SkillsWithStreamingResponse,
|
||||
AsyncSkillsWithStreamingResponse,
|
||||
)
|
||||
from .messages import (
|
||||
Messages,
|
||||
AsyncMessages,
|
||||
MessagesWithRawResponse,
|
||||
AsyncMessagesWithRawResponse,
|
||||
MessagesWithStreamingResponse,
|
||||
AsyncMessagesWithStreamingResponse,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"Models",
|
||||
"AsyncModels",
|
||||
"ModelsWithRawResponse",
|
||||
"AsyncModelsWithRawResponse",
|
||||
"ModelsWithStreamingResponse",
|
||||
"AsyncModelsWithStreamingResponse",
|
||||
"Messages",
|
||||
"AsyncMessages",
|
||||
"MessagesWithRawResponse",
|
||||
"AsyncMessagesWithRawResponse",
|
||||
"MessagesWithStreamingResponse",
|
||||
"AsyncMessagesWithStreamingResponse",
|
||||
"Files",
|
||||
"AsyncFiles",
|
||||
"FilesWithRawResponse",
|
||||
"AsyncFilesWithRawResponse",
|
||||
"FilesWithStreamingResponse",
|
||||
"AsyncFilesWithStreamingResponse",
|
||||
"Skills",
|
||||
"AsyncSkills",
|
||||
"SkillsWithRawResponse",
|
||||
"AsyncSkillsWithRawResponse",
|
||||
"SkillsWithStreamingResponse",
|
||||
"AsyncSkillsWithStreamingResponse",
|
||||
"Beta",
|
||||
"AsyncBeta",
|
||||
"BetaWithRawResponse",
|
||||
"AsyncBetaWithRawResponse",
|
||||
"BetaWithStreamingResponse",
|
||||
"AsyncBetaWithStreamingResponse",
|
||||
]
|
||||
@@ -0,0 +1,198 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from .files import (
|
||||
Files,
|
||||
AsyncFiles,
|
||||
FilesWithRawResponse,
|
||||
AsyncFilesWithRawResponse,
|
||||
FilesWithStreamingResponse,
|
||||
AsyncFilesWithStreamingResponse,
|
||||
)
|
||||
from .models import (
|
||||
Models,
|
||||
AsyncModels,
|
||||
ModelsWithRawResponse,
|
||||
AsyncModelsWithRawResponse,
|
||||
ModelsWithStreamingResponse,
|
||||
AsyncModelsWithStreamingResponse,
|
||||
)
|
||||
from ..._compat import cached_property
|
||||
from ..._resource import SyncAPIResource, AsyncAPIResource
|
||||
from .skills.skills import (
|
||||
Skills,
|
||||
AsyncSkills,
|
||||
SkillsWithRawResponse,
|
||||
AsyncSkillsWithRawResponse,
|
||||
SkillsWithStreamingResponse,
|
||||
AsyncSkillsWithStreamingResponse,
|
||||
)
|
||||
from .messages.messages import (
|
||||
Messages,
|
||||
AsyncMessages,
|
||||
MessagesWithRawResponse,
|
||||
AsyncMessagesWithRawResponse,
|
||||
MessagesWithStreamingResponse,
|
||||
AsyncMessagesWithStreamingResponse,
|
||||
)
|
||||
|
||||
__all__ = ["Beta", "AsyncBeta"]
|
||||
|
||||
|
||||
class Beta(SyncAPIResource):
    """Synchronous entry point to the beta API surface (messages, models, files, skills)."""

    @cached_property
    def messages(self) -> Messages:
        return Messages(self._client)

    @cached_property
    def models(self) -> Models:
        return Models(self._client)

    @cached_property
    def files(self) -> Files:
        return Files(self._client)

    @cached_property
    def skills(self) -> Skills:
        return Skills(self._client)

    @cached_property
    def with_raw_response(self) -> BetaWithRawResponse:
        """
        Prefix any HTTP method call with this property to get back the raw
        response object instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        return BetaWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> BetaWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        return BetaWithStreamingResponse(self)
|
||||
|
||||
|
||||
class AsyncBeta(AsyncAPIResource):
    """Asynchronous entry point to the beta API surface (messages, models, files, skills)."""

    @cached_property
    def messages(self) -> AsyncMessages:
        return AsyncMessages(self._client)

    @cached_property
    def models(self) -> AsyncModels:
        return AsyncModels(self._client)

    @cached_property
    def files(self) -> AsyncFiles:
        return AsyncFiles(self._client)

    @cached_property
    def skills(self) -> AsyncSkills:
        return AsyncSkills(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncBetaWithRawResponse:
        """
        Prefix any HTTP method call with this property to get back the raw
        response object instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        return AsyncBetaWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncBetaWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        return AsyncBetaWithStreamingResponse(self)
|
||||
|
||||
|
||||
class BetaWithRawResponse:
    """Raw-response view over `Beta` and its sub-resources."""

    def __init__(self, beta: Beta) -> None:
        self._beta = beta

    @cached_property
    def messages(self) -> MessagesWithRawResponse:
        return MessagesWithRawResponse(self._beta.messages)

    @cached_property
    def models(self) -> ModelsWithRawResponse:
        return ModelsWithRawResponse(self._beta.models)

    @cached_property
    def files(self) -> FilesWithRawResponse:
        return FilesWithRawResponse(self._beta.files)

    @cached_property
    def skills(self) -> SkillsWithRawResponse:
        return SkillsWithRawResponse(self._beta.skills)
|
||||
|
||||
|
||||
class AsyncBetaWithRawResponse:
    """Raw-response view over `AsyncBeta` and its sub-resources."""

    def __init__(self, beta: AsyncBeta) -> None:
        self._beta = beta

    @cached_property
    def messages(self) -> AsyncMessagesWithRawResponse:
        return AsyncMessagesWithRawResponse(self._beta.messages)

    @cached_property
    def models(self) -> AsyncModelsWithRawResponse:
        return AsyncModelsWithRawResponse(self._beta.models)

    @cached_property
    def files(self) -> AsyncFilesWithRawResponse:
        return AsyncFilesWithRawResponse(self._beta.files)

    @cached_property
    def skills(self) -> AsyncSkillsWithRawResponse:
        return AsyncSkillsWithRawResponse(self._beta.skills)
|
||||
|
||||
|
||||
class BetaWithStreamingResponse:
    """Streaming-response view over `Beta` and its sub-resources."""

    def __init__(self, beta: Beta) -> None:
        self._beta = beta

    @cached_property
    def messages(self) -> MessagesWithStreamingResponse:
        return MessagesWithStreamingResponse(self._beta.messages)

    @cached_property
    def models(self) -> ModelsWithStreamingResponse:
        return ModelsWithStreamingResponse(self._beta.models)

    @cached_property
    def files(self) -> FilesWithStreamingResponse:
        return FilesWithStreamingResponse(self._beta.files)

    @cached_property
    def skills(self) -> SkillsWithStreamingResponse:
        return SkillsWithStreamingResponse(self._beta.skills)
|
||||
|
||||
|
||||
class AsyncBetaWithStreamingResponse:
    """Streaming-response view over `AsyncBeta` and its sub-resources."""

    def __init__(self, beta: AsyncBeta) -> None:
        self._beta = beta

    @cached_property
    def messages(self) -> AsyncMessagesWithStreamingResponse:
        return AsyncMessagesWithStreamingResponse(self._beta.messages)

    @cached_property
    def models(self) -> AsyncModelsWithStreamingResponse:
        return AsyncModelsWithStreamingResponse(self._beta.models)

    @cached_property
    def files(self) -> AsyncFilesWithStreamingResponse:
        return AsyncFilesWithStreamingResponse(self._beta.files)

    @cached_property
    def skills(self) -> AsyncSkillsWithStreamingResponse:
        return AsyncSkillsWithStreamingResponse(self._beta.skills)
|
||||
@@ -0,0 +1,710 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List, Mapping, cast
|
||||
from itertools import chain
|
||||
|
||||
import httpx
|
||||
|
||||
from ... import _legacy_response
|
||||
from ..._types import Body, Omit, Query, Headers, NotGiven, FileTypes, omit, not_given
|
||||
from ..._utils import is_given, extract_files, maybe_transform, strip_not_given, deepcopy_minimal, async_maybe_transform
|
||||
from ..._compat import cached_property
|
||||
from ..._resource import SyncAPIResource, AsyncAPIResource
|
||||
from ..._response import (
|
||||
BinaryAPIResponse,
|
||||
AsyncBinaryAPIResponse,
|
||||
StreamedBinaryAPIResponse,
|
||||
AsyncStreamedBinaryAPIResponse,
|
||||
to_streamed_response_wrapper,
|
||||
to_custom_raw_response_wrapper,
|
||||
async_to_streamed_response_wrapper,
|
||||
to_custom_streamed_response_wrapper,
|
||||
async_to_custom_raw_response_wrapper,
|
||||
async_to_custom_streamed_response_wrapper,
|
||||
)
|
||||
from ...pagination import SyncPage, AsyncPage
|
||||
from ...types.beta import file_list_params, file_upload_params
|
||||
from ..._base_client import AsyncPaginator, make_request_options
|
||||
from ...types.beta.deleted_file import DeletedFile
|
||||
from ...types.beta.file_metadata import FileMetadata
|
||||
from ...types.anthropic_beta_param import AnthropicBetaParam
|
||||
|
||||
__all__ = ["Files", "AsyncFiles"]
|
||||
|
||||
|
||||
class Files(SyncAPIResource):
|
||||
@cached_property
def with_raw_response(self) -> FilesWithRawResponse:
    """
    Prefix any HTTP method call with this property to get back the raw
    response object instead of the parsed content.

    For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
    """
    return FilesWithRawResponse(self)
|
||||
|
||||
@cached_property
def with_streaming_response(self) -> FilesWithStreamingResponse:
    """
    Like `.with_raw_response`, but the response body is not read eagerly.

    For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
    """
    return FilesWithStreamingResponse(self)
|
||||
|
||||
def list(
    self,
    *,
    after_id: str | Omit = omit,
    before_id: str | Omit = omit,
    limit: int | Omit = omit,
    betas: List[AnthropicBetaParam] | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncPage[FileMetadata]:
    """List Files

    Args:
      after_id: ID of the object to use as a cursor for pagination.

          When provided, returns the
          page of results immediately after this object.

      before_id: ID of the object to use as a cursor for pagination. When provided, returns the
          page of results immediately before this object.

      limit: Number of items to return per page.

          Defaults to `20`. Ranges from `1` to `1000`.

      betas: Optional header to specify the beta version(s) you want to use.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    # Join any caller-supplied betas with the Files API beta flag; when no
    # betas were given, strip_not_given drops the header from this dict.
    extra_headers = {
        **strip_not_given(
            {
                "anthropic-beta": ",".join(chain((str(e) for e in betas), ["files-api-2025-04-14"]))
                if is_given(betas)
                else not_given
            }
        ),
        **(extra_headers or {}),
    }
    # Ensure the beta header is always present; any value built above (or
    # passed explicitly by the caller) takes precedence via the spread.
    extra_headers = {"anthropic-beta": "files-api-2025-04-14", **(extra_headers or {})}
    return self._get_api_list(
        "/v1/files?beta=true",
        page=SyncPage[FileMetadata],
        options=make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            # Omitted pagination params are stripped before serialization.
            query=maybe_transform(
                {
                    "after_id": after_id,
                    "before_id": before_id,
                    "limit": limit,
                },
                file_list_params.FileListParams,
            ),
        ),
        model=FileMetadata,
    )
|
||||
|
||||
def delete(
    self,
    file_id: str,
    *,
    betas: List[AnthropicBetaParam] | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> DeletedFile:
    """
    Delete File

    Args:
      file_id: ID of the File.

      betas: Optional header to specify the beta version(s) you want to use.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    # An empty id would produce a malformed URL, so reject it up front.
    if not file_id:
        raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
    # Join any caller-supplied betas with the Files API beta flag; when no
    # betas were given, strip_not_given drops the header from this dict.
    extra_headers = {
        **strip_not_given(
            {
                "anthropic-beta": ",".join(chain((str(e) for e in betas), ["files-api-2025-04-14"]))
                if is_given(betas)
                else not_given
            }
        ),
        **(extra_headers or {}),
    }
    # Ensure the beta header is always present; any value built above (or
    # passed explicitly by the caller) takes precedence via the spread.
    extra_headers = {"anthropic-beta": "files-api-2025-04-14", **(extra_headers or {})}
    return self._delete(
        f"/v1/files/{file_id}?beta=true",
        options=make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        ),
        cast_to=DeletedFile,
    )
|
||||
|
||||
def download(
    self,
    file_id: str,
    *,
    betas: List[AnthropicBetaParam] | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BinaryAPIResponse:
    """
    Download File

    Args:
      file_id: ID of the File.

      betas: Optional header to specify the beta version(s) you want to use.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    # An empty id would produce a malformed URL, so reject it up front.
    if not file_id:
        raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
    # Request the raw bytes of the file rather than a JSON representation.
    extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
    # Join any caller-supplied betas with the Files API beta flag; when no
    # betas were given, strip_not_given drops the header from this dict.
    extra_headers = {
        **strip_not_given(
            {
                "anthropic-beta": ",".join(chain((str(e) for e in betas), ["files-api-2025-04-14"]))
                if is_given(betas)
                else not_given
            }
        ),
        **(extra_headers or {}),
    }
    # Ensure the beta header is always present; any value built above (or
    # passed explicitly by the caller) takes precedence via the spread.
    extra_headers = {"anthropic-beta": "files-api-2025-04-14", **(extra_headers or {})}
    return self._get(
        f"/v1/files/{file_id}/content?beta=true",
        options=make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        ),
        cast_to=BinaryAPIResponse,
    )
|
||||
|
||||
def retrieve_metadata(
|
||||
self,
|
||||
file_id: str,
|
||||
*,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> FileMetadata:
|
||||
"""
|
||||
Get File Metadata
|
||||
|
||||
Args:
|
||||
file_id: ID of the File.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not file_id:
|
||||
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["files-api-2025-04-14"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "files-api-2025-04-14", **(extra_headers or {})}
|
||||
return self._get(
|
||||
f"/v1/files/{file_id}?beta=true",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=FileMetadata,
|
||||
)
|
||||
|
||||
def upload(
|
||||
self,
|
||||
*,
|
||||
file: FileTypes,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> FileMetadata:
|
||||
"""
|
||||
Upload File
|
||||
|
||||
Args:
|
||||
file: The file to upload
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["files-api-2025-04-14"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "files-api-2025-04-14", **(extra_headers or {})}
|
||||
body = deepcopy_minimal({"file": file})
|
||||
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
|
||||
# It should be noted that the actual Content-Type header that will be
|
||||
# sent to the server will contain a `boundary` parameter, e.g.
|
||||
# multipart/form-data; boundary=---abc--
|
||||
extra_headers["Content-Type"] = "multipart/form-data"
|
||||
return self._post(
|
||||
"/v1/files?beta=true",
|
||||
body=maybe_transform(body, file_upload_params.FileUploadParams),
|
||||
files=files,
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=FileMetadata,
|
||||
)
|
||||
|
||||
|
||||
class AsyncFiles(AsyncAPIResource):
|
||||
@cached_property
|
||||
def with_raw_response(self) -> AsyncFilesWithRawResponse:
|
||||
"""
|
||||
This property can be used as a prefix for any HTTP method call to return
|
||||
the raw response object instead of the parsed content.
|
||||
|
||||
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
|
||||
"""
|
||||
return AsyncFilesWithRawResponse(self)
|
||||
|
||||
@cached_property
|
||||
def with_streaming_response(self) -> AsyncFilesWithStreamingResponse:
|
||||
"""
|
||||
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
|
||||
|
||||
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
|
||||
"""
|
||||
return AsyncFilesWithStreamingResponse(self)
|
||||
|
||||
def list(
|
||||
self,
|
||||
*,
|
||||
after_id: str | Omit = omit,
|
||||
before_id: str | Omit = omit,
|
||||
limit: int | Omit = omit,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> AsyncPaginator[FileMetadata, AsyncPage[FileMetadata]]:
|
||||
"""List Files
|
||||
|
||||
Args:
|
||||
after_id: ID of the object to use as a cursor for pagination.
|
||||
|
||||
When provided, returns the
|
||||
page of results immediately after this object.
|
||||
|
||||
before_id: ID of the object to use as a cursor for pagination. When provided, returns the
|
||||
page of results immediately before this object.
|
||||
|
||||
limit: Number of items to return per page.
|
||||
|
||||
Defaults to `20`. Ranges from `1` to `1000`.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["files-api-2025-04-14"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "files-api-2025-04-14", **(extra_headers or {})}
|
||||
return self._get_api_list(
|
||||
"/v1/files?beta=true",
|
||||
page=AsyncPage[FileMetadata],
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers,
|
||||
extra_query=extra_query,
|
||||
extra_body=extra_body,
|
||||
timeout=timeout,
|
||||
query=maybe_transform(
|
||||
{
|
||||
"after_id": after_id,
|
||||
"before_id": before_id,
|
||||
"limit": limit,
|
||||
},
|
||||
file_list_params.FileListParams,
|
||||
),
|
||||
),
|
||||
model=FileMetadata,
|
||||
)
|
||||
|
||||
async def delete(
|
||||
self,
|
||||
file_id: str,
|
||||
*,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> DeletedFile:
|
||||
"""
|
||||
Delete File
|
||||
|
||||
Args:
|
||||
file_id: ID of the File.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not file_id:
|
||||
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["files-api-2025-04-14"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "files-api-2025-04-14", **(extra_headers or {})}
|
||||
return await self._delete(
|
||||
f"/v1/files/{file_id}?beta=true",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=DeletedFile,
|
||||
)
|
||||
|
||||
async def download(
|
||||
self,
|
||||
file_id: str,
|
||||
*,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> AsyncBinaryAPIResponse:
|
||||
"""
|
||||
Download File
|
||||
|
||||
Args:
|
||||
file_id: ID of the File.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not file_id:
|
||||
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
|
||||
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["files-api-2025-04-14"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "files-api-2025-04-14", **(extra_headers or {})}
|
||||
return await self._get(
|
||||
f"/v1/files/{file_id}/content?beta=true",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=AsyncBinaryAPIResponse,
|
||||
)
|
||||
|
||||
async def retrieve_metadata(
|
||||
self,
|
||||
file_id: str,
|
||||
*,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> FileMetadata:
|
||||
"""
|
||||
Get File Metadata
|
||||
|
||||
Args:
|
||||
file_id: ID of the File.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not file_id:
|
||||
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["files-api-2025-04-14"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "files-api-2025-04-14", **(extra_headers or {})}
|
||||
return await self._get(
|
||||
f"/v1/files/{file_id}?beta=true",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=FileMetadata,
|
||||
)
|
||||
|
||||
async def upload(
|
||||
self,
|
||||
*,
|
||||
file: FileTypes,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> FileMetadata:
|
||||
"""
|
||||
Upload File
|
||||
|
||||
Args:
|
||||
file: The file to upload
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["files-api-2025-04-14"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "files-api-2025-04-14", **(extra_headers or {})}
|
||||
body = deepcopy_minimal({"file": file})
|
||||
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
|
||||
# It should be noted that the actual Content-Type header that will be
|
||||
# sent to the server will contain a `boundary` parameter, e.g.
|
||||
# multipart/form-data; boundary=---abc--
|
||||
extra_headers["Content-Type"] = "multipart/form-data"
|
||||
return await self._post(
|
||||
"/v1/files?beta=true",
|
||||
body=await async_maybe_transform(body, file_upload_params.FileUploadParams),
|
||||
files=files,
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=FileMetadata,
|
||||
)
|
||||
|
||||
|
||||
class FilesWithRawResponse:
|
||||
def __init__(self, files: Files) -> None:
|
||||
self._files = files
|
||||
|
||||
self.list = _legacy_response.to_raw_response_wrapper(
|
||||
files.list,
|
||||
)
|
||||
self.delete = _legacy_response.to_raw_response_wrapper(
|
||||
files.delete,
|
||||
)
|
||||
self.download = to_custom_raw_response_wrapper(
|
||||
files.download,
|
||||
BinaryAPIResponse,
|
||||
)
|
||||
self.retrieve_metadata = _legacy_response.to_raw_response_wrapper(
|
||||
files.retrieve_metadata,
|
||||
)
|
||||
self.upload = _legacy_response.to_raw_response_wrapper(
|
||||
files.upload,
|
||||
)
|
||||
|
||||
|
||||
class AsyncFilesWithRawResponse:
|
||||
def __init__(self, files: AsyncFiles) -> None:
|
||||
self._files = files
|
||||
|
||||
self.list = _legacy_response.async_to_raw_response_wrapper(
|
||||
files.list,
|
||||
)
|
||||
self.delete = _legacy_response.async_to_raw_response_wrapper(
|
||||
files.delete,
|
||||
)
|
||||
self.download = async_to_custom_raw_response_wrapper(
|
||||
files.download,
|
||||
AsyncBinaryAPIResponse,
|
||||
)
|
||||
self.retrieve_metadata = _legacy_response.async_to_raw_response_wrapper(
|
||||
files.retrieve_metadata,
|
||||
)
|
||||
self.upload = _legacy_response.async_to_raw_response_wrapper(
|
||||
files.upload,
|
||||
)
|
||||
|
||||
|
||||
class FilesWithStreamingResponse:
|
||||
def __init__(self, files: Files) -> None:
|
||||
self._files = files
|
||||
|
||||
self.list = to_streamed_response_wrapper(
|
||||
files.list,
|
||||
)
|
||||
self.delete = to_streamed_response_wrapper(
|
||||
files.delete,
|
||||
)
|
||||
self.download = to_custom_streamed_response_wrapper(
|
||||
files.download,
|
||||
StreamedBinaryAPIResponse,
|
||||
)
|
||||
self.retrieve_metadata = to_streamed_response_wrapper(
|
||||
files.retrieve_metadata,
|
||||
)
|
||||
self.upload = to_streamed_response_wrapper(
|
||||
files.upload,
|
||||
)
|
||||
|
||||
|
||||
class AsyncFilesWithStreamingResponse:
|
||||
def __init__(self, files: AsyncFiles) -> None:
|
||||
self._files = files
|
||||
|
||||
self.list = async_to_streamed_response_wrapper(
|
||||
files.list,
|
||||
)
|
||||
self.delete = async_to_streamed_response_wrapper(
|
||||
files.delete,
|
||||
)
|
||||
self.download = async_to_custom_streamed_response_wrapper(
|
||||
files.download,
|
||||
AsyncStreamedBinaryAPIResponse,
|
||||
)
|
||||
self.retrieve_metadata = async_to_streamed_response_wrapper(
|
||||
files.retrieve_metadata,
|
||||
)
|
||||
self.upload = async_to_streamed_response_wrapper(
|
||||
files.upload,
|
||||
)
|
||||
@@ -0,0 +1,33 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from .batches import (
|
||||
Batches,
|
||||
AsyncBatches,
|
||||
BatchesWithRawResponse,
|
||||
AsyncBatchesWithRawResponse,
|
||||
BatchesWithStreamingResponse,
|
||||
AsyncBatchesWithStreamingResponse,
|
||||
)
|
||||
from .messages import (
|
||||
Messages,
|
||||
AsyncMessages,
|
||||
MessagesWithRawResponse,
|
||||
AsyncMessagesWithRawResponse,
|
||||
MessagesWithStreamingResponse,
|
||||
AsyncMessagesWithStreamingResponse,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"Batches",
|
||||
"AsyncBatches",
|
||||
"BatchesWithRawResponse",
|
||||
"AsyncBatchesWithRawResponse",
|
||||
"BatchesWithStreamingResponse",
|
||||
"AsyncBatchesWithStreamingResponse",
|
||||
"Messages",
|
||||
"AsyncMessages",
|
||||
"MessagesWithRawResponse",
|
||||
"AsyncMessagesWithRawResponse",
|
||||
"MessagesWithStreamingResponse",
|
||||
"AsyncMessagesWithStreamingResponse",
|
||||
]
|
||||
@@ -0,0 +1,884 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List, Iterable
|
||||
from itertools import chain
|
||||
|
||||
import httpx
|
||||
|
||||
from .... import _legacy_response
|
||||
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
|
||||
from ...._utils import is_given, maybe_transform, strip_not_given, async_maybe_transform
|
||||
from ...._compat import cached_property
|
||||
from ...._resource import SyncAPIResource, AsyncAPIResource
|
||||
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
|
||||
from ....pagination import SyncPage, AsyncPage
|
||||
from ...._exceptions import AnthropicError
|
||||
from ...._base_client import AsyncPaginator, make_request_options
|
||||
from ...._decoders.jsonl import JSONLDecoder, AsyncJSONLDecoder
|
||||
from ....types.beta.messages import batch_list_params, batch_create_params
|
||||
from ....types.anthropic_beta_param import AnthropicBetaParam
|
||||
from ....types.beta.messages.beta_message_batch import BetaMessageBatch
|
||||
from ....types.beta.messages.beta_deleted_message_batch import BetaDeletedMessageBatch
|
||||
from ....types.beta.messages.beta_message_batch_individual_response import BetaMessageBatchIndividualResponse
|
||||
|
||||
__all__ = ["Batches", "AsyncBatches"]
|
||||
|
||||
|
||||
class Batches(SyncAPIResource):
|
||||
@cached_property
|
||||
def with_raw_response(self) -> BatchesWithRawResponse:
|
||||
"""
|
||||
This property can be used as a prefix for any HTTP method call to return
|
||||
the raw response object instead of the parsed content.
|
||||
|
||||
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
|
||||
"""
|
||||
return BatchesWithRawResponse(self)
|
||||
|
||||
@cached_property
|
||||
def with_streaming_response(self) -> BatchesWithStreamingResponse:
|
||||
"""
|
||||
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
|
||||
|
||||
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
|
||||
"""
|
||||
return BatchesWithStreamingResponse(self)
|
||||
|
||||
def create(
|
||||
self,
|
||||
*,
|
||||
requests: Iterable[batch_create_params.Request],
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> BetaMessageBatch:
|
||||
"""
|
||||
Send a batch of Message creation requests.
|
||||
|
||||
The Message Batches API can be used to process multiple Messages API requests at
|
||||
once. Once a Message Batch is created, it begins processing immediately. Batches
|
||||
can take up to 24 hours to complete.
|
||||
|
||||
Learn more about the Message Batches API in our
|
||||
[user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)
|
||||
|
||||
Args:
|
||||
requests: List of requests for prompt completion. Each is an individual request to create
|
||||
a Message.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["message-batches-2024-09-24"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "message-batches-2024-09-24", **(extra_headers or {})}
|
||||
return self._post(
|
||||
"/v1/messages/batches?beta=true",
|
||||
body=maybe_transform({"requests": requests}, batch_create_params.BatchCreateParams),
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=BetaMessageBatch,
|
||||
)
|
||||
|
||||
def retrieve(
|
||||
self,
|
||||
message_batch_id: str,
|
||||
*,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> BetaMessageBatch:
|
||||
"""This endpoint is idempotent and can be used to poll for Message Batch
|
||||
completion.
|
||||
|
||||
To access the results of a Message Batch, make a request to the
|
||||
`results_url` field in the response.
|
||||
|
||||
Learn more about the Message Batches API in our
|
||||
[user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)
|
||||
|
||||
Args:
|
||||
message_batch_id: ID of the Message Batch.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not message_batch_id:
|
||||
raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["message-batches-2024-09-24"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "message-batches-2024-09-24", **(extra_headers or {})}
|
||||
return self._get(
|
||||
f"/v1/messages/batches/{message_batch_id}?beta=true",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=BetaMessageBatch,
|
||||
)
|
||||
|
||||
def list(
|
||||
self,
|
||||
*,
|
||||
after_id: str | Omit = omit,
|
||||
before_id: str | Omit = omit,
|
||||
limit: int | Omit = omit,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> SyncPage[BetaMessageBatch]:
|
||||
"""List all Message Batches within a Workspace.
|
||||
|
||||
Most recently created batches are
|
||||
returned first.
|
||||
|
||||
Learn more about the Message Batches API in our
|
||||
[user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)
|
||||
|
||||
Args:
|
||||
after_id: ID of the object to use as a cursor for pagination. When provided, returns the
|
||||
page of results immediately after this object.
|
||||
|
||||
before_id: ID of the object to use as a cursor for pagination. When provided, returns the
|
||||
page of results immediately before this object.
|
||||
|
||||
limit: Number of items to return per page.
|
||||
|
||||
Defaults to `20`. Ranges from `1` to `1000`.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["message-batches-2024-09-24"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "message-batches-2024-09-24", **(extra_headers or {})}
|
||||
return self._get_api_list(
|
||||
"/v1/messages/batches?beta=true",
|
||||
page=SyncPage[BetaMessageBatch],
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers,
|
||||
extra_query=extra_query,
|
||||
extra_body=extra_body,
|
||||
timeout=timeout,
|
||||
query=maybe_transform(
|
||||
{
|
||||
"after_id": after_id,
|
||||
"before_id": before_id,
|
||||
"limit": limit,
|
||||
},
|
||||
batch_list_params.BatchListParams,
|
||||
),
|
||||
),
|
||||
model=BetaMessageBatch,
|
||||
)
|
||||
|
||||
def delete(
|
||||
self,
|
||||
message_batch_id: str,
|
||||
*,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> BetaDeletedMessageBatch:
|
||||
"""
|
||||
Delete a Message Batch.
|
||||
|
||||
Message Batches can only be deleted once they've finished processing. If you'd
|
||||
like to delete an in-progress batch, you must first cancel it.
|
||||
|
||||
Learn more about the Message Batches API in our
|
||||
[user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)
|
||||
|
||||
Args:
|
||||
message_batch_id: ID of the Message Batch.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not message_batch_id:
|
||||
raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["message-batches-2024-09-24"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "message-batches-2024-09-24", **(extra_headers or {})}
|
||||
return self._delete(
|
||||
f"/v1/messages/batches/{message_batch_id}?beta=true",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=BetaDeletedMessageBatch,
|
||||
)
|
||||
|
||||
def cancel(
|
||||
self,
|
||||
message_batch_id: str,
|
||||
*,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> BetaMessageBatch:
|
||||
"""Batches may be canceled any time before processing ends.
|
||||
|
||||
Once cancellation is
|
||||
initiated, the batch enters a `canceling` state, at which time the system may
|
||||
complete any in-progress, non-interruptible requests before finalizing
|
||||
cancellation.
|
||||
|
||||
The number of canceled requests is specified in `request_counts`. To determine
|
||||
which requests were canceled, check the individual results within the batch.
|
||||
Note that cancellation may not result in any canceled requests if they were
|
||||
non-interruptible.
|
||||
|
||||
Learn more about the Message Batches API in our
|
||||
[user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)
|
||||
|
||||
Args:
|
||||
message_batch_id: ID of the Message Batch.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not message_batch_id:
|
||||
raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["message-batches-2024-09-24"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "message-batches-2024-09-24", **(extra_headers or {})}
|
||||
return self._post(
|
||||
f"/v1/messages/batches/{message_batch_id}/cancel?beta=true",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=BetaMessageBatch,
|
||||
)
|
||||
|
||||
def results(
|
||||
self,
|
||||
message_batch_id: str,
|
||||
*,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> JSONLDecoder[BetaMessageBatchIndividualResponse]:
|
||||
"""
|
||||
Streams the results of a Message Batch as a `.jsonl` file.
|
||||
|
||||
Each line in the file is a JSON object containing the result of a single request
|
||||
in the Message Batch. Results are not guaranteed to be in the same order as
|
||||
requests. Use the `custom_id` field to match results to requests.
|
||||
|
||||
Learn more about the Message Batches API in our
|
||||
[user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)
|
||||
|
||||
Args:
|
||||
message_batch_id: ID of the Message Batch.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not message_batch_id:
|
||||
raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")
|
||||
|
||||
batch = self.retrieve(message_batch_id=message_batch_id)
|
||||
if not batch.results_url:
|
||||
raise AnthropicError(
|
||||
f"No `results_url` for the given batch; Has it finished processing? {batch.processing_status}"
|
||||
)
|
||||
|
||||
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["message-batches-2024-09-24"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "message-batches-2024-09-24", **(extra_headers or {})}
|
||||
return self._get(
|
||||
batch.results_url,
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=JSONLDecoder[BetaMessageBatchIndividualResponse],
|
||||
stream=True,
|
||||
)
|
||||
|
||||
|
||||
class AsyncBatches(AsyncAPIResource):
|
||||
@cached_property
|
||||
def with_raw_response(self) -> AsyncBatchesWithRawResponse:
|
||||
"""
|
||||
This property can be used as a prefix for any HTTP method call to return
|
||||
the raw response object instead of the parsed content.
|
||||
|
||||
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
|
||||
"""
|
||||
return AsyncBatchesWithRawResponse(self)
|
||||
|
||||
@cached_property
|
||||
def with_streaming_response(self) -> AsyncBatchesWithStreamingResponse:
|
||||
"""
|
||||
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
|
||||
|
||||
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
|
||||
"""
|
||||
return AsyncBatchesWithStreamingResponse(self)
|
||||
|
||||
async def create(
|
||||
self,
|
||||
*,
|
||||
requests: Iterable[batch_create_params.Request],
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> BetaMessageBatch:
|
||||
"""
|
||||
Send a batch of Message creation requests.
|
||||
|
||||
The Message Batches API can be used to process multiple Messages API requests at
|
||||
once. Once a Message Batch is created, it begins processing immediately. Batches
|
||||
can take up to 24 hours to complete.
|
||||
|
||||
Learn more about the Message Batches API in our
|
||||
[user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)
|
||||
|
||||
Args:
|
||||
requests: List of requests for prompt completion. Each is an individual request to create
|
||||
a Message.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["message-batches-2024-09-24"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "message-batches-2024-09-24", **(extra_headers or {})}
|
||||
return await self._post(
|
||||
"/v1/messages/batches?beta=true",
|
||||
body=await async_maybe_transform({"requests": requests}, batch_create_params.BatchCreateParams),
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=BetaMessageBatch,
|
||||
)
|
||||
|
||||
async def retrieve(
|
||||
self,
|
||||
message_batch_id: str,
|
||||
*,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> BetaMessageBatch:
|
||||
"""This endpoint is idempotent and can be used to poll for Message Batch
|
||||
completion.
|
||||
|
||||
To access the results of a Message Batch, make a request to the
|
||||
`results_url` field in the response.
|
||||
|
||||
Learn more about the Message Batches API in our
|
||||
[user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)
|
||||
|
||||
Args:
|
||||
message_batch_id: ID of the Message Batch.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not message_batch_id:
|
||||
raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["message-batches-2024-09-24"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "message-batches-2024-09-24", **(extra_headers or {})}
|
||||
return await self._get(
|
||||
f"/v1/messages/batches/{message_batch_id}?beta=true",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=BetaMessageBatch,
|
||||
)
|
||||
|
||||
def list(
|
||||
self,
|
||||
*,
|
||||
after_id: str | Omit = omit,
|
||||
before_id: str | Omit = omit,
|
||||
limit: int | Omit = omit,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> AsyncPaginator[BetaMessageBatch, AsyncPage[BetaMessageBatch]]:
|
||||
"""List all Message Batches within a Workspace.
|
||||
|
||||
Most recently created batches are
|
||||
returned first.
|
||||
|
||||
Learn more about the Message Batches API in our
|
||||
[user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)
|
||||
|
||||
Args:
|
||||
after_id: ID of the object to use as a cursor for pagination. When provided, returns the
|
||||
page of results immediately after this object.
|
||||
|
||||
before_id: ID of the object to use as a cursor for pagination. When provided, returns the
|
||||
page of results immediately before this object.
|
||||
|
||||
limit: Number of items to return per page.
|
||||
|
||||
Defaults to `20`. Ranges from `1` to `1000`.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["message-batches-2024-09-24"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "message-batches-2024-09-24", **(extra_headers or {})}
|
||||
return self._get_api_list(
|
||||
"/v1/messages/batches?beta=true",
|
||||
page=AsyncPage[BetaMessageBatch],
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers,
|
||||
extra_query=extra_query,
|
||||
extra_body=extra_body,
|
||||
timeout=timeout,
|
||||
query=maybe_transform(
|
||||
{
|
||||
"after_id": after_id,
|
||||
"before_id": before_id,
|
||||
"limit": limit,
|
||||
},
|
||||
batch_list_params.BatchListParams,
|
||||
),
|
||||
),
|
||||
model=BetaMessageBatch,
|
||||
)
|
||||
|
||||
async def delete(
|
||||
self,
|
||||
message_batch_id: str,
|
||||
*,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> BetaDeletedMessageBatch:
|
||||
"""
|
||||
Delete a Message Batch.
|
||||
|
||||
Message Batches can only be deleted once they've finished processing. If you'd
|
||||
like to delete an in-progress batch, you must first cancel it.
|
||||
|
||||
Learn more about the Message Batches API in our
|
||||
[user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)
|
||||
|
||||
Args:
|
||||
message_batch_id: ID of the Message Batch.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not message_batch_id:
|
||||
raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["message-batches-2024-09-24"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "message-batches-2024-09-24", **(extra_headers or {})}
|
||||
return await self._delete(
|
||||
f"/v1/messages/batches/{message_batch_id}?beta=true",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=BetaDeletedMessageBatch,
|
||||
)
|
||||
|
||||
async def cancel(
|
||||
self,
|
||||
message_batch_id: str,
|
||||
*,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> BetaMessageBatch:
|
||||
"""Batches may be canceled any time before processing ends.
|
||||
|
||||
Once cancellation is
|
||||
initiated, the batch enters a `canceling` state, at which time the system may
|
||||
complete any in-progress, non-interruptible requests before finalizing
|
||||
cancellation.
|
||||
|
||||
The number of canceled requests is specified in `request_counts`. To determine
|
||||
which requests were canceled, check the individual results within the batch.
|
||||
Note that cancellation may not result in any canceled requests if they were
|
||||
non-interruptible.
|
||||
|
||||
Learn more about the Message Batches API in our
|
||||
[user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)
|
||||
|
||||
Args:
|
||||
message_batch_id: ID of the Message Batch.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not message_batch_id:
|
||||
raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["message-batches-2024-09-24"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "message-batches-2024-09-24", **(extra_headers or {})}
|
||||
return await self._post(
|
||||
f"/v1/messages/batches/{message_batch_id}/cancel?beta=true",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=BetaMessageBatch,
|
||||
)
|
||||
|
||||
async def results(
|
||||
self,
|
||||
message_batch_id: str,
|
||||
*,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> AsyncJSONLDecoder[BetaMessageBatchIndividualResponse]:
|
||||
"""
|
||||
Streams the results of a Message Batch as a `.jsonl` file.
|
||||
|
||||
Each line in the file is a JSON object containing the result of a single request
|
||||
in the Message Batch. Results are not guaranteed to be in the same order as
|
||||
requests. Use the `custom_id` field to match results to requests.
|
||||
|
||||
Learn more about the Message Batches API in our
|
||||
[user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)
|
||||
|
||||
Args:
|
||||
message_batch_id: ID of the Message Batch.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not message_batch_id:
|
||||
raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")
|
||||
|
||||
batch = await self.retrieve(message_batch_id=message_batch_id)
|
||||
if not batch.results_url:
|
||||
raise AnthropicError(
|
||||
f"No `results_url` for the given batch; Has it finished processing? {batch.processing_status}"
|
||||
)
|
||||
|
||||
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["message-batches-2024-09-24"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "message-batches-2024-09-24", **(extra_headers or {})}
|
||||
return await self._get(
|
||||
batch.results_url,
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=AsyncJSONLDecoder[BetaMessageBatchIndividualResponse],
|
||||
stream=True,
|
||||
)
|
||||
|
||||
|
||||
class BatchesWithRawResponse:
|
||||
def __init__(self, batches: Batches) -> None:
|
||||
self._batches = batches
|
||||
|
||||
self.create = _legacy_response.to_raw_response_wrapper(
|
||||
batches.create,
|
||||
)
|
||||
self.retrieve = _legacy_response.to_raw_response_wrapper(
|
||||
batches.retrieve,
|
||||
)
|
||||
self.list = _legacy_response.to_raw_response_wrapper(
|
||||
batches.list,
|
||||
)
|
||||
self.delete = _legacy_response.to_raw_response_wrapper(
|
||||
batches.delete,
|
||||
)
|
||||
self.cancel = _legacy_response.to_raw_response_wrapper(
|
||||
batches.cancel,
|
||||
)
|
||||
|
||||
|
||||
class AsyncBatchesWithRawResponse:
|
||||
def __init__(self, batches: AsyncBatches) -> None:
|
||||
self._batches = batches
|
||||
|
||||
self.create = _legacy_response.async_to_raw_response_wrapper(
|
||||
batches.create,
|
||||
)
|
||||
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
|
||||
batches.retrieve,
|
||||
)
|
||||
self.list = _legacy_response.async_to_raw_response_wrapper(
|
||||
batches.list,
|
||||
)
|
||||
self.delete = _legacy_response.async_to_raw_response_wrapper(
|
||||
batches.delete,
|
||||
)
|
||||
self.cancel = _legacy_response.async_to_raw_response_wrapper(
|
||||
batches.cancel,
|
||||
)
|
||||
|
||||
|
||||
class BatchesWithStreamingResponse:
|
||||
def __init__(self, batches: Batches) -> None:
|
||||
self._batches = batches
|
||||
|
||||
self.create = to_streamed_response_wrapper(
|
||||
batches.create,
|
||||
)
|
||||
self.retrieve = to_streamed_response_wrapper(
|
||||
batches.retrieve,
|
||||
)
|
||||
self.list = to_streamed_response_wrapper(
|
||||
batches.list,
|
||||
)
|
||||
self.delete = to_streamed_response_wrapper(
|
||||
batches.delete,
|
||||
)
|
||||
self.cancel = to_streamed_response_wrapper(
|
||||
batches.cancel,
|
||||
)
|
||||
|
||||
|
||||
class AsyncBatchesWithStreamingResponse:
|
||||
def __init__(self, batches: AsyncBatches) -> None:
|
||||
self._batches = batches
|
||||
|
||||
self.create = async_to_streamed_response_wrapper(
|
||||
batches.create,
|
||||
)
|
||||
self.retrieve = async_to_streamed_response_wrapper(
|
||||
batches.retrieve,
|
||||
)
|
||||
self.list = async_to_streamed_response_wrapper(
|
||||
batches.list,
|
||||
)
|
||||
self.delete = async_to_streamed_response_wrapper(
|
||||
batches.delete,
|
||||
)
|
||||
self.cancel = async_to_streamed_response_wrapper(
|
||||
batches.cancel,
|
||||
)
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,331 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
import httpx
|
||||
|
||||
from ... import _legacy_response
|
||||
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
|
||||
from ..._utils import is_given, maybe_transform, strip_not_given
|
||||
from ..._compat import cached_property
|
||||
from ..._resource import SyncAPIResource, AsyncAPIResource
|
||||
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
|
||||
from ...pagination import SyncPage, AsyncPage
|
||||
from ...types.beta import model_list_params
|
||||
from ..._base_client import AsyncPaginator, make_request_options
|
||||
from ...types.anthropic_beta_param import AnthropicBetaParam
|
||||
from ...types.beta.beta_model_info import BetaModelInfo
|
||||
|
||||
__all__ = ["Models", "AsyncModels"]
|
||||
|
||||
|
||||
class Models(SyncAPIResource):
|
||||
@cached_property
|
||||
def with_raw_response(self) -> ModelsWithRawResponse:
|
||||
"""
|
||||
This property can be used as a prefix for any HTTP method call to return
|
||||
the raw response object instead of the parsed content.
|
||||
|
||||
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
|
||||
"""
|
||||
return ModelsWithRawResponse(self)
|
||||
|
||||
@cached_property
|
||||
def with_streaming_response(self) -> ModelsWithStreamingResponse:
|
||||
"""
|
||||
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
|
||||
|
||||
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
|
||||
"""
|
||||
return ModelsWithStreamingResponse(self)
|
||||
|
||||
def retrieve(
|
||||
self,
|
||||
model_id: str,
|
||||
*,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> BetaModelInfo:
|
||||
"""
|
||||
Get a specific model.
|
||||
|
||||
The Models API response can be used to determine information about a specific
|
||||
model or resolve a model alias to a model ID.
|
||||
|
||||
Args:
|
||||
model_id: Model identifier or alias.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not model_id:
|
||||
raise ValueError(f"Expected a non-empty value for `model_id` but received {model_id!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given({"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
return self._get(
|
||||
f"/v1/models/{model_id}?beta=true",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=BetaModelInfo,
|
||||
)
|
||||
|
||||
def list(
|
||||
self,
|
||||
*,
|
||||
after_id: str | Omit = omit,
|
||||
before_id: str | Omit = omit,
|
||||
limit: int | Omit = omit,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> SyncPage[BetaModelInfo]:
|
||||
"""
|
||||
List available models.
|
||||
|
||||
The Models API response can be used to determine which models are available for
|
||||
use in the API. More recently released models are listed first.
|
||||
|
||||
Args:
|
||||
after_id: ID of the object to use as a cursor for pagination. When provided, returns the
|
||||
page of results immediately after this object.
|
||||
|
||||
before_id: ID of the object to use as a cursor for pagination. When provided, returns the
|
||||
page of results immediately before this object.
|
||||
|
||||
limit: Number of items to return per page.
|
||||
|
||||
Defaults to `20`. Ranges from `1` to `1000`.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
extra_headers = {
|
||||
**strip_not_given({"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
return self._get_api_list(
|
||||
"/v1/models?beta=true",
|
||||
page=SyncPage[BetaModelInfo],
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers,
|
||||
extra_query=extra_query,
|
||||
extra_body=extra_body,
|
||||
timeout=timeout,
|
||||
query=maybe_transform(
|
||||
{
|
||||
"after_id": after_id,
|
||||
"before_id": before_id,
|
||||
"limit": limit,
|
||||
},
|
||||
model_list_params.ModelListParams,
|
||||
),
|
||||
),
|
||||
model=BetaModelInfo,
|
||||
)
|
||||
|
||||
|
||||
class AsyncModels(AsyncAPIResource):
|
||||
@cached_property
|
||||
def with_raw_response(self) -> AsyncModelsWithRawResponse:
|
||||
"""
|
||||
This property can be used as a prefix for any HTTP method call to return
|
||||
the raw response object instead of the parsed content.
|
||||
|
||||
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
|
||||
"""
|
||||
return AsyncModelsWithRawResponse(self)
|
||||
|
||||
@cached_property
|
||||
def with_streaming_response(self) -> AsyncModelsWithStreamingResponse:
|
||||
"""
|
||||
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
|
||||
|
||||
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
|
||||
"""
|
||||
return AsyncModelsWithStreamingResponse(self)
|
||||
|
||||
async def retrieve(
|
||||
self,
|
||||
model_id: str,
|
||||
*,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> BetaModelInfo:
|
||||
"""
|
||||
Get a specific model.
|
||||
|
||||
The Models API response can be used to determine information about a specific
|
||||
model or resolve a model alias to a model ID.
|
||||
|
||||
Args:
|
||||
model_id: Model identifier or alias.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not model_id:
|
||||
raise ValueError(f"Expected a non-empty value for `model_id` but received {model_id!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given({"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
return await self._get(
|
||||
f"/v1/models/{model_id}?beta=true",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=BetaModelInfo,
|
||||
)
|
||||
|
||||
def list(
|
||||
self,
|
||||
*,
|
||||
after_id: str | Omit = omit,
|
||||
before_id: str | Omit = omit,
|
||||
limit: int | Omit = omit,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> AsyncPaginator[BetaModelInfo, AsyncPage[BetaModelInfo]]:
|
||||
"""
|
||||
List available models.
|
||||
|
||||
The Models API response can be used to determine which models are available for
|
||||
use in the API. More recently released models are listed first.
|
||||
|
||||
Args:
|
||||
after_id: ID of the object to use as a cursor for pagination. When provided, returns the
|
||||
page of results immediately after this object.
|
||||
|
||||
before_id: ID of the object to use as a cursor for pagination. When provided, returns the
|
||||
page of results immediately before this object.
|
||||
|
||||
limit: Number of items to return per page.
|
||||
|
||||
Defaults to `20`. Ranges from `1` to `1000`.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
extra_headers = {
|
||||
**strip_not_given({"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
return self._get_api_list(
|
||||
"/v1/models?beta=true",
|
||||
page=AsyncPage[BetaModelInfo],
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers,
|
||||
extra_query=extra_query,
|
||||
extra_body=extra_body,
|
||||
timeout=timeout,
|
||||
query=maybe_transform(
|
||||
{
|
||||
"after_id": after_id,
|
||||
"before_id": before_id,
|
||||
"limit": limit,
|
||||
},
|
||||
model_list_params.ModelListParams,
|
||||
),
|
||||
),
|
||||
model=BetaModelInfo,
|
||||
)
|
||||
|
||||
|
||||
class ModelsWithRawResponse:
    """Raw-response view over a `Models` resource: each method returns the
    unparsed HTTP response instead of the parsed model object."""

    def __init__(self, models: Models) -> None:
        # Keep a handle on the wrapped resource for sub-resource access.
        self._models = models

        wrap = _legacy_response.to_raw_response_wrapper
        self.retrieve = wrap(models.retrieve)
        self.list = wrap(models.list)
|
||||
|
||||
|
||||
class AsyncModelsWithRawResponse:
    """Raw-response view over an `AsyncModels` resource: each method returns
    the unparsed HTTP response instead of the parsed model object."""

    def __init__(self, models: AsyncModels) -> None:
        # Keep a handle on the wrapped resource for sub-resource access.
        self._models = models

        wrap = _legacy_response.async_to_raw_response_wrapper
        self.retrieve = wrap(models.retrieve)
        self.list = wrap(models.list)
|
||||
|
||||
|
||||
class ModelsWithStreamingResponse:
    """Streaming-response view over a `Models` resource: methods return a
    response object whose body is read lazily rather than eagerly."""

    def __init__(self, models: Models) -> None:
        # Keep a handle on the wrapped resource for sub-resource access.
        self._models = models

        wrap = to_streamed_response_wrapper
        self.retrieve = wrap(models.retrieve)
        self.list = wrap(models.list)
|
||||
|
||||
|
||||
class AsyncModelsWithStreamingResponse:
    """Streaming-response view over an `AsyncModels` resource: methods return
    a response object whose body is read lazily rather than eagerly."""

    def __init__(self, models: AsyncModels) -> None:
        # Keep a handle on the wrapped resource for sub-resource access.
        self._models = models

        wrap = async_to_streamed_response_wrapper
        self.retrieve = wrap(models.retrieve)
        self.list = wrap(models.list)
|
||||
@@ -0,0 +1,33 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from .skills import (
|
||||
Skills,
|
||||
AsyncSkills,
|
||||
SkillsWithRawResponse,
|
||||
AsyncSkillsWithRawResponse,
|
||||
SkillsWithStreamingResponse,
|
||||
AsyncSkillsWithStreamingResponse,
|
||||
)
|
||||
from .versions import (
|
||||
Versions,
|
||||
AsyncVersions,
|
||||
VersionsWithRawResponse,
|
||||
AsyncVersionsWithRawResponse,
|
||||
VersionsWithStreamingResponse,
|
||||
AsyncVersionsWithStreamingResponse,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"Versions",
|
||||
"AsyncVersions",
|
||||
"VersionsWithRawResponse",
|
||||
"AsyncVersionsWithRawResponse",
|
||||
"VersionsWithStreamingResponse",
|
||||
"AsyncVersionsWithStreamingResponse",
|
||||
"Skills",
|
||||
"AsyncSkills",
|
||||
"SkillsWithRawResponse",
|
||||
"AsyncSkillsWithRawResponse",
|
||||
"SkillsWithStreamingResponse",
|
||||
"AsyncSkillsWithStreamingResponse",
|
||||
]
|
||||
@@ -0,0 +1,680 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List, Mapping, Optional, cast
|
||||
from itertools import chain
|
||||
|
||||
import httpx
|
||||
|
||||
from .... import _legacy_response
|
||||
from .versions import (
|
||||
Versions,
|
||||
AsyncVersions,
|
||||
VersionsWithRawResponse,
|
||||
AsyncVersionsWithRawResponse,
|
||||
VersionsWithStreamingResponse,
|
||||
AsyncVersionsWithStreamingResponse,
|
||||
)
|
||||
from ...._types import (
|
||||
Body,
|
||||
Omit,
|
||||
Query,
|
||||
Headers,
|
||||
NotGiven,
|
||||
FileTypes,
|
||||
SequenceNotStr,
|
||||
omit,
|
||||
not_given,
|
||||
)
|
||||
from ...._utils import (
|
||||
is_given,
|
||||
extract_files,
|
||||
maybe_transform,
|
||||
strip_not_given,
|
||||
deepcopy_minimal,
|
||||
async_maybe_transform,
|
||||
)
|
||||
from ...._compat import cached_property
|
||||
from ...._resource import SyncAPIResource, AsyncAPIResource
|
||||
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
|
||||
from ....pagination import SyncPageCursor, AsyncPageCursor
|
||||
from ....types.beta import skill_list_params, skill_create_params
|
||||
from ...._base_client import AsyncPaginator, make_request_options
|
||||
from ....types.anthropic_beta_param import AnthropicBetaParam
|
||||
from ....types.beta.skill_list_response import SkillListResponse
|
||||
from ....types.beta.skill_create_response import SkillCreateResponse
|
||||
from ....types.beta.skill_delete_response import SkillDeleteResponse
|
||||
from ....types.beta.skill_retrieve_response import SkillRetrieveResponse
|
||||
|
||||
__all__ = ["Skills", "AsyncSkills"]
|
||||
|
||||
|
||||
class Skills(SyncAPIResource):
    """Synchronous accessor for the beta Skills API (`/v1/skills`)."""

    @cached_property
    def versions(self) -> Versions:
        # Sub-resource for managing individual versions of a skill.
        return Versions(self._client)

    @cached_property
    def with_raw_response(self) -> SkillsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        return SkillsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> SkillsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        return SkillsWithStreamingResponse(self)

    def _merged_headers(self, betas: List[AnthropicBetaParam] | Omit, extra_headers: Headers | None) -> dict:
        # Build the outgoing header dict. Caller-supplied `extra_headers` win
        # on key collisions; the `anthropic-beta` header always carries the
        # skills beta flag, appended after any betas passed explicitly.
        if is_given(betas):
            beta_value = ",".join(chain((str(e) for e in betas), ["skills-2025-10-02"]))
        else:
            beta_value = "skills-2025-10-02"
        return {"anthropic-beta": beta_value, **(extra_headers or {})}

    def create(
        self,
        *,
        display_title: Optional[str] | Omit = omit,
        files: Optional[SequenceNotStr[FileTypes]] | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SkillCreateResponse:
        """Create Skill.

        Args:
          display_title: Display title for the skill. A human-readable label that is
              not included in the prompt sent to the model.

          files: Files to upload for the skill. All files must be in the same
              top-level directory and must include a SKILL.md file at the root of
              that directory.

          betas: Optional header to specify the beta version(s) you want to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = self._merged_headers(betas, extra_headers)
        form_body = deepcopy_minimal(
            {
                "display_title": display_title,
                "files": files,
            }
        )
        multipart_files = extract_files(cast(Mapping[str, object], form_body), paths=[["files", "<array>"]])
        # The Content-Type actually sent on the wire will also carry a
        # `boundary` parameter, e.g. multipart/form-data; boundary=---abc--
        extra_headers["Content-Type"] = "multipart/form-data"
        return self._post(
            "/v1/skills?beta=true",
            body=maybe_transform(form_body, skill_create_params.SkillCreateParams),
            files=multipart_files,
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=SkillCreateResponse,
        )

    def retrieve(
        self,
        skill_id: str,
        *,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SkillRetrieveResponse:
        """Get Skill.

        Args:
          skill_id: Unique identifier for the skill. The format and length of IDs
              may change over time.

          betas: Optional header to specify the beta version(s) you want to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not skill_id:
            raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
        extra_headers = self._merged_headers(betas, extra_headers)
        return self._get(
            f"/v1/skills/{skill_id}?beta=true",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=SkillRetrieveResponse,
        )

    def list(
        self,
        *,
        limit: int | Omit = omit,
        page: Optional[str] | Omit = omit,
        source: Optional[str] | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncPageCursor[SkillListResponse]:
        """List Skills.

        Args:
          limit: Number of results to return per page. Maximum value is 100.
              Defaults to 20.

          page: Pagination token for fetching a specific page of results. Pass the
              value from a previous response's `next_page` field to get the next
              page of results.

          source: Filter skills by source. If provided, only skills from the
              specified source will be returned: `"custom"` for user-created
              skills, `"anthropic"` for Anthropic-created skills.

          betas: Optional header to specify the beta version(s) you want to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = self._merged_headers(betas, extra_headers)
        return self._get_api_list(
            "/v1/skills?beta=true",
            page=SyncPageCursor[SkillListResponse],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "limit": limit,
                        "page": page,
                        "source": source,
                    },
                    skill_list_params.SkillListParams,
                ),
            ),
            model=SkillListResponse,
        )

    def delete(
        self,
        skill_id: str,
        *,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SkillDeleteResponse:
        """Delete Skill.

        Args:
          skill_id: Unique identifier for the skill. The format and length of IDs
              may change over time.

          betas: Optional header to specify the beta version(s) you want to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not skill_id:
            raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
        extra_headers = self._merged_headers(betas, extra_headers)
        return self._delete(
            f"/v1/skills/{skill_id}?beta=true",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=SkillDeleteResponse,
        )
|
||||
|
||||
|
||||
class AsyncSkills(AsyncAPIResource):
    """Asynchronous accessor for the beta Skills API (`/v1/skills`)."""

    @cached_property
    def versions(self) -> AsyncVersions:
        # Sub-resource for managing individual versions of a skill.
        return AsyncVersions(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncSkillsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        return AsyncSkillsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncSkillsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        return AsyncSkillsWithStreamingResponse(self)

    def _merged_headers(self, betas: List[AnthropicBetaParam] | Omit, extra_headers: Headers | None) -> dict:
        # Build the outgoing header dict. Caller-supplied `extra_headers` win
        # on key collisions; the `anthropic-beta` header always carries the
        # skills beta flag, appended after any betas passed explicitly.
        if is_given(betas):
            beta_value = ",".join(chain((str(e) for e in betas), ["skills-2025-10-02"]))
        else:
            beta_value = "skills-2025-10-02"
        return {"anthropic-beta": beta_value, **(extra_headers or {})}

    async def create(
        self,
        *,
        display_title: Optional[str] | Omit = omit,
        files: Optional[SequenceNotStr[FileTypes]] | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SkillCreateResponse:
        """Create Skill.

        Args:
          display_title: Display title for the skill. A human-readable label that is
              not included in the prompt sent to the model.

          files: Files to upload for the skill. All files must be in the same
              top-level directory and must include a SKILL.md file at the root of
              that directory.

          betas: Optional header to specify the beta version(s) you want to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = self._merged_headers(betas, extra_headers)
        form_body = deepcopy_minimal(
            {
                "display_title": display_title,
                "files": files,
            }
        )
        multipart_files = extract_files(cast(Mapping[str, object], form_body), paths=[["files", "<array>"]])
        # The Content-Type actually sent on the wire will also carry a
        # `boundary` parameter, e.g. multipart/form-data; boundary=---abc--
        extra_headers["Content-Type"] = "multipart/form-data"
        return await self._post(
            "/v1/skills?beta=true",
            body=await async_maybe_transform(form_body, skill_create_params.SkillCreateParams),
            files=multipart_files,
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=SkillCreateResponse,
        )

    async def retrieve(
        self,
        skill_id: str,
        *,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SkillRetrieveResponse:
        """Get Skill.

        Args:
          skill_id: Unique identifier for the skill. The format and length of IDs
              may change over time.

          betas: Optional header to specify the beta version(s) you want to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not skill_id:
            raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
        extra_headers = self._merged_headers(betas, extra_headers)
        return await self._get(
            f"/v1/skills/{skill_id}?beta=true",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=SkillRetrieveResponse,
        )

    def list(
        self,
        *,
        limit: int | Omit = omit,
        page: Optional[str] | Omit = omit,
        source: Optional[str] | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[SkillListResponse, AsyncPageCursor[SkillListResponse]]:
        """List Skills.

        Args:
          limit: Number of results to return per page. Maximum value is 100.
              Defaults to 20.

          page: Pagination token for fetching a specific page of results. Pass the
              value from a previous response's `next_page` field to get the next
              page of results.

          source: Filter skills by source. If provided, only skills from the
              specified source will be returned: `"custom"` for user-created
              skills, `"anthropic"` for Anthropic-created skills.

          betas: Optional header to specify the beta version(s) you want to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = self._merged_headers(betas, extra_headers)
        return self._get_api_list(
            "/v1/skills?beta=true",
            page=AsyncPageCursor[SkillListResponse],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "limit": limit,
                        "page": page,
                        "source": source,
                    },
                    skill_list_params.SkillListParams,
                ),
            ),
            model=SkillListResponse,
        )

    async def delete(
        self,
        skill_id: str,
        *,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SkillDeleteResponse:
        """Delete Skill.

        Args:
          skill_id: Unique identifier for the skill. The format and length of IDs
              may change over time.

          betas: Optional header to specify the beta version(s) you want to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not skill_id:
            raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
        extra_headers = self._merged_headers(betas, extra_headers)
        return await self._delete(
            f"/v1/skills/{skill_id}?beta=true",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=SkillDeleteResponse,
        )
|
||||
|
||||
|
||||
class SkillsWithRawResponse:
    """Raw-response view over a `Skills` resource: each method returns the
    unparsed HTTP response instead of the parsed content."""

    def __init__(self, skills: Skills) -> None:
        # Keep a handle on the wrapped resource so `versions` can delegate.
        self._skills = skills

        wrap = _legacy_response.to_raw_response_wrapper
        self.create = wrap(skills.create)
        self.retrieve = wrap(skills.retrieve)
        self.list = wrap(skills.list)
        self.delete = wrap(skills.delete)

    @cached_property
    def versions(self) -> VersionsWithRawResponse:
        # Lazily build the raw-response view of the versions sub-resource.
        return VersionsWithRawResponse(self._skills.versions)
|
||||
|
||||
|
||||
class AsyncSkillsWithRawResponse:
    """Raw-response view over an `AsyncSkills` resource: each method returns
    the unparsed HTTP response instead of the parsed content."""

    def __init__(self, skills: AsyncSkills) -> None:
        # Keep a handle on the wrapped resource so `versions` can delegate.
        self._skills = skills

        wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = wrap(skills.create)
        self.retrieve = wrap(skills.retrieve)
        self.list = wrap(skills.list)
        self.delete = wrap(skills.delete)

    @cached_property
    def versions(self) -> AsyncVersionsWithRawResponse:
        # Lazily build the raw-response view of the versions sub-resource.
        return AsyncVersionsWithRawResponse(self._skills.versions)
|
||||
|
||||
|
||||
class SkillsWithStreamingResponse:
    """Streaming-response view over a `Skills` resource: methods return a
    response object whose body is read lazily rather than eagerly."""

    def __init__(self, skills: Skills) -> None:
        # Keep a handle on the wrapped resource so `versions` can delegate.
        self._skills = skills

        wrap = to_streamed_response_wrapper
        self.create = wrap(skills.create)
        self.retrieve = wrap(skills.retrieve)
        self.list = wrap(skills.list)
        self.delete = wrap(skills.delete)

    @cached_property
    def versions(self) -> VersionsWithStreamingResponse:
        # Lazily build the streaming view of the versions sub-resource.
        return VersionsWithStreamingResponse(self._skills.versions)
|
||||
|
||||
|
||||
class AsyncSkillsWithStreamingResponse:
    """Streaming-response view over an `AsyncSkills` resource: methods return
    a response object whose body is read lazily rather than eagerly."""

    def __init__(self, skills: AsyncSkills) -> None:
        # Keep a handle on the wrapped resource so `versions` can delegate.
        self._skills = skills

        wrap = async_to_streamed_response_wrapper
        self.create = wrap(skills.create)
        self.retrieve = wrap(skills.retrieve)
        self.list = wrap(skills.list)
        self.delete = wrap(skills.delete)

    @cached_property
    def versions(self) -> AsyncVersionsWithStreamingResponse:
        # Lazily build the streaming view of the versions sub-resource.
        return AsyncVersionsWithStreamingResponse(self._skills.versions)
|
||||
@@ -0,0 +1,658 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List, Mapping, Optional, cast
|
||||
from itertools import chain
|
||||
|
||||
import httpx
|
||||
|
||||
from .... import _legacy_response
|
||||
from ...._types import (
|
||||
Body,
|
||||
Omit,
|
||||
Query,
|
||||
Headers,
|
||||
NotGiven,
|
||||
FileTypes,
|
||||
SequenceNotStr,
|
||||
omit,
|
||||
not_given,
|
||||
)
|
||||
from ...._utils import (
|
||||
is_given,
|
||||
extract_files,
|
||||
maybe_transform,
|
||||
strip_not_given,
|
||||
deepcopy_minimal,
|
||||
async_maybe_transform,
|
||||
)
|
||||
from ...._compat import cached_property
|
||||
from ...._resource import SyncAPIResource, AsyncAPIResource
|
||||
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
|
||||
from ....pagination import SyncPageCursor, AsyncPageCursor
|
||||
from ...._base_client import AsyncPaginator, make_request_options
|
||||
from ....types.beta.skills import version_list_params, version_create_params
|
||||
from ....types.anthropic_beta_param import AnthropicBetaParam
|
||||
from ....types.beta.skills.version_list_response import VersionListResponse
|
||||
from ....types.beta.skills.version_create_response import VersionCreateResponse
|
||||
from ....types.beta.skills.version_delete_response import VersionDeleteResponse
|
||||
from ....types.beta.skills.version_retrieve_response import VersionRetrieveResponse
|
||||
|
||||
__all__ = ["Versions", "AsyncVersions"]
|
||||
|
||||
|
||||
class Versions(SyncAPIResource):
|
||||
@cached_property
|
||||
def with_raw_response(self) -> VersionsWithRawResponse:
|
||||
"""
|
||||
This property can be used as a prefix for any HTTP method call to return
|
||||
the raw response object instead of the parsed content.
|
||||
|
||||
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
|
||||
"""
|
||||
return VersionsWithRawResponse(self)
|
||||
|
||||
@cached_property
|
||||
def with_streaming_response(self) -> VersionsWithStreamingResponse:
|
||||
"""
|
||||
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
|
||||
|
||||
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
|
||||
"""
|
||||
return VersionsWithStreamingResponse(self)
|
||||
|
||||
def create(
|
||||
self,
|
||||
skill_id: str,
|
||||
*,
|
||||
files: Optional[SequenceNotStr[FileTypes]] | Omit = omit,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> VersionCreateResponse:
|
||||
"""
|
||||
Create Skill Version
|
||||
|
||||
Args:
|
||||
skill_id: Unique identifier for the skill.
|
||||
|
||||
The format and length of IDs may change over time.
|
||||
|
||||
files: Files to upload for the skill.
|
||||
|
||||
All files must be in the same top-level directory and must include a SKILL.md
|
||||
file at the root of that directory.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not skill_id:
|
||||
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["skills-2025-10-02"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "skills-2025-10-02", **(extra_headers or {})}
|
||||
body = deepcopy_minimal({"files": files})
|
||||
extracted_files = extract_files(cast(Mapping[str, object], body), paths=[["files", "<array>"]])
|
||||
# It should be noted that the actual Content-Type header that will be
|
||||
# sent to the server will contain a `boundary` parameter, e.g.
|
||||
# multipart/form-data; boundary=---abc--
|
||||
extra_headers["Content-Type"] = "multipart/form-data"
|
||||
return self._post(
|
||||
f"/v1/skills/{skill_id}/versions?beta=true",
|
||||
body=maybe_transform(body, version_create_params.VersionCreateParams),
|
||||
files=extracted_files,
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=VersionCreateResponse,
|
||||
)
|
||||
|
||||
def retrieve(
|
||||
self,
|
||||
version: str,
|
||||
*,
|
||||
skill_id: str,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> VersionRetrieveResponse:
|
||||
"""
|
||||
Get Skill Version
|
||||
|
||||
Args:
|
||||
skill_id: Unique identifier for the skill.
|
||||
|
||||
The format and length of IDs may change over time.
|
||||
|
||||
version: Version identifier for the skill.
|
||||
|
||||
Each version is identified by a Unix epoch timestamp (e.g., "1759178010641129").
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not skill_id:
|
||||
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
|
||||
if not version:
|
||||
raise ValueError(f"Expected a non-empty value for `version` but received {version!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["skills-2025-10-02"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "skills-2025-10-02", **(extra_headers or {})}
|
||||
return self._get(
|
||||
f"/v1/skills/{skill_id}/versions/{version}?beta=true",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=VersionRetrieveResponse,
|
||||
)
|
||||
|
||||
def list(
|
||||
self,
|
||||
skill_id: str,
|
||||
*,
|
||||
limit: Optional[int] | Omit = omit,
|
||||
page: Optional[str] | Omit = omit,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> SyncPageCursor[VersionListResponse]:
|
||||
"""
|
||||
List Skill Versions
|
||||
|
||||
Args:
|
||||
skill_id: Unique identifier for the skill.
|
||||
|
||||
The format and length of IDs may change over time.
|
||||
|
||||
limit: Number of items to return per page.
|
||||
|
||||
Defaults to `20`. Ranges from `1` to `1000`.
|
||||
|
||||
page: Optionally set to the `next_page` token from the previous response.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not skill_id:
|
||||
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["skills-2025-10-02"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "skills-2025-10-02", **(extra_headers or {})}
|
||||
return self._get_api_list(
|
||||
f"/v1/skills/{skill_id}/versions?beta=true",
|
||||
page=SyncPageCursor[VersionListResponse],
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers,
|
||||
extra_query=extra_query,
|
||||
extra_body=extra_body,
|
||||
timeout=timeout,
|
||||
query=maybe_transform(
|
||||
{
|
||||
"limit": limit,
|
||||
"page": page,
|
||||
},
|
||||
version_list_params.VersionListParams,
|
||||
),
|
||||
),
|
||||
model=VersionListResponse,
|
||||
)
|
||||
|
||||
def delete(
|
||||
self,
|
||||
version: str,
|
||||
*,
|
||||
skill_id: str,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> VersionDeleteResponse:
|
||||
"""
|
||||
Delete Skill Version
|
||||
|
||||
Args:
|
||||
skill_id: Unique identifier for the skill.
|
||||
|
||||
The format and length of IDs may change over time.
|
||||
|
||||
version: Version identifier for the skill.
|
||||
|
||||
Each version is identified by a Unix epoch timestamp (e.g., "1759178010641129").
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not skill_id:
|
||||
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
|
||||
if not version:
|
||||
raise ValueError(f"Expected a non-empty value for `version` but received {version!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["skills-2025-10-02"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "skills-2025-10-02", **(extra_headers or {})}
|
||||
return self._delete(
|
||||
f"/v1/skills/{skill_id}/versions/{version}?beta=true",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=VersionDeleteResponse,
|
||||
)
|
||||
|
||||
|
||||
class AsyncVersions(AsyncAPIResource):
|
||||
@cached_property
|
||||
def with_raw_response(self) -> AsyncVersionsWithRawResponse:
|
||||
"""
|
||||
This property can be used as a prefix for any HTTP method call to return
|
||||
the raw response object instead of the parsed content.
|
||||
|
||||
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
|
||||
"""
|
||||
return AsyncVersionsWithRawResponse(self)
|
||||
|
||||
@cached_property
|
||||
def with_streaming_response(self) -> AsyncVersionsWithStreamingResponse:
|
||||
"""
|
||||
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
|
||||
|
||||
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
|
||||
"""
|
||||
return AsyncVersionsWithStreamingResponse(self)
|
||||
|
||||
async def create(
|
||||
self,
|
||||
skill_id: str,
|
||||
*,
|
||||
files: Optional[SequenceNotStr[FileTypes]] | Omit = omit,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> VersionCreateResponse:
|
||||
"""
|
||||
Create Skill Version
|
||||
|
||||
Args:
|
||||
skill_id: Unique identifier for the skill.
|
||||
|
||||
The format and length of IDs may change over time.
|
||||
|
||||
files: Files to upload for the skill.
|
||||
|
||||
All files must be in the same top-level directory and must include a SKILL.md
|
||||
file at the root of that directory.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not skill_id:
|
||||
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["skills-2025-10-02"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "skills-2025-10-02", **(extra_headers or {})}
|
||||
body = deepcopy_minimal({"files": files})
|
||||
extracted_files = extract_files(cast(Mapping[str, object], body), paths=[["files", "<array>"]])
|
||||
# It should be noted that the actual Content-Type header that will be
|
||||
# sent to the server will contain a `boundary` parameter, e.g.
|
||||
# multipart/form-data; boundary=---abc--
|
||||
extra_headers["Content-Type"] = "multipart/form-data"
|
||||
return await self._post(
|
||||
f"/v1/skills/{skill_id}/versions?beta=true",
|
||||
body=await async_maybe_transform(body, version_create_params.VersionCreateParams),
|
||||
files=extracted_files,
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=VersionCreateResponse,
|
||||
)
|
||||
|
||||
async def retrieve(
|
||||
self,
|
||||
version: str,
|
||||
*,
|
||||
skill_id: str,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> VersionRetrieveResponse:
|
||||
"""
|
||||
Get Skill Version
|
||||
|
||||
Args:
|
||||
skill_id: Unique identifier for the skill.
|
||||
|
||||
The format and length of IDs may change over time.
|
||||
|
||||
version: Version identifier for the skill.
|
||||
|
||||
Each version is identified by a Unix epoch timestamp (e.g., "1759178010641129").
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not skill_id:
|
||||
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
|
||||
if not version:
|
||||
raise ValueError(f"Expected a non-empty value for `version` but received {version!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["skills-2025-10-02"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "skills-2025-10-02", **(extra_headers or {})}
|
||||
return await self._get(
|
||||
f"/v1/skills/{skill_id}/versions/{version}?beta=true",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=VersionRetrieveResponse,
|
||||
)
|
||||
|
||||
def list(
|
||||
self,
|
||||
skill_id: str,
|
||||
*,
|
||||
limit: Optional[int] | Omit = omit,
|
||||
page: Optional[str] | Omit = omit,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> AsyncPaginator[VersionListResponse, AsyncPageCursor[VersionListResponse]]:
|
||||
"""
|
||||
List Skill Versions
|
||||
|
||||
Args:
|
||||
skill_id: Unique identifier for the skill.
|
||||
|
||||
The format and length of IDs may change over time.
|
||||
|
||||
limit: Number of items to return per page.
|
||||
|
||||
Defaults to `20`. Ranges from `1` to `1000`.
|
||||
|
||||
page: Optionally set to the `next_page` token from the previous response.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not skill_id:
|
||||
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["skills-2025-10-02"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "skills-2025-10-02", **(extra_headers or {})}
|
||||
return self._get_api_list(
|
||||
f"/v1/skills/{skill_id}/versions?beta=true",
|
||||
page=AsyncPageCursor[VersionListResponse],
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers,
|
||||
extra_query=extra_query,
|
||||
extra_body=extra_body,
|
||||
timeout=timeout,
|
||||
query=maybe_transform(
|
||||
{
|
||||
"limit": limit,
|
||||
"page": page,
|
||||
},
|
||||
version_list_params.VersionListParams,
|
||||
),
|
||||
),
|
||||
model=VersionListResponse,
|
||||
)
|
||||
|
||||
async def delete(
|
||||
self,
|
||||
version: str,
|
||||
*,
|
||||
skill_id: str,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> VersionDeleteResponse:
|
||||
"""
|
||||
Delete Skill Version
|
||||
|
||||
Args:
|
||||
skill_id: Unique identifier for the skill.
|
||||
|
||||
The format and length of IDs may change over time.
|
||||
|
||||
version: Version identifier for the skill.
|
||||
|
||||
Each version is identified by a Unix epoch timestamp (e.g., "1759178010641129").
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not skill_id:
|
||||
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
|
||||
if not version:
|
||||
raise ValueError(f"Expected a non-empty value for `version` but received {version!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given(
|
||||
{
|
||||
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["skills-2025-10-02"]))
|
||||
if is_given(betas)
|
||||
else not_given
|
||||
}
|
||||
),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
extra_headers = {"anthropic-beta": "skills-2025-10-02", **(extra_headers or {})}
|
||||
return await self._delete(
|
||||
f"/v1/skills/{skill_id}/versions/{version}?beta=true",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=VersionDeleteResponse,
|
||||
)
|
||||
|
||||
|
||||
class VersionsWithRawResponse:
|
||||
def __init__(self, versions: Versions) -> None:
|
||||
self._versions = versions
|
||||
|
||||
self.create = _legacy_response.to_raw_response_wrapper(
|
||||
versions.create,
|
||||
)
|
||||
self.retrieve = _legacy_response.to_raw_response_wrapper(
|
||||
versions.retrieve,
|
||||
)
|
||||
self.list = _legacy_response.to_raw_response_wrapper(
|
||||
versions.list,
|
||||
)
|
||||
self.delete = _legacy_response.to_raw_response_wrapper(
|
||||
versions.delete,
|
||||
)
|
||||
|
||||
|
||||
class AsyncVersionsWithRawResponse:
|
||||
def __init__(self, versions: AsyncVersions) -> None:
|
||||
self._versions = versions
|
||||
|
||||
self.create = _legacy_response.async_to_raw_response_wrapper(
|
||||
versions.create,
|
||||
)
|
||||
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
|
||||
versions.retrieve,
|
||||
)
|
||||
self.list = _legacy_response.async_to_raw_response_wrapper(
|
||||
versions.list,
|
||||
)
|
||||
self.delete = _legacy_response.async_to_raw_response_wrapper(
|
||||
versions.delete,
|
||||
)
|
||||
|
||||
|
||||
class VersionsWithStreamingResponse:
|
||||
def __init__(self, versions: Versions) -> None:
|
||||
self._versions = versions
|
||||
|
||||
self.create = to_streamed_response_wrapper(
|
||||
versions.create,
|
||||
)
|
||||
self.retrieve = to_streamed_response_wrapper(
|
||||
versions.retrieve,
|
||||
)
|
||||
self.list = to_streamed_response_wrapper(
|
||||
versions.list,
|
||||
)
|
||||
self.delete = to_streamed_response_wrapper(
|
||||
versions.delete,
|
||||
)
|
||||
|
||||
|
||||
class AsyncVersionsWithStreamingResponse:
|
||||
def __init__(self, versions: AsyncVersions) -> None:
|
||||
self._versions = versions
|
||||
|
||||
self.create = async_to_streamed_response_wrapper(
|
||||
versions.create,
|
||||
)
|
||||
self.retrieve = async_to_streamed_response_wrapper(
|
||||
versions.retrieve,
|
||||
)
|
||||
self.list = async_to_streamed_response_wrapper(
|
||||
versions.list,
|
||||
)
|
||||
self.delete = async_to_streamed_response_wrapper(
|
||||
versions.delete,
|
||||
)
|
||||
@@ -0,0 +1,845 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
from typing_extensions import Literal, overload
|
||||
|
||||
import httpx
|
||||
|
||||
from .. import _legacy_response
|
||||
from ..types import completion_create_params
|
||||
from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
|
||||
from .._utils import is_given, required_args, maybe_transform, strip_not_given, async_maybe_transform
|
||||
from .._compat import cached_property
|
||||
from .._resource import SyncAPIResource, AsyncAPIResource
|
||||
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
|
||||
from .._constants import DEFAULT_TIMEOUT
|
||||
from .._streaming import Stream, AsyncStream
|
||||
from .._base_client import make_request_options
|
||||
from ..types.completion import Completion
|
||||
from ..types.model_param import ModelParam
|
||||
from ..types.metadata_param import MetadataParam
|
||||
from ..types.anthropic_beta_param import AnthropicBetaParam
|
||||
|
||||
__all__ = ["Completions", "AsyncCompletions"]
|
||||
|
||||
|
||||
class Completions(SyncAPIResource):
|
||||
@cached_property
|
||||
def with_raw_response(self) -> CompletionsWithRawResponse:
|
||||
"""
|
||||
This property can be used as a prefix for any HTTP method call to return
|
||||
the raw response object instead of the parsed content.
|
||||
|
||||
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
|
||||
"""
|
||||
return CompletionsWithRawResponse(self)
|
||||
|
||||
@cached_property
|
||||
def with_streaming_response(self) -> CompletionsWithStreamingResponse:
|
||||
"""
|
||||
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
|
||||
|
||||
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
|
||||
"""
|
||||
return CompletionsWithStreamingResponse(self)
|
||||
|
||||
@overload
|
||||
def create(
|
||||
self,
|
||||
*,
|
||||
max_tokens_to_sample: int,
|
||||
model: ModelParam,
|
||||
prompt: str,
|
||||
metadata: MetadataParam | Omit = omit,
|
||||
stop_sequences: SequenceNotStr[str] | Omit = omit,
|
||||
stream: Literal[False] | Omit = omit,
|
||||
temperature: float | Omit = omit,
|
||||
top_k: int | Omit = omit,
|
||||
top_p: float | Omit = omit,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> Completion:
|
||||
"""[Legacy] Create a Text Completion.
|
||||
|
||||
The Text Completions API is a legacy API.
|
||||
|
||||
We recommend using the
|
||||
[Messages API](https://docs.claude.com/en/api/messages) going forward.
|
||||
|
||||
Future models and features will not be compatible with Text Completions. See our
|
||||
[migration guide](https://docs.claude.com/en/api/migrating-from-text-completions-to-messages)
|
||||
for guidance in migrating from Text Completions to Messages.
|
||||
|
||||
Args:
|
||||
max_tokens_to_sample: The maximum number of tokens to generate before stopping.
|
||||
|
||||
Note that our models may stop _before_ reaching this maximum. This parameter
|
||||
only specifies the absolute maximum number of tokens to generate.
|
||||
|
||||
model: The model that will complete your prompt.\n\nSee
|
||||
[models](https://docs.anthropic.com/en/docs/models-overview) for additional
|
||||
details and options.
|
||||
|
||||
prompt: The prompt that you want Claude to complete.
|
||||
|
||||
For proper response generation you will need to format your prompt using
|
||||
alternating `\n\nHuman:` and `\n\nAssistant:` conversational turns. For example:
|
||||
|
||||
```
|
||||
"\n\nHuman: {userQuestion}\n\nAssistant:"
|
||||
```
|
||||
|
||||
See [prompt validation](https://docs.claude.com/en/api/prompt-validation) and
|
||||
our guide to [prompt design](https://docs.claude.com/en/docs/intro-to-prompting)
|
||||
for more details.
|
||||
|
||||
metadata: An object describing metadata about the request.
|
||||
|
||||
stop_sequences: Sequences that will cause the model to stop generating.
|
||||
|
||||
Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
|
||||
sequences in the future. By providing the stop_sequences parameter, you may
|
||||
include additional strings that will cause the model to stop generating.
|
||||
|
||||
stream: Whether to incrementally stream the response using server-sent events.
|
||||
|
||||
See [streaming](https://docs.claude.com/en/api/streaming) for details.
|
||||
|
||||
temperature: Amount of randomness injected into the response.
|
||||
|
||||
Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
|
||||
for analytical / multiple choice, and closer to `1.0` for creative and
|
||||
generative tasks.
|
||||
|
||||
Note that even with `temperature` of `0.0`, the results will not be fully
|
||||
deterministic.
|
||||
|
||||
top_k: Only sample from the top K options for each subsequent token.
|
||||
|
||||
Used to remove "long tail" low probability responses.
|
||||
[Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
|
||||
|
||||
Recommended for advanced use cases only. You usually only need to use
|
||||
`temperature`.
|
||||
|
||||
top_p: Use nucleus sampling.
|
||||
|
||||
In nucleus sampling, we compute the cumulative distribution over all the options
|
||||
for each subsequent token in decreasing probability order and cut it off once it
|
||||
reaches a particular probability specified by `top_p`. You should either alter
|
||||
`temperature` or `top_p`, but not both.
|
||||
|
||||
Recommended for advanced use cases only. You usually only need to use
|
||||
`temperature`.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
...
|
||||
|
||||
@overload
|
||||
def create(
|
||||
self,
|
||||
*,
|
||||
max_tokens_to_sample: int,
|
||||
model: ModelParam,
|
||||
prompt: str,
|
||||
stream: Literal[True],
|
||||
metadata: MetadataParam | Omit = omit,
|
||||
stop_sequences: SequenceNotStr[str] | Omit = omit,
|
||||
temperature: float | Omit = omit,
|
||||
top_k: int | Omit = omit,
|
||||
top_p: float | Omit = omit,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> Stream[Completion]:
|
||||
"""[Legacy] Create a Text Completion.
|
||||
|
||||
The Text Completions API is a legacy API.
|
||||
|
||||
We recommend using the
|
||||
[Messages API](https://docs.claude.com/en/api/messages) going forward.
|
||||
|
||||
Future models and features will not be compatible with Text Completions. See our
|
||||
[migration guide](https://docs.claude.com/en/api/migrating-from-text-completions-to-messages)
|
||||
for guidance in migrating from Text Completions to Messages.
|
||||
|
||||
Args:
|
||||
max_tokens_to_sample: The maximum number of tokens to generate before stopping.
|
||||
|
||||
Note that our models may stop _before_ reaching this maximum. This parameter
|
||||
only specifies the absolute maximum number of tokens to generate.
|
||||
|
||||
model: The model that will complete your prompt.\n\nSee
|
||||
[models](https://docs.anthropic.com/en/docs/models-overview) for additional
|
||||
details and options.
|
||||
|
||||
prompt: The prompt that you want Claude to complete.
|
||||
|
||||
For proper response generation you will need to format your prompt using
|
||||
alternating `\n\nHuman:` and `\n\nAssistant:` conversational turns. For example:
|
||||
|
||||
```
|
||||
"\n\nHuman: {userQuestion}\n\nAssistant:"
|
||||
```
|
||||
|
||||
See [prompt validation](https://docs.claude.com/en/api/prompt-validation) and
|
||||
our guide to [prompt design](https://docs.claude.com/en/docs/intro-to-prompting)
|
||||
for more details.
|
||||
|
||||
stream: Whether to incrementally stream the response using server-sent events.
|
||||
|
||||
See [streaming](https://docs.claude.com/en/api/streaming) for details.
|
||||
|
||||
metadata: An object describing metadata about the request.
|
||||
|
||||
stop_sequences: Sequences that will cause the model to stop generating.
|
||||
|
||||
Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
|
||||
sequences in the future. By providing the stop_sequences parameter, you may
|
||||
include additional strings that will cause the model to stop generating.
|
||||
|
||||
temperature: Amount of randomness injected into the response.
|
||||
|
||||
Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
|
||||
for analytical / multiple choice, and closer to `1.0` for creative and
|
||||
generative tasks.
|
||||
|
||||
Note that even with `temperature` of `0.0`, the results will not be fully
|
||||
deterministic.
|
||||
|
||||
top_k: Only sample from the top K options for each subsequent token.
|
||||
|
||||
Used to remove "long tail" low probability responses.
|
||||
[Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
|
||||
|
||||
Recommended for advanced use cases only. You usually only need to use
|
||||
`temperature`.
|
||||
|
||||
top_p: Use nucleus sampling.
|
||||
|
||||
In nucleus sampling, we compute the cumulative distribution over all the options
|
||||
for each subsequent token in decreasing probability order and cut it off once it
|
||||
reaches a particular probability specified by `top_p`. You should either alter
|
||||
`temperature` or `top_p`, but not both.
|
||||
|
||||
Recommended for advanced use cases only. You usually only need to use
|
||||
`temperature`.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
...
|
||||
|
||||
    @overload
    def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        stream: bool,
        metadata: MetadataParam | Omit = omit,
        stop_sequences: SequenceNotStr[str] | Omit = omit,
        temperature: float | Omit = omit,
        top_k: int | Omit = omit,
        top_p: float | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion | Stream[Completion]:
        """[Legacy] Create a Text Completion.

        The Text Completions API is a legacy API.

        We recommend using the
        [Messages API](https://docs.claude.com/en/api/messages) going forward.

        Future models and features will not be compatible with Text Completions. See our
        [migration guide](https://docs.claude.com/en/api/migrating-from-text-completions-to-messages)
        for guidance in migrating from Text Completions to Messages.

        Args:
          max_tokens_to_sample: The maximum number of tokens to generate before stopping.

              Note that our models may stop _before_ reaching this maximum. This parameter
              only specifies the absolute maximum number of tokens to generate.

          model: The model that will complete your prompt.\n\nSee
              [models](https://docs.anthropic.com/en/docs/models-overview) for additional
              details and options.

          prompt: The prompt that you want Claude to complete.

              For proper response generation you will need to format your prompt using
              alternating `\n\nHuman:` and `\n\nAssistant:` conversational turns. For example:

              ```
              "\n\nHuman: {userQuestion}\n\nAssistant:"
              ```

              See [prompt validation](https://docs.claude.com/en/api/prompt-validation) and
              our guide to [prompt design](https://docs.claude.com/en/docs/intro-to-prompting)
              for more details.

          stream: Whether to incrementally stream the response using server-sent events.

              See [streaming](https://docs.claude.com/en/api/streaming) for details.

          metadata: An object describing metadata about the request.

          stop_sequences: Sequences that will cause the model to stop generating.

              Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
              sequences in the future. By providing the stop_sequences parameter, you may
              include additional strings that will cause the model to stop generating.

          temperature: Amount of randomness injected into the response.

              Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
              for analytical / multiple choice, and closer to `1.0` for creative and
              generative tasks.

              Note that even with `temperature` of `0.0`, the results will not be fully
              deterministic.

          top_k: Only sample from the top K options for each subsequent token.

              Used to remove "long tail" low probability responses.
              [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          top_p: Use nucleus sampling.

              In nucleus sampling, we compute the cumulative distribution over all the options
              for each subsequent token in decreasing probability order and cut it off once it
              reaches a particular probability specified by `top_p`. You should either alter
              `temperature` or `top_p`, but not both.

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          betas: Optional header to specify the beta version(s) you want to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Overload stub only (typing): with `stream` as a runtime bool, the result
        # is the union of the streaming and non-streaming return types. The
        # implementation lives in the undecorated `create` below.
        ...
|
||||
|
||||
    @required_args(["max_tokens_to_sample", "model", "prompt"], ["max_tokens_to_sample", "model", "prompt", "stream"])
    def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        metadata: MetadataParam | Omit = omit,
        stop_sequences: SequenceNotStr[str] | Omit = omit,
        stream: Literal[False] | Literal[True] | Omit = omit,
        temperature: float | Omit = omit,
        top_k: int | Omit = omit,
        top_p: float | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion | Stream[Completion]:
        """[Legacy] Create a Text Completion.

        Implementation behind the typed overloads above; see their docstrings for
        full parameter documentation.
        """
        # Legacy completions can take a long time to generate: if the caller did
        # not set a per-request timeout and the client is still on the library
        # default, bump the timeout to 10 minutes.
        if not is_given(timeout) and self._client.timeout == DEFAULT_TIMEOUT:
            timeout = 600
        # Beta features are requested via the `anthropic-beta` header as a
        # comma-separated list; caller-supplied extra_headers take precedence.
        extra_headers = {
            **strip_not_given({"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
            **(extra_headers or {}),
        }
        return self._post(
            "/v1/complete",
            body=maybe_transform(
                {
                    "max_tokens_to_sample": max_tokens_to_sample,
                    "model": model,
                    "prompt": prompt,
                    "metadata": metadata,
                    "stop_sequences": stop_sequences,
                    "stream": stream,
                    "temperature": temperature,
                    "top_k": top_k,
                    "top_p": top_p,
                },
                # The request-params TypedDict differs between streaming and
                # non-streaming requests, so pick it at runtime.
                completion_create_params.CompletionCreateParamsStreaming
                if stream
                else completion_create_params.CompletionCreateParamsNonStreaming,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            # `stream` may still be the Omit sentinel here; normalize to a plain bool.
            stream=stream or False,
            stream_cls=Stream[Completion],
        )
|
||||
|
||||
|
||||
class AsyncCompletions(AsyncAPIResource):
    """Async resource for the legacy `/v1/complete` Text Completions endpoint."""

    @cached_property
    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        return AsyncCompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        return AsyncCompletionsWithStreamingResponse(self)

    @overload
    async def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        metadata: MetadataParam | Omit = omit,
        stop_sequences: SequenceNotStr[str] | Omit = omit,
        stream: Literal[False] | Omit = omit,
        temperature: float | Omit = omit,
        top_k: int | Omit = omit,
        top_p: float | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion:
        """[Legacy] Create a Text Completion.

        The Text Completions API is a legacy API.

        We recommend using the
        [Messages API](https://docs.claude.com/en/api/messages) going forward.

        Future models and features will not be compatible with Text Completions. See our
        [migration guide](https://docs.claude.com/en/api/migrating-from-text-completions-to-messages)
        for guidance in migrating from Text Completions to Messages.

        Args:
          max_tokens_to_sample: The maximum number of tokens to generate before stopping.

              Note that our models may stop _before_ reaching this maximum. This parameter
              only specifies the absolute maximum number of tokens to generate.

          model: The model that will complete your prompt.\n\nSee
              [models](https://docs.anthropic.com/en/docs/models-overview) for additional
              details and options.

          prompt: The prompt that you want Claude to complete.

              For proper response generation you will need to format your prompt using
              alternating `\n\nHuman:` and `\n\nAssistant:` conversational turns. For example:

              ```
              "\n\nHuman: {userQuestion}\n\nAssistant:"
              ```

              See [prompt validation](https://docs.claude.com/en/api/prompt-validation) and
              our guide to [prompt design](https://docs.claude.com/en/docs/intro-to-prompting)
              for more details.

          metadata: An object describing metadata about the request.

          stop_sequences: Sequences that will cause the model to stop generating.

              Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
              sequences in the future. By providing the stop_sequences parameter, you may
              include additional strings that will cause the model to stop generating.

          stream: Whether to incrementally stream the response using server-sent events.

              See [streaming](https://docs.claude.com/en/api/streaming) for details.

          temperature: Amount of randomness injected into the response.

              Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
              for analytical / multiple choice, and closer to `1.0` for creative and
              generative tasks.

              Note that even with `temperature` of `0.0`, the results will not be fully
              deterministic.

          top_k: Only sample from the top K options for each subsequent token.

              Used to remove "long tail" low probability responses.
              [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          top_p: Use nucleus sampling.

              In nucleus sampling, we compute the cumulative distribution over all the options
              for each subsequent token in decreasing probability order and cut it off once it
              reaches a particular probability specified by `top_p`. You should either alter
              `temperature` or `top_p`, but not both.

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          betas: Optional header to specify the beta version(s) you want to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Overload stub only (typing): non-streaming request, returns a Completion.
        ...

    @overload
    async def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        stream: Literal[True],
        metadata: MetadataParam | Omit = omit,
        stop_sequences: SequenceNotStr[str] | Omit = omit,
        temperature: float | Omit = omit,
        top_k: int | Omit = omit,
        top_p: float | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncStream[Completion]:
        """[Legacy] Create a Text Completion.

        The Text Completions API is a legacy API.

        We recommend using the
        [Messages API](https://docs.claude.com/en/api/messages) going forward.

        Future models and features will not be compatible with Text Completions. See our
        [migration guide](https://docs.claude.com/en/api/migrating-from-text-completions-to-messages)
        for guidance in migrating from Text Completions to Messages.

        Args:
          max_tokens_to_sample: The maximum number of tokens to generate before stopping.

              Note that our models may stop _before_ reaching this maximum. This parameter
              only specifies the absolute maximum number of tokens to generate.

          model: The model that will complete your prompt.\n\nSee
              [models](https://docs.anthropic.com/en/docs/models-overview) for additional
              details and options.

          prompt: The prompt that you want Claude to complete.

              For proper response generation you will need to format your prompt using
              alternating `\n\nHuman:` and `\n\nAssistant:` conversational turns. For example:

              ```
              "\n\nHuman: {userQuestion}\n\nAssistant:"
              ```

              See [prompt validation](https://docs.claude.com/en/api/prompt-validation) and
              our guide to [prompt design](https://docs.claude.com/en/docs/intro-to-prompting)
              for more details.

          stream: Whether to incrementally stream the response using server-sent events.

              See [streaming](https://docs.claude.com/en/api/streaming) for details.

          metadata: An object describing metadata about the request.

          stop_sequences: Sequences that will cause the model to stop generating.

              Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
              sequences in the future. By providing the stop_sequences parameter, you may
              include additional strings that will cause the model to stop generating.

          temperature: Amount of randomness injected into the response.

              Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
              for analytical / multiple choice, and closer to `1.0` for creative and
              generative tasks.

              Note that even with `temperature` of `0.0`, the results will not be fully
              deterministic.

          top_k: Only sample from the top K options for each subsequent token.

              Used to remove "long tail" low probability responses.
              [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          top_p: Use nucleus sampling.

              In nucleus sampling, we compute the cumulative distribution over all the options
              for each subsequent token in decreasing probability order and cut it off once it
              reaches a particular probability specified by `top_p`. You should either alter
              `temperature` or `top_p`, but not both.

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          betas: Optional header to specify the beta version(s) you want to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Overload stub only (typing): streaming request, returns an AsyncStream.
        ...

    @overload
    async def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        stream: bool,
        metadata: MetadataParam | Omit = omit,
        stop_sequences: SequenceNotStr[str] | Omit = omit,
        temperature: float | Omit = omit,
        top_k: int | Omit = omit,
        top_p: float | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion | AsyncStream[Completion]:
        """[Legacy] Create a Text Completion.

        The Text Completions API is a legacy API.

        We recommend using the
        [Messages API](https://docs.claude.com/en/api/messages) going forward.

        Future models and features will not be compatible with Text Completions. See our
        [migration guide](https://docs.claude.com/en/api/migrating-from-text-completions-to-messages)
        for guidance in migrating from Text Completions to Messages.

        Args:
          max_tokens_to_sample: The maximum number of tokens to generate before stopping.

              Note that our models may stop _before_ reaching this maximum. This parameter
              only specifies the absolute maximum number of tokens to generate.

          model: The model that will complete your prompt.\n\nSee
              [models](https://docs.anthropic.com/en/docs/models-overview) for additional
              details and options.

          prompt: The prompt that you want Claude to complete.

              For proper response generation you will need to format your prompt using
              alternating `\n\nHuman:` and `\n\nAssistant:` conversational turns. For example:

              ```
              "\n\nHuman: {userQuestion}\n\nAssistant:"
              ```

              See [prompt validation](https://docs.claude.com/en/api/prompt-validation) and
              our guide to [prompt design](https://docs.claude.com/en/docs/intro-to-prompting)
              for more details.

          stream: Whether to incrementally stream the response using server-sent events.

              See [streaming](https://docs.claude.com/en/api/streaming) for details.

          metadata: An object describing metadata about the request.

          stop_sequences: Sequences that will cause the model to stop generating.

              Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
              sequences in the future. By providing the stop_sequences parameter, you may
              include additional strings that will cause the model to stop generating.

          temperature: Amount of randomness injected into the response.

              Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
              for analytical / multiple choice, and closer to `1.0` for creative and
              generative tasks.

              Note that even with `temperature` of `0.0`, the results will not be fully
              deterministic.

          top_k: Only sample from the top K options for each subsequent token.

              Used to remove "long tail" low probability responses.
              [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          top_p: Use nucleus sampling.

              In nucleus sampling, we compute the cumulative distribution over all the options
              for each subsequent token in decreasing probability order and cut it off once it
              reaches a particular probability specified by `top_p`. You should either alter
              `temperature` or `top_p`, but not both.

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          betas: Optional header to specify the beta version(s) you want to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Overload stub only (typing): runtime-bool `stream`, union return type.
        ...

    @required_args(["max_tokens_to_sample", "model", "prompt"], ["max_tokens_to_sample", "model", "prompt", "stream"])
    async def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        metadata: MetadataParam | Omit = omit,
        stop_sequences: SequenceNotStr[str] | Omit = omit,
        stream: Literal[False] | Literal[True] | Omit = omit,
        temperature: float | Omit = omit,
        top_k: int | Omit = omit,
        top_p: float | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion | AsyncStream[Completion]:
        """[Legacy] Create a Text Completion.

        Implementation behind the typed overloads above; see their docstrings for
        full parameter documentation.
        """
        # Legacy completions can take a long time to generate: if the caller did
        # not set a per-request timeout and the client is still on the library
        # default, bump the timeout to 10 minutes.
        if not is_given(timeout) and self._client.timeout == DEFAULT_TIMEOUT:
            timeout = 600
        # Beta features are requested via the `anthropic-beta` header as a
        # comma-separated list; caller-supplied extra_headers take precedence.
        extra_headers = {
            **strip_not_given({"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
            **(extra_headers or {}),
        }
        return await self._post(
            "/v1/complete",
            body=await async_maybe_transform(
                {
                    "max_tokens_to_sample": max_tokens_to_sample,
                    "model": model,
                    "prompt": prompt,
                    "metadata": metadata,
                    "stop_sequences": stop_sequences,
                    "stream": stream,
                    "temperature": temperature,
                    "top_k": top_k,
                    "top_p": top_p,
                },
                # The request-params TypedDict differs between streaming and
                # non-streaming requests, so pick it at runtime.
                completion_create_params.CompletionCreateParamsStreaming
                if stream
                else completion_create_params.CompletionCreateParamsNonStreaming,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            # `stream` may still be the Omit sentinel here; normalize to a plain bool.
            stream=stream or False,
            stream_cls=AsyncStream[Completion],
        )
|
||||
|
||||
|
||||
class CompletionsWithRawResponse:
    """Mirror of `Completions` whose methods return the raw HTTP response."""

    def __init__(self, completions: Completions) -> None:
        # Wrap the resource's method so calls yield the unparsed response.
        self.create = _legacy_response.to_raw_response_wrapper(completions.create)
        self._completions = completions
|
||||
|
||||
|
||||
class AsyncCompletionsWithRawResponse:
    """Mirror of `AsyncCompletions` whose methods return the raw HTTP response."""

    def __init__(self, completions: AsyncCompletions) -> None:
        # Wrap the resource's method so calls yield the unparsed response.
        self.create = _legacy_response.async_to_raw_response_wrapper(completions.create)
        self._completions = completions
|
||||
|
||||
|
||||
class CompletionsWithStreamingResponse:
    """Mirror of `Completions` whose methods stream the response body lazily."""

    def __init__(self, completions: Completions) -> None:
        # Wrap the resource's method so the response body is not eagerly read.
        self.create = to_streamed_response_wrapper(completions.create)
        self._completions = completions
|
||||
|
||||
|
||||
class AsyncCompletionsWithStreamingResponse:
    """Mirror of `AsyncCompletions` whose methods stream the response body lazily."""

    def __init__(self, completions: AsyncCompletions) -> None:
        # Wrap the resource's method so the response body is not eagerly read.
        self.create = async_to_streamed_response_wrapper(completions.create)
        self._completions = completions
|
||||
@@ -0,0 +1,35 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from .batches import (
|
||||
Batches,
|
||||
AsyncBatches,
|
||||
BatchesWithRawResponse,
|
||||
AsyncBatchesWithRawResponse,
|
||||
BatchesWithStreamingResponse,
|
||||
AsyncBatchesWithStreamingResponse,
|
||||
)
|
||||
from .messages import (
|
||||
DEPRECATED_MODELS,
|
||||
Messages,
|
||||
AsyncMessages,
|
||||
MessagesWithRawResponse,
|
||||
AsyncMessagesWithRawResponse,
|
||||
MessagesWithStreamingResponse,
|
||||
AsyncMessagesWithStreamingResponse,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"Batches",
|
||||
"AsyncBatches",
|
||||
"BatchesWithRawResponse",
|
||||
"AsyncBatchesWithRawResponse",
|
||||
"BatchesWithStreamingResponse",
|
||||
"AsyncBatchesWithStreamingResponse",
|
||||
"Messages",
|
||||
"AsyncMessages",
|
||||
"MessagesWithRawResponse",
|
||||
"AsyncMessagesWithRawResponse",
|
||||
"MessagesWithStreamingResponse",
|
||||
"AsyncMessagesWithStreamingResponse",
|
||||
"DEPRECATED_MODELS",
|
||||
]
|
||||
@@ -0,0 +1,714 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Iterable
|
||||
|
||||
import httpx
|
||||
|
||||
from ... import _legacy_response
|
||||
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
|
||||
from ..._utils import maybe_transform, async_maybe_transform
|
||||
from ..._compat import cached_property
|
||||
from ..._resource import SyncAPIResource, AsyncAPIResource
|
||||
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
|
||||
from ...pagination import SyncPage, AsyncPage
|
||||
from ..._exceptions import AnthropicError
|
||||
from ..._base_client import AsyncPaginator, make_request_options
|
||||
from ...types.messages import batch_list_params, batch_create_params
|
||||
from ..._decoders.jsonl import JSONLDecoder, AsyncJSONLDecoder
|
||||
from ...types.messages.message_batch import MessageBatch
|
||||
from ...types.messages.deleted_message_batch import DeletedMessageBatch
|
||||
from ...types.messages.message_batch_individual_response import MessageBatchIndividualResponse
|
||||
|
||||
__all__ = ["Batches", "AsyncBatches"]
|
||||
|
||||
|
||||
class Batches(SyncAPIResource):
    """Sync resource for the Message Batches API (`/v1/messages/batches`)."""

    @cached_property
    def with_raw_response(self) -> BatchesWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        return BatchesWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> BatchesWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        return BatchesWithStreamingResponse(self)
|
||||
|
||||
    def create(
        self,
        *,
        requests: Iterable[batch_create_params.Request],
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> MessageBatch:
        """
        Send a batch of Message creation requests.

        The Message Batches API can be used to process multiple Messages API requests at
        once. Once a Message Batch is created, it begins processing immediately. Batches
        can take up to 24 hours to complete.

        Learn more about the Message Batches API in our
        [user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)

        Args:
          requests: List of requests for prompt completion. Each is an individual request to create
              a Message.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._post(
            "/v1/messages/batches",
            # Validate/serialize the request payload against the generated params schema.
            body=maybe_transform({"requests": requests}, batch_create_params.BatchCreateParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=MessageBatch,
        )
|
||||
|
||||
    def retrieve(
        self,
        message_batch_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> MessageBatch:
        """This endpoint is idempotent and can be used to poll for Message Batch
        completion.

        To access the results of a Message Batch, make a request to the
        `results_url` field in the response.

        Learn more about the Message Batches API in our
        [user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)

        Args:
          message_batch_id: ID of the Message Batch.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds

        Raises:
          ValueError: If `message_batch_id` is empty, since an empty id would
              produce a malformed request path.
        """
        # Guard against building a URL like `/v1/messages/batches/` which would
        # hit the wrong endpoint.
        if not message_batch_id:
            raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")
        return self._get(
            f"/v1/messages/batches/{message_batch_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=MessageBatch,
        )
|
||||
|
||||
def list(
    self,
    *,
    after_id: str | Omit = omit,
    before_id: str | Omit = omit,
    limit: int | Omit = omit,
    # Extra request parameters; these take precedence over client-level values.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncPage[MessageBatch]:
    """List all Message Batches within a Workspace, most recent first.

    See the Message Batches user guide:
    https://docs.claude.com/en/docs/build-with-claude/batch-processing

    Args:
      after_id: ID of the object to use as a cursor for pagination. When provided, returns the
          page of results immediately after this object.

      before_id: ID of the object to use as a cursor for pagination. When provided, returns the
          page of results immediately before this object.

      limit: Number of items to return per page. Defaults to `20`. Ranges from `1` to `1000`.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    # Build the pagination query up front; omitted params are stripped
    # by the transform.
    query = maybe_transform(
        {
            "after_id": after_id,
            "before_id": before_id,
            "limit": limit,
        },
        batch_list_params.BatchListParams,
    )
    return self._get_api_list(
        "/v1/messages/batches",
        page=SyncPage[MessageBatch],
        options=make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            query=query,
        ),
        model=MessageBatch,
    )
|
||||
|
||||
def delete(
    self,
    message_batch_id: str,
    *,
    # Extra request parameters; these take precedence over client-level values.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> DeletedMessageBatch:
    """Delete a Message Batch.

    Only batches that have finished processing can be deleted; cancel an
    in-progress batch first.

    See the Message Batches user guide:
    https://docs.claude.com/en/docs/build-with-claude/batch-processing

    Args:
      message_batch_id: ID of the Message Batch.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    if not message_batch_id:
        raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")
    request_options = make_request_options(
        extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
    )
    return self._delete(
        f"/v1/messages/batches/{message_batch_id}",
        options=request_options,
        cast_to=DeletedMessageBatch,
    )
|
||||
|
||||
def cancel(
    self,
    message_batch_id: str,
    *,
    # Extra request parameters; these take precedence over client-level values.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> MessageBatch:
    """Cancel a Message Batch before processing ends.

    The batch enters a `canceling` state; in-progress, non-interruptible
    requests may still complete before cancellation is finalized. The number
    of canceled requests appears in `request_counts`; inspect individual
    results to see which requests were canceled (possibly none, if all were
    non-interruptible).

    See the Message Batches user guide:
    https://docs.claude.com/en/docs/build-with-claude/batch-processing

    Args:
      message_batch_id: ID of the Message Batch.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    if not message_batch_id:
        raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")
    request_options = make_request_options(
        extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
    )
    return self._post(
        f"/v1/messages/batches/{message_batch_id}/cancel",
        options=request_options,
        cast_to=MessageBatch,
    )
|
||||
|
||||
def results(
    self,
    message_batch_id: str,
    *,
    # Extra request parameters; these take precedence over client-level values.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> JSONLDecoder[MessageBatchIndividualResponse]:
    """Stream the results of a Message Batch as a `.jsonl` file.

    Each line is a JSON object with the result of a single request in the
    batch. Results are not guaranteed to arrive in request order; match them
    to requests via the `custom_id` field.

    See the Message Batches user guide:
    https://docs.claude.com/en/docs/build-with-claude/batch-processing

    Args:
      message_batch_id: ID of the Message Batch.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds

    Raises:
      ValueError: if `message_batch_id` is empty.
      AnthropicError: if the batch has no `results_url` yet (still processing).
    """
    if not message_batch_id:
        raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")

    # Resolve the batch first: the results live at a URL reported on the
    # batch object, which only exists once processing has finished.
    batch = self.retrieve(message_batch_id=message_batch_id)
    if not batch.results_url:
        raise AnthropicError(
            f"No `results_url` for the given batch; Has it finished processing? {batch.processing_status}"
        )

    extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
    return self._get(
        batch.results_url,
        options=make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        ),
        cast_to=JSONLDecoder[MessageBatchIndividualResponse],
        stream=True,
    )
|
||||
|
||||
|
||||
class AsyncBatches(AsyncAPIResource):
    """Async counterpart of `Batches`; same endpoints, awaitable methods."""

    @cached_property
    def with_raw_response(self) -> AsyncBatchesWithRawResponse:
        """Prefix any HTTP method call with this to get the raw response
        object instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        return AsyncBatchesWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncBatchesWithStreamingResponse:
        """Like `.with_raw_response`, but does not eagerly read the response body.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        return AsyncBatchesWithStreamingResponse(self)

    async def create(
        self,
        *,
        requests: Iterable[batch_create_params.Request],
        # Extra request parameters; these take precedence over client-level values.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> MessageBatch:
        """Send a batch of Message creation requests.

        Processing starts immediately on creation and can take up to 24 hours.

        See the Message Batches user guide:
        https://docs.claude.com/en/docs/build-with-claude/batch-processing

        Args:
          requests: List of requests for prompt completion. Each is an individual request to create
              a Message.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        body = await async_maybe_transform({"requests": requests}, batch_create_params.BatchCreateParams)
        return await self._post(
            "/v1/messages/batches",
            body=body,
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=MessageBatch,
        )

    async def retrieve(
        self,
        message_batch_id: str,
        *,
        # Extra request parameters; these take precedence over client-level values.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> MessageBatch:
        """Fetch the current state of a Message Batch.

        Idempotent, so suitable for polling until processing completes. Once
        finished, the batch's `results_url` field points at the results file.

        See the Message Batches user guide:
        https://docs.claude.com/en/docs/build-with-claude/batch-processing

        Args:
          message_batch_id: ID of the Message Batch.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not message_batch_id:
            raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")
        request_options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return await self._get(
            f"/v1/messages/batches/{message_batch_id}",
            options=request_options,
            cast_to=MessageBatch,
        )

    def list(
        self,
        *,
        after_id: str | Omit = omit,
        before_id: str | Omit = omit,
        limit: int | Omit = omit,
        # Extra request parameters; these take precedence over client-level values.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[MessageBatch, AsyncPage[MessageBatch]]:
        """List all Message Batches within a Workspace, most recent first.

        See the Message Batches user guide:
        https://docs.claude.com/en/docs/build-with-claude/batch-processing

        Args:
          after_id: ID of the object to use as a cursor for pagination. When provided, returns the
              page of results immediately after this object.

          before_id: ID of the object to use as a cursor for pagination. When provided, returns the
              page of results immediately before this object.

          limit: Number of items to return per page. Defaults to `20`. Ranges from `1` to `1000`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Note: not `async def` — pagination is driven lazily by the returned
        # AsyncPaginator.
        query = maybe_transform(
            {
                "after_id": after_id,
                "before_id": before_id,
                "limit": limit,
            },
            batch_list_params.BatchListParams,
        )
        return self._get_api_list(
            "/v1/messages/batches",
            page=AsyncPage[MessageBatch],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=query,
            ),
            model=MessageBatch,
        )

    async def delete(
        self,
        message_batch_id: str,
        *,
        # Extra request parameters; these take precedence over client-level values.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> DeletedMessageBatch:
        """Delete a Message Batch.

        Only batches that have finished processing can be deleted; cancel an
        in-progress batch first.

        See the Message Batches user guide:
        https://docs.claude.com/en/docs/build-with-claude/batch-processing

        Args:
          message_batch_id: ID of the Message Batch.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not message_batch_id:
            raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")
        request_options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return await self._delete(
            f"/v1/messages/batches/{message_batch_id}",
            options=request_options,
            cast_to=DeletedMessageBatch,
        )

    async def cancel(
        self,
        message_batch_id: str,
        *,
        # Extra request parameters; these take precedence over client-level values.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> MessageBatch:
        """Cancel a Message Batch before processing ends.

        The batch enters a `canceling` state; in-progress, non-interruptible
        requests may still complete before cancellation is finalized. The
        number of canceled requests appears in `request_counts`; inspect
        individual results to see which requests were canceled (possibly
        none, if all were non-interruptible).

        See the Message Batches user guide:
        https://docs.claude.com/en/docs/build-with-claude/batch-processing

        Args:
          message_batch_id: ID of the Message Batch.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not message_batch_id:
            raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")
        request_options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return await self._post(
            f"/v1/messages/batches/{message_batch_id}/cancel",
            options=request_options,
            cast_to=MessageBatch,
        )

    async def results(
        self,
        message_batch_id: str,
        *,
        # Extra request parameters; these take precedence over client-level values.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncJSONLDecoder[MessageBatchIndividualResponse]:
        """Stream the results of a Message Batch as a `.jsonl` file.

        Each line is a JSON object with the result of a single request in the
        batch. Results are not guaranteed to arrive in request order; match
        them to requests via the `custom_id` field.

        See the Message Batches user guide:
        https://docs.claude.com/en/docs/build-with-claude/batch-processing

        Args:
          message_batch_id: ID of the Message Batch.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds

        Raises:
          ValueError: if `message_batch_id` is empty.
          AnthropicError: if the batch has no `results_url` yet (still processing).
        """
        if not message_batch_id:
            raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")

        # Resolve the batch first: the results live at a URL reported on the
        # batch object, which only exists once processing has finished.
        batch = await self.retrieve(message_batch_id=message_batch_id)
        if not batch.results_url:
            raise AnthropicError(
                f"No `results_url` for the given batch; Has it finished processing? {batch.processing_status}"
            )

        extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
        return await self._get(
            batch.results_url,
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=AsyncJSONLDecoder[MessageBatchIndividualResponse],
            stream=True,
        )
|
||||
|
||||
|
||||
class BatchesWithRawResponse:
    """Wraps every `Batches` method so calls return the raw HTTP response."""

    def __init__(self, batches: Batches) -> None:
        self._batches = batches

        # One wrapper per public method; alias the wrapper locally for brevity.
        wrap = _legacy_response.to_raw_response_wrapper
        self.create = wrap(batches.create)
        self.retrieve = wrap(batches.retrieve)
        self.list = wrap(batches.list)
        self.delete = wrap(batches.delete)
        self.cancel = wrap(batches.cancel)
|
||||
|
||||
|
||||
class AsyncBatchesWithRawResponse:
    """Wraps every `AsyncBatches` method so calls return the raw HTTP response."""

    def __init__(self, batches: AsyncBatches) -> None:
        self._batches = batches

        # One wrapper per public method; alias the wrapper locally for brevity.
        wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = wrap(batches.create)
        self.retrieve = wrap(batches.retrieve)
        self.list = wrap(batches.list)
        self.delete = wrap(batches.delete)
        self.cancel = wrap(batches.cancel)
|
||||
|
||||
|
||||
class BatchesWithStreamingResponse:
    """Wraps every `Batches` method to stream the response instead of eagerly reading it."""

    def __init__(self, batches: Batches) -> None:
        self._batches = batches

        # One wrapper per public method; alias the wrapper locally for brevity.
        wrap = to_streamed_response_wrapper
        self.create = wrap(batches.create)
        self.retrieve = wrap(batches.retrieve)
        self.list = wrap(batches.list)
        self.delete = wrap(batches.delete)
        self.cancel = wrap(batches.cancel)
|
||||
|
||||
|
||||
class AsyncBatchesWithStreamingResponse:
    """Wraps every `AsyncBatches` method to stream the response instead of eagerly reading it."""

    def __init__(self, batches: AsyncBatches) -> None:
        self._batches = batches

        # One wrapper per public method; alias the wrapper locally for brevity.
        wrap = async_to_streamed_response_wrapper
        self.create = wrap(batches.create)
        self.retrieve = wrap(batches.retrieve)
        self.list = wrap(batches.list)
        self.delete = wrap(batches.delete)
        self.cancel = wrap(batches.cancel)
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,331 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
import httpx
|
||||
|
||||
from .. import _legacy_response
|
||||
from ..types import model_list_params
|
||||
from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
|
||||
from .._utils import is_given, maybe_transform, strip_not_given
|
||||
from .._compat import cached_property
|
||||
from .._resource import SyncAPIResource, AsyncAPIResource
|
||||
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
|
||||
from ..pagination import SyncPage, AsyncPage
|
||||
from .._base_client import AsyncPaginator, make_request_options
|
||||
from ..types.model_info import ModelInfo
|
||||
from ..types.anthropic_beta_param import AnthropicBetaParam
|
||||
|
||||
__all__ = ["Models", "AsyncModels"]
|
||||
|
||||
|
||||
class Models(SyncAPIResource):
    """Synchronous access to the Models API (model lookup and listing)."""

    @cached_property
    def with_raw_response(self) -> ModelsWithRawResponse:
        """Prefix any HTTP method call with this to get the raw response
        object instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        return ModelsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ModelsWithStreamingResponse:
        """Like `.with_raw_response`, but does not eagerly read the response body.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        return ModelsWithStreamingResponse(self)

    def retrieve(
        self,
        model_id: str,
        *,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Extra request parameters; these take precedence over client-level values.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ModelInfo:
        """Get a specific model.

        Useful for inspecting a model or resolving a model alias to its
        canonical model ID.

        Args:
          model_id: Model identifier or alias.

          betas: Optional header to specify the beta version(s) you want to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not model_id:
            raise ValueError(f"Expected a non-empty value for `model_id` but received {model_id!r}")
        # Only emit the beta header when betas were actually supplied;
        # strip_not_given drops the not_given placeholder.
        beta_headers = strip_not_given(
            {"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}
        )
        extra_headers = {**beta_headers, **(extra_headers or {})}
        return self._get(
            f"/v1/models/{model_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ModelInfo,
        )

    def list(
        self,
        *,
        after_id: str | Omit = omit,
        before_id: str | Omit = omit,
        limit: int | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        # Extra request parameters; these take precedence over client-level values.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncPage[ModelInfo]:
        """List available models, most recently released first.

        Args:
          after_id: ID of the object to use as a cursor for pagination. When provided, returns the
              page of results immediately after this object.

          before_id: ID of the object to use as a cursor for pagination. When provided, returns the
              page of results immediately before this object.

          limit: Number of items to return per page. Defaults to `20`. Ranges from `1` to `1000`.

          betas: Optional header to specify the beta version(s) you want to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Only emit the beta header when betas were actually supplied.
        beta_headers = strip_not_given(
            {"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}
        )
        extra_headers = {**beta_headers, **(extra_headers or {})}
        query = maybe_transform(
            {
                "after_id": after_id,
                "before_id": before_id,
                "limit": limit,
            },
            model_list_params.ModelListParams,
        )
        return self._get_api_list(
            "/v1/models",
            page=SyncPage[ModelInfo],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=query,
            ),
            model=ModelInfo,
        )
|
||||
|
||||
|
||||
class AsyncModels(AsyncAPIResource):
|
||||
@cached_property
|
||||
def with_raw_response(self) -> AsyncModelsWithRawResponse:
|
||||
"""
|
||||
This property can be used as a prefix for any HTTP method call to return
|
||||
the raw response object instead of the parsed content.
|
||||
|
||||
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
|
||||
"""
|
||||
return AsyncModelsWithRawResponse(self)
|
||||
|
||||
@cached_property
|
||||
def with_streaming_response(self) -> AsyncModelsWithStreamingResponse:
|
||||
"""
|
||||
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
|
||||
|
||||
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
|
||||
"""
|
||||
return AsyncModelsWithStreamingResponse(self)
|
||||
|
||||
async def retrieve(
|
||||
self,
|
||||
model_id: str,
|
||||
*,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> ModelInfo:
|
||||
"""
|
||||
Get a specific model.
|
||||
|
||||
The Models API response can be used to determine information about a specific
|
||||
model or resolve a model alias to a model ID.
|
||||
|
||||
Args:
|
||||
model_id: Model identifier or alias.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not model_id:
|
||||
raise ValueError(f"Expected a non-empty value for `model_id` but received {model_id!r}")
|
||||
extra_headers = {
|
||||
**strip_not_given({"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
return await self._get(
|
||||
f"/v1/models/{model_id}",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=ModelInfo,
|
||||
)
|
||||
|
||||
def list(
|
||||
self,
|
||||
*,
|
||||
after_id: str | Omit = omit,
|
||||
before_id: str | Omit = omit,
|
||||
limit: int | Omit = omit,
|
||||
betas: List[AnthropicBetaParam] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> AsyncPaginator[ModelInfo, AsyncPage[ModelInfo]]:
|
||||
"""
|
||||
List available models.
|
||||
|
||||
The Models API response can be used to determine which models are available for
|
||||
use in the API. More recently released models are listed first.
|
||||
|
||||
Args:
|
||||
after_id: ID of the object to use as a cursor for pagination. When provided, returns the
|
||||
page of results immediately after this object.
|
||||
|
||||
before_id: ID of the object to use as a cursor for pagination. When provided, returns the
|
||||
page of results immediately before this object.
|
||||
|
||||
limit: Number of items to return per page.
|
||||
|
||||
Defaults to `20`. Ranges from `1` to `1000`.
|
||||
|
||||
betas: Optional header to specify the beta version(s) you want to use.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
extra_headers = {
|
||||
**strip_not_given({"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
|
||||
**(extra_headers or {}),
|
||||
}
|
||||
return self._get_api_list(
|
||||
"/v1/models",
|
||||
page=AsyncPage[ModelInfo],
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers,
|
||||
extra_query=extra_query,
|
||||
extra_body=extra_body,
|
||||
timeout=timeout,
|
||||
query=maybe_transform(
|
||||
{
|
||||
"after_id": after_id,
|
||||
"before_id": before_id,
|
||||
"limit": limit,
|
||||
},
|
||||
model_list_params.ModelListParams,
|
||||
),
|
||||
),
|
||||
model=ModelInfo,
|
||||
)
|
||||
|
||||
|
||||
class ModelsWithRawResponse:
|
||||
def __init__(self, models: Models) -> None:
|
||||
self._models = models
|
||||
|
||||
self.retrieve = _legacy_response.to_raw_response_wrapper(
|
||||
models.retrieve,
|
||||
)
|
||||
self.list = _legacy_response.to_raw_response_wrapper(
|
||||
models.list,
|
||||
)
|
||||
|
||||
|
||||
class AsyncModelsWithRawResponse:
|
||||
def __init__(self, models: AsyncModels) -> None:
|
||||
self._models = models
|
||||
|
||||
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
|
||||
models.retrieve,
|
||||
)
|
||||
self.list = _legacy_response.async_to_raw_response_wrapper(
|
||||
models.list,
|
||||
)
|
||||
|
||||
|
||||
class ModelsWithStreamingResponse:
|
||||
def __init__(self, models: Models) -> None:
|
||||
self._models = models
|
||||
|
||||
self.retrieve = to_streamed_response_wrapper(
|
||||
models.retrieve,
|
||||
)
|
||||
self.list = to_streamed_response_wrapper(
|
||||
models.list,
|
||||
)
|
||||
|
||||
|
||||
class AsyncModelsWithStreamingResponse:
|
||||
def __init__(self, models: AsyncModels) -> None:
|
||||
self._models = models
|
||||
|
||||
self.retrieve = async_to_streamed_response_wrapper(
|
||||
models.retrieve,
|
||||
)
|
||||
self.list = async_to_streamed_response_wrapper(
|
||||
models.list,
|
||||
)
|
||||
@@ -0,0 +1,135 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from .model import Model as Model
|
||||
from .usage import Usage as Usage
|
||||
from .shared import (
|
||||
ErrorObject as ErrorObject,
|
||||
BillingError as BillingError,
|
||||
ErrorResponse as ErrorResponse,
|
||||
NotFoundError as NotFoundError,
|
||||
APIErrorObject as APIErrorObject,
|
||||
RateLimitError as RateLimitError,
|
||||
OverloadedError as OverloadedError,
|
||||
PermissionError as PermissionError,
|
||||
AuthenticationError as AuthenticationError,
|
||||
GatewayTimeoutError as GatewayTimeoutError,
|
||||
InvalidRequestError as InvalidRequestError,
|
||||
)
|
||||
from .message import Message as Message
|
||||
from .beta_error import BetaError as BetaError
|
||||
from .completion import Completion as Completion
|
||||
from .model_info import ModelInfo as ModelInfo
|
||||
from .text_block import TextBlock as TextBlock
|
||||
from .text_delta import TextDelta as TextDelta
|
||||
from .tool_param import ToolParam as ToolParam
|
||||
from .model_param import ModelParam as ModelParam
|
||||
from .stop_reason import StopReason as StopReason
|
||||
from .content_block import ContentBlock as ContentBlock
|
||||
from .message_param import MessageParam as MessageParam
|
||||
from .text_citation import TextCitation as TextCitation
|
||||
from .beta_api_error import BetaAPIError as BetaAPIError
|
||||
from .cache_creation import CacheCreation as CacheCreation
|
||||
from .metadata_param import MetadataParam as MetadataParam
|
||||
from .thinking_block import ThinkingBlock as ThinkingBlock
|
||||
from .thinking_delta import ThinkingDelta as ThinkingDelta
|
||||
from .tool_use_block import ToolUseBlock as ToolUseBlock
|
||||
from .citations_delta import CitationsDelta as CitationsDelta
|
||||
from .signature_delta import SignatureDelta as SignatureDelta
|
||||
from .input_json_delta import InputJSONDelta as InputJSONDelta
|
||||
from .text_block_param import TextBlockParam as TextBlockParam
|
||||
from .tool_union_param import ToolUnionParam as ToolUnionParam
|
||||
from .image_block_param import ImageBlockParam as ImageBlockParam
|
||||
from .model_list_params import ModelListParams as ModelListParams
|
||||
from .server_tool_usage import ServerToolUsage as ServerToolUsage
|
||||
from .tool_choice_param import ToolChoiceParam as ToolChoiceParam
|
||||
from .beta_billing_error import BetaBillingError as BetaBillingError
|
||||
from .message_stop_event import MessageStopEvent as MessageStopEvent
|
||||
from .beta_error_response import BetaErrorResponse as BetaErrorResponse
|
||||
from .content_block_param import ContentBlockParam as ContentBlockParam
|
||||
from .message_delta_event import MessageDeltaEvent as MessageDeltaEvent
|
||||
from .message_delta_usage import MessageDeltaUsage as MessageDeltaUsage
|
||||
from .message_start_event import MessageStartEvent as MessageStartEvent
|
||||
from .text_citation_param import TextCitationParam as TextCitationParam
|
||||
from .anthropic_beta_param import AnthropicBetaParam as AnthropicBetaParam
|
||||
from .beta_not_found_error import BetaNotFoundError as BetaNotFoundError
|
||||
from .document_block_param import DocumentBlockParam as DocumentBlockParam
|
||||
from .message_stream_event import MessageStreamEvent as MessageStreamEvent
|
||||
from .message_tokens_count import MessageTokensCount as MessageTokensCount
|
||||
from .thinking_block_param import ThinkingBlockParam as ThinkingBlockParam
|
||||
from .tool_use_block_param import ToolUseBlockParam as ToolUseBlockParam
|
||||
from .url_pdf_source_param import URLPDFSourceParam as URLPDFSourceParam
|
||||
from .beta_overloaded_error import BetaOverloadedError as BetaOverloadedError
|
||||
from .beta_permission_error import BetaPermissionError as BetaPermissionError
|
||||
from .beta_rate_limit_error import BetaRateLimitError as BetaRateLimitError
|
||||
from .message_create_params import MessageCreateParams as MessageCreateParams
|
||||
from .server_tool_use_block import ServerToolUseBlock as ServerToolUseBlock
|
||||
from .thinking_config_param import ThinkingConfigParam as ThinkingConfigParam
|
||||
from .tool_choice_any_param import ToolChoiceAnyParam as ToolChoiceAnyParam
|
||||
from .citation_char_location import CitationCharLocation as CitationCharLocation
|
||||
from .citation_page_location import CitationPageLocation as CitationPageLocation
|
||||
from .citations_config_param import CitationsConfigParam as CitationsConfigParam
|
||||
from .raw_message_stop_event import RawMessageStopEvent as RawMessageStopEvent
|
||||
from .tool_choice_auto_param import ToolChoiceAutoParam as ToolChoiceAutoParam
|
||||
from .tool_choice_none_param import ToolChoiceNoneParam as ToolChoiceNoneParam
|
||||
from .tool_choice_tool_param import ToolChoiceToolParam as ToolChoiceToolParam
|
||||
from .url_image_source_param import URLImageSourceParam as URLImageSourceParam
|
||||
from .base64_pdf_source_param import Base64PDFSourceParam as Base64PDFSourceParam
|
||||
from .plain_text_source_param import PlainTextSourceParam as PlainTextSourceParam
|
||||
from .raw_content_block_delta import RawContentBlockDelta as RawContentBlockDelta
|
||||
from .raw_message_delta_event import RawMessageDeltaEvent as RawMessageDeltaEvent
|
||||
from .raw_message_start_event import RawMessageStartEvent as RawMessageStartEvent
|
||||
from .redacted_thinking_block import RedactedThinkingBlock as RedactedThinkingBlock
|
||||
from .tool_result_block_param import ToolResultBlockParam as ToolResultBlockParam
|
||||
from .web_search_result_block import WebSearchResultBlock as WebSearchResultBlock
|
||||
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
|
||||
from .content_block_stop_event import ContentBlockStopEvent as ContentBlockStopEvent
|
||||
from .raw_message_stream_event import RawMessageStreamEvent as RawMessageStreamEvent
|
||||
from .tool_bash_20250124_param import ToolBash20250124Param as ToolBash20250124Param
|
||||
from .base64_image_source_param import Base64ImageSourceParam as Base64ImageSourceParam
|
||||
from .beta_authentication_error import BetaAuthenticationError as BetaAuthenticationError
|
||||
from .content_block_delta_event import ContentBlockDeltaEvent as ContentBlockDeltaEvent
|
||||
from .content_block_start_event import ContentBlockStartEvent as ContentBlockStartEvent
|
||||
from .search_result_block_param import SearchResultBlockParam as SearchResultBlockParam
|
||||
from .beta_gateway_timeout_error import BetaGatewayTimeoutError as BetaGatewayTimeoutError
|
||||
from .beta_invalid_request_error import BetaInvalidRequestError as BetaInvalidRequestError
|
||||
from .content_block_source_param import ContentBlockSourceParam as ContentBlockSourceParam
|
||||
from .message_count_tokens_params import MessageCountTokensParams as MessageCountTokensParams
|
||||
from .server_tool_use_block_param import ServerToolUseBlockParam as ServerToolUseBlockParam
|
||||
from .citation_char_location_param import CitationCharLocationParam as CitationCharLocationParam
|
||||
from .citation_page_location_param import CitationPageLocationParam as CitationPageLocationParam
|
||||
from .raw_content_block_stop_event import RawContentBlockStopEvent as RawContentBlockStopEvent
|
||||
from .web_search_tool_result_block import WebSearchToolResultBlock as WebSearchToolResultBlock
|
||||
from .web_search_tool_result_error import WebSearchToolResultError as WebSearchToolResultError
|
||||
from .cache_control_ephemeral_param import CacheControlEphemeralParam as CacheControlEphemeralParam
|
||||
from .raw_content_block_delta_event import RawContentBlockDeltaEvent as RawContentBlockDeltaEvent
|
||||
from .raw_content_block_start_event import RawContentBlockStartEvent as RawContentBlockStartEvent
|
||||
from .redacted_thinking_block_param import RedactedThinkingBlockParam as RedactedThinkingBlockParam
|
||||
from .thinking_config_enabled_param import ThinkingConfigEnabledParam as ThinkingConfigEnabledParam
|
||||
from .web_search_result_block_param import WebSearchResultBlockParam as WebSearchResultBlockParam
|
||||
from .thinking_config_disabled_param import ThinkingConfigDisabledParam as ThinkingConfigDisabledParam
|
||||
from .web_search_tool_20250305_param import WebSearchTool20250305Param as WebSearchTool20250305Param
|
||||
from .citation_content_block_location import CitationContentBlockLocation as CitationContentBlockLocation
|
||||
from .message_count_tokens_tool_param import MessageCountTokensToolParam as MessageCountTokensToolParam
|
||||
from .tool_text_editor_20250124_param import ToolTextEditor20250124Param as ToolTextEditor20250124Param
|
||||
from .tool_text_editor_20250429_param import ToolTextEditor20250429Param as ToolTextEditor20250429Param
|
||||
from .tool_text_editor_20250728_param import ToolTextEditor20250728Param as ToolTextEditor20250728Param
|
||||
from .citations_search_result_location import CitationsSearchResultLocation as CitationsSearchResultLocation
|
||||
from .content_block_source_content_param import ContentBlockSourceContentParam as ContentBlockSourceContentParam
|
||||
from .web_search_tool_result_block_param import WebSearchToolResultBlockParam as WebSearchToolResultBlockParam
|
||||
from .web_search_tool_request_error_param import WebSearchToolRequestErrorParam as WebSearchToolRequestErrorParam
|
||||
from .citations_web_search_result_location import CitationsWebSearchResultLocation as CitationsWebSearchResultLocation
|
||||
from .web_search_tool_result_block_content import WebSearchToolResultBlockContent as WebSearchToolResultBlockContent
|
||||
from .citation_content_block_location_param import (
|
||||
CitationContentBlockLocationParam as CitationContentBlockLocationParam,
|
||||
)
|
||||
from .citation_search_result_location_param import (
|
||||
CitationSearchResultLocationParam as CitationSearchResultLocationParam,
|
||||
)
|
||||
from .citation_web_search_result_location_param import (
|
||||
CitationWebSearchResultLocationParam as CitationWebSearchResultLocationParam,
|
||||
)
|
||||
from .web_search_tool_result_block_param_content_param import (
|
||||
WebSearchToolResultBlockParamContentParam as WebSearchToolResultBlockParamContentParam,
|
||||
)
|
||||
@@ -0,0 +1,32 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Union
|
||||
from typing_extensions import Literal, TypeAlias
|
||||
|
||||
__all__ = ["AnthropicBetaParam"]
|
||||
|
||||
AnthropicBetaParam: TypeAlias = Union[
|
||||
str,
|
||||
Literal[
|
||||
"message-batches-2024-09-24",
|
||||
"prompt-caching-2024-07-31",
|
||||
"computer-use-2024-10-22",
|
||||
"computer-use-2025-01-24",
|
||||
"pdfs-2024-09-25",
|
||||
"token-counting-2024-11-01",
|
||||
"token-efficient-tools-2025-02-19",
|
||||
"output-128k-2025-02-19",
|
||||
"files-api-2025-04-14",
|
||||
"mcp-client-2025-04-04",
|
||||
"dev-full-thinking-2025-05-14",
|
||||
"interleaved-thinking-2025-05-14",
|
||||
"code-execution-2025-05-22",
|
||||
"extended-cache-ttl-2025-04-11",
|
||||
"context-1m-2025-08-07",
|
||||
"context-management-2025-06-27",
|
||||
"model-context-window-exceeded-2025-08-26",
|
||||
"skills-2025-10-02",
|
||||
],
|
||||
]
|
||||
@@ -0,0 +1,23 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Union
|
||||
from typing_extensions import Literal, Required, Annotated, TypedDict
|
||||
|
||||
from .._types import Base64FileInput
|
||||
from .._utils import PropertyInfo
|
||||
from .._models import set_pydantic_config
|
||||
|
||||
__all__ = ["Base64ImageSourceParam"]
|
||||
|
||||
|
||||
class Base64ImageSourceParam(TypedDict, total=False):
|
||||
data: Required[Annotated[Union[str, Base64FileInput], PropertyInfo(format="base64")]]
|
||||
|
||||
media_type: Required[Literal["image/jpeg", "image/png", "image/gif", "image/webp"]]
|
||||
|
||||
type: Required[Literal["base64"]]
|
||||
|
||||
|
||||
set_pydantic_config(Base64ImageSourceParam, {"arbitrary_types_allowed": True})
|
||||
@@ -0,0 +1,23 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Union
|
||||
from typing_extensions import Literal, Required, Annotated, TypedDict
|
||||
|
||||
from .._types import Base64FileInput
|
||||
from .._utils import PropertyInfo
|
||||
from .._models import set_pydantic_config
|
||||
|
||||
__all__ = ["Base64PDFSourceParam"]
|
||||
|
||||
|
||||
class Base64PDFSourceParam(TypedDict, total=False):
|
||||
data: Required[Annotated[Union[str, Base64FileInput], PropertyInfo(format="base64")]]
|
||||
|
||||
media_type: Required[Literal["application/pdf"]]
|
||||
|
||||
type: Required[Literal["base64"]]
|
||||
|
||||
|
||||
set_pydantic_config(Base64PDFSourceParam, {"arbitrary_types_allowed": True})
|
||||
@@ -0,0 +1,283 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from .beta_skill import BetaSkill as BetaSkill
|
||||
from .beta_usage import BetaUsage as BetaUsage
|
||||
from .beta_message import BetaMessage as BetaMessage
|
||||
from .deleted_file import DeletedFile as DeletedFile
|
||||
from .file_metadata import FileMetadata as FileMetadata
|
||||
from .beta_container import BetaContainer as BetaContainer
|
||||
from .beta_model_info import BetaModelInfo as BetaModelInfo
|
||||
from .beta_text_block import BetaTextBlock as BetaTextBlock
|
||||
from .beta_text_delta import BetaTextDelta as BetaTextDelta
|
||||
from .beta_tool_param import BetaToolParam as BetaToolParam
|
||||
from .beta_stop_reason import BetaStopReason as BetaStopReason
|
||||
from .file_list_params import FileListParams as FileListParams
|
||||
from .beta_skill_params import BetaSkillParams as BetaSkillParams
|
||||
from .model_list_params import ModelListParams as ModelListParams
|
||||
from .skill_list_params import SkillListParams as SkillListParams
|
||||
from .beta_content_block import BetaContentBlock as BetaContentBlock
|
||||
from .beta_message_param import BetaMessageParam as BetaMessageParam
|
||||
from .beta_text_citation import BetaTextCitation as BetaTextCitation
|
||||
from .file_upload_params import FileUploadParams as FileUploadParams
|
||||
from .beta_cache_creation import BetaCacheCreation as BetaCacheCreation
|
||||
from .beta_document_block import BetaDocumentBlock as BetaDocumentBlock
|
||||
from .beta_metadata_param import BetaMetadataParam as BetaMetadataParam
|
||||
from .beta_thinking_block import BetaThinkingBlock as BetaThinkingBlock
|
||||
from .beta_thinking_delta import BetaThinkingDelta as BetaThinkingDelta
|
||||
from .beta_tool_use_block import BetaToolUseBlock as BetaToolUseBlock
|
||||
from .skill_create_params import SkillCreateParams as SkillCreateParams
|
||||
from .skill_list_response import SkillListResponse as SkillListResponse
|
||||
from .beta_citation_config import BetaCitationConfig as BetaCitationConfig
|
||||
from .beta_citations_delta import BetaCitationsDelta as BetaCitationsDelta
|
||||
from .beta_signature_delta import BetaSignatureDelta as BetaSignatureDelta
|
||||
from .beta_web_fetch_block import BetaWebFetchBlock as BetaWebFetchBlock
|
||||
from .beta_container_params import BetaContainerParams as BetaContainerParams
|
||||
from .beta_input_json_delta import BetaInputJSONDelta as BetaInputJSONDelta
|
||||
from .beta_text_block_param import BetaTextBlockParam as BetaTextBlockParam
|
||||
from .beta_tool_union_param import BetaToolUnionParam as BetaToolUnionParam
|
||||
from .message_create_params import MessageCreateParams as MessageCreateParams
|
||||
from .skill_create_response import SkillCreateResponse as SkillCreateResponse
|
||||
from .skill_delete_response import SkillDeleteResponse as SkillDeleteResponse
|
||||
from .beta_base64_pdf_source import BetaBase64PDFSource as BetaBase64PDFSource
|
||||
from .beta_image_block_param import BetaImageBlockParam as BetaImageBlockParam
|
||||
from .beta_plain_text_source import BetaPlainTextSource as BetaPlainTextSource
|
||||
from .beta_server_tool_usage import BetaServerToolUsage as BetaServerToolUsage
|
||||
from .beta_tool_choice_param import BetaToolChoiceParam as BetaToolChoiceParam
|
||||
from .beta_mcp_tool_use_block import BetaMCPToolUseBlock as BetaMCPToolUseBlock
|
||||
from .skill_retrieve_response import SkillRetrieveResponse as SkillRetrieveResponse
|
||||
from .beta_content_block_param import BetaContentBlockParam as BetaContentBlockParam
|
||||
from .beta_message_delta_usage import BetaMessageDeltaUsage as BetaMessageDeltaUsage
|
||||
from .beta_text_citation_param import BetaTextCitationParam as BetaTextCitationParam
|
||||
from .beta_message_tokens_count import BetaMessageTokensCount as BetaMessageTokensCount
|
||||
from .beta_thinking_block_param import BetaThinkingBlockParam as BetaThinkingBlockParam
|
||||
from .beta_thinking_turns_param import BetaThinkingTurnsParam as BetaThinkingTurnsParam
|
||||
from .beta_tool_use_block_param import BetaToolUseBlockParam as BetaToolUseBlockParam
|
||||
from .beta_tool_uses_keep_param import BetaToolUsesKeepParam as BetaToolUsesKeepParam
|
||||
from .beta_url_pdf_source_param import BetaURLPDFSourceParam as BetaURLPDFSourceParam
|
||||
from .beta_mcp_tool_result_block import BetaMCPToolResultBlock as BetaMCPToolResultBlock
|
||||
from .beta_server_tool_use_block import BetaServerToolUseBlock as BetaServerToolUseBlock
|
||||
from .beta_thinking_config_param import BetaThinkingConfigParam as BetaThinkingConfigParam
|
||||
from .beta_tool_choice_any_param import BetaToolChoiceAnyParam as BetaToolChoiceAnyParam
|
||||
from .beta_web_fetch_block_param import BetaWebFetchBlockParam as BetaWebFetchBlockParam
|
||||
from .beta_base64_pdf_block_param import BetaBase64PDFBlockParam as BetaBase64PDFBlockParam
|
||||
from .beta_citation_char_location import BetaCitationCharLocation as BetaCitationCharLocation
|
||||
from .beta_citation_page_location import BetaCitationPageLocation as BetaCitationPageLocation
|
||||
from .beta_citations_config_param import BetaCitationsConfigParam as BetaCitationsConfigParam
|
||||
from .beta_container_upload_block import BetaContainerUploadBlock as BetaContainerUploadBlock
|
||||
from .beta_raw_message_stop_event import BetaRawMessageStopEvent as BetaRawMessageStopEvent
|
||||
from .beta_tool_choice_auto_param import BetaToolChoiceAutoParam as BetaToolChoiceAutoParam
|
||||
from .beta_tool_choice_none_param import BetaToolChoiceNoneParam as BetaToolChoiceNoneParam
|
||||
from .beta_tool_choice_tool_param import BetaToolChoiceToolParam as BetaToolChoiceToolParam
|
||||
from .beta_url_image_source_param import BetaURLImageSourceParam as BetaURLImageSourceParam
|
||||
from .message_count_tokens_params import MessageCountTokensParams as MessageCountTokensParams
|
||||
from .beta_base64_pdf_source_param import BetaBase64PDFSourceParam as BetaBase64PDFSourceParam
|
||||
from .beta_file_image_source_param import BetaFileImageSourceParam as BetaFileImageSourceParam
|
||||
from .beta_plain_text_source_param import BetaPlainTextSourceParam as BetaPlainTextSourceParam
|
||||
from .beta_raw_content_block_delta import BetaRawContentBlockDelta as BetaRawContentBlockDelta
|
||||
from .beta_raw_message_delta_event import BetaRawMessageDeltaEvent as BetaRawMessageDeltaEvent
|
||||
from .beta_raw_message_start_event import BetaRawMessageStartEvent as BetaRawMessageStartEvent
|
||||
from .beta_redacted_thinking_block import BetaRedactedThinkingBlock as BetaRedactedThinkingBlock
|
||||
from .beta_tool_result_block_param import BetaToolResultBlockParam as BetaToolResultBlockParam
|
||||
from .beta_tool_uses_trigger_param import BetaToolUsesTriggerParam as BetaToolUsesTriggerParam
|
||||
from .beta_web_search_result_block import BetaWebSearchResultBlock as BetaWebSearchResultBlock
|
||||
from .beta_all_thinking_turns_param import BetaAllThinkingTurnsParam as BetaAllThinkingTurnsParam
|
||||
from .beta_json_output_format_param import BetaJSONOutputFormatParam as BetaJSONOutputFormatParam
|
||||
from .beta_mcp_tool_use_block_param import BetaMCPToolUseBlockParam as BetaMCPToolUseBlockParam
|
||||
from .beta_raw_message_stream_event import BetaRawMessageStreamEvent as BetaRawMessageStreamEvent
|
||||
from .beta_tool_bash_20241022_param import BetaToolBash20241022Param as BetaToolBash20241022Param
|
||||
from .beta_tool_bash_20250124_param import BetaToolBash20250124Param as BetaToolBash20250124Param
|
||||
from .beta_base64_image_source_param import BetaBase64ImageSourceParam as BetaBase64ImageSourceParam
|
||||
from .beta_search_result_block_param import BetaSearchResultBlockParam as BetaSearchResultBlockParam
|
||||
from .beta_content_block_source_param import BetaContentBlockSourceParam as BetaContentBlockSourceParam
|
||||
from .beta_file_document_source_param import BetaFileDocumentSourceParam as BetaFileDocumentSourceParam
|
||||
from .beta_input_tokens_trigger_param import BetaInputTokensTriggerParam as BetaInputTokensTriggerParam
|
||||
from .beta_memory_tool_20250818_param import BetaMemoryTool20250818Param as BetaMemoryTool20250818Param
|
||||
from .beta_code_execution_output_block import BetaCodeExecutionOutputBlock as BetaCodeExecutionOutputBlock
|
||||
from .beta_code_execution_result_block import BetaCodeExecutionResultBlock as BetaCodeExecutionResultBlock
|
||||
from .beta_context_management_response import BetaContextManagementResponse as BetaContextManagementResponse
|
||||
from .beta_server_tool_use_block_param import BetaServerToolUseBlockParam as BetaServerToolUseBlockParam
|
||||
from .beta_web_fetch_tool_result_block import BetaWebFetchToolResultBlock as BetaWebFetchToolResultBlock
|
||||
from .beta_citation_char_location_param import BetaCitationCharLocationParam as BetaCitationCharLocationParam
|
||||
from .beta_citation_page_location_param import BetaCitationPageLocationParam as BetaCitationPageLocationParam
|
||||
from .beta_container_upload_block_param import BetaContainerUploadBlockParam as BetaContainerUploadBlockParam
|
||||
from .beta_memory_tool_20250818_command import BetaMemoryTool20250818Command as BetaMemoryTool20250818Command
|
||||
from .beta_raw_content_block_stop_event import BetaRawContentBlockStopEvent as BetaRawContentBlockStopEvent
|
||||
from .beta_request_document_block_param import BetaRequestDocumentBlockParam as BetaRequestDocumentBlockParam
|
||||
from .beta_web_search_tool_result_block import BetaWebSearchToolResultBlock as BetaWebSearchToolResultBlock
|
||||
from .beta_web_search_tool_result_error import BetaWebSearchToolResultError as BetaWebSearchToolResultError
|
||||
from .beta_cache_control_ephemeral_param import BetaCacheControlEphemeralParam as BetaCacheControlEphemeralParam
|
||||
from .beta_raw_content_block_delta_event import BetaRawContentBlockDeltaEvent as BetaRawContentBlockDeltaEvent
|
||||
from .beta_raw_content_block_start_event import BetaRawContentBlockStartEvent as BetaRawContentBlockStartEvent
|
||||
from .beta_redacted_thinking_block_param import BetaRedactedThinkingBlockParam as BetaRedactedThinkingBlockParam
|
||||
from .beta_thinking_config_enabled_param import BetaThinkingConfigEnabledParam as BetaThinkingConfigEnabledParam
|
||||
from .beta_web_fetch_tool_20250910_param import BetaWebFetchTool20250910Param as BetaWebFetchTool20250910Param
|
||||
from .beta_web_search_result_block_param import BetaWebSearchResultBlockParam as BetaWebSearchResultBlockParam
|
||||
from .beta_thinking_config_disabled_param import BetaThinkingConfigDisabledParam as BetaThinkingConfigDisabledParam
|
||||
from .beta_web_search_tool_20250305_param import BetaWebSearchTool20250305Param as BetaWebSearchTool20250305Param
|
||||
from .beta_citation_content_block_location import BetaCitationContentBlockLocation as BetaCitationContentBlockLocation
|
||||
from .beta_citation_search_result_location import BetaCitationSearchResultLocation as BetaCitationSearchResultLocation
|
||||
from .beta_context_management_config_param import BetaContextManagementConfigParam as BetaContextManagementConfigParam
|
||||
from .beta_tool_text_editor_20241022_param import BetaToolTextEditor20241022Param as BetaToolTextEditor20241022Param
|
||||
from .beta_tool_text_editor_20250124_param import BetaToolTextEditor20250124Param as BetaToolTextEditor20250124Param
|
||||
from .beta_tool_text_editor_20250429_param import BetaToolTextEditor20250429Param as BetaToolTextEditor20250429Param
|
||||
from .beta_tool_text_editor_20250728_param import BetaToolTextEditor20250728Param as BetaToolTextEditor20250728Param
|
||||
from .beta_bash_code_execution_output_block import BetaBashCodeExecutionOutputBlock as BetaBashCodeExecutionOutputBlock
|
||||
from .beta_bash_code_execution_result_block import BetaBashCodeExecutionResultBlock as BetaBashCodeExecutionResultBlock
|
||||
from .beta_code_execution_tool_result_block import BetaCodeExecutionToolResultBlock as BetaCodeExecutionToolResultBlock
|
||||
from .beta_code_execution_tool_result_error import BetaCodeExecutionToolResultError as BetaCodeExecutionToolResultError
|
||||
from .beta_tool_computer_use_20241022_param import BetaToolComputerUse20241022Param as BetaToolComputerUse20241022Param
|
||||
from .beta_tool_computer_use_20250124_param import BetaToolComputerUse20250124Param as BetaToolComputerUse20250124Param
|
||||
from .beta_web_fetch_tool_result_error_code import BetaWebFetchToolResultErrorCode as BetaWebFetchToolResultErrorCode
|
||||
from .beta_code_execution_output_block_param import (
|
||||
BetaCodeExecutionOutputBlockParam as BetaCodeExecutionOutputBlockParam,
|
||||
)
|
||||
from .beta_code_execution_result_block_param import (
|
||||
BetaCodeExecutionResultBlockParam as BetaCodeExecutionResultBlockParam,
|
||||
)
|
||||
from .beta_input_tokens_clear_at_least_param import BetaInputTokensClearAtLeastParam as BetaInputTokensClearAtLeastParam
|
||||
from .beta_memory_tool_20250818_view_command import (
|
||||
BetaMemoryTool20250818ViewCommand as BetaMemoryTool20250818ViewCommand,
|
||||
)
|
||||
from .beta_web_fetch_tool_result_block_param import BetaWebFetchToolResultBlockParam as BetaWebFetchToolResultBlockParam
|
||||
from .beta_web_fetch_tool_result_error_block import BetaWebFetchToolResultErrorBlock as BetaWebFetchToolResultErrorBlock
|
||||
from .beta_web_search_tool_result_error_code import BetaWebSearchToolResultErrorCode as BetaWebSearchToolResultErrorCode
|
||||
from .beta_clear_thinking_20251015_edit_param import (
|
||||
BetaClearThinking20251015EditParam as BetaClearThinking20251015EditParam,
|
||||
)
|
||||
from .beta_code_execution_tool_20250522_param import (
|
||||
BetaCodeExecutionTool20250522Param as BetaCodeExecutionTool20250522Param,
|
||||
)
|
||||
from .beta_code_execution_tool_20250825_param import (
|
||||
BetaCodeExecutionTool20250825Param as BetaCodeExecutionTool20250825Param,
|
||||
)
|
||||
from .beta_content_block_source_content_param import (
|
||||
BetaContentBlockSourceContentParam as BetaContentBlockSourceContentParam,
|
||||
)
|
||||
from .beta_web_search_tool_result_block_param import (
|
||||
BetaWebSearchToolResultBlockParam as BetaWebSearchToolResultBlockParam,
|
||||
)
|
||||
from .beta_clear_tool_uses_20250919_edit_param import (
|
||||
BetaClearToolUses20250919EditParam as BetaClearToolUses20250919EditParam,
|
||||
)
|
||||
from .beta_memory_tool_20250818_create_command import (
|
||||
BetaMemoryTool20250818CreateCommand as BetaMemoryTool20250818CreateCommand,
|
||||
)
|
||||
from .beta_memory_tool_20250818_delete_command import (
|
||||
BetaMemoryTool20250818DeleteCommand as BetaMemoryTool20250818DeleteCommand,
|
||||
)
|
||||
from .beta_memory_tool_20250818_insert_command import (
|
||||
BetaMemoryTool20250818InsertCommand as BetaMemoryTool20250818InsertCommand,
|
||||
)
|
||||
from .beta_memory_tool_20250818_rename_command import (
|
||||
BetaMemoryTool20250818RenameCommand as BetaMemoryTool20250818RenameCommand,
|
||||
)
|
||||
from .beta_request_mcp_tool_result_block_param import (
|
||||
BetaRequestMCPToolResultBlockParam as BetaRequestMCPToolResultBlockParam,
|
||||
)
|
||||
from .beta_web_search_tool_request_error_param import (
|
||||
BetaWebSearchToolRequestErrorParam as BetaWebSearchToolRequestErrorParam,
|
||||
)
|
||||
from .beta_citations_web_search_result_location import (
|
||||
BetaCitationsWebSearchResultLocation as BetaCitationsWebSearchResultLocation,
|
||||
)
|
||||
from .beta_web_search_tool_result_block_content import (
|
||||
BetaWebSearchToolResultBlockContent as BetaWebSearchToolResultBlockContent,
|
||||
)
|
||||
from .beta_bash_code_execution_tool_result_block import (
|
||||
BetaBashCodeExecutionToolResultBlock as BetaBashCodeExecutionToolResultBlock,
|
||||
)
|
||||
from .beta_bash_code_execution_tool_result_error import (
|
||||
BetaBashCodeExecutionToolResultError as BetaBashCodeExecutionToolResultError,
|
||||
)
|
||||
from .beta_citation_content_block_location_param import (
|
||||
BetaCitationContentBlockLocationParam as BetaCitationContentBlockLocationParam,
|
||||
)
|
||||
from .beta_citation_search_result_location_param import (
|
||||
BetaCitationSearchResultLocationParam as BetaCitationSearchResultLocationParam,
|
||||
)
|
||||
from .beta_clear_thinking_20251015_edit_response import (
|
||||
BetaClearThinking20251015EditResponse as BetaClearThinking20251015EditResponse,
|
||||
)
|
||||
from .beta_code_execution_tool_result_error_code import (
|
||||
BetaCodeExecutionToolResultErrorCode as BetaCodeExecutionToolResultErrorCode,
|
||||
)
|
||||
from .beta_bash_code_execution_output_block_param import (
|
||||
BetaBashCodeExecutionOutputBlockParam as BetaBashCodeExecutionOutputBlockParam,
|
||||
)
|
||||
from .beta_bash_code_execution_result_block_param import (
|
||||
BetaBashCodeExecutionResultBlockParam as BetaBashCodeExecutionResultBlockParam,
|
||||
)
|
||||
from .beta_clear_tool_uses_20250919_edit_response import (
|
||||
BetaClearToolUses20250919EditResponse as BetaClearToolUses20250919EditResponse,
|
||||
)
|
||||
from .beta_code_execution_tool_result_block_param import (
|
||||
BetaCodeExecutionToolResultBlockParam as BetaCodeExecutionToolResultBlockParam,
|
||||
)
|
||||
from .beta_code_execution_tool_result_error_param import (
|
||||
BetaCodeExecutionToolResultErrorParam as BetaCodeExecutionToolResultErrorParam,
|
||||
)
|
||||
from .beta_request_mcp_server_url_definition_param import (
|
||||
BetaRequestMCPServerURLDefinitionParam as BetaRequestMCPServerURLDefinitionParam,
|
||||
)
|
||||
from .beta_web_fetch_tool_result_error_block_param import (
|
||||
BetaWebFetchToolResultErrorBlockParam as BetaWebFetchToolResultErrorBlockParam,
|
||||
)
|
||||
from .beta_code_execution_tool_result_block_content import (
|
||||
BetaCodeExecutionToolResultBlockContent as BetaCodeExecutionToolResultBlockContent,
|
||||
)
|
||||
from .beta_count_tokens_context_management_response import (
|
||||
BetaCountTokensContextManagementResponse as BetaCountTokensContextManagementResponse,
|
||||
)
|
||||
from .beta_memory_tool_20250818_str_replace_command import (
|
||||
BetaMemoryTool20250818StrReplaceCommand as BetaMemoryTool20250818StrReplaceCommand,
|
||||
)
|
||||
from .beta_citation_web_search_result_location_param import (
|
||||
BetaCitationWebSearchResultLocationParam as BetaCitationWebSearchResultLocationParam,
|
||||
)
|
||||
from .beta_bash_code_execution_tool_result_block_param import (
|
||||
BetaBashCodeExecutionToolResultBlockParam as BetaBashCodeExecutionToolResultBlockParam,
|
||||
)
|
||||
from .beta_bash_code_execution_tool_result_error_param import (
|
||||
BetaBashCodeExecutionToolResultErrorParam as BetaBashCodeExecutionToolResultErrorParam,
|
||||
)
|
||||
from .beta_request_mcp_server_tool_configuration_param import (
|
||||
BetaRequestMCPServerToolConfigurationParam as BetaRequestMCPServerToolConfigurationParam,
|
||||
)
|
||||
from .beta_text_editor_code_execution_tool_result_block import (
|
||||
BetaTextEditorCodeExecutionToolResultBlock as BetaTextEditorCodeExecutionToolResultBlock,
|
||||
)
|
||||
from .beta_text_editor_code_execution_tool_result_error import (
|
||||
BetaTextEditorCodeExecutionToolResultError as BetaTextEditorCodeExecutionToolResultError,
|
||||
)
|
||||
from .beta_text_editor_code_execution_view_result_block import (
|
||||
BetaTextEditorCodeExecutionViewResultBlock as BetaTextEditorCodeExecutionViewResultBlock,
|
||||
)
|
||||
from .beta_text_editor_code_execution_create_result_block import (
|
||||
BetaTextEditorCodeExecutionCreateResultBlock as BetaTextEditorCodeExecutionCreateResultBlock,
|
||||
)
|
||||
from .beta_web_search_tool_result_block_param_content_param import (
|
||||
BetaWebSearchToolResultBlockParamContentParam as BetaWebSearchToolResultBlockParamContentParam,
|
||||
)
|
||||
from .beta_text_editor_code_execution_tool_result_block_param import (
|
||||
BetaTextEditorCodeExecutionToolResultBlockParam as BetaTextEditorCodeExecutionToolResultBlockParam,
|
||||
)
|
||||
from .beta_text_editor_code_execution_tool_result_error_param import (
|
||||
BetaTextEditorCodeExecutionToolResultErrorParam as BetaTextEditorCodeExecutionToolResultErrorParam,
|
||||
)
|
||||
from .beta_text_editor_code_execution_view_result_block_param import (
|
||||
BetaTextEditorCodeExecutionViewResultBlockParam as BetaTextEditorCodeExecutionViewResultBlockParam,
|
||||
)
|
||||
from .beta_text_editor_code_execution_str_replace_result_block import (
|
||||
BetaTextEditorCodeExecutionStrReplaceResultBlock as BetaTextEditorCodeExecutionStrReplaceResultBlock,
|
||||
)
|
||||
from .beta_code_execution_tool_result_block_param_content_param import (
|
||||
BetaCodeExecutionToolResultBlockParamContentParam as BetaCodeExecutionToolResultBlockParamContentParam,
|
||||
)
|
||||
from .beta_text_editor_code_execution_create_result_block_param import (
|
||||
BetaTextEditorCodeExecutionCreateResultBlockParam as BetaTextEditorCodeExecutionCreateResultBlockParam,
|
||||
)
|
||||
from .beta_text_editor_code_execution_str_replace_result_block_param import (
|
||||
BetaTextEditorCodeExecutionStrReplaceResultBlockParam as BetaTextEditorCodeExecutionStrReplaceResultBlockParam,
|
||||
)
|
||||
@@ -0,0 +1,11 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing_extensions import Literal, Required, TypedDict
|
||||
|
||||
__all__ = ["BetaAllThinkingTurnsParam"]
|
||||
|
||||
|
||||
class BetaAllThinkingTurnsParam(TypedDict, total=False):
|
||||
type: Required[Literal["all"]]
|
||||
@@ -0,0 +1,23 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Union
|
||||
from typing_extensions import Literal, Required, Annotated, TypedDict
|
||||
|
||||
from ..._types import Base64FileInput
|
||||
from ..._utils import PropertyInfo
|
||||
from ..._models import set_pydantic_config
|
||||
|
||||
__all__ = ["BetaBase64ImageSourceParam"]
|
||||
|
||||
|
||||
class BetaBase64ImageSourceParam(TypedDict, total=False):
|
||||
data: Required[Annotated[Union[str, Base64FileInput], PropertyInfo(format="base64")]]
|
||||
|
||||
media_type: Required[Literal["image/jpeg", "image/png", "image/gif", "image/webp"]]
|
||||
|
||||
type: Required[Literal["base64"]]
|
||||
|
||||
|
||||
set_pydantic_config(BetaBase64ImageSourceParam, {"arbitrary_types_allowed": True})
|
||||
@@ -0,0 +1,7 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from .beta_request_document_block_param import BetaRequestDocumentBlockParam
|
||||
|
||||
BetaBase64PDFBlockParam = BetaRequestDocumentBlockParam
|
||||
@@ -0,0 +1,15 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from typing_extensions import Literal
|
||||
|
||||
from ..._models import BaseModel
|
||||
|
||||
__all__ = ["BetaBase64PDFSource"]
|
||||
|
||||
|
||||
class BetaBase64PDFSource(BaseModel):
|
||||
data: str
|
||||
|
||||
media_type: Literal["application/pdf"]
|
||||
|
||||
type: Literal["base64"]
|
||||
@@ -0,0 +1,23 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Union
|
||||
from typing_extensions import Literal, Required, Annotated, TypedDict
|
||||
|
||||
from ..._types import Base64FileInput
|
||||
from ..._utils import PropertyInfo
|
||||
from ..._models import set_pydantic_config
|
||||
|
||||
__all__ = ["BetaBase64PDFSourceParam"]
|
||||
|
||||
|
||||
class BetaBase64PDFSourceParam(TypedDict, total=False):
|
||||
data: Required[Annotated[Union[str, Base64FileInput], PropertyInfo(format="base64")]]
|
||||
|
||||
media_type: Required[Literal["application/pdf"]]
|
||||
|
||||
type: Required[Literal["base64"]]
|
||||
|
||||
|
||||
set_pydantic_config(BetaBase64PDFSourceParam, {"arbitrary_types_allowed": True})
|
||||
@@ -0,0 +1,13 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from typing_extensions import Literal
|
||||
|
||||
from ..._models import BaseModel
|
||||
|
||||
__all__ = ["BetaBashCodeExecutionOutputBlock"]
|
||||
|
||||
|
||||
class BetaBashCodeExecutionOutputBlock(BaseModel):
|
||||
file_id: str
|
||||
|
||||
type: Literal["bash_code_execution_output"]
|
||||
@@ -0,0 +1,13 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing_extensions import Literal, Required, TypedDict
|
||||
|
||||
__all__ = ["BetaBashCodeExecutionOutputBlockParam"]
|
||||
|
||||
|
||||
class BetaBashCodeExecutionOutputBlockParam(TypedDict, total=False):
|
||||
file_id: Required[str]
|
||||
|
||||
type: Required[Literal["bash_code_execution_output"]]
|
||||
@@ -0,0 +1,21 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from typing import List
|
||||
from typing_extensions import Literal
|
||||
|
||||
from ..._models import BaseModel
|
||||
from .beta_bash_code_execution_output_block import BetaBashCodeExecutionOutputBlock
|
||||
|
||||
__all__ = ["BetaBashCodeExecutionResultBlock"]
|
||||
|
||||
|
||||
class BetaBashCodeExecutionResultBlock(BaseModel):
|
||||
content: List[BetaBashCodeExecutionOutputBlock]
|
||||
|
||||
return_code: int
|
||||
|
||||
stderr: str
|
||||
|
||||
stdout: str
|
||||
|
||||
type: Literal["bash_code_execution_result"]
|
||||
@@ -0,0 +1,22 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Iterable
|
||||
from typing_extensions import Literal, Required, TypedDict
|
||||
|
||||
from .beta_bash_code_execution_output_block_param import BetaBashCodeExecutionOutputBlockParam
|
||||
|
||||
__all__ = ["BetaBashCodeExecutionResultBlockParam"]
|
||||
|
||||
|
||||
class BetaBashCodeExecutionResultBlockParam(TypedDict, total=False):
|
||||
content: Required[Iterable[BetaBashCodeExecutionOutputBlockParam]]
|
||||
|
||||
return_code: Required[int]
|
||||
|
||||
stderr: Required[str]
|
||||
|
||||
stdout: Required[str]
|
||||
|
||||
type: Required[Literal["bash_code_execution_result"]]
|
||||
@@ -0,0 +1,20 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from typing import Union
|
||||
from typing_extensions import Literal, TypeAlias
|
||||
|
||||
from ..._models import BaseModel
|
||||
from .beta_bash_code_execution_result_block import BetaBashCodeExecutionResultBlock
|
||||
from .beta_bash_code_execution_tool_result_error import BetaBashCodeExecutionToolResultError
|
||||
|
||||
__all__ = ["BetaBashCodeExecutionToolResultBlock", "Content"]
|
||||
|
||||
Content: TypeAlias = Union[BetaBashCodeExecutionToolResultError, BetaBashCodeExecutionResultBlock]
|
||||
|
||||
|
||||
class BetaBashCodeExecutionToolResultBlock(BaseModel):
|
||||
content: Content
|
||||
|
||||
tool_use_id: str
|
||||
|
||||
type: Literal["bash_code_execution_tool_result"]
|
||||
@@ -0,0 +1,25 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Union, Optional
|
||||
from typing_extensions import Literal, Required, TypeAlias, TypedDict
|
||||
|
||||
from .beta_cache_control_ephemeral_param import BetaCacheControlEphemeralParam
|
||||
from .beta_bash_code_execution_result_block_param import BetaBashCodeExecutionResultBlockParam
|
||||
from .beta_bash_code_execution_tool_result_error_param import BetaBashCodeExecutionToolResultErrorParam
|
||||
|
||||
__all__ = ["BetaBashCodeExecutionToolResultBlockParam", "Content"]
|
||||
|
||||
Content: TypeAlias = Union[BetaBashCodeExecutionToolResultErrorParam, BetaBashCodeExecutionResultBlockParam]
|
||||
|
||||
|
||||
class BetaBashCodeExecutionToolResultBlockParam(TypedDict, total=False):
|
||||
content: Required[Content]
|
||||
|
||||
tool_use_id: Required[str]
|
||||
|
||||
type: Required[Literal["bash_code_execution_tool_result"]]
|
||||
|
||||
cache_control: Optional[BetaCacheControlEphemeralParam]
|
||||
"""Create a cache control breakpoint at this content block."""
|
||||
@@ -0,0 +1,15 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from typing_extensions import Literal
|
||||
|
||||
from ..._models import BaseModel
|
||||
|
||||
__all__ = ["BetaBashCodeExecutionToolResultError"]
|
||||
|
||||
|
||||
class BetaBashCodeExecutionToolResultError(BaseModel):
|
||||
error_code: Literal[
|
||||
"invalid_tool_input", "unavailable", "too_many_requests", "execution_time_exceeded", "output_file_too_large"
|
||||
]
|
||||
|
||||
type: Literal["bash_code_execution_tool_result_error"]
|
||||
@@ -0,0 +1,17 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing_extensions import Literal, Required, TypedDict
|
||||
|
||||
__all__ = ["BetaBashCodeExecutionToolResultErrorParam"]
|
||||
|
||||
|
||||
class BetaBashCodeExecutionToolResultErrorParam(TypedDict, total=False):
|
||||
error_code: Required[
|
||||
Literal[
|
||||
"invalid_tool_input", "unavailable", "too_many_requests", "execution_time_exceeded", "output_file_too_large"
|
||||
]
|
||||
]
|
||||
|
||||
type: Required[Literal["bash_code_execution_tool_result_error"]]
|
||||
@@ -0,0 +1,22 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing_extensions import Literal, Required, TypedDict
|
||||
|
||||
__all__ = ["BetaCacheControlEphemeralParam"]
|
||||
|
||||
|
||||
class BetaCacheControlEphemeralParam(TypedDict, total=False):
|
||||
type: Required[Literal["ephemeral"]]
|
||||
|
||||
ttl: Literal["5m", "1h"]
|
||||
"""The time-to-live for the cache control breakpoint.
|
||||
|
||||
This may be one the following values:
|
||||
|
||||
- `5m`: 5 minutes
|
||||
- `1h`: 1 hour
|
||||
|
||||
Defaults to `5m`.
|
||||
"""
|
||||
@@ -0,0 +1,13 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from ..._models import BaseModel
|
||||
|
||||
__all__ = ["BetaCacheCreation"]
|
||||
|
||||
|
||||
class BetaCacheCreation(BaseModel):
|
||||
ephemeral_1h_input_tokens: int
|
||||
"""The number of input tokens used to create the 1 hour cache entry."""
|
||||
|
||||
ephemeral_5m_input_tokens: int
|
||||
"""The number of input tokens used to create the 5 minute cache entry."""
|
||||
@@ -0,0 +1,24 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from typing import Optional
|
||||
from typing_extensions import Literal
|
||||
|
||||
from ..._models import BaseModel
|
||||
|
||||
__all__ = ["BetaCitationCharLocation"]
|
||||
|
||||
|
||||
class BetaCitationCharLocation(BaseModel):
|
||||
cited_text: str
|
||||
|
||||
document_index: int
|
||||
|
||||
document_title: Optional[str] = None
|
||||
|
||||
end_char_index: int
|
||||
|
||||
file_id: Optional[str] = None
|
||||
|
||||
start_char_index: int
|
||||
|
||||
type: Literal["char_location"]
|
||||
@@ -0,0 +1,22 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Optional
|
||||
from typing_extensions import Literal, Required, TypedDict
|
||||
|
||||
__all__ = ["BetaCitationCharLocationParam"]
|
||||
|
||||
|
||||
class BetaCitationCharLocationParam(TypedDict, total=False):
|
||||
cited_text: Required[str]
|
||||
|
||||
document_index: Required[int]
|
||||
|
||||
document_title: Required[Optional[str]]
|
||||
|
||||
end_char_index: Required[int]
|
||||
|
||||
start_char_index: Required[int]
|
||||
|
||||
type: Required[Literal["char_location"]]
|
||||
@@ -0,0 +1,9 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from ..._models import BaseModel
|
||||
|
||||
__all__ = ["BetaCitationConfig"]
|
||||
|
||||
|
||||
class BetaCitationConfig(BaseModel):
|
||||
enabled: bool
|
||||
@@ -0,0 +1,24 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from typing import Optional
|
||||
from typing_extensions import Literal
|
||||
|
||||
from ..._models import BaseModel
|
||||
|
||||
__all__ = ["BetaCitationContentBlockLocation"]
|
||||
|
||||
|
||||
class BetaCitationContentBlockLocation(BaseModel):
|
||||
cited_text: str
|
||||
|
||||
document_index: int
|
||||
|
||||
document_title: Optional[str] = None
|
||||
|
||||
end_block_index: int
|
||||
|
||||
file_id: Optional[str] = None
|
||||
|
||||
start_block_index: int
|
||||
|
||||
type: Literal["content_block_location"]
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user