chore: add virtual environment to the repository

- Add the backend_service/venv virtual environment
- Includes all Python dependency packages
- Note: the virtual environment is roughly 393 MB and contains 12,655 files
@@ -0,0 +1,61 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from .beta import (
    Beta,
    AsyncBeta,
    BetaWithRawResponse,
    AsyncBetaWithRawResponse,
    BetaWithStreamingResponse,
    AsyncBetaWithStreamingResponse,
)
from .chatkit import (
    ChatKit,
    AsyncChatKit,
    ChatKitWithRawResponse,
    AsyncChatKitWithRawResponse,
    ChatKitWithStreamingResponse,
    AsyncChatKitWithStreamingResponse,
)
from .threads import (
    Threads,
    AsyncThreads,
    ThreadsWithRawResponse,
    AsyncThreadsWithRawResponse,
    ThreadsWithStreamingResponse,
    AsyncThreadsWithStreamingResponse,
)
from .assistants import (
    Assistants,
    AsyncAssistants,
    AssistantsWithRawResponse,
    AsyncAssistantsWithRawResponse,
    AssistantsWithStreamingResponse,
    AsyncAssistantsWithStreamingResponse,
)

__all__ = [
    "ChatKit",
    "AsyncChatKit",
    "ChatKitWithRawResponse",
    "AsyncChatKitWithRawResponse",
    "ChatKitWithStreamingResponse",
    "AsyncChatKitWithStreamingResponse",
    "Assistants",
    "AsyncAssistants",
    "AssistantsWithRawResponse",
    "AsyncAssistantsWithRawResponse",
    "AssistantsWithStreamingResponse",
    "AsyncAssistantsWithStreamingResponse",
    "Threads",
    "AsyncThreads",
    "ThreadsWithRawResponse",
    "AsyncThreadsWithRawResponse",
    "ThreadsWithStreamingResponse",
    "AsyncThreadsWithStreamingResponse",
    "Beta",
    "AsyncBeta",
    "BetaWithRawResponse",
    "AsyncBetaWithRawResponse",
    "BetaWithStreamingResponse",
    "AsyncBetaWithStreamingResponse",
]
Binary file not shown.
Binary file not shown.
Binary file not shown.
File diff suppressed because it is too large
@@ -0,0 +1,187 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from ..._compat import cached_property
from .assistants import (
    Assistants,
    AsyncAssistants,
    AssistantsWithRawResponse,
    AsyncAssistantsWithRawResponse,
    AssistantsWithStreamingResponse,
    AsyncAssistantsWithStreamingResponse,
)
from ..._resource import SyncAPIResource, AsyncAPIResource
from .chatkit.chatkit import (
    ChatKit,
    AsyncChatKit,
    ChatKitWithRawResponse,
    AsyncChatKitWithRawResponse,
    ChatKitWithStreamingResponse,
    AsyncChatKitWithStreamingResponse,
)
from .threads.threads import (
    Threads,
    AsyncThreads,
    ThreadsWithRawResponse,
    AsyncThreadsWithRawResponse,
    ThreadsWithStreamingResponse,
    AsyncThreadsWithStreamingResponse,
)
from ...resources.chat import Chat, AsyncChat
from .realtime.realtime import (
    Realtime,
    AsyncRealtime,
)

__all__ = ["Beta", "AsyncBeta"]


class Beta(SyncAPIResource):
    @cached_property
    def chat(self) -> Chat:
        return Chat(self._client)

    @cached_property
    def realtime(self) -> Realtime:
        return Realtime(self._client)

    @cached_property
    def chatkit(self) -> ChatKit:
        return ChatKit(self._client)

    @cached_property
    def assistants(self) -> Assistants:
        return Assistants(self._client)

    @cached_property
    def threads(self) -> Threads:
        return Threads(self._client)

    @cached_property
    def with_raw_response(self) -> BetaWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return BetaWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> BetaWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return BetaWithStreamingResponse(self)


class AsyncBeta(AsyncAPIResource):
    @cached_property
    def chat(self) -> AsyncChat:
        return AsyncChat(self._client)

    @cached_property
    def realtime(self) -> AsyncRealtime:
        return AsyncRealtime(self._client)

    @cached_property
    def chatkit(self) -> AsyncChatKit:
        return AsyncChatKit(self._client)

    @cached_property
    def assistants(self) -> AsyncAssistants:
        return AsyncAssistants(self._client)

    @cached_property
    def threads(self) -> AsyncThreads:
        return AsyncThreads(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncBetaWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncBetaWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncBetaWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncBetaWithStreamingResponse(self)


class BetaWithRawResponse:
    def __init__(self, beta: Beta) -> None:
        self._beta = beta

    @cached_property
    def chatkit(self) -> ChatKitWithRawResponse:
        return ChatKitWithRawResponse(self._beta.chatkit)

    @cached_property
    def assistants(self) -> AssistantsWithRawResponse:
        return AssistantsWithRawResponse(self._beta.assistants)

    @cached_property
    def threads(self) -> ThreadsWithRawResponse:
        return ThreadsWithRawResponse(self._beta.threads)


class AsyncBetaWithRawResponse:
    def __init__(self, beta: AsyncBeta) -> None:
        self._beta = beta

    @cached_property
    def chatkit(self) -> AsyncChatKitWithRawResponse:
        return AsyncChatKitWithRawResponse(self._beta.chatkit)

    @cached_property
    def assistants(self) -> AsyncAssistantsWithRawResponse:
        return AsyncAssistantsWithRawResponse(self._beta.assistants)

    @cached_property
    def threads(self) -> AsyncThreadsWithRawResponse:
        return AsyncThreadsWithRawResponse(self._beta.threads)


class BetaWithStreamingResponse:
    def __init__(self, beta: Beta) -> None:
        self._beta = beta

    @cached_property
    def chatkit(self) -> ChatKitWithStreamingResponse:
        return ChatKitWithStreamingResponse(self._beta.chatkit)

    @cached_property
    def assistants(self) -> AssistantsWithStreamingResponse:
        return AssistantsWithStreamingResponse(self._beta.assistants)

    @cached_property
    def threads(self) -> ThreadsWithStreamingResponse:
        return ThreadsWithStreamingResponse(self._beta.threads)


class AsyncBetaWithStreamingResponse:
    def __init__(self, beta: AsyncBeta) -> None:
        self._beta = beta

    @cached_property
    def chatkit(self) -> AsyncChatKitWithStreamingResponse:
        return AsyncChatKitWithStreamingResponse(self._beta.chatkit)

    @cached_property
    def assistants(self) -> AsyncAssistantsWithStreamingResponse:
        return AsyncAssistantsWithStreamingResponse(self._beta.assistants)

    @cached_property
    def threads(self) -> AsyncThreadsWithStreamingResponse:
        return AsyncThreadsWithStreamingResponse(self._beta.threads)
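
The `with_raw_response` and `with_streaming_response` properties above wrap every nested resource so a call can return the HTTP response object instead of the parsed model, as their docstrings describe. A minimal usage sketch, assuming a configured client and a placeholder thread ID ("cthr_123"); the `.parse()` step is the standard raw-response pattern from the openai-python README:

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Prefixing the call with .with_raw_response yields the raw HTTP response wrapper;
# headers are available directly, and .parse() returns the usual typed object.
raw = client.beta.chatkit.threads.with_raw_response.retrieve("cthr_123")
print(raw.headers.get("x-request-id"))
thread = raw.parse()  # -> ChatKitThread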
@@ -0,0 +1,47 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from .chatkit import (
    ChatKit,
    AsyncChatKit,
    ChatKitWithRawResponse,
    AsyncChatKitWithRawResponse,
    ChatKitWithStreamingResponse,
    AsyncChatKitWithStreamingResponse,
)
from .threads import (
    Threads,
    AsyncThreads,
    ThreadsWithRawResponse,
    AsyncThreadsWithRawResponse,
    ThreadsWithStreamingResponse,
    AsyncThreadsWithStreamingResponse,
)
from .sessions import (
    Sessions,
    AsyncSessions,
    SessionsWithRawResponse,
    AsyncSessionsWithRawResponse,
    SessionsWithStreamingResponse,
    AsyncSessionsWithStreamingResponse,
)

__all__ = [
    "Sessions",
    "AsyncSessions",
    "SessionsWithRawResponse",
    "AsyncSessionsWithRawResponse",
    "SessionsWithStreamingResponse",
    "AsyncSessionsWithStreamingResponse",
    "Threads",
    "AsyncThreads",
    "ThreadsWithRawResponse",
    "AsyncThreadsWithRawResponse",
    "ThreadsWithStreamingResponse",
    "AsyncThreadsWithStreamingResponse",
    "ChatKit",
    "AsyncChatKit",
    "ChatKitWithRawResponse",
    "AsyncChatKitWithRawResponse",
    "ChatKitWithStreamingResponse",
    "AsyncChatKitWithStreamingResponse",
]
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,134 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from .threads import (
    Threads,
    AsyncThreads,
    ThreadsWithRawResponse,
    AsyncThreadsWithRawResponse,
    ThreadsWithStreamingResponse,
    AsyncThreadsWithStreamingResponse,
)
from .sessions import (
    Sessions,
    AsyncSessions,
    SessionsWithRawResponse,
    AsyncSessionsWithRawResponse,
    SessionsWithStreamingResponse,
    AsyncSessionsWithStreamingResponse,
)
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource

__all__ = ["ChatKit", "AsyncChatKit"]


class ChatKit(SyncAPIResource):
    @cached_property
    def sessions(self) -> Sessions:
        return Sessions(self._client)

    @cached_property
    def threads(self) -> Threads:
        return Threads(self._client)

    @cached_property
    def with_raw_response(self) -> ChatKitWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ChatKitWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ChatKitWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ChatKitWithStreamingResponse(self)


class AsyncChatKit(AsyncAPIResource):
    @cached_property
    def sessions(self) -> AsyncSessions:
        return AsyncSessions(self._client)

    @cached_property
    def threads(self) -> AsyncThreads:
        return AsyncThreads(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncChatKitWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncChatKitWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncChatKitWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncChatKitWithStreamingResponse(self)


class ChatKitWithRawResponse:
    def __init__(self, chatkit: ChatKit) -> None:
        self._chatkit = chatkit

    @cached_property
    def sessions(self) -> SessionsWithRawResponse:
        return SessionsWithRawResponse(self._chatkit.sessions)

    @cached_property
    def threads(self) -> ThreadsWithRawResponse:
        return ThreadsWithRawResponse(self._chatkit.threads)


class AsyncChatKitWithRawResponse:
    def __init__(self, chatkit: AsyncChatKit) -> None:
        self._chatkit = chatkit

    @cached_property
    def sessions(self) -> AsyncSessionsWithRawResponse:
        return AsyncSessionsWithRawResponse(self._chatkit.sessions)

    @cached_property
    def threads(self) -> AsyncThreadsWithRawResponse:
        return AsyncThreadsWithRawResponse(self._chatkit.threads)


class ChatKitWithStreamingResponse:
    def __init__(self, chatkit: ChatKit) -> None:
        self._chatkit = chatkit

    @cached_property
    def sessions(self) -> SessionsWithStreamingResponse:
        return SessionsWithStreamingResponse(self._chatkit.sessions)

    @cached_property
    def threads(self) -> ThreadsWithStreamingResponse:
        return ThreadsWithStreamingResponse(self._chatkit.threads)


class AsyncChatKitWithStreamingResponse:
    def __init__(self, chatkit: AsyncChatKit) -> None:
        self._chatkit = chatkit

    @cached_property
    def sessions(self) -> AsyncSessionsWithStreamingResponse:
        return AsyncSessionsWithStreamingResponse(self._chatkit.sessions)

    @cached_property
    def threads(self) -> AsyncThreadsWithStreamingResponse:
        return AsyncThreadsWithStreamingResponse(self._chatkit.threads)
@@ -0,0 +1,301 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

import httpx

from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...._base_client import make_request_options
from ....types.beta.chatkit import (
    ChatSessionWorkflowParam,
    ChatSessionRateLimitsParam,
    ChatSessionExpiresAfterParam,
    ChatSessionChatKitConfigurationParam,
    session_create_params,
)
from ....types.beta.chatkit.chat_session import ChatSession
from ....types.beta.chatkit.chat_session_workflow_param import ChatSessionWorkflowParam
from ....types.beta.chatkit.chat_session_rate_limits_param import ChatSessionRateLimitsParam
from ....types.beta.chatkit.chat_session_expires_after_param import ChatSessionExpiresAfterParam
from ....types.beta.chatkit.chat_session_chatkit_configuration_param import ChatSessionChatKitConfigurationParam

__all__ = ["Sessions", "AsyncSessions"]


class Sessions(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> SessionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return SessionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> SessionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return SessionsWithStreamingResponse(self)

    def create(
        self,
        *,
        user: str,
        workflow: ChatSessionWorkflowParam,
        chatkit_configuration: ChatSessionChatKitConfigurationParam | Omit = omit,
        expires_after: ChatSessionExpiresAfterParam | Omit = omit,
        rate_limits: ChatSessionRateLimitsParam | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatSession:
        """
        Create a ChatKit session

        Args:
          user: A free-form string that identifies your end user; ensures this Session can
              access other objects that have the same `user` scope.

          workflow: Workflow that powers the session.

          chatkit_configuration: Optional overrides for ChatKit runtime configuration features

          expires_after: Optional override for session expiration timing in seconds from creation.
              Defaults to 10 minutes.

          rate_limits: Optional override for per-minute request limits. When omitted, defaults to 10.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._post(
            "/chatkit/sessions",
            body=maybe_transform(
                {
                    "user": user,
                    "workflow": workflow,
                    "chatkit_configuration": chatkit_configuration,
                    "expires_after": expires_after,
                    "rate_limits": rate_limits,
                },
                session_create_params.SessionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatSession,
        )

    def cancel(
        self,
        session_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatSession:
        """
        Cancel a ChatKit session

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not session_id:
            raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}")
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._post(
            f"/chatkit/sessions/{session_id}/cancel",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatSession,
        )


class AsyncSessions(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncSessionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncSessionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncSessionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncSessionsWithStreamingResponse(self)

    async def create(
        self,
        *,
        user: str,
        workflow: ChatSessionWorkflowParam,
        chatkit_configuration: ChatSessionChatKitConfigurationParam | Omit = omit,
        expires_after: ChatSessionExpiresAfterParam | Omit = omit,
        rate_limits: ChatSessionRateLimitsParam | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatSession:
        """
        Create a ChatKit session

        Args:
          user: A free-form string that identifies your end user; ensures this Session can
              access other objects that have the same `user` scope.

          workflow: Workflow that powers the session.

          chatkit_configuration: Optional overrides for ChatKit runtime configuration features

          expires_after: Optional override for session expiration timing in seconds from creation.
              Defaults to 10 minutes.

          rate_limits: Optional override for per-minute request limits. When omitted, defaults to 10.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return await self._post(
            "/chatkit/sessions",
            body=await async_maybe_transform(
                {
                    "user": user,
                    "workflow": workflow,
                    "chatkit_configuration": chatkit_configuration,
                    "expires_after": expires_after,
                    "rate_limits": rate_limits,
                },
                session_create_params.SessionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatSession,
        )

    async def cancel(
        self,
        session_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatSession:
        """
        Cancel a ChatKit session

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not session_id:
            raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}")
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return await self._post(
            f"/chatkit/sessions/{session_id}/cancel",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatSession,
        )


class SessionsWithRawResponse:
    def __init__(self, sessions: Sessions) -> None:
        self._sessions = sessions

        self.create = _legacy_response.to_raw_response_wrapper(
            sessions.create,
        )
        self.cancel = _legacy_response.to_raw_response_wrapper(
            sessions.cancel,
        )


class AsyncSessionsWithRawResponse:
    def __init__(self, sessions: AsyncSessions) -> None:
        self._sessions = sessions

        self.create = _legacy_response.async_to_raw_response_wrapper(
            sessions.create,
        )
        self.cancel = _legacy_response.async_to_raw_response_wrapper(
            sessions.cancel,
        )


class SessionsWithStreamingResponse:
    def __init__(self, sessions: Sessions) -> None:
        self._sessions = sessions

        self.create = to_streamed_response_wrapper(
            sessions.create,
        )
        self.cancel = to_streamed_response_wrapper(
            sessions.cancel,
        )


class AsyncSessionsWithStreamingResponse:
    def __init__(self, sessions: AsyncSessions) -> None:
        self._sessions = sessions

        self.create = async_to_streamed_response_wrapper(
            sessions.create,
        )
        self.cancel = async_to_streamed_response_wrapper(
            sessions.cancel,
        )
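
Taken together, `Sessions.create` posts to `/chatkit/sessions` with the `OpenAI-Beta: chatkit_beta=v1` header added automatically, and only `user` and `workflow` are required; the remaining parameters fall back to the server defaults described in the docstring. A brief sketch; the user string and workflow ID are placeholders, and passing `workflow` as a plain dict with an `id` key is an assumption about the shape of `ChatSessionWorkflowParam`:

from openai import OpenAI

client = OpenAI()

# Only `user` and `workflow` are required; expiry and rate limits use server defaults.
session = client.beta.chatkit.sessions.create(
    user="user_123",               # placeholder end-user identifier
    workflow={"id": "wf_abc123"},  # placeholder workflow ID; assumed dict shape
)

# Cancel the session again by ID (assumes ChatSession exposes an `id` field).
client.beta.chatkit.sessions.cancel(session.id)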
@@ -0,0 +1,521 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Any, cast
from typing_extensions import Literal

import httpx

from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ...._utils import maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....pagination import SyncConversationCursorPage, AsyncConversationCursorPage
from ...._base_client import AsyncPaginator, make_request_options
from ....types.beta.chatkit import thread_list_params, thread_list_items_params
from ....types.beta.chatkit.chatkit_thread import ChatKitThread
from ....types.beta.chatkit.thread_delete_response import ThreadDeleteResponse
from ....types.beta.chatkit.chatkit_thread_item_list import Data

__all__ = ["Threads", "AsyncThreads"]


class Threads(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> ThreadsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ThreadsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ThreadsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ThreadsWithStreamingResponse(self)

    def retrieve(
        self,
        thread_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatKitThread:
        """
        Retrieve a ChatKit thread

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._get(
            f"/chatkit/threads/{thread_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatKitThread,
        )

    def list(
        self,
        *,
        after: str | Omit = omit,
        before: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncConversationCursorPage[ChatKitThread]:
        """
        List ChatKit threads

        Args:
          after: List items created after this thread item ID. Defaults to null for the first
              page.

          before: List items created before this thread item ID. Defaults to null for the newest
              results.

          limit: Maximum number of thread items to return. Defaults to 20.

          order: Sort order for results by creation time. Defaults to `desc`.

          user: Filter threads that belong to this user identifier. Defaults to null to return
              all users.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._get_api_list(
            "/chatkit/threads",
            page=SyncConversationCursorPage[ChatKitThread],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "limit": limit,
                        "order": order,
                        "user": user,
                    },
                    thread_list_params.ThreadListParams,
                ),
            ),
            model=ChatKitThread,
        )

    def delete(
        self,
        thread_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ThreadDeleteResponse:
        """
        Delete a ChatKit thread

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._delete(
            f"/chatkit/threads/{thread_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ThreadDeleteResponse,
        )

    def list_items(
        self,
        thread_id: str,
        *,
        after: str | Omit = omit,
        before: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncConversationCursorPage[Data]:
        """
        List ChatKit thread items

        Args:
          after: List items created after this thread item ID. Defaults to null for the first
              page.

          before: List items created before this thread item ID. Defaults to null for the newest
              results.

          limit: Maximum number of thread items to return. Defaults to 20.

          order: Sort order for results by creation time. Defaults to `desc`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._get_api_list(
            f"/chatkit/threads/{thread_id}/items",
            page=SyncConversationCursorPage[Data],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "limit": limit,
                        "order": order,
                    },
                    thread_list_items_params.ThreadListItemsParams,
                ),
            ),
            model=cast(Any, Data),  # Union types cannot be passed in as arguments in the type system
        )


class AsyncThreads(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncThreadsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncThreadsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncThreadsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncThreadsWithStreamingResponse(self)

    async def retrieve(
        self,
        thread_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatKitThread:
        """
        Retrieve a ChatKit thread

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return await self._get(
            f"/chatkit/threads/{thread_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatKitThread,
        )

    def list(
        self,
        *,
        after: str | Omit = omit,
        before: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[ChatKitThread, AsyncConversationCursorPage[ChatKitThread]]:
        """
        List ChatKit threads

        Args:
          after: List items created after this thread item ID. Defaults to null for the first
              page.

          before: List items created before this thread item ID. Defaults to null for the newest
              results.

          limit: Maximum number of thread items to return. Defaults to 20.

          order: Sort order for results by creation time. Defaults to `desc`.

          user: Filter threads that belong to this user identifier. Defaults to null to return
              all users.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._get_api_list(
            "/chatkit/threads",
            page=AsyncConversationCursorPage[ChatKitThread],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "limit": limit,
                        "order": order,
                        "user": user,
                    },
                    thread_list_params.ThreadListParams,
                ),
            ),
            model=ChatKitThread,
        )

    async def delete(
        self,
        thread_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ThreadDeleteResponse:
        """
        Delete a ChatKit thread

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return await self._delete(
            f"/chatkit/threads/{thread_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ThreadDeleteResponse,
        )

    def list_items(
        self,
        thread_id: str,
        *,
        after: str | Omit = omit,
        before: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[Data, AsyncConversationCursorPage[Data]]:
        """
        List ChatKit thread items

        Args:
          after: List items created after this thread item ID. Defaults to null for the first
              page.

          before: List items created before this thread item ID. Defaults to null for the newest
              results.

          limit: Maximum number of thread items to return. Defaults to 20.

          order: Sort order for results by creation time. Defaults to `desc`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._get_api_list(
            f"/chatkit/threads/{thread_id}/items",
            page=AsyncConversationCursorPage[Data],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "limit": limit,
                        "order": order,
                    },
                    thread_list_items_params.ThreadListItemsParams,
                ),
            ),
            model=cast(Any, Data),  # Union types cannot be passed in as arguments in the type system
        )


class ThreadsWithRawResponse:
    def __init__(self, threads: Threads) -> None:
        self._threads = threads

        self.retrieve = _legacy_response.to_raw_response_wrapper(
            threads.retrieve,
        )
        self.list = _legacy_response.to_raw_response_wrapper(
            threads.list,
        )
        self.delete = _legacy_response.to_raw_response_wrapper(
            threads.delete,
        )
        self.list_items = _legacy_response.to_raw_response_wrapper(
            threads.list_items,
        )


class AsyncThreadsWithRawResponse:
    def __init__(self, threads: AsyncThreads) -> None:
        self._threads = threads

        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
            threads.retrieve,
        )
        self.list = _legacy_response.async_to_raw_response_wrapper(
            threads.list,
        )
        self.delete = _legacy_response.async_to_raw_response_wrapper(
            threads.delete,
        )
        self.list_items = _legacy_response.async_to_raw_response_wrapper(
            threads.list_items,
        )


class ThreadsWithStreamingResponse:
    def __init__(self, threads: Threads) -> None:
        self._threads = threads

        self.retrieve = to_streamed_response_wrapper(
            threads.retrieve,
        )
        self.list = to_streamed_response_wrapper(
            threads.list,
        )
        self.delete = to_streamed_response_wrapper(
            threads.delete,
        )
        self.list_items = to_streamed_response_wrapper(
            threads.list_items,
        )


class AsyncThreadsWithStreamingResponse:
    def __init__(self, threads: AsyncThreads) -> None:
        self._threads = threads

        self.retrieve = async_to_streamed_response_wrapper(
            threads.retrieve,
        )
        self.list = async_to_streamed_response_wrapper(
            threads.list,
        )
        self.delete = async_to_streamed_response_wrapper(
            threads.delete,
        )
        self.list_items = async_to_streamed_response_wrapper(
            threads.list_items,
        )
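
Because `Threads.list` and `Threads.list_items` return cursor pages (`SyncConversationCursorPage`), iterating the returned object walks the `after` cursor and fetches further pages on demand, which is the SDK's standard auto-pagination behaviour. A short sketch with placeholder IDs:

from openai import OpenAI

client = OpenAI()

# Iterate threads for one user; additional pages are fetched automatically.
for thread in client.beta.chatkit.threads.list(user="user_123", limit=20, order="desc"):
    print(thread.id)

# Items inside a single thread paginate the same way ("cthr_123" is a placeholder).
for item in client.beta.chatkit.threads.list_items("cthr_123"):
    print(item)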
@@ -0,0 +1,47 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from .realtime import (
    Realtime,
    AsyncRealtime,
    RealtimeWithRawResponse,
    AsyncRealtimeWithRawResponse,
    RealtimeWithStreamingResponse,
    AsyncRealtimeWithStreamingResponse,
)
from .sessions import (
    Sessions,
    AsyncSessions,
    SessionsWithRawResponse,
    AsyncSessionsWithRawResponse,
    SessionsWithStreamingResponse,
    AsyncSessionsWithStreamingResponse,
)
from .transcription_sessions import (
    TranscriptionSessions,
    AsyncTranscriptionSessions,
    TranscriptionSessionsWithRawResponse,
    AsyncTranscriptionSessionsWithRawResponse,
    TranscriptionSessionsWithStreamingResponse,
    AsyncTranscriptionSessionsWithStreamingResponse,
)

__all__ = [
    "Sessions",
    "AsyncSessions",
    "SessionsWithRawResponse",
    "AsyncSessionsWithRawResponse",
    "SessionsWithStreamingResponse",
    "AsyncSessionsWithStreamingResponse",
    "TranscriptionSessions",
    "AsyncTranscriptionSessions",
    "TranscriptionSessionsWithRawResponse",
    "AsyncTranscriptionSessionsWithRawResponse",
    "TranscriptionSessionsWithStreamingResponse",
    "AsyncTranscriptionSessionsWithStreamingResponse",
    "Realtime",
    "AsyncRealtime",
    "RealtimeWithRawResponse",
    "AsyncRealtimeWithRawResponse",
    "RealtimeWithStreamingResponse",
    "AsyncRealtimeWithStreamingResponse",
]
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
File diff suppressed because it is too large
@@ -0,0 +1,424 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import List, Union, Iterable
from typing_extensions import Literal

import httpx

from .... import _legacy_response
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...._base_client import make_request_options
from ....types.beta.realtime import session_create_params
from ....types.beta.realtime.session_create_response import SessionCreateResponse

__all__ = ["Sessions", "AsyncSessions"]


class Sessions(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> SessionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return SessionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> SessionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return SessionsWithStreamingResponse(self)

    def create(
        self,
        *,
        client_secret: session_create_params.ClientSecret | NotGiven = NOT_GIVEN,
        input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
        input_audio_noise_reduction: session_create_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN,
        input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
        instructions: str | NotGiven = NOT_GIVEN,
        max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
        modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
        model: Literal[
            "gpt-realtime",
            "gpt-realtime-2025-08-28",
            "gpt-4o-realtime-preview",
            "gpt-4o-realtime-preview-2024-10-01",
            "gpt-4o-realtime-preview-2024-12-17",
            "gpt-4o-realtime-preview-2025-06-03",
            "gpt-4o-mini-realtime-preview",
            "gpt-4o-mini-realtime-preview-2024-12-17",
        ]
        | NotGiven = NOT_GIVEN,
        output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
        speed: float | NotGiven = NOT_GIVEN,
        temperature: float | NotGiven = NOT_GIVEN,
        tool_choice: str | NotGiven = NOT_GIVEN,
        tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN,
        tracing: session_create_params.Tracing | NotGiven = NOT_GIVEN,
        turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
        voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]
        | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> SessionCreateResponse:
        """
        Create an ephemeral API token for use in client-side applications with the
        Realtime API. Can be configured with the same session parameters as the
        `session.update` client event.

        It responds with a session object, plus a `client_secret` key which contains a
        usable ephemeral API token that can be used to authenticate browser clients for
        the Realtime API.

        Args:
          client_secret: Configuration options for the generated client secret.

          input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
              `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
              (mono), and little-endian byte order.

          input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
              off. Noise reduction filters audio added to the input audio buffer before it is
              sent to VAD and the model. Filtering the audio can improve VAD and turn
              detection accuracy (reducing false positives) and model performance by improving
              perception of the input audio.

          input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
              `null` to turn off once on. Input audio transcription is not native to the
              model, since the model consumes audio directly. Transcription runs
              asynchronously through
              [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
              and should be treated as guidance of input audio content rather than precisely
              what the model heard. The client can optionally set the language and prompt for
              transcription, these offer additional guidance to the transcription service.

          instructions: The default system instructions (i.e. system message) prepended to model calls.
              This field allows the client to guide the model on desired responses. The model
              can be instructed on response content and format, (e.g. "be extremely succinct",
              "act friendly", "here are examples of good responses") and on audio behavior
              (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
              instructions are not guaranteed to be followed by the model, but they provide
              guidance to the model on the desired behavior.

              Note that the server sets default instructions which will be used if this field
              is not set and are visible in the `session.created` event at the start of the
              session.

          max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
              tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
              `inf` for the maximum available tokens for a given model. Defaults to `inf`.

          modalities: The set of modalities the model can respond with. To disable audio, set this to
              ["text"].

          model: The Realtime model used for this session.

          output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
              For `pcm16`, output audio is sampled at a rate of 24kHz.

          speed: The speed of the model's spoken response. 1.0 is the default speed. 0.25 is the
              minimum speed. 1.5 is the maximum speed. This value can only be changed in
              between model turns, not while a response is in progress.

          temperature: Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a
              temperature of 0.8 is highly recommended for best performance.

          tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify
              a function.

          tools: Tools (functions) available to the model.

          tracing: Configuration options for tracing. Set to null to disable tracing. Once tracing
              is enabled for a session, the configuration cannot be modified.

              `auto` will create a trace for the session with default values for the workflow
              name, group id, and metadata.

          turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be
              set to `null` to turn off, in which case the client must manually trigger model
              response. Server VAD means that the model will detect the start and end of
              speech based on audio volume and respond at the end of user speech. Semantic VAD
              is more advanced and uses a turn detection model (in conjunction with VAD) to
              semantically estimate whether the user has finished speaking, then dynamically
              sets a timeout based on this probability. For example, if user audio trails off
              with "uhhm", the model will score a low probability of turn end and wait longer
              for the user to continue speaking. This can be useful for more natural
              conversations, but may have a higher latency.

          voice: The voice the model uses to respond. Voice cannot be changed during the session
              once the model has responded with audio at least once. Current voice options are
              `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._post(
            "/realtime/sessions",
            body=maybe_transform(
                {
                    "client_secret": client_secret,
                    "input_audio_format": input_audio_format,
                    "input_audio_noise_reduction": input_audio_noise_reduction,
                    "input_audio_transcription": input_audio_transcription,
                    "instructions": instructions,
                    "max_response_output_tokens": max_response_output_tokens,
                    "modalities": modalities,
                    "model": model,
                    "output_audio_format": output_audio_format,
                    "speed": speed,
                    "temperature": temperature,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "tracing": tracing,
                    "turn_detection": turn_detection,
                    "voice": voice,
                },
                session_create_params.SessionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=SessionCreateResponse,
        )


class AsyncSessions(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncSessionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncSessionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncSessionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncSessionsWithStreamingResponse(self)

    async def create(
        self,
        *,
        client_secret: session_create_params.ClientSecret | NotGiven = NOT_GIVEN,
        input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
        input_audio_noise_reduction: session_create_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN,
        input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
        instructions: str | NotGiven = NOT_GIVEN,
        max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
        modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
        model: Literal[
            "gpt-realtime",
            "gpt-realtime-2025-08-28",
            "gpt-4o-realtime-preview",
            "gpt-4o-realtime-preview-2024-10-01",
            "gpt-4o-realtime-preview-2024-12-17",
            "gpt-4o-realtime-preview-2025-06-03",
            "gpt-4o-mini-realtime-preview",
            "gpt-4o-mini-realtime-preview-2024-12-17",
        ]
        | NotGiven = NOT_GIVEN,
        output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
        speed: float | NotGiven = NOT_GIVEN,
        temperature: float | NotGiven = NOT_GIVEN,
        tool_choice: str | NotGiven = NOT_GIVEN,
        tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN,
|
||||
tracing: session_create_params.Tracing | NotGiven = NOT_GIVEN,
|
||||
turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
|
||||
voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]
|
||||
| NotGiven = NOT_GIVEN,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
|
||||
) -> SessionCreateResponse:
|
||||
"""
|
||||
Create an ephemeral API token for use in client-side applications with the
|
||||
Realtime API. Can be configured with the same session parameters as the
|
||||
`session.update` client event.
|
||||
|
||||
It responds with a session object, plus a `client_secret` key which contains a
|
||||
usable ephemeral API token that can be used to authenticate browser clients for
|
||||
the Realtime API.
|
||||
|
||||
Args:
|
||||
client_secret: Configuration options for the generated client secret.
|
||||
|
||||
input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
|
||||
`pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
|
||||
(mono), and little-endian byte order.
|
||||
|
||||
input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
|
||||
off. Noise reduction filters audio added to the input audio buffer before it is
|
||||
sent to VAD and the model. Filtering the audio can improve VAD and turn
|
||||
detection accuracy (reducing false positives) and model performance by improving
|
||||
perception of the input audio.
|
||||
|
||||
input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
|
||||
`null` to turn off once on. Input audio transcription is not native to the
|
||||
model, since the model consumes audio directly. Transcription runs
|
||||
asynchronously through
|
||||
[the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
|
||||
and should be treated as guidance of input audio content rather than precisely
|
||||
what the model heard. The client can optionally set the language and prompt for
|
||||
transcription; these offer additional guidance to the transcription service.
|
||||
|
||||
instructions: The default system instructions (i.e. system message) prepended to model calls.
|
||||
This field allows the client to guide the model on desired responses. The model
|
||||
can be instructed on response content and format (e.g. "be extremely succinct",
|
||||
"act friendly", "here are examples of good responses") and on audio behavior
|
||||
(e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
|
||||
instructions are not guaranteed to be followed by the model, but they provide
|
||||
guidance to the model on the desired behavior.
|
||||
|
||||
Note that the server sets default instructions which will be used if this field
|
||||
is not set and are visible in the `session.created` event at the start of the
|
||||
session.
|
||||
|
||||
max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
|
||||
tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
|
||||
`inf` for the maximum available tokens for a given model. Defaults to `inf`.
|
||||
|
||||
modalities: The set of modalities the model can respond with. To disable audio, set this to
|
||||
["text"].
|
||||
|
||||
model: The Realtime model used for this session.
|
||||
|
||||
output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
|
||||
For `pcm16`, output audio is sampled at a rate of 24kHz.
|
||||
|
||||
speed: The speed of the model's spoken response. 1.0 is the default speed. 0.25 is the
|
||||
minimum speed. 1.5 is the maximum speed. This value can only be changed in
|
||||
between model turns, not while a response is in progress.
|
||||
|
||||
temperature: Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a
|
||||
temperature of 0.8 is highly recommended for best performance.
|
||||
|
||||
tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify
|
||||
a function.
|
||||
|
||||
tools: Tools (functions) available to the model.
|
||||
|
||||
tracing: Configuration options for tracing. Set to null to disable tracing. Once tracing
|
||||
is enabled for a session, the configuration cannot be modified.
|
||||
|
||||
`auto` will create a trace for the session with default values for the workflow
|
||||
name, group id, and metadata.
|
||||
|
||||
turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be
|
||||
set to `null` to turn off, in which case the client must manually trigger model
|
||||
response. Server VAD means that the model will detect the start and end of
|
||||
speech based on audio volume and respond at the end of user speech. Semantic VAD
|
||||
is more advanced and uses a turn detection model (in conjunction with VAD) to
|
||||
semantically estimate whether the user has finished speaking, then dynamically
|
||||
sets a timeout based on this probability. For example, if user audio trails off
|
||||
with "uhhm", the model will score a low probability of turn end and wait longer
|
||||
for the user to continue speaking. This can be useful for more natural
|
||||
conversations, but may have a higher latency.
|
||||
|
||||
voice: The voice the model uses to respond. Voice cannot be changed during the session
|
||||
once the model has responded with audio at least once. Current voice options are
|
||||
`alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
|
||||
return await self._post(
|
||||
"/realtime/sessions",
|
||||
body=await async_maybe_transform(
|
||||
{
|
||||
"client_secret": client_secret,
|
||||
"input_audio_format": input_audio_format,
|
||||
"input_audio_noise_reduction": input_audio_noise_reduction,
|
||||
"input_audio_transcription": input_audio_transcription,
|
||||
"instructions": instructions,
|
||||
"max_response_output_tokens": max_response_output_tokens,
|
||||
"modalities": modalities,
|
||||
"model": model,
|
||||
"output_audio_format": output_audio_format,
|
||||
"speed": speed,
|
||||
"temperature": temperature,
|
||||
"tool_choice": tool_choice,
|
||||
"tools": tools,
|
||||
"tracing": tracing,
|
||||
"turn_detection": turn_detection,
|
||||
"voice": voice,
|
||||
},
|
||||
session_create_params.SessionCreateParams,
|
||||
),
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=SessionCreateResponse,
|
||||
)
|
||||
|
||||
|
||||
class SessionsWithRawResponse:
|
||||
def __init__(self, sessions: Sessions) -> None:
|
||||
self._sessions = sessions
|
||||
|
||||
self.create = _legacy_response.to_raw_response_wrapper(
|
||||
sessions.create,
|
||||
)
|
||||
|
||||
|
||||
class AsyncSessionsWithRawResponse:
|
||||
def __init__(self, sessions: AsyncSessions) -> None:
|
||||
self._sessions = sessions
|
||||
|
||||
self.create = _legacy_response.async_to_raw_response_wrapper(
|
||||
sessions.create,
|
||||
)
|
||||
|
||||
|
||||
class SessionsWithStreamingResponse:
|
||||
def __init__(self, sessions: Sessions) -> None:
|
||||
self._sessions = sessions
|
||||
|
||||
self.create = to_streamed_response_wrapper(
|
||||
sessions.create,
|
||||
)
|
||||
|
||||
|
||||
class AsyncSessionsWithStreamingResponse:
|
||||
def __init__(self, sessions: AsyncSessions) -> None:
|
||||
self._sessions = sessions
|
||||
|
||||
self.create = async_to_streamed_response_wrapper(
|
||||
sessions.create,
|
||||
)
|
||||
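The `Sessions.create` / `AsyncSessions.create` methods above mint an ephemeral Realtime token by POSTing to `/realtime/sessions`. A minimal server-side usage sketch follows; the parameter values and the `turn_detection` payload shape are illustrative assumptions rather than something stated in this diff, and the `client_secret.value` access assumes the response shape shipped with this SDK version.

# Hedged usage sketch for Sessions.create (sync client shown; AsyncSessions
# is identical apart from `await`). All values below are illustrative.
import os
from openai import OpenAI

client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

session = client.beta.realtime.sessions.create(
    model="gpt-4o-realtime-preview",        # one of the Literal values listed above
    modalities=["text", "audio"],
    voice="verse",
    instructions="Be concise and friendly.",
    turn_detection={"type": "server_vad"},  # assumed payload shape for Server VAD
)

# The response carries a `client_secret`; its value is the ephemeral token a
# browser client uses to authenticate against the Realtime API.
print(session.client_secret.value)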
@@ -0,0 +1,282 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
from typing_extensions import Literal
|
||||
|
||||
import httpx
|
||||
|
||||
from .... import _legacy_response
|
||||
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
|
||||
from ...._utils import maybe_transform, async_maybe_transform
|
||||
from ...._compat import cached_property
|
||||
from ...._resource import SyncAPIResource, AsyncAPIResource
|
||||
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
|
||||
from ...._base_client import make_request_options
|
||||
from ....types.beta.realtime import transcription_session_create_params
|
||||
from ....types.beta.realtime.transcription_session import TranscriptionSession
|
||||
|
||||
__all__ = ["TranscriptionSessions", "AsyncTranscriptionSessions"]
|
||||
|
||||
|
||||
class TranscriptionSessions(SyncAPIResource):
|
||||
@cached_property
|
||||
def with_raw_response(self) -> TranscriptionSessionsWithRawResponse:
|
||||
"""
|
||||
This property can be used as a prefix for any HTTP method call to return
|
||||
the raw response object instead of the parsed content.
|
||||
|
||||
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
|
||||
"""
|
||||
return TranscriptionSessionsWithRawResponse(self)
|
||||
|
||||
@cached_property
|
||||
def with_streaming_response(self) -> TranscriptionSessionsWithStreamingResponse:
|
||||
"""
|
||||
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
|
||||
|
||||
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
|
||||
"""
|
||||
return TranscriptionSessionsWithStreamingResponse(self)
|
||||
|
||||
def create(
|
||||
self,
|
||||
*,
|
||||
client_secret: transcription_session_create_params.ClientSecret | NotGiven = NOT_GIVEN,
|
||||
include: List[str] | NotGiven = NOT_GIVEN,
|
||||
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
|
||||
input_audio_noise_reduction: transcription_session_create_params.InputAudioNoiseReduction
|
||||
| NotGiven = NOT_GIVEN,
|
||||
input_audio_transcription: transcription_session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
|
||||
modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
|
||||
turn_detection: transcription_session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
|
||||
) -> TranscriptionSession:
|
||||
"""
|
||||
Create an ephemeral API token for use in client-side applications with the
|
||||
Realtime API specifically for realtime transcriptions. Can be configured with
|
||||
the same session parameters as the `transcription_session.update` client event.
|
||||
|
||||
It responds with a session object, plus a `client_secret` key which contains a
|
||||
usable ephemeral API token that can be used to authenticate browser clients for
|
||||
the Realtime API.
|
||||
|
||||
Args:
|
||||
client_secret: Configuration options for the generated client secret.
|
||||
|
||||
include:
|
||||
The set of items to include in the transcription. Currently available items are:
|
||||
|
||||
- `item.input_audio_transcription.logprobs`
|
||||
|
||||
input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
|
||||
`pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
|
||||
(mono), and little-endian byte order.
|
||||
|
||||
input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
|
||||
off. Noise reduction filters audio added to the input audio buffer before it is
|
||||
sent to VAD and the model. Filtering the audio can improve VAD and turn
|
||||
detection accuracy (reducing false positives) and model performance by improving
|
||||
perception of the input audio.
|
||||
|
||||
input_audio_transcription: Configuration for input audio transcription. The client can optionally set the
|
||||
language and prompt for transcription; these offer additional guidance to the
|
||||
transcription service.
|
||||
|
||||
modalities: The set of modalities the model can respond with. To disable audio, set this to
|
||||
["text"].
|
||||
|
||||
turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be
|
||||
set to `null` to turn off, in which case the client must manually trigger model
|
||||
response. Server VAD means that the model will detect the start and end of
|
||||
speech based on audio volume and respond at the end of user speech. Semantic VAD
|
||||
is more advanced and uses a turn detection model (in conjunction with VAD) to
|
||||
semantically estimate whether the user has finished speaking, then dynamically
|
||||
sets a timeout based on this probability. For example, if user audio trails off
|
||||
with "uhhm", the model will score a low probability of turn end and wait longer
|
||||
for the user to continue speaking. This can be useful for more natural
|
||||
conversations, but may have a higher latency.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
|
||||
return self._post(
|
||||
"/realtime/transcription_sessions",
|
||||
body=maybe_transform(
|
||||
{
|
||||
"client_secret": client_secret,
|
||||
"include": include,
|
||||
"input_audio_format": input_audio_format,
|
||||
"input_audio_noise_reduction": input_audio_noise_reduction,
|
||||
"input_audio_transcription": input_audio_transcription,
|
||||
"modalities": modalities,
|
||||
"turn_detection": turn_detection,
|
||||
},
|
||||
transcription_session_create_params.TranscriptionSessionCreateParams,
|
||||
),
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=TranscriptionSession,
|
||||
)
|
||||
|
||||
|
||||
class AsyncTranscriptionSessions(AsyncAPIResource):
|
||||
@cached_property
|
||||
def with_raw_response(self) -> AsyncTranscriptionSessionsWithRawResponse:
|
||||
"""
|
||||
This property can be used as a prefix for any HTTP method call to return
|
||||
the raw response object instead of the parsed content.
|
||||
|
||||
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
|
||||
"""
|
||||
return AsyncTranscriptionSessionsWithRawResponse(self)
|
||||
|
||||
@cached_property
|
||||
def with_streaming_response(self) -> AsyncTranscriptionSessionsWithStreamingResponse:
|
||||
"""
|
||||
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
|
||||
|
||||
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
|
||||
"""
|
||||
return AsyncTranscriptionSessionsWithStreamingResponse(self)
|
||||
|
||||
async def create(
|
||||
self,
|
||||
*,
|
||||
client_secret: transcription_session_create_params.ClientSecret | NotGiven = NOT_GIVEN,
|
||||
include: List[str] | NotGiven = NOT_GIVEN,
|
||||
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
|
||||
input_audio_noise_reduction: transcription_session_create_params.InputAudioNoiseReduction
|
||||
| NotGiven = NOT_GIVEN,
|
||||
input_audio_transcription: transcription_session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
|
||||
modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
|
||||
turn_detection: transcription_session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
|
||||
) -> TranscriptionSession:
|
||||
"""
|
||||
Create an ephemeral API token for use in client-side applications with the
|
||||
Realtime API specifically for realtime transcriptions. Can be configured with
|
||||
the same session parameters as the `transcription_session.update` client event.
|
||||
|
||||
It responds with a session object, plus a `client_secret` key which contains a
|
||||
usable ephemeral API token that can be used to authenticate browser clients for
|
||||
the Realtime API.
|
||||
|
||||
Args:
|
||||
client_secret: Configuration options for the generated client secret.
|
||||
|
||||
include:
|
||||
The set of items to include in the transcription. Currently available items are:
|
||||
|
||||
- `item.input_audio_transcription.logprobs`
|
||||
|
||||
input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
|
||||
`pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
|
||||
(mono), and little-endian byte order.
|
||||
|
||||
input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
|
||||
off. Noise reduction filters audio added to the input audio buffer before it is
|
||||
sent to VAD and the model. Filtering the audio can improve VAD and turn
|
||||
detection accuracy (reducing false positives) and model performance by improving
|
||||
perception of the input audio.
|
||||
|
||||
input_audio_transcription: Configuration for input audio transcription. The client can optionally set the
|
||||
language and prompt for transcription; these offer additional guidance to the
|
||||
transcription service.
|
||||
|
||||
modalities: The set of modalities the model can respond with. To disable audio, set this to
|
||||
["text"].
|
||||
|
||||
turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be
|
||||
set to `null` to turn off, in which case the client must manually trigger model
|
||||
response. Server VAD means that the model will detect the start and end of
|
||||
speech based on audio volume and respond at the end of user speech. Semantic VAD
|
||||
is more advanced and uses a turn detection model (in conjunction with VAD) to
|
||||
semantically estimate whether the user has finished speaking, then dynamically
|
||||
sets a timeout based on this probability. For example, if user audio trails off
|
||||
with "uhhm", the model will score a low probability of turn end and wait longer
|
||||
for the user to continue speaking. This can be useful for more natural
|
||||
conversations, but may have a higher latency.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
|
||||
return await self._post(
|
||||
"/realtime/transcription_sessions",
|
||||
body=await async_maybe_transform(
|
||||
{
|
||||
"client_secret": client_secret,
|
||||
"include": include,
|
||||
"input_audio_format": input_audio_format,
|
||||
"input_audio_noise_reduction": input_audio_noise_reduction,
|
||||
"input_audio_transcription": input_audio_transcription,
|
||||
"modalities": modalities,
|
||||
"turn_detection": turn_detection,
|
||||
},
|
||||
transcription_session_create_params.TranscriptionSessionCreateParams,
|
||||
),
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=TranscriptionSession,
|
||||
)
|
||||
|
||||
|
||||
class TranscriptionSessionsWithRawResponse:
|
||||
def __init__(self, transcription_sessions: TranscriptionSessions) -> None:
|
||||
self._transcription_sessions = transcription_sessions
|
||||
|
||||
self.create = _legacy_response.to_raw_response_wrapper(
|
||||
transcription_sessions.create,
|
||||
)
|
||||
|
||||
|
||||
class AsyncTranscriptionSessionsWithRawResponse:
|
||||
def __init__(self, transcription_sessions: AsyncTranscriptionSessions) -> None:
|
||||
self._transcription_sessions = transcription_sessions
|
||||
|
||||
self.create = _legacy_response.async_to_raw_response_wrapper(
|
||||
transcription_sessions.create,
|
||||
)
|
||||
|
||||
|
||||
class TranscriptionSessionsWithStreamingResponse:
|
||||
def __init__(self, transcription_sessions: TranscriptionSessions) -> None:
|
||||
self._transcription_sessions = transcription_sessions
|
||||
|
||||
self.create = to_streamed_response_wrapper(
|
||||
transcription_sessions.create,
|
||||
)
|
||||
|
||||
|
||||
class AsyncTranscriptionSessionsWithStreamingResponse:
|
||||
def __init__(self, transcription_sessions: AsyncTranscriptionSessions) -> None:
|
||||
self._transcription_sessions = transcription_sessions
|
||||
|
||||
self.create = async_to_streamed_response_wrapper(
|
||||
transcription_sessions.create,
|
||||
)
|
||||
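`TranscriptionSessions.create` follows the same ephemeral-token pattern against `/realtime/transcription_sessions` and returns a `TranscriptionSession`. The sketch below is a hedged example of direct use plus the `.with_raw_response` accessor that every resource in this file exposes; the `input_audio_transcription` payload shape is an assumption for illustration only.

# Hedged sketch: create a transcription-only Realtime session token.
from openai import OpenAI

client = OpenAI()

transcription_session = client.beta.realtime.transcription_sessions.create(
    input_audio_format="pcm16",
    # Assumed payload shape; the concrete fields live in
    # transcription_session_create_params.InputAudioTranscription.
    input_audio_transcription={"model": "whisper-1"},
    turn_detection={"type": "server_vad"},
)
print(transcription_session.client_secret)

# The same call via .with_raw_response yields the raw HTTP wrapper, so response
# headers are available before the TranscriptionSession body is parsed.
raw = client.beta.realtime.transcription_sessions.with_raw_response.create(
    input_audio_format="pcm16",
)
print(raw.headers.get("x-request-id"))
parsed = raw.parse()  # -> TranscriptionSession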
@@ -0,0 +1,47 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from .runs import (
|
||||
Runs,
|
||||
AsyncRuns,
|
||||
RunsWithRawResponse,
|
||||
AsyncRunsWithRawResponse,
|
||||
RunsWithStreamingResponse,
|
||||
AsyncRunsWithStreamingResponse,
|
||||
)
|
||||
from .threads import (
|
||||
Threads,
|
||||
AsyncThreads,
|
||||
ThreadsWithRawResponse,
|
||||
AsyncThreadsWithRawResponse,
|
||||
ThreadsWithStreamingResponse,
|
||||
AsyncThreadsWithStreamingResponse,
|
||||
)
|
||||
from .messages import (
|
||||
Messages,
|
||||
AsyncMessages,
|
||||
MessagesWithRawResponse,
|
||||
AsyncMessagesWithRawResponse,
|
||||
MessagesWithStreamingResponse,
|
||||
AsyncMessagesWithStreamingResponse,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"Runs",
|
||||
"AsyncRuns",
|
||||
"RunsWithRawResponse",
|
||||
"AsyncRunsWithRawResponse",
|
||||
"RunsWithStreamingResponse",
|
||||
"AsyncRunsWithStreamingResponse",
|
||||
"Messages",
|
||||
"AsyncMessages",
|
||||
"MessagesWithRawResponse",
|
||||
"AsyncMessagesWithRawResponse",
|
||||
"MessagesWithStreamingResponse",
|
||||
"AsyncMessagesWithStreamingResponse",
|
||||
"Threads",
|
||||
"AsyncThreads",
|
||||
"ThreadsWithRawResponse",
|
||||
"AsyncThreadsWithRawResponse",
|
||||
"ThreadsWithStreamingResponse",
|
||||
"AsyncThreadsWithStreamingResponse",
|
||||
]
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,718 @@
|
||||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import typing_extensions
|
||||
from typing import Union, Iterable, Optional
|
||||
from typing_extensions import Literal
|
||||
|
||||
import httpx
|
||||
|
||||
from .... import _legacy_response
|
||||
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
|
||||
from ...._utils import maybe_transform, async_maybe_transform
|
||||
from ...._compat import cached_property
|
||||
from ...._resource import SyncAPIResource, AsyncAPIResource
|
||||
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
|
||||
from ....pagination import SyncCursorPage, AsyncCursorPage
|
||||
from ...._base_client import (
|
||||
AsyncPaginator,
|
||||
make_request_options,
|
||||
)
|
||||
from ....types.beta.threads import message_list_params, message_create_params, message_update_params
|
||||
from ....types.beta.threads.message import Message
|
||||
from ....types.shared_params.metadata import Metadata
|
||||
from ....types.beta.threads.message_deleted import MessageDeleted
|
||||
from ....types.beta.threads.message_content_part_param import MessageContentPartParam
|
||||
|
||||
__all__ = ["Messages", "AsyncMessages"]
|
||||
|
||||
|
||||
class Messages(SyncAPIResource):
|
||||
@cached_property
|
||||
def with_raw_response(self) -> MessagesWithRawResponse:
|
||||
"""
|
||||
This property can be used as a prefix for any HTTP method call to return
|
||||
the raw response object instead of the parsed content.
|
||||
|
||||
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
|
||||
"""
|
||||
return MessagesWithRawResponse(self)
|
||||
|
||||
@cached_property
|
||||
def with_streaming_response(self) -> MessagesWithStreamingResponse:
|
||||
"""
|
||||
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
|
||||
|
||||
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
|
||||
"""
|
||||
return MessagesWithStreamingResponse(self)
|
||||
|
||||
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
||||
def create(
|
||||
self,
|
||||
thread_id: str,
|
||||
*,
|
||||
content: Union[str, Iterable[MessageContentPartParam]],
|
||||
role: Literal["user", "assistant"],
|
||||
attachments: Optional[Iterable[message_create_params.Attachment]] | Omit = omit,
|
||||
metadata: Optional[Metadata] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> Message:
|
||||
"""
|
||||
Create a message.
|
||||
|
||||
Args:
|
||||
content: The text contents of the message.
|
||||
|
||||
role:
|
||||
The role of the entity that is creating the message. Allowed values include:
|
||||
|
||||
- `user`: Indicates the message is sent by an actual user and should be used in
|
||||
most cases to represent user-generated messages.
|
||||
- `assistant`: Indicates the message is generated by the assistant. Use this
|
||||
value to insert messages from the assistant into the conversation.
|
||||
|
||||
attachments: A list of files attached to the message, and the tools they should be added to.
|
||||
|
||||
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
|
||||
for storing additional information about the object in a structured format, and
|
||||
querying for objects via API or the dashboard.
|
||||
|
||||
Keys are strings with a maximum length of 64 characters. Values are strings with
|
||||
a maximum length of 512 characters.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not thread_id:
|
||||
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
||||
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
|
||||
return self._post(
|
||||
f"/threads/{thread_id}/messages",
|
||||
body=maybe_transform(
|
||||
{
|
||||
"content": content,
|
||||
"role": role,
|
||||
"attachments": attachments,
|
||||
"metadata": metadata,
|
||||
},
|
||||
message_create_params.MessageCreateParams,
|
||||
),
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=Message,
|
||||
)
|
||||
|
||||
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
||||
def retrieve(
|
||||
self,
|
||||
message_id: str,
|
||||
*,
|
||||
thread_id: str,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> Message:
|
||||
"""
|
||||
Retrieve a message.
|
||||
|
||||
Args:
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not thread_id:
|
||||
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
||||
if not message_id:
|
||||
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
|
||||
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
|
||||
return self._get(
|
||||
f"/threads/{thread_id}/messages/{message_id}",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=Message,
|
||||
)
|
||||
|
||||
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
||||
def update(
|
||||
self,
|
||||
message_id: str,
|
||||
*,
|
||||
thread_id: str,
|
||||
metadata: Optional[Metadata] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> Message:
|
||||
"""
|
||||
Modifies a message.
|
||||
|
||||
Args:
|
||||
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
|
||||
for storing additional information about the object in a structured format, and
|
||||
querying for objects via API or the dashboard.
|
||||
|
||||
Keys are strings with a maximum length of 64 characters. Values are strings with
|
||||
a maximum length of 512 characters.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not thread_id:
|
||||
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
||||
if not message_id:
|
||||
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
|
||||
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
|
||||
return self._post(
|
||||
f"/threads/{thread_id}/messages/{message_id}",
|
||||
body=maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams),
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=Message,
|
||||
)
|
||||
|
||||
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
||||
def list(
|
||||
self,
|
||||
thread_id: str,
|
||||
*,
|
||||
after: str | Omit = omit,
|
||||
before: str | Omit = omit,
|
||||
limit: int | Omit = omit,
|
||||
order: Literal["asc", "desc"] | Omit = omit,
|
||||
run_id: str | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> SyncCursorPage[Message]:
|
||||
"""
|
||||
Returns a list of messages for a given thread.
|
||||
|
||||
Args:
|
||||
after: A cursor for use in pagination. `after` is an object ID that defines your place
|
||||
in the list. For instance, if you make a list request and receive 100 objects,
|
||||
ending with obj_foo, your subsequent call can include after=obj_foo in order to
|
||||
fetch the next page of the list.
|
||||
|
||||
before: A cursor for use in pagination. `before` is an object ID that defines your place
|
||||
in the list. For instance, if you make a list request and receive 100 objects,
|
||||
starting with obj_foo, your subsequent call can include before=obj_foo in order
|
||||
to fetch the previous page of the list.
|
||||
|
||||
limit: A limit on the number of objects to be returned. Limit can range between 1 and
|
||||
100, and the default is 20.
|
||||
|
||||
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
|
||||
order and `desc` for descending order.
|
||||
|
||||
run_id: Filter messages by the run ID that generated them.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not thread_id:
|
||||
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
||||
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
|
||||
return self._get_api_list(
|
||||
f"/threads/{thread_id}/messages",
|
||||
page=SyncCursorPage[Message],
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers,
|
||||
extra_query=extra_query,
|
||||
extra_body=extra_body,
|
||||
timeout=timeout,
|
||||
query=maybe_transform(
|
||||
{
|
||||
"after": after,
|
||||
"before": before,
|
||||
"limit": limit,
|
||||
"order": order,
|
||||
"run_id": run_id,
|
||||
},
|
||||
message_list_params.MessageListParams,
|
||||
),
|
||||
),
|
||||
model=Message,
|
||||
)
|
||||
|
||||
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
||||
def delete(
|
||||
self,
|
||||
message_id: str,
|
||||
*,
|
||||
thread_id: str,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> MessageDeleted:
|
||||
"""
|
||||
Deletes a message.
|
||||
|
||||
Args:
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not thread_id:
|
||||
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
||||
if not message_id:
|
||||
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
|
||||
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
|
||||
return self._delete(
|
||||
f"/threads/{thread_id}/messages/{message_id}",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=MessageDeleted,
|
||||
)
|
||||
|
||||
|
||||
class AsyncMessages(AsyncAPIResource):
|
||||
@cached_property
|
||||
def with_raw_response(self) -> AsyncMessagesWithRawResponse:
|
||||
"""
|
||||
This property can be used as a prefix for any HTTP method call to return
|
||||
the raw response object instead of the parsed content.
|
||||
|
||||
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
|
||||
"""
|
||||
return AsyncMessagesWithRawResponse(self)
|
||||
|
||||
@cached_property
|
||||
def with_streaming_response(self) -> AsyncMessagesWithStreamingResponse:
|
||||
"""
|
||||
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
|
||||
|
||||
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
|
||||
"""
|
||||
return AsyncMessagesWithStreamingResponse(self)
|
||||
|
||||
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
||||
async def create(
|
||||
self,
|
||||
thread_id: str,
|
||||
*,
|
||||
content: Union[str, Iterable[MessageContentPartParam]],
|
||||
role: Literal["user", "assistant"],
|
||||
attachments: Optional[Iterable[message_create_params.Attachment]] | Omit = omit,
|
||||
metadata: Optional[Metadata] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> Message:
|
||||
"""
|
||||
Create a message.
|
||||
|
||||
Args:
|
||||
content: The text contents of the message.
|
||||
|
||||
role:
|
||||
The role of the entity that is creating the message. Allowed values include:
|
||||
|
||||
- `user`: Indicates the message is sent by an actual user and should be used in
|
||||
most cases to represent user-generated messages.
|
||||
- `assistant`: Indicates the message is generated by the assistant. Use this
|
||||
value to insert messages from the assistant into the conversation.
|
||||
|
||||
attachments: A list of files attached to the message, and the tools they should be added to.
|
||||
|
||||
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
|
||||
for storing additional information about the object in a structured format, and
|
||||
querying for objects via API or the dashboard.
|
||||
|
||||
Keys are strings with a maximum length of 64 characters. Values are strings with
|
||||
a maximum length of 512 characters.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not thread_id:
|
||||
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
||||
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
|
||||
return await self._post(
|
||||
f"/threads/{thread_id}/messages",
|
||||
body=await async_maybe_transform(
|
||||
{
|
||||
"content": content,
|
||||
"role": role,
|
||||
"attachments": attachments,
|
||||
"metadata": metadata,
|
||||
},
|
||||
message_create_params.MessageCreateParams,
|
||||
),
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=Message,
|
||||
)
|
||||
|
||||
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
||||
async def retrieve(
|
||||
self,
|
||||
message_id: str,
|
||||
*,
|
||||
thread_id: str,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> Message:
|
||||
"""
|
||||
Retrieve a message.
|
||||
|
||||
Args:
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not thread_id:
|
||||
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
||||
if not message_id:
|
||||
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
|
||||
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
|
||||
return await self._get(
|
||||
f"/threads/{thread_id}/messages/{message_id}",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=Message,
|
||||
)
|
||||
|
||||
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
||||
async def update(
|
||||
self,
|
||||
message_id: str,
|
||||
*,
|
||||
thread_id: str,
|
||||
metadata: Optional[Metadata] | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> Message:
|
||||
"""
|
||||
Modifies a message.
|
||||
|
||||
Args:
|
||||
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
|
||||
for storing additional information about the object in a structured format, and
|
||||
querying for objects via API or the dashboard.
|
||||
|
||||
Keys are strings with a maximum length of 64 characters. Values are strings with
|
||||
a maximum length of 512 characters.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not thread_id:
|
||||
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
||||
if not message_id:
|
||||
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
|
||||
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
|
||||
return await self._post(
|
||||
f"/threads/{thread_id}/messages/{message_id}",
|
||||
body=await async_maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams),
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=Message,
|
||||
)
|
||||
|
||||
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
||||
def list(
|
||||
self,
|
||||
thread_id: str,
|
||||
*,
|
||||
after: str | Omit = omit,
|
||||
before: str | Omit = omit,
|
||||
limit: int | Omit = omit,
|
||||
order: Literal["asc", "desc"] | Omit = omit,
|
||||
run_id: str | Omit = omit,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> AsyncPaginator[Message, AsyncCursorPage[Message]]:
|
||||
"""
|
||||
Returns a list of messages for a given thread.
|
||||
|
||||
Args:
|
||||
after: A cursor for use in pagination. `after` is an object ID that defines your place
|
||||
in the list. For instance, if you make a list request and receive 100 objects,
|
||||
ending with obj_foo, your subsequent call can include after=obj_foo in order to
|
||||
fetch the next page of the list.
|
||||
|
||||
before: A cursor for use in pagination. `before` is an object ID that defines your place
|
||||
in the list. For instance, if you make a list request and receive 100 objects,
|
||||
starting with obj_foo, your subsequent call can include before=obj_foo in order
|
||||
to fetch the previous page of the list.
|
||||
|
||||
limit: A limit on the number of objects to be returned. Limit can range between 1 and
|
||||
100, and the default is 20.
|
||||
|
||||
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
|
||||
order and `desc` for descending order.
|
||||
|
||||
run_id: Filter messages by the run ID that generated them.
|
||||
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not thread_id:
|
||||
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
||||
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
|
||||
return self._get_api_list(
|
||||
f"/threads/{thread_id}/messages",
|
||||
page=AsyncCursorPage[Message],
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers,
|
||||
extra_query=extra_query,
|
||||
extra_body=extra_body,
|
||||
timeout=timeout,
|
||||
query=maybe_transform(
|
||||
{
|
||||
"after": after,
|
||||
"before": before,
|
||||
"limit": limit,
|
||||
"order": order,
|
||||
"run_id": run_id,
|
||||
},
|
||||
message_list_params.MessageListParams,
|
||||
),
|
||||
),
|
||||
model=Message,
|
||||
)
|
||||
|
||||
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
||||
async def delete(
|
||||
self,
|
||||
message_id: str,
|
||||
*,
|
||||
thread_id: str,
|
||||
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
||||
# The extra values given here take precedence over values defined on the client or passed to this method.
|
||||
extra_headers: Headers | None = None,
|
||||
extra_query: Query | None = None,
|
||||
extra_body: Body | None = None,
|
||||
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
||||
) -> MessageDeleted:
|
||||
"""
|
||||
Deletes a message.
|
||||
|
||||
Args:
|
||||
extra_headers: Send extra headers
|
||||
|
||||
extra_query: Add additional query parameters to the request
|
||||
|
||||
extra_body: Add additional JSON properties to the request
|
||||
|
||||
timeout: Override the client-level default timeout for this request, in seconds
|
||||
"""
|
||||
if not thread_id:
|
||||
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
||||
if not message_id:
|
||||
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
|
||||
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
|
||||
return await self._delete(
|
||||
f"/threads/{thread_id}/messages/{message_id}",
|
||||
options=make_request_options(
|
||||
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
||||
),
|
||||
cast_to=MessageDeleted,
|
||||
)
|
||||
|
||||
|
||||
class MessagesWithRawResponse:
|
||||
def __init__(self, messages: Messages) -> None:
|
||||
self._messages = messages
|
||||
|
||||
self.create = ( # pyright: ignore[reportDeprecated]
|
||||
_legacy_response.to_raw_response_wrapper(
|
||||
messages.create, # pyright: ignore[reportDeprecated],
|
||||
)
|
||||
)
|
||||
self.retrieve = ( # pyright: ignore[reportDeprecated]
|
||||
_legacy_response.to_raw_response_wrapper(
|
||||
messages.retrieve, # pyright: ignore[reportDeprecated],
|
||||
)
|
||||
)
|
||||
        self.update = (  # pyright: ignore[reportDeprecated]
            _legacy_response.to_raw_response_wrapper(
                messages.update,  # pyright: ignore[reportDeprecated],
            )
        )
        self.list = (  # pyright: ignore[reportDeprecated]
            _legacy_response.to_raw_response_wrapper(
                messages.list,  # pyright: ignore[reportDeprecated],
            )
        )
        self.delete = (  # pyright: ignore[reportDeprecated]
            _legacy_response.to_raw_response_wrapper(
                messages.delete,  # pyright: ignore[reportDeprecated],
            )
        )


class AsyncMessagesWithRawResponse:
    def __init__(self, messages: AsyncMessages) -> None:
        self._messages = messages

        self.create = (  # pyright: ignore[reportDeprecated]
            _legacy_response.async_to_raw_response_wrapper(
                messages.create,  # pyright: ignore[reportDeprecated],
            )
        )
        self.retrieve = (  # pyright: ignore[reportDeprecated]
            _legacy_response.async_to_raw_response_wrapper(
                messages.retrieve,  # pyright: ignore[reportDeprecated],
            )
        )
        self.update = (  # pyright: ignore[reportDeprecated]
            _legacy_response.async_to_raw_response_wrapper(
                messages.update,  # pyright: ignore[reportDeprecated],
            )
        )
        self.list = (  # pyright: ignore[reportDeprecated]
            _legacy_response.async_to_raw_response_wrapper(
                messages.list,  # pyright: ignore[reportDeprecated],
            )
        )
        self.delete = (  # pyright: ignore[reportDeprecated]
            _legacy_response.async_to_raw_response_wrapper(
                messages.delete,  # pyright: ignore[reportDeprecated],
            )
        )


class MessagesWithStreamingResponse:
    def __init__(self, messages: Messages) -> None:
        self._messages = messages

        self.create = (  # pyright: ignore[reportDeprecated]
            to_streamed_response_wrapper(
                messages.create,  # pyright: ignore[reportDeprecated],
            )
        )
        self.retrieve = (  # pyright: ignore[reportDeprecated]
            to_streamed_response_wrapper(
                messages.retrieve,  # pyright: ignore[reportDeprecated],
            )
        )
        self.update = (  # pyright: ignore[reportDeprecated]
            to_streamed_response_wrapper(
                messages.update,  # pyright: ignore[reportDeprecated],
            )
        )
        self.list = (  # pyright: ignore[reportDeprecated]
            to_streamed_response_wrapper(
                messages.list,  # pyright: ignore[reportDeprecated],
            )
        )
        self.delete = (  # pyright: ignore[reportDeprecated]
            to_streamed_response_wrapper(
                messages.delete,  # pyright: ignore[reportDeprecated],
            )
        )


class AsyncMessagesWithStreamingResponse:
    def __init__(self, messages: AsyncMessages) -> None:
        self._messages = messages

        self.create = (  # pyright: ignore[reportDeprecated]
            async_to_streamed_response_wrapper(
                messages.create,  # pyright: ignore[reportDeprecated],
            )
        )
        self.retrieve = (  # pyright: ignore[reportDeprecated]
            async_to_streamed_response_wrapper(
                messages.retrieve,  # pyright: ignore[reportDeprecated],
            )
        )
        self.update = (  # pyright: ignore[reportDeprecated]
            async_to_streamed_response_wrapper(
                messages.update,  # pyright: ignore[reportDeprecated],
            )
        )
        self.list = (  # pyright: ignore[reportDeprecated]
            async_to_streamed_response_wrapper(
                messages.list,  # pyright: ignore[reportDeprecated],
            )
        )
        self.delete = (  # pyright: ignore[reportDeprecated]
            async_to_streamed_response_wrapper(
                messages.delete,  # pyright: ignore[reportDeprecated],
            )
        )
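The four wrapper classes above back the `.with_raw_response` and `.with_streaming_response` accessors on the messages resource. A minimal usage sketch, not part of the vendored file; the thread ID is a placeholder and the Assistants endpoints involved are deprecated:

    from openai import OpenAI

    client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

    # Raw-response variant: MessagesWithRawResponse wraps each method so the call
    # returns the HTTP response object; parse() yields the usual page of models.
    raw = client.beta.threads.messages.with_raw_response.list(thread_id="thread_abc123")
    print(raw.headers.get("x-request-id"))
    page = raw.parse()

    # Streaming-response variant: the body is only read inside the context manager.
    with client.beta.threads.messages.with_streaming_response.list(thread_id="thread_abc123") as response:
        print(response.headers.get("content-type"))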
@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from .runs import (
    Runs,
    AsyncRuns,
    RunsWithRawResponse,
    AsyncRunsWithRawResponse,
    RunsWithStreamingResponse,
    AsyncRunsWithStreamingResponse,
)
from .steps import (
    Steps,
    AsyncSteps,
    StepsWithRawResponse,
    AsyncStepsWithRawResponse,
    StepsWithStreamingResponse,
    AsyncStepsWithStreamingResponse,
)

__all__ = [
    "Steps",
    "AsyncSteps",
    "StepsWithRawResponse",
    "AsyncStepsWithRawResponse",
    "StepsWithStreamingResponse",
    "AsyncStepsWithStreamingResponse",
    "Runs",
    "AsyncRuns",
    "RunsWithRawResponse",
    "AsyncRunsWithRawResponse",
    "RunsWithStreamingResponse",
    "AsyncRunsWithStreamingResponse",
]
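This `__init__.py` only re-exports the runs and steps resources plus their raw/streaming wrappers, so callers can import everything for the sub-package from one place. A small illustrative sketch of what that enables, assuming the standard `openai` package layout:

    from openai import OpenAI
    from openai.resources.beta.threads.runs import Runs, Steps

    client = OpenAI()

    # The same objects are normally reached through the client rather than imported directly:
    assert isinstance(client.beta.threads.runs, Runs)
    assert isinstance(client.beta.threads.runs.steps, Steps)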
Binary file not shown.
Binary file not shown.
Binary file not shown.
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,399 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

import typing_extensions
from typing import List
from typing_extensions import Literal

import httpx

from ..... import _legacy_response
from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ....._utils import maybe_transform, async_maybe_transform
from ....._compat import cached_property
from ....._resource import SyncAPIResource, AsyncAPIResource
from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .....pagination import SyncCursorPage, AsyncCursorPage
from ....._base_client import AsyncPaginator, make_request_options
from .....types.beta.threads.runs import step_list_params, step_retrieve_params
from .....types.beta.threads.runs.run_step import RunStep
from .....types.beta.threads.runs.run_step_include import RunStepInclude

__all__ = ["Steps", "AsyncSteps"]


class Steps(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> StepsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return StepsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> StepsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return StepsWithStreamingResponse(self)

    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    def retrieve(
        self,
        step_id: str,
        *,
        thread_id: str,
        run_id: str,
        include: List[RunStepInclude] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> RunStep:
        """
        Retrieves a run step.

        Args:
          include: A list of additional fields to include in the response. Currently the only
              supported value is `step_details.tool_calls[*].file_search.results[*].content`
              to fetch the file search result content.

              See the
              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
              for more information.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        if not run_id:
            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
        if not step_id:
            raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._get(
            f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams),
            ),
            cast_to=RunStep,
        )

    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    def list(
        self,
        run_id: str,
        *,
        thread_id: str,
        after: str | Omit = omit,
        before: str | Omit = omit,
        include: List[RunStepInclude] | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[RunStep]:
        """
        Returns a list of run steps belonging to a run.

        Args:
          after: A cursor for use in pagination. `after` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              ending with obj_foo, your subsequent call can include after=obj_foo in order to
              fetch the next page of the list.

          before: A cursor for use in pagination. `before` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              starting with obj_foo, your subsequent call can include before=obj_foo in order
              to fetch the previous page of the list.

          include: A list of additional fields to include in the response. Currently the only
              supported value is `step_details.tool_calls[*].file_search.results[*].content`
              to fetch the file search result content.

              See the
              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
              for more information.

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.

          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
              order and `desc` for descending order.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        if not run_id:
            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._get_api_list(
            f"/threads/{thread_id}/runs/{run_id}/steps",
            page=SyncCursorPage[RunStep],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "include": include,
                        "limit": limit,
                        "order": order,
                    },
                    step_list_params.StepListParams,
                ),
            ),
            model=RunStep,
        )

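# Illustrative sketch (comments only, not generated code; placeholder IDs): the
# synchronous Steps resource above is normally reached through the client as
# client.beta.threads.runs.steps. A paginated listing that also asks for
# file-search result content would look like:
#
#     steps = client.beta.threads.runs.steps.list(
#         "run_abc123",
#         thread_id="thread_abc123",
#         limit=50,
#         order="asc",
#         include=["step_details.tool_calls[*].file_search.results[*].content"],
#     )
#     for step in steps:  # SyncCursorPage auto-paginates using the `after` cursor
#         print(step.id, step.status)
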
class AsyncSteps(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncStepsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncStepsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncStepsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncStepsWithStreamingResponse(self)

    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    async def retrieve(
        self,
        step_id: str,
        *,
        thread_id: str,
        run_id: str,
        include: List[RunStepInclude] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> RunStep:
        """
        Retrieves a run step.

        Args:
          include: A list of additional fields to include in the response. Currently the only
              supported value is `step_details.tool_calls[*].file_search.results[*].content`
              to fetch the file search result content.

              See the
              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
              for more information.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        if not run_id:
            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
        if not step_id:
            raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._get(
            f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=await async_maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams),
            ),
            cast_to=RunStep,
        )

    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    def list(
        self,
        run_id: str,
        *,
        thread_id: str,
        after: str | Omit = omit,
        before: str | Omit = omit,
        include: List[RunStepInclude] | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[RunStep, AsyncCursorPage[RunStep]]:
        """
        Returns a list of run steps belonging to a run.

        Args:
          after: A cursor for use in pagination. `after` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              ending with obj_foo, your subsequent call can include after=obj_foo in order to
              fetch the next page of the list.

          before: A cursor for use in pagination. `before` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              starting with obj_foo, your subsequent call can include before=obj_foo in order
              to fetch the previous page of the list.

          include: A list of additional fields to include in the response. Currently the only
              supported value is `step_details.tool_calls[*].file_search.results[*].content`
              to fetch the file search result content.

              See the
              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
              for more information.

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.

          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
              order and `desc` for descending order.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        if not run_id:
            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._get_api_list(
            f"/threads/{thread_id}/runs/{run_id}/steps",
            page=AsyncCursorPage[RunStep],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "include": include,
                        "limit": limit,
                        "order": order,
                    },
                    step_list_params.StepListParams,
                ),
            ),
            model=RunStep,
        )

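# Illustrative sketch (comments only, not generated code; placeholder IDs): the async
# resource mirrors the synchronous one, and AsyncCursorPage supports `async for`
# auto-pagination without an explicit await on list():
#
#     import asyncio
#     from openai import AsyncOpenAI
#
#     async def dump_steps() -> None:
#         client = AsyncOpenAI()
#         step = await client.beta.threads.runs.steps.retrieve(
#             "step_abc123", thread_id="thread_abc123", run_id="run_abc123"
#         )
#         print(step.step_details)
#         async for s in client.beta.threads.runs.steps.list("run_abc123", thread_id="thread_abc123"):
#             print(s.id, s.status)
#
#     asyncio.run(dump_steps())
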
class StepsWithRawResponse:
    def __init__(self, steps: Steps) -> None:
        self._steps = steps

        self.retrieve = (  # pyright: ignore[reportDeprecated]
            _legacy_response.to_raw_response_wrapper(
                steps.retrieve,  # pyright: ignore[reportDeprecated],
            )
        )
        self.list = (  # pyright: ignore[reportDeprecated]
            _legacy_response.to_raw_response_wrapper(
                steps.list,  # pyright: ignore[reportDeprecated],
            )
        )


class AsyncStepsWithRawResponse:
    def __init__(self, steps: AsyncSteps) -> None:
        self._steps = steps

        self.retrieve = (  # pyright: ignore[reportDeprecated]
            _legacy_response.async_to_raw_response_wrapper(
                steps.retrieve,  # pyright: ignore[reportDeprecated],
            )
        )
        self.list = (  # pyright: ignore[reportDeprecated]
            _legacy_response.async_to_raw_response_wrapper(
                steps.list,  # pyright: ignore[reportDeprecated],
            )
        )


class StepsWithStreamingResponse:
    def __init__(self, steps: Steps) -> None:
        self._steps = steps

        self.retrieve = (  # pyright: ignore[reportDeprecated]
            to_streamed_response_wrapper(
                steps.retrieve,  # pyright: ignore[reportDeprecated],
            )
        )
        self.list = (  # pyright: ignore[reportDeprecated]
            to_streamed_response_wrapper(
                steps.list,  # pyright: ignore[reportDeprecated],
            )
        )


class AsyncStepsWithStreamingResponse:
    def __init__(self, steps: AsyncSteps) -> None:
        self._steps = steps

        self.retrieve = (  # pyright: ignore[reportDeprecated]
            async_to_streamed_response_wrapper(
                steps.retrieve,  # pyright: ignore[reportDeprecated],
            )
        )
        self.list = (  # pyright: ignore[reportDeprecated]
            async_to_streamed_response_wrapper(
                steps.list,  # pyright: ignore[reportDeprecated],
            )
        )
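For completeness, a hedged usage sketch of the wrapper classes defined at the end of this file; the IDs are placeholders and these accessors behave like the messages example earlier in the diff:

    from openai import OpenAI

    client = OpenAI()

    # StepsWithRawResponse is what the `.with_raw_response` accessor returns:
    raw = client.beta.threads.runs.steps.with_raw_response.retrieve(
        "step_abc123", thread_id="thread_abc123", run_id="run_abc123"
    )
    print(raw.status_code, raw.headers.get("x-request-id"))
    run_step = raw.parse()  # parsed RunStep model

    # StepsWithStreamingResponse defers reading the body until inside the context manager:
    with client.beta.threads.runs.steps.with_streaming_response.list(
        "run_abc123", thread_id="thread_abc123"
    ) as response:
        print(response.headers.get("content-type"))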
File diff suppressed because it is too large
Load Diff