Add adaptation for the orbital reconnaissance scenario
Binary file not shown.
@@ -1264,9 +1264,12 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
         *,
         cast_to: Type[ResponseT],
         body: Body | None = None,
+        files: RequestFiles | None = None,
         options: RequestOptions = {},
     ) -> ResponseT:
-        opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options)
+        opts = FinalRequestOptions.construct(
+            method="patch", url=path, json_data=body, files=to_httpx_files(files), **options
+        )
         return self.request(cast_to, opts)

     def put(
@@ -1799,9 +1802,12 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
         *,
         cast_to: Type[ResponseT],
         body: Body | None = None,
+        files: RequestFiles | None = None,
         options: RequestOptions = {},
     ) -> ResponseT:
-        opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options)
+        opts = FinalRequestOptions.construct(
+            method="patch", url=path, json_data=body, files=await async_to_httpx_files(files), **options
+        )
         return await self.request(cast_to, opts)

     async def put(
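The hunks above let the `patch` helpers accept the same `files` argument the other HTTP verbs already take, converting it with `to_httpx_files()` / `async_to_httpx_files()`. A minimal sketch of that conversion using the SDK's internal helper (internal import path, so treat it as illustrative rather than a stable API):

```python
from openai._files import to_httpx_files

# SDK-style file input: multipart field name -> (filename, content, content type).
files = {"file": ("notes.txt", b"hello world", "text/plain")}

# This is the conversion SyncAPIClient.patch now applies before sending the
# request, mapping SDK file inputs onto httpx's multipart `files` format.
print(to_httpx_files(files))
```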
Binary file not shown.
@@ -55,49 +55,51 @@ class Stream(Generic[_T]):
         process_data = self._client._process_response_data
         iterator = self._iter_events()

-        for sse in iterator:
-            if sse.data.startswith("[DONE]"):
-                break
+        try:
+            for sse in iterator:
+                if sse.data.startswith("[DONE]"):
+                    break

-            # we have to special case the Assistants `thread.` events since we won't have an "event" key in the data
-            if sse.event and sse.event.startswith("thread."):
-                data = sse.json()
+                # we have to special case the Assistants `thread.` events since we won't have an "event" key in the data
+                if sse.event and sse.event.startswith("thread."):
+                    data = sse.json()

-                if sse.event == "error" and is_mapping(data) and data.get("error"):
-                    message = None
-                    error = data.get("error")
-                    if is_mapping(error):
-                        message = error.get("message")
-                    if not message or not isinstance(message, str):
-                        message = "An error occurred during streaming"
+                    if sse.event == "error" and is_mapping(data) and data.get("error"):
+                        message = None
+                        error = data.get("error")
+                        if is_mapping(error):
+                            message = error.get("message")
+                        if not message or not isinstance(message, str):
+                            message = "An error occurred during streaming"

-                    raise APIError(
-                        message=message,
-                        request=self.response.request,
-                        body=data["error"],
-                    )
+                        raise APIError(
+                            message=message,
+                            request=self.response.request,
+                            body=data["error"],
+                        )

-                yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
-            else:
-                data = sse.json()
-                if is_mapping(data) and data.get("error"):
-                    message = None
-                    error = data.get("error")
-                    if is_mapping(error):
-                        message = error.get("message")
-                    if not message or not isinstance(message, str):
-                        message = "An error occurred during streaming"
+                    yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
+                else:
+                    data = sse.json()
+                    if is_mapping(data) and data.get("error"):
+                        message = None
+                        error = data.get("error")
+                        if is_mapping(error):
+                            message = error.get("message")
+                        if not message or not isinstance(message, str):
+                            message = "An error occurred during streaming"

-                    raise APIError(
-                        message=message,
-                        request=self.response.request,
-                        body=data["error"],
-                    )
+                        raise APIError(
+                            message=message,
+                            request=self.response.request,
+                            body=data["error"],
+                        )

-                yield process_data(data=data, cast_to=cast_to, response=response)
+                    yield process_data(data=data, cast_to=cast_to, response=response)

-        # As we might not fully consume the response stream, we need to close it explicitly
-        response.close()
+        finally:
+            # Ensure the response is closed even if the consumer doesn't read all data
+            response.close()

     def __enter__(self) -> Self:
         return self
@@ -156,49 +158,51 @@ class AsyncStream(Generic[_T]):
         process_data = self._client._process_response_data
         iterator = self._iter_events()

-        async for sse in iterator:
-            if sse.data.startswith("[DONE]"):
-                break
+        try:
+            async for sse in iterator:
+                if sse.data.startswith("[DONE]"):
+                    break

-            # we have to special case the Assistants `thread.` events since we won't have an "event" key in the data
-            if sse.event and sse.event.startswith("thread."):
-                data = sse.json()
+                # we have to special case the Assistants `thread.` events since we won't have an "event" key in the data
+                if sse.event and sse.event.startswith("thread."):
+                    data = sse.json()

-                if sse.event == "error" and is_mapping(data) and data.get("error"):
-                    message = None
-                    error = data.get("error")
-                    if is_mapping(error):
-                        message = error.get("message")
-                    if not message or not isinstance(message, str):
-                        message = "An error occurred during streaming"
+                    if sse.event == "error" and is_mapping(data) and data.get("error"):
+                        message = None
+                        error = data.get("error")
+                        if is_mapping(error):
+                            message = error.get("message")
+                        if not message or not isinstance(message, str):
+                            message = "An error occurred during streaming"

-                    raise APIError(
-                        message=message,
-                        request=self.response.request,
-                        body=data["error"],
-                    )
+                        raise APIError(
+                            message=message,
+                            request=self.response.request,
+                            body=data["error"],
+                        )

-                yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
-            else:
-                data = sse.json()
-                if is_mapping(data) and data.get("error"):
-                    message = None
-                    error = data.get("error")
-                    if is_mapping(error):
-                        message = error.get("message")
-                    if not message or not isinstance(message, str):
-                        message = "An error occurred during streaming"
+                    yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
+                else:
+                    data = sse.json()
+                    if is_mapping(data) and data.get("error"):
+                        message = None
+                        error = data.get("error")
+                        if is_mapping(error):
+                            message = error.get("message")
+                        if not message or not isinstance(message, str):
+                            message = "An error occurred during streaming"

-                    raise APIError(
-                        message=message,
-                        request=self.response.request,
-                        body=data["error"],
-                    )
+                        raise APIError(
+                            message=message,
+                            request=self.response.request,
+                            body=data["error"],
+                        )

-                yield process_data(data=data, cast_to=cast_to, response=response)
+                    yield process_data(data=data, cast_to=cast_to, response=response)

-        # As we might not fully consume the response stream, we need to close it explicitly
-        await response.aclose()
+        finally:
+            # Ensure the response is closed even if the consumer doesn't read all data
+            await response.aclose()

     async def __aenter__(self) -> Self:
         return self
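Both stream iterators now wrap the event loop in `try`/`finally`, so the HTTP response is released even when the caller stops iterating early. A small, SDK-independent sketch of why that matters for generators:

```python
def stream(events):
    # Stand-in for Stream.__stream__: yield events until "[DONE]", and always
    # run the cleanup in the finally block.
    try:
        for event in events:
            if event == "[DONE]":
                break
            yield event
    finally:
        print("response closed")

# Normal consumption: the finally block runs after "[DONE]".
for chunk in stream(["a", "b", "[DONE]"]):
    print(chunk)

# Early abandonment: closing the generator (as garbage collection or a `with`
# block would) still triggers the finally block, so nothing leaks.
partial = stream(["a", "b", "c"])
print(next(partial))
partial.close()
```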
@@ -247,6 +247,9 @@ _T_co = TypeVar("_T_co", covariant=True)
 if TYPE_CHECKING:
     # This works because str.__contains__ does not accept object (either in typeshed or at runtime)
     # https://github.com/hauntsaninja/useful_types/blob/5e9710f3875107d068e7679fd7fec9cfab0eff3b/useful_types/__init__.py#L285
+    #
+    # Note: index() and count() methods are intentionally omitted to allow pyright to properly
+    # infer TypedDict types when dict literals are used in lists assigned to SequenceNotStr.
     class SequenceNotStr(Protocol[_T_co]):
         @overload
         def __getitem__(self, index: SupportsIndex, /) -> _T_co: ...
@@ -255,8 +258,6 @@ if TYPE_CHECKING:
         def __contains__(self, value: object, /) -> bool: ...
         def __len__(self) -> int: ...
         def __iter__(self) -> Iterator[_T_co]: ...
-        def index(self, value: Any, start: int = 0, stop: int = ..., /) -> int: ...
-        def count(self, value: Any, /) -> int: ...
         def __reversed__(self) -> Iterator[_T_co]: ...
 else:
     # just point this to a normal `Sequence` at runtime to avoid having to special case
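The added note states the motivation: with `index()` and `count()` dropped from the protocol, pyright can keep inferring TypedDict types for dict literals passed in plain lists. A small sketch of the call pattern this is meant to support (the `Item` TypedDict is illustrative; at runtime `SequenceNotStr` is just `Sequence`):

```python
from typing import TypedDict

from openai._types import SequenceNotStr


class Item(TypedDict):
    name: str


def send(items: SequenceNotStr[Item]) -> int:
    # A plain list of dict literals type-checks as SequenceNotStr[Item], while a
    # bare str (which is also a Sequence) is rejected by the type checker.
    return len(list(items))


print(send([{"name": "a"}, {"name": "b"}]))
```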
Binary file not shown.
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

 __title__ = "openai"
-__version__ = "2.8.0"  # x-release-please-version
+__version__ = "2.14.0"  # x-release-please-version
Binary file not shown.
@@ -103,6 +103,7 @@ def parse_response(
             or output.type == "file_search_call"
             or output.type == "web_search_call"
             or output.type == "reasoning"
+            or output.type == "compaction"
             or output.type == "mcp_call"
             or output.type == "mcp_approval_request"
             or output.type == "image_generation_call"
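This adds `compaction` to the output item types that `parse_response` passes through untouched. A sketch (with assumed item shapes, not the SDK's real classes) of how consumer code typically walks `response.output` and simply skips item types it does not handle:

```python
def collect_text(output_items) -> str:
    # Gather assistant text and ignore everything else: reasoning, compaction,
    # tool calls and other pass-through items keep their original shape.
    parts = []
    for item in output_items:
        if getattr(item, "type", None) != "message":
            continue
        for content in item.content:
            if getattr(content, "type", None) == "output_text":
                parts.append(content.text)
    return "".join(parts)
```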
Binary file not shown.
@@ -72,7 +72,7 @@ class Speech(SyncAPIResource):

           model:
               One of the available [TTS models](https://platform.openai.com/docs/models#tts):
-              `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+              `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.

           voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
               `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
@@ -168,7 +168,7 @@ class AsyncSpeech(AsyncAPIResource):

           model:
               One of the available [TTS models](https://platform.openai.com/docs/models#tts):
-              `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+              `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.

           voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
               `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
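The speech docstrings now list `gpt-4o-mini-tts-2025-12-15` alongside the existing TTS models. A short usage sketch with the streaming helper (the model and voice names come from the docstring; use whatever your account actually has access to):

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

with client.audio.speech.with_streaming_response.create(
    model="gpt-4o-mini-tts",
    voice="alloy",
    input="The quick brown fox jumped over the lazy dog.",
) as response:
    # Write the generated audio to disk as it streams in.
    response.stream_to_file("speech.mp3")
```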
@@ -91,8 +91,9 @@ class Transcriptions(SyncAPIResource):
               flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.

           model: ID of the model to use. The options are `gpt-4o-transcribe`,
-              `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
-              Whisper V2 model).
+              `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
+              (which is powered by our open source Whisper V2 model), and
+              `gpt-4o-transcribe-diarize`.

           chunking_strategy: Controls how the audio is cut into chunks. When set to `"auto"`, the server
               first normalizes loudness and then uses voice activity detection (VAD) to choose
@@ -102,8 +103,9 @@ class Transcriptions(SyncAPIResource):
           include: Additional information to include in the transcription response. `logprobs` will
               return the log probabilities of the tokens in the response to understand the
               model's confidence in the transcription. `logprobs` only works with
-              response_format set to `json` and only with the models `gpt-4o-transcribe` and
-              `gpt-4o-mini-transcribe`.
+              response_format set to `json` and only with the models `gpt-4o-transcribe`,
+              `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
+              not supported when using `gpt-4o-transcribe-diarize`.

           language: The language of the input audio. Supplying the input language in
               [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
@@ -239,8 +241,9 @@ class Transcriptions(SyncAPIResource):
               flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.

           model: ID of the model to use. The options are `gpt-4o-transcribe`,
-              `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source
-              Whisper V2 model), and `gpt-4o-transcribe-diarize`.
+              `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
+              (which is powered by our open source Whisper V2 model), and
+              `gpt-4o-transcribe-diarize`.

           stream: If set to true, the model response data will be streamed to the client as it is
               generated using
@@ -261,9 +264,9 @@ class Transcriptions(SyncAPIResource):
           include: Additional information to include in the transcription response. `logprobs` will
               return the log probabilities of the tokens in the response to understand the
               model's confidence in the transcription. `logprobs` only works with
-              response_format set to `json` and only with the models `gpt-4o-transcribe` and
-              `gpt-4o-mini-transcribe`. This field is not supported when using
-              `gpt-4o-transcribe-diarize`.
+              response_format set to `json` and only with the models `gpt-4o-transcribe`,
+              `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
+              not supported when using `gpt-4o-transcribe-diarize`.

           known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in
               `known_speaker_references[]`. Each entry should be a short identifier (for
@@ -346,8 +349,9 @@ class Transcriptions(SyncAPIResource):
               flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.

           model: ID of the model to use. The options are `gpt-4o-transcribe`,
-              `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source
-              Whisper V2 model), and `gpt-4o-transcribe-diarize`.
+              `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
+              (which is powered by our open source Whisper V2 model), and
+              `gpt-4o-transcribe-diarize`.

           stream: If set to true, the model response data will be streamed to the client as it is
               generated using
@@ -368,9 +372,9 @@ class Transcriptions(SyncAPIResource):
           include: Additional information to include in the transcription response. `logprobs` will
               return the log probabilities of the tokens in the response to understand the
               model's confidence in the transcription. `logprobs` only works with
-              response_format set to `json` and only with the models `gpt-4o-transcribe` and
-              `gpt-4o-mini-transcribe`. This field is not supported when using
-              `gpt-4o-transcribe-diarize`.
+              response_format set to `json` and only with the models `gpt-4o-transcribe`,
+              `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
+              not supported when using `gpt-4o-transcribe-diarize`.

           known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in
               `known_speaker_references[]`. Each entry should be a short identifier (for
@@ -535,8 +539,9 @@ class AsyncTranscriptions(AsyncAPIResource):
               flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.

           model: ID of the model to use. The options are `gpt-4o-transcribe`,
-              `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source
-              Whisper V2 model), and `gpt-4o-transcribe-diarize`.
+              `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
+              (which is powered by our open source Whisper V2 model), and
+              `gpt-4o-transcribe-diarize`.

           chunking_strategy: Controls how the audio is cut into chunks. When set to `"auto"`, the server
               first normalizes loudness and then uses voice activity detection (VAD) to choose
@@ -548,9 +553,9 @@ class AsyncTranscriptions(AsyncAPIResource):
           include: Additional information to include in the transcription response. `logprobs` will
               return the log probabilities of the tokens in the response to understand the
               model's confidence in the transcription. `logprobs` only works with
-              response_format set to `json` and only with the models `gpt-4o-transcribe` and
-              `gpt-4o-mini-transcribe`. This field is not supported when using
-              `gpt-4o-transcribe-diarize`.
+              response_format set to `json` and only with the models `gpt-4o-transcribe`,
+              `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
+              not supported when using `gpt-4o-transcribe-diarize`.

           known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in
               `known_speaker_references[]`. Each entry should be a short identifier (for
@@ -679,8 +684,9 @@ class AsyncTranscriptions(AsyncAPIResource):
               flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.

           model: ID of the model to use. The options are `gpt-4o-transcribe`,
-              `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source
-              Whisper V2 model), and `gpt-4o-transcribe-diarize`.
+              `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
+              (which is powered by our open source Whisper V2 model), and
+              `gpt-4o-transcribe-diarize`.

           stream: If set to true, the model response data will be streamed to the client as it is
               generated using
@@ -701,9 +707,9 @@ class AsyncTranscriptions(AsyncAPIResource):
           include: Additional information to include in the transcription response. `logprobs` will
               return the log probabilities of the tokens in the response to understand the
               model's confidence in the transcription. `logprobs` only works with
-              response_format set to `json` and only with the models `gpt-4o-transcribe` and
-              `gpt-4o-mini-transcribe`. This field is not supported when using
-              `gpt-4o-transcribe-diarize`.
+              response_format set to `json` and only with the models `gpt-4o-transcribe`,
+              `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
+              not supported when using `gpt-4o-transcribe-diarize`.

           known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in
               `known_speaker_references[]`. Each entry should be a short identifier (for
@@ -786,8 +792,9 @@ class AsyncTranscriptions(AsyncAPIResource):
               flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.

           model: ID of the model to use. The options are `gpt-4o-transcribe`,
-              `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source
-              Whisper V2 model), and `gpt-4o-transcribe-diarize`.
+              `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
+              (which is powered by our open source Whisper V2 model), and
+              `gpt-4o-transcribe-diarize`.

           stream: If set to true, the model response data will be streamed to the client as it is
               generated using
@@ -808,9 +815,9 @@ class AsyncTranscriptions(AsyncAPIResource):
           include: Additional information to include in the transcription response. `logprobs` will
               return the log probabilities of the tokens in the response to understand the
               model's confidence in the transcription. `logprobs` only works with
-              response_format set to `json` and only with the models `gpt-4o-transcribe` and
-              `gpt-4o-mini-transcribe`. This field is not supported when using
-              `gpt-4o-transcribe-diarize`.
+              response_format set to `json` and only with the models `gpt-4o-transcribe`,
+              `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
+              not supported when using `gpt-4o-transcribe-diarize`.

           known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in
               `known_speaker_references[]`. Each entry should be a short identifier (for
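The transcription docstrings now list `gpt-4o-mini-transcribe-2025-12-15` and spell out that `logprobs` is unavailable with `gpt-4o-transcribe-diarize`. A brief usage sketch based on those notes (the file name is illustrative):

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

with open("meeting.mp3", "rb") as audio_file:
    transcript = client.audio.transcriptions.create(
        model="gpt-4o-mini-transcribe",
        file=audio_file,
        response_format="json",  # required for logprobs, per the docstring
        include=["logprobs"],    # not supported with gpt-4o-transcribe-diarize
    )

print(transcript.text)
```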
Binary file not shown.
@@ -98,9 +98,9 @@ class Assistants(SyncAPIResource):

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
-              reasoning effort can result in faster responses and fewer tokens used on
-              reasoning in a response.
+              supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+              Reducing reasoning effort can result in faster responses and fewer tokens used
+              on reasoning in a response.

               - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                 reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -108,6 +108,7 @@ class Assistants(SyncAPIResource):
               - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                 support `none`.
               - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+              - `xhigh` is supported for all models after `gpt-5.1-codex-max`.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -312,9 +313,9 @@ class Assistants(SyncAPIResource):

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
-              reasoning effort can result in faster responses and fewer tokens used on
-              reasoning in a response.
+              supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+              Reducing reasoning effort can result in faster responses and fewer tokens used
+              on reasoning in a response.

               - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                 reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -322,6 +323,7 @@ class Assistants(SyncAPIResource):
               - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                 support `none`.
               - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+              - `xhigh` is supported for all models after `gpt-5.1-codex-max`.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -565,9 +567,9 @@ class AsyncAssistants(AsyncAPIResource):

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
-              reasoning effort can result in faster responses and fewer tokens used on
-              reasoning in a response.
+              supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+              Reducing reasoning effort can result in faster responses and fewer tokens used
+              on reasoning in a response.

               - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                 reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -575,6 +577,7 @@ class AsyncAssistants(AsyncAPIResource):
               - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                 support `none`.
               - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+              - `xhigh` is supported for all models after `gpt-5.1-codex-max`.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -779,9 +782,9 @@ class AsyncAssistants(AsyncAPIResource):

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
-              reasoning effort can result in faster responses and fewer tokens used on
-              reasoning in a response.
+              supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+              Reducing reasoning effort can result in faster responses and fewer tokens used
+              on reasoning in a response.

               - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                 reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -789,6 +792,7 @@ class AsyncAssistants(AsyncAPIResource):
               - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                 support `none`.
               - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+              - `xhigh` is supported for all models after `gpt-5.1-codex-max`.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
Binary file not shown.
@@ -169,9 +169,9 @@ class Runs(SyncAPIResource):

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
-              reasoning effort can result in faster responses and fewer tokens used on
-              reasoning in a response.
+              supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+              Reducing reasoning effort can result in faster responses and fewer tokens used
+              on reasoning in a response.

               - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                 reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -179,6 +179,7 @@ class Runs(SyncAPIResource):
               - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                 support `none`.
               - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+              - `xhigh` is supported for all models after `gpt-5.1-codex-max`.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -330,9 +331,9 @@ class Runs(SyncAPIResource):

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
-              reasoning effort can result in faster responses and fewer tokens used on
-              reasoning in a response.
+              supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+              Reducing reasoning effort can result in faster responses and fewer tokens used
+              on reasoning in a response.

               - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                 reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -340,6 +341,7 @@ class Runs(SyncAPIResource):
               - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                 support `none`.
               - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+              - `xhigh` is supported for all models after `gpt-5.1-codex-max`.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -487,9 +489,9 @@ class Runs(SyncAPIResource):

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
-              reasoning effort can result in faster responses and fewer tokens used on
-              reasoning in a response.
+              supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+              Reducing reasoning effort can result in faster responses and fewer tokens used
+              on reasoning in a response.

               - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                 reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -497,6 +499,7 @@ class Runs(SyncAPIResource):
               - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                 support `none`.
               - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+              - `xhigh` is supported for all models after `gpt-5.1-codex-max`.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -1620,9 +1623,9 @@ class AsyncRuns(AsyncAPIResource):

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
-              reasoning effort can result in faster responses and fewer tokens used on
-              reasoning in a response.
+              supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+              Reducing reasoning effort can result in faster responses and fewer tokens used
+              on reasoning in a response.

               - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                 reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -1630,6 +1633,7 @@ class AsyncRuns(AsyncAPIResource):
               - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                 support `none`.
               - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+              - `xhigh` is supported for all models after `gpt-5.1-codex-max`.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -1781,9 +1785,9 @@ class AsyncRuns(AsyncAPIResource):

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
-              reasoning effort can result in faster responses and fewer tokens used on
-              reasoning in a response.
+              supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+              Reducing reasoning effort can result in faster responses and fewer tokens used
+              on reasoning in a response.

               - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                 reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -1791,6 +1795,7 @@ class AsyncRuns(AsyncAPIResource):
               - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                 support `none`.
               - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+              - `xhigh` is supported for all models after `gpt-5.1-codex-max`.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -1938,9 +1943,9 @@ class AsyncRuns(AsyncAPIResource):

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
-              reasoning effort can result in faster responses and fewer tokens used on
-              reasoning in a response.
+              supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+              Reducing reasoning effort can result in faster responses and fewer tokens used
+              on reasoning in a response.

               - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                 reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -1948,6 +1953,7 @@ class AsyncRuns(AsyncAPIResource):
               - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                 support `none`.
               - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+              - `xhigh` is supported for all models after `gpt-5.1-codex-max`.

           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
Binary file not shown.
@@ -411,9 +411,9 @@ class Completions(SyncAPIResource):

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
-              reasoning effort can result in faster responses and fewer tokens used on
-              reasoning in a response.
+              supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+              Reducing reasoning effort can result in faster responses and fewer tokens used
+              on reasoning in a response.

               - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                 reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -421,6 +421,7 @@ class Completions(SyncAPIResource):
               - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                 support `none`.
               - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+              - `xhigh` is supported for all models after `gpt-5.1-codex-max`.

           response_format: An object specifying the format that the model must output.

@@ -721,9 +722,9 @@ class Completions(SyncAPIResource):

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
-              reasoning effort can result in faster responses and fewer tokens used on
-              reasoning in a response.
+              supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+              Reducing reasoning effort can result in faster responses and fewer tokens used
+              on reasoning in a response.

               - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                 reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -731,6 +732,7 @@ class Completions(SyncAPIResource):
               - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                 support `none`.
               - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+              - `xhigh` is supported for all models after `gpt-5.1-codex-max`.

           response_format: An object specifying the format that the model must output.

@@ -1022,9 +1024,9 @@ class Completions(SyncAPIResource):

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
-              reasoning effort can result in faster responses and fewer tokens used on
-              reasoning in a response.
+              supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+              Reducing reasoning effort can result in faster responses and fewer tokens used
+              on reasoning in a response.

               - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                 reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -1032,6 +1034,7 @@ class Completions(SyncAPIResource):
               - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                 support `none`.
               - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+              - `xhigh` is supported for all models after `gpt-5.1-codex-max`.

           response_format: An object specifying the format that the model must output.

@@ -1894,9 +1897,9 @@ class AsyncCompletions(AsyncAPIResource):

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
-              reasoning effort can result in faster responses and fewer tokens used on
-              reasoning in a response.
+              supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+              Reducing reasoning effort can result in faster responses and fewer tokens used
+              on reasoning in a response.

               - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                 reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -1904,6 +1907,7 @@ class AsyncCompletions(AsyncAPIResource):
               - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                 support `none`.
               - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+              - `xhigh` is supported for all models after `gpt-5.1-codex-max`.

           response_format: An object specifying the format that the model must output.

@@ -2204,9 +2208,9 @@ class AsyncCompletions(AsyncAPIResource):

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
-              reasoning effort can result in faster responses and fewer tokens used on
-              reasoning in a response.
+              supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+              Reducing reasoning effort can result in faster responses and fewer tokens used
+              on reasoning in a response.

               - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                 reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -2214,6 +2218,7 @@ class AsyncCompletions(AsyncAPIResource):
               - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                 support `none`.
               - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+              - `xhigh` is supported for all models after `gpt-5.1-codex-max`.

           response_format: An object specifying the format that the model must output.

@@ -2505,9 +2510,9 @@ class AsyncCompletions(AsyncAPIResource):

           reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
-              reasoning effort can result in faster responses and fewer tokens used on
-              reasoning in a response.
+              supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+              Reducing reasoning effort can result in faster responses and fewer tokens used
+              on reasoning in a response.

               - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                 reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -2515,6 +2520,7 @@ class AsyncCompletions(AsyncAPIResource):
               - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                 support `none`.
               - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+              - `xhigh` is supported for all models after `gpt-5.1-codex-max`.

           response_format: An object specifying the format that the model must output.

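The same `reasoning_effort` wording change (adding `xhigh`) is applied across the assistants, runs, and chat completions docstrings. A short sketch of the parameter in use on chat completions, following the rules quoted above (`gpt-5.1` accepts `none`, `low`, `medium`, and `high`; `xhigh` applies only to models after `gpt-5.1-codex-max`):

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

completion = client.chat.completions.create(
    model="gpt-5.1",
    reasoning_effort="low",  # gpt-5.1 defaults to "none" when this is omitted
    messages=[{"role": "user", "content": "Give me one sentence on SSE streams."}],
)

print(completion.choices[0].message.content)
```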
Binary file not shown.
@@ -60,6 +60,7 @@ class Containers(SyncAPIResource):
         name: str,
         expires_after: container_create_params.ExpiresAfter | Omit = omit,
         file_ids: SequenceNotStr[str] | Omit = omit,
+        memory_limit: Literal["1g", "4g", "16g", "64g"] | Omit = omit,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -77,6 +78,8 @@ class Containers(SyncAPIResource):

           file_ids: IDs of files to copy to the container.

+          memory_limit: Optional memory limit for the container. Defaults to "1g".
+
           extra_headers: Send extra headers

           extra_query: Add additional query parameters to the request
@@ -92,6 +95,7 @@ class Containers(SyncAPIResource):
                     "name": name,
                     "expires_after": expires_after,
                     "file_ids": file_ids,
+                    "memory_limit": memory_limit,
                 },
                 container_create_params.ContainerCreateParams,
             ),
@@ -256,6 +260,7 @@ class AsyncContainers(AsyncAPIResource):
         name: str,
         expires_after: container_create_params.ExpiresAfter | Omit = omit,
         file_ids: SequenceNotStr[str] | Omit = omit,
+        memory_limit: Literal["1g", "4g", "16g", "64g"] | Omit = omit,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -273,6 +278,8 @@ class AsyncContainers(AsyncAPIResource):

           file_ids: IDs of files to copy to the container.

+          memory_limit: Optional memory limit for the container. Defaults to "1g".
+
           extra_headers: Send extra headers

           extra_query: Add additional query parameters to the request
@@ -288,6 +295,7 @@ class AsyncContainers(AsyncAPIResource):
                     "name": name,
                     "expires_after": expires_after,
                     "file_ids": file_ids,
+                    "memory_limit": memory_limit,
                 },
                 container_create_params.ContainerCreateParams,
             ),
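`containers.create` gains a `memory_limit` argument ("1g", "4g", "16g", or "64g", defaulting to "1g"). A brief sketch of it in use (the container name is illustrative):

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

container = client.containers.create(
    name="scratch-sandbox",
    memory_limit="4g",  # omit to keep the default "1g"
)

print(container.id)
```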
Binary file not shown.
Some files were not shown because too many files have changed in this diff.