chore: automatic commit 2025-04-30 12:48

2025-04-30 12:48:06 +02:00
parent f69356473b
commit e4ab1e1bb5
5284 changed files with 868438 additions and 0 deletions


@@ -0,0 +1,215 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .beta import (
Beta,
AsyncBeta,
BetaWithRawResponse,
AsyncBetaWithRawResponse,
BetaWithStreamingResponse,
AsyncBetaWithStreamingResponse,
)
from .chat import (
Chat,
AsyncChat,
ChatWithRawResponse,
AsyncChatWithRawResponse,
ChatWithStreamingResponse,
AsyncChatWithStreamingResponse,
)
from .audio import (
Audio,
AsyncAudio,
AudioWithRawResponse,
AsyncAudioWithRawResponse,
AudioWithStreamingResponse,
AsyncAudioWithStreamingResponse,
)
from .evals import (
Evals,
AsyncEvals,
EvalsWithRawResponse,
AsyncEvalsWithRawResponse,
EvalsWithStreamingResponse,
AsyncEvalsWithStreamingResponse,
)
from .files import (
Files,
AsyncFiles,
FilesWithRawResponse,
AsyncFilesWithRawResponse,
FilesWithStreamingResponse,
AsyncFilesWithStreamingResponse,
)
from .images import (
Images,
AsyncImages,
ImagesWithRawResponse,
AsyncImagesWithRawResponse,
ImagesWithStreamingResponse,
AsyncImagesWithStreamingResponse,
)
from .models import (
Models,
AsyncModels,
ModelsWithRawResponse,
AsyncModelsWithRawResponse,
ModelsWithStreamingResponse,
AsyncModelsWithStreamingResponse,
)
from .batches import (
Batches,
AsyncBatches,
BatchesWithRawResponse,
AsyncBatchesWithRawResponse,
BatchesWithStreamingResponse,
AsyncBatchesWithStreamingResponse,
)
from .uploads import (
Uploads,
AsyncUploads,
UploadsWithRawResponse,
AsyncUploadsWithRawResponse,
UploadsWithStreamingResponse,
AsyncUploadsWithStreamingResponse,
)
from .responses import (
Responses,
AsyncResponses,
ResponsesWithRawResponse,
AsyncResponsesWithRawResponse,
ResponsesWithStreamingResponse,
AsyncResponsesWithStreamingResponse,
)
from .embeddings import (
Embeddings,
AsyncEmbeddings,
EmbeddingsWithRawResponse,
AsyncEmbeddingsWithRawResponse,
EmbeddingsWithStreamingResponse,
AsyncEmbeddingsWithStreamingResponse,
)
from .completions import (
Completions,
AsyncCompletions,
CompletionsWithRawResponse,
AsyncCompletionsWithRawResponse,
CompletionsWithStreamingResponse,
AsyncCompletionsWithStreamingResponse,
)
from .fine_tuning import (
FineTuning,
AsyncFineTuning,
FineTuningWithRawResponse,
AsyncFineTuningWithRawResponse,
FineTuningWithStreamingResponse,
AsyncFineTuningWithStreamingResponse,
)
from .moderations import (
Moderations,
AsyncModerations,
ModerationsWithRawResponse,
AsyncModerationsWithRawResponse,
ModerationsWithStreamingResponse,
AsyncModerationsWithStreamingResponse,
)
from .vector_stores import (
VectorStores,
AsyncVectorStores,
VectorStoresWithRawResponse,
AsyncVectorStoresWithRawResponse,
VectorStoresWithStreamingResponse,
AsyncVectorStoresWithStreamingResponse,
)
__all__ = [
"Completions",
"AsyncCompletions",
"CompletionsWithRawResponse",
"AsyncCompletionsWithRawResponse",
"CompletionsWithStreamingResponse",
"AsyncCompletionsWithStreamingResponse",
"Chat",
"AsyncChat",
"ChatWithRawResponse",
"AsyncChatWithRawResponse",
"ChatWithStreamingResponse",
"AsyncChatWithStreamingResponse",
"Embeddings",
"AsyncEmbeddings",
"EmbeddingsWithRawResponse",
"AsyncEmbeddingsWithRawResponse",
"EmbeddingsWithStreamingResponse",
"AsyncEmbeddingsWithStreamingResponse",
"Files",
"AsyncFiles",
"FilesWithRawResponse",
"AsyncFilesWithRawResponse",
"FilesWithStreamingResponse",
"AsyncFilesWithStreamingResponse",
"Images",
"AsyncImages",
"ImagesWithRawResponse",
"AsyncImagesWithRawResponse",
"ImagesWithStreamingResponse",
"AsyncImagesWithStreamingResponse",
"Audio",
"AsyncAudio",
"AudioWithRawResponse",
"AsyncAudioWithRawResponse",
"AudioWithStreamingResponse",
"AsyncAudioWithStreamingResponse",
"Moderations",
"AsyncModerations",
"ModerationsWithRawResponse",
"AsyncModerationsWithRawResponse",
"ModerationsWithStreamingResponse",
"AsyncModerationsWithStreamingResponse",
"Models",
"AsyncModels",
"ModelsWithRawResponse",
"AsyncModelsWithRawResponse",
"ModelsWithStreamingResponse",
"AsyncModelsWithStreamingResponse",
"FineTuning",
"AsyncFineTuning",
"FineTuningWithRawResponse",
"AsyncFineTuningWithRawResponse",
"FineTuningWithStreamingResponse",
"AsyncFineTuningWithStreamingResponse",
"VectorStores",
"AsyncVectorStores",
"VectorStoresWithRawResponse",
"AsyncVectorStoresWithRawResponse",
"VectorStoresWithStreamingResponse",
"AsyncVectorStoresWithStreamingResponse",
"Beta",
"AsyncBeta",
"BetaWithRawResponse",
"AsyncBetaWithRawResponse",
"BetaWithStreamingResponse",
"AsyncBetaWithStreamingResponse",
"Batches",
"AsyncBatches",
"BatchesWithRawResponse",
"AsyncBatchesWithRawResponse",
"BatchesWithStreamingResponse",
"AsyncBatchesWithStreamingResponse",
"Uploads",
"AsyncUploads",
"UploadsWithRawResponse",
"AsyncUploadsWithRawResponse",
"UploadsWithStreamingResponse",
"AsyncUploadsWithStreamingResponse",
"Responses",
"AsyncResponses",
"ResponsesWithRawResponse",
"AsyncResponsesWithRawResponse",
"ResponsesWithStreamingResponse",
"AsyncResponsesWithStreamingResponse",
"Evals",
"AsyncEvals",
"EvalsWithRawResponse",
"AsyncEvalsWithRawResponse",
"EvalsWithStreamingResponse",
"AsyncEvalsWithStreamingResponse",
]
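
The flat `__all__` above simply re-exports every sync/async resource class and its raw/streaming wrappers from one place. A minimal sketch of what that buys callers, assuming this file is the `openai.resources` package `__init__` (the path is inferred from the relative imports, not stated in the diff):

# Both import paths resolve to the same class object via the re-exports above.
from openai.resources import Audio
from openai.resources.audio import Audio as AudioDirect

assert Audio is AudioDirect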


@@ -0,0 +1,61 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .audio import (
Audio,
AsyncAudio,
AudioWithRawResponse,
AsyncAudioWithRawResponse,
AudioWithStreamingResponse,
AsyncAudioWithStreamingResponse,
)
from .speech import (
Speech,
AsyncSpeech,
SpeechWithRawResponse,
AsyncSpeechWithRawResponse,
SpeechWithStreamingResponse,
AsyncSpeechWithStreamingResponse,
)
from .translations import (
Translations,
AsyncTranslations,
TranslationsWithRawResponse,
AsyncTranslationsWithRawResponse,
TranslationsWithStreamingResponse,
AsyncTranslationsWithStreamingResponse,
)
from .transcriptions import (
Transcriptions,
AsyncTranscriptions,
TranscriptionsWithRawResponse,
AsyncTranscriptionsWithRawResponse,
TranscriptionsWithStreamingResponse,
AsyncTranscriptionsWithStreamingResponse,
)
__all__ = [
"Transcriptions",
"AsyncTranscriptions",
"TranscriptionsWithRawResponse",
"AsyncTranscriptionsWithRawResponse",
"TranscriptionsWithStreamingResponse",
"AsyncTranscriptionsWithStreamingResponse",
"Translations",
"AsyncTranslations",
"TranslationsWithRawResponse",
"AsyncTranslationsWithRawResponse",
"TranslationsWithStreamingResponse",
"AsyncTranslationsWithStreamingResponse",
"Speech",
"AsyncSpeech",
"SpeechWithRawResponse",
"AsyncSpeechWithRawResponse",
"SpeechWithStreamingResponse",
"AsyncSpeechWithStreamingResponse",
"Audio",
"AsyncAudio",
"AudioWithRawResponse",
"AsyncAudioWithRawResponse",
"AudioWithStreamingResponse",
"AsyncAudioWithStreamingResponse",
]


@@ -0,0 +1,166 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from .speech import (
Speech,
AsyncSpeech,
SpeechWithRawResponse,
AsyncSpeechWithRawResponse,
SpeechWithStreamingResponse,
AsyncSpeechWithStreamingResponse,
)
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from .translations import (
Translations,
AsyncTranslations,
TranslationsWithRawResponse,
AsyncTranslationsWithRawResponse,
TranslationsWithStreamingResponse,
AsyncTranslationsWithStreamingResponse,
)
from .transcriptions import (
Transcriptions,
AsyncTranscriptions,
TranscriptionsWithRawResponse,
AsyncTranscriptionsWithRawResponse,
TranscriptionsWithStreamingResponse,
AsyncTranscriptionsWithStreamingResponse,
)
__all__ = ["Audio", "AsyncAudio"]
class Audio(SyncAPIResource):
@cached_property
def transcriptions(self) -> Transcriptions:
return Transcriptions(self._client)
@cached_property
def translations(self) -> Translations:
return Translations(self._client)
@cached_property
def speech(self) -> Speech:
return Speech(self._client)
@cached_property
def with_raw_response(self) -> AudioWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AudioWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AudioWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AudioWithStreamingResponse(self)
class AsyncAudio(AsyncAPIResource):
@cached_property
def transcriptions(self) -> AsyncTranscriptions:
return AsyncTranscriptions(self._client)
@cached_property
def translations(self) -> AsyncTranslations:
return AsyncTranslations(self._client)
@cached_property
def speech(self) -> AsyncSpeech:
return AsyncSpeech(self._client)
@cached_property
def with_raw_response(self) -> AsyncAudioWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncAudioWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncAudioWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncAudioWithStreamingResponse(self)
class AudioWithRawResponse:
def __init__(self, audio: Audio) -> None:
self._audio = audio
@cached_property
def transcriptions(self) -> TranscriptionsWithRawResponse:
return TranscriptionsWithRawResponse(self._audio.transcriptions)
@cached_property
def translations(self) -> TranslationsWithRawResponse:
return TranslationsWithRawResponse(self._audio.translations)
@cached_property
def speech(self) -> SpeechWithRawResponse:
return SpeechWithRawResponse(self._audio.speech)
class AsyncAudioWithRawResponse:
def __init__(self, audio: AsyncAudio) -> None:
self._audio = audio
@cached_property
def transcriptions(self) -> AsyncTranscriptionsWithRawResponse:
return AsyncTranscriptionsWithRawResponse(self._audio.transcriptions)
@cached_property
def translations(self) -> AsyncTranslationsWithRawResponse:
return AsyncTranslationsWithRawResponse(self._audio.translations)
@cached_property
def speech(self) -> AsyncSpeechWithRawResponse:
return AsyncSpeechWithRawResponse(self._audio.speech)
class AudioWithStreamingResponse:
def __init__(self, audio: Audio) -> None:
self._audio = audio
@cached_property
def transcriptions(self) -> TranscriptionsWithStreamingResponse:
return TranscriptionsWithStreamingResponse(self._audio.transcriptions)
@cached_property
def translations(self) -> TranslationsWithStreamingResponse:
return TranslationsWithStreamingResponse(self._audio.translations)
@cached_property
def speech(self) -> SpeechWithStreamingResponse:
return SpeechWithStreamingResponse(self._audio.speech)
class AsyncAudioWithStreamingResponse:
def __init__(self, audio: AsyncAudio) -> None:
self._audio = audio
@cached_property
def transcriptions(self) -> AsyncTranscriptionsWithStreamingResponse:
return AsyncTranscriptionsWithStreamingResponse(self._audio.transcriptions)
@cached_property
def translations(self) -> AsyncTranslationsWithStreamingResponse:
return AsyncTranslationsWithStreamingResponse(self._audio.translations)
@cached_property
def speech(self) -> AsyncSpeechWithStreamingResponse:
return AsyncSpeechWithStreamingResponse(self._audio.speech)
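
Each sub-resource above is a `cached_property`, so the first access builds the object and later accesses return the same instance, and the raw/streaming wrapper classes mirror the same attribute tree. A hedged sketch of the resulting call surface (assumes `OPENAI_API_KEY` is set in the environment; no request is issued):

from openai import OpenAI

client = OpenAI()

# cached_property: repeated attribute access yields the identical object.
assert client.audio.speech is client.audio.speech

# The wrapper trees mirror the resource tree, so the raw/streaming variant of
# any method is reachable by inserting one prefix into the dotted path:
raw_speech = client.audio.with_raw_response.speech                 # SpeechWithRawResponse
streamed_speech = client.audio.with_streaming_response.speech      # SpeechWithStreamingResponse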


@@ -0,0 +1,245 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Literal
import httpx
from ... import _legacy_response
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
StreamedBinaryAPIResponse,
AsyncStreamedBinaryAPIResponse,
to_custom_streamed_response_wrapper,
async_to_custom_streamed_response_wrapper,
)
from ...types.audio import speech_create_params
from ..._base_client import make_request_options
from ...types.audio.speech_model import SpeechModel
__all__ = ["Speech", "AsyncSpeech"]
class Speech(SyncAPIResource):
@cached_property
def with_raw_response(self) -> SpeechWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return SpeechWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> SpeechWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return SpeechWithStreamingResponse(self)
def create(
self,
*,
input: str,
model: Union[str, SpeechModel],
voice: Union[
str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
],
instructions: str | NotGiven = NOT_GIVEN,
response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
speed: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> _legacy_response.HttpxBinaryResponseContent:
"""
Generates audio from the input text.
Args:
input: The text to generate audio for. The maximum length is 4096 characters.
model:
One of the available [TTS models](https://platform.openai.com/docs/models#tts):
`tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
`ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
`verse`. Previews of the voices are available in the
[Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
instructions: Control the voice of your generated audio with additional instructions. Does not
work with `tts-1` or `tts-1-hd`.
response_format: The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
`wav`, and `pcm`.
speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
the default.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
return self._post(
"/audio/speech",
body=maybe_transform(
{
"input": input,
"model": model,
"voice": voice,
"instructions": instructions,
"response_format": response_format,
"speed": speed,
},
speech_create_params.SpeechCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
class AsyncSpeech(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncSpeechWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncSpeechWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncSpeechWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncSpeechWithStreamingResponse(self)
async def create(
self,
*,
input: str,
model: Union[str, SpeechModel],
voice: Union[
str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
],
instructions: str | NotGiven = NOT_GIVEN,
response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
speed: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> _legacy_response.HttpxBinaryResponseContent:
"""
Generates audio from the input text.
Args:
input: The text to generate audio for. The maximum length is 4096 characters.
model:
One of the available [TTS models](https://platform.openai.com/docs/models#tts):
`tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
`ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
`verse`. Previews of the voices are available in the
[Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
instructions: Control the voice of your generated audio with additional instructions. Does not
work with `tts-1` or `tts-1-hd`.
response_format: The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
`wav`, and `pcm`.
speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
the default.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
return await self._post(
"/audio/speech",
body=await async_maybe_transform(
{
"input": input,
"model": model,
"voice": voice,
"instructions": instructions,
"response_format": response_format,
"speed": speed,
},
speech_create_params.SpeechCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
class SpeechWithRawResponse:
def __init__(self, speech: Speech) -> None:
self._speech = speech
self.create = _legacy_response.to_raw_response_wrapper(
speech.create,
)
class AsyncSpeechWithRawResponse:
def __init__(self, speech: AsyncSpeech) -> None:
self._speech = speech
self.create = _legacy_response.async_to_raw_response_wrapper(
speech.create,
)
class SpeechWithStreamingResponse:
def __init__(self, speech: Speech) -> None:
self._speech = speech
self.create = to_custom_streamed_response_wrapper(
speech.create,
StreamedBinaryAPIResponse,
)
class AsyncSpeechWithStreamingResponse:
def __init__(self, speech: AsyncSpeech) -> None:
self._speech = speech
self.create = async_to_custom_streamed_response_wrapper(
speech.create,
AsyncStreamedBinaryAPIResponse,
)
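
Because `create` returns binary audio (`HttpxBinaryResponseContent`), the streaming wrapper is the natural way to write the output to disk without buffering the whole body. A usage sketch; the model, voice, input text, and `speech.mp3` path are illustrative:

from openai import OpenAI

client = OpenAI()

# Stream the response body straight to a file instead of reading it eagerly.
with client.audio.speech.with_streaming_response.create(
    model="gpt-4o-mini-tts",
    voice="coral",
    input="The quick brown fox jumped over the lazy dog.",
) as response:
    response.stream_to_file("speech.mp3")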


@@ -0,0 +1,686 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, List, Union, Mapping, Optional, cast
from typing_extensions import Literal, overload, assert_never
import httpx
from ... import _legacy_response
from ...types import AudioResponseFormat
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
from ..._utils import extract_files, required_args, maybe_transform, deepcopy_minimal, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..._streaming import Stream, AsyncStream
from ...types.audio import transcription_create_params
from ..._base_client import make_request_options
from ...types.audio_model import AudioModel
from ...types.audio.transcription import Transcription
from ...types.audio_response_format import AudioResponseFormat
from ...types.audio.transcription_include import TranscriptionInclude
from ...types.audio.transcription_verbose import TranscriptionVerbose
from ...types.audio.transcription_stream_event import TranscriptionStreamEvent
from ...types.audio.transcription_create_response import TranscriptionCreateResponse
__all__ = ["Transcriptions", "AsyncTranscriptions"]
log: logging.Logger = logging.getLogger("openai.audio.transcriptions")
class Transcriptions(SyncAPIResource):
@cached_property
def with_raw_response(self) -> TranscriptionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return TranscriptionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> TranscriptionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return TranscriptionsWithStreamingResponse(self)
@overload
def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN,
language: str | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Transcription: ...
@overload
def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
response_format: Literal["verbose_json"],
language: str | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> TranscriptionVerbose: ...
@overload
def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
response_format: Literal["text", "srt", "vtt"],
include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
language: str | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> str: ...
@overload
def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
stream: Literal[True],
include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
language: str | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Stream[TranscriptionStreamEvent]:
"""
Transcribes audio into the input language.
Args:
file:
The audio file object (not file name) to transcribe, in one of these formats:
flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
model: ID of the model to use. The options are `gpt-4o-transcribe`,
`gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
Whisper V2 model).
stream: If set to true, the model response data will be streamed to the client as it is
generated using
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
See the
[Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
for more information.
Note: Streaming is not supported for the `whisper-1` model and will be ignored.
include: Additional information to include in the transcription response. `logprobs` will
return the log probabilities of the tokens in the response to understand the
model's confidence in the transcription. `logprobs` only works with
response_format set to `json` and only with the models `gpt-4o-transcribe` and
`gpt-4o-mini-transcribe`.
language: The language of the input audio. Supplying the input language in
[ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
format will improve accuracy and latency.
prompt: An optional text to guide the model's style or continue a previous audio
segment. The
[prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
should match the audio language.
response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
`verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
the only supported format is `json`.
temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
output more random, while lower values like 0.2 will make it more focused and
deterministic. If set to 0, the model will use
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
automatically increase the temperature until certain thresholds are hit.
timestamp_granularities: The timestamp granularities to populate for this transcription.
`response_format` must be set to `verbose_json` to use timestamp granularities.
Either or both of these options are supported: `word` and `segment`. Note: There
is no additional latency for segment timestamps, but generating word timestamps
incurs additional latency.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@overload
def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
stream: bool,
include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
language: str | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> TranscriptionCreateResponse | Stream[TranscriptionStreamEvent]:
"""
Transcribes audio into the input language.
Args:
file:
The audio file object (not file name) to transcribe, in one of these formats:
flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
model: ID of the model to use. The options are `gpt-4o-transcribe`,
`gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
Whisper V2 model).
stream: If set to true, the model response data will be streamed to the client as it is
generated using
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
See the
[Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
for more information.
Note: Streaming is not supported for the `whisper-1` model and will be ignored.
include: Additional information to include in the transcription response. `logprobs` will
return the log probabilities of the tokens in the response to understand the
model's confidence in the transcription. `logprobs` only works with
response_format set to `json` and only with the models `gpt-4o-transcribe` and
`gpt-4o-mini-transcribe`.
language: The language of the input audio. Supplying the input language in
[ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
format will improve accuracy and latency.
prompt: An optional text to guide the model's style or continue a previous audio
segment. The
[prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
should match the audio language.
response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
`verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
the only supported format is `json`.
temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
output more random, while lower values like 0.2 will make it more focused and
deterministic. If set to 0, the model will use
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
automatically increase the temperature until certain thresholds are hit.
timestamp_granularities: The timestamp granularities to populate for this transcription.
`response_format` must be set to `verbose_json` to use timestamp granularities.
Either or both of these options are supported: `word` and `segment`. Note: There
is no additional latency for segment timestamps, but generating word timestamps
incurs additional latency.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@required_args(["file", "model"], ["file", "model", "stream"])
def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
language: str | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN,
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> str | Transcription | TranscriptionVerbose | Stream[TranscriptionStreamEvent]:
body = deepcopy_minimal(
{
"file": file,
"model": model,
"include": include,
"language": language,
"prompt": prompt,
"response_format": response_format,
"stream": stream,
"temperature": temperature,
"timestamp_granularities": timestamp_granularities,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return self._post( # type: ignore[return-value]
"/audio/transcriptions",
body=maybe_transform(
body,
transcription_create_params.TranscriptionCreateParamsStreaming
if stream
else transcription_create_params.TranscriptionCreateParamsNonStreaming,
),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=_get_response_format_type(response_format),
stream=stream or False,
stream_cls=Stream[TranscriptionStreamEvent],
)
class AsyncTranscriptions(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncTranscriptionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncTranscriptionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncTranscriptionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncTranscriptionsWithStreamingResponse(self)
@overload
async def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN,
language: str | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Transcription: ...
@overload
async def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
response_format: Literal["verbose_json"],
language: str | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> TranscriptionVerbose: ...
@overload
async def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
response_format: Literal["text", "srt", "vtt"],
language: str | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> str: ...
@overload
async def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
stream: Literal[True],
include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
language: str | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AsyncStream[TranscriptionStreamEvent]:
"""
Transcribes audio into the input language.
Args:
file:
The audio file object (not file name) to transcribe, in one of these formats:
flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
model: ID of the model to use. The options are `gpt-4o-transcribe`,
`gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
Whisper V2 model).
stream: If set to true, the model response data will be streamed to the client as it is
generated using
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
See the
[Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
for more information.
Note: Streaming is not supported for the `whisper-1` model and will be ignored.
include: Additional information to include in the transcription response. `logprobs` will
return the log probabilities of the tokens in the response to understand the
model's confidence in the transcription. `logprobs` only works with
response_format set to `json` and only with the models `gpt-4o-transcribe` and
`gpt-4o-mini-transcribe`.
language: The language of the input audio. Supplying the input language in
[ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
format will improve accuracy and latency.
prompt: An optional text to guide the model's style or continue a previous audio
segment. The
[prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
should match the audio language.
response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
`verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
the only supported format is `json`.
temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
output more random, while lower values like 0.2 will make it more focused and
deterministic. If set to 0, the model will use
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
automatically increase the temperature until certain thresholds are hit.
timestamp_granularities: The timestamp granularities to populate for this transcription.
`response_format` must be set to `verbose_json` to use timestamp granularities.
Either or both of these options are supported: `word` and `segment`. Note: There
is no additional latency for segment timestamps, but generating word timestamps
incurs additional latency.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@overload
async def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
stream: bool,
include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
language: str | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> TranscriptionCreateResponse | AsyncStream[TranscriptionStreamEvent]:
"""
Transcribes audio into the input language.
Args:
file:
The audio file object (not file name) to transcribe, in one of these formats:
flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
model: ID of the model to use. The options are `gpt-4o-transcribe`,
`gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
Whisper V2 model).
stream: If set to true, the model response data will be streamed to the client as it is
generated using
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
See the
[Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
for more information.
Note: Streaming is not supported for the `whisper-1` model and will be ignored.
include: Additional information to include in the transcription response. `logprobs` will
return the log probabilities of the tokens in the response to understand the
model's confidence in the transcription. `logprobs` only works with
response_format set to `json` and only with the models `gpt-4o-transcribe` and
`gpt-4o-mini-transcribe`.
language: The language of the input audio. Supplying the input language in
[ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
format will improve accuracy and latency.
prompt: An optional text to guide the model's style or continue a previous audio
segment. The
[prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
should match the audio language.
response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
`verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
the only supported format is `json`.
temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
output more random, while lower values like 0.2 will make it more focused and
deterministic. If set to 0, the model will use
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
automatically increase the temperature until certain thresholds are hit.
timestamp_granularities: The timestamp granularities to populate for this transcription.
`response_format` must be set to `verbose_json` to use timestamp granularities.
Either or both of these options are supported: `word` and `segment`. Note: There
is no additional latency for segment timestamps, but generating word timestamps
incurs additional latency.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@required_args(["file", "model"], ["file", "model", "stream"])
async def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN,
language: str | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN,
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Transcription | TranscriptionVerbose | str | AsyncStream[TranscriptionStreamEvent]:
body = deepcopy_minimal(
{
"file": file,
"model": model,
"include": include,
"language": language,
"prompt": prompt,
"response_format": response_format,
"stream": stream,
"temperature": temperature,
"timestamp_granularities": timestamp_granularities,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return await self._post(
"/audio/transcriptions",
body=await async_maybe_transform(
body,
transcription_create_params.TranscriptionCreateParamsStreaming
if stream
else transcription_create_params.TranscriptionCreateParamsNonStreaming,
),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=_get_response_format_type(response_format),
stream=stream or False,
stream_cls=AsyncStream[TranscriptionStreamEvent],
)
class TranscriptionsWithRawResponse:
def __init__(self, transcriptions: Transcriptions) -> None:
self._transcriptions = transcriptions
self.create = _legacy_response.to_raw_response_wrapper(
transcriptions.create,
)
class AsyncTranscriptionsWithRawResponse:
def __init__(self, transcriptions: AsyncTranscriptions) -> None:
self._transcriptions = transcriptions
self.create = _legacy_response.async_to_raw_response_wrapper(
transcriptions.create,
)
class TranscriptionsWithStreamingResponse:
def __init__(self, transcriptions: Transcriptions) -> None:
self._transcriptions = transcriptions
self.create = to_streamed_response_wrapper(
transcriptions.create,
)
class AsyncTranscriptionsWithStreamingResponse:
def __init__(self, transcriptions: AsyncTranscriptions) -> None:
self._transcriptions = transcriptions
self.create = async_to_streamed_response_wrapper(
transcriptions.create,
)
def _get_response_format_type(
response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven,
) -> type[Transcription | TranscriptionVerbose | str]:
if isinstance(response_format, NotGiven) or response_format is None: # pyright: ignore[reportUnnecessaryComparison]
return Transcription
if response_format == "json":
return Transcription
elif response_format == "verbose_json":
return TranscriptionVerbose
elif response_format == "srt" or response_format == "text" or response_format == "vtt":
return str
elif TYPE_CHECKING: # type: ignore[unreachable]
assert_never(response_format)
else:
log.warning("Unexpected audio response format: %s", response_format)
return Transcription
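
Tying the overloads together: without `stream`, `create` returns a parsed `Transcription` (or `str` for the text-like formats), while `stream=True` yields `TranscriptionStreamEvent`s as they arrive. A usage sketch; `audio.mp3` is a placeholder path:

from openai import OpenAI

client = OpenAI()

# Non-streaming: the response is parsed into a Transcription object.
with open("audio.mp3", "rb") as f:
    transcription = client.audio.transcriptions.create(
        model="gpt-4o-transcribe",
        file=f,
    )
print(transcription.text)

# Streaming: server-sent events arrive as the transcript is generated
# (not supported for whisper-1, per the docstring above).
with open("audio.mp3", "rb") as f:
    for event in client.audio.transcriptions.create(
        model="gpt-4o-transcribe",
        file=f,
        stream=True,
    ):
        print(event.type)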


@@ -0,0 +1,367 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Union, Mapping, cast
from typing_extensions import Literal, overload, assert_never
import httpx
from ... import _legacy_response
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
from ..._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...types.audio import translation_create_params
from ..._base_client import make_request_options
from ...types.audio_model import AudioModel
from ...types.audio.translation import Translation
from ...types.audio_response_format import AudioResponseFormat
from ...types.audio.translation_verbose import TranslationVerbose
__all__ = ["Translations", "AsyncTranslations"]
log: logging.Logger = logging.getLogger("openai.audio.translations")
class Translations(SyncAPIResource):
@cached_property
def with_raw_response(self) -> TranslationsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return TranslationsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> TranslationsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return TranslationsWithStreamingResponse(self)
@overload
def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Translation: ...
@overload
def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
response_format: Literal["verbose_json"],
prompt: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> TranslationVerbose: ...
@overload
def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
response_format: Literal["text", "srt", "vtt"],
prompt: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> str: ...
def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
prompt: str | NotGiven = NOT_GIVEN,
response_format: Union[Literal["json", "text", "srt", "verbose_json", "vtt"], NotGiven] = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Translation | TranslationVerbose | str:
"""
Translates audio into English.
Args:
file: The audio file object (not file name) to translate, in one of these formats: flac,
mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
model: ID of the model to use. Only `whisper-1` (which is powered by our open source
Whisper V2 model) is currently available.
prompt: An optional text to guide the model's style or continue a previous audio
segment. The
[prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
should be in English.
response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
`verbose_json`, or `vtt`.
temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
output more random, while lower values like 0.2 will make it more focused and
deterministic. If set to 0, the model will use
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
automatically increase the temperature until certain thresholds are hit.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
body = deepcopy_minimal(
{
"file": file,
"model": model,
"prompt": prompt,
"response_format": response_format,
"temperature": temperature,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return self._post( # type: ignore[return-value]
"/audio/translations",
body=maybe_transform(body, translation_create_params.TranslationCreateParams),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=_get_response_format_type(response_format),
)
class AsyncTranslations(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncTranslationsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncTranslationsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncTranslationsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncTranslationsWithStreamingResponse(self)
@overload
async def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Translation: ...
@overload
async def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
response_format: Literal["verbose_json"],
prompt: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> TranslationVerbose: ...
@overload
async def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
response_format: Literal["text", "srt", "vtt"],
prompt: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> str: ...
async def create(
self,
*,
file: FileTypes,
model: Union[str, AudioModel],
prompt: str | NotGiven = NOT_GIVEN,
response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Translation | TranslationVerbose | str:
"""
Translates audio into English.
Args:
file: The audio file object (not file name) to translate, in one of these formats: flac,
mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
model: ID of the model to use. Only `whisper-1` (which is powered by our open source
Whisper V2 model) is currently available.
prompt: An optional text to guide the model's style or continue a previous audio
segment. The
[prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
should be in English.
response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
`verbose_json`, or `vtt`.
temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
output more random, while lower values like 0.2 will make it more focused and
deterministic. If set to 0, the model will use
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
automatically increase the temperature until certain thresholds are hit.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
body = deepcopy_minimal(
{
"file": file,
"model": model,
"prompt": prompt,
"response_format": response_format,
"temperature": temperature,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return await self._post(
"/audio/translations",
body=await async_maybe_transform(body, translation_create_params.TranslationCreateParams),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=_get_response_format_type(response_format),
)
class TranslationsWithRawResponse:
def __init__(self, translations: Translations) -> None:
self._translations = translations
self.create = _legacy_response.to_raw_response_wrapper(
translations.create,
)
class AsyncTranslationsWithRawResponse:
def __init__(self, translations: AsyncTranslations) -> None:
self._translations = translations
self.create = _legacy_response.async_to_raw_response_wrapper(
translations.create,
)
class TranslationsWithStreamingResponse:
def __init__(self, translations: Translations) -> None:
self._translations = translations
self.create = to_streamed_response_wrapper(
translations.create,
)
class AsyncTranslationsWithStreamingResponse:
def __init__(self, translations: AsyncTranslations) -> None:
self._translations = translations
self.create = async_to_streamed_response_wrapper(
translations.create,
)
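# A minimal sketch of what the wrapper classes above enable (the file name and
# header key are illustrative):
#
#     # Raw response: inspect headers, then parse the body explicitly.
#     response = client.audio.translations.with_raw_response.create(
#         model="whisper-1",
#         file=open("speech.mp3", "rb"),
#     )
#     print(response.headers.get("x-request-id"))
#     translation = response.parse()
#
#     # Streaming response: the body is not read eagerly.
#     with client.audio.translations.with_streaming_response.create(
#         model="whisper-1",
#         file=open("speech.mp3", "rb"),
#     ) as response:
#         print(response.headers)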
def _get_response_format_type(
response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven,
) -> type[Translation | TranslationVerbose | str]:
if isinstance(response_format, NotGiven) or response_format is None: # pyright: ignore[reportUnnecessaryComparison]
return Translation
if response_format == "json":
return Translation
elif response_format == "verbose_json":
return TranslationVerbose
elif response_format == "srt" or response_format == "text" or response_format == "vtt":
return str
elif TYPE_CHECKING:  # type: ignore[unreachable]
assert_never(response_format)
else:
log.warning("Unexpected audio response format: %s", response_format)
# Fall back to the default translation type.
return Translation
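# Illustrative mapping, following the branches above:
#
#     _get_response_format_type(NOT_GIVEN)       -> Translation
#     _get_response_format_type("json")          -> Translation
#     _get_response_format_type("verbose_json")  -> TranslationVerbose
#     _get_response_format_type("srt")           -> str  (same for "text" and "vtt")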

View File

@@ -0,0 +1,514 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Optional
from typing_extensions import Literal
import httpx
from .. import _legacy_response
from ..types import batch_list_params, batch_create_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from .._utils import maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..pagination import SyncCursorPage, AsyncCursorPage
from ..types.batch import Batch
from .._base_client import AsyncPaginator, make_request_options
from ..types.shared_params.metadata import Metadata
__all__ = ["Batches", "AsyncBatches"]
class Batches(SyncAPIResource):
@cached_property
def with_raw_response(self) -> BatchesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return BatchesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> BatchesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return BatchesWithStreamingResponse(self)
def create(
self,
*,
completion_window: Literal["24h"],
endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
input_file_id: str,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Batch:
"""
Creates and executes a batch from an uploaded file of requests
Args:
completion_window: The time frame within which the batch should be processed. Currently only `24h`
is supported.
endpoint: The endpoint to be used for all requests in the batch. Currently
`/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
are supported. Note that `/v1/embeddings` batches are also restricted to a
maximum of 50,000 embedding inputs across all requests in the batch.
input_file_id: The ID of an uploaded file that contains requests for the new batch.
See [upload file](https://platform.openai.com/docs/api-reference/files/create)
for how to upload a file.
Your input file must be formatted as a
[JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
and must be uploaded with the purpose `batch`. The file can contain up to 50,000
requests, and can be up to 200 MB in size.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._post(
"/batches",
body=maybe_transform(
{
"completion_window": completion_window,
"endpoint": endpoint,
"input_file_id": input_file_id,
"metadata": metadata,
},
batch_create_params.BatchCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Batch,
)
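# An end-to-end sketch for the method above (assumes a local "requests.jsonl"
# file formatted for the Batch API; the file name and metadata are illustrative):
#
#     from openai import OpenAI
#
#     client = OpenAI()
#     batch_input = client.files.create(
#         file=open("requests.jsonl", "rb"),
#         purpose="batch",
#     )
#     batch = client.batches.create(
#         input_file_id=batch_input.id,
#         endpoint="/v1/chat/completions",
#         completion_window="24h",
#         metadata={"job": "nightly-eval"},
#     )
#     print(batch.id, batch.status)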
def retrieve(
self,
batch_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Batch:
"""
Retrieves a batch.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not batch_id:
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
return self._get(
f"/batches/{batch_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Batch,
)
def list(
self,
*,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> SyncCursorPage[Batch]:
"""List your organization's batches.
Args:
after: A cursor for use in pagination.
`after` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
ending with obj_foo, your subsequent call can include after=obj_foo in order to
fetch the next page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/batches",
page=SyncCursorPage[Batch],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
},
batch_list_params.BatchListParams,
),
),
model=Batch,
)
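# The returned `SyncCursorPage[Batch]` auto-paginates when iterated, so the
# `after` cursor rarely needs to be handled by hand (sketch):
#
#     for batch in client.batches.list(limit=20):
#         print(batch.id, batch.status)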
def cancel(
self,
batch_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Batch:
"""Cancels an in-progress batch.
The batch will be in status `cancelling` for up to
10 minutes, before changing to `cancelled`, where it will have partial results
(if any) available in the output file.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not batch_id:
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
return self._post(
f"/batches/{batch_id}/cancel",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Batch,
)
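# A sketch combining `retrieve` and `cancel` (`batch_id` is an existing batch
# ID; the status strings follow the Batch lifecycle and the poll interval is
# arbitrary):
#
#     import time
#
#     batch = client.batches.retrieve(batch_id)
#     while batch.status in ("validating", "in_progress", "finalizing"):
#         time.sleep(60)
#         batch = client.batches.retrieve(batch_id)
#
#     # Or stop early; partial results (if any) land in the output file.
#     batch = client.batches.cancel(batch_id)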
class AsyncBatches(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncBatchesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncBatchesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncBatchesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncBatchesWithStreamingResponse(self)
async def create(
self,
*,
completion_window: Literal["24h"],
endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
input_file_id: str,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Batch:
"""
Creates and executes a batch from an uploaded file of requests
Args:
completion_window: The time frame within which the batch should be processed. Currently only `24h`
is supported.
endpoint: The endpoint to be used for all requests in the batch. Currently
`/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
are supported. Note that `/v1/embeddings` batches are also restricted to a
maximum of 50,000 embedding inputs across all requests in the batch.
input_file_id: The ID of an uploaded file that contains requests for the new batch.
See [upload file](https://platform.openai.com/docs/api-reference/files/create)
for how to upload a file.
Your input file must be formatted as a
[JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
and must be uploaded with the purpose `batch`. The file can contain up to 50,000
requests, and can be up to 200 MB in size.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return await self._post(
"/batches",
body=await async_maybe_transform(
{
"completion_window": completion_window,
"endpoint": endpoint,
"input_file_id": input_file_id,
"metadata": metadata,
},
batch_create_params.BatchCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Batch,
)
async def retrieve(
self,
batch_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Batch:
"""
Retrieves a batch.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not batch_id:
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
return await self._get(
f"/batches/{batch_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Batch,
)
def list(
self,
*,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AsyncPaginator[Batch, AsyncCursorPage[Batch]]:
"""List your organization's batches.
Args:
after: A cursor for use in pagination.
`after` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
ending with obj_foo, your subsequent call can include after=obj_foo in order to
fetch the next page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/batches",
page=AsyncCursorPage[Batch],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
},
batch_list_params.BatchListParams,
),
),
model=Batch,
)
async def cancel(
self,
batch_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Batch:
"""Cancels an in-progress batch.
The batch will be in status `cancelling` for up to
10 minutes, before changing to `cancelled`, where it will have partial results
(if any) available in the output file.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not batch_id:
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
return await self._post(
f"/batches/{batch_id}/cancel",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Batch,
)
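# The async variants mirror the sync API one-to-one; a sketch (`batch_id` is
# an existing batch ID, purely illustrative):
#
#     import asyncio
#     from openai import AsyncOpenAI
#
#     async def main() -> None:
#         client = AsyncOpenAI()
#         batch = await client.batches.retrieve(batch_id)
#         if batch.status == "in_progress":
#             batch = await client.batches.cancel(batch_id)
#
#     asyncio.run(main())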
class BatchesWithRawResponse:
def __init__(self, batches: Batches) -> None:
self._batches = batches
self.create = _legacy_response.to_raw_response_wrapper(
batches.create,
)
self.retrieve = _legacy_response.to_raw_response_wrapper(
batches.retrieve,
)
self.list = _legacy_response.to_raw_response_wrapper(
batches.list,
)
self.cancel = _legacy_response.to_raw_response_wrapper(
batches.cancel,
)
class AsyncBatchesWithRawResponse:
def __init__(self, batches: AsyncBatches) -> None:
self._batches = batches
self.create = _legacy_response.async_to_raw_response_wrapper(
batches.create,
)
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
batches.retrieve,
)
self.list = _legacy_response.async_to_raw_response_wrapper(
batches.list,
)
self.cancel = _legacy_response.async_to_raw_response_wrapper(
batches.cancel,
)
class BatchesWithStreamingResponse:
def __init__(self, batches: Batches) -> None:
self._batches = batches
self.create = to_streamed_response_wrapper(
batches.create,
)
self.retrieve = to_streamed_response_wrapper(
batches.retrieve,
)
self.list = to_streamed_response_wrapper(
batches.list,
)
self.cancel = to_streamed_response_wrapper(
batches.cancel,
)
class AsyncBatchesWithStreamingResponse:
def __init__(self, batches: AsyncBatches) -> None:
self._batches = batches
self.create = async_to_streamed_response_wrapper(
batches.create,
)
self.retrieve = async_to_streamed_response_wrapper(
batches.retrieve,
)
self.list = async_to_streamed_response_wrapper(
batches.list,
)
self.cancel = async_to_streamed_response_wrapper(
batches.cancel,
)

View File

@@ -0,0 +1,47 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .beta import (
Beta,
AsyncBeta,
BetaWithRawResponse,
AsyncBetaWithRawResponse,
BetaWithStreamingResponse,
AsyncBetaWithStreamingResponse,
)
from .threads import (
Threads,
AsyncThreads,
ThreadsWithRawResponse,
AsyncThreadsWithRawResponse,
ThreadsWithStreamingResponse,
AsyncThreadsWithStreamingResponse,
)
from .assistants import (
Assistants,
AsyncAssistants,
AssistantsWithRawResponse,
AsyncAssistantsWithRawResponse,
AssistantsWithStreamingResponse,
AsyncAssistantsWithStreamingResponse,
)
__all__ = [
"Assistants",
"AsyncAssistants",
"AssistantsWithRawResponse",
"AsyncAssistantsWithRawResponse",
"AssistantsWithStreamingResponse",
"AsyncAssistantsWithStreamingResponse",
"Threads",
"AsyncThreads",
"ThreadsWithRawResponse",
"AsyncThreadsWithRawResponse",
"ThreadsWithStreamingResponse",
"AsyncThreadsWithStreamingResponse",
"Beta",
"AsyncBeta",
"BetaWithRawResponse",
"AsyncBetaWithRawResponse",
"BetaWithStreamingResponse",
"AsyncBetaWithStreamingResponse",
]

File diff suppressed because it is too large

View File

@@ -0,0 +1,175 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from ..._compat import cached_property
from .chat.chat import Chat, AsyncChat
from .assistants import (
Assistants,
AsyncAssistants,
AssistantsWithRawResponse,
AsyncAssistantsWithRawResponse,
AssistantsWithStreamingResponse,
AsyncAssistantsWithStreamingResponse,
)
from ..._resource import SyncAPIResource, AsyncAPIResource
from .threads.threads import (
Threads,
AsyncThreads,
ThreadsWithRawResponse,
AsyncThreadsWithRawResponse,
ThreadsWithStreamingResponse,
AsyncThreadsWithStreamingResponse,
)
from .realtime.realtime import (
Realtime,
AsyncRealtime,
RealtimeWithRawResponse,
AsyncRealtimeWithRawResponse,
RealtimeWithStreamingResponse,
AsyncRealtimeWithStreamingResponse,
)
__all__ = ["Beta", "AsyncBeta"]
class Beta(SyncAPIResource):
@cached_property
def chat(self) -> Chat:
return Chat(self._client)
@cached_property
def realtime(self) -> Realtime:
return Realtime(self._client)
@cached_property
def assistants(self) -> Assistants:
return Assistants(self._client)
@cached_property
def threads(self) -> Threads:
return Threads(self._client)
@cached_property
def with_raw_response(self) -> BetaWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return BetaWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> BetaWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return BetaWithStreamingResponse(self)
class AsyncBeta(AsyncAPIResource):
@cached_property
def chat(self) -> AsyncChat:
return AsyncChat(self._client)
@cached_property
def realtime(self) -> AsyncRealtime:
return AsyncRealtime(self._client)
@cached_property
def assistants(self) -> AsyncAssistants:
return AsyncAssistants(self._client)
@cached_property
def threads(self) -> AsyncThreads:
return AsyncThreads(self._client)
@cached_property
def with_raw_response(self) -> AsyncBetaWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncBetaWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncBetaWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncBetaWithStreamingResponse(self)
class BetaWithRawResponse:
def __init__(self, beta: Beta) -> None:
self._beta = beta
@cached_property
def realtime(self) -> RealtimeWithRawResponse:
return RealtimeWithRawResponse(self._beta.realtime)
@cached_property
def assistants(self) -> AssistantsWithRawResponse:
return AssistantsWithRawResponse(self._beta.assistants)
@cached_property
def threads(self) -> ThreadsWithRawResponse:
return ThreadsWithRawResponse(self._beta.threads)
class AsyncBetaWithRawResponse:
def __init__(self, beta: AsyncBeta) -> None:
self._beta = beta
@cached_property
def realtime(self) -> AsyncRealtimeWithRawResponse:
return AsyncRealtimeWithRawResponse(self._beta.realtime)
@cached_property
def assistants(self) -> AsyncAssistantsWithRawResponse:
return AsyncAssistantsWithRawResponse(self._beta.assistants)
@cached_property
def threads(self) -> AsyncThreadsWithRawResponse:
return AsyncThreadsWithRawResponse(self._beta.threads)
class BetaWithStreamingResponse:
def __init__(self, beta: Beta) -> None:
self._beta = beta
@cached_property
def realtime(self) -> RealtimeWithStreamingResponse:
return RealtimeWithStreamingResponse(self._beta.realtime)
@cached_property
def assistants(self) -> AssistantsWithStreamingResponse:
return AssistantsWithStreamingResponse(self._beta.assistants)
@cached_property
def threads(self) -> ThreadsWithStreamingResponse:
return ThreadsWithStreamingResponse(self._beta.threads)
class AsyncBetaWithStreamingResponse:
def __init__(self, beta: AsyncBeta) -> None:
self._beta = beta
@cached_property
def realtime(self) -> AsyncRealtimeWithStreamingResponse:
return AsyncRealtimeWithStreamingResponse(self._beta.realtime)
@cached_property
def assistants(self) -> AsyncAssistantsWithStreamingResponse:
return AsyncAssistantsWithStreamingResponse(self._beta.assistants)
@cached_property
def threads(self) -> AsyncThreadsWithStreamingResponse:
return AsyncThreadsWithStreamingResponse(self._beta.threads)

View File

@@ -0,0 +1,11 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .chat import Chat, AsyncChat
from .completions import Completions, AsyncCompletions
__all__ = [
"Completions",
"AsyncCompletions",
"Chat",
"AsyncChat",
]

View File

@@ -0,0 +1,21 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from ...._compat import cached_property
from .completions import Completions, AsyncCompletions
from ...._resource import SyncAPIResource, AsyncAPIResource
__all__ = ["Chat", "AsyncChat"]
class Chat(SyncAPIResource):
@cached_property
def completions(self) -> Completions:
return Completions(self._client)
class AsyncChat(AsyncAPIResource):
@cached_property
def completions(self) -> AsyncCompletions:
return AsyncCompletions(self._client)

View File

@@ -0,0 +1,634 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Dict, List, Type, Union, Iterable, Optional, cast
from functools import partial
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...._streaming import Stream
from ....types.chat import completion_create_params
from ...._base_client import make_request_options
from ....lib._parsing import (
ResponseFormatT,
validate_input_tools as _validate_input_tools,
parse_chat_completion as _parse_chat_completion,
type_to_response_format_param as _type_to_response_format,
)
from ....types.chat_model import ChatModel
from ....lib.streaming.chat import ChatCompletionStreamManager, AsyncChatCompletionStreamManager
from ....types.shared_params import Metadata, ReasoningEffort
from ....types.chat.chat_completion import ChatCompletion
from ....types.chat.chat_completion_chunk import ChatCompletionChunk
from ....types.chat.parsed_chat_completion import ParsedChatCompletion
from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam
from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam
from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
from ....types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam
__all__ = ["Completions", "AsyncCompletions"]
class Completions(SyncAPIResource):
@cached_property
def with_raw_response(self) -> CompletionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return CompletionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> CompletionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return CompletionsWithStreamingResponse(self)
def parse(
self,
*,
messages: Iterable[ChatCompletionMessageParam],
model: Union[str, ChatModel],
audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
seed: Optional[int] | NotGiven = NOT_GIVEN,
service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ParsedChatCompletion[ResponseFormatT]:
"""Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
& returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.
You can pass a pydantic model to this method and it will automatically convert the model
into a JSON schema, send it to the API and parse the response content back into the given model.
This method will also automatically parse `function` tool calls if:
- You use the `openai.pydantic_function_tool()` helper method
- You mark your tool schema with `"strict": True`
Example usage:
```py
from pydantic import BaseModel
from openai import OpenAI
class Step(BaseModel):
explanation: str
output: str
class MathResponse(BaseModel):
steps: List[Step]
final_answer: str
client = OpenAI()
completion = client.beta.chat.completions.parse(
model="gpt-4o-2024-08-06",
messages=[
{"role": "system", "content": "You are a helpful math tutor."},
{"role": "user", "content": "solve 8x + 31 = 2"},
],
response_format=MathResponse,
)
message = completion.choices[0].message
if message.parsed:
print(message.parsed.steps)
print("answer: ", message.parsed.final_answer)
```
"""
_validate_input_tools(tools)
extra_headers = {
"X-Stainless-Helper-Method": "beta.chat.completions.parse",
**(extra_headers or {}),
}
def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
return _parse_chat_completion(
response_format=response_format,
chat_completion=raw_completion,
input_tools=tools,
)
return self._post(
"/chat/completions",
body=maybe_transform(
{
"messages": messages,
"model": model,
"audio": audio,
"frequency_penalty": frequency_penalty,
"function_call": function_call,
"functions": functions,
"logit_bias": logit_bias,
"logprobs": logprobs,
"max_completion_tokens": max_completion_tokens,
"max_tokens": max_tokens,
"metadata": metadata,
"modalities": modalities,
"n": n,
"parallel_tool_calls": parallel_tool_calls,
"prediction": prediction,
"presence_penalty": presence_penalty,
"reasoning_effort": reasoning_effort,
"response_format": _type_to_response_format(response_format),
"seed": seed,
"service_tier": service_tier,
"stop": stop,
"store": store,
"stream": False,
"stream_options": stream_options,
"temperature": temperature,
"tool_choice": tool_choice,
"tools": tools,
"top_logprobs": top_logprobs,
"top_p": top_p,
"user": user,
"web_search_options": web_search_options,
},
completion_create_params.CompletionCreateParams,
),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
post_parser=parser,
),
# we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
# in the `parser` function above
cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
stream=False,
)
def stream(
self,
*,
messages: Iterable[ChatCompletionMessageParam],
model: Union[str, ChatModel],
audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
seed: Optional[int] | NotGiven = NOT_GIVEN,
service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ChatCompletionStreamManager[ResponseFormatT]:
"""Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
and automatic accumulation of each delta.
This also supports all of the parsing utilities that `.parse()` does.
Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:
```py
with client.beta.chat.completions.stream(
model="gpt-4o-2024-08-06",
messages=[...],
) as stream:
for event in stream:
if event.type == "content.delta":
print(event.delta, flush=True, end="")
```
When the context manager is entered, a `ChatCompletionStream` instance is returned which, like `.create(stream=True)` is an iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).
When the context manager exits, the response will be closed, however the `stream` instance is still available outside
the context manager.
"""
# Validate tools up front, matching the async variant below.
_validate_input_tools(tools)
extra_headers = {
"X-Stainless-Helper-Method": "beta.chat.completions.stream",
**(extra_headers or {}),
}
api_request: partial[Stream[ChatCompletionChunk]] = partial(
self._client.chat.completions.create,
messages=messages,
model=model,
audio=audio,
stream=True,
response_format=_type_to_response_format(response_format),
frequency_penalty=frequency_penalty,
function_call=function_call,
functions=functions,
logit_bias=logit_bias,
logprobs=logprobs,
max_completion_tokens=max_completion_tokens,
max_tokens=max_tokens,
metadata=metadata,
modalities=modalities,
n=n,
parallel_tool_calls=parallel_tool_calls,
prediction=prediction,
presence_penalty=presence_penalty,
reasoning_effort=reasoning_effort,
seed=seed,
service_tier=service_tier,
store=store,
stop=stop,
stream_options=stream_options,
temperature=temperature,
tool_choice=tool_choice,
tools=tools,
top_logprobs=top_logprobs,
top_p=top_p,
user=user,
web_search_options=web_search_options,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
)
return ChatCompletionStreamManager(
api_request,
response_format=response_format,
input_tools=tools,
)
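# As the docstring notes, the `stream` instance remains usable after the
# context manager exits; a sketch using the accumulated state (the helper
# follows the streaming helpers documented in helpers.md):
#
#     with client.beta.chat.completions.stream(
#         model="gpt-4o-2024-08-06",
#         messages=[{"role": "user", "content": "say hi"}],
#     ) as stream:
#         for event in stream:
#             if event.type == "content.delta":
#                 print(event.delta, flush=True, end="")
#     completion = stream.get_final_completion()
#     print(completion.choices[0].message.content)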
class AsyncCompletions(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncCompletionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncCompletionsWithStreamingResponse(self)
async def parse(
self,
*,
messages: Iterable[ChatCompletionMessageParam],
model: Union[str, ChatModel],
audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
seed: Optional[int] | NotGiven = NOT_GIVEN,
service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ParsedChatCompletion[ResponseFormatT]:
"""Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
& returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.
You can pass a pydantic model to this method and it will automatically convert the model
into a JSON schema, send it to the API and parse the response content back into the given model.
This method will also automatically parse `function` tool calls if:
- You use the `openai.pydantic_function_tool()` helper method
- You mark your tool schema with `"strict": True`
Example usage:
```py
from pydantic import BaseModel
from openai import AsyncOpenAI
class Step(BaseModel):
explanation: str
output: str
class MathResponse(BaseModel):
steps: List[Step]
final_answer: str
client = AsyncOpenAI()
completion = await client.beta.chat.completions.parse(
model="gpt-4o-2024-08-06",
messages=[
{"role": "system", "content": "You are a helpful math tutor."},
{"role": "user", "content": "solve 8x + 31 = 2"},
],
response_format=MathResponse,
)
message = completion.choices[0].message
if message.parsed:
print(message.parsed.steps)
print("answer: ", message.parsed.final_answer)
```
"""
_validate_input_tools(tools)
extra_headers = {
"X-Stainless-Helper-Method": "beta.chat.completions.parse",
**(extra_headers or {}),
}
def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
return _parse_chat_completion(
response_format=response_format,
chat_completion=raw_completion,
input_tools=tools,
)
return await self._post(
"/chat/completions",
body=await async_maybe_transform(
{
"messages": messages,
"model": model,
"audio": audio,
"frequency_penalty": frequency_penalty,
"function_call": function_call,
"functions": functions,
"logit_bias": logit_bias,
"logprobs": logprobs,
"max_completion_tokens": max_completion_tokens,
"max_tokens": max_tokens,
"metadata": metadata,
"modalities": modalities,
"n": n,
"parallel_tool_calls": parallel_tool_calls,
"prediction": prediction,
"presence_penalty": presence_penalty,
"reasoning_effort": reasoning_effort,
"response_format": _type_to_response_format(response_format),
"seed": seed,
"service_tier": service_tier,
"store": store,
"stop": stop,
"stream": False,
"stream_options": stream_options,
"temperature": temperature,
"tool_choice": tool_choice,
"tools": tools,
"top_logprobs": top_logprobs,
"top_p": top_p,
"user": user,
"web_search_options": web_search_options,
},
completion_create_params.CompletionCreateParams,
),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
post_parser=parser,
),
# we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
# in the `parser` function above
cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
stream=False,
)
def stream(
self,
*,
messages: Iterable[ChatCompletionMessageParam],
model: Union[str, ChatModel],
audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
seed: Optional[int] | NotGiven = NOT_GIVEN,
service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AsyncChatCompletionStreamManager[ResponseFormatT]:
"""Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
and automatic accumulation of each delta.
This also supports all of the parsing utilities that `.parse()` does.
Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:
```py
async with client.beta.chat.completions.stream(
model="gpt-4o-2024-08-06",
messages=[...],
) as stream:
async for event in stream:
if event.type == "content.delta":
print(event.delta, flush=True, end="")
```
When the context manager is entered, an `AsyncChatCompletionStream` instance is returned which, like `.create(stream=True)` is an async iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).
When the context manager exits, the response will be closed, however the `stream` instance is still available outside
the context manager.
"""
_validate_input_tools(tools)
extra_headers = {
"X-Stainless-Helper-Method": "beta.chat.completions.stream",
**(extra_headers or {}),
}
api_request = self._client.chat.completions.create(
messages=messages,
model=model,
audio=audio,
stream=True,
response_format=_type_to_response_format(response_format),
frequency_penalty=frequency_penalty,
function_call=function_call,
functions=functions,
logit_bias=logit_bias,
logprobs=logprobs,
max_completion_tokens=max_completion_tokens,
max_tokens=max_tokens,
metadata=metadata,
modalities=modalities,
n=n,
parallel_tool_calls=parallel_tool_calls,
prediction=prediction,
presence_penalty=presence_penalty,
reasoning_effort=reasoning_effort,
seed=seed,
service_tier=service_tier,
stop=stop,
store=store,
stream_options=stream_options,
temperature=temperature,
tool_choice=tool_choice,
tools=tools,
top_logprobs=top_logprobs,
top_p=top_p,
user=user,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
web_search_options=web_search_options,
)
return AsyncChatCompletionStreamManager(
api_request,
response_format=response_format,
input_tools=tools,
)
class CompletionsWithRawResponse:
def __init__(self, completions: Completions) -> None:
self._completions = completions
self.parse = _legacy_response.to_raw_response_wrapper(
completions.parse,
)
class AsyncCompletionsWithRawResponse:
def __init__(self, completions: AsyncCompletions) -> None:
self._completions = completions
self.parse = _legacy_response.async_to_raw_response_wrapper(
completions.parse,
)
class CompletionsWithStreamingResponse:
def __init__(self, completions: Completions) -> None:
self._completions = completions
self.parse = to_streamed_response_wrapper(
completions.parse,
)
class AsyncCompletionsWithStreamingResponse:
def __init__(self, completions: AsyncCompletions) -> None:
self._completions = completions
self.parse = async_to_streamed_response_wrapper(
completions.parse,
)

View File

@@ -0,0 +1,47 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .realtime import (
Realtime,
AsyncRealtime,
RealtimeWithRawResponse,
AsyncRealtimeWithRawResponse,
RealtimeWithStreamingResponse,
AsyncRealtimeWithStreamingResponse,
)
from .sessions import (
Sessions,
AsyncSessions,
SessionsWithRawResponse,
AsyncSessionsWithRawResponse,
SessionsWithStreamingResponse,
AsyncSessionsWithStreamingResponse,
)
from .transcription_sessions import (
TranscriptionSessions,
AsyncTranscriptionSessions,
TranscriptionSessionsWithRawResponse,
AsyncTranscriptionSessionsWithRawResponse,
TranscriptionSessionsWithStreamingResponse,
AsyncTranscriptionSessionsWithStreamingResponse,
)
__all__ = [
"Sessions",
"AsyncSessions",
"SessionsWithRawResponse",
"AsyncSessionsWithRawResponse",
"SessionsWithStreamingResponse",
"AsyncSessionsWithStreamingResponse",
"TranscriptionSessions",
"AsyncTranscriptionSessions",
"TranscriptionSessionsWithRawResponse",
"AsyncTranscriptionSessionsWithRawResponse",
"TranscriptionSessionsWithStreamingResponse",
"AsyncTranscriptionSessionsWithStreamingResponse",
"Realtime",
"AsyncRealtime",
"RealtimeWithRawResponse",
"AsyncRealtimeWithRawResponse",
"RealtimeWithStreamingResponse",
"AsyncRealtimeWithStreamingResponse",
]

File diff suppressed because it is too large

View File

@@ -0,0 +1,388 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List, Union, Iterable
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...._base_client import make_request_options
from ....types.beta.realtime import session_create_params
from ....types.beta.realtime.session_create_response import SessionCreateResponse
__all__ = ["Sessions", "AsyncSessions"]
class Sessions(SyncAPIResource):
@cached_property
def with_raw_response(self) -> SessionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return SessionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> SessionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return SessionsWithStreamingResponse(self)
def create(
self,
*,
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
input_audio_noise_reduction: session_create_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN,
input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
instructions: str | NotGiven = NOT_GIVEN,
max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
model: Literal[
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
"gpt-4o-realtime-preview-2024-12-17",
"gpt-4o-mini-realtime-preview",
"gpt-4o-mini-realtime-preview-2024-12-17",
]
| NotGiven = NOT_GIVEN,
output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
tool_choice: str | NotGiven = NOT_GIVEN,
tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN,
turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
voice: Union[
str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
]
| NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> SessionCreateResponse:
"""
Create an ephemeral API token for use in client-side applications with the
Realtime API. Can be configured with the same session parameters as the
`session.update` client event.
It responds with a session object, plus a `client_secret` key which contains a
usable ephemeral API token that can be used to authenticate browser clients for
the Realtime API.
Args:
input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
`pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
(mono), and little-endian byte order.
input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
off. Noise reduction filters audio added to the input audio buffer before it is
sent to VAD and the model. Filtering the audio can improve VAD and turn
detection accuracy (reducing false positives) and model performance by improving
perception of the input audio.
input_audio_transcription: Configuration for input audio transcription. Defaults to off and can be set to
`null` to turn it off once enabled. Input audio transcription is not native to the
model, since the model consumes audio directly. Transcription runs
asynchronously through
[the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
and should be treated as guidance of input audio content rather than precisely
what the model heard. The client can optionally set the language and prompt for
transcription; these offer additional guidance to the transcription service.
instructions: The default system instructions (i.e. system message) prepended to model calls.
This field allows the client to guide the model on desired responses. The model
can be instructed on response content and format (e.g. "be extremely succinct",
"act friendly", "here are examples of good responses") and on audio behavior
(e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
instructions are not guaranteed to be followed by the model, but they provide
guidance to the model on the desired behavior.
Note that the server sets default instructions, which are used if this field
is not set and are visible in the `session.created` event at the start of the
session.
max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
`inf` for the maximum available tokens for a given model. Defaults to `inf`.
modalities: The set of modalities the model can respond with. To disable audio, set this to
["text"].
model: The Realtime model used for this session.
output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
For `pcm16`, output audio is sampled at a rate of 24kHz.
temperature: Sampling temperature for the model, limited to [0.6, 1.2]. For audio models, a
temperature of 0.8 is highly recommended for best performance.
tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify
a function.
tools: Tools (functions) available to the model.
turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be
set to `null` to turn off, in which case the client must manually trigger model
response. Server VAD means that the model will detect the start and end of
speech based on audio volume and respond at the end of user speech. Semantic VAD
is more advanced and uses a turn detection model (in conjunction with VAD) to
semantically estimate whether the user has finished speaking, then dynamically
sets a timeout based on this probability. For example, if user audio trails off
with "uhhm", the model will score a low probability of turn end and wait longer
for the user to continue speaking. This can be useful for more natural
conversations, but may have a higher latency.
voice: The voice the model uses to respond. Voice cannot be changed during the session
once the model has responded with audio at least once. Current voice options are
`alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
`shimmer`, and `verse`.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
"/realtime/sessions",
body=maybe_transform(
{
"input_audio_format": input_audio_format,
"input_audio_noise_reduction": input_audio_noise_reduction,
"input_audio_transcription": input_audio_transcription,
"instructions": instructions,
"max_response_output_tokens": max_response_output_tokens,
"modalities": modalities,
"model": model,
"output_audio_format": output_audio_format,
"temperature": temperature,
"tool_choice": tool_choice,
"tools": tools,
"turn_detection": turn_detection,
"voice": voice,
},
session_create_params.SessionCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=SessionCreateResponse,
)
class AsyncSessions(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncSessionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncSessionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncSessionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncSessionsWithStreamingResponse(self)
async def create(
self,
*,
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
input_audio_noise_reduction: session_create_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN,
input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
instructions: str | NotGiven = NOT_GIVEN,
max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
model: Literal[
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
"gpt-4o-realtime-preview-2024-12-17",
"gpt-4o-mini-realtime-preview",
"gpt-4o-mini-realtime-preview-2024-12-17",
]
| NotGiven = NOT_GIVEN,
output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
tool_choice: str | NotGiven = NOT_GIVEN,
tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN,
turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
voice: Union[
str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
]
| NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> SessionCreateResponse:
"""
Create an ephemeral API token for use in client-side applications with the
Realtime API. Can be configured with the same session parameters as the
`session.update` client event.
It responds with a session object, plus a `client_secret` key containing an
ephemeral API token that browser clients can use to authenticate with the
Realtime API.
Args:
input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
`pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
(mono), and little-endian byte order.
input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
off. Noise reduction filters audio added to the input audio buffer before it is
sent to VAD and the model. Filtering the audio can improve VAD and turn
detection accuracy (reducing false positives) and model performance by improving
perception of the input audio.
input_audio_transcription: Configuration for input audio transcription, which defaults to off and,
once enabled, can be set to `null` to turn it off again. Input audio transcription is not native to the
model, since the model consumes audio directly. Transcription runs
asynchronously through
[the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
and should be treated as guidance of input audio content rather than precisely
what the model heard. The client can optionally set the language and prompt for
transcription; these offer additional guidance to the transcription service.
instructions: The default system instructions (i.e. system message) prepended to model calls.
This field allows the client to guide the model on desired responses. The model
can be instructed on response content and format (e.g. "be extremely succinct",
"act friendly", "here are examples of good responses") and on audio behavior
(e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
instructions are not guaranteed to be followed by the model, but they provide
guidance to the model on the desired behavior.
Note that the server sets default instructions, which are used if this field
is not set and are visible in the `session.created` event at the start of the
session.
max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
`inf` for the maximum available tokens for a given model. Defaults to `inf`.
modalities: The set of modalities the model can respond with. To disable audio, set this to
["text"].
model: The Realtime model used for this session.
output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
For `pcm16`, output audio is sampled at a rate of 24kHz.
temperature: Sampling temperature for the model, limited to [0.6, 1.2]. For audio models, a
temperature of 0.8 is highly recommended for best performance.
tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify
a function.
tools: Tools (functions) available to the model.
turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be
set to `null` to turn off, in which case the client must manually trigger model
response. Server VAD means that the model will detect the start and end of
speech based on audio volume and respond at the end of user speech. Semantic VAD
is more advanced and uses a turn detection model (in conjunction with VAD) to
semantically estimate whether the user has finished speaking, then dynamically
sets a timeout based on this probability. For example, if user audio trails off
with "uhhm", the model will score a low probability of turn end and wait longer
for the user to continue speaking. This can be useful for more natural
conversations, but may have a higher latency.
voice: The voice the model uses to respond. Voice cannot be changed during the session
once the model has responded with audio at least once. Current voice options are
`alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
`shimmer`, and `verse`.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
"/realtime/sessions",
body=await async_maybe_transform(
{
"input_audio_format": input_audio_format,
"input_audio_noise_reduction": input_audio_noise_reduction,
"input_audio_transcription": input_audio_transcription,
"instructions": instructions,
"max_response_output_tokens": max_response_output_tokens,
"modalities": modalities,
"model": model,
"output_audio_format": output_audio_format,
"temperature": temperature,
"tool_choice": tool_choice,
"tools": tools,
"turn_detection": turn_detection,
"voice": voice,
},
session_create_params.SessionCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=SessionCreateResponse,
)
class SessionsWithRawResponse:
def __init__(self, sessions: Sessions) -> None:
self._sessions = sessions
self.create = _legacy_response.to_raw_response_wrapper(
sessions.create,
)
class AsyncSessionsWithRawResponse:
def __init__(self, sessions: AsyncSessions) -> None:
self._sessions = sessions
self.create = _legacy_response.async_to_raw_response_wrapper(
sessions.create,
)
class SessionsWithStreamingResponse:
def __init__(self, sessions: Sessions) -> None:
self._sessions = sessions
self.create = to_streamed_response_wrapper(
sessions.create,
)
class AsyncSessionsWithStreamingResponse:
def __init__(self, sessions: AsyncSessions) -> None:
self._sessions = sessions
self.create = async_to_streamed_response_wrapper(
sessions.create,
)
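
A minimal usage sketch (an annotation, not part of the generated module above): minting an ephemeral Realtime token on a trusted server and handing its `client_secret` to a browser client. It assumes the SDK's top-level `OpenAI` client and an `OPENAI_API_KEY` in the environment; the model, modalities, and voice values are illustrative.

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment
session = client.beta.realtime.sessions.create(
    model="gpt-4o-realtime-preview",
    modalities=["audio", "text"],
    voice="verse",
)
# The response carries a `client_secret` with the ephemeral token for the browser.
print(session.client_secret)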

View File

@@ -0,0 +1,274 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...._base_client import make_request_options
from ....types.beta.realtime import transcription_session_create_params
from ....types.beta.realtime.transcription_session import TranscriptionSession
__all__ = ["TranscriptionSessions", "AsyncTranscriptionSessions"]
class TranscriptionSessions(SyncAPIResource):
@cached_property
def with_raw_response(self) -> TranscriptionSessionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return TranscriptionSessionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> TranscriptionSessionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return TranscriptionSessionsWithStreamingResponse(self)
def create(
self,
*,
include: List[str] | NotGiven = NOT_GIVEN,
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
input_audio_noise_reduction: transcription_session_create_params.InputAudioNoiseReduction
| NotGiven = NOT_GIVEN,
input_audio_transcription: transcription_session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
turn_detection: transcription_session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> TranscriptionSession:
"""
Create an ephemeral API token for use in client-side applications with the
Realtime API specifically for realtime transcriptions. Can be configured with
the same session parameters as the `transcription_session.update` client event.
It responds with a session object, plus a `client_secret` key containing an
ephemeral API token that browser clients can use to authenticate with the
Realtime API.
Args:
include:
The set of items to include in the transcription. Currently available items are:
- `item.input_audio_transcription.logprobs`
input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
`pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
(mono), and little-endian byte order.
input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
off. Noise reduction filters audio added to the input audio buffer before it is
sent to VAD and the model. Filtering the audio can improve VAD and turn
detection accuracy (reducing false positives) and model performance by improving
perception of the input audio.
input_audio_transcription: Configuration for input audio transcription. The client can optionally set the
language and prompt for transcription; these offer additional guidance to the
transcription service.
modalities: The set of modalities the model can respond with. To disable audio, set this to
["text"].
turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be
set to `null` to turn off, in which case the client must manually trigger model
response. Server VAD means that the model will detect the start and end of
speech based on audio volume and respond at the end of user speech. Semantic VAD
is more advanced and uses a turn detection model (in conjunction with VAD) to
semantically estimate whether the user has finished speaking, then dynamically
sets a timeout based on this probability. For example, if user audio trails off
with "uhhm", the model will score a low probability of turn end and wait longer
for the user to continue speaking. This can be useful for more natural
conversations, but may have a higher latency.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
"/realtime/transcription_sessions",
body=maybe_transform(
{
"include": include,
"input_audio_format": input_audio_format,
"input_audio_noise_reduction": input_audio_noise_reduction,
"input_audio_transcription": input_audio_transcription,
"modalities": modalities,
"turn_detection": turn_detection,
},
transcription_session_create_params.TranscriptionSessionCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=TranscriptionSession,
)
class AsyncTranscriptionSessions(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncTranscriptionSessionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncTranscriptionSessionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncTranscriptionSessionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncTranscriptionSessionsWithStreamingResponse(self)
async def create(
self,
*,
include: List[str] | NotGiven = NOT_GIVEN,
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
input_audio_noise_reduction: transcription_session_create_params.InputAudioNoiseReduction
| NotGiven = NOT_GIVEN,
input_audio_transcription: transcription_session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
turn_detection: transcription_session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> TranscriptionSession:
"""
Create an ephemeral API token for use in client-side applications with the
Realtime API specifically for realtime transcriptions. Can be configured with
the same session parameters as the `transcription_session.update` client event.
It responds with a session object, plus a `client_secret` key containing an
ephemeral API token that browser clients can use to authenticate with the
Realtime API.
Args:
include:
The set of items to include in the transcription. Currently available items are:
- `item.input_audio_transcription.logprobs`
input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
`pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
(mono), and little-endian byte order.
input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
off. Noise reduction filters audio added to the input audio buffer before it is
sent to VAD and the model. Filtering the audio can improve VAD and turn
detection accuracy (reducing false positives) and model performance by improving
perception of the input audio.
input_audio_transcription: Configuration for input audio transcription. The client can optionally set the
language and prompt for transcription; these offer additional guidance to the
transcription service.
modalities: The set of modalities the model can respond with. To disable audio, set this to
["text"].
turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be
set to `null` to turn off, in which case the client must manually trigger model
response. Server VAD means that the model will detect the start and end of
speech based on audio volume and respond at the end of user speech. Semantic VAD
is more advanced and uses a turn detection model (in conjunction with VAD) to
semantically estimate whether the user has finished speaking, then dynamically
sets a timeout based on this probability. For example, if user audio trails off
with "uhhm", the model will score a low probability of turn end and wait longer
for the user to continue speaking. This can be useful for more natural
conversations, but may have a higher latency.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
"/realtime/transcription_sessions",
body=await async_maybe_transform(
{
"include": include,
"input_audio_format": input_audio_format,
"input_audio_noise_reduction": input_audio_noise_reduction,
"input_audio_transcription": input_audio_transcription,
"modalities": modalities,
"turn_detection": turn_detection,
},
transcription_session_create_params.TranscriptionSessionCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=TranscriptionSession,
)
class TranscriptionSessionsWithRawResponse:
def __init__(self, transcription_sessions: TranscriptionSessions) -> None:
self._transcription_sessions = transcription_sessions
self.create = _legacy_response.to_raw_response_wrapper(
transcription_sessions.create,
)
class AsyncTranscriptionSessionsWithRawResponse:
def __init__(self, transcription_sessions: AsyncTranscriptionSessions) -> None:
self._transcription_sessions = transcription_sessions
self.create = _legacy_response.async_to_raw_response_wrapper(
transcription_sessions.create,
)
class TranscriptionSessionsWithStreamingResponse:
def __init__(self, transcription_sessions: TranscriptionSessions) -> None:
self._transcription_sessions = transcription_sessions
self.create = to_streamed_response_wrapper(
transcription_sessions.create,
)
class AsyncTranscriptionSessionsWithStreamingResponse:
def __init__(self, transcription_sessions: AsyncTranscriptionSessions) -> None:
self._transcription_sessions = transcription_sessions
self.create = async_to_streamed_response_wrapper(
transcription_sessions.create,
)
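
A minimal usage sketch (an annotation, not part of the generated module above) for transcription-only sessions, reusing the single documented `include` value from the docstring; client setup mirrors the sessions example and assumes an `OPENAI_API_KEY` in the environment.

from openai import OpenAI

client = OpenAI()
session = client.beta.realtime.transcription_sessions.create(
    include=["item.input_audio_transcription.logprobs"],
    input_audio_format="pcm16",
)
# As with regular Realtime sessions, `client_secret` holds the ephemeral token.
print(session.client_secret)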

View File

@@ -0,0 +1,47 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .runs import (
Runs,
AsyncRuns,
RunsWithRawResponse,
AsyncRunsWithRawResponse,
RunsWithStreamingResponse,
AsyncRunsWithStreamingResponse,
)
from .threads import (
Threads,
AsyncThreads,
ThreadsWithRawResponse,
AsyncThreadsWithRawResponse,
ThreadsWithStreamingResponse,
AsyncThreadsWithStreamingResponse,
)
from .messages import (
Messages,
AsyncMessages,
MessagesWithRawResponse,
AsyncMessagesWithRawResponse,
MessagesWithStreamingResponse,
AsyncMessagesWithStreamingResponse,
)
__all__ = [
"Runs",
"AsyncRuns",
"RunsWithRawResponse",
"AsyncRunsWithRawResponse",
"RunsWithStreamingResponse",
"AsyncRunsWithStreamingResponse",
"Messages",
"AsyncMessages",
"MessagesWithRawResponse",
"AsyncMessagesWithRawResponse",
"MessagesWithStreamingResponse",
"AsyncMessagesWithStreamingResponse",
"Threads",
"AsyncThreads",
"ThreadsWithRawResponse",
"AsyncThreadsWithRawResponse",
"ThreadsWithStreamingResponse",
"AsyncThreadsWithStreamingResponse",
]

View File

@@ -0,0 +1,667 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union, Iterable, Optional
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....pagination import SyncCursorPage, AsyncCursorPage
from ...._base_client import (
AsyncPaginator,
make_request_options,
)
from ....types.beta.threads import message_list_params, message_create_params, message_update_params
from ....types.beta.threads.message import Message
from ....types.shared_params.metadata import Metadata
from ....types.beta.threads.message_deleted import MessageDeleted
from ....types.beta.threads.message_content_part_param import MessageContentPartParam
__all__ = ["Messages", "AsyncMessages"]
class Messages(SyncAPIResource):
@cached_property
def with_raw_response(self) -> MessagesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return MessagesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> MessagesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return MessagesWithStreamingResponse(self)
def create(
self,
thread_id: str,
*,
content: Union[str, Iterable[MessageContentPartParam]],
role: Literal["user", "assistant"],
attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Message:
"""
Create a message.
Args:
content: The text contents of the message.
role:
The role of the entity that is creating the message. Allowed values include:
- `user`: Indicates the message is sent by an actual user and should be used in
most cases to represent user-generated messages.
- `assistant`: Indicates the message is generated by the assistant. Use this
value to insert messages from the assistant into the conversation.
attachments: A list of files attached to the message, and the tools they should be added to.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
f"/threads/{thread_id}/messages",
body=maybe_transform(
{
"content": content,
"role": role,
"attachments": attachments,
"metadata": metadata,
},
message_create_params.MessageCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Message,
)
def retrieve(
self,
message_id: str,
*,
thread_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Message:
"""
Retrieve a message.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not message_id:
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get(
f"/threads/{thread_id}/messages/{message_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Message,
)
def update(
self,
message_id: str,
*,
thread_id: str,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Message:
"""
Modifies a message.
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not message_id:
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
f"/threads/{thread_id}/messages/{message_id}",
body=maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Message,
)
def list(
self,
thread_id: str,
*,
after: str | NotGiven = NOT_GIVEN,
before: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
run_id: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> SyncCursorPage[Message]:
"""
Returns a list of messages for a given thread.
Args:
after: A cursor for use in pagination. `after` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
ending with obj_foo, your subsequent call can include after=obj_foo in order to
fetch the next page of the list.
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
starting with obj_foo, your subsequent call can include before=obj_foo in order
to fetch the previous page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
run_id: Filter messages by the run ID that generated them.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
f"/threads/{thread_id}/messages",
page=SyncCursorPage[Message],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"before": before,
"limit": limit,
"order": order,
"run_id": run_id,
},
message_list_params.MessageListParams,
),
),
model=Message,
)
def delete(
self,
message_id: str,
*,
thread_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> MessageDeleted:
"""
Deletes a message.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not message_id:
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._delete(
f"/threads/{thread_id}/messages/{message_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=MessageDeleted,
)
class AsyncMessages(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncMessagesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncMessagesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncMessagesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncMessagesWithStreamingResponse(self)
async def create(
self,
thread_id: str,
*,
content: Union[str, Iterable[MessageContentPartParam]],
role: Literal["user", "assistant"],
attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Message:
"""
Create a message.
Args:
content: The text contents of the message.
role:
The role of the entity that is creating the message. Allowed values include:
- `user`: Indicates the message is sent by an actual user and should be used in
most cases to represent user-generated messages.
- `assistant`: Indicates the message is generated by the assistant. Use this
value to insert messages from the assistant into the conversation.
attachments: A list of files attached to the message, and the tools they should be added to.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
f"/threads/{thread_id}/messages",
body=await async_maybe_transform(
{
"content": content,
"role": role,
"attachments": attachments,
"metadata": metadata,
},
message_create_params.MessageCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Message,
)
async def retrieve(
self,
message_id: str,
*,
thread_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Message:
"""
Retrieve a message.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not message_id:
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._get(
f"/threads/{thread_id}/messages/{message_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Message,
)
async def update(
self,
message_id: str,
*,
thread_id: str,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Message:
"""
Modifies a message.
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not message_id:
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
f"/threads/{thread_id}/messages/{message_id}",
body=await async_maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Message,
)
def list(
self,
thread_id: str,
*,
after: str | NotGiven = NOT_GIVEN,
before: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
run_id: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AsyncPaginator[Message, AsyncCursorPage[Message]]:
"""
Returns a list of messages for a given thread.
Args:
after: A cursor for use in pagination. `after` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
ending with obj_foo, your subsequent call can include after=obj_foo in order to
fetch the next page of the list.
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
starting with obj_foo, your subsequent call can include before=obj_foo in order
to fetch the previous page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
run_id: Filter messages by the run ID that generated them.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
f"/threads/{thread_id}/messages",
page=AsyncCursorPage[Message],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"before": before,
"limit": limit,
"order": order,
"run_id": run_id,
},
message_list_params.MessageListParams,
),
),
model=Message,
)
async def delete(
self,
message_id: str,
*,
thread_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> MessageDeleted:
"""
Deletes a message.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not message_id:
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._delete(
f"/threads/{thread_id}/messages/{message_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=MessageDeleted,
)
class MessagesWithRawResponse:
def __init__(self, messages: Messages) -> None:
self._messages = messages
self.create = _legacy_response.to_raw_response_wrapper(
messages.create,
)
self.retrieve = _legacy_response.to_raw_response_wrapper(
messages.retrieve,
)
self.update = _legacy_response.to_raw_response_wrapper(
messages.update,
)
self.list = _legacy_response.to_raw_response_wrapper(
messages.list,
)
self.delete = _legacy_response.to_raw_response_wrapper(
messages.delete,
)
class AsyncMessagesWithRawResponse:
def __init__(self, messages: AsyncMessages) -> None:
self._messages = messages
self.create = _legacy_response.async_to_raw_response_wrapper(
messages.create,
)
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
messages.retrieve,
)
self.update = _legacy_response.async_to_raw_response_wrapper(
messages.update,
)
self.list = _legacy_response.async_to_raw_response_wrapper(
messages.list,
)
self.delete = _legacy_response.async_to_raw_response_wrapper(
messages.delete,
)
class MessagesWithStreamingResponse:
def __init__(self, messages: Messages) -> None:
self._messages = messages
self.create = to_streamed_response_wrapper(
messages.create,
)
self.retrieve = to_streamed_response_wrapper(
messages.retrieve,
)
self.update = to_streamed_response_wrapper(
messages.update,
)
self.list = to_streamed_response_wrapper(
messages.list,
)
self.delete = to_streamed_response_wrapper(
messages.delete,
)
class AsyncMessagesWithStreamingResponse:
def __init__(self, messages: AsyncMessages) -> None:
self._messages = messages
self.create = async_to_streamed_response_wrapper(
messages.create,
)
self.retrieve = async_to_streamed_response_wrapper(
messages.retrieve,
)
self.update = async_to_streamed_response_wrapper(
messages.update,
)
self.list = async_to_streamed_response_wrapper(
messages.list,
)
self.delete = async_to_streamed_response_wrapper(
messages.delete,
)
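
A minimal usage sketch (an annotation, not part of the generated module above) for the message resource: creating a user message on an existing thread, then iterating the cursor page, which fetches further pages automatically. The thread ID is a hypothetical placeholder.

from openai import OpenAI

client = OpenAI()
thread_id = "thread_abc123"  # hypothetical ID of an existing thread

message = client.beta.threads.messages.create(
    thread_id,
    role="user",
    content="Summarize the attached file.",
)
# `list` returns a SyncCursorPage[Message]; iterating it auto-paginates.
for msg in client.beta.threads.messages.list(thread_id, order="asc"):
    print(msg.id, msg.role)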

View File

@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .runs import (
Runs,
AsyncRuns,
RunsWithRawResponse,
AsyncRunsWithRawResponse,
RunsWithStreamingResponse,
AsyncRunsWithStreamingResponse,
)
from .steps import (
Steps,
AsyncSteps,
StepsWithRawResponse,
AsyncStepsWithRawResponse,
StepsWithStreamingResponse,
AsyncStepsWithStreamingResponse,
)
__all__ = [
"Steps",
"AsyncSteps",
"StepsWithRawResponse",
"AsyncStepsWithRawResponse",
"StepsWithStreamingResponse",
"AsyncStepsWithStreamingResponse",
"Runs",
"AsyncRuns",
"RunsWithRawResponse",
"AsyncRunsWithRawResponse",
"RunsWithStreamingResponse",
"AsyncRunsWithStreamingResponse",
]

File diff suppressed because it is too large

View File

@@ -0,0 +1,378 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List
from typing_extensions import Literal
import httpx
from ..... import _legacy_response
from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ....._utils import maybe_transform, async_maybe_transform
from ....._compat import cached_property
from ....._resource import SyncAPIResource, AsyncAPIResource
from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .....pagination import SyncCursorPage, AsyncCursorPage
from ....._base_client import AsyncPaginator, make_request_options
from .....types.beta.threads.runs import step_list_params, step_retrieve_params
from .....types.beta.threads.runs.run_step import RunStep
from .....types.beta.threads.runs.run_step_include import RunStepInclude
__all__ = ["Steps", "AsyncSteps"]
class Steps(SyncAPIResource):
@cached_property
def with_raw_response(self) -> StepsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return StepsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> StepsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return StepsWithStreamingResponse(self)
def retrieve(
self,
step_id: str,
*,
thread_id: str,
run_id: str,
include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> RunStep:
"""
Retrieves a run step.
Args:
include: A list of additional fields to include in the response. Currently the only
supported value is `step_details.tool_calls[*].file_search.results[*].content`
to fetch the file search result content.
See the
[file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
if not step_id:
raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get(
f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams),
),
cast_to=RunStep,
)
def list(
self,
run_id: str,
*,
thread_id: str,
after: str | NotGiven = NOT_GIVEN,
before: str | NotGiven = NOT_GIVEN,
include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> SyncCursorPage[RunStep]:
"""
Returns a list of run steps belonging to a run.
Args:
after: A cursor for use in pagination. `after` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
ending with obj_foo, your subsequent call can include after=obj_foo in order to
fetch the next page of the list.
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
starting with obj_foo, your subsequent call can include before=obj_foo in order
to fetch the previous page of the list.
include: A list of additional fields to include in the response. Currently the only
supported value is `step_details.tool_calls[*].file_search.results[*].content`
to fetch the file search result content.
See the
[file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
f"/threads/{thread_id}/runs/{run_id}/steps",
page=SyncCursorPage[RunStep],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"before": before,
"include": include,
"limit": limit,
"order": order,
},
step_list_params.StepListParams,
),
),
model=RunStep,
)
class AsyncSteps(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncStepsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncStepsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncStepsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncStepsWithStreamingResponse(self)
async def retrieve(
self,
step_id: str,
*,
thread_id: str,
run_id: str,
include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> RunStep:
"""
Retrieves a run step.
Args:
include: A list of additional fields to include in the response. Currently the only
supported value is `step_details.tool_calls[*].file_search.results[*].content`
to fetch the file search result content.
See the
[file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
if not step_id:
raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._get(
f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=await async_maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams),
),
cast_to=RunStep,
)
def list(
self,
run_id: str,
*,
thread_id: str,
after: str | NotGiven = NOT_GIVEN,
before: str | NotGiven = NOT_GIVEN,
include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AsyncPaginator[RunStep, AsyncCursorPage[RunStep]]:
"""
Returns a list of run steps belonging to a run.
Args:
after: A cursor for use in pagination. `after` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
ending with obj_foo, your subsequent call can include after=obj_foo in order to
fetch the next page of the list.
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
starting with obj_foo, your subsequent call can include before=obj_foo in order
to fetch the previous page of the list.
include: A list of additional fields to include in the response. Currently the only
supported value is `step_details.tool_calls[*].file_search.results[*].content`
to fetch the file search result content.
See the
[file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
f"/threads/{thread_id}/runs/{run_id}/steps",
page=AsyncCursorPage[RunStep],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"before": before,
"include": include,
"limit": limit,
"order": order,
},
step_list_params.StepListParams,
),
),
model=RunStep,
)
class StepsWithRawResponse:
def __init__(self, steps: Steps) -> None:
self._steps = steps
self.retrieve = _legacy_response.to_raw_response_wrapper(
steps.retrieve,
)
self.list = _legacy_response.to_raw_response_wrapper(
steps.list,
)
class AsyncStepsWithRawResponse:
def __init__(self, steps: AsyncSteps) -> None:
self._steps = steps
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
steps.retrieve,
)
self.list = _legacy_response.async_to_raw_response_wrapper(
steps.list,
)
class StepsWithStreamingResponse:
def __init__(self, steps: Steps) -> None:
self._steps = steps
self.retrieve = to_streamed_response_wrapper(
steps.retrieve,
)
self.list = to_streamed_response_wrapper(
steps.list,
)
class AsyncStepsWithStreamingResponse:
def __init__(self, steps: AsyncSteps) -> None:
self._steps = steps
self.retrieve = async_to_streamed_response_wrapper(
steps.retrieve,
)
self.list = async_to_streamed_response_wrapper(
steps.list,
)
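
Taken together, the Steps resource above exposes plain, raw, and streaming access to run steps. A minimal usage sketch, assuming an `OpenAI` client configured via `OPENAI_API_KEY`; the thread, run, and step IDs are placeholders:

from openai import OpenAI

client = OpenAI()

# Plain call: the cursor page iterates across result pages for you.
steps = client.beta.threads.runs.steps.list(
    run_id="run_abc123",        # placeholder ID
    thread_id="thread_abc123",  # placeholder ID
    order="asc",
)
for step in steps:
    print(step.id, step.type)

# Raw variant: same call, but response headers are exposed before parsing.
raw = client.beta.threads.runs.steps.with_raw_response.retrieve(
    "step_abc123",              # placeholder ID
    thread_id="thread_abc123",
    run_id="run_abc123",
)
print(raw.headers.get("x-request-id"))
step = raw.parse()

As the docstrings note, passing include=["step_details.tool_calls[*].file_search.results[*].content"] additionally fetches file-search result content.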

File diff suppressed because it is too large

View File

@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .chat import (
Chat,
AsyncChat,
ChatWithRawResponse,
AsyncChatWithRawResponse,
ChatWithStreamingResponse,
AsyncChatWithStreamingResponse,
)
from .completions import (
Completions,
AsyncCompletions,
CompletionsWithRawResponse,
AsyncCompletionsWithRawResponse,
CompletionsWithStreamingResponse,
AsyncCompletionsWithStreamingResponse,
)
__all__ = [
"Completions",
"AsyncCompletions",
"CompletionsWithRawResponse",
"AsyncCompletionsWithRawResponse",
"CompletionsWithStreamingResponse",
"AsyncCompletionsWithStreamingResponse",
"Chat",
"AsyncChat",
"ChatWithRawResponse",
"AsyncChatWithRawResponse",
"ChatWithStreamingResponse",
"AsyncChatWithStreamingResponse",
]
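
These re-exports make both the sync and async chat-completions surfaces importable from the subpackage. A hedged async sketch of the streaming-response variant; the model name is a placeholder:

import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()
    # `.with_streaming_response` defers reading the body until the
    # context manager is entered, so the response can be consumed
    # incrementally instead of being buffered up front.
    async with client.chat.completions.with_streaming_response.create(
        model="gpt-4o-mini",  # placeholder model name
        messages=[{"role": "user", "content": "Count to three"}],
    ) as response:
        print(response.headers.get("content-type"))
        async for line in response.iter_lines():
            print(line)


asyncio.run(main())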

View File

@@ -0,0 +1,102 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from .completions.completions import (
Completions,
AsyncCompletions,
CompletionsWithRawResponse,
AsyncCompletionsWithRawResponse,
CompletionsWithStreamingResponse,
AsyncCompletionsWithStreamingResponse,
)
__all__ = ["Chat", "AsyncChat"]
class Chat(SyncAPIResource):
@cached_property
def completions(self) -> Completions:
return Completions(self._client)
@cached_property
def with_raw_response(self) -> ChatWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return ChatWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> ChatWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return ChatWithStreamingResponse(self)
class AsyncChat(AsyncAPIResource):
@cached_property
def completions(self) -> AsyncCompletions:
return AsyncCompletions(self._client)
@cached_property
def with_raw_response(self) -> AsyncChatWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncChatWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncChatWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncChatWithStreamingResponse(self)
class ChatWithRawResponse:
def __init__(self, chat: Chat) -> None:
self._chat = chat
@cached_property
def completions(self) -> CompletionsWithRawResponse:
return CompletionsWithRawResponse(self._chat.completions)
class AsyncChatWithRawResponse:
def __init__(self, chat: AsyncChat) -> None:
self._chat = chat
@cached_property
def completions(self) -> AsyncCompletionsWithRawResponse:
return AsyncCompletionsWithRawResponse(self._chat.completions)
class ChatWithStreamingResponse:
def __init__(self, chat: Chat) -> None:
self._chat = chat
@cached_property
def completions(self) -> CompletionsWithStreamingResponse:
return CompletionsWithStreamingResponse(self._chat.completions)
class AsyncChatWithStreamingResponse:
def __init__(self, chat: AsyncChat) -> None:
self._chat = chat
@cached_property
def completions(self) -> AsyncCompletionsWithStreamingResponse:
return AsyncCompletionsWithStreamingResponse(self._chat.completions)
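
The four wrapper classes above exist so that `.with_raw_response` and `.with_streaming_response` cascade from `chat` down to `chat.completions`. A minimal sketch of the raw path, with a placeholder model name:

from openai import OpenAI

client = OpenAI()

# The prefix is applied at the `chat` level; ChatWithRawResponse.completions
# forwards it to the wrapped child resource.
response = client.chat.with_raw_response.completions.create(
    model="gpt-4o-mini",  # placeholder model name
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response.headers.get("x-request-id"))
completion = response.parse()  # recover the typed ChatCompletion
print(completion.choices[0].message.content)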

View File

@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .messages import (
Messages,
AsyncMessages,
MessagesWithRawResponse,
AsyncMessagesWithRawResponse,
MessagesWithStreamingResponse,
AsyncMessagesWithStreamingResponse,
)
from .completions import (
Completions,
AsyncCompletions,
CompletionsWithRawResponse,
AsyncCompletionsWithRawResponse,
CompletionsWithStreamingResponse,
AsyncCompletionsWithStreamingResponse,
)
__all__ = [
"Messages",
"AsyncMessages",
"MessagesWithRawResponse",
"AsyncMessagesWithRawResponse",
"MessagesWithStreamingResponse",
"AsyncMessagesWithStreamingResponse",
"Completions",
"AsyncCompletions",
"CompletionsWithRawResponse",
"AsyncCompletionsWithRawResponse",
"CompletionsWithStreamingResponse",
"AsyncCompletionsWithStreamingResponse",
]

View File

@@ -0,0 +1,212 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._utils import maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....pagination import SyncCursorPage, AsyncCursorPage
from ...._base_client import AsyncPaginator, make_request_options
from ....types.chat.completions import message_list_params
from ....types.chat.chat_completion_store_message import ChatCompletionStoreMessage
__all__ = ["Messages", "AsyncMessages"]
class Messages(SyncAPIResource):
@cached_property
def with_raw_response(self) -> MessagesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return MessagesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> MessagesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return MessagesWithStreamingResponse(self)
def list(
self,
completion_id: str,
*,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> SyncCursorPage[ChatCompletionStoreMessage]:
"""Get the messages in a stored chat completion.
Only Chat Completions that have
been created with the `store` parameter set to `true` will be returned.
Args:
after: Identifier for the last message from the previous pagination request.
limit: Number of messages to retrieve.
order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc`
for descending order. Defaults to `asc`.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not completion_id:
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
return self._get_api_list(
f"/chat/completions/{completion_id}/messages",
page=SyncCursorPage[ChatCompletionStoreMessage],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
},
message_list_params.MessageListParams,
),
),
model=ChatCompletionStoreMessage,
)
class AsyncMessages(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncMessagesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncMessagesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncMessagesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncMessagesWithStreamingResponse(self)
def list(
self,
completion_id: str,
*,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AsyncPaginator[ChatCompletionStoreMessage, AsyncCursorPage[ChatCompletionStoreMessage]]:
"""Get the messages in a stored chat completion.
Only Chat Completions that have
been created with the `store` parameter set to `true` will be returned.
Args:
after: Identifier for the last message from the previous pagination request.
limit: Number of messages to retrieve.
order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc`
for descending order. Defaults to `asc`.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not completion_id:
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
return self._get_api_list(
f"/chat/completions/{completion_id}/messages",
page=AsyncCursorPage[ChatCompletionStoreMessage],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
},
message_list_params.MessageListParams,
),
),
model=ChatCompletionStoreMessage,
)
class MessagesWithRawResponse:
def __init__(self, messages: Messages) -> None:
self._messages = messages
self.list = _legacy_response.to_raw_response_wrapper(
messages.list,
)
class AsyncMessagesWithRawResponse:
def __init__(self, messages: AsyncMessages) -> None:
self._messages = messages
self.list = _legacy_response.async_to_raw_response_wrapper(
messages.list,
)
class MessagesWithStreamingResponse:
def __init__(self, messages: Messages) -> None:
self._messages = messages
self.list = to_streamed_response_wrapper(
messages.list,
)
class AsyncMessagesWithStreamingResponse:
def __init__(self, messages: AsyncMessages) -> None:
self._messages = messages
self.list = async_to_streamed_response_wrapper(
messages.list,
)
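
A short usage sketch for the Messages resource above; the completion ID is a placeholder and must refer to a chat completion created with `store=True`:

from openai import OpenAI

client = OpenAI()

page = client.chat.completions.messages.list(
    "chatcmpl-abc123",  # placeholder ID of a stored completion
    limit=50,
    order="asc",
)
for message in page:  # SyncCursorPage fetches further pages lazily
    print(message.id, message.content)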

File diff suppressed because it is too large

View File

@@ -0,0 +1,290 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import array
import base64
from typing import List, Union, Iterable, cast
from typing_extensions import Literal
import httpx
from .. import _legacy_response
from ..types import embedding_create_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from .._utils import is_given, maybe_transform
from .._compat import cached_property
from .._extras import numpy as np, has_numpy
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .._base_client import make_request_options
from ..types.embedding_model import EmbeddingModel
from ..types.create_embedding_response import CreateEmbeddingResponse
__all__ = ["Embeddings", "AsyncEmbeddings"]
class Embeddings(SyncAPIResource):
@cached_property
def with_raw_response(self) -> EmbeddingsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return EmbeddingsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> EmbeddingsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return EmbeddingsWithStreamingResponse(self)
def create(
self,
*,
input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]],
model: Union[str, EmbeddingModel],
dimensions: int | NotGiven = NOT_GIVEN,
encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> CreateEmbeddingResponse:
"""
Creates an embedding vector representing the input text.
Args:
input: Input text to embed, encoded as a string or array of tokens. To embed multiple
inputs in a single request, pass an array of strings or array of token arrays.
The input must not exceed the max input tokens for the model (8192 tokens for
`text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
dimensions or less.
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
for counting tokens. Some models may also impose a limit on total number of
tokens summed across inputs.
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
[Model overview](https://platform.openai.com/docs/models) for descriptions of
them.
dimensions: The number of dimensions the resulting output embeddings should have. Only
supported in `text-embedding-3` and later models.
encoding_format: The format to return the embeddings in. Can be either `float` or
[`base64`](https://pypi.org/project/pybase64/).
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
params = {
"input": input,
"model": model,
"user": user,
"dimensions": dimensions,
"encoding_format": encoding_format,
}
if not is_given(encoding_format):
params["encoding_format"] = "base64"
def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse:
if is_given(encoding_format):
# don't modify the response object if a user explicitly asked for a format
return obj
for embedding in obj.data:
data = cast(object, embedding.embedding)
if not isinstance(data, str):
continue
if not has_numpy():
# use array for base64 optimisation
embedding.embedding = array.array("f", base64.b64decode(data)).tolist()
else:
embedding.embedding = np.frombuffer( # type: ignore[no-untyped-call]
base64.b64decode(data), dtype="float32"
).tolist()
return obj
return self._post(
"/embeddings",
body=maybe_transform(params, embedding_create_params.EmbeddingCreateParams),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
post_parser=parser,
),
cast_to=CreateEmbeddingResponse,
)
class AsyncEmbeddings(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncEmbeddingsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncEmbeddingsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncEmbeddingsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncEmbeddingsWithStreamingResponse(self)
async def create(
self,
*,
input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]],
model: Union[str, EmbeddingModel],
dimensions: int | NotGiven = NOT_GIVEN,
encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> CreateEmbeddingResponse:
"""
Creates an embedding vector representing the input text.
Args:
input: Input text to embed, encoded as a string or array of tokens. To embed multiple
inputs in a single request, pass an array of strings or array of token arrays.
The input must not exceed the max input tokens for the model (8192 tokens for
`text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
dimensions or less.
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
for counting tokens. Some models may also impose a limit on total number of
tokens summed across inputs.
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
[Model overview](https://platform.openai.com/docs/models) for descriptions of
them.
dimensions: The number of dimensions the resulting output embeddings should have. Only
supported in `text-embedding-3` and later models.
encoding_format: The format to return the embeddings in. Can be either `float` or
[`base64`](https://pypi.org/project/pybase64/).
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
params = {
"input": input,
"model": model,
"user": user,
"dimensions": dimensions,
"encoding_format": encoding_format,
}
if not is_given(encoding_format):
params["encoding_format"] = "base64"
def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse:
if is_given(encoding_format):
# don't modify the response object if a user explicitly asked for a format
return obj
for embedding in obj.data:
data = cast(object, embedding.embedding)
if not isinstance(data, str):
continue
if not has_numpy():
# use array for base64 optimisation
embedding.embedding = array.array("f", base64.b64decode(data)).tolist()
else:
embedding.embedding = np.frombuffer( # type: ignore[no-untyped-call]
base64.b64decode(data), dtype="float32"
).tolist()
return obj
return await self._post(
"/embeddings",
body=maybe_transform(params, embedding_create_params.EmbeddingCreateParams),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
post_parser=parser,
),
cast_to=CreateEmbeddingResponse,
)
class EmbeddingsWithRawResponse:
def __init__(self, embeddings: Embeddings) -> None:
self._embeddings = embeddings
self.create = _legacy_response.to_raw_response_wrapper(
embeddings.create,
)
class AsyncEmbeddingsWithRawResponse:
def __init__(self, embeddings: AsyncEmbeddings) -> None:
self._embeddings = embeddings
self.create = _legacy_response.async_to_raw_response_wrapper(
embeddings.create,
)
class EmbeddingsWithStreamingResponse:
def __init__(self, embeddings: Embeddings) -> None:
self._embeddings = embeddings
self.create = to_streamed_response_wrapper(
embeddings.create,
)
class AsyncEmbeddingsWithStreamingResponse:
def __init__(self, embeddings: AsyncEmbeddings) -> None:
self._embeddings = embeddings
self.create = async_to_streamed_response_wrapper(
embeddings.create,
)
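
When the caller does not pass `encoding_format`, the resource above silently requests `base64` and decodes the payload client-side, since base64-encoded float32 bytes are cheaper to transfer and parse than a JSON array of floats. A standalone round-trip illustrating the decode used by `parser` on the non-numpy path (the values are arbitrary, exactly representable floats):

import array
import base64

floats = [0.25, -0.5, 1.0]
# Encode as the API would: raw float32 bytes, then base64.
encoded = base64.b64encode(array.array("f", floats).tobytes()).decode()
# Decode as `parser` does when numpy is unavailable.
decoded = array.array("f", base64.b64decode(encoded)).tolist()
assert decoded == floats

On the numpy path the same bytes go through np.frombuffer with dtype="float32", yielding an identical list.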

View File

@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .runs import (
Runs,
AsyncRuns,
RunsWithRawResponse,
AsyncRunsWithRawResponse,
RunsWithStreamingResponse,
AsyncRunsWithStreamingResponse,
)
from .evals import (
Evals,
AsyncEvals,
EvalsWithRawResponse,
AsyncEvalsWithRawResponse,
EvalsWithStreamingResponse,
AsyncEvalsWithStreamingResponse,
)
__all__ = [
"Runs",
"AsyncRuns",
"RunsWithRawResponse",
"AsyncRunsWithRawResponse",
"RunsWithStreamingResponse",
"AsyncRunsWithStreamingResponse",
"Evals",
"AsyncEvals",
"EvalsWithRawResponse",
"AsyncEvalsWithRawResponse",
"EvalsWithStreamingResponse",
"AsyncEvalsWithStreamingResponse",
]

View File

@@ -0,0 +1,652 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Iterable, Optional
from typing_extensions import Literal
import httpx
from ... import _legacy_response
from ...types import eval_list_params, eval_create_params, eval_update_params
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from .runs.runs import (
Runs,
AsyncRuns,
RunsWithRawResponse,
AsyncRunsWithRawResponse,
RunsWithStreamingResponse,
AsyncRunsWithStreamingResponse,
)
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...pagination import SyncCursorPage, AsyncCursorPage
from ..._base_client import AsyncPaginator, make_request_options
from ...types.eval_list_response import EvalListResponse
from ...types.eval_create_response import EvalCreateResponse
from ...types.eval_delete_response import EvalDeleteResponse
from ...types.eval_update_response import EvalUpdateResponse
from ...types.eval_retrieve_response import EvalRetrieveResponse
from ...types.shared_params.metadata import Metadata
__all__ = ["Evals", "AsyncEvals"]
class Evals(SyncAPIResource):
@cached_property
def runs(self) -> Runs:
return Runs(self._client)
@cached_property
def with_raw_response(self) -> EvalsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return EvalsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> EvalsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return EvalsWithStreamingResponse(self)
def create(
self,
*,
data_source_config: eval_create_params.DataSourceConfig,
testing_criteria: Iterable[eval_create_params.TestingCriterion],
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> EvalCreateResponse:
"""
        Create the structure of an evaluation that can be used to test a model's
        performance. An evaluation is a set of testing criteria and a data source. After
        creating an evaluation, you can run it on different models and model parameters.
        We support several types of graders and data sources. For more information, see
        the [Evals guide](https://platform.openai.com/docs/guides/evals).
Args:
data_source_config: The configuration for the data source used for the evaluation runs.
testing_criteria: A list of graders for all eval runs in this group.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
name: The name of the evaluation.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._post(
"/evals",
body=maybe_transform(
{
"data_source_config": data_source_config,
"testing_criteria": testing_criteria,
"metadata": metadata,
"name": name,
},
eval_create_params.EvalCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=EvalCreateResponse,
)
def retrieve(
self,
eval_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> EvalRetrieveResponse:
"""
Get an evaluation by ID.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return self._get(
f"/evals/{eval_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=EvalRetrieveResponse,
)
def update(
self,
eval_id: str,
*,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> EvalUpdateResponse:
"""
Update certain properties of an evaluation.
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
name: Rename the evaluation.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return self._post(
f"/evals/{eval_id}",
body=maybe_transform(
{
"metadata": metadata,
"name": name,
},
eval_update_params.EvalUpdateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=EvalUpdateResponse,
)
def list(
self,
*,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
order_by: Literal["created_at", "updated_at"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> SyncCursorPage[EvalListResponse]:
"""
List evaluations for a project.
Args:
after: Identifier for the last eval from the previous pagination request.
limit: Number of evals to retrieve.
order: Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for
descending order.
order_by: Evals can be ordered by creation time or last updated time. Use `created_at` for
creation time or `updated_at` for last updated time.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/evals",
page=SyncCursorPage[EvalListResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
"order_by": order_by,
},
eval_list_params.EvalListParams,
),
),
model=EvalListResponse,
)
def delete(
self,
eval_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> EvalDeleteResponse:
"""
Delete an evaluation.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return self._delete(
f"/evals/{eval_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=EvalDeleteResponse,
)
class AsyncEvals(AsyncAPIResource):
@cached_property
def runs(self) -> AsyncRuns:
return AsyncRuns(self._client)
@cached_property
def with_raw_response(self) -> AsyncEvalsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncEvalsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncEvalsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncEvalsWithStreamingResponse(self)
async def create(
self,
*,
data_source_config: eval_create_params.DataSourceConfig,
testing_criteria: Iterable[eval_create_params.TestingCriterion],
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> EvalCreateResponse:
"""
        Create the structure of an evaluation that can be used to test a model's
        performance. An evaluation is a set of testing criteria and a data source. After
        creating an evaluation, you can run it on different models and model parameters.
        We support several types of graders and data sources. For more information, see
        the [Evals guide](https://platform.openai.com/docs/guides/evals).
Args:
data_source_config: The configuration for the data source used for the evaluation runs.
testing_criteria: A list of graders for all eval runs in this group.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
name: The name of the evaluation.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return await self._post(
"/evals",
body=await async_maybe_transform(
{
"data_source_config": data_source_config,
"testing_criteria": testing_criteria,
"metadata": metadata,
"name": name,
},
eval_create_params.EvalCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=EvalCreateResponse,
)
async def retrieve(
self,
eval_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> EvalRetrieveResponse:
"""
Get an evaluation by ID.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return await self._get(
f"/evals/{eval_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=EvalRetrieveResponse,
)
async def update(
self,
eval_id: str,
*,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> EvalUpdateResponse:
"""
Update certain properties of an evaluation.
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
name: Rename the evaluation.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return await self._post(
f"/evals/{eval_id}",
body=await async_maybe_transform(
{
"metadata": metadata,
"name": name,
},
eval_update_params.EvalUpdateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=EvalUpdateResponse,
)
def list(
self,
*,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
order_by: Literal["created_at", "updated_at"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AsyncPaginator[EvalListResponse, AsyncCursorPage[EvalListResponse]]:
"""
List evaluations for a project.
Args:
after: Identifier for the last eval from the previous pagination request.
limit: Number of evals to retrieve.
order: Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for
descending order.
order_by: Evals can be ordered by creation time or last updated time. Use `created_at` for
creation time or `updated_at` for last updated time.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/evals",
page=AsyncCursorPage[EvalListResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
"order_by": order_by,
},
eval_list_params.EvalListParams,
),
),
model=EvalListResponse,
)
async def delete(
self,
eval_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> EvalDeleteResponse:
"""
Delete an evaluation.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return await self._delete(
f"/evals/{eval_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=EvalDeleteResponse,
)
class EvalsWithRawResponse:
def __init__(self, evals: Evals) -> None:
self._evals = evals
self.create = _legacy_response.to_raw_response_wrapper(
evals.create,
)
self.retrieve = _legacy_response.to_raw_response_wrapper(
evals.retrieve,
)
self.update = _legacy_response.to_raw_response_wrapper(
evals.update,
)
self.list = _legacy_response.to_raw_response_wrapper(
evals.list,
)
self.delete = _legacy_response.to_raw_response_wrapper(
evals.delete,
)
@cached_property
def runs(self) -> RunsWithRawResponse:
return RunsWithRawResponse(self._evals.runs)
class AsyncEvalsWithRawResponse:
def __init__(self, evals: AsyncEvals) -> None:
self._evals = evals
self.create = _legacy_response.async_to_raw_response_wrapper(
evals.create,
)
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
evals.retrieve,
)
self.update = _legacy_response.async_to_raw_response_wrapper(
evals.update,
)
self.list = _legacy_response.async_to_raw_response_wrapper(
evals.list,
)
self.delete = _legacy_response.async_to_raw_response_wrapper(
evals.delete,
)
@cached_property
def runs(self) -> AsyncRunsWithRawResponse:
return AsyncRunsWithRawResponse(self._evals.runs)
class EvalsWithStreamingResponse:
def __init__(self, evals: Evals) -> None:
self._evals = evals
self.create = to_streamed_response_wrapper(
evals.create,
)
self.retrieve = to_streamed_response_wrapper(
evals.retrieve,
)
self.update = to_streamed_response_wrapper(
evals.update,
)
self.list = to_streamed_response_wrapper(
evals.list,
)
self.delete = to_streamed_response_wrapper(
evals.delete,
)
@cached_property
def runs(self) -> RunsWithStreamingResponse:
return RunsWithStreamingResponse(self._evals.runs)
class AsyncEvalsWithStreamingResponse:
def __init__(self, evals: AsyncEvals) -> None:
self._evals = evals
self.create = async_to_streamed_response_wrapper(
evals.create,
)
self.retrieve = async_to_streamed_response_wrapper(
evals.retrieve,
)
self.update = async_to_streamed_response_wrapper(
evals.update,
)
self.list = async_to_streamed_response_wrapper(
evals.list,
)
self.delete = async_to_streamed_response_wrapper(
evals.delete,
)
@cached_property
def runs(self) -> AsyncRunsWithStreamingResponse:
return AsyncRunsWithStreamingResponse(self._evals.runs)
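
A minimal sketch of the CRUD surface above; the eval ID is a placeholder:

from openai import OpenAI

client = OpenAI()

# List the ten most recently created evals.
page = client.evals.list(limit=10, order="desc", order_by="created_at")
for ev in page:
    print(ev.id, ev.name)

# Fetch, rename, and finally delete a specific eval (placeholder ID).
ev = client.evals.retrieve("eval_abc123")
client.evals.update("eval_abc123", name="renamed-eval")
client.evals.delete("eval_abc123")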

View File

@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .runs import (
Runs,
AsyncRuns,
RunsWithRawResponse,
AsyncRunsWithRawResponse,
RunsWithStreamingResponse,
AsyncRunsWithStreamingResponse,
)
from .output_items import (
OutputItems,
AsyncOutputItems,
OutputItemsWithRawResponse,
AsyncOutputItemsWithRawResponse,
OutputItemsWithStreamingResponse,
AsyncOutputItemsWithStreamingResponse,
)
__all__ = [
"OutputItems",
"AsyncOutputItems",
"OutputItemsWithRawResponse",
"AsyncOutputItemsWithRawResponse",
"OutputItemsWithStreamingResponse",
"AsyncOutputItemsWithStreamingResponse",
"Runs",
"AsyncRuns",
"RunsWithRawResponse",
"AsyncRunsWithRawResponse",
"RunsWithStreamingResponse",
"AsyncRunsWithStreamingResponse",
]

View File

@@ -0,0 +1,315 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._utils import maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....pagination import SyncCursorPage, AsyncCursorPage
from ...._base_client import AsyncPaginator, make_request_options
from ....types.evals.runs import output_item_list_params
from ....types.evals.runs.output_item_list_response import OutputItemListResponse
from ....types.evals.runs.output_item_retrieve_response import OutputItemRetrieveResponse
__all__ = ["OutputItems", "AsyncOutputItems"]
class OutputItems(SyncAPIResource):
@cached_property
def with_raw_response(self) -> OutputItemsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return OutputItemsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> OutputItemsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return OutputItemsWithStreamingResponse(self)
def retrieve(
self,
output_item_id: str,
*,
eval_id: str,
run_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> OutputItemRetrieveResponse:
"""
Get an evaluation run output item by ID.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
if not output_item_id:
raise ValueError(f"Expected a non-empty value for `output_item_id` but received {output_item_id!r}")
return self._get(
f"/evals/{eval_id}/runs/{run_id}/output_items/{output_item_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=OutputItemRetrieveResponse,
)
def list(
self,
run_id: str,
*,
eval_id: str,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
status: Literal["fail", "pass"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> SyncCursorPage[OutputItemListResponse]:
"""
Get a list of output items for an evaluation run.
Args:
after: Identifier for the last output item from the previous pagination request.
limit: Number of output items to retrieve.
order: Sort order for output items by timestamp. Use `asc` for ascending order or
`desc` for descending order. Defaults to `asc`.
          status: Filter output items by status. Use `fail` to filter by failed output items or
              `pass` to filter by passed output items.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return self._get_api_list(
f"/evals/{eval_id}/runs/{run_id}/output_items",
page=SyncCursorPage[OutputItemListResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
"status": status,
},
output_item_list_params.OutputItemListParams,
),
),
model=OutputItemListResponse,
)
class AsyncOutputItems(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncOutputItemsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncOutputItemsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncOutputItemsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncOutputItemsWithStreamingResponse(self)
async def retrieve(
self,
output_item_id: str,
*,
eval_id: str,
run_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> OutputItemRetrieveResponse:
"""
Get an evaluation run output item by ID.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
if not output_item_id:
raise ValueError(f"Expected a non-empty value for `output_item_id` but received {output_item_id!r}")
return await self._get(
f"/evals/{eval_id}/runs/{run_id}/output_items/{output_item_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=OutputItemRetrieveResponse,
)
def list(
self,
run_id: str,
*,
eval_id: str,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
status: Literal["fail", "pass"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AsyncPaginator[OutputItemListResponse, AsyncCursorPage[OutputItemListResponse]]:
"""
Get a list of output items for an evaluation run.
Args:
after: Identifier for the last output item from the previous pagination request.
limit: Number of output items to retrieve.
order: Sort order for output items by timestamp. Use `asc` for ascending order or
`desc` for descending order. Defaults to `asc`.
          status: Filter output items by status. Use `fail` to filter by failed output items or
              `pass` to filter by passed output items.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return self._get_api_list(
f"/evals/{eval_id}/runs/{run_id}/output_items",
page=AsyncCursorPage[OutputItemListResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
"status": status,
},
output_item_list_params.OutputItemListParams,
),
),
model=OutputItemListResponse,
)
class OutputItemsWithRawResponse:
def __init__(self, output_items: OutputItems) -> None:
self._output_items = output_items
self.retrieve = _legacy_response.to_raw_response_wrapper(
output_items.retrieve,
)
self.list = _legacy_response.to_raw_response_wrapper(
output_items.list,
)
class AsyncOutputItemsWithRawResponse:
def __init__(self, output_items: AsyncOutputItems) -> None:
self._output_items = output_items
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
output_items.retrieve,
)
self.list = _legacy_response.async_to_raw_response_wrapper(
output_items.list,
)
class OutputItemsWithStreamingResponse:
def __init__(self, output_items: OutputItems) -> None:
self._output_items = output_items
self.retrieve = to_streamed_response_wrapper(
output_items.retrieve,
)
self.list = to_streamed_response_wrapper(
output_items.list,
)
class AsyncOutputItemsWithStreamingResponse:
def __init__(self, output_items: AsyncOutputItems) -> None:
self._output_items = output_items
self.retrieve = async_to_streamed_response_wrapper(
output_items.retrieve,
)
self.list = async_to_streamed_response_wrapper(
output_items.list,
)
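
A short sketch for inspecting the failing rows of a run; the IDs are placeholders, and status="fail" matches the Literal["fail", "pass"] filter above:

from openai import OpenAI

client = OpenAI()

items = client.evals.runs.output_items.list(
    "run_abc123",           # placeholder run ID
    eval_id="eval_abc123",  # placeholder eval ID
    status="fail",          # keep only failing output items
    limit=25,
)
for item in items:
    print(item.id, item.status)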

View File

@@ -0,0 +1,632 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Optional
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .output_items import (
OutputItems,
AsyncOutputItems,
OutputItemsWithRawResponse,
AsyncOutputItemsWithRawResponse,
OutputItemsWithStreamingResponse,
AsyncOutputItemsWithStreamingResponse,
)
from ....pagination import SyncCursorPage, AsyncCursorPage
from ....types.evals import run_list_params, run_create_params
from ...._base_client import AsyncPaginator, make_request_options
from ....types.shared_params.metadata import Metadata
from ....types.evals.run_list_response import RunListResponse
from ....types.evals.run_cancel_response import RunCancelResponse
from ....types.evals.run_create_response import RunCreateResponse
from ....types.evals.run_delete_response import RunDeleteResponse
from ....types.evals.run_retrieve_response import RunRetrieveResponse
__all__ = ["Runs", "AsyncRuns"]
class Runs(SyncAPIResource):
@cached_property
def output_items(self) -> OutputItems:
return OutputItems(self._client)
@cached_property
def with_raw_response(self) -> RunsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return RunsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> RunsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return RunsWithStreamingResponse(self)
def create(
self,
eval_id: str,
*,
data_source: run_create_params.DataSource,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> RunCreateResponse:
"""Create a new evaluation run.
This is the endpoint that will kick off grading.
Args:
data_source: Details about the run's data source.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
name: The name of the run.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return self._post(
f"/evals/{eval_id}/runs",
body=maybe_transform(
{
"data_source": data_source,
"metadata": metadata,
"name": name,
},
run_create_params.RunCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=RunCreateResponse,
)
def retrieve(
self,
run_id: str,
*,
eval_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> RunRetrieveResponse:
"""
Get an evaluation run by ID.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return self._get(
f"/evals/{eval_id}/runs/{run_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=RunRetrieveResponse,
)
def list(
self,
eval_id: str,
*,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
status: Literal["queued", "in_progress", "completed", "canceled", "failed"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> SyncCursorPage[RunListResponse]:
"""
Get a list of runs for an evaluation.
Args:
after: Identifier for the last run from the previous pagination request.
limit: Number of runs to retrieve.
order: Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for
descending order. Defaults to `asc`.
status: Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed`
| `canceled`.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return self._get_api_list(
f"/evals/{eval_id}/runs",
page=SyncCursorPage[RunListResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
"status": status,
},
run_list_params.RunListParams,
),
),
model=RunListResponse,
)
def delete(
self,
run_id: str,
*,
eval_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> RunDeleteResponse:
"""
Delete an eval run.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return self._delete(
f"/evals/{eval_id}/runs/{run_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=RunDeleteResponse,
)
def cancel(
self,
run_id: str,
*,
eval_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> RunCancelResponse:
"""
Cancel an ongoing evaluation run.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return self._post(
f"/evals/{eval_id}/runs/{run_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=RunCancelResponse,
)
class AsyncRuns(AsyncAPIResource):
@cached_property
def output_items(self) -> AsyncOutputItems:
return AsyncOutputItems(self._client)
@cached_property
def with_raw_response(self) -> AsyncRunsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncRunsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncRunsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncRunsWithStreamingResponse(self)
async def create(
self,
eval_id: str,
*,
data_source: run_create_params.DataSource,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> RunCreateResponse:
"""Create a new evaluation run.
This is the endpoint that will kick off grading.
Args:
data_source: Details about the run's data source.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
name: The name of the run.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return await self._post(
f"/evals/{eval_id}/runs",
body=await async_maybe_transform(
{
"data_source": data_source,
"metadata": metadata,
"name": name,
},
run_create_params.RunCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=RunCreateResponse,
)
async def retrieve(
self,
run_id: str,
*,
eval_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> RunRetrieveResponse:
"""
Get an evaluation run by ID.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return await self._get(
f"/evals/{eval_id}/runs/{run_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=RunRetrieveResponse,
)
def list(
self,
eval_id: str,
*,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
status: Literal["queued", "in_progress", "completed", "canceled", "failed"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AsyncPaginator[RunListResponse, AsyncCursorPage[RunListResponse]]:
"""
Get a list of runs for an evaluation.
Args:
after: Identifier for the last run from the previous pagination request.
limit: Number of runs to retrieve.
order: Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for
descending order. Defaults to `asc`.
status: Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed`
| `canceled`.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return self._get_api_list(
f"/evals/{eval_id}/runs",
page=AsyncCursorPage[RunListResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
"status": status,
},
run_list_params.RunListParams,
),
),
model=RunListResponse,
)
async def delete(
self,
run_id: str,
*,
eval_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> RunDeleteResponse:
"""
Delete an eval run.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return await self._delete(
f"/evals/{eval_id}/runs/{run_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=RunDeleteResponse,
)
async def cancel(
self,
run_id: str,
*,
eval_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> RunCancelResponse:
"""
Cancel an ongoing evaluation run.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return await self._post(
f"/evals/{eval_id}/runs/{run_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=RunCancelResponse,
)
class RunsWithRawResponse:
def __init__(self, runs: Runs) -> None:
self._runs = runs
self.create = _legacy_response.to_raw_response_wrapper(
runs.create,
)
self.retrieve = _legacy_response.to_raw_response_wrapper(
runs.retrieve,
)
self.list = _legacy_response.to_raw_response_wrapper(
runs.list,
)
self.delete = _legacy_response.to_raw_response_wrapper(
runs.delete,
)
self.cancel = _legacy_response.to_raw_response_wrapper(
runs.cancel,
)
@cached_property
def output_items(self) -> OutputItemsWithRawResponse:
return OutputItemsWithRawResponse(self._runs.output_items)
class AsyncRunsWithRawResponse:
def __init__(self, runs: AsyncRuns) -> None:
self._runs = runs
self.create = _legacy_response.async_to_raw_response_wrapper(
runs.create,
)
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
runs.retrieve,
)
self.list = _legacy_response.async_to_raw_response_wrapper(
runs.list,
)
self.delete = _legacy_response.async_to_raw_response_wrapper(
runs.delete,
)
self.cancel = _legacy_response.async_to_raw_response_wrapper(
runs.cancel,
)
@cached_property
def output_items(self) -> AsyncOutputItemsWithRawResponse:
return AsyncOutputItemsWithRawResponse(self._runs.output_items)
class RunsWithStreamingResponse:
def __init__(self, runs: Runs) -> None:
self._runs = runs
self.create = to_streamed_response_wrapper(
runs.create,
)
self.retrieve = to_streamed_response_wrapper(
runs.retrieve,
)
self.list = to_streamed_response_wrapper(
runs.list,
)
self.delete = to_streamed_response_wrapper(
runs.delete,
)
self.cancel = to_streamed_response_wrapper(
runs.cancel,
)
@cached_property
def output_items(self) -> OutputItemsWithStreamingResponse:
return OutputItemsWithStreamingResponse(self._runs.output_items)
class AsyncRunsWithStreamingResponse:
def __init__(self, runs: AsyncRuns) -> None:
self._runs = runs
self.create = async_to_streamed_response_wrapper(
runs.create,
)
self.retrieve = async_to_streamed_response_wrapper(
runs.retrieve,
)
self.list = async_to_streamed_response_wrapper(
runs.list,
)
self.delete = async_to_streamed_response_wrapper(
runs.delete,
)
self.cancel = async_to_streamed_response_wrapper(
runs.cancel,
)
@cached_property
def output_items(self) -> AsyncOutputItemsWithStreamingResponse:
return AsyncOutputItemsWithStreamingResponse(self._runs.output_items)
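
A usage sketch for the Runs resource above, under the same assumptions: a configured client, placeholder IDs, the resource exposed as `client.evals.runs`, and a `status` field on the run object (inferred from the `status` filter accepted by `list`).

# Sketch: list recent completed runs, then cancel one that is still going.
from openai import OpenAI

client = OpenAI()

for run in client.evals.runs.list("eval_abc123", status="completed", order="desc"):
    print(run.id, run.status)

run = client.evals.runs.retrieve("run_abc123", eval_id="eval_abc123")
if run.status in ("queued", "in_progress"):
    client.evals.runs.cancel(run.id, eval_id="eval_abc123")

# Creating a run requires a `data_source` payload whose shapes live in
# run_create_params.DataSource (not shown in this commit excerpt), e.g.:
#   run = client.evals.runs.create("eval_abc123", data_source=..., name="nightly")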

View File

@@ -0,0 +1,762 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import time
import typing_extensions
from typing import Mapping, cast
from typing_extensions import Literal
import httpx
from .. import _legacy_response
from ..types import FilePurpose, file_list_params, file_create_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
StreamedBinaryAPIResponse,
AsyncStreamedBinaryAPIResponse,
to_streamed_response_wrapper,
async_to_streamed_response_wrapper,
to_custom_streamed_response_wrapper,
async_to_custom_streamed_response_wrapper,
)
from ..pagination import SyncCursorPage, AsyncCursorPage
from .._base_client import AsyncPaginator, make_request_options
from ..types.file_object import FileObject
from ..types.file_deleted import FileDeleted
from ..types.file_purpose import FilePurpose
__all__ = ["Files", "AsyncFiles"]
class Files(SyncAPIResource):
@cached_property
def with_raw_response(self) -> FilesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return FilesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> FilesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return FilesWithStreamingResponse(self)
def create(
self,
*,
file: FileTypes,
purpose: FilePurpose,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> FileObject:
"""Upload a file that can be used across various endpoints.
Individual files can be
up to 512 MB, and the size of all files uploaded by one organization can be up
to 100 GB.
The Assistants API supports files up to 2 million tokens and of specific file
types. See the
[Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for
details.
The Fine-tuning API only supports `.jsonl` files. The input also has certain
required formats for fine-tuning
[chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or
[completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
models.
The Batch API only supports `.jsonl` files up to 200 MB in size. The input also
has a specific required
[format](https://platform.openai.com/docs/api-reference/batch/request-input).
Please [contact us](https://help.openai.com/) if you need to increase these
storage limits.
Args:
file: The File object (not file name) to be uploaded.
purpose: The intended purpose of the uploaded file. One of: - `assistants`: Used in the
Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for
fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
Flexible file type for any purpose - `evals`: Used for eval data sets
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
body = deepcopy_minimal(
{
"file": file,
"purpose": purpose,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return self._post(
"/files",
body=maybe_transform(body, file_create_params.FileCreateParams),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FileObject,
)
def retrieve(
self,
file_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> FileObject:
"""
Returns information about a specific file.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._get(
f"/files/{file_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FileObject,
)
def list(
self,
*,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
purpose: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> SyncCursorPage[FileObject]:
"""Returns a list of files.
Args:
after: A cursor for use in pagination.
`after` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
ending with obj_foo, your subsequent call can include after=obj_foo in order to
fetch the next page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
10,000, and the default is 10,000.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
purpose: Only return files with the given purpose.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/files",
page=SyncCursorPage[FileObject],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
"purpose": purpose,
},
file_list_params.FileListParams,
),
),
model=FileObject,
)
def delete(
self,
file_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> FileDeleted:
"""
Delete a file.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._delete(
f"/files/{file_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FileDeleted,
)
def content(
self,
file_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> _legacy_response.HttpxBinaryResponseContent:
"""
Returns the contents of the specified file.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
return self._get(
f"/files/{file_id}/content",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
@typing_extensions.deprecated("The `.content()` method should be used instead")
def retrieve_content(
self,
file_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> str:
"""
Returns the contents of the specified file.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._get(
f"/files/{file_id}/content",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=str,
)
def wait_for_processing(
self,
id: str,
*,
poll_interval: float = 5.0,
max_wait_seconds: float = 30 * 60,
) -> FileObject:
"""Waits for the given file to be processed, default timeout is 30 mins."""
TERMINAL_STATES = {"processed", "error", "deleted"}
start = time.time()
file = self.retrieve(id)
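# Poll on a fixed interval; the timeout is checked after each retrieval,
# so the total wait can overshoot max_wait_seconds by up to one interval.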
while file.status not in TERMINAL_STATES:
self._sleep(poll_interval)
file = self.retrieve(id)
if time.time() - start > max_wait_seconds:
raise RuntimeError(
f"Giving up on waiting for file {id} to finish processing after {max_wait_seconds} seconds."
)
return file
class AsyncFiles(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncFilesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncFilesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncFilesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncFilesWithStreamingResponse(self)
async def create(
self,
*,
file: FileTypes,
purpose: FilePurpose,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> FileObject:
"""Upload a file that can be used across various endpoints.
Individual files can be
up to 512 MB, and the size of all files uploaded by one organization can be up
to 100 GB.
The Assistants API supports files up to 2 million tokens and of specific file
types. See the
[Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for
details.
The Fine-tuning API only supports `.jsonl` files. The input also has certain
required formats for fine-tuning
[chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or
[completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
models.
The Batch API only supports `.jsonl` files up to 200 MB in size. The input also
has a specific required
[format](https://platform.openai.com/docs/api-reference/batch/request-input).
Please [contact us](https://help.openai.com/) if you need to increase these
storage limits.
Args:
file: The File object (not file name) to be uploaded.
purpose: The intended purpose of the uploaded file. One of: - `assistants`: Used in the
Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for
fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
Flexible file type for any purpose - `evals`: Used for eval data sets
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
body = deepcopy_minimal(
{
"file": file,
"purpose": purpose,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return await self._post(
"/files",
body=await async_maybe_transform(body, file_create_params.FileCreateParams),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FileObject,
)
async def retrieve(
self,
file_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> FileObject:
"""
Returns information about a specific file.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._get(
f"/files/{file_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FileObject,
)
def list(
self,
*,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
purpose: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AsyncPaginator[FileObject, AsyncCursorPage[FileObject]]:
"""Returns a list of files.
Args:
after: A cursor for use in pagination.
`after` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
ending with obj_foo, your subsequent call can include after=obj_foo in order to
fetch the next page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
10,000, and the default is 10,000.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
purpose: Only return files with the given purpose.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/files",
page=AsyncCursorPage[FileObject],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
"purpose": purpose,
},
file_list_params.FileListParams,
),
),
model=FileObject,
)
async def delete(
self,
file_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> FileDeleted:
"""
Delete a file.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._delete(
f"/files/{file_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FileDeleted,
)
async def content(
self,
file_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> _legacy_response.HttpxBinaryResponseContent:
"""
Returns the contents of the specified file.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
return await self._get(
f"/files/{file_id}/content",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
@typing_extensions.deprecated("The `.content()` method should be used instead")
async def retrieve_content(
self,
file_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> str:
"""
Returns the contents of the specified file.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._get(
f"/files/{file_id}/content",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=str,
)
async def wait_for_processing(
self,
id: str,
*,
poll_interval: float = 5.0,
max_wait_seconds: float = 30 * 60,
) -> FileObject:
"""Waits for the given file to be processed, default timeout is 30 mins."""
TERMINAL_STATES = {"processed", "error", "deleted"}
start = time.time()
file = await self.retrieve(id)
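# Poll on a fixed interval; the timeout is checked after each retrieval,
# so the total wait can overshoot max_wait_seconds by up to one interval.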
while file.status not in TERMINAL_STATES:
await self._sleep(poll_interval)
file = await self.retrieve(id)
if time.time() - start > max_wait_seconds:
raise RuntimeError(
f"Giving up on waiting for file {id} to finish processing after {max_wait_seconds} seconds."
)
return file
class FilesWithRawResponse:
def __init__(self, files: Files) -> None:
self._files = files
self.create = _legacy_response.to_raw_response_wrapper(
files.create,
)
self.retrieve = _legacy_response.to_raw_response_wrapper(
files.retrieve,
)
self.list = _legacy_response.to_raw_response_wrapper(
files.list,
)
self.delete = _legacy_response.to_raw_response_wrapper(
files.delete,
)
self.content = _legacy_response.to_raw_response_wrapper(
files.content,
)
self.retrieve_content = ( # pyright: ignore[reportDeprecated]
_legacy_response.to_raw_response_wrapper(
files.retrieve_content # pyright: ignore[reportDeprecated],
)
)
class AsyncFilesWithRawResponse:
def __init__(self, files: AsyncFiles) -> None:
self._files = files
self.create = _legacy_response.async_to_raw_response_wrapper(
files.create,
)
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
files.retrieve,
)
self.list = _legacy_response.async_to_raw_response_wrapper(
files.list,
)
self.delete = _legacy_response.async_to_raw_response_wrapper(
files.delete,
)
self.content = _legacy_response.async_to_raw_response_wrapper(
files.content,
)
self.retrieve_content = ( # pyright: ignore[reportDeprecated]
_legacy_response.async_to_raw_response_wrapper(
files.retrieve_content # pyright: ignore[reportDeprecated],
)
)
class FilesWithStreamingResponse:
def __init__(self, files: Files) -> None:
self._files = files
self.create = to_streamed_response_wrapper(
files.create,
)
self.retrieve = to_streamed_response_wrapper(
files.retrieve,
)
self.list = to_streamed_response_wrapper(
files.list,
)
self.delete = to_streamed_response_wrapper(
files.delete,
)
self.content = to_custom_streamed_response_wrapper(
files.content,
StreamedBinaryAPIResponse,
)
self.retrieve_content = ( # pyright: ignore[reportDeprecated]
to_streamed_response_wrapper(
files.retrieve_content # pyright: ignore[reportDeprecated],
)
)
class AsyncFilesWithStreamingResponse:
def __init__(self, files: AsyncFiles) -> None:
self._files = files
self.create = async_to_streamed_response_wrapper(
files.create,
)
self.retrieve = async_to_streamed_response_wrapper(
files.retrieve,
)
self.list = async_to_streamed_response_wrapper(
files.list,
)
self.delete = async_to_streamed_response_wrapper(
files.delete,
)
self.content = async_to_custom_streamed_response_wrapper(
files.content,
AsyncStreamedBinaryAPIResponse,
)
self.retrieve_content = ( # pyright: ignore[reportDeprecated]
async_to_streamed_response_wrapper(
files.retrieve_content # pyright: ignore[reportDeprecated],
)
)
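
A usage sketch for the Files resource above: upload a fine-tuning file, block until it is processed, then stream the bytes back to disk. The file paths are placeholders; `stream_to_file` comes from the streamed-binary response helpers imported at the top of this file.

# Sketch: upload, wait for processing, and download a file.
from openai import OpenAI

client = OpenAI()

with open("train.jsonl", "rb") as f:  # placeholder path
    uploaded = client.files.create(file=f, purpose="fine-tune")

# Blocks for up to 30 minutes by default (see wait_for_processing above).
processed = client.files.wait_for_processing(uploaded.id)

# Stream the raw bytes to disk without buffering the whole body in memory.
with client.files.with_streaming_response.content(processed.id) as response:
    response.stream_to_file("train.copy.jsonl")  # placeholder destination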

View File

@@ -0,0 +1,47 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .jobs import (
Jobs,
AsyncJobs,
JobsWithRawResponse,
AsyncJobsWithRawResponse,
JobsWithStreamingResponse,
AsyncJobsWithStreamingResponse,
)
from .checkpoints import (
Checkpoints,
AsyncCheckpoints,
CheckpointsWithRawResponse,
AsyncCheckpointsWithRawResponse,
CheckpointsWithStreamingResponse,
AsyncCheckpointsWithStreamingResponse,
)
from .fine_tuning import (
FineTuning,
AsyncFineTuning,
FineTuningWithRawResponse,
AsyncFineTuningWithRawResponse,
FineTuningWithStreamingResponse,
AsyncFineTuningWithStreamingResponse,
)
__all__ = [
"Jobs",
"AsyncJobs",
"JobsWithRawResponse",
"AsyncJobsWithRawResponse",
"JobsWithStreamingResponse",
"AsyncJobsWithStreamingResponse",
"Checkpoints",
"AsyncCheckpoints",
"CheckpointsWithRawResponse",
"AsyncCheckpointsWithRawResponse",
"CheckpointsWithStreamingResponse",
"AsyncCheckpointsWithStreamingResponse",
"FineTuning",
"AsyncFineTuning",
"FineTuningWithRawResponse",
"AsyncFineTuningWithRawResponse",
"FineTuningWithStreamingResponse",
"AsyncFineTuningWithStreamingResponse",
]
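
These re-exports exist so the wrapper variants can be imported from a single module; in normal use they are reached through the client instead. A minimal sketch, assuming the package root is `openai`:

# Sketch: both import styles resolve to the same classes.
from openai import OpenAI
from openai.resources.fine_tuning import FineTuning, FineTuningWithRawResponse

client = OpenAI()
assert isinstance(client.fine_tuning, FineTuning)
assert isinstance(client.fine_tuning.with_raw_response, FineTuningWithRawResponse)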

View File

@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .checkpoints import (
Checkpoints,
AsyncCheckpoints,
CheckpointsWithRawResponse,
AsyncCheckpointsWithRawResponse,
CheckpointsWithStreamingResponse,
AsyncCheckpointsWithStreamingResponse,
)
from .permissions import (
Permissions,
AsyncPermissions,
PermissionsWithRawResponse,
AsyncPermissionsWithRawResponse,
PermissionsWithStreamingResponse,
AsyncPermissionsWithStreamingResponse,
)
__all__ = [
"Permissions",
"AsyncPermissions",
"PermissionsWithRawResponse",
"AsyncPermissionsWithRawResponse",
"PermissionsWithStreamingResponse",
"AsyncPermissionsWithStreamingResponse",
"Checkpoints",
"AsyncCheckpoints",
"CheckpointsWithRawResponse",
"AsyncCheckpointsWithRawResponse",
"CheckpointsWithStreamingResponse",
"AsyncCheckpointsWithStreamingResponse",
]

View File

@@ -0,0 +1,102 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from ...._compat import cached_property
from .permissions import (
Permissions,
AsyncPermissions,
PermissionsWithRawResponse,
AsyncPermissionsWithRawResponse,
PermissionsWithStreamingResponse,
AsyncPermissionsWithStreamingResponse,
)
from ...._resource import SyncAPIResource, AsyncAPIResource
__all__ = ["Checkpoints", "AsyncCheckpoints"]
class Checkpoints(SyncAPIResource):
@cached_property
def permissions(self) -> Permissions:
return Permissions(self._client)
@cached_property
def with_raw_response(self) -> CheckpointsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return CheckpointsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> CheckpointsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return CheckpointsWithStreamingResponse(self)
class AsyncCheckpoints(AsyncAPIResource):
@cached_property
def permissions(self) -> AsyncPermissions:
return AsyncPermissions(self._client)
@cached_property
def with_raw_response(self) -> AsyncCheckpointsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncCheckpointsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncCheckpointsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncCheckpointsWithStreamingResponse(self)
class CheckpointsWithRawResponse:
def __init__(self, checkpoints: Checkpoints) -> None:
self._checkpoints = checkpoints
@cached_property
def permissions(self) -> PermissionsWithRawResponse:
return PermissionsWithRawResponse(self._checkpoints.permissions)
class AsyncCheckpointsWithRawResponse:
def __init__(self, checkpoints: AsyncCheckpoints) -> None:
self._checkpoints = checkpoints
@cached_property
def permissions(self) -> AsyncPermissionsWithRawResponse:
return AsyncPermissionsWithRawResponse(self._checkpoints.permissions)
class CheckpointsWithStreamingResponse:
def __init__(self, checkpoints: Checkpoints) -> None:
self._checkpoints = checkpoints
@cached_property
def permissions(self) -> PermissionsWithStreamingResponse:
return PermissionsWithStreamingResponse(self._checkpoints.permissions)
class AsyncCheckpointsWithStreamingResponse:
def __init__(self, checkpoints: AsyncCheckpoints) -> None:
self._checkpoints = checkpoints
@cached_property
def permissions(self) -> AsyncPermissionsWithStreamingResponse:
return AsyncPermissionsWithStreamingResponse(self._checkpoints.permissions)
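
Checkpoints holds no endpoints of its own; it only namespaces the `permissions` sub-resource defined in the next file. A usage sketch with placeholder identifiers, assuming the client path `client.fine_tuning.checkpoints.permissions`; per the docstrings below, all three calls require an admin API key.

# Sketch: grant, inspect, and revoke checkpoint permissions.
from openai import OpenAI

client = OpenAI()
perms = client.fine_tuning.checkpoints.permissions

ckpt = "ft:gpt-4o-mini:acme::ckpt-abc123"  # placeholder checkpoint ID
page = perms.create(ckpt, project_ids=["proj_abc123"])
for permission in page:  # create returns a paginated list of grants
    print(permission.id)

listing = perms.retrieve(ckpt, order="descending", limit=10)  # plain response, not a page
perms.delete("perm_abc123", fine_tuned_model_checkpoint=ckpt)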

View File

@@ -0,0 +1,419 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....pagination import SyncPage, AsyncPage
from ...._base_client import AsyncPaginator, make_request_options
from ....types.fine_tuning.checkpoints import permission_create_params, permission_retrieve_params
from ....types.fine_tuning.checkpoints.permission_create_response import PermissionCreateResponse
from ....types.fine_tuning.checkpoints.permission_delete_response import PermissionDeleteResponse
from ....types.fine_tuning.checkpoints.permission_retrieve_response import PermissionRetrieveResponse
__all__ = ["Permissions", "AsyncPermissions"]
class Permissions(SyncAPIResource):
@cached_property
def with_raw_response(self) -> PermissionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return PermissionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> PermissionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return PermissionsWithStreamingResponse(self)
def create(
self,
fine_tuned_model_checkpoint: str,
*,
project_ids: List[str],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> SyncPage[PermissionCreateResponse]:
"""
**NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys).
This enables organization owners to share fine-tuned models with other projects
in their organization.
Args:
project_ids: The project identifiers to grant access to.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuned_model_checkpoint:
raise ValueError(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
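# The grant endpoint returns a paginated list, so the call goes through
# the list helper with `method="post"` and the project IDs as the body.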
return self._get_api_list(
f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
page=SyncPage[PermissionCreateResponse],
body=maybe_transform({"project_ids": project_ids}, permission_create_params.PermissionCreateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
model=PermissionCreateResponse,
method="post",
)
def retrieve(
self,
fine_tuned_model_checkpoint: str,
*,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["ascending", "descending"] | NotGiven = NOT_GIVEN,
project_id: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> PermissionRetrieveResponse:
"""
**NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
Organization owners can use this endpoint to view all permissions for a
fine-tuned model checkpoint.
Args:
after: Identifier for the last permission ID from the previous pagination request.
limit: Number of permissions to retrieve.
order: The order in which to retrieve permissions.
project_id: The ID of the project to get permissions for.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuned_model_checkpoint:
raise ValueError(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
return self._get(
f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
"project_id": project_id,
},
permission_retrieve_params.PermissionRetrieveParams,
),
),
cast_to=PermissionRetrieveResponse,
)
def delete(
self,
permission_id: str,
*,
fine_tuned_model_checkpoint: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> PermissionDeleteResponse:
"""
**NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
Organization owners can use this endpoint to delete a permission for a
fine-tuned model checkpoint.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuned_model_checkpoint:
raise ValueError(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
if not permission_id:
raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
return self._delete(
f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions/{permission_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=PermissionDeleteResponse,
)
class AsyncPermissions(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncPermissionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncPermissionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncPermissionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncPermissionsWithStreamingResponse(self)
def create(
self,
fine_tuned_model_checkpoint: str,
*,
project_ids: List[str],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AsyncPaginator[PermissionCreateResponse, AsyncPage[PermissionCreateResponse]]:
"""
**NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys).
This enables organization owners to share fine-tuned models with other projects
in their organization.
Args:
project_ids: The project identifiers to grant access to.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuned_model_checkpoint:
raise ValueError(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
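# The grant endpoint returns a paginated list, so the call goes through
# the list helper with `method="post"` and the project IDs as the body.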
return self._get_api_list(
f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
page=AsyncPage[PermissionCreateResponse],
body=maybe_transform({"project_ids": project_ids}, permission_create_params.PermissionCreateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
model=PermissionCreateResponse,
method="post",
)
async def retrieve(
self,
fine_tuned_model_checkpoint: str,
*,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["ascending", "descending"] | NotGiven = NOT_GIVEN,
project_id: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> PermissionRetrieveResponse:
"""
**NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
Organization owners can use this endpoint to view all permissions for a
fine-tuned model checkpoint.
Args:
after: Identifier for the last permission ID from the previous pagination request.
limit: Number of permissions to retrieve.
order: The order in which to retrieve permissions.
project_id: The ID of the project to get permissions for.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuned_model_checkpoint:
raise ValueError(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
return await self._get(
f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=await async_maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
"project_id": project_id,
},
permission_retrieve_params.PermissionRetrieveParams,
),
),
cast_to=PermissionRetrieveResponse,
)
async def delete(
self,
permission_id: str,
*,
fine_tuned_model_checkpoint: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> PermissionDeleteResponse:
"""
**NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
Organization owners can use this endpoint to delete a permission for a
fine-tuned model checkpoint.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuned_model_checkpoint:
raise ValueError(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
if not permission_id:
raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
return await self._delete(
f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions/{permission_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=PermissionDeleteResponse,
)


class PermissionsWithRawResponse:
def __init__(self, permissions: Permissions) -> None:
self._permissions = permissions
self.create = _legacy_response.to_raw_response_wrapper(
permissions.create,
)
self.retrieve = _legacy_response.to_raw_response_wrapper(
permissions.retrieve,
)
self.delete = _legacy_response.to_raw_response_wrapper(
permissions.delete,
)


class AsyncPermissionsWithRawResponse:
def __init__(self, permissions: AsyncPermissions) -> None:
self._permissions = permissions
self.create = _legacy_response.async_to_raw_response_wrapper(
permissions.create,
)
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
permissions.retrieve,
)
self.delete = _legacy_response.async_to_raw_response_wrapper(
permissions.delete,
)


class PermissionsWithStreamingResponse:
def __init__(self, permissions: Permissions) -> None:
self._permissions = permissions
self.create = to_streamed_response_wrapper(
permissions.create,
)
self.retrieve = to_streamed_response_wrapper(
permissions.retrieve,
)
self.delete = to_streamed_response_wrapper(
permissions.delete,
)


class AsyncPermissionsWithStreamingResponse:
def __init__(self, permissions: AsyncPermissions) -> None:
self._permissions = permissions
self.create = async_to_streamed_response_wrapper(
permissions.create,
)
self.retrieve = async_to_streamed_response_wrapper(
permissions.retrieve,
)
self.delete = async_to_streamed_response_wrapper(
permissions.delete,
)
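
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the generated module). It assumes
# the resource above is mounted at `client.fine_tuning.checkpoints.permissions`
# and that an admin API key is configured, as the docstrings require. The
# checkpoint and project IDs are placeholders.
# ---------------------------------------------------------------------------
from openai import OpenAI

client = OpenAI()
checkpoint = "ft:gpt-4o-mini:org:custom-model-name:7p4lURel"

# `create` is a paginated POST: iterating the returned page yields the grants.
for permission in client.fine_tuning.checkpoints.permissions.create(
    checkpoint, project_ids=["proj_abc", "proj_def"]
):
    print(permission.id)

# `retrieve` lists existing grants and `delete` revokes one by ID (this
# assumes the retrieve response exposes a `data` list, as in the published SDK).
perms = client.fine_tuning.checkpoints.permissions.retrieve(checkpoint, limit=10)
client.fine_tuning.checkpoints.permissions.delete(
    perms.data[0].id, fine_tuned_model_checkpoint=checkpoint
)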


@@ -0,0 +1,134 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from ..._compat import cached_property
from .jobs.jobs import (
Jobs,
AsyncJobs,
JobsWithRawResponse,
AsyncJobsWithRawResponse,
JobsWithStreamingResponse,
AsyncJobsWithStreamingResponse,
)
from ..._resource import SyncAPIResource, AsyncAPIResource
from .checkpoints.checkpoints import (
Checkpoints,
AsyncCheckpoints,
CheckpointsWithRawResponse,
AsyncCheckpointsWithRawResponse,
CheckpointsWithStreamingResponse,
AsyncCheckpointsWithStreamingResponse,
)
__all__ = ["FineTuning", "AsyncFineTuning"]


class FineTuning(SyncAPIResource):
@cached_property
def jobs(self) -> Jobs:
return Jobs(self._client)
@cached_property
def checkpoints(self) -> Checkpoints:
return Checkpoints(self._client)
@cached_property
def with_raw_response(self) -> FineTuningWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return FineTuningWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> FineTuningWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return FineTuningWithStreamingResponse(self)


class AsyncFineTuning(AsyncAPIResource):
@cached_property
def jobs(self) -> AsyncJobs:
return AsyncJobs(self._client)
@cached_property
def checkpoints(self) -> AsyncCheckpoints:
return AsyncCheckpoints(self._client)
@cached_property
def with_raw_response(self) -> AsyncFineTuningWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncFineTuningWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncFineTuningWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncFineTuningWithStreamingResponse(self)


class FineTuningWithRawResponse:
def __init__(self, fine_tuning: FineTuning) -> None:
self._fine_tuning = fine_tuning
@cached_property
def jobs(self) -> JobsWithRawResponse:
return JobsWithRawResponse(self._fine_tuning.jobs)
@cached_property
def checkpoints(self) -> CheckpointsWithRawResponse:
return CheckpointsWithRawResponse(self._fine_tuning.checkpoints)


class AsyncFineTuningWithRawResponse:
def __init__(self, fine_tuning: AsyncFineTuning) -> None:
self._fine_tuning = fine_tuning
@cached_property
def jobs(self) -> AsyncJobsWithRawResponse:
return AsyncJobsWithRawResponse(self._fine_tuning.jobs)
@cached_property
def checkpoints(self) -> AsyncCheckpointsWithRawResponse:
return AsyncCheckpointsWithRawResponse(self._fine_tuning.checkpoints)


class FineTuningWithStreamingResponse:
def __init__(self, fine_tuning: FineTuning) -> None:
self._fine_tuning = fine_tuning
@cached_property
def jobs(self) -> JobsWithStreamingResponse:
return JobsWithStreamingResponse(self._fine_tuning.jobs)
@cached_property
def checkpoints(self) -> CheckpointsWithStreamingResponse:
return CheckpointsWithStreamingResponse(self._fine_tuning.checkpoints)


class AsyncFineTuningWithStreamingResponse:
def __init__(self, fine_tuning: AsyncFineTuning) -> None:
self._fine_tuning = fine_tuning
@cached_property
def jobs(self) -> AsyncJobsWithStreamingResponse:
return AsyncJobsWithStreamingResponse(self._fine_tuning.jobs)
@cached_property
def checkpoints(self) -> AsyncCheckpointsWithStreamingResponse:
return AsyncCheckpointsWithStreamingResponse(self._fine_tuning.checkpoints)
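
# Usage sketch (illustrative, not part of the generated module): the wrapper
# classes above let callers opt into raw or streamed responses per call
# without changing the method shape. The job ID is a placeholder.
from openai import OpenAI

client = OpenAI()

# Raw response: inspect headers, then parse into the usual model type.
raw = client.fine_tuning.with_raw_response.jobs.retrieve("ftjob-abc123")
print(raw.headers.get("x-request-id"))
job = raw.parse()

# Streaming response: the body is only read inside the context manager.
with client.fine_tuning.with_streaming_response.jobs.retrieve("ftjob-abc123") as response:
    for line in response.iter_lines():
        print(line)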


@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .jobs import (
Jobs,
AsyncJobs,
JobsWithRawResponse,
AsyncJobsWithRawResponse,
JobsWithStreamingResponse,
AsyncJobsWithStreamingResponse,
)
from .checkpoints import (
Checkpoints,
AsyncCheckpoints,
CheckpointsWithRawResponse,
AsyncCheckpointsWithRawResponse,
CheckpointsWithStreamingResponse,
AsyncCheckpointsWithStreamingResponse,
)
__all__ = [
"Checkpoints",
"AsyncCheckpoints",
"CheckpointsWithRawResponse",
"AsyncCheckpointsWithRawResponse",
"CheckpointsWithStreamingResponse",
"AsyncCheckpointsWithStreamingResponse",
"Jobs",
"AsyncJobs",
"JobsWithRawResponse",
"AsyncJobsWithRawResponse",
"JobsWithStreamingResponse",
"AsyncJobsWithStreamingResponse",
]


@@ -0,0 +1,199 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import httpx
from .... import _legacy_response
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._utils import maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....pagination import SyncCursorPage, AsyncCursorPage
from ...._base_client import (
AsyncPaginator,
make_request_options,
)
from ....types.fine_tuning.jobs import checkpoint_list_params
from ....types.fine_tuning.jobs.fine_tuning_job_checkpoint import FineTuningJobCheckpoint
__all__ = ["Checkpoints", "AsyncCheckpoints"]


class Checkpoints(SyncAPIResource):
@cached_property
def with_raw_response(self) -> CheckpointsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return CheckpointsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> CheckpointsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return CheckpointsWithStreamingResponse(self)
def list(
self,
fine_tuning_job_id: str,
*,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> SyncCursorPage[FineTuningJobCheckpoint]:
"""
List checkpoints for a fine-tuning job.
Args:
after: Identifier for the last checkpoint ID from the previous pagination request.
limit: Number of checkpoints to retrieve.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._get_api_list(
f"/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints",
page=SyncCursorPage[FineTuningJobCheckpoint],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
},
checkpoint_list_params.CheckpointListParams,
),
),
model=FineTuningJobCheckpoint,
)


class AsyncCheckpoints(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncCheckpointsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncCheckpointsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncCheckpointsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncCheckpointsWithStreamingResponse(self)
def list(
self,
fine_tuning_job_id: str,
*,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AsyncPaginator[FineTuningJobCheckpoint, AsyncCursorPage[FineTuningJobCheckpoint]]:
"""
List checkpoints for a fine-tuning job.
Args:
after: Identifier for the last checkpoint ID from the previous pagination request.
limit: Number of checkpoints to retrieve.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._get_api_list(
f"/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints",
page=AsyncCursorPage[FineTuningJobCheckpoint],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
},
checkpoint_list_params.CheckpointListParams,
),
),
model=FineTuningJobCheckpoint,
)


class CheckpointsWithRawResponse:
def __init__(self, checkpoints: Checkpoints) -> None:
self._checkpoints = checkpoints
self.list = _legacy_response.to_raw_response_wrapper(
checkpoints.list,
)


class AsyncCheckpointsWithRawResponse:
def __init__(self, checkpoints: AsyncCheckpoints) -> None:
self._checkpoints = checkpoints
self.list = _legacy_response.async_to_raw_response_wrapper(
checkpoints.list,
)


class CheckpointsWithStreamingResponse:
def __init__(self, checkpoints: Checkpoints) -> None:
self._checkpoints = checkpoints
self.list = to_streamed_response_wrapper(
checkpoints.list,
)


class AsyncCheckpointsWithStreamingResponse:
def __init__(self, checkpoints: AsyncCheckpoints) -> None:
self._checkpoints = checkpoints
self.list = async_to_streamed_response_wrapper(
checkpoints.list,
)
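
# Usage sketch (illustrative, not part of the generated module): both page
# types returned above auto-paginate during iteration, so the `after` cursor
# rarely needs manual handling. The job ID is a placeholder, and the
# `step_number` field is assumed to match the published
# FineTuningJobCheckpoint type.
from openai import OpenAI

client = OpenAI()

for checkpoint in client.fine_tuning.jobs.checkpoints.list("ftjob-abc123", limit=10):
    print(checkpoint.id, checkpoint.step_number)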


@@ -0,0 +1,758 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Dict, Union, Iterable, Optional
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from .checkpoints import (
Checkpoints,
AsyncCheckpoints,
CheckpointsWithRawResponse,
AsyncCheckpointsWithRawResponse,
CheckpointsWithStreamingResponse,
AsyncCheckpointsWithStreamingResponse,
)
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....pagination import SyncCursorPage, AsyncCursorPage
from ...._base_client import (
AsyncPaginator,
make_request_options,
)
from ....types.fine_tuning import job_list_params, job_create_params, job_list_events_params
from ....types.shared_params.metadata import Metadata
from ....types.fine_tuning.fine_tuning_job import FineTuningJob
from ....types.fine_tuning.fine_tuning_job_event import FineTuningJobEvent
__all__ = ["Jobs", "AsyncJobs"]


class Jobs(SyncAPIResource):
@cached_property
def checkpoints(self) -> Checkpoints:
return Checkpoints(self._client)
@cached_property
def with_raw_response(self) -> JobsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return JobsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> JobsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return JobsWithStreamingResponse(self)
def create(
self,
*,
model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]],
training_file: str,
hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
method: job_create_params.Method | NotGiven = NOT_GIVEN,
seed: Optional[int] | NotGiven = NOT_GIVEN,
suffix: Optional[str] | NotGiven = NOT_GIVEN,
validation_file: Optional[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> FineTuningJob:
"""
Creates a fine-tuning job which begins the process of creating a new model from
a given dataset.
Response includes details of the enqueued job including job status and the name
of the fine-tuned models once complete.
[Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
Args:
model: The name of the model to fine-tune. You can select one of the
[supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
training_file: The ID of an uploaded file that contains training data.
See [upload file](https://platform.openai.com/docs/api-reference/files/create)
for how to upload a file.
Your dataset must be formatted as a JSONL file. Additionally, you must upload
your file with the purpose `fine-tune`.
The contents of the file should differ depending on whether the model uses the
[chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or
[completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
format, and on whether the fine-tuning method uses the
[preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
format.
See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
for more details.
hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated
in favor of `method`, and should be passed in under the `method` parameter.
integrations: A list of integrations to enable for your fine-tuning job.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
method: The method used for fine-tuning.
seed: The seed controls the reproducibility of the job. Passing in the same seed and
job parameters should produce the same results, but may differ in rare cases. If
a seed is not specified, one will be generated for you.
suffix: A string of up to 64 characters that will be added to your fine-tuned model
name.
For example, a `suffix` of "custom-model-name" would produce a model name like
`ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
validation_file: The ID of an uploaded file that contains validation data.
If you provide this file, the data is used to generate validation metrics
periodically during fine-tuning. These metrics can be viewed in the fine-tuning
results file. The same data should not be present in both train and validation
files.
Your dataset must be formatted as a JSONL file. You must upload your file with
the purpose `fine-tune`.
See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
for more details.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._post(
"/fine_tuning/jobs",
body=maybe_transform(
{
"model": model,
"training_file": training_file,
"hyperparameters": hyperparameters,
"integrations": integrations,
"metadata": metadata,
"method": method,
"seed": seed,
"suffix": suffix,
"validation_file": validation_file,
},
job_create_params.JobCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FineTuningJob,
)
def retrieve(
self,
fine_tuning_job_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> FineTuningJob:
"""
Get info about a fine-tuning job.
[Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._get(
f"/fine_tuning/jobs/{fine_tuning_job_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FineTuningJob,
)
def list(
self,
*,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> SyncCursorPage[FineTuningJob]:
"""
List your organization's fine-tuning jobs.
Args:
after: Identifier for the last job from the previous pagination request.
limit: Number of fine-tuning jobs to retrieve.
metadata: Optional metadata filter. To filter, use the syntax `metadata[k]=v`.
Alternatively, set `metadata=null` to indicate no metadata.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/fine_tuning/jobs",
page=SyncCursorPage[FineTuningJob],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"metadata": metadata,
},
job_list_params.JobListParams,
),
),
model=FineTuningJob,
)
def cancel(
self,
fine_tuning_job_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> FineTuningJob:
"""
Immediately cancel a fine-tune job.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._post(
f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FineTuningJob,
)
def list_events(
self,
fine_tuning_job_id: str,
*,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> SyncCursorPage[FineTuningJobEvent]:
"""
Get status updates for a fine-tuning job.
Args:
after: Identifier for the last event from the previous pagination request.
limit: Number of events to retrieve.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._get_api_list(
f"/fine_tuning/jobs/{fine_tuning_job_id}/events",
page=SyncCursorPage[FineTuningJobEvent],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
},
job_list_events_params.JobListEventsParams,
),
),
model=FineTuningJobEvent,
)


class AsyncJobs(AsyncAPIResource):
@cached_property
def checkpoints(self) -> AsyncCheckpoints:
return AsyncCheckpoints(self._client)
@cached_property
def with_raw_response(self) -> AsyncJobsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncJobsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncJobsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncJobsWithStreamingResponse(self)
async def create(
self,
*,
model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]],
training_file: str,
hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
method: job_create_params.Method | NotGiven = NOT_GIVEN,
seed: Optional[int] | NotGiven = NOT_GIVEN,
suffix: Optional[str] | NotGiven = NOT_GIVEN,
validation_file: Optional[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> FineTuningJob:
"""
Creates a fine-tuning job which begins the process of creating a new model from
a given dataset.
Response includes details of the enqueued job including job status and the name
of the fine-tuned models once complete.
[Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
Args:
model: The name of the model to fine-tune. You can select one of the
[supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
training_file: The ID of an uploaded file that contains training data.
See [upload file](https://platform.openai.com/docs/api-reference/files/create)
for how to upload a file.
Your dataset must be formatted as a JSONL file. Additionally, you must upload
your file with the purpose `fine-tune`.
The contents of the file should differ depending on whether the model uses the
[chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or
[completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
format, and on whether the fine-tuning method uses the
[preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
format.
See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
for more details.
hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated
in favor of `method`, and should be passed in under the `method` parameter.
integrations: A list of integrations to enable for your fine-tuning job.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
method: The method used for fine-tuning.
seed: The seed controls the reproducibility of the job. Passing in the same seed and
job parameters should produce the same results, but may differ in rare cases. If
a seed is not specified, one will be generated for you.
suffix: A string of up to 64 characters that will be added to your fine-tuned model
name.
For example, a `suffix` of "custom-model-name" would produce a model name like
`ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
validation_file: The ID of an uploaded file that contains validation data.
If you provide this file, the data is used to generate validation metrics
periodically during fine-tuning. These metrics can be viewed in the fine-tuning
results file. The same data should not be present in both train and validation
files.
Your dataset must be formatted as a JSONL file. You must upload your file with
the purpose `fine-tune`.
See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
for more details.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return await self._post(
"/fine_tuning/jobs",
body=await async_maybe_transform(
{
"model": model,
"training_file": training_file,
"hyperparameters": hyperparameters,
"integrations": integrations,
"metadata": metadata,
"method": method,
"seed": seed,
"suffix": suffix,
"validation_file": validation_file,
},
job_create_params.JobCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FineTuningJob,
)
async def retrieve(
self,
fine_tuning_job_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> FineTuningJob:
"""
Get info about a fine-tuning job.
[Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return await self._get(
f"/fine_tuning/jobs/{fine_tuning_job_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FineTuningJob,
)
def list(
self,
*,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AsyncPaginator[FineTuningJob, AsyncCursorPage[FineTuningJob]]:
"""
List your organization's fine-tuning jobs.
Args:
after: Identifier for the last job from the previous pagination request.
limit: Number of fine-tuning jobs to retrieve.
metadata: Optional metadata filter. To filter, use the syntax `metadata[k]=v`.
Alternatively, set `metadata=null` to indicate no metadata.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/fine_tuning/jobs",
page=AsyncCursorPage[FineTuningJob],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"metadata": metadata,
},
job_list_params.JobListParams,
),
),
model=FineTuningJob,
)
async def cancel(
self,
fine_tuning_job_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> FineTuningJob:
"""
Immediately cancel a fine-tune job.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return await self._post(
f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FineTuningJob,
)
def list_events(
self,
fine_tuning_job_id: str,
*,
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AsyncPaginator[FineTuningJobEvent, AsyncCursorPage[FineTuningJobEvent]]:
"""
Get status updates for a fine-tuning job.
Args:
after: Identifier for the last event from the previous pagination request.
limit: Number of events to retrieve.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._get_api_list(
f"/fine_tuning/jobs/{fine_tuning_job_id}/events",
page=AsyncCursorPage[FineTuningJobEvent],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
},
job_list_events_params.JobListEventsParams,
),
),
model=FineTuningJobEvent,
)
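
# Usage sketch (illustrative, not part of the generated module): the async
# resource mirrors the sync one. Note that `list` and `list_events` above are
# plain `def`s returning an AsyncPaginator, so they are consumed with
# `async for` rather than awaited directly. The file ID is a placeholder.
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()
    job = await client.fine_tuning.jobs.create(
        model="gpt-4o-mini",
        training_file="file-abc123",
    )
    async for event in client.fine_tuning.jobs.list_events(job.id, limit=10):
        print(event.message)


if __name__ == "__main__":
    asyncio.run(main())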


class JobsWithRawResponse:
def __init__(self, jobs: Jobs) -> None:
self._jobs = jobs
self.create = _legacy_response.to_raw_response_wrapper(
jobs.create,
)
self.retrieve = _legacy_response.to_raw_response_wrapper(
jobs.retrieve,
)
self.list = _legacy_response.to_raw_response_wrapper(
jobs.list,
)
self.cancel = _legacy_response.to_raw_response_wrapper(
jobs.cancel,
)
self.list_events = _legacy_response.to_raw_response_wrapper(
jobs.list_events,
)
@cached_property
def checkpoints(self) -> CheckpointsWithRawResponse:
return CheckpointsWithRawResponse(self._jobs.checkpoints)


class AsyncJobsWithRawResponse:
def __init__(self, jobs: AsyncJobs) -> None:
self._jobs = jobs
self.create = _legacy_response.async_to_raw_response_wrapper(
jobs.create,
)
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
jobs.retrieve,
)
self.list = _legacy_response.async_to_raw_response_wrapper(
jobs.list,
)
self.cancel = _legacy_response.async_to_raw_response_wrapper(
jobs.cancel,
)
self.list_events = _legacy_response.async_to_raw_response_wrapper(
jobs.list_events,
)
@cached_property
def checkpoints(self) -> AsyncCheckpointsWithRawResponse:
return AsyncCheckpointsWithRawResponse(self._jobs.checkpoints)


class JobsWithStreamingResponse:
def __init__(self, jobs: Jobs) -> None:
self._jobs = jobs
self.create = to_streamed_response_wrapper(
jobs.create,
)
self.retrieve = to_streamed_response_wrapper(
jobs.retrieve,
)
self.list = to_streamed_response_wrapper(
jobs.list,
)
self.cancel = to_streamed_response_wrapper(
jobs.cancel,
)
self.list_events = to_streamed_response_wrapper(
jobs.list_events,
)
@cached_property
def checkpoints(self) -> CheckpointsWithStreamingResponse:
return CheckpointsWithStreamingResponse(self._jobs.checkpoints)


class AsyncJobsWithStreamingResponse:
def __init__(self, jobs: AsyncJobs) -> None:
self._jobs = jobs
self.create = async_to_streamed_response_wrapper(
jobs.create,
)
self.retrieve = async_to_streamed_response_wrapper(
jobs.retrieve,
)
self.list = async_to_streamed_response_wrapper(
jobs.list,
)
self.cancel = async_to_streamed_response_wrapper(
jobs.cancel,
)
self.list_events = async_to_streamed_response_wrapper(
jobs.list_events,
)
@cached_property
def checkpoints(self) -> AsyncCheckpointsWithStreamingResponse:
return AsyncCheckpointsWithStreamingResponse(self._jobs.checkpoints)
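
# Usage sketch (illustrative, not part of the generated module): a minimal
# end-to-end flow through the sync Jobs resource above. The file ID is a
# placeholder; per the `create` docstring, the training file must already be
# uploaded with purpose `fine-tune`.
from openai import OpenAI

client = OpenAI()

job = client.fine_tuning.jobs.create(
    model="gpt-4o-mini",
    training_file="file-abc123",
    suffix="custom-model-name",
)

# Status updates paginate with the same cursor scheme as `list`.
for event in client.fine_tuning.jobs.list_events(job.id, limit=20):
    print(event.created_at, event.message)

# A job that has not finished can still be cancelled.
if client.fine_tuning.jobs.retrieve(job.id).status not in ("succeeded", "failed", "cancelled"):
    client.fine_tuning.jobs.cancel(job.id)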


@@ -0,0 +1,701 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List, Union, Mapping, Optional, cast
from typing_extensions import Literal
import httpx
from .. import _legacy_response
from ..types import image_edit_params, image_generate_params, image_create_variation_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .._base_client import make_request_options
from ..types.image_model import ImageModel
from ..types.images_response import ImagesResponse
__all__ = ["Images", "AsyncImages"]


class Images(SyncAPIResource):
@cached_property
def with_raw_response(self) -> ImagesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return ImagesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> ImagesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return ImagesWithStreamingResponse(self)
def create_variation(
self,
*,
image: FileTypes,
model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ImagesResponse:
"""Creates a variation of a given image.
This endpoint only supports `dall-e-2`.
Args:
image: The image to use as the basis for the variation(s). Must be a valid PNG file,
less than 4MB, and square.
model: The model to use for image generation. Only `dall-e-2` is supported at this
time.
n: The number of images to generate. Must be between 1 and 10.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`. URLs are only valid for 60 minutes after the image has been
generated.
size: The size of the generated images. Must be one of `256x256`, `512x512`, or
`1024x1024`.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
body = deepcopy_minimal(
{
"image": image,
"model": model,
"n": n,
"response_format": response_format,
"size": size,
"user": user,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return self._post(
"/images/variations",
body=maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ImagesResponse,
)
def edit(
self,
*,
image: Union[FileTypes, List[FileTypes]],
prompt: str,
mask: FileTypes | NotGiven = NOT_GIVEN,
model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ImagesResponse:
"""Creates an edited or extended image given one or more source images and a
prompt.
This endpoint only supports `gpt-image-1` and `dall-e-2`.
Args:
image: The image(s) to edit. Must be a supported image file or an array of images. For
`gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
25MB. For `dall-e-2`, you can only provide one image, and it should be a square
`png` file less than 4MB.
prompt: A text description of the desired image(s). The maximum length is 1000
characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
indicate where `image` should be edited. If there are multiple images provided,
the mask will be applied on the first image. Must be a valid PNG file, less than
4MB, and have the same dimensions as `image`.
model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
is used.
n: The number of images to generate. Must be between 1 and 10.
quality: The quality of the image that will be generated. `high`, `medium` and `low` are
only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
Defaults to `auto`.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`. URLs are only valid for 60 minutes after the image has been
generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
will always return base64-encoded images.
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
(landscape), `1024x1536` (portrait), or `auto` (default value) for
`gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
body = deepcopy_minimal(
{
"image": image,
"prompt": prompt,
"mask": mask,
"model": model,
"n": n,
"quality": quality,
"response_format": response_format,
"size": size,
"user": user,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", "<array>"], ["mask"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return self._post(
"/images/edits",
body=maybe_transform(body, image_edit_params.ImageEditParams),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ImagesResponse,
)
def generate(
self,
*,
prompt: str,
background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
output_compression: Optional[int] | NotGiven = NOT_GIVEN,
output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
size: Optional[
Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
]
| NotGiven = NOT_GIVEN,
style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ImagesResponse:
"""
Creates an image given a prompt.
[Learn more](https://platform.openai.com/docs/guides/images).
Args:
prompt: A text description of the desired image(s). The maximum length is 32000
characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
for `dall-e-3`.
background: Allows you to set transparency for the background of the generated image(s).
This parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
`opaque` or `auto` (default value). When `auto` is used, the model will
automatically determine the best background for the image.
If `transparent`, the output format needs to support transparency, so it should
be set to either `png` (default value) or `webp`.
model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
`gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
`gpt-image-1` is used.
moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
be either `low` for less restrictive filtering or `auto` (default value).
n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
`n=1` is supported.
output_compression: The compression level (0-100%) for the generated images. This parameter is only
supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
defaults to 100.
output_format: The format in which the generated images are returned. This parameter is only
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
quality: The quality of the image that will be generated.
- `auto` (default value) will automatically select the best quality for the
given model.
- `high`, `medium` and `low` are supported for `gpt-image-1`.
- `hd` and `standard` are supported for `dall-e-3`.
- `standard` is the only option for `dall-e-2`.
response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
after the image has been generated. This parameter isn't supported for
`gpt-image-1` which will always return base64-encoded images.
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
(landscape), `1024x1536` (portrait), or `auto` (default value) for
`gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
style: The style of the generated images. This parameter is only supported for
`dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
towards generating hyper-real and dramatic images. Natural causes the model to
produce more natural, less hyper-real looking images.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._post(
"/images/generations",
body=maybe_transform(
{
"prompt": prompt,
"background": background,
"model": model,
"moderation": moderation,
"n": n,
"output_compression": output_compression,
"output_format": output_format,
"quality": quality,
"response_format": response_format,
"size": size,
"style": style,
"user": user,
},
image_generate_params.ImageGenerateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ImagesResponse,
)
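
# Usage sketch (illustrative, not part of the generated module): generating an
# image with the sync resource above and saving it. Decoding `b64_json`
# assumes `gpt-image-1`'s base64-only output, as described in the docstring;
# the output filename is arbitrary.
import base64

from openai import OpenAI

client = OpenAI()

result = client.images.generate(
    model="gpt-image-1",
    prompt="a watercolor fox in a snowy forest",
    size="1024x1024",
)

with open("fox.png", "wb") as f:
    f.write(base64.b64decode(result.data[0].b64_json))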


class AsyncImages(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncImagesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncImagesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncImagesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncImagesWithStreamingResponse(self)
async def create_variation(
self,
*,
image: FileTypes,
model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ImagesResponse:
"""Creates a variation of a given image.
This endpoint only supports `dall-e-2`.
Args:
image: The image to use as the basis for the variation(s). Must be a valid PNG file,
less than 4MB, and square.
model: The model to use for image generation. Only `dall-e-2` is supported at this
time.
n: The number of images to generate. Must be between 1 and 10.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`. URLs are only valid for 60 minutes after the image has been
generated.
size: The size of the generated images. Must be one of `256x256`, `512x512`, or
`1024x1024`.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
body = deepcopy_minimal(
{
"image": image,
"model": model,
"n": n,
"response_format": response_format,
"size": size,
"user": user,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return await self._post(
"/images/variations",
body=await async_maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ImagesResponse,
)
async def edit(
self,
*,
image: Union[FileTypes, List[FileTypes]],
prompt: str,
mask: FileTypes | NotGiven = NOT_GIVEN,
model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ImagesResponse:
"""Creates an edited or extended image given one or more source images and a
prompt.
This endpoint only supports `gpt-image-1` and `dall-e-2`.
Args:
image: The image(s) to edit. Must be a supported image file or an array of images. For
`gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
25MB. For `dall-e-2`, you can only provide one image, and it should be a square
`png` file less than 4MB.
prompt: A text description of the desired image(s). The maximum length is 1000
characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
indicate where `image` should be edited. If there are multiple images provided,
the mask will be applied on the first image. Must be a valid PNG file, less than
4MB, and have the same dimensions as `image`.
model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
is used.
n: The number of images to generate. Must be between 1 and 10.
quality: The quality of the image that will be generated. `high`, `medium` and `low` are
only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
Defaults to `auto`.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`. URLs are only valid for 60 minutes after the image has been
generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
will always return base64-encoded images.
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
(landscape), `1024x1536` (portrait), or `auto` (default value) for
`gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
body = deepcopy_minimal(
{
"image": image,
"prompt": prompt,
"mask": mask,
"model": model,
"n": n,
"quality": quality,
"response_format": response_format,
"size": size,
"user": user,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", "<array>"], ["mask"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return await self._post(
"/images/edits",
body=await async_maybe_transform(body, image_edit_params.ImageEditParams),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ImagesResponse,
)
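Likewise, a hedged sketch of `edit` with an inpainting mask (editorial example; `room.png` and `mask.png` are placeholder paths, and the mask must be a PNG with the same dimensions as the source image):

```python
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()
    result = await client.images.edit(
        image=open("room.png", "rb"),  # placeholder source image
        mask=open("mask.png", "rb"),   # fully transparent pixels mark the editable region
        prompt="Add a red armchair next to the window",
        model="dall-e-2",
        n=1,
        size="1024x1024",
    )
    print((result.data or [])[0].url)


asyncio.run(main())
```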
async def generate(
self,
*,
prompt: str,
background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
output_compression: Optional[int] | NotGiven = NOT_GIVEN,
output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
size: Optional[
Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
]
| NotGiven = NOT_GIVEN,
style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ImagesResponse:
"""
Creates an image given a prompt.
[Learn more](https://platform.openai.com/docs/guides/images).
Args:
prompt: A text description of the desired image(s). The maximum length is 32000
characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
for `dall-e-3`.
background: Allows you to set transparency for the background of the generated image(s). This
parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
`opaque` or `auto` (default value). When `auto` is used, the model will
automatically determine the best background for the image.
If `transparent`, the output format needs to support transparency, so it should
be set to either `png` (default value) or `webp`.
model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
`gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
`gpt-image-1` is used.
moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
be either `low` for less restrictive filtering or `auto` (default value).
n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
`n=1` is supported.
output_compression: The compression level (0-100%) for the generated images. This parameter is only
supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
defaults to 100.
output_format: The format in which the generated images are returned. This parameter is only
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
quality: The quality of the image that will be generated.
- `auto` (default value) will automatically select the best quality for the
given model.
- `high`, `medium` and `low` are supported for `gpt-image-1`.
- `hd` and `standard` are supported for `dall-e-3`.
- `standard` is the only option for `dall-e-2`.
response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
after the image has been generated. This parameter isn't supported for
`gpt-image-1` which will always return base64-encoded images.
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
(landscape), `1024x1536` (portrait), or `auto` (default value) for
`gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
style: The style of the generated images. This parameter is only supported for
`dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
towards generating hyper-real and dramatic images. Natural causes the model to
produce more natural, less hyper-real looking images.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return await self._post(
"/images/generations",
body=await async_maybe_transform(
{
"prompt": prompt,
"background": background,
"model": model,
"moderation": moderation,
"n": n,
"output_compression": output_compression,
"output_format": output_format,
"quality": quality,
"response_format": response_format,
"size": size,
"style": style,
"user": user,
},
image_generate_params.ImageGenerateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ImagesResponse,
)
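A sketch of `generate` with `gpt-image-1`, which always returns base64-encoded data rather than URLs (editorial example; the prompt and output path are placeholders):

```python
import asyncio
import base64

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()
    # gpt-image-1 ignores response_format and always returns base64,
    # so it is omitted here.
    result = await client.images.generate(
        model="gpt-image-1",
        prompt="A watercolor lighthouse at dusk",
        size="1024x1536",
        quality="medium",
        output_format="png",
    )
    data = (result.data or [])[0]
    if data.b64_json is not None:
        with open("lighthouse.png", "wb") as f:
            f.write(base64.b64decode(data.b64_json))


asyncio.run(main())
```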
class ImagesWithRawResponse:
def __init__(self, images: Images) -> None:
self._images = images
self.create_variation = _legacy_response.to_raw_response_wrapper(
images.create_variation,
)
self.edit = _legacy_response.to_raw_response_wrapper(
images.edit,
)
self.generate = _legacy_response.to_raw_response_wrapper(
images.generate,
)
class AsyncImagesWithRawResponse:
def __init__(self, images: AsyncImages) -> None:
self._images = images
self.create_variation = _legacy_response.async_to_raw_response_wrapper(
images.create_variation,
)
self.edit = _legacy_response.async_to_raw_response_wrapper(
images.edit,
)
self.generate = _legacy_response.async_to_raw_response_wrapper(
images.generate,
)
class ImagesWithStreamingResponse:
def __init__(self, images: Images) -> None:
self._images = images
self.create_variation = to_streamed_response_wrapper(
images.create_variation,
)
self.edit = to_streamed_response_wrapper(
images.edit,
)
self.generate = to_streamed_response_wrapper(
images.generate,
)
class AsyncImagesWithStreamingResponse:
def __init__(self, images: AsyncImages) -> None:
self._images = images
self.create_variation = async_to_streamed_response_wrapper(
images.create_variation,
)
self.edit = async_to_streamed_response_wrapper(
images.edit,
)
self.generate = async_to_streamed_response_wrapper(
images.generate,
)
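These wrapper classes back the `with_raw_response` and `with_streaming_response` properties documented earlier in the module. A brief sketch of how they are reached from a client (editorial example; the request-id header name is illustrative):

```python
from openai import OpenAI

client = OpenAI()

# Raw variant: exposes HTTP details, then parses on demand.
raw = client.images.with_raw_response.generate(
    model="dall-e-2",
    prompt="A pixel-art fox",
)
print(raw.headers.get("x-request-id"))
images = raw.parse()  # ImagesResponse

# Streaming variant: does not eagerly read the response body.
with client.images.with_streaming_response.generate(
    model="dall-e-2",
    prompt="A pixel-art fox",
) as response:
    print(response.headers.get("content-type"))
```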


@@ -0,0 +1,306 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import httpx
from .. import _legacy_response
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..pagination import SyncPage, AsyncPage
from ..types.model import Model
from .._base_client import (
AsyncPaginator,
make_request_options,
)
from ..types.model_deleted import ModelDeleted
__all__ = ["Models", "AsyncModels"]
class Models(SyncAPIResource):
@cached_property
def with_raw_response(self) -> ModelsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return ModelsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> ModelsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return ModelsWithStreamingResponse(self)
def retrieve(
self,
model: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Model:
"""
Retrieves a model instance, providing basic information about the model such as
the owner and permissioning.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not model:
raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
return self._get(
f"/models/{model}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Model,
)
def list(
self,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> SyncPage[Model]:
"""
Lists the currently available models, and provides basic information about each
one such as the owner and availability.
"""
return self._get_api_list(
"/models",
page=SyncPage[Model],
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
model=Model,
)
def delete(
self,
model: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ModelDeleted:
"""Delete a fine-tuned model.
You must have the Owner role in your organization to
delete a model.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not model:
raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
return self._delete(
f"/models/{model}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ModelDeleted,
)
class AsyncModels(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncModelsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncModelsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncModelsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncModelsWithStreamingResponse(self)
async def retrieve(
self,
model: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Model:
"""
Retrieves a model instance, providing basic information about the model such as
the owner and permissioning.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not model:
raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
return await self._get(
f"/models/{model}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Model,
)
def list(
self,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AsyncPaginator[Model, AsyncPage[Model]]:
"""
Lists the currently available models, and provides basic information about each
one such as the owner and availability.
"""
return self._get_api_list(
"/models",
page=AsyncPage[Model],
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
model=Model,
)
async def delete(
self,
model: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ModelDeleted:
"""Delete a fine-tuned model.
You must have the Owner role in your organization to
delete a model.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not model:
raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
return await self._delete(
f"/models/{model}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ModelDeleted,
)
class ModelsWithRawResponse:
def __init__(self, models: Models) -> None:
self._models = models
self.retrieve = _legacy_response.to_raw_response_wrapper(
models.retrieve,
)
self.list = _legacy_response.to_raw_response_wrapper(
models.list,
)
self.delete = _legacy_response.to_raw_response_wrapper(
models.delete,
)
class AsyncModelsWithRawResponse:
def __init__(self, models: AsyncModels) -> None:
self._models = models
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
models.retrieve,
)
self.list = _legacy_response.async_to_raw_response_wrapper(
models.list,
)
self.delete = _legacy_response.async_to_raw_response_wrapper(
models.delete,
)
class ModelsWithStreamingResponse:
def __init__(self, models: Models) -> None:
self._models = models
self.retrieve = to_streamed_response_wrapper(
models.retrieve,
)
self.list = to_streamed_response_wrapper(
models.list,
)
self.delete = to_streamed_response_wrapper(
models.delete,
)
class AsyncModelsWithStreamingResponse:
def __init__(self, models: AsyncModels) -> None:
self._models = models
self.retrieve = async_to_streamed_response_wrapper(
models.retrieve,
)
self.list = async_to_streamed_response_wrapper(
models.list,
)
self.delete = async_to_streamed_response_wrapper(
models.delete,
)
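An editorial sketch of the three methods above (assumes `OPENAI_API_KEY` is set; the model names are illustrative, and `delete` is shown commented out because it is destructive and works only on fine-tuned models):

```python
from openai import OpenAI

client = OpenAI()

# list() returns a SyncPage[Model]; iterating it pages through
# results automatically.
for model in client.models.list():
    print(model.id, model.owned_by)

model = client.models.retrieve("gpt-4o-mini")
print(model.created)

# Requires the Owner role; the fine-tuned model name is a placeholder.
# client.models.delete("ft:gpt-4o-mini:acme::abc123")
```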


@@ -0,0 +1,197 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List, Union, Iterable
import httpx
from .. import _legacy_response
from ..types import moderation_create_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from .._utils import maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .._base_client import make_request_options
from ..types.moderation_model import ModerationModel
from ..types.moderation_create_response import ModerationCreateResponse
from ..types.moderation_multi_modal_input_param import ModerationMultiModalInputParam
__all__ = ["Moderations", "AsyncModerations"]
class Moderations(SyncAPIResource):
@cached_property
def with_raw_response(self) -> ModerationsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return ModerationsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> ModerationsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return ModerationsWithStreamingResponse(self)
def create(
self,
*,
input: Union[str, List[str], Iterable[ModerationMultiModalInputParam]],
model: Union[str, ModerationModel] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ModerationCreateResponse:
"""Classifies if text and/or image inputs are potentially harmful.
Learn more in
the [moderation guide](https://platform.openai.com/docs/guides/moderation).
Args:
input: Input (or inputs) to classify. Can be a single string, an array of strings, or
an array of multi-modal input objects (each combining text and/or image inputs).
model: The content moderation model you would like to use. Learn more in
[the moderation guide](https://platform.openai.com/docs/guides/moderation), and
learn about available models
[here](https://platform.openai.com/docs/models#moderation).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._post(
"/moderations",
body=maybe_transform(
{
"input": input,
"model": model,
},
moderation_create_params.ModerationCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ModerationCreateResponse,
)
class AsyncModerations(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncModerationsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncModerationsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncModerationsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncModerationsWithStreamingResponse(self)
async def create(
self,
*,
input: Union[str, List[str], Iterable[ModerationMultiModalInputParam]],
model: Union[str, ModerationModel] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ModerationCreateResponse:
"""Classifies if text and/or image inputs are potentially harmful.
Learn more in
the [moderation guide](https://platform.openai.com/docs/guides/moderation).
Args:
input: Input (or inputs) to classify. Can be a single string, an array of strings, or
an array of multi-modal input objects (each combining text and/or image inputs).
model: The content moderation model you would like to use. Learn more in
[the moderation guide](https://platform.openai.com/docs/guides/moderation), and
learn about available models
[here](https://platform.openai.com/docs/models#moderation).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return await self._post(
"/moderations",
body=await async_maybe_transform(
{
"input": input,
"model": model,
},
moderation_create_params.ModerationCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ModerationCreateResponse,
)
class ModerationsWithRawResponse:
def __init__(self, moderations: Moderations) -> None:
self._moderations = moderations
self.create = _legacy_response.to_raw_response_wrapper(
moderations.create,
)
class AsyncModerationsWithRawResponse:
def __init__(self, moderations: AsyncModerations) -> None:
self._moderations = moderations
self.create = _legacy_response.async_to_raw_response_wrapper(
moderations.create,
)
class ModerationsWithStreamingResponse:
def __init__(self, moderations: Moderations) -> None:
self._moderations = moderations
self.create = to_streamed_response_wrapper(
moderations.create,
)
class AsyncModerationsWithStreamingResponse:
def __init__(self, moderations: AsyncModerations) -> None:
self._moderations = moderations
self.create = async_to_streamed_response_wrapper(
moderations.create,
)
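A minimal editorial sketch of `moderations.create` (the model name is illustrative; any string input works):

```python
from openai import OpenAI

client = OpenAI()

result = client.moderations.create(
    model="omni-moderation-latest",  # illustrative model name
    input="...text to classify...",
)
first = result.results[0]
print(first.flagged)
# Categories is a pydantic model; dump it to see per-category booleans.
print(first.categories.model_dump())
```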


@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .responses import (
Responses,
AsyncResponses,
ResponsesWithRawResponse,
AsyncResponsesWithRawResponse,
ResponsesWithStreamingResponse,
AsyncResponsesWithStreamingResponse,
)
from .input_items import (
InputItems,
AsyncInputItems,
InputItemsWithRawResponse,
AsyncInputItemsWithRawResponse,
InputItemsWithStreamingResponse,
AsyncInputItemsWithStreamingResponse,
)
__all__ = [
"InputItems",
"AsyncInputItems",
"InputItemsWithRawResponse",
"AsyncInputItemsWithRawResponse",
"InputItemsWithStreamingResponse",
"AsyncInputItemsWithStreamingResponse",
"Responses",
"AsyncResponses",
"ResponsesWithRawResponse",
"AsyncResponsesWithRawResponse",
"ResponsesWithStreamingResponse",
"AsyncResponsesWithStreamingResponse",
]


@@ -0,0 +1,234 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Any, List, cast
from typing_extensions import Literal
import httpx
from ... import _legacy_response
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ..._utils import maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...pagination import SyncCursorPage, AsyncCursorPage
from ..._base_client import AsyncPaginator, make_request_options
from ...types.responses import input_item_list_params
from ...types.responses.response_item import ResponseItem
from ...types.responses.response_includable import ResponseIncludable
__all__ = ["InputItems", "AsyncInputItems"]
class InputItems(SyncAPIResource):
@cached_property
def with_raw_response(self) -> InputItemsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return InputItemsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> InputItemsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return InputItemsWithStreamingResponse(self)
def list(
self,
response_id: str,
*,
after: str | NotGiven = NOT_GIVEN,
before: str | NotGiven = NOT_GIVEN,
include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> SyncCursorPage[ResponseItem]:
"""
Returns a list of input items for a given response.
Args:
after: An item ID to list items after, used in pagination.
before: An item ID to list items before, used in pagination.
include: Additional fields to include in the response. See the `include` parameter for
Response creation above for more information.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
order: The order to return the input items in. Default is `asc`.
- `asc`: Return the input items in ascending order.
- `desc`: Return the input items in descending order.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
return self._get_api_list(
f"/responses/{response_id}/input_items",
page=SyncCursorPage[ResponseItem],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"before": before,
"include": include,
"limit": limit,
"order": order,
},
input_item_list_params.InputItemListParams,
),
),
model=cast(Any, ResponseItem), # Union types cannot be passed in as arguments in the type system
)
class AsyncInputItems(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncInputItemsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncInputItemsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncInputItemsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncInputItemsWithStreamingResponse(self)
def list(
self,
response_id: str,
*,
after: str | NotGiven = NOT_GIVEN,
before: str | NotGiven = NOT_GIVEN,
include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AsyncPaginator[ResponseItem, AsyncCursorPage[ResponseItem]]:
"""
Returns a list of input items for a given response.
Args:
after: An item ID to list items after, used in pagination.
before: An item ID to list items before, used in pagination.
include: Additional fields to include in the response. See the `include` parameter for
Response creation above for more information.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
order: The order to return the input items in. Default is `asc`.
- `asc`: Return the input items in ascending order.
- `desc`: Return the input items in descending order.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
return self._get_api_list(
f"/responses/{response_id}/input_items",
page=AsyncCursorPage[ResponseItem],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"before": before,
"include": include,
"limit": limit,
"order": order,
},
input_item_list_params.InputItemListParams,
),
),
model=cast(Any, ResponseItem), # Union types cannot be passed in as arguments in the type system
)
class InputItemsWithRawResponse:
def __init__(self, input_items: InputItems) -> None:
self._input_items = input_items
self.list = _legacy_response.to_raw_response_wrapper(
input_items.list,
)
class AsyncInputItemsWithRawResponse:
def __init__(self, input_items: AsyncInputItems) -> None:
self._input_items = input_items
self.list = _legacy_response.async_to_raw_response_wrapper(
input_items.list,
)
class InputItemsWithStreamingResponse:
def __init__(self, input_items: InputItems) -> None:
self._input_items = input_items
self.list = to_streamed_response_wrapper(
input_items.list,
)
class AsyncInputItemsWithStreamingResponse:
def __init__(self, input_items: AsyncInputItems) -> None:
self._input_items = input_items
self.list = async_to_streamed_response_wrapper(
input_items.list,
)
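An editorial sketch of paginating input items (assumes an existing response; `resp_123` is a placeholder ID):

```python
from openai import OpenAI

client = OpenAI()

# Placeholder for the ID of a previously created response.
response_id = "resp_123"

# list() returns a SyncCursorPage[ResponseItem]; iteration follows the
# `after` cursor across pages automatically.
for item in client.responses.input_items.list(
    response_id,
    limit=20,
    order="asc",
):
    print(item.type)
```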

File diff suppressed because it is too large.

Some files were not shown because too many files have changed in this diff.