chore: automatic commit 2025-04-30 12:48

commit e4ab1e1bb5
parent f69356473b
Date: 2025-04-30 12:48:06 +02:00

5284 changed files with 868438 additions and 0 deletions


@@ -0,0 +1 @@
from ._main import register_commands as register_commands


@@ -0,0 +1,16 @@
from __future__ import annotations

from argparse import ArgumentParser

from . import chat, audio, files, image, models, completions


def register_commands(parser: ArgumentParser) -> None:
    subparsers = parser.add_subparsers(help="All API subcommands")

    chat.register(subparsers)
    image.register(subparsers)
    audio.register(subparsers)
    files.register(subparsers)
    models.register(subparsers)
    completions.register(subparsers)
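
For context, a minimal sketch of how `register_commands` is presumably wired into a root parser and dispatched. The entry point itself is not part of this diff, so the `openai.cli._api` import path and the `func` dispatch convention are inferred from the `set_defaults` calls in the submodules:

# Hypothetical wiring sketch, not part of this commit.
from argparse import ArgumentParser

from openai.cli._api import register_commands  # assumed import path

parser = ArgumentParser(prog="openai")
register_commands(parser)

ns = parser.parse_args(["models.list"])
ns.func()  # models.list registers no args_model, so its handler takes no arguments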


@@ -0,0 +1,108 @@
from __future__ import annotations

import sys
from typing import TYPE_CHECKING, Any, Optional, cast
from argparse import ArgumentParser

from .._utils import get_client, print_model
from ..._types import NOT_GIVEN
from .._models import BaseModel
from .._progress import BufferReader
from ...types.audio import Transcription

if TYPE_CHECKING:
    from argparse import _SubParsersAction


def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
    # transcriptions
    sub = subparser.add_parser("audio.transcriptions.create")

    # Required
    sub.add_argument("-m", "--model", type=str, default="whisper-1")
    sub.add_argument("-f", "--file", type=str, required=True)
    # Optional
    sub.add_argument("--response-format", type=str)
    sub.add_argument("--language", type=str)
    sub.add_argument("-t", "--temperature", type=float)
    sub.add_argument("--prompt", type=str)
    sub.set_defaults(func=CLIAudio.transcribe, args_model=CLITranscribeArgs)

    # translations
    sub = subparser.add_parser("audio.translations.create")

    # Required
    sub.add_argument("-f", "--file", type=str, required=True)
    # Optional
    sub.add_argument("-m", "--model", type=str, default="whisper-1")
    sub.add_argument("--response-format", type=str)
    # TODO: doesn't seem to be supported by the API
    # sub.add_argument("--language", type=str)
    sub.add_argument("-t", "--temperature", type=float)
    sub.add_argument("--prompt", type=str)
    sub.set_defaults(func=CLIAudio.translate, args_model=CLITranslationArgs)


class CLITranscribeArgs(BaseModel):
    model: str
    file: str
    response_format: Optional[str] = None
    language: Optional[str] = None
    temperature: Optional[float] = None
    prompt: Optional[str] = None


class CLITranslationArgs(BaseModel):
    model: str
    file: str
    response_format: Optional[str] = None
    language: Optional[str] = None
    temperature: Optional[float] = None
    prompt: Optional[str] = None


class CLIAudio:
    @staticmethod
    def transcribe(args: CLITranscribeArgs) -> None:
        with open(args.file, "rb") as file_reader:
            buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")

        model = cast(
            "Transcription | str",
            get_client().audio.transcriptions.create(
                file=(args.file, buffer_reader),
                model=args.model,
                language=args.language or NOT_GIVEN,
                temperature=args.temperature or NOT_GIVEN,
                prompt=args.prompt or NOT_GIVEN,
                # casts required because the API is typed for enums
                # but we don't want to validate that here for forwards-compat
                response_format=cast(Any, args.response_format),
            ),
        )
        if isinstance(model, str):
            sys.stdout.write(model + "\n")
        else:
            print_model(model)

    @staticmethod
    def translate(args: CLITranslationArgs) -> None:
        with open(args.file, "rb") as file_reader:
            buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")

        model = cast(
            "Transcription | str",
            get_client().audio.translations.create(
                file=(args.file, buffer_reader),
                model=args.model,
                temperature=args.temperature or NOT_GIVEN,
                prompt=args.prompt or NOT_GIVEN,
                # casts required because the API is typed for enums
                # but we don't want to validate that here for forwards-compat
                response_format=cast(Any, args.response_format),
            ),
        )
        if isinstance(model, str):
            sys.stdout.write(model + "\n")
        else:
            print_model(model)
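
Note that `args.temperature or NOT_GIVEN` treats any falsy value as unset, so an explicit `--temperature 0` is also sent as NOT_GIVEN; the same applies to empty `--language` and `--prompt` strings. A minimal direct invocation, bypassing argparse (the filename is a placeholder; assumes a configured API key):

# Sketch only: requires OPENAI_API_KEY and a real audio file.
args = CLITranscribeArgs(model="whisper-1", file="speech.mp3")
CLIAudio.transcribe(args)  # uploads the file and prints the transcription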


@@ -0,0 +1,13 @@
from __future__ import annotations

from typing import TYPE_CHECKING
from argparse import ArgumentParser

from . import completions

if TYPE_CHECKING:
    from argparse import _SubParsersAction


def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
    completions.register(subparser)


@@ -0,0 +1,160 @@
from __future__ import annotations

import sys
from typing import TYPE_CHECKING, List, Optional, cast
from argparse import ArgumentParser

from typing_extensions import Literal, NamedTuple

from ..._utils import get_client
from ..._models import BaseModel
from ...._streaming import Stream
from ....types.chat import (
    ChatCompletionRole,
    ChatCompletionChunk,
    CompletionCreateParams,
)
from ....types.chat.completion_create_params import (
    CompletionCreateParamsStreaming,
    CompletionCreateParamsNonStreaming,
)

if TYPE_CHECKING:
    from argparse import _SubParsersAction


def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
    sub = subparser.add_parser("chat.completions.create")

    sub._action_groups.pop()
    req = sub.add_argument_group("required arguments")
    opt = sub.add_argument_group("optional arguments")

    req.add_argument(
        "-g",
        "--message",
        action="append",
        nargs=2,
        metavar=("ROLE", "CONTENT"),
        help="A message in `{role} {content}` format. Use this argument multiple times to add multiple messages.",
        required=True,
    )
    req.add_argument(
        "-m",
        "--model",
        help="The model to use.",
        required=True,
    )

    opt.add_argument(
        "-n",
        "--n",
        help="How many completions to generate for the conversation.",
        type=int,
    )
    opt.add_argument("-M", "--max-tokens", help="The maximum number of tokens to generate.", type=int)
    opt.add_argument(
        "-t",
        "--temperature",
        help="""What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.

Mutually exclusive with `top_p`.""",
        type=float,
    )
    opt.add_argument(
        "-P",
        "--top_p",
        help="""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.

Mutually exclusive with `temperature`.""",
        type=float,
    )
    opt.add_argument(
        "--stop",
        help="A stop sequence at which to stop generating tokens for the message.",
    )
    opt.add_argument("--stream", help="Stream messages as they're ready.", action="store_true")
    sub.set_defaults(func=CLIChatCompletion.create, args_model=CLIChatCompletionCreateArgs)


class CLIMessage(NamedTuple):
    role: ChatCompletionRole
    content: str


class CLIChatCompletionCreateArgs(BaseModel):
    message: List[CLIMessage]
    model: str
    n: Optional[int] = None
    max_tokens: Optional[int] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    stop: Optional[str] = None
    stream: bool = False


class CLIChatCompletion:
    @staticmethod
    def create(args: CLIChatCompletionCreateArgs) -> None:
        params: CompletionCreateParams = {
            "model": args.model,
            "messages": [
                {"role": cast(Literal["user"], message.role), "content": message.content} for message in args.message
            ],
            # type checkers are not good at inferring union types so we have to set stream afterwards
            "stream": False,
        }
        if args.temperature is not None:
            params["temperature"] = args.temperature
        if args.stop is not None:
            params["stop"] = args.stop
        if args.top_p is not None:
            params["top_p"] = args.top_p
        if args.n is not None:
            params["n"] = args.n
        if args.stream:
            params["stream"] = args.stream  # type: ignore
        if args.max_tokens is not None:
            params["max_tokens"] = args.max_tokens

        if args.stream:
            return CLIChatCompletion._stream_create(cast(CompletionCreateParamsStreaming, params))

        return CLIChatCompletion._create(cast(CompletionCreateParamsNonStreaming, params))

    @staticmethod
    def _create(params: CompletionCreateParamsNonStreaming) -> None:
        completion = get_client().chat.completions.create(**params)
        should_print_header = len(completion.choices) > 1
        for choice in completion.choices:
            if should_print_header:
                sys.stdout.write("===== Chat Completion {} =====\n".format(choice.index))

            content = choice.message.content if choice.message.content is not None else "None"
            sys.stdout.write(content)

            if should_print_header or not content.endswith("\n"):
                sys.stdout.write("\n")

            sys.stdout.flush()

    @staticmethod
    def _stream_create(params: CompletionCreateParamsStreaming) -> None:
        # cast is required for mypy
        stream = cast(  # pyright: ignore[reportUnnecessaryCast]
            Stream[ChatCompletionChunk], get_client().chat.completions.create(**params)
        )
        for chunk in stream:
            should_print_header = len(chunk.choices) > 1
            for choice in chunk.choices:
                if should_print_header:
                    sys.stdout.write("===== Chat Completion {} =====\n".format(choice.index))

                content = choice.delta.content or ""
                sys.stdout.write(content)

                if should_print_header:
                    sys.stdout.write("\n")

                sys.stdout.flush()

        sys.stdout.write("\n")
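
Each `-g ROLE CONTENT` pair arrives from argparse as a two-item list that the `message: List[CLIMessage]` field coerces into named tuples. Calling the handler directly is equivalent to the command line (the model name is a placeholder; assumes a configured API key):

# Sketch only: roughly `... chat.completions.create -g user "Hello!" -m gpt-4 --stream`
args = CLIChatCompletionCreateArgs(
    message=[CLIMessage(role="user", content="Hello!")],
    model="gpt-4",
    stream=True,
)
CLIChatCompletion.create(args)  # streams delta content to stdout as it arrives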


@@ -0,0 +1,173 @@
from __future__ import annotations

import sys
from typing import TYPE_CHECKING, Optional, cast
from argparse import ArgumentParser
from functools import partial

from openai.types.completion import Completion

from .._utils import get_client
from ..._types import NOT_GIVEN, NotGivenOr
from ..._utils import is_given
from .._errors import CLIError
from .._models import BaseModel
from ..._streaming import Stream

if TYPE_CHECKING:
    from argparse import _SubParsersAction


def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
    sub = subparser.add_parser("completions.create")

    # Required
    sub.add_argument(
        "-m",
        "--model",
        help="The model to use",
        required=True,
    )

    # Optional
    sub.add_argument("-p", "--prompt", help="An optional prompt to complete from")
    sub.add_argument("--stream", help="Stream tokens as they're ready.", action="store_true")
    sub.add_argument("-M", "--max-tokens", help="The maximum number of tokens to generate", type=int)
    sub.add_argument(
        "-t",
        "--temperature",
        help="""What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.

Mutually exclusive with `top_p`.""",
        type=float,
    )
    sub.add_argument(
        "-P",
        "--top_p",
        help="""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.

Mutually exclusive with `temperature`.""",
        type=float,
    )
    sub.add_argument(
        "-n",
        "--n",
        help="How many sub-completions to generate for each prompt.",
        type=int,
    )
    sub.add_argument(
        "--logprobs",
        help="Include the log probabilities on the `logprobs` most likely tokens, as well as the chosen tokens. So for example, if `logprobs` is 10, the API will return a list of the 10 most likely tokens. If `logprobs` is 0, only the chosen tokens will have logprobs returned.",
        type=int,
    )
    sub.add_argument(
        "--best_of",
        help="Generates `best_of` completions server-side and returns the 'best' (the one with the highest log probability per token). Results cannot be streamed.",
        type=int,
    )
    sub.add_argument(
        "--echo",
        help="Echo back the prompt in addition to the completion",
        action="store_true",
    )
    sub.add_argument(
        "--frequency_penalty",
        help="Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
        type=float,
    )
    sub.add_argument(
        "--presence_penalty",
        help="Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
        type=float,
    )
    sub.add_argument("--suffix", help="The suffix that comes after a completion of inserted text.")
    sub.add_argument("--stop", help="A stop sequence at which to stop generating tokens.")
    sub.add_argument(
        "--user",
        help="A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.",
    )
    # TODO: add support for logit_bias
    sub.set_defaults(func=CLICompletions.create, args_model=CLICompletionCreateArgs)


class CLICompletionCreateArgs(BaseModel):
    model: str
    stream: bool = False

    prompt: Optional[str] = None
    n: NotGivenOr[int] = NOT_GIVEN
    stop: NotGivenOr[str] = NOT_GIVEN
    user: NotGivenOr[str] = NOT_GIVEN
    echo: NotGivenOr[bool] = NOT_GIVEN
    suffix: NotGivenOr[str] = NOT_GIVEN
    best_of: NotGivenOr[int] = NOT_GIVEN
    top_p: NotGivenOr[float] = NOT_GIVEN
    logprobs: NotGivenOr[int] = NOT_GIVEN
    max_tokens: NotGivenOr[int] = NOT_GIVEN
    temperature: NotGivenOr[float] = NOT_GIVEN
    presence_penalty: NotGivenOr[float] = NOT_GIVEN
    frequency_penalty: NotGivenOr[float] = NOT_GIVEN


class CLICompletions:
    @staticmethod
    def create(args: CLICompletionCreateArgs) -> None:
        if is_given(args.n) and args.n > 1 and args.stream:
            raise CLIError("Can't stream completions with n>1 with the current CLI")

        make_request = partial(
            get_client().completions.create,
            n=args.n,
            echo=args.echo,
            stop=args.stop,
            user=args.user,
            model=args.model,
            top_p=args.top_p,
            prompt=args.prompt,
            suffix=args.suffix,
            best_of=args.best_of,
            logprobs=args.logprobs,
            max_tokens=args.max_tokens,
            temperature=args.temperature,
            presence_penalty=args.presence_penalty,
            frequency_penalty=args.frequency_penalty,
        )

        if args.stream:
            return CLICompletions._stream_create(
                # mypy doesn't understand the `partial` function but pyright does
                cast(Stream[Completion], make_request(stream=True))  # pyright: ignore[reportUnnecessaryCast]
            )

        return CLICompletions._create(make_request())

    @staticmethod
    def _create(completion: Completion) -> None:
        should_print_header = len(completion.choices) > 1
        for choice in completion.choices:
            if should_print_header:
                sys.stdout.write("===== Completion {} =====\n".format(choice.index))

            sys.stdout.write(choice.text)

            if should_print_header or not choice.text.endswith("\n"):
                sys.stdout.write("\n")

            sys.stdout.flush()

    @staticmethod
    def _stream_create(stream: Stream[Completion]) -> None:
        for completion in stream:
            should_print_header = len(completion.choices) > 1
            for choice in sorted(completion.choices, key=lambda c: c.index):
                if should_print_header:
                    sys.stdout.write("===== Completion {} =====\n".format(choice.index))

                sys.stdout.write(choice.text)

                if should_print_header:
                    sys.stdout.write("\n")

                sys.stdout.flush()

        sys.stdout.write("\n")
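
Unlike the chat module, which uses Optional fields plus a chain of `if ... is not None` checks, this module defaults every optional field to NOT_GIVEN and forwards them all through `partial`, relying on the client to drop NOT_GIVEN values from the request. A direct call might look like this (placeholder model name; assumes a configured API key):

# Sketch only: fields left at NOT_GIVEN are omitted from the API request.
args = CLICompletionCreateArgs(model="gpt-3.5-turbo-instruct", prompt="Say hello", max_tokens=16)
CLICompletions.create(args)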


@@ -0,0 +1,80 @@
from __future__ import annotations

from typing import TYPE_CHECKING, Any, cast
from argparse import ArgumentParser

from .._utils import get_client, print_model
from .._models import BaseModel
from .._progress import BufferReader

if TYPE_CHECKING:
    from argparse import _SubParsersAction


def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
    sub = subparser.add_parser("files.create")

    sub.add_argument(
        "-f",
        "--file",
        required=True,
        help="File to upload",
    )
    sub.add_argument(
        "-p",
        "--purpose",
        help="Why are you uploading this file? (see https://platform.openai.com/docs/api-reference/ for purposes)",
        required=True,
    )
    sub.set_defaults(func=CLIFile.create, args_model=CLIFileCreateArgs)

    sub = subparser.add_parser("files.retrieve")
    sub.add_argument("-i", "--id", required=True, help="The file ID")
    sub.set_defaults(func=CLIFile.get, args_model=CLIFileIDArgs)

    sub = subparser.add_parser("files.delete")
    sub.add_argument("-i", "--id", required=True, help="The file ID")
    sub.set_defaults(func=CLIFile.delete, args_model=CLIFileIDArgs)

    sub = subparser.add_parser("files.list")
    sub.set_defaults(func=CLIFile.list)


class CLIFileIDArgs(BaseModel):
    id: str


class CLIFileCreateArgs(BaseModel):
    file: str
    purpose: str


class CLIFile:
    @staticmethod
    def create(args: CLIFileCreateArgs) -> None:
        with open(args.file, "rb") as file_reader:
            buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")

        file = get_client().files.create(
            file=(args.file, buffer_reader),
            # casts required because the API is typed for enums
            # but we don't want to validate that here for forwards-compat
            purpose=cast(Any, args.purpose),
        )
        print_model(file)

    @staticmethod
    def get(args: CLIFileIDArgs) -> None:
        file = get_client().files.retrieve(file_id=args.id)
        print_model(file)

    @staticmethod
    def delete(args: CLIFileIDArgs) -> None:
        file = get_client().files.delete(file_id=args.id)
        print_model(file)

    @staticmethod
    def list() -> None:
        files = get_client().files.list()
        for file in files:
            print_model(file)
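
`create` reads the entire file into memory through BufferReader before uploading, which is fine for typical fine-tune files but worth noting for very large uploads. A direct usage sketch (placeholder path and purpose; assumes a configured API key):

# Sketch only.
CLIFile.create(CLIFileCreateArgs(file="training.jsonl", purpose="fine-tune"))
CLIFile.list()  # print_model is called once per uploaded file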


@@ -0,0 +1,139 @@
from __future__ import annotations

from typing import TYPE_CHECKING, Any, cast
from argparse import ArgumentParser

from .._utils import get_client, print_model
from ..._types import NOT_GIVEN, NotGiven, NotGivenOr
from .._models import BaseModel
from .._progress import BufferReader

if TYPE_CHECKING:
    from argparse import _SubParsersAction


def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
    sub = subparser.add_parser("images.generate")
    sub.add_argument("-m", "--model", type=str)
    sub.add_argument("-p", "--prompt", type=str, required=True)
    sub.add_argument("-n", "--num-images", type=int, default=1)
    sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image")
    sub.add_argument("--response-format", type=str, default="url")
    sub.set_defaults(func=CLIImage.create, args_model=CLIImageCreateArgs)

    sub = subparser.add_parser("images.edit")
    sub.add_argument("-m", "--model", type=str)
    sub.add_argument("-p", "--prompt", type=str, required=True)
    sub.add_argument("-n", "--num-images", type=int, default=1)
    sub.add_argument(
        "-I",
        "--image",
        type=str,
        required=True,
        help="Image to modify. Should be a local path to a PNG-encoded image.",
    )
    sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image")
    sub.add_argument("--response-format", type=str, default="url")
    sub.add_argument(
        "-M",
        "--mask",
        type=str,
        required=False,
        help="Path to a mask image. It should be the same size as the image you're editing and an RGBA PNG image. The alpha channel acts as the mask.",
    )
    sub.set_defaults(func=CLIImage.edit, args_model=CLIImageEditArgs)

    sub = subparser.add_parser("images.create_variation")
    sub.add_argument("-m", "--model", type=str)
    sub.add_argument("-n", "--num-images", type=int, default=1)
    sub.add_argument(
        "-I",
        "--image",
        type=str,
        required=True,
        help="Image to modify. Should be a local path to a PNG-encoded image.",
    )
    sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image")
    sub.add_argument("--response-format", type=str, default="url")
    sub.set_defaults(func=CLIImage.create_variation, args_model=CLIImageCreateVariationArgs)


class CLIImageCreateArgs(BaseModel):
    prompt: str
    num_images: int
    size: str
    response_format: str
    model: NotGivenOr[str] = NOT_GIVEN


class CLIImageCreateVariationArgs(BaseModel):
    image: str
    num_images: int
    size: str
    response_format: str
    model: NotGivenOr[str] = NOT_GIVEN


class CLIImageEditArgs(BaseModel):
    image: str
    num_images: int
    size: str
    response_format: str
    prompt: str
    mask: NotGivenOr[str] = NOT_GIVEN
    model: NotGivenOr[str] = NOT_GIVEN


class CLIImage:
    @staticmethod
    def create(args: CLIImageCreateArgs) -> None:
        image = get_client().images.generate(
            model=args.model,
            prompt=args.prompt,
            n=args.num_images,
            # casts required because the API is typed for enums
            # but we don't want to validate that here for forwards-compat
            size=cast(Any, args.size),
            response_format=cast(Any, args.response_format),
        )
        print_model(image)

    @staticmethod
    def create_variation(args: CLIImageCreateVariationArgs) -> None:
        with open(args.image, "rb") as file_reader:
            buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")

        image = get_client().images.create_variation(
            model=args.model,
            image=("image", buffer_reader),
            n=args.num_images,
            # casts required because the API is typed for enums
            # but we don't want to validate that here for forwards-compat
            size=cast(Any, args.size),
            response_format=cast(Any, args.response_format),
        )
        print_model(image)

    @staticmethod
    def edit(args: CLIImageEditArgs) -> None:
        with open(args.image, "rb") as file_reader:
            buffer_reader = BufferReader(file_reader.read(), desc="Image upload progress")

        if isinstance(args.mask, NotGiven):
            mask: NotGivenOr[BufferReader] = NOT_GIVEN
        else:
            with open(args.mask, "rb") as file_reader:
                mask = BufferReader(file_reader.read(), desc="Mask progress")

        image = get_client().images.edit(
            model=args.model,
            prompt=args.prompt,
            image=("image", buffer_reader),
            n=args.num_images,
            mask=("mask", mask) if not isinstance(mask, NotGiven) else mask,
            # casts required because the API is typed for enums
            # but we don't want to validate that here for forwards-compat
            size=cast(Any, args.size),
            response_format=cast(Any, args.response_format),
        )
        print_model(image)
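
When `--mask` is omitted, the field stays the NotGiven sentinel and `edit` forwards it unchanged rather than wrapping it in a `("mask", ...)` tuple, so the client leaves it out of the multipart request. A direct generation sketch (assumes a configured API key):

# Sketch only: `model` stays NOT_GIVEN here, so the server-side default model applies.
args = CLIImageCreateArgs(prompt="a watercolor fox", num_images=1, size="1024x1024", response_format="url")
CLIImage.create(args)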


@@ -0,0 +1,45 @@
from __future__ import annotations

from typing import TYPE_CHECKING
from argparse import ArgumentParser

from .._utils import get_client, print_model
from .._models import BaseModel

if TYPE_CHECKING:
    from argparse import _SubParsersAction


def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
    sub = subparser.add_parser("models.list")
    sub.set_defaults(func=CLIModels.list)

    sub = subparser.add_parser("models.retrieve")
    sub.add_argument("-i", "--id", required=True, help="The model ID")
    sub.set_defaults(func=CLIModels.get, args_model=CLIModelIDArgs)

    sub = subparser.add_parser("models.delete")
    sub.add_argument("-i", "--id", required=True, help="The model ID")
    sub.set_defaults(func=CLIModels.delete, args_model=CLIModelIDArgs)


class CLIModelIDArgs(BaseModel):
    id: str


class CLIModels:
    @staticmethod
    def get(args: CLIModelIDArgs) -> None:
        model = get_client().models.retrieve(model=args.id)
        print_model(model)

    @staticmethod
    def delete(args: CLIModelIDArgs) -> None:
        model = get_client().models.delete(model=args.id)
        print_model(model)

    @staticmethod
    def list() -> None:
        models = get_client().models.list()
        for model in models:
            print_model(model)
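
A direct usage sketch (the model ID is a placeholder; assumes a configured API key). `models.list()` returns a page object whose iterator yields each model, fetching further pages automatically:

# Sketch only.
CLIModels.list()
CLIModels.get(CLIModelIDArgs(id="gpt-4o-mini"))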