<?xml version="1.0"?>
<doc>
<assembly>
<name>OpenAI_API</name>
</assembly>
<members>
<member name="T:OpenAI_API.APIAuthentication">
<summary>
Represents authentication to the OpenAI API endpoint
</summary>
</member>
<member name="P:OpenAI_API.APIAuthentication.ApiKey">
<summary>
The API key, required to access the API endpoint.
</summary>
</member>
<member name="P:OpenAI_API.APIAuthentication.OpenAIOrganization">
<summary>
The Organization ID to count API requests against. This can be found at https://beta.openai.com/account/org-settings.
</summary>
</member>
<member name="M:OpenAI_API.APIAuthentication.op_Implicit(System.String)~OpenAI_API.APIAuthentication">
<summary>
Allows implicit casting from a string, so that a simple string API key can be provided in place of an instance of <see cref="T:OpenAI_API.APIAuthentication"/>
</summary>
<param name="key">The API key to convert into a <see cref="T:OpenAI_API.APIAuthentication"/>.</param>
</member>
<member name="M:OpenAI_API.APIAuthentication.#ctor(System.String)">
<summary>
Instantiates a new Authentication object with the given <paramref name="apiKey"/>, which may be <see langword="null"/>.
</summary>
<param name="apiKey">The API key, required to access the API endpoint.</param>
</member>
<member name="M:OpenAI_API.APIAuthentication.#ctor(System.String,System.String)">
<summary>
Instantiates a new Authentication object with the given <paramref name="apiKey"/>, which may be <see langword="null"/>. For users who belong to multiple organizations, you can specify which organization is used. Usage from these API requests will count against the specified organization's subscription quota.
</summary>
<param name="apiKey">The API key, required to access the API endpoint.</param>
<param name="openAIOrganization">The Organization ID to count API requests against. This can be found at https://beta.openai.com/account/org-settings.</param>
</member>
<member name="P:OpenAI_API.APIAuthentication.Default">
<summary>
The default authentication to use when no other auth is specified. This can be set manually, or automatically loaded via environment variables or a config file. <seealso cref="M:OpenAI_API.APIAuthentication.LoadFromEnv"/><seealso cref="M:OpenAI_API.APIAuthentication.LoadFromPath(System.String,System.String,System.Boolean)"/>
</summary>
</member>
<member name="M:OpenAI_API.APIAuthentication.LoadFromEnv">
<summary>
Attempts to load the API key from the environment variables "OPENAI_KEY" or "OPENAI_API_KEY". Also loads the organization from "OPENAI_ORGANIZATION" if present.
</summary>
<returns>Returns the loaded <see cref="T:OpenAI_API.APIAuthentication"/> if any API keys were found, or <see langword="null"/> if there were no matching environment variables.</returns>
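<example>
A hedged sketch of falling back from environment variables to an explicit key (the key below is a placeholder):
<code>
// Returns null if neither OPENAI_KEY nor OPENAI_API_KEY is set
APIAuthentication auth = APIAuthentication.LoadFromEnv();
if (auth == null)
    auth = new APIAuthentication("sk-placeholder-key");
</code>
</example>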
</member>
<member name="M:OpenAI_API.APIAuthentication.LoadFromPath(System.String,System.String,System.Boolean)">
<summary>
Attempts to load API keys from a configuration file, by default ".openai" in the current directory, optionally traversing up the directory tree
</summary>
<param name="directory">The directory to look in, or <see langword="null"/> for the current directory</param>
<param name="filename">The filename of the config file</param>
<param name="searchUp">Whether to recursively traverse up the directory tree if the <paramref name="filename"/> is not found in the <paramref name="directory"/></param>
<returns>Returns the loaded <see cref="T:OpenAI_API.APIAuthentication"/> if any API keys were found, or <see langword="null"/> if no config file was found (or if the config file did not contain correctly formatted API keys)</returns>
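<example>
A sketch using the documented defaults; the named arguments match the parameters described above:
<code>
// Look for a ".openai" config file, walking up from the current directory
APIAuthentication auth = APIAuthentication.LoadFromPath(directory: null, filename: ".openai", searchUp: true);
if (auth == null)
    Console.WriteLine("No config file with a valid API key was found.");
</code>
</example>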
</member>
<member name="M:OpenAI_API.APIAuthentication.ValidateAPIKey">
<summary>
Tests the API key against the OpenAI API to ensure it is valid. This hits the models endpoint, so it should not incur usage charges.
</summary>
<returns><see langword="true"/> if the API key is valid, or <see langword="false"/> if it is empty or not accepted by the OpenAI API.</returns>
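<example>
A sketch of validating a key before use; awaiting the call is assumed, per the async conventions used elsewhere in this library:
<code>
APIAuthentication auth = new APIAuthentication("sk-placeholder-key");
bool isValid = await auth.ValidateAPIKey();
if (!isValid)
    Console.WriteLine("The API key was empty or rejected by the OpenAI API.");
</code>
</example>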
</member>
<member name="M:OpenAI_API.AuthHelpers.ThisOrDefault(OpenAI_API.APIAuthentication)">
<summary>
A helper method to swap out <see langword="null"/> <see cref="T:OpenAI_API.APIAuthentication"/> objects with the <see cref="P:OpenAI_API.APIAuthentication.Default"/> authentication, possibly loaded from ENV or a config file.
</summary>
<param name="auth">The specific authentication to use if not <see langword="null"/></param>
<returns>Either the provided <paramref name="auth"/> or the <see cref="P:OpenAI_API.APIAuthentication.Default"/></returns>
</member>
<member name="T:OpenAI_API.ApiResultBase">
<summary>
Represents a result from calling the OpenAI API, with all the common metadata returned from every endpoint
</summary>
</member>
<member name="P:OpenAI_API.ApiResultBase.Created">
<summary>
The time when the result was generated
</summary>
</member>
<member name="P:OpenAI_API.ApiResultBase.CreatedUnixTime">
<summary>
The time when the result was generated in unix epoch format
</summary>
</member>
<member name="P:OpenAI_API.ApiResultBase.Model">
<summary>
Which model was used to generate this result.
</summary>
</member>
<member name="P:OpenAI_API.ApiResultBase.Object">
<summary>
Object type, e.g. text_completion, file, fine-tune, list, etc.
</summary>
</member>
<member name="P:OpenAI_API.ApiResultBase.Organization">
<summary>
The organization associated with the API request, as reported by the API.
</summary>
</member>
<member name="P:OpenAI_API.ApiResultBase.ProcessingTime">
<summary>
The server-side processing time as reported by the API. This can be useful for debugging where a delay occurs.
</summary>
</member>
<member name="P:OpenAI_API.ApiResultBase.RequestId">
<summary>
The request id of this API call, as reported in the response headers. This may be useful for troubleshooting or when contacting OpenAI support in reference to a specific request.
</summary>
</member>
<member name="P:OpenAI_API.ApiResultBase.OpenaiVersion">
<summary>
The Openai-Version used to generate this response, as reported in the response headers. This may be useful for troubleshooting or when contacting OpenAI support in reference to a specific request.
</summary>
</member>
<member name="T:OpenAI_API.Chat.ChatEndpoint">
<summary>
ChatGPT API endpoint. Use this endpoint to send multiple messages and carry on a conversation.
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatEndpoint.DefaultChatRequestArgs">
<summary>
This allows you to set default parameters for every request, for example to set a default temperature or max tokens. For every request, if you do not have a parameter set on the request but do have it set here as a default, the request will automatically pick up the default value.
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatEndpoint.Endpoint">
<summary>
The name of the endpoint, which is the final path segment in the API URL. For example, "completions".
</summary>
</member>
<member name="M:OpenAI_API.Chat.ChatEndpoint.#ctor(OpenAI_API.OpenAIAPI)">
<summary>
Constructor of the API endpoint. Rather than instantiating this yourself, access it through an instance of <see cref="T:OpenAI_API.OpenAIAPI"/> as <see cref="P:OpenAI_API.OpenAIAPI.Chat"/>.
</summary>
<param name="api"></param>
</member>
<member name="M:OpenAI_API.Chat.ChatEndpoint.CreateConversation(OpenAI_API.Chat.ChatRequest)">
<summary>
Creates an ongoing chat which can easily encapsulate the conversation. This is the simplest way to use the Chat endpoint.
</summary>
<param name="defaultChatRequestArgs">Allows setting the parameters to use when calling the ChatGPT API. Can be useful for setting temperature, presence_penalty, and more. See <see href="https://platform.openai.com/docs/api-reference/chat/create">OpenAI documentation for a list of possible parameters to tweak.</see></param>
<returns>A <see cref="T:OpenAI_API.Chat.Conversation"/> which encapsulates a back-and-forth chat between a user and an assistant.</returns>
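<example>
A minimal usage sketch, assuming <c>api</c> is an <see cref="T:OpenAI_API.OpenAIAPI"/> instance with valid authentication:
<code>
var chat = api.Chat.CreateConversation();
chat.AppendSystemMessage("You are a helpful assistant.");
chat.AppendUserInput("What is the capital of France?");
string reply = await chat.GetResponseFromChatbotAsync();
</code>
</example>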
</member>
<member name="M:OpenAI_API.Chat.ChatEndpoint.CreateChatCompletionAsync(OpenAI_API.Chat.ChatRequest)">
<summary>
Ask the API to complete the request using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in <see cref="P:OpenAI_API.Chat.ChatEndpoint.DefaultChatRequestArgs"/> if present.
</summary>
<param name="request">The request to send to the API.</param>
<returns>Asynchronously returns the completion result. Look in its <see cref="P:OpenAI_API.Chat.ChatResult.Choices"/> property for the results.</returns>
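<example>
A hedged sketch of a non-streaming call; the <see cref="T:OpenAI_API.Chat.ChatRequest"/> property names are taken from the documentation elsewhere in this file, and <c>api</c> is assumed to be a configured <see cref="T:OpenAI_API.OpenAIAPI"/>:
<code>
var request = new ChatRequest()
{
    Model = Model.ChatGPTTurbo,
    Temperature = 0.7,
    MaxTokens = 200,
    Messages = new ChatMessage[]
    {
        new ChatMessage(ChatMessageRole.User, "Summarize nucleus sampling in one sentence.")
    }
};
ChatResult result = await api.Chat.CreateChatCompletionAsync(request);
// ChatResult.ToString() returns the content of the first choice's message
Console.WriteLine(result.ToString());
</code>
</example>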
</member>
<member name="M:OpenAI_API.Chat.ChatEndpoint.CreateChatCompletionAsync(OpenAI_API.Chat.ChatRequest,System.Int32)">
<summary>
Ask the API to complete the request using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in <see cref="P:OpenAI_API.Chat.ChatEndpoint.DefaultChatRequestArgs"/> if present.
</summary>
<param name="request">The request to send to the API.</param>
<param name="numOutputs">Overrides <see cref="P:OpenAI_API.Chat.ChatRequest.NumChoicesPerMessage"/> as a convenience.</param>
<returns>Asynchronously returns the completion result. Look in its <see cref="P:OpenAI_API.Chat.ChatResult.Choices"/> property for the results.</returns>
</member>
<member name="M:OpenAI_API.Chat.ChatEndpoint.CreateChatCompletionAsync(System.Collections.Generic.IList{OpenAI_API.Chat.ChatMessage},OpenAI_API.Models.Model,System.Nullable{System.Double},System.Nullable{System.Double},System.Nullable{System.Int32},System.Nullable{System.Int32},System.Nullable{System.Double},System.Nullable{System.Double},System.Collections.Generic.IReadOnlyDictionary{System.String,System.Single},System.String[])">
<summary>
Ask the API to complete the request using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in <see cref="P:OpenAI_API.Chat.ChatEndpoint.DefaultChatRequestArgs"/> if present.
</summary>
<param name="messages">The array of messages to send to the API</param>
<param name="model">The model to use. See the ChatGPT models available from <see cref="M:OpenAI_API.Models.ModelsEndpoint.GetModelsAsync"/></param>
<param name="temperature">What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <paramref name="top_p"/> but not both.</param>
<param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <paramref name="temperature"/> but not both.</param>
<param name="numOutputs">How many different choices to request for each prompt.</param>
<param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
<param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
<param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
<param name="logitBias">Maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.</param>
<param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
<returns>Asynchronously returns the completion result. Look in its <see cref="P:OpenAI_API.Chat.ChatResult.Choices"/> property for the results.</returns>
</member>
<member name="M:OpenAI_API.Chat.ChatEndpoint.CreateChatCompletionAsync(OpenAI_API.Chat.ChatMessage[])">
<summary>
Ask the API to complete the request using the specified message(s). Any parameters will fall back to default values specified in <see cref="P:OpenAI_API.Chat.ChatEndpoint.DefaultChatRequestArgs"/> if present.
</summary>
<param name="messages">The messages to use in the generation.</param>
<returns>The <see cref="T:OpenAI_API.Chat.ChatResult"/> with the API response.</returns>
</member>
<member name="M:OpenAI_API.Chat.ChatEndpoint.CreateChatCompletionAsync(System.String[])">
<summary>
Ask the API to complete the request using the specified message(s). Any parameters will fall back to default values specified in <see cref="P:OpenAI_API.Chat.ChatEndpoint.DefaultChatRequestArgs"/> if present.
</summary>
<param name="userMessages">The user message or messages to use in the generation. All strings are assumed to be of Role <see cref="P:OpenAI_API.Chat.ChatMessageRole.User"/></param>
<returns>The <see cref="T:OpenAI_API.Chat.ChatResult"/> with the API response.</returns>
</member>
<member name="M:OpenAI_API.Chat.ChatEndpoint.StreamCompletionAsync(OpenAI_API.Chat.ChatRequest,System.Action{System.Int32,OpenAI_API.Chat.ChatResult})">
<summary>
Ask the API to complete the message(s) using the specified request, and stream the results to the <paramref name="resultHandler"/> as they come in.
If you are on the latest C# supporting async enumerables, you may prefer the cleaner syntax of <see cref="M:OpenAI_API.Chat.ChatEndpoint.StreamChatEnumerableAsync(OpenAI_API.Chat.ChatRequest)"/> instead.
</summary>
<param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="P:OpenAI_API.Chat.ChatEndpoint.DefaultChatRequestArgs"/>.</param>
<param name="resultHandler">An action to be called as each new result arrives, which includes the index of the result in the overall result set.</param>
</member>
<member name="M:OpenAI_API.Chat.ChatEndpoint.StreamChatAsync(OpenAI_API.Chat.ChatRequest,System.Action{OpenAI_API.Chat.ChatResult})">
<summary>
Ask the API to complete the message(s) using the specified request, and stream the results to the <paramref name="resultHandler"/> as they come in.
If you are on the latest C# supporting async enumerables, you may prefer the cleaner syntax of <see cref="M:OpenAI_API.Chat.ChatEndpoint.StreamChatEnumerableAsync(OpenAI_API.Chat.ChatRequest)"/> instead.
</summary>
<param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="P:OpenAI_API.Chat.ChatEndpoint.DefaultChatRequestArgs"/>.</param>
<param name="resultHandler">An action to be called as each new result arrives.</param>
</member>
<member name="M:OpenAI_API.Chat.ChatEndpoint.StreamChatEnumerableAsync(OpenAI_API.Chat.ChatRequest)">
<summary>
Ask the API to complete the message(s) using the specified request, and stream the results as they come in.
If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="M:OpenAI_API.Chat.ChatEndpoint.StreamChatAsync(OpenAI_API.Chat.ChatRequest,System.Action{OpenAI_API.Chat.ChatResult})"/> instead.
</summary>
<param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="P:OpenAI_API.Chat.ChatEndpoint.DefaultChatRequestArgs"/>.</param>
<returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams"/> for more details on how to consume an async enumerable.</returns>
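<example>
A sketch of consuming the async stream (requires C# 8 or later); <c>request</c> is assumed to be a <see cref="T:OpenAI_API.Chat.ChatRequest"/> like the one shown earlier, and <see cref="P:OpenAI_API.Chat.ChatChoice.Delta"/> carries the partial message during streaming, as documented below:
<code>
await foreach (ChatResult partial in api.Chat.StreamChatEnumerableAsync(request))
{
    // Each streamed result holds a partial "delta" rather than a full message;
    // Content may be null on boundary chunks, hence the null-conditional access
    Console.Write(partial.Choices[0].Delta?.Content);
}
</code>
</example>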
</member>
<member name="M:OpenAI_API.Chat.ChatEndpoint.StreamChatEnumerableAsync(System.Collections.Generic.IList{OpenAI_API.Chat.ChatMessage},OpenAI_API.Models.Model,System.Nullable{System.Double},System.Nullable{System.Double},System.Nullable{System.Int32},System.Nullable{System.Int32},System.Nullable{System.Double},System.Nullable{System.Double},System.Collections.Generic.IReadOnlyDictionary{System.String,System.Single},System.String[])">
<summary>
Ask the API to complete the message(s) using the specified request, and stream the results as they come in.
If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="M:OpenAI_API.Chat.ChatEndpoint.StreamChatAsync(OpenAI_API.Chat.ChatRequest,System.Action{OpenAI_API.Chat.ChatResult})"/> instead.
</summary>
<param name="messages">The array of messages to send to the API</param>
<param name="model">The model to use. See the ChatGPT models available from <see cref="M:OpenAI_API.Models.ModelsEndpoint.GetModelsAsync"/></param>
<param name="temperature">What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <paramref name="top_p"/> but not both.</param>
<param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <paramref name="temperature"/> but not both.</param>
<param name="numOutputs">How many different choices to request for each prompt.</param>
<param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
<param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
<param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
<param name="logitBias">Maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.</param>
<param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
<returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams">the C# docs</see> for more details on how to consume an async enumerable.</returns>
</member>
<member name="T:OpenAI_API.Chat.ChatMessage">
<summary>
Chat message sent or received from the API. Includes who is speaking in the "role" and the message text in the "content"
</summary>
</member>
<member name="M:OpenAI_API.Chat.ChatMessage.#ctor">
<summary>
Creates an empty <see cref="T:OpenAI_API.Chat.ChatMessage"/>, with <see cref="P:OpenAI_API.Chat.ChatMessage.Role"/> defaulting to <see cref="P:OpenAI_API.Chat.ChatMessageRole.User"/>
</summary>
</member>
<member name="M:OpenAI_API.Chat.ChatMessage.#ctor(OpenAI_API.Chat.ChatMessageRole,System.String)">
<summary>
Constructor for a new Chat Message
</summary>
<param name="role">The role of the message, which can be "system", "assistant" or "user"</param>
<param name="content">The text to send in the message</param>
</member>
<member name="P:OpenAI_API.Chat.ChatMessage.Role">
<summary>
The role of the message, which can be "system", "assistant" or "user"
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatMessage.Content">
<summary>
The content of the message
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatMessage.Name">
<summary>
An optional name of the user in a multi-user chat
</summary>
</member>
<member name="T:OpenAI_API.Chat.ChatMessageRole">
<summary>
Represents the Role of a <see cref="T:OpenAI_API.Chat.ChatMessage"/>. Typically, a conversation is formatted with a system message first, followed by alternating user and assistant messages. See <see href="https://platform.openai.com/docs/guides/chat/introduction">the OpenAI docs</see> for more details about usage.
</summary>
</member>
<member name="M:OpenAI_API.Chat.ChatMessageRole.#ctor(System.String)">
<summary>
Constructor is private to force usage of strongly typed values
</summary>
<param name="value">The string value of the role</param>
</member>
<member name="M:OpenAI_API.Chat.ChatMessageRole.FromString(System.String)">
<summary>
Gets the singleton instance of <see cref="T:OpenAI_API.Chat.ChatMessageRole"/> based on the string value.
</summary>
<param name="roleName">Muse be one of "system", "user", or "assistant"</param>
<returns></returns>
</member>
<member name="P:OpenAI_API.Chat.ChatMessageRole.System">
<summary>
The system message helps set the behavior of the assistant.
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatMessageRole.User">
<summary>
The user messages help instruct the assistant. They can be generated by the end users of an application, or set by a developer as an instruction.
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatMessageRole.Assistant">
<summary>
The assistant messages help store prior responses. They can also be written by a developer to help give examples of desired behavior.
</summary>
</member>
<member name="M:OpenAI_API.Chat.ChatMessageRole.ToString">
<summary>
Gets the string value for this role to pass to the API
</summary>
<returns>The role as a string</returns>
</member>
<member name="M:OpenAI_API.Chat.ChatMessageRole.Equals(System.Object)">
<summary>
Determines whether this instance and a specified object have the same value.
</summary>
<param name="obj">The ChatMessageRole to compare to this instance</param>
<returns>true if obj is a ChatMessageRole and its value is the same as this instance; otherwise, false. If obj is null, the method returns false</returns>
</member>
<member name="M:OpenAI_API.Chat.ChatMessageRole.GetHashCode">
<summary>
Returns the hash code for this object
</summary>
<returns>A 32-bit signed integer hash code</returns>
</member>
<member name="M:OpenAI_API.Chat.ChatMessageRole.Equals(OpenAI_API.Chat.ChatMessageRole)">
<summary>
Determines whether this instance and a specified object have the same value.
</summary>
<param name="other">The ChatMessageRole to compare to this instance</param>
<returns>true if other's value is the same as this instance; otherwise, false. If other is null, the method returns false</returns>
</member>
<member name="M:OpenAI_API.Chat.ChatMessageRole.op_Implicit(OpenAI_API.Chat.ChatMessageRole)~System.String">
<summary>
Gets the string value for this role to pass to the API
</summary>
<param name="value">The ChatMessageRole to convert</param>
</member>
<member name="T:OpenAI_API.Chat.ChatRequest">
<summary>
A request to the Chat API. This is similar, but not exactly the same as the <see cref="T:OpenAI_API.Completions.CompletionRequest"/>
Based on the <see href="https://platform.openai.com/docs/api-reference/chat">OpenAI API docs</see>
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatRequest.Model">
<summary>
The model to use for this request
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatRequest.Messages">
<summary>
The messages to send with this Chat Request
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatRequest.Temperature">
<summary>
What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommended to use this or <see cref="P:OpenAI_API.Chat.ChatRequest.TopP"/> but not both.
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatRequest.TopP">
<summary>
An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommended to use this or <see cref="P:OpenAI_API.Chat.ChatRequest.Temperature"/> but not both.
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatRequest.NumChoicesPerMessage">
<summary>
How many different choices to request for each message. Defaults to 1.
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatRequest.Stream">
<summary>
Specifies whether the results should be streamed as they are generated or returned all at once. Do not set this yourself; use the appropriate methods on <see cref="T:OpenAI_API.Chat.ChatEndpoint"/> instead.
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatRequest.CompiledStop">
<summary>
This is only used for serializing the request into JSON, do not use it directly.
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatRequest.MultipleStopSequences">
<summary>
One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatRequest.StopSequence">
<summary>
The stop sequence where the API will stop generating further tokens. The returned text will not contain the stop sequence. For convenience, if you are only requesting a single stop sequence, set it here
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatRequest.MaxTokens">
<summary>
How many tokens to complete to. Can return fewer if a stop sequence is hit. Defaults to 16.
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatRequest.FrequencyPenalty">
<summary>
The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse. Defaults to 0.
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatRequest.PresencePenalty">
<summary>
The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse. Defaults to 0.
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatRequest.LogitBias">
<summary>
Modify the likelihood of specified tokens appearing in the completion.
Accepts a json object that maps tokens(specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.
Mathematically, the bias is added to the logits generated by the model prior to sampling.
The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
</summary>
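<example>
A hedged sketch of setting a bias; the token IDs below are placeholders, since real IDs depend on the tokenizer of the chosen model:
<code>
var request = new ChatRequest()
{
    // Effectively ban one token (-100) and mildly encourage another (+1)
    LogitBias = new Dictionary&lt;string, float&gt;()
    {
        { "50256", -100f },  // placeholder token ID
        { "198", 1f }        // placeholder token ID
    }
};
</code>
</example>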
</member>
<member name="P:OpenAI_API.Chat.ChatRequest.user">
<summary>
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
</summary>
</member>
<member name="M:OpenAI_API.Chat.ChatRequest.#ctor">
<summary>
Creates a new, empty <see cref="T:OpenAI_API.Chat.ChatRequest"/>
</summary>
</member>
<member name="M:OpenAI_API.Chat.ChatRequest.#ctor(OpenAI_API.Chat.ChatRequest)">
<summary>
Create a new chat request using the data from the input chat request.
</summary>
<param name="basedOn"></param>
</member>
<member name="T:OpenAI_API.Chat.ChatResult">
<summary>
Represents a result from calling the Chat API
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatResult.Id">
<summary>
The identifier of the result, which may be used during troubleshooting
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatResult.Choices">
<summary>
The list of choices that the user was presented with during the chat interaction
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatResult.Usage">
<summary>
The usage statistics for the chat interaction
</summary>
</member>
<member name="M:OpenAI_API.Chat.ChatResult.ToString">
<summary>
A convenience method to return the content of the message in the first choice of this response
</summary>
<returns>The content of the message, not including <see cref="T:OpenAI_API.Chat.ChatMessageRole"/>.</returns>
</member>
<member name="T:OpenAI_API.Chat.ChatChoice">
<summary>
A message received from the API, including the message text, index, and reason why the message finished.
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatChoice.Index">
<summary>
The index of the choice in the list of choices
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatChoice.Message">
<summary>
The message that was presented to the user as the choice
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatChoice.FinishReason">
<summary>
The reason why the chat interaction ended after this choice was presented to the user
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatChoice.Delta">
<summary>
Partial message "delta" from a stream. For example, the result from <see cref="M:OpenAI_API.Chat.ChatEndpoint.StreamChatEnumerableAsync(OpenAI_API.Chat.ChatRequest)">StreamChatEnumerableAsync.</see>
If this result object is not from a stream, this will be null
</summary>
</member>
<member name="M:OpenAI_API.Chat.ChatChoice.ToString">
<summary>
A convenience method to return the content of the message in this response
</summary>
<returns>The content of the message in this response, not including <see cref="T:OpenAI_API.Chat.ChatMessageRole"/>.</returns>
</member>
<member name="T:OpenAI_API.Chat.ChatUsage">
<summary>
How many tokens were used in this chat interaction.
</summary>
</member>
<member name="P:OpenAI_API.Chat.ChatUsage.CompletionTokens">
<summary>
The number of completion tokens used during the chat interaction
</summary>
</member>
<member name="T:OpenAI_API.Chat.Conversation">
<summary>
Represents an ongoing chat with back-and-forth interactions between the user and the chatbot. This is the simplest way to interact with the ChatGPT API, rather than manually using the <see cref="T:OpenAI_API.Chat.ChatEndpoint"/> methods, though you do lose some flexibility.
</summary>
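<example>
A sketch of a few-shot conversation built from the append methods documented below, assuming <c>api</c> is a configured <see cref="T:OpenAI_API.OpenAIAPI"/>:
<code>
var chat = api.Chat.CreateConversation();
chat.AppendSystemMessage("You answer with a single word.");
chat.AppendUserInput("Is the sky blue?");
chat.AppendExampleChatbotOutput("Yes");  // an example of the desired behavior
chat.AppendUserInput("Is grass red?");
string answer = await chat.GetResponseFromChatbotAsync();
</code>
</example>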
</member>
<member name="F:OpenAI_API.Chat.Conversation._endpoint">
<summary>
An internal reference to the API endpoint, needed for API requests
</summary>
</member>
<member name="P:OpenAI_API.Chat.Conversation.RequestParameters">
<summary>
Allows setting the parameters to use when calling the ChatGPT API. Can be useful for setting temperature, presence_penalty, and more. See <see href="https://platform.openai.com/docs/api-reference/chat/create">OpenAI documentation for a list of possible parameters to tweak.</see>
</summary>
</member>
<member name="P:OpenAI_API.Chat.Conversation.Model">
<summary>
Specifies the model to use for ChatGPT requests. This is just a shorthand to access <see cref="P:OpenAI_API.Chat.Conversation.RequestParameters"/>.Model
</summary>
</member>
<member name="P:OpenAI_API.Chat.Conversation.MostResentAPIResult">
<summary>
After calling <see cref="M:OpenAI_API.Chat.Conversation.GetResponseFromChatbotAsync"/>, this contains the full response object which can contain useful metadata like token usages, <see cref="P:OpenAI_API.Chat.ChatChoice.FinishReason"/>, etc. This is overwritten with every call to <see cref="M:OpenAI_API.Chat.Conversation.GetResponseFromChatbotAsync"/> and only contains the most recent result.
</summary>
</member>
<member name="M:OpenAI_API.Chat.Conversation.#ctor(OpenAI_API.Chat.ChatEndpoint,OpenAI_API.Models.Model,OpenAI_API.Chat.ChatRequest)">
<summary>
Creates a new conversation with ChatGPT chat
</summary>
<param name="endpoint">A reference to the API endpoint, needed for API requests. Generally should be <see cref="P:OpenAI_API.OpenAIAPI.Chat"/>.</param>
<param name="model">Optionally specify the model to use for ChatGPT requests. If not specified, used <paramref name="defaultChatRequestArgs"/>.Model or falls back to <see cref="P:OpenAI_API.Models.Model.ChatGPTTurbo"/></param>
<param name="defaultChatRequestArgs">Allows setting the parameters to use when calling the ChatGPT API. Can be useful for setting temperature, presence_penalty, and more. See <see href="https://platform.openai.com/docs/api-reference/chat/create">OpenAI documentation for a list of possible parameters to tweak.</see></param>
</member>
<member name="P:OpenAI_API.Chat.Conversation.Messages">
<summary>
A list of messages exchanged so far. Do not modify this list directly. Instead, use <see cref="M:OpenAI_API.Chat.Conversation.AppendMessage(OpenAI_API.Chat.ChatMessage)"/>, <see cref="M:OpenAI_API.Chat.Conversation.AppendUserInput(System.String)"/>, <see cref="M:OpenAI_API.Chat.Conversation.AppendSystemMessage(System.String)"/>, or <see cref="M:OpenAI_API.Chat.Conversation.AppendExampleChatbotOutput(System.String)"/>.
</summary>
</member>
<member name="M:OpenAI_API.Chat.Conversation.AppendMessage(OpenAI_API.Chat.ChatMessage)">
<summary>
Appends a <see cref="T:OpenAI_API.Chat.ChatMessage"/> to the chat history
</summary>
<param name="message">The <see cref="T:OpenAI_API.Chat.ChatMessage"/> to append to the chat history</param>
</member>
<member name="M:OpenAI_API.Chat.Conversation.AppendMessage(OpenAI_API.Chat.ChatMessageRole,System.String)">
<summary>
Creates and appends a <see cref="T:OpenAI_API.Chat.ChatMessage"/> to the chat history
</summary>
<param name="role">The <see cref="T:OpenAI_API.Chat.ChatMessageRole"/> for the message. Typically, a conversation is formatted with a system message first, followed by alternating user and assistant messages. See <see href="https://platform.openai.com/docs/guides/chat/introduction">the OpenAI docs</see> for more details about usage.</param>
<param name="content">The content of the message)</param>
</member>
<member name="M:OpenAI_API.Chat.Conversation.AppendUserInput(System.String)">
<summary>
Creates and appends a <see cref="T:OpenAI_API.Chat.ChatMessage"/> to the chat history with the Role of <see cref="P:OpenAI_API.Chat.ChatMessageRole.User"/>. The user messages help instruct the assistant. They can be generated by the end users of an application, or set by a developer as an instruction.
</summary>
<param name="content">Text content generated by the end users of an application, or set by a developer as an instruction</param>
</member>
<member name="M:OpenAI_API.Chat.Conversation.AppendUserInputWithName(System.String,System.String)">
<summary>
Creates and appends a <see cref="T:OpenAI_API.Chat.ChatMessage"/> to the chat history with the Role of <see cref="P:OpenAI_API.Chat.ChatMessageRole.User"/>. The user messages help instruct the assistant. They can be generated by the end users of an application, or set by a developer as an instruction.
</summary>
<param name="userName">The name of the user in a multi-user chat</param>
<param name="content">Text content generated by the end users of an application, or set by a developer as an instruction</param>
</member>
<member name="M:OpenAI_API.Chat.Conversation.AppendSystemMessage(System.String)">
<summary>
Creates and appends a <see cref="T:OpenAI_API.Chat.ChatMessage"/> to the chat history with the Role of <see cref="P:OpenAI_API.Chat.ChatMessageRole.System"/>. The system message helps set the behavior of the assistant.
</summary>
<param name="content">text content that helps set the behavior of the assistant</param>
</member>
<member name="M:OpenAI_API.Chat.Conversation.AppendExampleChatbotOutput(System.String)">
<summary>
Creates and appends a <see cref="T:OpenAI_API.Chat.ChatMessage"/> to the chat history with the Role of <see cref="P:OpenAI_API.Chat.ChatMessageRole.Assistant"/>. Assistant messages can be written by a developer to help give examples of desired behavior.
</summary>
<param name="content">Text content written by a developer to help give examples of desired behavior</param>
</member>
<member name="M:OpenAI_API.Chat.Conversation.GetResponseFromChatbotAsync">
<summary>
Calls the API to get a response, which is appended to the current chat's <see cref="P:OpenAI_API.Chat.Conversation.Messages"/> as an <see cref="P:OpenAI_API.Chat.ChatMessageRole.Assistant"/> <see cref="T:OpenAI_API.Chat.ChatMessage"/>.
</summary>
<returns>The string of the response from the chatbot API</returns>
</member>
<member name="M:OpenAI_API.Chat.Conversation.GetResponseFromChatbot">
<summary>
OBSOLETE: GetResponseFromChatbot() has been renamed to <see cref="M:OpenAI_API.Chat.Conversation.GetResponseFromChatbotAsync"/> to follow .NET naming guidelines. This alias will be removed in a future version.
</summary>
<returns>The string of the response from the chatbot API</returns>
</member>
<member name="M:OpenAI_API.Chat.Conversation.StreamResponseFromChatbotAsync(System.Action{System.String})">
<summary>
Calls the API to get a response, which is appended to the current chat's <see cref="P:OpenAI_API.Chat.Conversation.Messages"/> as an <see cref="P:OpenAI_API.Chat.ChatMessageRole.Assistant"/> <see cref="T:OpenAI_API.Chat.ChatMessage"/>, and streams the results to the <paramref name="resultHandler"/> as they come in. <br/>
If you are on the latest C# supporting async enumerables, you may prefer the cleaner syntax of <see cref="M:OpenAI_API.Chat.Conversation.StreamResponseEnumerableFromChatbotAsync"/> instead.
</summary>
<param name="resultHandler">An action to be called as each new result arrives.</param>
</member>
<member name="M:OpenAI_API.Chat.Conversation.StreamResponseFromChatbotAsync(System.Action{System.Int32,System.String})">
<summary>
Calls the API to get a response, which is appended to the current chat's <see cref="P:OpenAI_API.Chat.Conversation.Messages"/> as an <see cref="P:OpenAI_API.Chat.ChatMessageRole.Assistant"/> <see cref="T:OpenAI_API.Chat.ChatMessage"/>, and streams the results to the <paramref name="resultHandler"/> as they come in. <br/>
If you are on the latest C# supporting async enumerables, you may prefer the cleaner syntax of <see cref="M:OpenAI_API.Chat.Conversation.StreamResponseEnumerableFromChatbotAsync"/> instead.
</summary>
<param name="resultHandler">An action to be called as each new result arrives, which includes the index of the result in the overall result set.</param>
</member>
<member name="M:OpenAI_API.Chat.Conversation.StreamResponseEnumerableFromChatbotAsync">
<summary>
Calls the API to get a response, which is appended to the current chat's <see cref="P:OpenAI_API.Chat.Conversation.Messages"/> as an <see cref="P:OpenAI_API.Chat.ChatMessageRole.Assistant"/> <see cref="T:OpenAI_API.Chat.ChatMessage"/>, and streams the results as they come in. <br/>
If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="M:OpenAI_API.Chat.Conversation.StreamResponseFromChatbotAsync(System.Action{System.String})"/> instead.
</summary>
<returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams"/> for more details on how to consume an async enumerable.</returns>
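<example>
A sketch of streaming a reply (requires C# 8 or later); the stream is assumed to yield the response text piece by piece:
<code>
chat.AppendUserInput("Tell me a short story.");
await foreach (string token in chat.StreamResponseEnumerableFromChatbotAsync())
{
    Console.Write(token);
}
</code>
</example>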
</member>
<member name="T:OpenAI_API.Chat.IChatEndpoint">
<summary>
An interface for <see cref="T:OpenAI_API.Chat.ChatEndpoint"/>, the ChatGPT API endpoint. Use this endpoint to send multiple messages and carry on a conversation.
</summary>
</member>
<member name="P:OpenAI_API.Chat.IChatEndpoint.DefaultChatRequestArgs">
<summary>
This allows you to set default parameters for every request, for example to set a default temperature or max tokens. For every request, if you do not have a parameter set on the request but do have it set here as a default, the request will automatically pick up the default value.
</summary>
</member>
<member name="M:OpenAI_API.Chat.IChatEndpoint.CreateConversation(OpenAI_API.Chat.ChatRequest)">
<summary>
Creates an ongoing chat which can easily encapsulate the conversation. This is the simplest way to use the Chat endpoint.
</summary>
<param name="defaultChatRequestArgs">Allows setting the parameters to use when calling the ChatGPT API. Can be useful for setting temperature, presence_penalty, and more. See <see href="https://platform.openai.com/docs/api-reference/chat/create">OpenAI documentation for a list of possible parameters to tweak.</see></param>
<returns>A <see cref="T:OpenAI_API.Chat.Conversation"/> which encapsulates a back-and-forth chat between a user and an assistant.</returns>
</member>
<member name="M:OpenAI_API.Chat.IChatEndpoint.CreateChatCompletionAsync(OpenAI_API.Chat.ChatRequest)">
<summary>
Ask the API to complete the request using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in <see cref="P:OpenAI_API.Chat.IChatEndpoint.DefaultChatRequestArgs"/> if present.
</summary>
<param name="request">The request to send to the API.</param>
<returns>Asynchronously returns the completion result. Look in its <see cref="P:OpenAI_API.Chat.ChatResult.Choices"/> property for the results.</returns>
</member>
<member name="M:OpenAI_API.Chat.IChatEndpoint.CreateChatCompletionAsync(OpenAI_API.Chat.ChatRequest,System.Int32)">
<summary>
Ask the API to complete the request using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in <see cref="P:OpenAI_API.Chat.IChatEndpoint.DefaultChatRequestArgs"/> if present.
</summary>
<param name="request">The request to send to the API.</param>
<param name="numOutputs">Overrides <see cref="P:OpenAI_API.Chat.ChatRequest.NumChoicesPerMessage"/> as a convenience.</param>
<returns>Asynchronously returns the completion result. Look in its <see cref="P:OpenAI_API.Chat.ChatResult.Choices"/> property for the results.</returns>
</member>
<member name="M:OpenAI_API.Chat.IChatEndpoint.CreateChatCompletionAsync(System.Collections.Generic.IList{OpenAI_API.Chat.ChatMessage},OpenAI_API.Models.Model,System.Nullable{System.Double},System.Nullable{System.Double},System.Nullable{System.Int32},System.Nullable{System.Int32},System.Nullable{System.Double},System.Nullable{System.Double},System.Collections.Generic.IReadOnlyDictionary{System.String,System.Single},System.String[])">
<summary>
Ask the API to complete the request using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in <see cref="P:OpenAI_API.Chat.IChatEndpoint.DefaultChatRequestArgs"/> if present.
</summary>
<param name="messages">The array of messages to send to the API</param>
<param name="model">The model to use. See the ChatGPT models available from <see cref="M:OpenAI_API.Models.ModelsEndpoint.GetModelsAsync"/></param>
<param name="temperature">What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <paramref name="top_p"/> but not both.</param>
<param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <paramref name="temperature"/> but not both.</param>
<param name="numOutputs">How many different choices to request for each prompt.</param>
<param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
<param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
<param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
<param name="logitBias">Maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.</param>
<param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
<returns>Asynchronously returns the completion result. Look in its <see cref="P:OpenAI_API.Chat.ChatResult.Choices"/> property for the results.</returns>
</member>
<member name="M:OpenAI_API.Chat.IChatEndpoint.CreateChatCompletionAsync(OpenAI_API.Chat.ChatMessage[])">
<summary>
Ask the API to complete the request using the specified message(s). Any parameters will fall back to default values specified in <see cref="P:OpenAI_API.Chat.IChatEndpoint.DefaultChatRequestArgs"/> if present.
</summary>
<param name="messages">The messages to use in the generation.</param>
<returns>The <see cref="T:OpenAI_API.Chat.ChatResult"/> with the API response.</returns>
</member>
<member name="M:OpenAI_API.Chat.IChatEndpoint.CreateChatCompletionAsync(System.String[])">
<summary>
Ask the API to complete the request using the specified message(s). Any parameters will fall back to default values specified in <see cref="P:OpenAI_API.Chat.IChatEndpoint.DefaultChatRequestArgs"/> if present.
</summary>
<param name="userMessages">The user message or messages to use in the generation. All strings are assumed to be of Role <see cref="P:OpenAI_API.Chat.ChatMessageRole.User"/></param>
<returns>The <see cref="T:OpenAI_API.Chat.ChatResult"/> with the API response.</returns>
</member>
<member name="M:OpenAI_API.Chat.IChatEndpoint.StreamChatAsync(OpenAI_API.Chat.ChatRequest,System.Action{OpenAI_API.Chat.ChatResult})">
<summary>
Ask the API to complete the message(s) using the specified request, and stream the results to the <paramref name="resultHandler"/> as they come in.
If you are on the latest C# supporting async enumerables, you may prefer the cleaner syntax of <see cref="M:OpenAI_API.Chat.IChatEndpoint.StreamChatEnumerableAsync(OpenAI_API.Chat.ChatRequest)"/> instead.
</summary>
<param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="P:OpenAI_API.Chat.IChatEndpoint.DefaultChatRequestArgs"/>.</param>
<param name="resultHandler">An action to be called as each new result arrives, which includes the index of the result in the overall result set.</param>
</member>
<member name="M:OpenAI_API.Chat.IChatEndpoint.StreamChatEnumerableAsync(OpenAI_API.Chat.ChatRequest)">
<summary>
Ask the API to complete the message(s) using the specified request, and stream the results as they come in.
If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="M:OpenAI_API.Chat.IChatEndpoint.StreamChatAsync(OpenAI_API.Chat.ChatRequest,System.Action{OpenAI_API.Chat.ChatResult})"/> instead.
</summary>
<param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="P:OpenAI_API.Chat.IChatEndpoint.DefaultChatRequestArgs"/>.</param>
<returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams"/> for more details on how to consume an async enumerable.</returns>
</member>
<member name="M:OpenAI_API.Chat.IChatEndpoint.StreamChatEnumerableAsync(System.Collections.Generic.IList{OpenAI_API.Chat.ChatMessage},OpenAI_API.Models.Model,System.Nullable{System.Double},System.Nullable{System.Double},System.Nullable{System.Int32},System.Nullable{System.Int32},System.Nullable{System.Double},System.Nullable{System.Double},System.Collections.Generic.IReadOnlyDictionary{System.String,System.Single},System.String[])">
<summary>
Ask the API to complete the message(s) using the specified request, and stream the results as they come in.
If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="M:OpenAI_API.Chat.IChatEndpoint.StreamChatAsync(OpenAI_API.Chat.ChatRequest,System.Action{OpenAI_API.Chat.ChatResult})"/> instead.
</summary>
<param name="messages">The array of messages to send to the API</param>
<param name="model">The model to use. See the ChatGPT models available from <see cref="M:OpenAI_API.Models.ModelsEndpoint.GetModelsAsync"/></param>
<param name="temperature">What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <paramref name="top_p"/> but not both.</param>
<param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <paramref name="temperature"/> but not both.</param>
<param name="numOutputs">How many different choices to request for each prompt.</param>
<param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
<param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
<param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
<param name="logitBias">Maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.</param>
<param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
<returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams">the C# docs</see> for more details on how to consume an async enumerable.</returns>
</member>
<member name="M:OpenAI_API.Chat.IChatEndpoint.StreamCompletionAsync(OpenAI_API.Chat.ChatRequest,System.Action{System.Int32,OpenAI_API.Chat.ChatResult})">
<summary>
Ask the API to complete the message(s) using the specified request, and stream the results to the <paramref name="resultHandler"/> as they come in.
If you are on the latest C# supporting async enumerables, you may prefer the cleaner syntax of <see cref="M:OpenAI_API.Chat.IChatEndpoint.StreamChatEnumerableAsync(OpenAI_API.Chat.ChatRequest)"/> instead.
</summary>
<param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="P:OpenAI_API.Chat.IChatEndpoint.DefaultChatRequestArgs"/>.</param>
<param name="resultHandler">An action to be called as each new result arrives, which includes the index of the result in the overall result set.</param>
</member>
<member name="T:OpenAI_API.Completions.CompletionEndpoint">
<summary>
Text generation is the core function of the API. You give the API a prompt, and it generates a completion. The way you “program” the API to do a task is by simply describing the task in plain English or providing a few written examples. This simple approach works for a wide range of use cases, including summarization, translation, grammar correction, question answering, chatbots, composing emails, and much more (see the prompt library for inspiration).
</summary>
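<example>
A hedged sketch using the string-prompt convenience overload documented below; <c>CompletionResult.ToString()</c> returning the first completion's text is an assumption:
<code>
var result = await api.Completions.CreateCompletionAsync("One plus one equals");
Console.WriteLine(result.ToString());
</code>
</example>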
</member>
<member name="P:OpenAI_API.Completions.CompletionEndpoint.DefaultCompletionRequestArgs">
<summary>
This allows you to set default parameters for every request, for example to set a default temperature or max tokens. For every request, if you do not have a parameter set on the request but do have it set here as a default, the request will automatically pick up the default value.
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionEndpoint.Endpoint">
<summary>
The name of the endpoint, which is the final path segment in the API URL. For example, "completions".
</summary>
</member>
<member name="M:OpenAI_API.Completions.CompletionEndpoint.#ctor(OpenAI_API.OpenAIAPI)">
<summary>
Constructor of the api endpoint. Rather than instantiating this yourself, access it through an instance of <see cref="T:OpenAI_API.OpenAIAPI"/> as <see cref="P:OpenAI_API.OpenAIAPI.Completions"/>.
</summary>
<param name="api"></param>
</member>
<member name="M:OpenAI_API.Completions.CompletionEndpoint.CreateCompletionAsync(OpenAI_API.Completions.CompletionRequest)">
<summary>
Ask the API to complete the prompt(s) using the specified request. This is non-streaming, so it will wait until the API returns the full result.
</summary>
<param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="P:OpenAI_API.Completions.CompletionEndpoint.DefaultCompletionRequestArgs"/>.</param>
<returns>Asynchronously returns the completion result. Look in its <see cref="P:OpenAI_API.Completions.CompletionResult.Completions"/> property for the completions.</returns>
</member>
<member name="M:OpenAI_API.Completions.CompletionEndpoint.CreateCompletionsAsync(OpenAI_API.Completions.CompletionRequest,System.Int32)">
<summary>
Ask the API to complete the prompt(s) using the specified request and a requested number of outputs. This is non-streaming, so it will wait until the API returns the full result.
</summary>
<param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="P:OpenAI_API.Completions.CompletionEndpoint.DefaultCompletionRequestArgs"/>.</param>
<param name="numOutputs">Overrides <see cref="P:OpenAI_API.Completions.CompletionRequest.NumChoicesPerPrompt"/> as a convenience.</param>
<returns>Asynchronously returns the completion result. Look in its <see cref="P:OpenAI_API.Completions.CompletionResult.Completions"/> property for the completions, which should have a length equal to <paramref name="numOutputs"/>.</returns>
</member>
<member name="M:OpenAI_API.Completions.CompletionEndpoint.CreateCompletionAsync(System.String,OpenAI_API.Models.Model,System.Nullable{System.Int32},System.Nullable{System.Double},System.Nullable{System.Double},System.Nullable{System.Int32},System.Nullable{System.Double},System.Nullable{System.Double},System.Nullable{System.Int32},System.Nullable{System.Boolean},System.String[])">
<summary>
Ask the API to complete the prompt(s) using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in <see cref="P:OpenAI_API.Completions.CompletionEndpoint.DefaultCompletionRequestArgs"/> if present.
</summary>
<param name="prompt">The prompt to generate from</param>
<param name="model">The model to use. You can use <see cref="M:OpenAI_API.Models.ModelsEndpoint.GetModelsAsync"/> to see all of your available models, or use a standard model like <see cref="P:OpenAI_API.Models.Model.DavinciText"/>.</param>
<param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
<param name="temperature">What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <paramref name="top_p"/> but not both.</param>
<param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <paramref name="temperature"/> but not both.</param>
<param name="numOutputs">How many different choices to request for each prompt.</param>
<param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
<param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
<param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="P:OpenAI_API.Completions.CompletionResult.Completions"/> -> <see cref="P:OpenAI_API.Completions.Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
<param name="echo">Echo back the prompt in addition to the completion.</param>
<param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
<returns>Asynchronously returns the completion result. Look in its <see cref="P:OpenAI_API.Completions.CompletionResult.Completions"/> property for the completions.</returns>
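<example>
A sketch of this convenience overload; unspecified parameters fall back to the endpoint defaults. Assumes an existing <c>api</c> instance of <see cref="T:OpenAI_API.OpenAIAPI"/>:
<code>
var result = await api.Completions.CreateCompletionAsync(
    "Once upon a time",
    max_tokens: 50,
    temperature: 0.9);
Console.WriteLine(result.ToString());
</code>
</example>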
</member>
<member name="M:OpenAI_API.Completions.CompletionEndpoint.CreateCompletionAsync(System.String[])">
<summary>
Ask the API to complete the specified prompt(s), with other parameters being drawn from default values specified in <see cref="P:OpenAI_API.Completions.CompletionEndpoint.DefaultCompletionRequestArgs"/> if present. This is non-streaming, so it will wait until the API returns the full result.
</summary>
<param name="prompts">One or more prompts to generate from</param>
<returns></returns>
</member>
<member name="M:OpenAI_API.Completions.CompletionEndpoint.StreamCompletionAsync(OpenAI_API.Completions.CompletionRequest,System.Action{System.Int32,OpenAI_API.Completions.CompletionResult})">
<summary>
Ask the API to complete the prompt(s) using the specified request, and stream the results to the <paramref name="resultHandler"/> as they come in.
If you are on the latest C# supporting async enumerables, you may prefer the cleaner syntax of <see cref="M:OpenAI_API.Completions.CompletionEndpoint.StreamCompletionEnumerableAsync(OpenAI_API.Completions.CompletionRequest)"/> instead.
</summary>
<param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="P:OpenAI_API.Completions.CompletionEndpoint.DefaultCompletionRequestArgs"/>.</param>
<param name="resultHandler">An action to be called as each new result arrives, which includes the index of the result in the overall result set.</param>
</member>
<member name="M:OpenAI_API.Completions.CompletionEndpoint.StreamCompletionAsync(OpenAI_API.Completions.CompletionRequest,System.Action{OpenAI_API.Completions.CompletionResult})">
<summary>
Ask the API to complete the prompt(s) using the specified request, and stream the results to the <paramref name="resultHandler"/> as they come in.
If you are on the latest C# supporting async enumerables, you may prefer the cleaner syntax of <see cref="M:OpenAI_API.Completions.CompletionEndpoint.StreamCompletionEnumerableAsync(OpenAI_API.Completions.CompletionRequest)"/> instead.
</summary>
<param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="P:OpenAI_API.Completions.CompletionEndpoint.DefaultCompletionRequestArgs"/>.</param>
<param name="resultHandler">An action to be called as each new result arrives.</param>
</member>
<member name="M:OpenAI_API.Completions.CompletionEndpoint.StreamCompletionEnumerableAsync(OpenAI_API.Completions.CompletionRequest)">
<summary>
Ask the API to complete the prompt(s) using the specified request, and stream the results as they come in.
If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="M:OpenAI_API.Completions.CompletionEndpoint.StreamCompletionAsync(OpenAI_API.Completions.CompletionRequest,System.Action{OpenAI_API.Completions.CompletionResult})"/> instead.
</summary>
<param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="P:OpenAI_API.Completions.CompletionEndpoint.DefaultCompletionRequestArgs"/>.</param>
<returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams"/> for more details on how to consume an async enumerable.</returns>
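<example>
A sketch using C# 8 async enumerables; assumes an existing <c>api</c> instance:
<code>
var request = new OpenAI_API.Completions.CompletionRequest(
    prompt: "Tell me a story about a robot",
    model: OpenAI_API.Models.Model.DavinciText,
    max_tokens: 60);
await foreach (var partialResult in api.Completions.StreamCompletionEnumerableAsync(request))
{
    // Each iteration yields the next streamed segment of the completion.
    Console.Write(partialResult.ToString());
}
</code>
</example>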
</member>
<member name="M:OpenAI_API.Completions.CompletionEndpoint.StreamCompletionEnumerableAsync(System.String,OpenAI_API.Models.Model,System.Nullable{System.Int32},System.Nullable{System.Double},System.Nullable{System.Double},System.Nullable{System.Int32},System.Nullable{System.Double},System.Nullable{System.Double},System.Nullable{System.Int32},System.Nullable{System.Boolean},System.String[])">
<summary>
Ask the API to complete the prompt(s) using the specified parameters.
Any non-specified parameters will fall back to default values specified in <see cref="P:OpenAI_API.Completions.CompletionEndpoint.DefaultCompletionRequestArgs"/> if present.
If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="M:OpenAI_API.Completions.CompletionEndpoint.StreamCompletionAsync(OpenAI_API.Completions.CompletionRequest,System.Action{OpenAI_API.Completions.CompletionResult})"/> instead.
</summary>
<param name="prompt">The prompt to generate from</param>
<param name="model">The model to use. You can use <see cref="M:OpenAI_API.Models.ModelsEndpoint.GetModelsAsync"/> to see all of your available models, or use a standard model like <see cref="P:OpenAI_API.Models.Model.DavinciText"/>.</param>
<param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
<param name="temperature">What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <paramref name="top_p"/> but not both.</param>
<param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <paramref name="temperature"/> but not both.</param>
<param name="numOutputs">How many different choices to request for each prompt.</param>
<param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
<param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
<param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="P:OpenAI_API.Completions.CompletionResult.Completions"/> -> <see cref="P:OpenAI_API.Completions.Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
<param name="echo">Echo back the prompt in addition to the completion.</param>
<param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
<returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams">the C# docs</see> for more details on how to consume an async enumerable.</returns>
</member>
<member name="M:OpenAI_API.Completions.CompletionEndpoint.CreateAndFormatCompletion(OpenAI_API.Completions.CompletionRequest)">
<summary>
Simply returns a string of the prompt followed by the best completion
</summary>
<param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="P:OpenAI_API.Completions.CompletionEndpoint.DefaultCompletionRequestArgs"/>.</param>
<returns>A string of the prompt followed by the best completion</returns>
</member>
<member name="M:OpenAI_API.Completions.CompletionEndpoint.GetCompletion(System.String)">
<summary>
Simply returns the best completion
</summary>
<param name="prompt">The prompt to complete</param>
<returns>The best completion</returns>
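<example>
A minimal sketch; assumes an existing <c>api</c> instance:
<code>
string completion = await api.Completions.GetCompletion("The capital of France is");
Console.WriteLine(completion);
</code>
</example>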
</member>
<member name="T:OpenAI_API.Completions.CompletionRequest">
<summary>
Represents a request to the Completions API. Mostly matches the parameters in <see href="https://beta.openai.com/api-ref#create-completion-post">the OpenAI docs</see>, although some have been renamed or expanded into single/multiple properties for ease of use.
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionRequest.Model">
<summary>
ID of the model to use. You can use <see cref="M:OpenAI_API.Models.ModelsEndpoint.GetModelsAsync"/> to see all of your available models, or use a standard model like <see cref="P:OpenAI_API.Models.Model.DavinciText"/>.
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionRequest.CompiledPrompt">
<summary>
This is only used for serializing the request into JSON, do not use it directly.
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionRequest.MultiplePrompts">
<summary>
If you are requesting more than one prompt, specify them as an array of strings.
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionRequest.Prompt">
<summary>
For convenience, if you are only requesting a single prompt, set it here
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionRequest.Suffix">
<summary>
The suffix that comes after a completion of inserted text. Defaults to null.
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionRequest.MaxTokens">
<summary>
How many tokens to complete to. Can return fewer if a stop sequence is hit. Defaults to 16.
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionRequest.Temperature">
<summary>
What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommended to use this or <see cref="P:OpenAI_API.Completions.CompletionRequest.TopP"/> but not both.
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionRequest.TopP">
<summary>
An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommended to use this or <see cref="P:OpenAI_API.Completions.CompletionRequest.Temperature"/> but not both.
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionRequest.PresencePenalty">
<summary>
The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse. Defaults to 0.
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionRequest.FrequencyPenalty">
<summary>
The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse. Defaults to 0.
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionRequest.NumChoicesPerPrompt">
<summary>
How many different choices to request for each prompt. Defaults to 1.
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionRequest.Stream">
<summary>
Specifies whether the results should be streamed as they are generated or returned all at once. Do not set this yourself; use the appropriate methods on <see cref="T:OpenAI_API.Completions.CompletionEndpoint"/> instead.
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionRequest.Logprobs">
<summary>
Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="P:OpenAI_API.Completions.CompletionResult.Completions"/> -> <see cref="P:OpenAI_API.Completions.Choice.Logprobs"/>. So for example, if logprobs is 5, the API will return a list of the 5 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response. The maximum value for logprobs is 5.
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionRequest.Echo">
<summary>
Echo back the prompt in addition to the completion. Defaults to false.
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionRequest.CompiledStop">
<summary>
This is only used for serializing the request into JSON, do not use it directly.
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionRequest.MultipleStopSequences">
<summary>
One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionRequest.StopSequence">
<summary>
The stop sequence where the API will stop generating further tokens. The returned text will not contain the stop sequence. For convenience, if you are only requesting a single stop sequence, set it here
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionRequest.BestOf">
<summary>
Generates best_of completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed.
When used with n, best_of controls the number of candidate completions and n specifies how many to return; best_of must be greater than n.
Note: Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for max_tokens and stop.
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionRequest.user">
<summary>
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
</summary>
</member>
<member name="M:OpenAI_API.Completions.CompletionRequest.#ctor">
<summary>
Creates a new, empty <see cref="T:OpenAI_API.Completions.CompletionRequest"/>
</summary>
</member>
<member name="M:OpenAI_API.Completions.CompletionRequest.#ctor(OpenAI_API.Completions.CompletionRequest)">
<summary>
Creates a new <see cref="T:OpenAI_API.Completions.CompletionRequest"/>, inheriting any parameters set in <paramref name="basedOn"/>.
</summary>
<param name="basedOn">The <see cref="T:OpenAI_API.Completions.CompletionRequest"/> to copy</param>
</member>
<member name="M:OpenAI_API.Completions.CompletionRequest.#ctor(System.String[])">
<summary>
Creates a new <see cref="T:OpenAI_API.Completions.CompletionRequest"/>, using the specified prompts
</summary>
<param name="prompts">One or more prompts to generate from</param>
</member>
<member name="M:OpenAI_API.Completions.CompletionRequest.#ctor(System.String,OpenAI_API.Models.Model,System.Nullable{System.Int32},System.Nullable{System.Double},System.String,System.Nullable{System.Double},System.Nullable{System.Int32},System.Nullable{System.Double},System.Nullable{System.Double},System.Nullable{System.Int32},System.Nullable{System.Boolean},System.String[])">
<summary>
Creates a new <see cref="T:OpenAI_API.Completions.CompletionRequest"/> with the specified parameters
</summary>
<param name="prompt">The prompt to generate from</param>
<param name="model">The model to use. You can use <see cref="M:OpenAI_API.Models.ModelsEndpoint.GetModelsAsync"/> to see all of your available models, or use a standard model like <see cref="P:OpenAI_API.Models.Model.DavinciText"/>.</param>
<param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
<param name="temperature">What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <paramref name="top_p"/> but not both.</param>
<param name="suffix">The suffix that comes after a completion of inserted text</param>
<param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <paramref name="temperature"/> but not both.</param>
<param name="numOutputs">How many different choices to request for each prompt.</param>
<param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
<param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
<param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="P:OpenAI_API.Completions.CompletionResult.Completions"/> -> <see cref="P:OpenAI_API.Completions.Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
<param name="echo">Echo back the prompt in addition to the completion.</param>
<param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
</member>
<member name="T:OpenAI_API.Completions.Choice">
<summary>
Represents a completion choice returned by the Completion API.
</summary>
</member>
<member name="P:OpenAI_API.Completions.Choice.Text">
<summary>
The main text of the completion
</summary>
</member>
<member name="P:OpenAI_API.Completions.Choice.Index">
<summary>
If multiple completion choices were returned, this is the index of this choice within the set of choices
</summary>
</member>
<member name="P:OpenAI_API.Completions.Choice.Logprobs">
<summary>
If the request specified <see cref="P:OpenAI_API.Completions.CompletionRequest.Logprobs"/>, this contains the list of the most likely tokens.
</summary>
</member>
<member name="P:OpenAI_API.Completions.Choice.FinishReason">
<summary>
If this is the last segment of the completion result, this specifies why the completion has ended.
</summary>
</member>
<member name="M:OpenAI_API.Completions.Choice.ToString">
<summary>
Gets the main text of this completion
</summary>
</member>
<member name="T:OpenAI_API.Completions.CompletionUsage">
<summary>
API usage as reported by the OpenAI API for this request
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionUsage.CompletionTokens">
<summary>
How many tokens are in the completion(s)
</summary>
</member>
<member name="T:OpenAI_API.Completions.CompletionResult">
<summary>
Represents a result from calling the Completion API
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionResult.Id">
<summary>
The identifier of the result, which may be used during troubleshooting
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionResult.Completions">
<summary>
The completions returned by the API. Depending on your request, there may be 1 or many choices.
</summary>
</member>
<member name="P:OpenAI_API.Completions.CompletionResult.Usage">
<summary>
API token usage as reported by the OpenAI API for this request
</summary>
</member>
<member name="M:OpenAI_API.Completions.CompletionResult.ToString">
<summary>
Gets the text of the first completion, representing the main result
</summary>
</member>
<member name="T:OpenAI_API.Completions.ICompletionEndpoint">
<summary>
An interface for <see cref="T:OpenAI_API.Completions.CompletionEndpoint"/>, for ease of mock testing, etc
</summary>
</member>
<member name="P:OpenAI_API.Completions.ICompletionEndpoint.DefaultCompletionRequestArgs">
<summary>
This allows you to set default parameters for every request, for example to set a default temperature or max tokens. For every request, if you do not have a parameter set on the request but do have it set here as a default, the request will automatically pick up the default value.
</summary>
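<example>
A sketch of setting endpoint-wide defaults; assumes an existing <c>api</c> instance and that <see cref="P:OpenAI_API.Completions.ICompletionEndpoint.DefaultCompletionRequestArgs"/> is non-null:
<code>
// Later requests that omit these parameters will pick up the defaults.
api.Completions.DefaultCompletionRequestArgs.Model = OpenAI_API.Models.Model.DavinciText;
api.Completions.DefaultCompletionRequestArgs.MaxTokens = 64;
api.Completions.DefaultCompletionRequestArgs.Temperature = 0.5;
</code>
</example>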
</member>
<member name="M:OpenAI_API.Completions.ICompletionEndpoint.CreateCompletionAsync(OpenAI_API.Completions.CompletionRequest)">
<summary>
Ask the API to complete the prompt(s) using the specified request. This is non-streaming, so it will wait until the API returns the full result.
</summary>
<param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="P:OpenAI_API.Completions.ICompletionEndpoint.DefaultCompletionRequestArgs"/>.</param>
<returns>Asynchronously returns the completion result. Look in its <see cref="P:OpenAI_API.Completions.CompletionResult.Completions"/> property for the completions.</returns>
</member>
<member name="M:OpenAI_API.Completions.ICompletionEndpoint.CreateCompletionAsync(System.String,OpenAI_API.Models.Model,System.Nullable{System.Int32},System.Nullable{System.Double},System.Nullable{System.Double},System.Nullable{System.Int32},System.Nullable{System.Double},System.Nullable{System.Double},System.Nullable{System.Int32},System.Nullable{System.Boolean},System.String[])">
<summary>
Ask the API to complete the prompt(s) using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in <see cref="P:OpenAI_API.Completions.ICompletionEndpoint.DefaultCompletionRequestArgs"/> if present.
</summary>
<param name="prompt">The prompt to generate from</param>
<param name="model">The model to use. You can use <see cref="M:OpenAI_API.Models.ModelsEndpoint.GetModelsAsync"/> to see all of your available models, or use a standard model like <see cref="P:OpenAI_API.Models.Model.DavinciText"/>.</param>
<param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
<param name="temperature">What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <paramref name="top_p"/> but not both.</param>
<param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <paramref name="temperature"/> but not both.</param>
<param name="numOutputs">How many different choices to request for each prompt.</param>
<param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
<param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
<param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="P:OpenAI_API.Completions.CompletionResult.Completions"/> -> <see cref="P:OpenAI_API.Completions.Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
<param name="echo">Echo back the prompt in addition to the completion.</param>
<param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
<returns>Asynchronously returns the completion result. Look in its <see cref="P:OpenAI_API.Completions.CompletionResult.Completions"/> property for the completions.</returns>
</member>
<member name="M:OpenAI_API.Completions.ICompletionEndpoint.CreateCompletionAsync(System.String[])">
<summary>
Ask the API to complete the specified prompt(s), with other parameters being drawn from default values specified in <see cref="P:OpenAI_API.Completions.ICompletionEndpoint.DefaultCompletionRequestArgs"/> if present. This is non-streaming, so it will wait until the API returns the full result.
</summary>
<param name="prompts">One or more prompts to generate from</param>
<returns></returns>
</member>
<member name="M:OpenAI_API.Completions.ICompletionEndpoint.CreateCompletionsAsync(OpenAI_API.Completions.CompletionRequest,System.Int32)">
<summary>
Ask the API to complete the prompt(s) using the specified request and a requested number of outputs. This is non-streaming, so it will wait until the API returns the full result.
</summary>
<param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="P:OpenAI_API.Completions.ICompletionEndpoint.DefaultCompletionRequestArgs"/>.</param>
<param name="numOutputs">Overrides <see cref="P:OpenAI_API.Completions.CompletionRequest.NumChoicesPerPrompt"/> as a convenience.</param>
<returns>Asynchronously returns the completion result. Look in its <see cref="P:OpenAI_API.Completions.CompletionResult.Completions"/> property for the completions, which should have a length equal to <paramref name="numOutputs"/>.</returns>
</member>
<member name="M:OpenAI_API.Completions.ICompletionEndpoint.StreamCompletionAsync(OpenAI_API.Completions.CompletionRequest,System.Action{System.Int32,OpenAI_API.Completions.CompletionResult})">
<summary>
Ask the API to complete the prompt(s) using the specified request, and stream the results to the <paramref name="resultHandler"/> as they come in.
If you are on the latest C# supporting async enumerables, you may prefer the cleaner syntax of <see cref="M:OpenAI_API.Completions.ICompletionEndpoint.StreamCompletionEnumerableAsync(OpenAI_API.Completions.CompletionRequest)"/> instead.
</summary>
<param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="P:OpenAI_API.Completions.ICompletionEndpoint.DefaultCompletionRequestArgs"/>.</param>
<param name="resultHandler">An action to be called as each new result arrives, which includes the index of the result in the overall result set.</param>
</member>
<member name="M:OpenAI_API.Completions.ICompletionEndpoint.StreamCompletionAsync(OpenAI_API.Completions.CompletionRequest,System.Action{OpenAI_API.Completions.CompletionResult})">
<summary>
Ask the API to complete the prompt(s) using the specified request, and stream the results to the <paramref name="resultHandler"/> as they come in.
If you are on the latest C# supporting async enumerables, you may prefer the cleaner syntax of <see cref="M:OpenAI_API.Completions.ICompletionEndpoint.StreamCompletionEnumerableAsync(OpenAI_API.Completions.CompletionRequest)"/> instead.
</summary>
<param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="P:OpenAI_API.Completions.ICompletionEndpoint.DefaultCompletionRequestArgs"/>.</param>
<param name="resultHandler">An action to be called as each new result arrives.</param>
</member>
<member name="M:OpenAI_API.Completions.ICompletionEndpoint.StreamCompletionEnumerableAsync(OpenAI_API.Completions.CompletionRequest)">
<summary>
Ask the API to complete the prompt(s) using the specified request, and stream the results as they come in.
If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="M:OpenAI_API.Completions.ICompletionEndpoint.StreamCompletionAsync(OpenAI_API.Completions.CompletionRequest,System.Action{OpenAI_API.Completions.CompletionResult})"/> instead.
</summary>
<param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="P:OpenAI_API.Completions.ICompletionEndpoint.DefaultCompletionRequestArgs"/>.</param>
<returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams"/> for more details on how to consume an async enumerable.</returns>
</member>
<member name="M:OpenAI_API.Completions.ICompletionEndpoint.StreamCompletionEnumerableAsync(System.String,OpenAI_API.Models.Model,System.Nullable{System.Int32},System.Nullable{System.Double},System.Nullable{System.Double},System.Nullable{System.Int32},System.Nullable{System.Double},System.Nullable{System.Double},System.Nullable{System.Int32},System.Nullable{System.Boolean},System.String[])">
<summary>
Ask the API to complete the prompt(s) using the specified parameters.
Any non-specified parameters will fall back to default values specified in <see cref="P:OpenAI_API.Completions.ICompletionEndpoint.DefaultCompletionRequestArgs"/> if present.
If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="M:OpenAI_API.Completions.ICompletionEndpoint.StreamCompletionAsync(OpenAI_API.Completions.CompletionRequest,System.Action{OpenAI_API.Completions.CompletionResult})"/> instead.
</summary>
<param name="prompt">The prompt to generate from</param>
<param name="model">The model to use. You can use <see cref="M:OpenAI_API.Models.ModelsEndpoint.GetModelsAsync"/> to see all of your available models, or use a standard model like <see cref="P:OpenAI_API.Models.Model.DavinciText"/>.</param>
<param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
<param name="temperature">What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <paramref name="top_p"/> but not both.</param>
<param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <paramref name="temperature"/> but not both.</param>
<param name="numOutputs">How many different choices to request for each prompt.</param>
<param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
<param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
<param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="P:OpenAI_API.Completions.CompletionResult.Completions"/> -> <see cref="P:OpenAI_API.Completions.Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
<param name="echo">Echo back the prompt in addition to the completion.</param>
<param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
<returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams">the C# docs</see> for more details on how to consume an async enumerable.</returns>
</member>
<member name="M:OpenAI_API.Completions.ICompletionEndpoint.CreateAndFormatCompletion(OpenAI_API.Completions.CompletionRequest)">
<summary>
Simply returns a string of the prompt followed by the best completion
</summary>
<param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="P:OpenAI_API.Completions.ICompletionEndpoint.DefaultCompletionRequestArgs"/>.</param>
<returns>A string of the prompt followed by the best completion</returns>
</member>
<member name="M:OpenAI_API.Completions.ICompletionEndpoint.GetCompletion(System.String)">
<summary>
Simply returns the best completion
</summary>
<param name="prompt">The prompt to complete</param>
<returns>The best completion</returns>
</member>
<member name="T:OpenAI_API.Embedding.EmbeddingEndpoint">
<summary>
OpenAI's text embeddings measure the relatedness of text strings by generating an embedding, which is a vector (list) of floating point numbers. The distance between two vectors measures their relatedness. Small distances suggest high relatedness and large distances suggest low relatedness.
</summary>
</member>
<member name="P:OpenAI_API.Embedding.EmbeddingEndpoint.DefaultEmbeddingRequestArgs">
<summary>
This allows you to send requests to the recommended model without needing to specify it. Every request uses the <see cref="P:OpenAI_API.Models.Model.AdaTextEmbedding"/> model
</summary>
</member>
<member name="P:OpenAI_API.Embedding.EmbeddingEndpoint.Endpoint">
<summary>
The name of the endpoint, which is the final path segment in the API URL. For example, "embeddings".
</summary>
</member>
<member name="M:OpenAI_API.Embedding.EmbeddingEndpoint.#ctor(OpenAI_API.OpenAIAPI)">
<summary>
Constructor of the api endpoint. Rather than instantiating this yourself, access it through an instance of <see cref="T:OpenAI_API.OpenAIAPI"/> as <see cref="P:OpenAI_API.OpenAIAPI.Embeddings"/>.
</summary>
<param name="api"></param>
</member>
<member name="M:OpenAI_API.Embedding.EmbeddingEndpoint.CreateEmbeddingAsync(System.String)">
<summary>
Ask the API to embed text using the default embedding model <see cref="P:OpenAI_API.Models.Model.AdaTextEmbedding"/>
</summary>
<param name="input">Text to be embedded</param>
<returns>Asynchronously returns the embedding result. Look in the <see cref="P:OpenAI_API.Embedding.Data.Embedding"/> property of <see cref="P:OpenAI_API.Embedding.EmbeddingResult.Data"/> to find the vector of floating point numbers</returns>
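<example>
A sketch; assumes an existing <c>api</c> instance:
<code>
var result = await api.Embeddings.CreateEmbeddingAsync("sample text");
// The vector of floats lives on the first entry of Data.
float[] vector = result.Data[0].Embedding;
Console.WriteLine(vector.Length);
</code>
</example>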
</member>
<member name="M:OpenAI_API.Embedding.EmbeddingEndpoint.CreateEmbeddingAsync(OpenAI_API.Embedding.EmbeddingRequest)">
<summary>
Ask the API to embed text using a custom request
</summary>
<param name="request">Request to be send</param>
<returns>Asynchronously returns the embedding result. Look in its <see cref="P:OpenAI_API.Embedding.Data.Embedding"/> property of <see cref="P:OpenAI_API.Embedding.EmbeddingResult.Data"/> to find the vector of floating point numbers</returns>
</member>
<member name="M:OpenAI_API.Embedding.EmbeddingEndpoint.GetEmbeddingsAsync(System.String)">
<summary>
Ask the API to embed text using the default embedding model <see cref="P:OpenAI_API.Models.Model.AdaTextEmbedding"/>
</summary>
<param name="input">Text to be embedded</param>
<returns>Asynchronously returns the first embedding result as an array of floats.</returns>
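<example>
A sketch of the convenience form that returns the raw vector directly; assumes an existing <c>api</c> instance:
<code>
float[] embedding = await api.Embeddings.GetEmbeddingsAsync("sample text");
</code>
</example>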
</member>
<member name="T:OpenAI_API.Embedding.EmbeddingRequest">
<summary>
Represents a request to the Embeddings API. Matches with the docs at <see href="https://platform.openai.com/docs/api-reference/embeddings">the OpenAI docs</see>
</summary>
</member>
<member name="P:OpenAI_API.Embedding.EmbeddingRequest.Model">
<summary>
ID of the model to use. You can use <see cref="M:OpenAI_API.Models.ModelsEndpoint.GetModelsAsync"/> to see all of your available models, or use a standard model like <see cref="P:OpenAI_API.Models.Model.AdaTextEmbedding"/>.
</summary>
</member>
<member name="P:OpenAI_API.Embedding.EmbeddingRequest.Input">
<summary>
Main text to be embedded
</summary>
</member>
<member name="M:OpenAI_API.Embedding.EmbeddingRequest.#ctor">
<summary>
Creates a new, empty <see cref="T:OpenAI_API.Embedding.EmbeddingRequest"/>
</summary>
</member>
<member name="M:OpenAI_API.Embedding.EmbeddingRequest.#ctor(OpenAI_API.Models.Model,System.String)">
<summary>
Creates a new <see cref="T:OpenAI_API.Embedding.EmbeddingRequest"/> with the specified parameters
</summary>
<param name="model">The model to use. You can use <see cref="M:OpenAI_API.Models.ModelsEndpoint.GetModelsAsync"/> to see all of your available models, or use a standard model like <see cref="P:OpenAI_API.Models.Model.AdaTextEmbedding"/>.</param>
<param name="input">The prompt to transform</param>
</member>
<member name="M:OpenAI_API.Embedding.EmbeddingRequest.#ctor(System.String)">
<summary>
Creates a new <see cref="T:OpenAI_API.Embedding.EmbeddingRequest"/> with the specified input and the <see cref="P:OpenAI_API.Models.Model.AdaTextEmbedding"/> model.
</summary>
<param name="input">The prompt to transform</param>
</member>
<member name="T:OpenAI_API.Embedding.EmbeddingResult">
<summary>
Represents an embedding result returned by the Embedding API.
</summary>
</member>
<member name="P:OpenAI_API.Embedding.EmbeddingResult.Data">
<summary>
List of results of the embedding
</summary>
</member>
<member name="P:OpenAI_API.Embedding.EmbeddingResult.Usage">
<summary>
Usage statistics of how many tokens have been used for this request
</summary>
</member>
<member name="M:OpenAI_API.Embedding.EmbeddingResult.op_Implicit(OpenAI_API.Embedding.EmbeddingResult)~System.Single[]">
<summary>
Allows an EmbeddingResult to be implicitly cast to the array of floats representing the first embedding result
</summary>
<param name="embeddingResult">The <see cref="T:OpenAI_API.Embedding.EmbeddingResult"/> to cast to an array of floats.</param>
</member>
<member name="T:OpenAI_API.Embedding.Data">
<summary>
Data returned from the Embedding API.
</summary>
</member>
<member name="P:OpenAI_API.Embedding.Data.Object">
<summary>
Type of the response. In case of Data, this will be "embedding"
</summary>
</member>
<member name="P:OpenAI_API.Embedding.Data.Embedding">
<summary>
The input text represented as a vector (list) of floating point numbers
</summary>
</member>
<member name="P:OpenAI_API.Embedding.Data.Index">
<summary>
The index of this embedding entry within the list of results
</summary>
</member>
<member name="T:OpenAI_API.Embedding.IEmbeddingEndpoint">
<summary>
An interface for <see cref="T:OpenAI_API.Embedding.EmbeddingEndpoint"/>, for ease of mock testing, etc
</summary>
</member>
<member name="P:OpenAI_API.Embedding.IEmbeddingEndpoint.DefaultEmbeddingRequestArgs">
<summary>
This allows you to send requests to the recommended model without needing to specify it. Every request uses the <see cref="P:OpenAI_API.Models.Model.AdaTextEmbedding"/> model
</summary>
</member>
<member name="M:OpenAI_API.Embedding.IEmbeddingEndpoint.CreateEmbeddingAsync(System.String)">
<summary>
Ask the API to embed text using the default embedding model <see cref="P:OpenAI_API.Models.Model.AdaTextEmbedding"/>
</summary>
<param name="input">Text to be embedded</param>
<returns>Asynchronously returns the embedding result. Look in the <see cref="P:OpenAI_API.Embedding.Data.Embedding"/> property of <see cref="P:OpenAI_API.Embedding.EmbeddingResult.Data"/> to find the vector of floating point numbers</returns>
</member>
<member name="M:OpenAI_API.Embedding.IEmbeddingEndpoint.CreateEmbeddingAsync(OpenAI_API.Embedding.EmbeddingRequest)">
<summary>
Ask the API to embed text using a custom request
</summary>
<param name="request">Request to be send</param>
<returns>Asynchronously returns the embedding result. Look in its <see cref="P:OpenAI_API.Embedding.Data.Embedding"/> property of <see cref="P:OpenAI_API.Embedding.EmbeddingResult.Data"/> to find the vector of floating point numbers</returns>
</member>
<member name="M:OpenAI_API.Embedding.IEmbeddingEndpoint.GetEmbeddingsAsync(System.String)">
<summary>
Ask the API to embed text using the default embedding model <see cref="P:OpenAI_API.Models.Model.AdaTextEmbedding"/>
</summary>
<param name="input">Text to be embedded</param>
<returns>Asynchronously returns the first embedding result as an array of floats.</returns>
</member>
<member name="T:OpenAI_API.EndpointBase">
<summary>
A base object for any OpenAI API endpoint, encompassing common functionality
</summary>
</member>
<member name="F:OpenAI_API.EndpointBase._Api">
<summary>
The internal reference to the API, mostly used for authentication
</summary>
</member>
<member name="M:OpenAI_API.EndpointBase.#ctor(OpenAI_API.OpenAIAPI)">
<summary>
Constructor of the api endpoint base, to be called from the constructor of any derived classes. Rather than instantiating any endpoint yourself, access it through an instance of <see cref="T:OpenAI_API.OpenAIAPI"/>.
</summary>
<param name="api"></param>
</member>
<member name="P:OpenAI_API.EndpointBase.Endpoint">
<summary>
The name of the endpoint, which is the final path segment in the API URL. Must be overridden in a derived class.
</summary>
</member>
<member name="P:OpenAI_API.EndpointBase.Url">
<summary>
Gets the URL of the endpoint, based on the base OpenAI API URL followed by the endpoint name. For example "https://api.openai.com/v1/completions"
</summary>
</member>
<member name="M:OpenAI_API.EndpointBase.GetClient">
<summary>
Gets an HttpClient with the appropriate authorization and other headers set
</summary>
<returns>The fully initialized HttpClient</returns>
<exception cref="T:System.Security.Authentication.AuthenticationException">Thrown if there is no valid authentication. Please refer to <see href="https://github.com/OkGoDoIt/OpenAI-API-dotnet#authentication"/> for details.</exception>
</member>
<member name="M:OpenAI_API.EndpointBase.GetErrorMessage(System.String,System.Net.Http.HttpResponseMessage,System.String,System.String)">
<summary>
Formats a human-readable error message relating to calling the API and parsing the response
</summary>
<param name="resultAsString">The full content returned in the http response</param>
<param name="response">The http response object itself</param>
<param name="name">The name of the endpoint being used</param>
<param name="description">Additional details about the endpoint of this request (optional)</param>
<returns>A human-readable string error message.</returns>
</member>
<member name="M:OpenAI_API.EndpointBase.HttpRequestRaw(System.String,System.Net.Http.HttpMethod,System.Object,System.Boolean)">
<summary>
Sends an HTTP request and returns the response. Does not do any parsing, but does do error handling.
</summary>
<param name="url">(optional) If provided, overrides the url endpoint for this request. If omitted, then <see cref="P:OpenAI_API.EndpointBase.Url"/> will be used.</param>
<param name="verb">(optional) The HTTP verb to use, for example "<see cref="P:System.Net.Http.HttpMethod.Get"/>". If omitted, then "GET" is assumed.</param>
<param name="postData">(optional) A json-serializable object to include in the request body.</param>
<param name="streaming">(optional) If true, streams the response. Otherwise waits for the entire response before returning.</param>
<returns>The HttpResponseMessage of the response, which is confirmed to be successful.</returns>
<exception cref="T:System.Net.Http.HttpRequestException">Throws an exception if a non-success HTTP response was returned</exception>
</member>
<member name="M:OpenAI_API.EndpointBase.HttpGetContent``1(System.String)">
<summary>
Sends an HTTP GET request and returns the string content of the response without parsing, and does error handling.
</summary>
<param name="url">(optional) If provided, overrides the url endpoint for this request. If omitted, then <see cref="P:OpenAI_API.EndpointBase.Url"/> will be used.</param>
<returns>The text string of the response, which is confirmed to be successful.</returns>
<exception cref="T:System.Net.Http.HttpRequestException">Throws an exception if a non-success HTTP response was returned</exception>
</member>
<member name="M:OpenAI_API.EndpointBase.HttpRequest``1(System.String,System.Net.Http.HttpMethod,System.Object)">
<summary>
Sends an HTTP Request and does initial parsing
</summary>
<typeparam name="T">The <see cref="T:OpenAI_API.ApiResultBase"/>-derived class for the result</typeparam>
<param name="url">(optional) If provided, overrides the url endpoint for this request. If omitted, then <see cref="P:OpenAI_API.EndpointBase.Url"/> will be used.</param>
<param name="verb">(optional) The HTTP verb to use, for example "<see cref="P:System.Net.Http.HttpMethod.Get"/>". If omitted, then "GET" is assumed.</param>
<param name="postData">(optional) A json-serializable object to include in the request body.</param>
<returns>An awaitable Task with the parsed result of type <typeparamref name="T"/></returns>
<exception cref="T:System.Net.Http.HttpRequestException">Throws an exception if a non-success HTTP response was returned or if the result couldn't be parsed.</exception>
</member>
<member name="M:OpenAI_API.EndpointBase.HttpGet``1(System.String)">
<summary>
Sends an HTTP Get request and does initial parsing
</summary>
<typeparam name="T">The <see cref="T:OpenAI_API.ApiResultBase"/>-derived class for the result</typeparam>
<param name="url">(optional) If provided, overrides the url endpoint for this request. If omitted, then <see cref="P:OpenAI_API.EndpointBase.Url"/> will be used.</param>
<returns>An awaitable Task with the parsed result of type <typeparamref name="T"/></returns>
<exception cref="T:System.Net.Http.HttpRequestException">Throws an exception if a non-success HTTP response was returned or if the result couldn't be parsed.</exception>
</member>
<member name="M:OpenAI_API.EndpointBase.HttpPost``1(System.String,System.Object)">
<summary>
Sends an HTTP Post request and does initial parsing
</summary>
<typeparam name="T">The <see cref="T:OpenAI_API.ApiResultBase"/>-derived class for the result</typeparam>
<param name="url">(optional) If provided, overrides the url endpoint for this request. If omitted, then <see cref="P:OpenAI_API.EndpointBase.Url"/> will be used.</param>
<param name="postData">(optional) A json-serializable object to include in the request body.</param>
<returns>An awaitable Task with the parsed result of type <typeparamref name="T"/></returns>
<exception cref="T:System.Net.Http.HttpRequestException">Throws an exception if a non-success HTTP response was returned or if the result couldn't be parsed.</exception>
</member>
<member name="M:OpenAI_API.EndpointBase.HttpDelete``1(System.String,System.Object)">
<summary>
Sends an HTTP Delete request and does initial parsing
</summary>
<typeparam name="T">The <see cref="T:OpenAI_API.ApiResultBase"/>-derived class for the result</typeparam>
<param name="url">(optional) If provided, overrides the url endpoint for this request. If omitted, then <see cref="P:OpenAI_API.EndpointBase.Url"/> will be used.</param>
<param name="postData">(optional) A json-serializable object to include in the request body.</param>
<returns>An awaitable Task with the parsed result of type <typeparamref name="T"/></returns>
<exception cref="T:System.Net.Http.HttpRequestException">Throws an exception if a non-success HTTP response was returned or if the result couldn't be parsed.</exception>
</member>
<member name="M:OpenAI_API.EndpointBase.HttpPut``1(System.String,System.Object)">
<summary>
Sends an HTTP Put request and does initial parsing
</summary>
<typeparam name="T">The <see cref="T:OpenAI_API.ApiResultBase"/>-derived class for the result</typeparam>
<param name="url">(optional) If provided, overrides the url endpoint for this request. If omitted, then <see cref="P:OpenAI_API.EndpointBase.Url"/> will be used.</param>
<param name="postData">(optional) A json-serializable object to include in the request body.</param>
<returns>An awaitable Task with the parsed result of type <typeparamref name="T"/></returns>
<exception cref="T:System.Net.Http.HttpRequestException">Throws an exception if a non-success HTTP response was returned or if the result couldn't be parsed.</exception>
</member>
<member name="M:OpenAI_API.EndpointBase.HttpStreamingRequest``1(System.String,System.Net.Http.HttpMethod,System.Object)">
<summary>
Sends an HTTP request and handles a streaming response. Does basic line splitting and error handling.
</summary>
<param name="url">(optional) If provided, overrides the url endpoint for this request. If omitted, then <see cref="P:OpenAI_API.EndpointBase.Url"/> will be used.</param>
<param name="verb">(optional) The HTTP verb to use, for example "<see cref="P:System.Net.Http.HttpMethod.Get"/>". If omitted, then "GET" is assumed.</param>
<param name="postData">(optional) A json-serializable object to include in the request body.</param>
<returns>The HttpResponseMessage of the response, which is confirmed to be successful.</returns>
<exception cref="T:System.Net.Http.HttpRequestException">Throws an exception if a non-success HTTP response was returned</exception>
</member>
<member name="T:OpenAI_API.Files.File">
<summary>
Represents a single file used with the OpenAI Files endpoint. Files are used to upload and manage documents that can be used with features like Fine-tuning.
</summary>
</member>
<member name="P:OpenAI_API.Files.File.Id">
<summary>
Unique id for this file, so that it can be referenced in other operations
</summary>
</member>
<member name="P:OpenAI_API.Files.File.Name">
<summary>
The name of the file
</summary>
</member>
<member name="P:OpenAI_API.Files.File.Purpose">
<summary>
The purpose of this file, such as "fine-tune" or "search"
</summary>
</member>
<member name="P:OpenAI_API.Files.File.Bytes">
<summary>
The size of the file in bytes
</summary>
</member>
<member name="P:OpenAI_API.Files.File.CreatedAt">
<summary>
Timestamp for the creation time of this file
</summary>
</member>
<member name="P:OpenAI_API.Files.File.Deleted">
<summary>
Indicates whether the file was deleted. This attribute is used in the response of the Delete file operation.
</summary>
</member>
<member name="P:OpenAI_API.Files.File.Status">
<summary>
The status of the File (e.g. "uploaded" when an upload operation has completed)
</summary>
</member>
<member name="P:OpenAI_API.Files.File.StatusDetails">
<summary>
The status details; may be null
</summary>
</member>
<member name="T:OpenAI_API.Files.FilesEndpoint">
<summary>
The API endpoint for file operations: List, Upload, Delete, and Retrieve
</summary>
</member>
<member name="M:OpenAI_API.Files.FilesEndpoint.#ctor(OpenAI_API.OpenAIAPI)">
<summary>
Constructor of the api endpoint. Rather than instantiating this yourself, access it through an instance of <see cref="T:OpenAI_API.OpenAIAPI"/> as <see cref="P:OpenAI_API.OpenAIAPI.Files"/>.
</summary>
<param name="api"></param>
</member>
<member name="P:OpenAI_API.Files.FilesEndpoint.Endpoint">
<summary>
The name of the endpoint, which is the final path segment in the API URL. For example, "files".
</summary>
</member>
<member name="M:OpenAI_API.Files.FilesEndpoint.GetFilesAsync">
<summary>
Get the list of all files
</summary>
<returns></returns>
<exception cref="T:System.Net.Http.HttpRequestException"></exception>
</member>
<member name="M:OpenAI_API.Files.FilesEndpoint.GetFileAsync(System.String)">
<summary>
Returns information about a specific file
</summary>
<param name="fileId">The ID of the file to use for this request</param>
<returns></returns>
</member>
<member name="M:OpenAI_API.Files.FilesEndpoint.GetFileContentAsStringAsync(System.String)">
<summary>
Returns the contents of the specified file as a string
</summary>
<param name="fileId">The ID of the file to use for this request</param>
<returns></returns>
</member>
<member name="M:OpenAI_API.Files.FilesEndpoint.DeleteFileAsync(System.String)">
<summary>
Delete a file
</summary>
<param name="fileId">The ID of the file to use for this request</param>
<returns></returns>
</member>
<member name="M:OpenAI_API.Files.FilesEndpoint.UploadFileAsync(System.String,System.String)">
<summary>
Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact OpenAI if you need to increase the storage limit.
</summary>
<param name="filePath">The path of the local file to upload for this request</param>
<param name="purpose">The intended purpose of the uploaded documents. Use "fine-tune" for Fine-tuning. This allows us to validate the format of the uploaded file.</param>
</member>
<member name="T:OpenAI_API.Files.FilesEndpoint.FilesData">
<summary>
A helper class to deserialize the JSON API responses. This should not be used directly.
</summary>
</member>
<member name="T:OpenAI_API.Files.IFilesEndpoint">
<summary>
An interface for <see cref="T:OpenAI_API.Files.FilesEndpoint"/>, for ease of mock testing, etc
</summary>
</member>
<member name="M:OpenAI_API.Files.IFilesEndpoint.GetFilesAsync">
<summary>
Get the list of all files
</summary>
<returns></returns>
<exception cref="!:HttpRequestException"></exception>
</member>
<member name="M:OpenAI_API.Files.IFilesEndpoint.GetFileAsync(System.String)">
<summary>
Returns information about a specific file
</summary>
<param name="fileId">The ID of the file to use for this request</param>
<returns></returns>
</member>
<member name="M:OpenAI_API.Files.IFilesEndpoint.GetFileContentAsStringAsync(System.String)">
<summary>
Returns the contents of the specified file as a string
</summary>
<param name="fileId">The ID of the file to use for this request</param>
<returns></returns>
</member>
<member name="M:OpenAI_API.Files.IFilesEndpoint.DeleteFileAsync(System.String)">
<summary>
Delete a file
</summary>
<param name="fileId">The ID of the file to use for this request</param>
<returns></returns>
</member>
<member name="M:OpenAI_API.Files.IFilesEndpoint.UploadFileAsync(System.String,System.String)">
<summary>
Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact OpenAI if you need to increase the storage limit.
</summary>
<param name="filePath">The path of the local file to upload for this request</param>
<param name="purpose">The intended purpose of the uploaded documents. Use "fine-tune" for Fine-tuning. This allows us to validate the format of the uploaded file.</param>
</member>
<member name="T:OpenAI_API.Images.IImageGenerationEndpoint">
<summary>
An interface for <see cref="T:OpenAI_API.Images.ImageGenerationEndpoint"/>. Given a prompt, the model will generate a new image.
</summary>
</member>
<member name="M:OpenAI_API.Images.IImageGenerationEndpoint.CreateImageAsync(OpenAI_API.Images.ImageGenerationRequest)">
<summary>
Ask the API to create an image given a prompt.
</summary>
<param name="request">Request to be sent</param>
<returns>Asynchronously returns the image result. Look in the <see cref="P:OpenAI_API.Images.Data.Url"/> property of each returned data entry for the image URL.</returns>
</member>
<member name="M:OpenAI_API.Images.IImageGenerationEndpoint.CreateImageAsync(System.String)">
<summary>
Ask the API to create an image given a prompt.
</summary>
<param name="input">A text description of the desired image(s)</param>
<returns>Asynchronously returns the image result. Look in the <see cref="P:OpenAI_API.Images.Data.Url"/> property of each returned data entry for the image URL.</returns>
</member>
<member name="T:OpenAI_API.Images.ImageGenerationEndpoint">
<summary>
Given a prompt, the model will generate a new image.
</summary>
</member>
<member name="P:OpenAI_API.Images.ImageGenerationEndpoint.Endpoint">
<summary>
The name of the endpoint, which is the final path segment in the API URL. For example, "image".
</summary>
</member>
<member name="M:OpenAI_API.Images.ImageGenerationEndpoint.#ctor(OpenAI_API.OpenAIAPI)">
<summary>
Constructor of the api endpoint. Rather than instantiating this yourself, access it through an instance of <see cref="T:OpenAI_API.OpenAIAPI"/> as <see cref="P:OpenAI_API.OpenAIAPI.ImageGenerations"/>.
</summary>
<param name="api"></param>
</member>
<member name="M:OpenAI_API.Images.ImageGenerationEndpoint.CreateImageAsync(System.String)">
<summary>
Ask the API to create an image given a prompt.
</summary>
<param name="input">A text description of the desired image(s)</param>
<returns>Asynchronously returns the image result. See <see cref="P:OpenAI_API.Images.Data.Url"/> for the resulting image URL.</returns>
</member>
<member name="M:OpenAI_API.Images.ImageGenerationEndpoint.CreateImageAsync(OpenAI_API.Images.ImageGenerationRequest)">
<summary>
Ask the API to create an image given a prompt.
</summary>
<param name="request">The request to send to the API</param>
<returns>Asynchronously returns the image result. See <see cref="P:OpenAI_API.Images.Data.Url"/> for the resulting image URL.</returns>
</member>
<member name="T:OpenAI_API.Images.ImageGenerationRequest">
<summary>
Represents a request to the Images API. Mostly matches the parameters in <see href="https://platform.openai.com/docs/api-reference/images/create">the OpenAI docs</see>, although some have been renamed or expanded into single/multiple properties for ease of use.
</summary>
</member>
<member name="P:OpenAI_API.Images.ImageGenerationRequest.Prompt">
<summary>
A text description of the desired image(s). The maximum length is 1000 characters.
</summary>
</member>
<member name="P:OpenAI_API.Images.ImageGenerationRequest.NumOfImages">
<summary>
How many different choices to request for each prompt. Defaults to 1.
</summary>
</member>
<member name="P:OpenAI_API.Images.ImageGenerationRequest.User">
<summary>
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Optional.
</summary>
</member>
<member name="P:OpenAI_API.Images.ImageGenerationRequest.Size">
<summary>
The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024. Defaults to 1024x1024.
</summary>
</member>
<member name="P:OpenAI_API.Images.ImageGenerationRequest.ResponseFormat">
<summary>
The format in which the generated images are returned. Must be one of url or b64_json. Defaults to Url.
</summary>
</member>
<member name="M:OpenAI_API.Images.ImageGenerationRequest.#ctor">
<summary>
Creates a new, empty <see cref="T:OpenAI_API.Images.ImageGenerationRequest"/>
</summary>
</member>
<member name="M:OpenAI_API.Images.ImageGenerationRequest.#ctor(System.String,System.Nullable{System.Int32},OpenAI_API.Images.ImageSize,System.String,OpenAI_API.Images.ImageResponseFormat)">
<summary>
Creates a new <see cref="T:OpenAI_API.Images.ImageGenerationRequest"/> with the specified parameters
</summary>
<param name="prompt">A text description of the desired image(s). The maximum length is 1000 characters.</param>
<param name="numOfImages">How many different choices to request for each prompt. Defaults to 1.</param>
<param name="size">The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.</param>
<param name="user">A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.</param>
<param name="responseFormat">The format in which the generated images are returned. Must be one of url or b64_json.</param>
</member>
<member name="T:OpenAI_API.Images.ImageResponseFormat">
<summary>
Represents available response formats for image generation endpoints
</summary>
</member>
<member name="P:OpenAI_API.Images.ImageResponseFormat.Url">
<summary>
Requests the generated image(s) to be returned as a URL
</summary>
</member>
<member name="P:OpenAI_API.Images.ImageResponseFormat.B64_json">
<summary>
Requests the generated image(s) to be returned as base64-encoded JSON data
</summary>
</member>
<member name="M:OpenAI_API.Images.ImageResponseFormat.ToString">
<summary>
Gets the string value for this response format to pass to the API
</summary>
<returns>The response format as a string</returns>
</member>
<member name="M:OpenAI_API.Images.ImageResponseFormat.op_Implicit(OpenAI_API.Images.ImageResponseFormat)~System.String">
<summary>
Gets the string value for this response format to pass to the API
</summary>
<param name="value">The ImageResponseFormat to convert</param>
</member>
<member name="T:OpenAI_API.Images.ImageResult">
<summary>
Represents an image result returned by the Image API.
</summary>
</member>
<member name="P:OpenAI_API.Images.ImageResult.Data">
<summary>
List of image results returned by the API
</summary>
</member>
<member name="M:OpenAI_API.Images.ImageResult.ToString">
<summary>
Gets the url or base64-encoded image data of the first result, or null if there are no results
</summary>
<returns></returns>
</member>
<member name="T:OpenAI_API.Images.Data">
<summary>
Data returned from the Image API.
</summary>
</member>
<member name="P:OpenAI_API.Images.Data.Url">
<summary>
The url of the image result
</summary>
</member>
<member name="P:OpenAI_API.Images.Data.Base64Data">
<summary>
The base64-encoded image data as returned by the API
</summary>
</member>
<member name="T:OpenAI_API.Images.ImageSize">
<summary>
Represents available sizes for image generation endpoints
</summary>
</member>
<member name="P:OpenAI_API.Images.ImageSize._256">
<summary>
Requests an image that is 256x256
</summary>
</member>
<member name="P:OpenAI_API.Images.ImageSize._512">
<summary>
Requests an image that is 512x512
</summary>
</member>
<member name="P:OpenAI_API.Images.ImageSize._1024">
<summary>
Requests an image that is 1024x1024
</summary>
</member>
<member name="M:OpenAI_API.Images.ImageSize.ToString">
<summary>
Gets the string value for this size to pass to the API
</summary>
<returns>The size as a string</returns>
</member>
<member name="M:OpenAI_API.Images.ImageSize.op_Implicit(OpenAI_API.Images.ImageSize)~System.String">
<summary>
Gets the string value for this size to pass to the API
</summary>
<param name="value">The ImageSize to convert</param>
</member>
<member name="T:OpenAI_API.IOpenAIAPI">
<summary>
An interface for <see cref="T:OpenAI_API.OpenAIAPI"/>, for ease of mock testing, etc
</summary>
</member>
<member name="P:OpenAI_API.IOpenAIAPI.ApiUrlFormat">
<summary>
Base url format for the API.
For OpenAI, should be "https://api.openai.com/{0}/{1}"
For Azure, should be "https://(your-resource-name).openai.azure.com/openai/deployments/(deployment-id)/{1}?api-version={0}"
</summary>
</member>
<member name="P:OpenAI_API.IOpenAIAPI.ApiVersion">
<summary>
Version of the Rest Api
</summary>
</member>
<member name="P:OpenAI_API.IOpenAIAPI.Auth">
<summary>
The API authentication information to use for API calls
</summary>
</member>
<member name="P:OpenAI_API.IOpenAIAPI.Chat">
<summary>
Text generation in the form of chat messages. This interacts with the ChatGPT API.
</summary>
</member>
<member name="P:OpenAI_API.IOpenAIAPI.Moderation">
<summary>
Classify text against the OpenAI Content Policy.
</summary>
</member>
<member name="P:OpenAI_API.IOpenAIAPI.Completions">
<summary>
Text generation is the core function of the API. You give the API a prompt, and it generates a completion. The way you “program” the API to do a task is by simply describing the task in plain English or providing a few written examples. This simple approach works for a wide range of use cases, including summarization, translation, grammar correction, question answering, chatbots, composing emails, and much more (see the prompt library for inspiration).
</summary>
</member>
<member name="P:OpenAI_API.IOpenAIAPI.Embeddings">
<summary>
The API lets you transform text into a vector (list) of floating point numbers. The distance between two vectors measures their relatedness. Small distances suggest high relatedness and large distances suggest low relatedness.
</summary>
</member>
<member name="P:OpenAI_API.IOpenAIAPI.Models">
<summary>
The API endpoint for querying available Engines/models
</summary>
</member>
<member name="P:OpenAI_API.IOpenAIAPI.Files">
<summary>
The API lets you do operations with files. You can upload, delete or retrieve files. Files can be used for fine-tuning, search, etc.
</summary>
</member>
<member name="P:OpenAI_API.IOpenAIAPI.ImageGenerations">
<summary>
The API lets you do operations with images. Given a prompt and/or an input image, the model will generate a new image.
</summary>
</member>
<member name="T:OpenAI_API.Models.IModelsEndpoint">
<summary>
An interface for <see cref="T:OpenAI_API.Models.ModelsEndpoint"/>, for ease of mock testing, etc
</summary>
</member>
<member name="M:OpenAI_API.Models.IModelsEndpoint.RetrieveModelDetailsAsync(System.String)">
<summary>
Get details about a particular Model from the API, specifically properties such as <see cref="P:OpenAI_API.Models.Model.OwnedBy"/> and permissions.
</summary>
<param name="id">The id/name of the model to get more details about</param>
<returns>Asynchronously returns the <see cref="T:OpenAI_API.Models.Model"/> with all available properties</returns>
</member>
<member name="M:OpenAI_API.Models.IModelsEndpoint.RetrieveModelDetailsAsync(System.String,OpenAI_API.APIAuthentication)">
<summary>
Get details about a particular Model from the API, specifically properties such as <see cref="P:OpenAI_API.Models.Model.OwnedBy"/> and permissions.
</summary>
<param name="id">The id/name of the model to get more details about</param>
<param name="auth">Obsolete: IGNORED</param>
<returns>Asynchronously returns the <see cref="T:OpenAI_API.Models.Model"/> with all available properties</returns>
</member>
<member name="M:OpenAI_API.Models.IModelsEndpoint.GetModelsAsync">
<summary>
List all models via the API
</summary>
<returns>Asynchronously returns the list of all <see cref="T:OpenAI_API.Models.Model"/>s</returns>
</member>
<member name="T:OpenAI_API.Models.Model">
<summary>
Represents a language model
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.ModelID">
<summary>
The id/name of the model
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.OwnedBy">
<summary>
The owner of this model. Generally "openai" for a base OpenAI model, or the organization name for a custom or fine-tuned model.
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.Object">
<summary>
The type of object. Should always be 'model'.
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.Created">
<summary>
The time when the model was created
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.CreatedUnixTime">
<summary>
The time when the model was created in unix epoch format
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.Permission">
<summary>
Permissions for use of the model
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.Root">
<summary>
Currently (2023-01-27) this seems to be a duplicate of <see cref="P:OpenAI_API.Models.Model.ModelID"/>, but it is included for completeness.
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.Parent">
<summary>
Currently (2023-01-27) seems unused, probably intended for nesting of models in a later release
</summary>
</member>
<member name="M:OpenAI_API.Models.Model.op_Implicit(OpenAI_API.Models.Model)~System.String">
<summary>
Allows a model to be implicitly cast to the string of its <see cref="P:OpenAI_API.Models.Model.ModelID"/>
</summary>
<param name="model">The <see cref="T:OpenAI_API.Models.Model"/> to cast to a string.</param>
</member>
<member name="M:OpenAI_API.Models.Model.op_Implicit(System.String)~OpenAI_API.Models.Model">
<summary>
Allows a string to be implicitly cast as a <see cref="T:OpenAI_API.Models.Model"/> with that <see cref="P:OpenAI_API.Models.Model.ModelID"/>
</summary>
<param name="name">The id/<see cref="P:OpenAI_API.Models.Model.ModelID"/> to use</param>
</member>
<member name="M:OpenAI_API.Models.Model.#ctor(System.String)">
<summary>
Represents a Model with the given id/<see cref="P:OpenAI_API.Models.Model.ModelID"/>
</summary>
<param name="name">The id/<see cref="P:OpenAI_API.Models.Model.ModelID"/> to use.
</param>
</member>
<member name="M:OpenAI_API.Models.Model.#ctor">
<summary>
Represents a generic Model
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.DefaultModel">
<summary>
The default model to use in requests if no other model is specified.
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.AdaText">
<summary>
Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.BabbageText">
<summary>
Capable of straightforward tasks, very fast, and lower cost.
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.CurieText">
<summary>
Very capable, but faster and lower cost than Davinci.
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.DavinciText">
<summary>
Most capable GPT-3 model. Can do any task the other models can do, often with higher quality, longer output and better instruction-following. Also supports inserting completions within text.
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.CushmanCode">
<summary>
Almost as capable as Davinci Codex, but slightly faster. This speed advantage may make it preferable for real-time applications.
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.DavinciCode">
<summary>
Most capable Codex model. Particularly good at translating natural language to code. In addition to completing code, also supports inserting completions within code.
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.AdaTextEmbedding">
<summary>
OpenAI offers one second-generation embedding model for use with the embeddings API endpoint.
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.ChatGPTTurbo">
<summary>
Most capable GPT-3.5 model and optimized for chat at 1/10th the cost of text-davinci-003. Will be updated with the latest model iteration.
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.ChatGPTTurbo0301">
<summary>
Snapshot of gpt-3.5-turbo from March 1st 2023. Unlike gpt-3.5-turbo, this model will not receive updates, and will only be supported for a three month period ending on June 1st 2023.
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.GPT4">
<summary>
More capable than any GPT-3.5 model, able to do more complex tasks, and optimized for chat. Will be updated with the latest model iteration. Currently in limited beta so your OpenAI account needs to be whitelisted to use this.
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.GPT4_32k_Context">
<summary>
Same capabilities as the base gpt-4 model but with 4x the context length. Will be updated with the latest model iteration. Currently in limited beta so your OpenAI account needs to be whitelisted to use this.
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.TextModerationStable">
<summary>
Stable text moderation model that may provide lower accuracy compared to TextModerationLatest.
OpenAI states they will provide advanced notice before updating this model.
</summary>
</member>
<member name="P:OpenAI_API.Models.Model.TextModerationLatest">
<summary>
The latest text moderation model. This model will be automatically upgraded over time.
</summary>
</member>
<member name="M:OpenAI_API.Models.Model.RetrieveModelDetailsAsync(OpenAI_API.OpenAIAPI)">
<summary>
Gets more details about this Model from the API, specifically properties such as <see cref="P:OpenAI_API.Models.Model.OwnedBy"/> and permissions.
</summary>
<param name="api">An instance of the API with authentication in order to call the endpoint.</param>
<returns>Asynchronously returns an Model with all relevant properties filled in</returns>
</member>
<member name="T:OpenAI_API.Models.Permissions">
<summary>
Permissions for using the model
</summary>
</member>
<member name="P:OpenAI_API.Models.Permissions.Id">
<summary>
Permission Id (not to be confused with ModelId)
</summary>
</member>
<member name="P:OpenAI_API.Models.Permissions.Object">
<summary>
Object type, should always be 'model_permission'
</summary>
</member>
<member name="P:OpenAI_API.Models.Permissions.Created">
<summary>
The time when the permission was created
</summary>
</member>
<member name="P:OpenAI_API.Models.Permissions.CreatedUnixTime">
<summary>
Unix timestamp for creation date/time
</summary>
</member>
<member name="P:OpenAI_API.Models.Permissions.AllowCreateEngine">
<summary>
Does the model allow creating engines?
</summary>
</member>
<member name="P:OpenAI_API.Models.Permissions.AllowSampling">
<summary>
Does the model support temperature sampling?
https://beta.openai.com/docs/api-reference/completions/create#completions/create-temperature
</summary>
</member>
<member name="P:OpenAI_API.Models.Permissions.AllowLogProbs">
<summary>
Does the model support logprobs?
https://beta.openai.com/docs/api-reference/completions/create#completions/create-logprobs
</summary>
</member>
<member name="P:OpenAI_API.Models.Permissions.AllowSearchIndices">
<summary>
Does the model support search indices?
</summary>
</member>
<member name="P:OpenAI_API.Models.Permissions.AllowFineTuning">
<summary>
Does the model allow fine tuning?
https://beta.openai.com/docs/api-reference/fine-tunes
</summary>
</member>
<member name="P:OpenAI_API.Models.Permissions.Organization">
<summary>
Is the model only allowed for a particular organization? May not be implemented yet.
</summary>
</member>
<member name="P:OpenAI_API.Models.Permissions.Group">
<summary>
Is the model part of a group? Appears to be unimplemented; currently always null.
</summary>
</member>
<member name="T:OpenAI_API.Models.ModelsEndpoint">
<summary>
The API endpoint for querying available models
</summary>
</member>
<member name="P:OpenAI_API.Models.ModelsEndpoint.Endpoint">
<summary>
The name of the endpoint, which is the final path segment in the API URL. For example, "models".
</summary>
</member>
<member name="M:OpenAI_API.Models.ModelsEndpoint.#ctor(OpenAI_API.OpenAIAPI)">
<summary>
Constructor of the api endpoint. Rather than instantiating this yourself, access it through an instance of <see cref="T:OpenAI_API.OpenAIAPI"/> as <see cref="P:OpenAI_API.OpenAIAPI.Models"/>.
</summary>
<param name="api"></param>
</member>
<member name="M:OpenAI_API.Models.ModelsEndpoint.RetrieveModelDetailsAsync(System.String)">
<summary>
Get details about a particular Model from the API, specifically properties such as <see cref="P:OpenAI_API.Models.Model.OwnedBy"/> and permissions.
</summary>
<param name="id">The id/name of the model to get more details about</param>
<returns>Asynchronously returns the <see cref="T:OpenAI_API.Models.Model"/> with all available properties</returns>
</member>
<member name="M:OpenAI_API.Models.ModelsEndpoint.GetModelsAsync">
<summary>
List all models via the API
</summary>
<returns>Asynchronously returns the list of all <see cref="T:OpenAI_API.Models.Model"/>s</returns>
</member>
<member name="M:OpenAI_API.Models.ModelsEndpoint.RetrieveModelDetailsAsync(System.String,OpenAI_API.APIAuthentication)">
<summary>
Get details about a particular Model from the API, specifically properties such as <see cref="P:OpenAI_API.Models.Model.OwnedBy"/> and permissions.
</summary>
<param name="id">The id/name of the model to get more details about</param>
<param name="auth">Obsolete: IGNORED</param>
<returns>Asynchronously returns the <see cref="T:OpenAI_API.Models.Model"/> with all available properties</returns>
</member>
<member name="T:OpenAI_API.Models.ModelsEndpoint.JsonHelperRoot">
<summary>
A helper class to deserialize the JSON API responses. This should not be used directly.
</summary>
</member>
<member name="T:OpenAI_API.Moderation.IModerationEndpoint">
<summary>
An interface for <see cref="T:OpenAI_API.Moderation.ModerationEndpoint"/>, which classifies text against the OpenAI Content Policy
</summary>
</member>
<member name="P:OpenAI_API.Moderation.IModerationEndpoint.DefaultModerationRequestArgs">
<summary>
This allows you to send requests to the recommended model without needing to specify one. OpenAI recommends using the <see cref="P:OpenAI_API.Models.Model.TextModerationLatest"/> model
</summary>
</member>
<member name="M:OpenAI_API.Moderation.IModerationEndpoint.CallModerationAsync(OpenAI_API.Moderation.ModerationRequest)">
<summary>
Ask the API to classify the text using a custom request.
</summary>
<param name="request">Request to send to the API</param>
<returns>Asynchronously returns the classification result</returns>
</member>
<member name="M:OpenAI_API.Moderation.IModerationEndpoint.CallModerationAsync(System.String)">
<summary>
Ask the API to classify the text using the default model.
</summary>
<param name="input">Text to classify</param>
<returns>Asynchronously returns the classification result</returns>
</member>
<member name="T:OpenAI_API.Moderation.ModerationEndpoint">
<summary>
This endpoint classifies text against the OpenAI Content Policy
</summary>
</member>
<member name="P:OpenAI_API.Moderation.ModerationEndpoint.DefaultModerationRequestArgs">
<summary>
This allows you to send requests to the recommended model without needing to specify one. OpenAI recommends using the <see cref="P:OpenAI_API.Models.Model.TextModerationLatest"/> model
</summary>
</member>
<member name="P:OpenAI_API.Moderation.ModerationEndpoint.Endpoint">
<summary>
The name of the endpoint, which is the final path segment in the API URL. For example, "moderations".
</summary>
</member>
<member name="M:OpenAI_API.Moderation.ModerationEndpoint.#ctor(OpenAI_API.OpenAIAPI)">
<summary>
Constructor of the api endpoint. Rather than instantiating this yourself, access it through an instance of <see cref="T:OpenAI_API.OpenAIAPI"/> as <see cref="P:OpenAI_API.OpenAIAPI.Moderation"/>.
</summary>
<param name="api"></param>
</member>
<member name="M:OpenAI_API.Moderation.ModerationEndpoint.CallModerationAsync(System.String)">
<summary>
Ask the API to classify the text using the default model.
</summary>
<param name="input">Text to classify</param>
<returns>Asynchronously returns the classification result</returns>
</member>
<member name="M:OpenAI_API.Moderation.ModerationEndpoint.CallModerationAsync(OpenAI_API.Moderation.ModerationRequest)">
<summary>
Ask the API to classify the text using a custom request.
</summary>
<param name="request">Request to send to the API</param>
<returns>Asynchronously returns the classification result</returns>
</member>
<member name="T:OpenAI_API.Moderation.ModerationRequest">
<summary>
Represents a request to the Moderations API.
</summary>
</member>
<member name="P:OpenAI_API.Moderation.ModerationRequest.Model">
<summary>
Which Moderation model to use for this request. Two content moderation models are available: <see cref="P:OpenAI_API.Models.Model.TextModerationStable"/> and <see cref="P:OpenAI_API.Models.Model.TextModerationLatest"/>. The default is <see cref="P:OpenAI_API.Models.Model.TextModerationLatest"/>, which will be automatically upgraded over time. This ensures you are always using the most accurate model. If you use <see cref="P:OpenAI_API.Models.Model.TextModerationStable"/>, OpenAI will provide advance notice before updating the model. Accuracy of <see cref="P:OpenAI_API.Models.Model.TextModerationStable"/> may be slightly lower than for <see cref="P:OpenAI_API.Models.Model.TextModerationLatest"/>.
</summary>
</member>
<member name="P:OpenAI_API.Moderation.ModerationRequest.Input">
<summary>
The input text to classify
</summary>
</member>
<member name="P:OpenAI_API.Moderation.ModerationRequest.Inputs">
<summary>
An array of inputs to classify
</summary>
</member>
<member name="M:OpenAI_API.Moderation.ModerationRequest.#ctor">
<summary>
Creates a new, empty <see cref="T:OpenAI_API.Moderation.ModerationRequest"/>
</summary>
</member>
<member name="M:OpenAI_API.Moderation.ModerationRequest.#ctor(System.String,OpenAI_API.Models.Model)">
<summary>
Creates a new <see cref="T:OpenAI_API.Moderation.ModerationRequest"/> with the specified parameters
</summary>
<param name="input">The prompt to classify</param>
<param name="model">The model to use. You can use <see cref="M:OpenAI_API.Models.ModelsEndpoint.GetModelsAsync"/> to see all of your available models, or use a standard model like <see cref="P:OpenAI_API.Models.Model.TextModerationLatest"/>.</param>
</member>
<member name="M:OpenAI_API.Moderation.ModerationRequest.#ctor(System.String[],OpenAI_API.Models.Model)">
<summary>
Creates a new <see cref="T:OpenAI_API.Moderation.ModerationRequest"/> with the specified parameters
</summary>
<param name="inputs">An array of prompts to classify</param>
<param name="model">The model to use. You can use <see cref="M:OpenAI_API.Models.ModelsEndpoint.GetModelsAsync"/> to see all of your available models, or use a standard model like <see cref="P:OpenAI_API.Models.Model.TextModerationLatest"/>.</param>
</member>
<member name="M:OpenAI_API.Moderation.ModerationRequest.#ctor(System.String[])">
<summary>
Creates a new <see cref="T:OpenAI_API.Moderation.ModerationRequest"/> with the specified input(s) and the <see cref="P:OpenAI_API.Models.Model.TextModerationLatest"/> model.
</summary>
<param name="input">One or more prompts to classify</param>
</member>
<member name="T:OpenAI_API.Moderation.ModerationResult">
<summary>
Represents a moderation result returned by the Moderations API
</summary>
</member>
<member name="P:OpenAI_API.Moderation.ModerationResult.Results">
<summary>
List of results returned from the Moderations API request
</summary>
</member>
<member name="P:OpenAI_API.Moderation.ModerationResult.Id">
<summary>
The unique identifier associated with a moderation request
Consists of the prefix "modr-" followed by a randomly generated alphanumeric string
</summary>
</member>
<member name="M:OpenAI_API.Moderation.ModerationResult.ToString">
<summary>
Convenience function to return the highest-confidence category for which the content was flagged, or null if the content was not flagged
</summary>
<returns>the highest-confidence category for which the content was flagged, or null if the content was not flagged</returns>
</member>
<member name="T:OpenAI_API.Moderation.Result">
<summary>
The result generated by the Moderations API request
</summary>
</member>
<member name="P:OpenAI_API.Moderation.Result.Categories">
<summary>
A series of categories that the content could be flagged for. Values are booleans indicating whether the text is flagged in that category
</summary>
</member>
<member name="P:OpenAI_API.Moderation.Result.CategoryScores">
<summary>
Confidence scores for the different category flags. Values are between 0 and 1, where 0 indicates low confidence
</summary>
</member>
<member name="P:OpenAI_API.Moderation.Result.Flagged">
<summary>
True if the text was flagged in any of the categories
</summary>
</member>
<member name="P:OpenAI_API.Moderation.Result.FlaggedCategories">
<summary>
Returns a list of all categories for which the content was flagged, sorted from highest confidence to lowest
</summary>
</member>
<member name="P:OpenAI_API.Moderation.Result.MainContentFlag">
<summary>
Returns the highest-confidence category for which the content was flagged, or null if the content was not flagged
</summary>
</member>
<member name="P:OpenAI_API.Moderation.Result.HighestFlagScore">
<summary>
Returns the highest confidence flag score across all categories
</summary>
</member>
<member name="T:OpenAI_API.Moderation.Categories">
<summary>
Series of boolean values indicating what the text is flagged for
</summary>
</member>
<member name="P:OpenAI_API.Moderation.Categories.Hate">
<summary>
If the text contains hate speech
</summary>
</member>
<member name="P:OpenAI_API.Moderation.Categories.HateThreatening">
<summary>
If the text contains hate / threatening speech
</summary>
</member>
<member name="P:OpenAI_API.Moderation.Categories.SelfHarm">
<summary>
If the text contains content about self-harm
</summary>
</member>
<member name="P:OpenAI_API.Moderation.Categories.Sexual">
<summary>
If the text contains sexual content
</summary>
</member>
<member name="P:OpenAI_API.Moderation.Categories.SexualMinors">
<summary>
If the text contains sexual content featuring minors
</summary>
</member>
<member name="P:OpenAI_API.Moderation.Categories.Violence">
<summary>
If the text contains violent content
</summary>
</member>
<member name="P:OpenAI_API.Moderation.Categories.ViolenceGraphic">
<summary>
If the text contains violent and graphic content
</summary>
</member>
<member name="T:OpenAI_API.Moderation.CategoryScores">
<summary>
Confidence scores for the different category flags
</summary>
</member>
<member name="P:OpenAI_API.Moderation.CategoryScores.Hate">
<summary>
Confidence score indicating "hate" content is detected in the text
A value between 0 and 1, where 0 indicates low confidence
</summary>
</member>
<member name="P:OpenAI_API.Moderation.CategoryScores.HateThreatening">
<summary>
Confidence score indicating "hate/threatening" content is detected in the text
A value between 0 and 1, where 0 indicates low confidence
</summary>
</member>
<member name="P:OpenAI_API.Moderation.CategoryScores.SelfHarm">
<summary>
Confidence score indicating "self-harm" content is detected in the text
A value between 0 and 1, where 0 indicates low confidence
</summary>
</member>
<member name="P:OpenAI_API.Moderation.CategoryScores.Sexual">
<summary>
Confidence score indicating "sexual" content is detected in the text
A value between 0 and 1, where 0 indicates low confidence
</summary>
</member>
<member name="P:OpenAI_API.Moderation.CategoryScores.SexualMinors">
<summary>
Confidence score indicating "sexual/minors" content is detected in the text
A value between 0 and 1, where 0 indicates low confidence
</summary>
</member>
<member name="P:OpenAI_API.Moderation.CategoryScores.Violence">
<summary>
Confidence score indicating "violence" content is detected in the text
A value between 0 and 1, where 0 indicates low confidence
</summary>
</member>
<member name="P:OpenAI_API.Moderation.CategoryScores.ViolenceGraphic">
<summary>
Confidence score indicating "violence/graphic" content is detected in the text
A value between 0 and 1, where 0 indicates low confidence
</summary>
</member>
<member name="T:OpenAI_API.OpenAIAPI">
<summary>
Entry point to the OpenAI API, handling auth and allowing access to the various API endpoints
</summary>
</member>
<member name="P:OpenAI_API.OpenAIAPI.ApiUrlFormat">
<summary>
Base url format for the API.
For OpenAI, should be "https://api.openai.com/{0}/{1}"
For Azure, should be "https://(your-resource-name).openai.azure.com/openai/deployments/(deployment-id)/{1}?api-version={0}"
</summary>
</member>
<member name="P:OpenAI_API.OpenAIAPI.ApiVersion">
<summary>
Version of the Rest Api
</summary>
</member>
<member name="P:OpenAI_API.OpenAIAPI.Auth">
<summary>
The API authentication information to use for API calls
</summary>
</member>
<member name="P:OpenAI_API.OpenAIAPI.HttpClientFactory">
<summary>
Optionally provide an IHttpClientFactory used to create the HttpClient that sends requests.
</summary>
</member>
<member name="M:OpenAI_API.OpenAIAPI.#ctor(OpenAI_API.APIAuthentication)">
<summary>
Creates a new entry point to the OpenAI API, handling auth and allowing access to the various API endpoints
</summary>
<param name="apiKeys">The API authentication information to use for API calls, or <see langword="null"/> to attempt to use the <see cref="P:OpenAI_API.APIAuthentication.Default"/>, potentially loading from environment vars or from a config file.</param>
</member>
<member name="M:OpenAI_API.OpenAIAPI.ForAzure(System.String,System.String,OpenAI_API.APIAuthentication)">
<summary>
Instantiates a version of the API for connecting to the Azure OpenAI endpoint instead of the main OpenAI endpoint.
</summary>
<param name="YourResourceName">The name of your Azure OpenAI Resource</param>
<param name="deploymentId">The name of your model deployment. You're required to first deploy a model before you can make calls.</param>
<param name="apiKey">The API authentication information to use for API calls, or <see langword="null"/> to attempt to use the <see cref="P:OpenAI_API.APIAuthentication.Default"/>, potentially loading from environment vars or from a config file. Currently this library only supports the api-key flow, not the AD-Flow.</param>
<returns></returns>
</member>
<member name="P:OpenAI_API.OpenAIAPI.Completions">
<summary>
Text generation is the core function of the API. You give the API a prompt, and it generates a completion. The way you “program” the API to do a task is by simply describing the task in plain English or providing a few written examples. This simple approach works for a wide range of use cases, including summarization, translation, grammar correction, question answering, chatbots, composing emails, and much more (see the prompt library for inspiration).
</summary>
</member>
<member name="P:OpenAI_API.OpenAIAPI.Embeddings">
<summary>
The API lets you transform text into a vector (list) of floating point numbers. The distance between two vectors measures their relatedness. Small distances suggest high relatedness and large distances suggest low relatedness.
</summary>
</member>
<member name="P:OpenAI_API.OpenAIAPI.Chat">
<summary>
Text generation in the form of chat messages. This interacts with the ChatGPT API.
</summary>
</member>
<member name="P:OpenAI_API.OpenAIAPI.Moderation">
<summary>
Classify text against the OpenAI Content Policy.
</summary>
</member>
<member name="P:OpenAI_API.OpenAIAPI.Models">
<summary>
The API endpoint for querying available Engines/models
</summary>
</member>
<member name="P:OpenAI_API.OpenAIAPI.Files">
<summary>
The API lets you do operations with files. You can upload, delete or retrieve files. Files can be used for fine-tuning, search, etc.
</summary>
</member>
<member name="P:OpenAI_API.OpenAIAPI.ImageGenerations">
<summary>
The API lets you do operations with images. Given a prompt and/or an input image, the model will generate a new image.
</summary>
</member>
<member name="T:OpenAI_API.Usage">
<summary>
Usage statistics of how many tokens have been used for this request.
</summary>
</member>
<member name="P:OpenAI_API.Usage.PromptTokens">
<summary>
The number of tokens in the prompt
</summary>
</member>
<member name="P:OpenAI_API.Usage.TotalTokens">
<summary>
The total number of tokens consumed by the request
</summary>
</member>
</members>
</doc>