diff --git a/openai-api/src/main/java/openai/OpenAiResponse.java b/openai-api/src/main/java/openai/OpenAiResponse.java
index b58d766..93274ad 100644
--- a/openai-api/src/main/java/openai/OpenAiResponse.java
+++ b/openai-api/src/main/java/openai/OpenAiResponse.java
@@ -4,8 +4,18 @@ import lombok.Data;
 
 import java.util.List;
 
+/**
+ * A wrapper class to fit the OpenAI engine and search endpoints
+ */
 @Data
 public class OpenAiResponse {
+    /**
+     * A list containing the actual results
+     */
     public List data;
+
+    /**
+     * The type of object returned, should be "list"
+     */
     public String object;
 }
diff --git a/openai-api/src/main/java/openai/completion/CompletionChoice.java b/openai-api/src/main/java/openai/completion/CompletionChoice.java
index 64c8f50..0ed87cc 100644
--- a/openai-api/src/main/java/openai/completion/CompletionChoice.java
+++ b/openai-api/src/main/java/openai/completion/CompletionChoice.java
@@ -2,10 +2,26 @@ package openai.completion;
 
 import lombok.Data;
 
+/**
+ * A completion generated by GPT-3
+ *
+ * https://beta.openai.com/docs/api-reference/create-completion
+ */
 @Data
 public class CompletionChoice {
+    /**
+     * The generated text. Will include the prompt if {@link CompletionRequest#echo} is true
+     */
     String text;
+
+    /**
+     * The index of this completion in the returned list.
+     */
     Integer index;
     // todo add logprobs
+
+    /**
+     * The reason why GPT-3 stopped generating, for example "length".
+     */
     String finish_reason;
 }
diff --git a/openai-api/src/main/java/openai/completion/CompletionRequest.java b/openai-api/src/main/java/openai/completion/CompletionRequest.java
index 1591321..f55a202 100644
--- a/openai-api/src/main/java/openai/completion/CompletionRequest.java
+++ b/openai-api/src/main/java/openai/completion/CompletionRequest.java
@@ -4,18 +4,97 @@ import lombok.Data;
 
 import java.util.List;
 
+/**
+ * A request for OpenAI to generate a predicted completion for a prompt.
+ * All fields are nullable.
+ *
+ * Documentation taken from
+ * https://beta.openai.com/docs/api-reference/create-completion
+ */
 @Data
 public class CompletionRequest {
+    /**
+     * An optional prompt to complete from
+     */
     String prompt;
+
+    /**
+     * The maximum number of tokens to generate.
+     * Requests can use up to 2048 tokens shared between prompt and completion.
+     * (One token is roughly 4 characters for normal English text)
+     */
     Integer maxTokens;
+
+    /**
+     * What sampling temperature to use. Higher values mean the model will take more risks.
+     * Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
+     *
+     * We generally recommend using this or {@link CompletionRequest#topP} but not both.
+     */
     Double temperature;
+
+    /**
+     * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of
+     * the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are
+     * considered.
+     *
+     * We generally recommend using this or {@link CompletionRequest#temperature} but not both.
+     */
     Double topP;
+
+    /**
+     * How many completions to generate for each prompt.
+     *
+     * Because this parameter generates many completions, it can quickly consume your token quota.
+     * Use carefully and ensure that you have reasonable settings for {@link CompletionRequest#maxTokens} and {@link CompletionRequest#stop}.
+     */
     Integer n;
+
+    /**
+     * Whether to stream back partial progress.
+     * If set, tokens will be sent as data-only server-sent events as they become available,
+     * with the stream terminated by a data: [DONE] message.
+     */
     Boolean stream;
+
+    /**
+     * Include the log probabilities on the logprobs most likely tokens, as well as the chosen tokens.
+     * For example, if logprobs is 10, the API will return a list of the 10 most likely tokens.
+     * The API will always return the logprob of the sampled token,
+     * so there may be up to logprobs+1 elements in the response.
+     */
     Integer logprobs;
+
+    /**
+     * Echo back the prompt in addition to the completion
+     */
     Boolean echo;
+
+    /**
+     * Up to 4 sequences where the API will stop generating further tokens.
+     * The returned text will not contain the stop sequence.
+     */
     List stop; //todo test this
+
+    /**
+     * Number between 0 and 1 (default 0) that penalizes new tokens based on whether they appear in the text so far.
+     * Increases the model's likelihood to talk about new topics.
+     */
     Double presencePenalty;
+
+    /**
+     * Number between 0 and 1 (default 0) that penalizes new tokens based on their existing frequency in the text so far.
+     * Decreases the model's likelihood to repeat the same line verbatim.
+     */
     Double frequencyPenalty;
+
+    /**
+     * Generates best_of completions server-side and returns the "best"
+     * (the one with the lowest log probability per token).
+     * Results cannot be streamed.
+     *
+     * When used with {@link CompletionRequest#n}, best_of controls the number of candidate completions and n specifies how many to return;
+     * best_of must be greater than n.
+     */
     Integer bestOf;
 }
diff --git a/openai-api/src/main/java/openai/completion/CompletionResult.java b/openai-api/src/main/java/openai/completion/CompletionResult.java
index 7c1df97..14eeec5 100644
--- a/openai-api/src/main/java/openai/completion/CompletionResult.java
+++ b/openai-api/src/main/java/openai/completion/CompletionResult.java
@@ -4,11 +4,35 @@ import lombok.Data;
 
 import java.util.List;
 
+/**
+ * An object containing a response from the completion API
+ *
+ * https://beta.openai.com/docs/api-reference/create-completion
+ */
 @Data
 public class CompletionResult {
+    /**
+     * A unique id assigned to this completion
+     */
     String id;
+
+    /**
+     * The type of object returned, should be "text_completion"
+     */
     String object;
+
+    /**
+     * The creation time in epoch seconds.
+     */
     long created;
+
+    /**
+     * The GPT-3 model used
+     */
     String model;
+
+    /**
+     * A list of generated completions
+     */
     List choices;
 }
diff --git a/openai-api/src/main/java/openai/engine/Engine.java b/openai-api/src/main/java/openai/engine/Engine.java
index d23f4a1..304dcdd 100644
--- a/openai-api/src/main/java/openai/engine/Engine.java
+++ b/openai-api/src/main/java/openai/engine/Engine.java
@@ -2,10 +2,30 @@ package openai.engine;
 
 import lombok.Data;
 
+/**
+ * GPT-3 engine details
+ *
+ * https://beta.openai.com/docs/api-reference/retrieve-engine
+ */
 @Data
 public class Engine {
+    /**
+     * An identifier for this engine, used to specify an engine for completions or searching.
+     */
     public String id;
+
+    /**
+     * The type of object returned, should be "engine"
+     */
     public String object;
+
+    /**
+     * The owner of the GPT-3 engine, typically "openai"
+     */
     public String owner;
+
+    /**
+     * Whether the engine is ready to process requests or not
+     */
     public boolean ready;
 }
diff --git a/openai-api/src/main/java/openai/search/SearchRequest.java b/openai-api/src/main/java/openai/search/SearchRequest.java
index 65bbada..8e913f0 100644
--- a/openai-api/src/main/java/openai/search/SearchRequest.java
+++ b/openai-api/src/main/java/openai/search/SearchRequest.java
@@ -4,8 +4,22 @@ import lombok.Data;
 
 import java.util.List;
 
+/**
+ * A request to the document search API.
+ * GPT-3 will perform a semantic search over the documents and score them based on how related they are to the query.
+ * Higher scores indicate a stronger relation.
+ *
+ * https://beta.openai.com/docs/api-reference/search
+ */
 @Data
 public class SearchRequest {
+    /**
+     * Documents to search over
+     */
     List documents;
+
+    /**
+     * Search query
+     */
     String query;
 }
diff --git a/openai-api/src/main/java/openai/search/SearchResult.java b/openai-api/src/main/java/openai/search/SearchResult.java
index bdf35a2..de923f5 100644
--- a/openai-api/src/main/java/openai/search/SearchResult.java
+++ b/openai-api/src/main/java/openai/search/SearchResult.java
@@ -2,9 +2,26 @@ package openai.search;
 
 import lombok.Data;
 
+/**
+ * A search result for a single document.
+ *
+ * https://beta.openai.com/docs/api-reference/search
+ */
 @Data
 public class SearchResult {
+    /**
+     * The position of this document in the request list
+     */
     Integer document;
+
+    /**
+     * The type of object returned, should be "search_result"
+     */
     String object;
+
+    /**
+     * A number measuring the document's correlation with the query.
+     * A higher score means a stronger relationship.
+     */
     Double score;
 }
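Usage note (illustrative only, not part of the patch): a minimal sketch of how calling code might populate a CompletionRequest and read back a CompletionResult using the accessors Lombok generates from @Data. The example class name, prompt, and parameter values are made up, and the HTTP/service layer that actually sends the request is assumed to live elsewhere in the project.

import openai.completion.CompletionChoice;
import openai.completion.CompletionRequest;
import openai.completion.CompletionResult;

class CompletionRequestExample {

    // Build a request using the setters Lombok generates for the fields documented above.
    static CompletionRequest buildRequest() {
        CompletionRequest request = new CompletionRequest();
        request.setPrompt("Write a haiku about the sea");
        request.setMaxTokens(32);
        request.setTemperature(0.9); // alternatively setTopP(...), but not both
        request.setEcho(false);
        return request;
    }

    // Read the documented response fields; choices is a raw List in this patch, so a cast is needed.
    static void printChoices(CompletionResult result) {
        System.out.println(result.getModel() + " @ " + result.getCreated());
        for (Object choice : result.getChoices()) {
            System.out.println(((CompletionChoice) choice).getText());
        }
    }
}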