class ChatCompletionParams {

  constructor({
    messages,
    model,
    frequency_penalty,
    function_call,
    functions,
    logit_bias,
    max_tokens,
    n,
    presence_penalty,
    stop,
    stream,
    temperature,
    top_p,
    user,
  }) {
    this.messages = messages;
    this.model = model;
    this.frequency_penalty = frequency_penalty;
    this.function_call = function_call;
    this.functions = functions;
    this.logit_bias = logit_bias;
    this.max_tokens = max_tokens;
    this.n = n;
    this.presence_penalty = presence_penalty;
    this.stop = stop;
    this.stream = stream;
    this.temperature = temperature;
    this.top_p = top_p;
    this.user = user;
  }

  /**
   * A list of messages comprising the conversation so far.
   * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb).
   */
  messages; // List
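  // An illustrative value (roles and content are examples only):
  //   [
  //     { role: 'system', content: 'You are a helpful assistant.' },
  //     { role: 'user', content: 'Hello!' },
  //   ]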

  /**
   * ID of the model to use. See the
   * [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
   * table for details on which models work with the Chat API.
   *
   * (string & {}) | 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k'
   * | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-16k'
   * | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k-0613'
   */
  model;

  /**
   * Number between -2.0 and 2.0. Positive values penalize new tokens based on their
   * existing frequency in the text so far, decreasing the model's likelihood to
   * repeat the same line verbatim.
   *
   * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
   */
  frequency_penalty; // number | null

  /**
   * Controls how the model responds to function calls. `none` means the model does
   * not call a function, and responds to the end-user. `auto` means the model can
   * pick between generating a message or calling a function. Specifying a particular
   * function via `{"name": "my_function"}` forces the model to call that function.
   * `none` is the default when no functions are present. `auto` is the default if
   * functions are present.
   */
  function_call; // 'none' | 'auto' | ChatCompletionCreateParams.FunctionCallOption
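  // Illustrative values for each accepted shape ('my_function' is hypothetical):
  //   'none'
  //   'auto'
  //   { name: 'my_function' }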

  /**
   * A list of functions the model may generate JSON inputs for.
   */
  functions; // Array<ChatCompletionCreateParams.Function>
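  // A sketch of a single entry; the name and JSON Schema parameters are
  // hypothetical:
  //   {
  //     name: 'get_current_weather',
  //     description: 'Get the current weather for a location',
  //     parameters: {
  //       type: 'object',
  //       properties: { location: { type: 'string' } },
  //       required: ['location'],
  //     },
  //   }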

  /**
   * Modify the likelihood of specified tokens appearing in the completion.
   *
   * Accepts a JSON object that maps tokens (specified by their token ID in the
   * tokenizer) to an associated bias value from -100 to 100. Mathematically, the
   * bias is added to the logits generated by the model prior to sampling. The exact
   * effect will vary per model, but values between -1 and 1 should decrease or
   * increase likelihood of selection; values like -100 or 100 should result in a ban
   * or exclusive selection of the relevant token.
   */
  logit_bias; // Record<string, number> | null
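  // Illustrative value (the token ID is hypothetical):
  //   { '50256': -100 }  // effectively bans token 50256 from the completion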

  /**
   * The maximum number of [tokens](/tokenizer) to generate in the chat completion.
   *
   * The total length of input tokens and generated tokens is limited by the model's
   * context length.
   * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb)
   * for counting tokens.
   */
  max_tokens; // number | null

  /**
   * How many chat completion choices to generate for each input message.
   */
  n; // number | null

  /**
   * Number between -2.0 and 2.0. Positive values penalize new tokens based on
   * whether they appear in the text so far, increasing the model's likelihood to
   * talk about new topics.
   *
   * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
   */
  presence_penalty; // number | null

  /**
   * Up to 4 sequences where the API will stop generating further tokens.
   */
  stop; // string | null | Array<string>
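  // Illustrative values: '\n' for a single sequence, or up to four in an
  // array, e.g. ['END', 'STOP'].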

  /**
   * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
   * sent as data-only
   * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
   * as they become available, with the stream terminated by a `data: [DONE]`
   * message.
   * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
   */
  stream; // boolean | null
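  // With stream: true, the response arrives as data-only server-sent events,
  // roughly like this (payloads abbreviated; real events carry more fields):
  //   data: {"choices":[{"delta":{"content":"Hel"}}]}
  //   data: {"choices":[{"delta":{"content":"lo"}}]}
  //   data: [DONE]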

  /**
   * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
   * make the output more random, while lower values like 0.2 will make it more
   * focused and deterministic.
   *
   * We generally recommend altering this or `top_p` but not both.
   */
  temperature; // number | null

  /**
   * An alternative to sampling with temperature, called nucleus sampling, where the
   * model considers the results of the tokens with top_p probability mass. So 0.1
   * means only the tokens comprising the top 10% probability mass are considered.
   *
   * We generally recommend altering this or `temperature` but not both.
   */
  top_p; // number | null

  /**
   * A unique identifier representing your end-user, which can help OpenAI to monitor
   * and detect abuse.
   * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
   */
  user; // string
}

module.exports = ChatCompletionParams;
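
// Example usage, as a minimal sketch: the endpoint URL and Authorization
// header follow the public Chat Completions REST API, and OPENAI_API_KEY is
// an assumed environment variable. JSON.stringify drops fields left
// undefined, so only the parameters you set are sent.
//
//   const params = new ChatCompletionParams({
//     model: 'gpt-3.5-turbo',
//     messages: [{ role: 'user', content: 'Say hello.' }],
//     temperature: 0.2,
//   });
//
//   const res = await fetch('https://api.openai.com/v1/chat/completions', {
//     method: 'POST',
//     headers: {
//       'Content-Type': 'application/json',
//       Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
//     },
//     body: JSON.stringify(params),
//   });
//   const completion = await res.json();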