 
 import json
 from dataclasses import asdict, dataclass, field
-from typing import Dict, List, Literal, Optional, Tuple, Union
-
-
-@dataclass
-class ResponseFormat:
-    """The response format dataclass.
-
-    Parameters
-    ----------
-    type : Literal["text", "json_object"]
-        The type of response format. Default: "text".
-
-    schema : Optional[str]
-        The JSON schema string for the JSON response format. If None, a legal JSON string
-        without special restrictions will be generated.
-
-        May only be specified when the response format is "json_object". Default: None.
-    """
-
-    type: Literal["text", "json_object"] = "text"
-    schema: Optional[str] = None
-
-    def __post_init__(self):
-        if self.schema is not None and self.type != "json_object":
-            raise ValueError("JSON schema is only supported in JSON response format")
-
-
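For reference, a minimal usage sketch of the removed class, relying only on ResponseFormat as defined in the deleted code above (the schema value is hypothetical):

    # A JSON response format constrained by a schema passes validation.
    fmt = ResponseFormat(type="json_object", schema='{"type": "object"}')

    # Supplying a schema while type stays "text" trips __post_init__.
    try:
        ResponseFormat(schema='{"type": "object"}')
    except ValueError as err:
        print(err)  # JSON schema is only supported in JSON response format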
-@dataclass
-class DebugConfig:
-    """The debug configuration dataclass.
-
-    Parameters
-    ----------
-    ignore_eos : bool
-        When it is true, ignore the EOS token and generate tokens until `max_tokens`.
-        Default is set to False.
-
-    pinned_system_prompt : bool
-        Whether the input and generated data are pinned in the engine. Default is set
-        to False. This can be used for the system prompt or for other purposes where
-        the data is meant to be kept all the time.
-
-    special_request : Optional[Literal["query_engine_metrics"]]
-        The special request to send to the engine.
-    """
-
-    ignore_eos: bool = False
-    pinned_system_prompt: bool = False
-    special_request: Optional[Literal["query_engine_metrics"]] = None
-
-
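A short sketch of the removed DebugConfig in use (values hypothetical): force generation to run all the way to `max_tokens` and ask the engine for its metrics.

    dbg = DebugConfig(ignore_eos=True, special_request="query_engine_metrics")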
-@dataclass
-class GenerationConfig:  # pylint: disable=too-many-instance-attributes
-    """The generation configuration dataclass.
-
-    Parameters
-    ----------
-    n : int
-        How many chat completion choices to generate for each input message.
-
-    temperature : Optional[float]
-        The value that applies to logits and modulates the next token probabilities.
-
-    top_p : Optional[float]
-        In sampling, only the most probable tokens with probabilities summed up to
-        `top_p` are kept for sampling.
-
-    frequency_penalty : Optional[float]
-        Positive values penalize new tokens based on their existing frequency
-        in the text so far, decreasing the model's likelihood to repeat the same
-        line verbatim.
-
-    presence_penalty : Optional[float]
-        Positive values penalize new tokens based on whether they appear in the text
-        so far, increasing the model's likelihood to talk about new topics.
-
-    repetition_penalty : float
-        The penalty term that applies to logits to control token repetition in generation.
-        It is suppressed when either frequency_penalty or presence_penalty is non-zero.
-
-    logprobs : bool
-        Whether to return log probabilities of the output tokens or not.
-        If true, the log probabilities of each output token will be returned.
-
-    top_logprobs : int
-        An integer between 0 and 5 specifying the number of most likely
-        tokens to return at each token position, each with an associated
-        log probability. `logprobs` must be set to True if this parameter is used.
-
-    logit_bias : Optional[Dict[int, float]]
-        The bias logit value added to selected tokens prior to sampling.
-
-    max_tokens : Optional[int]
-        The maximum number of generated tokens, or None, in which case generation
-        does not stop until it exceeds the model capability or hits any stop criterion.
-
-    seed : Optional[int]
-        The random seed of the generation. The seed will be a random value if not specified.
-
-    stop_strs : List[str]
-        The list of strings that mark the end of generation.
-
-    stop_token_ids : List[int]
-        The list of token ids that mark the end of generation.
-
-    response_format : ResponseFormat
-        The response format of the generation output.
-
-    debug_config : Optional[DebugConfig]
-        The optional debug configuration.
-    """
-
-    n: int = 1
-    temperature: Optional[float] = None
-    top_p: Optional[float] = None
-    frequency_penalty: Optional[float] = None
-    presence_penalty: Optional[float] = None
-    repetition_penalty: float = 1.0
-    logprobs: bool = False
-    top_logprobs: int = 0
-    logit_bias: Optional[Dict[int, float]] = field(default_factory=dict)  # type: ignore
-
-    max_tokens: Optional[int] = 128
-    seed: Optional[int] = None
-    stop_strs: List[str] = field(default_factory=list)
-    stop_token_ids: List[int] = field(default_factory=list)
-
-    response_format: ResponseFormat = field(default_factory=ResponseFormat)
-
-    debug_config: Optional[DebugConfig] = field(default_factory=DebugConfig)
-
-    def asjson(self) -> str:
-        """Return the config as a JSON string."""
-        return json.dumps(asdict(self))
-
-    @staticmethod
-    def from_json(json_str: str) -> "GenerationConfig":
-        """Construct a config from a JSON string."""
-        return GenerationConfig(**json.loads(json_str))
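A sketch of the removed serialization round trip (values hypothetical). Note that `from_json` feeds the decoded dict straight into the constructor, so the nested fields come back as plain dicts rather than dataclass instances:

    cfg = GenerationConfig(temperature=0.7, top_p=0.95, max_tokens=256, stop_strs=["</s>"])
    payload = cfg.asjson()                         # json.dumps(asdict(cfg))
    restored = GenerationConfig.from_json(payload)

    # asdict() flattened response_format/debug_config, and from_json() does not rebuild them.
    assert isinstance(restored.response_format, dict)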
+from typing import List, Literal, Optional, Tuple, Union
 
 
 @dataclass