@@ -1,8 +1,7 @@
 from typing import Any, Dict, List, Optional, Union

-from . import api
+from . import api, types
 from . import client as _client
-from . import types


 class _LemurImpl:
@@ -173,10 +172,11 @@ def question(
         Args:
             questions: One or a list of questions to ask.
             context: The context which is shared among all questions. This can be a string or a dictionary.
-            final_model: The model that is used for the final prompt after compression is performed (options: "basic" and "default").
+            final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", and "assemblyai/mistral-7b").
             max_output_size: Max output size in tokens
             timeout: The timeout in seconds to wait for the answer(s).
             temperature: Change how deterministic the response is, with 0 being the most deterministic and 1 being the least deterministic.
+            input_text: Custom formatted transcript data. Use instead of transcript_ids.

         Returns: One or a list of answer objects.
         """
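For context, a minimal sketch of how the widened `final_model` options and the new `input_text` parameter might be used with `question`. The API key, the sample text, and the response access (`result.response[0].answer`) are assumptions based on this diff and the SDK's documented usage, not part of the change itself:

```python
import assemblyai as aai

aai.settings.api_key = "YOUR_API_KEY"  # placeholder key

# Ask a question over custom transcript text instead of transcript_ids.
result = aai.Lemur().question(
    questions=aai.LemurQuestion(question="What topics were discussed?"),
    input_text="Speaker A: Let's review the Q3 roadmap ...",  # hypothetical text
    final_model="assemblyai/mistral-7b",  # newly documented option
)
print(result.response[0].answer)
```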
@@ -214,10 +214,11 @@ def summarize(
         Args:
             context: An optional context on the transcript.
             answer_format: The format in which the summary should be returned.
-            final_model: The model that is used for the final prompt after compression is performed (options: "basic" and "default").
+            final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", and "assemblyai/mistral-7b").
             max_output_size: Max output size in tokens
             timeout: The timeout in seconds to wait for the summary.
             temperature: Change how deterministic the response is, with 0 being the most deterministic and 1 being the least deterministic.
+            input_text: Custom formatted transcript data. Use instead of transcript_ids.

         Returns: The summary as a string.
         """
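Similarly, a hedged example for `summarize`, here run against a stored transcript rather than `input_text`. The audio URL, the `context`, and the `answer_format` value are placeholders; `transcript.lemur` is the SDK's documented way to bind LeMUR to a transcript:

```python
import assemblyai as aai

transcript = aai.Transcriber().transcribe("https://example.com/meeting.mp3")

# Summarize with the new model option; max_output_size caps the token count.
result = transcript.lemur.summarize(
    context="A weekly engineering sync.",
    answer_format="bullet points",
    final_model="assemblyai/mistral-7b",
    max_output_size=1000,
)
print(result.response)
```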
@@ -253,10 +254,11 @@ def action_items(
         Args:
             context: An optional context on the transcript.
             answer_format: The preferred format for the result action items.
-            final_model: The model that is used for the final prompt after compression is performed (options: "basic" and "default").
+            final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", and "assemblyai/mistral-7b").
             max_output_size: Max output size in tokens
             timeout: The timeout in seconds to wait for the action items response.
             temperature: Change how deterministic the response is, with 0 being the most deterministic and 1 being the least deterministic.
+            input_text: Custom formatted transcript data. Use instead of transcript_ids.

         Returns: The action items as a string.
         """
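`action_items` follows the same pattern; a sketch using `input_text`, with the meeting text invented purely for illustration:

```python
import assemblyai as aai

# Pull action items out of custom-formatted text rather than transcript_ids.
result = aai.Lemur().action_items(
    input_text=(
        "Alice: I'll send the report by Friday.\n"
        "Bob: I'll book the venue for the offsite."
    ),
    answer_format="bullet points",
    final_model="assemblyai/mistral-7b",
)
print(result.response)
```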
@@ -287,10 +289,11 @@ def task(

         Args:
             prompt: The prompt to use for this task.
-            final_model: The model that is used for the final prompt after compression is performed (options: "basic" and "default").
+            final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", and "assemblyai/mistral-7b").
             max_output_size: Max output size in tokens
             timeout: The timeout in seconds to wait for the task.
             temperature: Change how deterministic the response is, with 0 being the most deterministic and 1 being the least deterministic.
+            input_text: Custom formatted transcript data. Use instead of transcript_ids.

         Returns: A response to a question or task submitted via custom prompt (with source transcripts or other sources taken into the context)
         """
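Finally, a sketch of the free-form `task` endpoint with the same new parameters. The prompt and input are illustrative only, and `temperature` is set low per the docstring's determinism note:

```python
import assemblyai as aai

# Run an arbitrary prompt against custom transcript data.
result = aai.Lemur().task(
    prompt="List each speaker's main concern in one sentence.",
    input_text=(
        "Speaker A: The deadline worries me.\n"
        "Speaker B: The budget is tight."
    ),
    final_model="assemblyai/mistral-7b",
    temperature=0.1,  # closer to 0 = more deterministic
)
print(result.response)
```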
|