[responsesAPI][6] input/output messages for ResponsesParser #30158
base: main
```diff
@@ -266,11 +266,38 @@ def __init__(
         self.chat_template = chat_template
         self.chat_template_content_format = chat_template_content_format
 
+        self.input_messages: list[ResponseRawMessageAndToken] = []
+        self.output_messages: list[ResponseRawMessageAndToken] = []
+
     def append_output(self, output: RequestOutput) -> None:
         self.num_prompt_tokens = len(output.prompt_token_ids or [])
         self.num_cached_tokens = output.num_cached_tokens or 0
         self.num_output_tokens += len(output.outputs[0].token_ids or [])
         self.parser.process(output.outputs[0])
+        output_prompt = output.prompt or ""
+        output_prompt_token_ids = output.prompt_token_ids or []
+        if len(self.input_messages) == 0:
+            self.input_messages.append(
+                ResponseRawMessageAndToken(
+                    message=output_prompt,
+                    tokens=output_prompt_token_ids,
+                )
+            )
+        else:
+            # TODO: merge them in properly together
+            # TODO: responsesParser doesn't parse kimi k2 sentences correctly
```
Contributor commented on lines +287 to +288:

> These …
```diff
+            self.output_messages.append(
+                ResponseRawMessageAndToken(
+                    message=output_prompt,
+                    tokens=output_prompt_token_ids,
+                )
+            )
+        self.output_messages.append(
+            ResponseRawMessageAndToken(
+                message=output.outputs[0].text,
+                tokens=output.outputs[0].token_ids,
+            )
+        )
 
     def append_tool_output(self, output: list[ResponseInputOutputItem]) -> None:
         self.parser.response_messages.extend(output)
```
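`ResponseRawMessageAndToken` itself is not shown in this diff. Judging only from the two fields the code above uses (`message` and `tokens`), it is presumably a small record pairing raw text with its token ids; a minimal sketch under that assumption:

```python
# Minimal sketch, assuming ResponseRawMessageAndToken simply pairs raw text
# with its token ids; the actual definition is not part of this diff.
from collections.abc import Sequence
from dataclasses import dataclass, field


@dataclass
class ResponseRawMessageAndToken:
    message: str  # raw prompt or completion text, exactly as seen by the model
    tokens: Sequence[int] = field(default_factory=list)  # token ids for `message`
```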
```diff
@@ -1339,6 +1339,7 @@ async def _generate_with_builtin_tools(
         )
         engine_prompt = engine_prompts[0]
         request_prompt = request_prompts[0]
+        prompt_text, _, _ = self._get_prompt_components(request_prompt)
 
         # Update the sampling params.
         sampling_params.max_tokens = self.max_model_len - len(
```
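The helper `_get_prompt_components` is likewise not shown in the diff; the unpacking above only implies it returns a 3-tuple whose first element is the prompt text. A hypothetical simplified version, purely for illustration:

```python
# Hypothetical sketch: only the 3-tuple shape and the fact that the first
# element is the prompt text are implied by the call site above; the real
# vLLM helper may differ.
def _get_prompt_components(prompt):
    if isinstance(prompt, str):
        # Plain-text prompt: no pre-tokenized ids or embeddings.
        return prompt, None, None
    # Dict-style prompts may carry text, token ids, and/or embeddings.
    return (
        prompt.get("prompt"),
        prompt.get("prompt_token_ids"),
        prompt.get("prompt_embeds"),
    )
```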
vllm/entrypoints/openai/serving_responses.py

```diff
@@ -318,6 +318,8 @@
         if maybe_validation_error is not None:
             return maybe_validation_error
 
+        fbvscode.set_trace()
+
         # If the engine is dead, raise the engine's DEAD_ERROR.
         # This is required for the streaming case, where we return a
         # success status before we actually start generating text :).
```

Check failure on line 321 in vllm/entrypoints/openai/serving_responses.py (the `fbvscode.set_trace()` call).
```diff
@@ -656,12 +658,9 @@
         ]
         output = make_response_output_items_from_parsable_context(response_messages)
 
-        # TODO: context for non-gptoss models doesn't use messages
-        # so we can't get them out yet
-        if request.enable_response_messages:
-            raise NotImplementedError(
-                "enable_response_messages is currently only supported for gpt-oss"
-            )
+        input_messages = context.input_messages
+        output_messages = context.output_messages
```
Contributor commented on lines +662 to +663:

> The `input_messages` list is only populated during the first call to `append_output` due to the `if len(self.input_messages) == 0:` condition. In a multi-turn conversation, subsequent prompts (which are part of `output.prompt`) will not be added to `input_messages`. This will result in `input_messages` not accurately reflecting all prompts sent to the model across turns, which is likely unintended for a comprehensive input message log.
```diff
 
         # TODO: Calculate usage.
         # assert final_res.prompt_token_ids is not None
```
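One possible direction for the issue the reviewer raises, sketched under assumptions (this is not code from the PR): record the prompt of every turn in `input_messages` rather than only the first, leaving finer-grained splitting to the PR's existing merge TODO.

```python
# Sketch only, not from the PR: a revised append_output that records every
# turn's prompt in input_messages instead of only the first one. Avoiding
# re-logging the replayed portion of earlier turns is left open, as in the
# PR's "TODO: merge them in properly together".
def append_output(self, output: RequestOutput) -> None:
    self.num_prompt_tokens = len(output.prompt_token_ids or [])
    self.num_cached_tokens = output.num_cached_tokens or 0
    self.num_output_tokens += len(output.outputs[0].token_ids or [])
    self.parser.process(output.outputs[0])
    # Record the prompt on every turn so multi-turn input is not dropped.
    self.input_messages.append(
        ResponseRawMessageAndToken(
            message=output.prompt or "",
            tokens=output.prompt_token_ids or [],
        )
    )
    # Record the completion for this turn.
    self.output_messages.append(
        ResponseRawMessageAndToken(
            message=output.outputs[0].text,
            tokens=output.outputs[0].token_ids,
        )
    )
```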
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The
input_messageslist is only populated during the first call toappend_outputdue to theif len(self.input_messages) == 0:condition. In a multi-turn conversation, subsequent prompts (which are part ofoutput.prompt) will not be added toinput_messages. This will result ininput_messagesnot accurately reflecting all prompts sent to the model across turns, which is likely unintended for a comprehensive input message log.
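With the `NotImplementedError` removed, a client should be able to request the raw messages for any model, not just gpt-oss. A hypothetical request against a locally served vLLM instance (URL and model name are placeholders, and passing the flag via `extra_body` assumes `enable_response_messages` is accepted as a non-standard Responses API field, as the removed code above suggests):

```python
# Hypothetical usage sketch: ask a local vLLM server to include the raw
# input/output messages alongside the response.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
response = client.responses.create(
    model="my-model",  # placeholder model name
    input="What is the capital of France?",
    extra_body={"enable_response_messages": True},
)
print(response)
```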