Update app_dialogue.py
app_dialogue.py CHANGED (+16 -3)
@@ -314,16 +314,21 @@ def format_user_prompt_with_im_history_and_system_conditioning(
 
     # Format history
     for turn in history:
+        print(f"inside for loop: turn is - {turn}")
         user_utterance, assistant_utterance = turn
+        print(f"inside for loop: user_utterance is - {user_utterance}, assistant_utterance is - {assistant_utterance}")
         assistant_utterance = assistant_utterance.replace(ASSISTANT_PREPEND, "")
         splitted_user_utterance = split_str_on_im_markdown(user_utterance)
+        print(f"inside for loop: splitted_user_utterance after a call to fun split_str_on_im_markdown() is - {splitted_user_utterance}")
 
         optional_space = ""
         if not is_image(splitted_user_utterance[0]):
             optional_space = " "
         resulting_list.append(f"\nUser:{optional_space}")
         resulting_list.extend(splitted_user_utterance)
+        print(f"inside for loop: resulting_list after extending with 'splitted_user_utterance' is - {resulting_list}")
         resulting_list.append(f"<end_of_utterance>\nAssistant: {assistant_utterance}")
+        print(f"inside for loop: resulting_list after appending is - {resulting_list}")
 
     # Format current input
     current_user_prompt_str = remove_spaces_around_token(current_user_prompt_str)
@@ -343,6 +348,7 @@ def format_user_prompt_with_im_history_and_system_conditioning(
     else:
         # Choosing to put the image first when the image is inputted through the UI, but this is an arbiratrary choice.
         resulting_list.extend(["\nUser:", current_image, f"{current_user_prompt_str}<end_of_utterance>\nAssistant:"])
+        print(f"inside else: current_image is not None; resulting_list after extending is - {resulting_list}")
         current_user_prompt_list = [current_user_prompt_str]
 
     print(f"returns - resulting_list - {resulting_list}, current_user_prompt_list - {current_user_prompt_list}")
@@ -536,6 +542,7 @@ with gr.Blocks(title="IDEFICS Playground", theme=gr.themes.Base()) as demo:
         print(f"params: max_new_tokens - {max_new_tokens}")
         print(f"params: repetition_penalty - {repetition_penalty}")
         print(f"params: top_p - {top_p}")
+
         if user_prompt_str.strip() == "" and image is None:
             return "", None, chat_history
 
@@ -544,6 +551,8 @@ with gr.Blocks(title="IDEFICS Playground", theme=gr.themes.Base()) as demo:
             current_image=image,
             history=chat_history,
         )
+        print(f"formated_prompt_list is - {formated_prompt_list}")
+        print(f"user_prompt_list is - {user_prompt_list}")
 
         client_endpoint = API_PATHS[model_selector]
         client = Client(
@@ -573,6 +582,7 @@ with gr.Blocks(title="IDEFICS Playground", theme=gr.themes.Base()) as demo:
         if image is None:
             # Case where there is no image OR the image is passed as `<fake_token_around_image><image:IMAGE_URL><fake_token_around_image>`
             chat_history.append([prompt_list_to_markdown(user_prompt_list), ASSISTANT_PREPEND])
+            print(f"image is NONE; Chat_history after append is - {chat_history}")
         else:
             # Case where the image is passed through the Image Box.
             # Convert the image into base64 for both passing it through the chat history and
@@ -583,15 +593,18 @@ with gr.Blocks(title="IDEFICS Playground", theme=gr.themes.Base()) as demo:
                     ASSISTANT_PREPEND,
                 ]
             )
+            print(f"image is NOT NONE; Chat_history after append is - {chat_history}")
 
         query = prompt_list_to_tgi_input(formated_prompt_list)
+        print(f"query is - {query}")
         stream = client.generate_stream(prompt=query, **generation_args)
 
         print(f"chat_history just before for loop - {chat_history}")
         acc_text = ""
         for idx, response in enumerate(stream):
             text_token = response.token.text
-
+            print(f"text_token is - {text_token}")
+
             if response.details:
                 # That's the exit condition
                 return
@@ -611,8 +624,8 @@ with gr.Blocks(title="IDEFICS Playground", theme=gr.themes.Base()) as demo:
             #print(f"last_turnp[-1] is - {last_turn[-1]}")
             #chat_history.append(last_turn)
             chat_history[-1][-1] = acc_text
-            print(f"yields - chat_history -{chat_history} ")
-            print(f"acc_text -{acc_text} ")
+            #print(f"yields - chat_history -{chat_history} ")
+            #print(f"acc_text -{acc_text} ")
             yield "", None, chat_history
             #acc_text = ""
 
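For context on the streaming loop that the new debug prints instrument, here is a minimal sketch of consuming a text-generation-inference stream with the `text_generation` client. This is an illustration under assumptions, not the Space's code: the endpoint URL and generation parameters are placeholders, and only the calls already visible in the diff (`Client`, `generate_stream`, `response.token.text`, `response.details`) are taken from it.

```python
# Minimal sketch (assumptions: endpoint URL and generation args are placeholders,
# not the Space's actual configuration).
from text_generation import Client

client = Client("http://localhost:8080")  # hypothetical TGI endpoint
generation_args = {
    "max_new_tokens": 64,
    "repetition_penalty": 1.2,
    "top_p": 0.95,
    "do_sample": True,
}

query = "User: Describe this image.<end_of_utterance>\nAssistant:"
acc_text = ""
for response in client.generate_stream(prompt=query, **generation_args):
    text_token = response.token.text
    print(f"text_token is - {text_token}")  # same shape as the print added in the diff
    if not response.token.special:
        acc_text += text_token
    if response.details:  # details are only set on the final message of the stream
        break
print(acc_text)
```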