# Local MedAlpaca-13b inference client
import os
import logging
import torch
from typing import Optional
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
import gc
logger = logging.getLogger("local_llm")
if not logger.handlers:
    logger.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    logger.addHandler(handler)
class MedAlpacaClient:
"""Local MedAlpaca-13b client for medical text generation"""
def __init__(self, model_name: str = "medalpaca/medalpaca-13b", hf_token: str = None):
self.model_name = model_name
self.hf_token = hf_token or os.getenv("HF_TOKEN")
self.model = None
self.tokenizer = None
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.is_loaded = False
logger.info(f"[LOCAL_LLM] Initializing MedAlpaca client on device: {self.device}")
def load_model(self):
"""Load the MedAlpaca model and tokenizer"""
if self.is_loaded:
return
try:
logger.info(f"[LOCAL_LLM] Loading MedAlpaca model: {self.model_name}")
# Configure quantization for memory efficiency
if self.device == "cuda":
quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=torch.float16,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4"
)
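                # Rough expectation (an assumption, not a measured figure): 4-bit NF4
                # quantization keeps the 13B weights around 7-8 GB of VRAM, which is
                # what makes single-GPU loading of this model practical.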
            else:
                quantization_config = None

            # Load tokenizer
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.model_name,
                token=self.hf_token,
                cache_dir=os.path.expanduser(os.getenv("HF_HOME", "~/.cache/huggingface"))
            )
            # Add padding token if not present
            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token

            # Load model
            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                token=self.hf_token,
                cache_dir=os.path.expanduser(os.getenv("HF_HOME", "~/.cache/huggingface")),
                quantization_config=quantization_config,
                device_map="auto" if self.device == "cuda" else None,
                torch_dtype=torch.float16 if self.device == "cuda" else torch.float32,
                trust_remote_code=True
            )
            if self.device == "cpu":
                self.model = self.model.to(self.device)

            self.is_loaded = True
            logger.info("[LOCAL_LLM] MedAlpaca model loaded successfully")
        except Exception as e:
            logger.error(f"[LOCAL_LLM] Failed to load model: {e}")
            raise

    def generate(self, prompt: str, max_tokens: int = 512, temperature: float = 0.2) -> Optional[str]:
        """Generate text using MedAlpaca model"""
        if not self.is_loaded:
            self.load_model()
        try:
            # Format prompt for MedAlpaca
            formatted_prompt = self._format_prompt(prompt)
            # Tokenize input
            inputs = self.tokenizer(
                formatted_prompt,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=2048
            ).to(self.device)
            # Generate with optimized parameters for MedAlpaca
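            # As written, temperature > 0 means nucleus/top-k sampling, while
            # temperature == 0 switches to deterministic beam search (num_beams=4)
            # with early stopping.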
            with torch.no_grad():
                outputs = self.model.generate(
                    **inputs,
                    max_new_tokens=max_tokens,
                    temperature=temperature,
                    do_sample=temperature > 0,
                    pad_token_id=self.tokenizer.eos_token_id,
                    eos_token_id=self.tokenizer.eos_token_id,
                    repetition_penalty=1.1,
                    top_p=0.9 if temperature > 0 else 1.0,
                    top_k=50 if temperature > 0 else 0,
                    num_beams=1 if temperature > 0 else 4,
                    early_stopping=True
                )
            # Decode output
            generated_text = self.tokenizer.decode(
                outputs[0][inputs['input_ids'].shape[1]:],
                skip_special_tokens=True
            ).strip()
            # Clean up response
            cleaned_text = self._clean_response(generated_text)
            logger.info(f"[LOCAL_LLM] Generated: {self._snip(cleaned_text)}")
            return cleaned_text
        except Exception as e:
            logger.error(f"[LOCAL_LLM] Generation failed: {e}")
            return None

    def _format_prompt(self, prompt: str) -> str:
        """Format prompt for MedAlpaca model with medical-specific formatting"""
        # MedAlpaca was trained on medical Q&A pairs, so we use its expected format
        if "Question:" in prompt and "Answer:" in prompt:
            return prompt
        elif "Context:" in prompt and "Question:" in prompt:
            return prompt
        elif "You are a" in prompt or "medical" in prompt.lower():
            # For medical instructions, use Alpaca format
            return f"Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\n{prompt}\n\n### Response:"
        else:
            # Default medical Q&A format for MedAlpaca
            return f"Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\nAnswer the following medical question accurately and professionally.\n\n### Input:\n{prompt}\n\n### Response:"

    def _clean_response(self, text: str) -> str:
        """Clean generated response with medical-specific cleaning"""
        if not text:
            return text
        # Remove common conversational prefixes and comments
        prefixes_to_remove = [
            "Answer:",
            "The answer is:",
            "Based on the information provided:",
            "Here's the answer:",
            "Here is the answer:",
            "Here's a rewritten version:",
            "Here is a rewritten version:",
            "Here's the rewritten text:",
            "Here is the rewritten text:",
            "Here's the translation:",
            "Here is the translation:",
            "Here's the enhanced text:",
            "Here is the enhanced text:",
            "Here's the improved text:",
            "Here is the improved text:",
            "Here's the medical context:",
            "Here is the medical context:",
            "Here's the cleaned text:",
            "Here is the cleaned text:",
            "Sure,",
            "Okay,",
            "Certainly,",
            "Of course,",
            "I can help you with that.",
            "I'll help you with that.",
            "Let me help you with that.",
            "I can rewrite that for you.",
            "I'll rewrite that for you.",
            "Let me rewrite that for you.",
            "I can translate that for you.",
            "I'll translate that for you.",
            "Let me translate that for you.",
            "### Response:",
            "Response:",
            "Below is an instruction",
            "### Instruction:",
            "Instruction:",
        ]
        text = text.strip()
        for prefix in prefixes_to_remove:
            if text.lower().startswith(prefix.lower()):
                text = text[len(prefix):].strip()
                break
        # Remove any remaining Alpaca format artifacts
        if "### Response:" in text:
            text = text.split("### Response:")[-1].strip()
        if "### Input:" in text:
            text = text.split("### Input:")[0].strip()
        # Remove any remaining conversational elements
        lines = text.split('\n')
        cleaned_lines = []
        for line in lines:
            line = line.strip()
            if line and not any(phrase in line.lower() for phrase in [
                "here's", "here is", "let me", "i can", "i'll", "sure,", "okay,",
                "certainly,", "of course,", "i hope this helps", "hope this helps",
                "does this help", "is this what you", "let me know if"
            ]):
                cleaned_lines.append(line)
        return '\n'.join(cleaned_lines).strip()

    def _snip(self, text: str, max_words: int = 12) -> str:
        """Truncate text for logging"""
        if not text:
            return "∅"
        words = text.strip().split()
        return " ".join(words[:max_words]) + (" …" if len(words) > max_words else "")

    def generate_batch(self, prompts: list, max_tokens: int = 512, temperature: float = 0.2) -> list:
        """Generate text for multiple prompts in batch for better efficiency"""
        if not self.is_loaded:
            self.load_model()
        if not prompts:
            return []
        try:
            # Format all prompts
            formatted_prompts = [self._format_prompt(prompt) for prompt in prompts]
            # Tokenize all inputs
            inputs = self.tokenizer(
                formatted_prompts,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=2048
            ).to(self.device)
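            # Caveat (general decoder-only batching behaviour, not specific to MedAlpaca):
            # batched generation usually wants left padding (tokenizer.padding_side = "left");
            # with right padding the model generates after the pad tokens of shorter prompts,
            # which can degrade their outputs.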
            # Generate for all prompts
            with torch.no_grad():
                outputs = self.model.generate(
                    **inputs,
                    max_new_tokens=max_tokens,
                    temperature=temperature,
                    do_sample=temperature > 0,
                    pad_token_id=self.tokenizer.eos_token_id,
                    eos_token_id=self.tokenizer.eos_token_id,
                    repetition_penalty=1.1,
                    top_p=0.9 if temperature > 0 else 1.0,
                    top_k=50 if temperature > 0 else 0,
                    num_beams=1 if temperature > 0 else 4,
                    early_stopping=True
                )
            # Decode all outputs
            results = []
            input_length = inputs['input_ids'].shape[1]
            for output in outputs:
                generated_text = self.tokenizer.decode(
                    output[input_length:],
                    skip_special_tokens=True
                ).strip()
                cleaned_text = self._clean_response(generated_text)
                results.append(cleaned_text)
            logger.info(f"[LOCAL_LLM] Generated batch of {len(prompts)} texts")
            return results
        except Exception as e:
            logger.error(f"[LOCAL_LLM] Batch generation failed: {e}")
            return [None] * len(prompts)

    def unload_model(self):
        """Unload model to free memory"""
        if self.model is not None:
            del self.model
            self.model = None
        if self.tokenizer is not None:
            del self.tokenizer
            self.tokenizer = None
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        gc.collect()
        self.is_loaded = False
        logger.info("[LOCAL_LLM] Model unloaded and memory freed")


class LocalParaphraser:
    """Local paraphraser using MedAlpaca model with Vietnamese fallback translation"""

    def __init__(self, model_name: str = "medalpaca/medalpaca-13b", hf_token: Optional[str] = None):
        self.client = MedAlpacaClient(model_name, hf_token)
        self.vietnamese_translator = None
        self._init_vietnamese_translator()

    def _init_vietnamese_translator(self):
        """Initialize Vietnamese translator for fallback translation"""
        try:
            from vi.translator import VietnameseTranslator
            self.vietnamese_translator = VietnameseTranslator()
            logger.info("[LOCAL_LLM] Vietnamese translator initialized for fallback")
        except ImportError as e:
            logger.warning(f"[LOCAL_LLM] Vietnamese translator not available: {e}")
            self.vietnamese_translator = None
        except Exception as e:
            logger.warning(f"[LOCAL_LLM] Failed to initialize Vietnamese translator: {e}")
            self.vietnamese_translator = None

    def paraphrase(self, text: str, difficulty: str = "easy", custom_prompt: Optional[str] = None) -> str:
        """Paraphrase text using MedAlpaca with medical-specific optimization"""
        if not text or len(text) < 12:
            return text
        if custom_prompt:
            prompt = custom_prompt
        else:
            # Medical-specific paraphrasing prompts based on difficulty
            if difficulty == "easy":
                prompt = (
                    "Rewrite the following medical text using different words while preserving all medical facts, clinical terms, and meaning. Keep the same level of detail and accuracy. Return only the rewritten text without any introduction or commentary.\n\n"
                    f"{text}"
                )
            else:  # hard difficulty
                prompt = (
                    "Rewrite the following medical text using more sophisticated medical language and different sentence structures while preserving all clinical facts, medical terminology, and diagnostic information. Maintain professional medical tone. Return only the rewritten text without any introduction or commentary.\n\n"
                    f"{text}"
                )
        # Adjust temperature based on difficulty
        temperature = 0.1 if difficulty == "easy" else 0.3
        result = self.client.generate(prompt, max_tokens=min(600, max(128, len(text) // 2)), temperature=temperature)
        return result if result else text

    def translate(self, text: str, target_lang: str = "vi", max_retries: int = 2) -> Optional[str]:
        """Translate text using MedAlpaca with Vietnamese fallback mechanism"""
        if not text:
            return text
        # Only implement fallback for Vietnamese translation
        if target_lang != "vi":
            return self._translate_other_language(text, target_lang)
        # Try MedAlpaca translation with retries
        for attempt in range(max_retries + 1):
            try:
                # Medical-specific Vietnamese translation prompt
                prompt = (
                    "Translate the following English medical text to Vietnamese while preserving all medical terminology, clinical facts, and professional medical language. Use appropriate Vietnamese medical terms. Return only the translation without any introduction or commentary.\n\n"
                    f"{text}"
                )
                result = self.client.generate(prompt, max_tokens=min(800, len(text) + 100), temperature=0.0)
                if result and result.strip():
                    # Validate the translation
                    if self._is_valid_vietnamese_translation(text, result.strip()):
                        logger.info(f"[LOCAL_LLM] Vietnamese translation successful (attempt {attempt + 1})")
                        return result.strip()
                    else:
                        logger.warning(f"[LOCAL_LLM] Invalid Vietnamese translation (attempt {attempt + 1}): {result[:100]}...")
                else:
                    logger.warning(f"[LOCAL_LLM] Empty Vietnamese translation (attempt {attempt + 1})")
            except Exception as e:
                logger.warning(f"[LOCAL_LLM] Vietnamese translation attempt {attempt + 1} failed: {e}")
        # Fallback: Use translation model to translate English answer
        logger.info("[LOCAL_LLM] MedAlpaca Vietnamese translation failed, using fallback translation model")
        return self._fallback_vietnamese_translation(text)

    def _translate_other_language(self, text: str, target_lang: str) -> Optional[str]:
        """Translate to languages other than Vietnamese using MedAlpaca"""
        prompt = (
            f"Translate the following medical text to {target_lang} while preserving all medical terminology, clinical facts, and professional medical language. Return only the translation without any introduction or commentary.\n\n"
            f"{text}"
        )
        result = self.client.generate(prompt, max_tokens=min(800, len(text) + 100), temperature=0.0)
        return result.strip() if result else None

    def _is_valid_vietnamese_translation(self, original: str, translation: str) -> bool:
        """Check if the Vietnamese translation is valid"""
        if not translation or not translation.strip():
            return False
        # Check if translation is too similar to original (likely failed)
        if translation.strip().lower() == original.strip().lower():
            return False
        # Check if translation still contains many English words (likely failed)
        english_words = {
            'the', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by',
            'is', 'are', 'was', 'were', 'be', 'been', 'have', 'has', 'had', 'do', 'does',
            'did', 'will', 'would', 'could', 'should', 'may', 'might', 'can', 'must'
        }
        translation_tokens = translation.lower().split()
        english_word_count = sum(1 for token in translation_tokens if token in english_words)
        # If more than 30% of the translated tokens are common English words, the translation likely failed
        if translation_tokens and english_word_count > len(translation_tokens) * 0.3:
            return False
        # Check minimum length (should be reasonable)
        if len(translation.strip()) < len(original.strip()) * 0.3:
            return False
        return True

    def _fallback_vietnamese_translation(self, text: str) -> Optional[str]:
        """Use translation model as fallback for Vietnamese translation"""
        if not self.vietnamese_translator:
            logger.warning("[LOCAL_LLM] Vietnamese translator not available for fallback")
            return None
        try:
            result = self.vietnamese_translator.translate_text(text)
            if result and result.strip() and result.strip() != text.strip():
                logger.info("[LOCAL_LLM] Fallback Vietnamese translation successful")
                return result.strip()
            else:
                logger.warning("[LOCAL_LLM] Fallback Vietnamese translation failed or returned identical text")
                return None
        except Exception as e:
            logger.error(f"[LOCAL_LLM] Fallback Vietnamese translation error: {e}")
            return None

    def backtranslate(self, text: str, via_lang: str = "vi") -> Optional[str]:
        """Backtranslate text using MedAlpaca with Vietnamese fallback mechanism"""
        if not text:
            return text
        # First translate to target language (this will use fallback if needed)
        translated = self.translate(text, target_lang=via_lang)
        if not translated:
            return None
        # Then translate back to English with medical focus
        if via_lang == "vi":
            # Try MedAlpaca for back-translation first
            prompt = (
                "Translate the following Vietnamese medical text back to English while preserving all medical terminology, clinical facts, and professional medical language. Ensure the translation is medically accurate. Return only the translation without any introduction or commentary.\n\n"
                f"{translated}"
            )
            result = self.client.generate(prompt, max_tokens=min(900, len(text) + 150), temperature=0.0)
            if result and result.strip():
                return result.strip()
            # Fallback: Use translation model for back-translation
            logger.info("[LOCAL_LLM] MedAlpaca back-translation failed, using fallback translation model")
            return self._fallback_english_translation(translated)
        else:
            prompt = (
                f"Translate the following {via_lang} medical text back to English while preserving all medical terminology, clinical facts, and professional medical language. Return only the translation without any introduction or commentary.\n\n"
                f"{translated}"
            )
            result = self.client.generate(prompt, max_tokens=min(900, len(text) + 150), temperature=0.0)
            return result.strip() if result else None

    def _fallback_english_translation(self, vietnamese_text: str) -> Optional[str]:
        """Use translation model as fallback for English back-translation"""
        if not self.vietnamese_translator:
            logger.warning("[LOCAL_LLM] Vietnamese translator not available for back-translation fallback")
            return None
        try:
            # Use the translator's back-translation capability
            # Note: This would need to be implemented in the VietnameseTranslator class
            # For now, we'll use a simple approach
            result = self.vietnamese_translator.translate_text(vietnamese_text)
            if result and result.strip() and result.strip() != vietnamese_text.strip():
                logger.info("[LOCAL_LLM] Fallback English back-translation successful")
                return result.strip()
            else:
                logger.warning("[LOCAL_LLM] Fallback English back-translation failed or returned identical text")
                return None
        except Exception as e:
            logger.error(f"[LOCAL_LLM] Fallback English back-translation error: {e}")
            return None

    def consistency_check(self, user: str, output: str) -> bool:
        """Check consistency using MedAlpaca with medical validation focus"""
        prompt = (
            "Evaluate if the medical answer is consistent with the question/context and medically accurate. Consider medical accuracy, clinical appropriateness, consistency with the question, safety standards, and completeness of medical information. Reply with exactly 'PASS' if the answer is medically sound and consistent, otherwise 'FAIL'.\n\n"
            f"Question/Context: {user}\n\n"
            f"Medical Answer: {output}"
        )
        result = self.client.generate(prompt, max_tokens=5, temperature=0.0)
        return isinstance(result, str) and "PASS" in result.upper()

    def medical_accuracy_check(self, question: str, answer: str) -> bool:
        """Check medical accuracy of Q&A pairs using MedAlpaca"""
        if not question or not answer:
            return False
        prompt = (
            "Evaluate if the medical answer is accurate and appropriate for the question. Consider medical facts, clinical knowledge, appropriate medical terminology, clinical reasoning, logic, and safety considerations. Reply with exactly 'ACCURATE' if the answer is medically correct, otherwise 'INACCURATE'.\n\n"
            f"Medical Question: {question}\n\n"
            f"Medical Answer: {answer}"
        )
        result = self.client.generate(prompt, max_tokens=5, temperature=0.0)
        if not isinstance(result, str):
            return False
        verdict = result.upper()
        # 'INACCURATE' contains 'ACCURATE' as a substring, so rule out the negative label first
        return "INACCURATE" not in verdict and "ACCURATE" in verdict

    def enhance_medical_terminology(self, text: str) -> str:
        """Enhance medical terminology in text using MedAlpaca"""
        if not text or len(text) < 20:
            return text
        prompt = (
            "Improve the medical terminology in the following text while preserving all factual information and clinical accuracy. Use more precise medical terms where appropriate. Return only the improved text without any introduction or commentary.\n\n"
            f"{text}"
        )
        result = self.client.generate(prompt, max_tokens=min(800, len(text) + 100), temperature=0.1)
        return result if result else text

    def create_clinical_scenarios(self, question: str, answer: str) -> list:
        """Create different clinical scenarios from Q&A pairs using MedAlpaca with batch optimization"""
        scenarios = []
        # Different clinical context prompts
        context_prompts = [
            (
                "Rewrite this medical question as if asked by a patient in an emergency room setting. Return only the rewritten question without any introduction or commentary:\n\n{question}",
                "emergency_room"
            ),
            (
                "Rewrite this medical question as if asked by a patient during a routine checkup. Return only the rewritten question without any introduction or commentary:\n\n{question}",
                "routine_checkup"
            ),
            (
                "Rewrite this medical question as if asked by a patient with chronic conditions. Return only the rewritten question without any introduction or commentary:\n\n{question}",
                "chronic_care"
            ),
            (
                "Rewrite this medical question as if asked by a patient's family member. Return only the rewritten question without any introduction or commentary:\n\n{question}",
                "family_inquiry"
            )
        ]
        # Use batch processing for better efficiency
        try:
            prompts = [prompt_template.format(question=question) for prompt_template, _ in context_prompts]
            results = self.client.generate_batch(prompts, max_tokens=min(400, len(question) + 50), temperature=0.2)
            for result, (_, scenario_type) in zip(results, context_prompts):
                if result and not self._is_invalid_response(result):
                    scenarios.append((result, answer, scenario_type))
        except Exception as e:
            logger.warning(f"Batch clinical scenario creation failed, falling back to individual: {e}")
            # Fallback to individual processing
            for prompt_template, scenario_type in context_prompts:
                try:
                    prompt = prompt_template.format(question=question)
                    scenario_question = self.client.generate(prompt, max_tokens=min(400, len(question) + 50), temperature=0.2)
                    if scenario_question and not self._is_invalid_response(scenario_question):
                        scenarios.append((scenario_question, answer, scenario_type))
                except Exception as e:
                    logger.warning(f"Failed to create clinical scenario {scenario_type}: {e}")
                    continue
        return scenarios

    def _is_invalid_response(self, text: str) -> bool:
        """Check if response is invalid (similar to augment.py)"""
        if not text or not isinstance(text, str):
            return True
        text_lower = text.lower().strip()
        invalid_patterns = [
            "fail", "invalid", "i couldn't", "i can't", "i cannot", "unable to",
            "sorry", "error", "not available", "no answer", "insufficient",
            "don't know", "do not know", "not sure", "cannot determine",
            "unable to provide", "not possible", "not applicable", "n/a"
        ]
        if len(text_lower) < 3:
            return True
        for pattern in invalid_patterns:
            if pattern in text_lower:
                return True
        return False

    def create_vietnamese_training_data(self, question: str, answer: str, max_retries: int = 2) -> list:
        """
        Create Vietnamese training data with fallback mechanism.

        This method tries to get Vietnamese translations from MedAlpaca first.
        If MedAlpaca fails (max 2 retries), it allows MedAlpaca to answer in English
        and uses translation models to create Vietnamese versions.

        Args:
            question: English question
            answer: English answer
            max_retries: Maximum retries for MedAlpaca Vietnamese translation

        Returns:
            List of training data tuples: [(question_vi, answer_vi), ...]
        """
        training_data = []
        # Try to get Vietnamese translation from MedAlpaca
        question_vi = self.translate(question, target_lang="vi", max_retries=max_retries)
        answer_vi = self.translate(answer, target_lang="vi", max_retries=max_retries)
        if question_vi and answer_vi:
            # MedAlpaca successfully translated both
            training_data.append((question_vi, answer_vi))
            logger.info("[LOCAL_LLM] Created Vietnamese training data using MedAlpaca translation")
        else:
            # MedAlpaca failed, use fallback mechanism
            logger.info("[LOCAL_LLM] MedAlpaca Vietnamese translation failed, using fallback mechanism")
            # Allow MedAlpaca to answer in English (this should always work)
            english_answer = self.client.generate(
                f"Answer the following medical question: {question}",
                max_tokens=min(800, len(answer) + 100),
                temperature=0.1
            )
            if english_answer and english_answer.strip():
                # Use translation models to create Vietnamese versions
                if self.vietnamese_translator:
                    try:
                        # Translate question using fallback
                        question_vi_fallback = self._fallback_vietnamese_translation(question)
                        # Translate answer using fallback
                        answer_vi_fallback = self._fallback_vietnamese_translation(english_answer.strip())
                        if question_vi_fallback and answer_vi_fallback:
                            training_data.append((question_vi_fallback, answer_vi_fallback))
                            logger.info("[LOCAL_LLM] Created Vietnamese training data using fallback translation")
                        else:
                            logger.warning("[LOCAL_LLM] Fallback translation failed, no Vietnamese training data created")
                    except Exception as e:
                        logger.error(f"[LOCAL_LLM] Fallback translation error: {e}")
                else:
                    logger.warning("[LOCAL_LLM] Vietnamese translator not available for fallback")
            else:
                logger.warning("[LOCAL_LLM] MedAlpaca failed to generate English answer for fallback")
        return training_data

    def create_vietnamese_augmented_data(self, question: str, answer: str) -> list:
        """
        Create multiple Vietnamese training data variations using different approaches.

        This method creates:
        1. Direct Vietnamese translation (with English-answer fallback if needed)
        2. Paraphrased English answer + Vietnamese translation
        3. Back-translated English answer + Vietnamese translation

        Args:
            question: English question
            answer: English answer

        Returns:
            List of training data tuples: [(question_vi, answer_vi), ...]
        """
        training_data = []
        # 1. Try direct Vietnamese translation
        direct_data = self.create_vietnamese_training_data(question, answer)
        training_data.extend(direct_data)
        # 2. Create paraphrased English version and translate
        try:
            paraphrased_answer = self.paraphrase(answer, difficulty="easy")
            if paraphrased_answer and paraphrased_answer != answer:
                paraphrased_data = self.create_vietnamese_training_data(question, paraphrased_answer)
                training_data.extend(paraphrased_data)
                logger.info("[LOCAL_LLM] Created Vietnamese training data from paraphrased English")
        except Exception as e:
            logger.warning(f"[LOCAL_LLM] Failed to create paraphrased Vietnamese data: {e}")
        # 3. Create back-translated version
        try:
            backtranslated_answer = self.backtranslate(answer, via_lang="vi")
            if backtranslated_answer and backtranslated_answer != answer:
                backtranslated_data = self.create_vietnamese_training_data(question, backtranslated_answer)
                training_data.extend(backtranslated_data)
                logger.info("[LOCAL_LLM] Created Vietnamese training data from back-translated English")
        except Exception as e:
            logger.warning(f"[LOCAL_LLM] Failed to create back-translated Vietnamese data: {e}")
        logger.info(f"[LOCAL_LLM] Created {len(training_data)} Vietnamese training data variations")
        return training_data

    def unload(self):
        """Unload the model"""
        self.client.unload_model()
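

# Minimal smoke-test sketch (not part of the library API): running this module directly
# loads the full 13B model, so it assumes sufficient GPU memory or a very patient CPU run.
if __name__ == "__main__":
    paraphraser = LocalParaphraser()
    sample = "Hypertension is a major risk factor for stroke and ischemic heart disease."
    print(paraphraser.paraphrase(sample, difficulty="easy"))
    paraphraser.unload()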