# /// script
# dependencies = [
#     "trl>=0.12.0",
#     "transformers>=4.36.0",
#     "datasets>=2.14.0",
#     "peft>=0.7.0",
#     "accelerate>=0.24.0",
# ]
# ///
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig

# Load just 50 examples for quick demo
dataset = load_dataset("trl-lib/Capybara", split="train[:50]")
print(f"✅ Dataset loaded: {len(dataset)} examples")

# Training configuration
config = SFTConfig(
    # CRITICAL: Hub settings
    output_dir="qwen-demo-sft",
    push_to_hub=True,
    hub_model_id="evalstate/qwen-capybara-demo",
    # Quick training settings
    max_steps=20,  # Just 20 steps for demo
    per_device_train_batch_size=2,
    learning_rate=2e-5,
    # Logging
    logging_steps=5,
    save_strategy="steps",
    save_steps=10,
    # Optimization
    warmup_ratio=0.1,
)
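# Note: push_to_hub requires Hugging Face credentials with write access to the
# hub_model_id namespace (e.g. via `huggingface-cli login` or an HF_TOKEN
# environment variable); point hub_model_id at a repo you can write to.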

# LoRA configuration for efficient training
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "v_proj"],
)
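# With rank-16 adapters on only the q_proj/v_proj attention projections, just a
# small fraction of the 0.5B base parameters is trained; increasing r or adding
# more target modules (e.g. k_proj, o_proj) buys capacity at the cost of memory.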

# Initialize and train
trainer = SFTTrainer(
    model="Qwen/Qwen2.5-0.5B",
    train_dataset=dataset,
    args=config,
    peft_config=peft_config,
)

print("🚀 Starting training...")
trainer.train()

print("💾 Pushing to Hub...")
trainer.push_to_hub()

print("✅ Complete! Model at: https://huggingface.co/evalstate/qwen-capybara-demo")