Text Generation
Transformers
Safetensors
mistral
alignment-handbook
trl
sft
Generated from Trainer
conversational
text-generation-inference
Instructions to use interview-eval/zephyr-7b-stem-case-5 with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Transformers
How to use interview-eval/zephyr-7b-stem-case-5 with Transformers:
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="interview-eval/zephyr-7b-stem-case-5")
messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe(messages)
```

```python
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("interview-eval/zephyr-7b-stem-case-5")
model = AutoModelForCausalLM.from_pretrained("interview-eval/zephyr-7b-stem-case-5")

messages = [
    {"role": "user", "content": "Who are you?"},
]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```

- Notebooks
- Google Colab
- Kaggle
- Local Apps
- vLLM
How to use interview-eval/zephyr-7b-stem-case-5 with vLLM:
Install from pip and serve the model
```shell
# Install vLLM from pip:
pip install vllm

# Start the vLLM server:
vllm serve "interview-eval/zephyr-7b-stem-case-5"

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:8000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "interview-eval/zephyr-7b-stem-case-5",
    "messages": [
      {"role": "user", "content": "What is the capital of France?"}
    ]
  }'
```

Use Docker

```shell
docker model run hf.co/interview-eval/zephyr-7b-stem-case-5
```
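Both local servers expose the same OpenAI-compatible API, so the curl call above can also be issued with the official `openai` Python client. A minimal sketch, assuming the vLLM server above is listening on port 8000 (for the SGLang server below, switch the port to 30000); the api_key value is a dummy, since no key is checked locally:

```python
# Query the locally running OpenAI-compatible server (assumes `pip install openai`).
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="interview-eval/zephyr-7b-stem-case-5",
    messages=[{"role": "user", "content": "What is the capital of France?"}],
)
print(response.choices[0].message.content)
```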
- SGLang
How to use interview-eval/zephyr-7b-stem-case-5 with SGLang:
Install from pip and serve the model
```shell
# Install SGLang from pip:
pip install sglang

# Start the SGLang server:
python3 -m sglang.launch_server \
  --model-path "interview-eval/zephyr-7b-stem-case-5" \
  --host 0.0.0.0 \
  --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "interview-eval/zephyr-7b-stem-case-5",
    "messages": [
      {"role": "user", "content": "What is the capital of France?"}
    ]
  }'
```

Use Docker images
```shell
docker run --gpus all \
  --shm-size 32g \
  -p 30000:30000 \
  -v ~/.cache/huggingface:/root/.cache/huggingface \
  --env "HF_TOKEN=<secret>" \
  --ipc=host \
  lmsysorg/sglang:latest \
  python3 -m sglang.launch_server \
  --model-path "interview-eval/zephyr-7b-stem-case-5" \
  --host 0.0.0.0 \
  --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "interview-eval/zephyr-7b-stem-case-5",
    "messages": [
      {"role": "user", "content": "What is the capital of France?"}
    ]
  }'
```

- Docker Model Runner
How to use interview-eval/zephyr-7b-stem-case-5 with Docker Model Runner:
```shell
docker model run hf.co/interview-eval/zephyr-7b-stem-case-5
```
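The JSON dump below appears to be the Hugging Face Trainer state (trainer_state.json) recorded during fine-tuning: 20 optimizer steps over 10 epochs, with eval_loss dropping from roughly 1.32 to 0.18. A short parsing sketch follows the dump.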
```json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5,
      "grad_norm": 18.490420290038628,
      "learning_rate": 1e-05,
      "loss": 1.1109,
      "step": 1
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.3244118690490723,
      "eval_runtime": 5.4616,
      "eval_samples_per_second": 18.127,
      "eval_steps_per_second": 0.366,
      "step": 2
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.9689944386482239,
      "eval_runtime": 5.3534,
      "eval_samples_per_second": 18.493,
      "eval_steps_per_second": 0.374,
      "step": 4
    },
    {
      "epoch": 2.5,
      "grad_norm": 24.423394115352504,
      "learning_rate": 8.94570254698197e-06,
      "loss": 1.1415,
      "step": 5
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.8217005133628845,
      "eval_runtime": 5.3528,
      "eval_samples_per_second": 18.495,
      "eval_steps_per_second": 0.374,
      "step": 6
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.6269264221191406,
      "eval_runtime": 5.322,
      "eval_samples_per_second": 18.602,
      "eval_steps_per_second": 0.376,
      "step": 8
    },
    {
      "epoch": 5.0,
      "grad_norm": 6.734044641807279,
      "learning_rate": 5.412896727361663e-06,
      "loss": 0.7246,
      "step": 10
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.48788779973983765,
      "eval_runtime": 5.328,
      "eval_samples_per_second": 18.581,
      "eval_steps_per_second": 0.375,
      "step": 10
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.37585097551345825,
      "eval_runtime": 5.3264,
      "eval_samples_per_second": 18.587,
      "eval_steps_per_second": 0.375,
      "step": 12
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.28014644980430603,
      "eval_runtime": 5.3426,
      "eval_samples_per_second": 18.53,
      "eval_steps_per_second": 0.374,
      "step": 14
    },
    {
      "epoch": 7.5,
      "grad_norm": 2.833636235664597,
      "learning_rate": 1.6135921418712959e-06,
      "loss": 0.3832,
      "step": 15
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.21719852089881897,
      "eval_runtime": 5.327,
      "eval_samples_per_second": 18.585,
      "eval_steps_per_second": 0.375,
      "step": 16
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.1864246279001236,
      "eval_runtime": 5.3426,
      "eval_samples_per_second": 18.53,
      "eval_steps_per_second": 0.374,
      "step": 18
    },
    {
      "epoch": 10.0,
      "grad_norm": 1.675995133235129,
      "learning_rate": 0.0,
      "loss": 0.2054,
      "step": 20
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.17900313436985016,
      "eval_runtime": 5.3121,
      "eval_samples_per_second": 18.637,
      "eval_steps_per_second": 0.376,
      "step": 20
    },
    {
      "epoch": 10.0,
      "step": 20,
      "total_flos": 8375186227200.0,
      "train_loss": 0.6121411263942719,
      "train_runtime": 321.7144,
      "train_samples_per_second": 3.077,
      "train_steps_per_second": 0.062
    }
  ],
  "logging_steps": 5,
  "max_steps": 20,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8375186227200.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
```
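To read the log programmatically, here is a minimal sketch; it assumes the dump above has been saved locally as trainer_state.json (the filename is illustrative):

```python
import json

# Load the Trainer-state dump (assumed to be saved as trainer_state.json).
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training entries (loss, grad_norm, learning_rate) with
# evaluation entries (eval_loss, eval_runtime); keep only the evaluation points.
eval_points = [(entry["epoch"], entry["eval_loss"])
               for entry in state["log_history"] if "eval_loss" in entry]

for epoch, loss in eval_points:
    print(f"epoch {epoch:>4}: eval_loss = {loss:.4f}")
```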