Update README.md
Browse files
README.md
CHANGED
|
@@ -81,19 +81,18 @@ from PIL import Image
|
|
| 81 |
from transformers import (
|
| 82 |
LlavaForConditionalGeneration,
|
| 83 |
AutoTokenizer,
|
|
|
|
| 84 |
CLIPImageProcessor
|
| 85 |
)
|
| 86 |
# In this repo, needed for version < 4.41.1
|
| 87 |
# from processing_llavagemma import LlavaGemmaProcessor
|
|
|
|
| 88 |
|
| 89 |
checkpoint = "Intel/llava-gemma-2b"
|
| 90 |
|
| 91 |
# Load model
|
| 92 |
model = LlavaForConditionalGeneration.from_pretrained(checkpoint)
|
| 93 |
-
processor =
|
| 94 |
-
tokenizer=AutoTokenizer.from_pretrained(checkpoint),
|
| 95 |
-
image_processor=CLIPImageProcessor.from_pretrained(checkpoint)
|
| 96 |
-
)
|
| 97 |
|
| 98 |
# Prepare inputs
|
| 99 |
# Use gemma chat template
|
|
|
|
| 81 |
from transformers import (
|
| 82 |
LlavaForConditionalGeneration,
|
| 83 |
AutoTokenizer,
|
| 84 |
+
AutoProcessor,
|
| 85 |
CLIPImageProcessor
|
| 86 |
)
|
| 87 |
# In this repo, needed for version < 4.41.1
|
| 88 |
# from processing_llavagemma import LlavaGemmaProcessor
|
| 89 |
+
# processor = LlavaGemmaProcessor(tokenizer=AutoTokenizer.from_pretrained(checkpoint), image_processor=CLIPImageProcessor.from_pretrained(checkpoint))
|
| 90 |
|
| 91 |
checkpoint = "Intel/llava-gemma-2b"
|
| 92 |
|
| 93 |
# Load model
|
| 94 |
model = LlavaForConditionalGeneration.from_pretrained(checkpoint)
|
| 95 |
+
processor = AutoProcessor.from_pretrained(checkpoint)
|
|
|
|
|
|
|
|
|
|
| 96 |
|
| 97 |
# Prepare inputs
|
| 98 |
# Use gemma chat template
|