Image-Text-to-Text
Transformers
Safetensors
llava_next
multimodal
multilingual
vlm
translation
conversational
text-generation-inference
Instructions to use utter-project/TowerVision-2B with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Transformers
How to use utter-project/TowerVision-2B with Transformers:
# Use a pipeline as a high-level helper from transformers import pipeline pipe = pipeline("image-text-to-text", model="utter-project/TowerVision-2B") messages = [ { "role": "user", "content": [ {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"}, {"type": "text", "text": "What animal is on the candy?"} ] }, ] pipe(text=messages)
# Load model directly from transformers import AutoProcessor, AutoModelForImageTextToText processor = AutoProcessor.from_pretrained("utter-project/TowerVision-2B") model = AutoModelForImageTextToText.from_pretrained("utter-project/TowerVision-2B") messages = [ { "role": "user", "content": [ {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"}, {"type": "text", "text": "What animal is on the candy?"} ] }, ] inputs = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", ).to(model.device) outputs = model.generate(**inputs, max_new_tokens=40) print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
- Notebooks
- Google Colab
- Kaggle
- Local Apps
- vLLM
How to use utter-project/TowerVision-2B with vLLM:
Install from pip and serve model
# Install vLLM from pip: pip install vllm # Start the vLLM server: vllm serve "utter-project/TowerVision-2B" # Call the server using curl (OpenAI-compatible API): curl -X POST "http://localhost:8000/v1/chat/completions" \ -H "Content-Type: application/json" \ --data '{ "model": "utter-project/TowerVision-2B", "messages": [ { "role": "user", "content": [ { "type": "text", "text": "Describe this image in one sentence." }, { "type": "image_url", "image_url": { "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" } } ] } ] }'
Use Docker
docker model run hf.co/utter-project/TowerVision-2B
- SGLang
How to use utter-project/TowerVision-2B with SGLang:
Install from pip and serve model
# Install SGLang from pip: pip install sglang # Start the SGLang server: python3 -m sglang.launch_server \ --model-path "utter-project/TowerVision-2B" \ --host 0.0.0.0 \ --port 30000 # Call the server using curl (OpenAI-compatible API): curl -X POST "http://localhost:30000/v1/chat/completions" \ -H "Content-Type: application/json" \ --data '{ "model": "utter-project/TowerVision-2B", "messages": [ { "role": "user", "content": [ { "type": "text", "text": "Describe this image in one sentence." }, { "type": "image_url", "image_url": { "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" } } ] } ] }'
Use Docker images
docker run --gpus all \ --shm-size 32g \ -p 30000:30000 \ -v ~/.cache/huggingface:/root/.cache/huggingface \ --env "HF_TOKEN=<secret>" \ --ipc=host \ lmsysorg/sglang:latest \ python3 -m sglang.launch_server \ --model-path "utter-project/TowerVision-2B" \ --host 0.0.0.0 \ --port 30000 # Call the server using curl (OpenAI-compatible API): curl -X POST "http://localhost:30000/v1/chat/completions" \ -H "Content-Type: application/json" \ --data '{ "model": "utter-project/TowerVision-2B", "messages": [ { "role": "user", "content": [ { "type": "text", "text": "Describe this image in one sentence." }, { "type": "image_url", "image_url": { "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" } } ] } ] }'
- Docker Model Runner
How to use utter-project/TowerVision-2B with Docker Model Runner:
docker model run hf.co/utter-project/TowerVision-2B
Guilherme Viveiros commited on
Upload LlavaNextForConditionalGeneration
Browse files
- config.json +7 -6
- generation_config.json +1 -1
- model-00001-of-00003.safetensors +2 -2
- model-00002-of-00003.safetensors +1 -1
- model-00003-of-00003.safetensors +1 -1
- model.safetensors.index.json +0 -0
config.json
CHANGED
|
@@ -62,11 +62,12 @@
|
|
| 62 |
],
|
| 63 |
"image_seq_length": 576,
|
| 64 |
"image_token_index": 256000,
|
|
|
|
| 65 |
"model_type": "llava_next",
|
| 66 |
"multimodal_projector_bias": true,
|
| 67 |
"projector_hidden_act": "gelu",
|
| 68 |
"text_config": {
|
| 69 |
-
"_name_or_path": "/
|
| 70 |
"add_faster_video": false,
|
| 71 |
"add_time_instruction": false,
|
| 72 |
"architectures": [
|
|
@@ -161,7 +162,7 @@
|
|
| 161 |
"mm_use_im_start_end": false,
|
| 162 |
"mm_vision_select_feature": "patch",
|
| 163 |
"mm_vision_select_layer": -2,
|
| 164 |
-
"mm_vision_tower": "/
|
| 165 |
"mm_vision_tower_lr": 2e-06,
|
| 166 |
"model_type": "gemma2",
|
| 167 |
"num_attention_heads": 8,
|
|
@@ -174,7 +175,7 @@
|
|
| 174 |
"sliding_window": 4096,
|
| 175 |
"tokenizer_model_max_length": 8192,
|
| 176 |
"tokenizer_padding_side": "right",
|
| 177 |
-
"torch_dtype": "
|
| 178 |
"use_cache": true,
|
| 179 |
"use_mm_proj": true,
|
| 180 |
"use_pos_skipping": false,
|
|
@@ -182,8 +183,8 @@
|
|
| 182 |
"vocab_size": 256001
|
| 183 |
},
|
| 184 |
"tie_word_embeddings": false,
|
| 185 |
-
"torch_dtype": "
|
| 186 |
-
"transformers_version": "4.52.0
|
| 187 |
"use_image_newline_parameter": true,
|
| 188 |
"vision_config": {
|
| 189 |
"attention_dropout": 0.0,
|
|
@@ -197,7 +198,7 @@
|
|
| 197 |
"num_channels": 3,
|
| 198 |
"num_hidden_layers": 26,
|
| 199 |
"patch_size": 14,
|
| 200 |
-
"torch_dtype": "
|
| 201 |
"vision_use_head": false
|
| 202 |
},
|
| 203 |
"vision_feature_layer": -1,
|
|
|
|
| 62 |
],
|
| 63 |
"image_seq_length": 576,
|
| 64 |
"image_token_index": 256000,
|
| 65 |
+
"local_files_only": true,
|
| 66 |
"model_type": "llava_next",
|
| 67 |
"multimodal_projector_bias": true,
|
| 68 |
"projector_hidden_act": "gelu",
|
| 69 |
"text_config": {
|
| 70 |
+
"_name_or_path": "/gpfs/scratch/ehpc209/tvision-lnext-vblocks-cp/FinetuneModel/TextModel.towerp_2b_instruct_full/trained_model_dir",
|
| 71 |
"add_faster_video": false,
|
| 72 |
"add_time_instruction": false,
|
| 73 |
"architectures": [
|
|
|
|
| 162 |
"mm_use_im_start_end": false,
|
| 163 |
"mm_vision_select_feature": "patch",
|
| 164 |
"mm_vision_select_layer": -2,
|
| 165 |
+
"mm_vision_tower": "/gpfs/scratch/ehpc209/hf_models/siglip2-so400m-patch14-384/",
|
| 166 |
"mm_vision_tower_lr": 2e-06,
|
| 167 |
"model_type": "gemma2",
|
| 168 |
"num_attention_heads": 8,
|
|
|
|
| 175 |
"sliding_window": 4096,
|
| 176 |
"tokenizer_model_max_length": 8192,
|
| 177 |
"tokenizer_padding_side": "right",
|
| 178 |
+
"torch_dtype": "float32",
|
| 179 |
"use_cache": true,
|
| 180 |
"use_mm_proj": true,
|
| 181 |
"use_pos_skipping": false,
|
|
|
|
| 183 |
"vocab_size": 256001
|
| 184 |
},
|
| 185 |
"tie_word_embeddings": false,
|
| 186 |
+
"torch_dtype": "float32",
|
| 187 |
+
"transformers_version": "4.52.0",
|
| 188 |
"use_image_newline_parameter": true,
|
| 189 |
"vision_config": {
|
| 190 |
"attention_dropout": 0.0,
|
|
|
|
| 198 |
"num_channels": 3,
|
| 199 |
"num_hidden_layers": 26,
|
| 200 |
"patch_size": 14,
|
| 201 |
+
"torch_dtype": "float32",
|
| 202 |
"vision_use_head": false
|
| 203 |
},
|
| 204 |
"vision_feature_layer": -1,
|
generation_config.json
CHANGED
|
@@ -4,5 +4,5 @@
|
|
| 4 |
"cache_implementation": "hybrid",
|
| 5 |
"eos_token_id": 107,
|
| 6 |
"pad_token_id": 0,
|
| 7 |
-
"transformers_version": "4.52.0
|
| 8 |
}
|
|
|
|
| 4 |
"cache_implementation": "hybrid",
|
| 5 |
"eos_token_id": 107,
|
| 6 |
"pad_token_id": 0,
|
| 7 |
+
"transformers_version": "4.52.0"
|
| 8 |
}
|
model-00001-of-00003.safetensors
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c509490261c8241cb20ed73657e10d5a56fc6141de3569b47d01c0ca5ff6563a
|
| 3 |
+
size 4973255048
|
model-00002-of-00003.safetensors
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
size 4983446008
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a9f9b2a6c5bcf14f9192bf685293d6f85bc6d507e896ad36f5f42a74bfe481a0
|
| 3 |
size 4983446008
|
model-00003-of-00003.safetensors
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
size 2123643328
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bf2c697f598b97fdc7a4fb438057ce4240fed2ba33fee42c45e1ba67dd262dca
|
| 3 |
size 2123643328
|
model.safetensors.index.json
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
|
|