dropping some ci tests from image_to_text and text_generation
hsubramony committed Jan 21, 2025
1 parent c5d679d commit c37d9be
Showing 2 changed files with 14 additions and 14 deletions.
8 changes: 4 additions & 4 deletions tests/test_image_to_text_example.py
@@ -14,8 +14,8 @@
 # Gaudi2 CI baselines
 MODELS_TO_TEST = {
     "bf16": [
-        ("llava-hf/llava-1.5-7b-hf", 1, 77.98733740859008),
-        ("llava-hf/llava-1.5-13b-hf", 1, 48.54364937033955),
+        #("llava-hf/llava-1.5-7b-hf", 1, 77.98733740859008),
+        #("llava-hf/llava-1.5-13b-hf", 1, 48.54364937033955),
         ("llava-hf/llava-v1.6-mistral-7b-hf", 1, 33.17984878151546),
         ("llava-hf/llava-v1.6-vicuna-7b-hf", 1, 35.00608681379742),
         ("llava-hf/llava-v1.6-vicuna-13b-hf", 1, 23.527610042925),
@@ -25,8 +25,8 @@
         ("tiiuae/falcon-11B-vlm", 1, 23.69260849957278),
     ],
     "fp8": [
-        ("llava-hf/llava-1.5-7b-hf", 1, 98.72578382705062),
-        ("llava-hf/llava-1.5-13b-hf", 1, 67.20488222876344),
+        #("llava-hf/llava-1.5-7b-hf", 1, 98.72578382705062),
+        #("llava-hf/llava-1.5-13b-hf", 1, 67.20488222876344),
         ("llava-hf/llava-v1.6-mistral-7b-hf", 1, 45.011551008367084),
         ("llava-hf/llava-v1.6-vicuna-7b-hf", 1, 45.18544502949674),
         ("llava-hf/llava-v1.6-vicuna-13b-hf", 1, 30.9535718774675),
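For context, the MODELS_TO_TEST entries above pair a model with its CI throughput baseline, keyed by precision mode; commenting a tuple out removes that model from the collected test matrix. Below is a minimal sketch of how such a table is typically consumed by a parametrized test. The field meanings (batch size, baseline tokens/s), the 5% regression margin, and every name in the sketch are assumptions for illustration, not the repository's actual harness.

import pytest

# Hypothetical bf16 baseline table; assumes each tuple is
# (model_name, batch_size, baseline_throughput_in_tokens_per_s).
BF16_BASELINES = [
    ("llava-hf/llava-v1.6-mistral-7b-hf", 1, 33.17984878151546),
    ("llava-hf/llava-v1.6-vicuna-7b-hf", 1, 35.00608681379742),
]


def run_image_to_text_benchmark(model_name: str, batch_size: int) -> float:
    # Stand-in for the real benchmark launcher; returns a measured tokens/s figure.
    return 1000.0


@pytest.mark.parametrize("model_name, batch_size, baseline", BF16_BASELINES)
def test_image_to_text_bf16(model_name, batch_size, baseline):
    throughput = run_image_to_text_benchmark(model_name, batch_size)
    # Commented-out entries in the table above never reach this comparison.
    assert throughput >= 0.95 * baseline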
20 changes: 10 additions & 10 deletions tests/test_text_generation_example.py
@@ -40,16 +40,16 @@
         ("codellama/CodeLlama-34b-hf", 1, True, 32.644, False),
         ("bigcode/starcoder2-3b", 1, False, 261.07213776344133, True),
         ("adept/persimmon-8b-base", 4, False, 366.73968820698406, False),
-        ("Qwen/Qwen1.5-7B", 4, False, 490.8621617893209, False),
+        #("Qwen/Qwen1.5-7B", 4, False, 490.8621617893209, False),
         ("google/gemma-7b", 1, False, 109.70751574382221, True),
         ("google/gemma-2-9b", 1, False, 92.302359446567, True),
         ("state-spaces/mamba-130m-hf", 1536, False, 5385.511100161605, False),
-        ("Deci/DeciLM-7B", 1, False, 115, False),
+        #("Deci/DeciLM-7B", 1, False, 115, False),
         ("Qwen/Qwen2-7B", 256, False, 8870.945160540245, True),
         ("Qwen/Qwen1.5-MoE-A2.7B", 1, True, 44.25834541569395, False),
-        ("EleutherAI/gpt-neo-2.7B", 1, False, 257.2476416844122, False),
-        ("facebook/xglm-1.7B", 1, False, 357.46365062825083, False),
-        ("CohereForAI/c4ai-command-r-v01", 1, False, 29.50315234651154, False),
+        #("EleutherAI/gpt-neo-2.7B", 1, False, 257.2476416844122, False),
+        #("facebook/xglm-1.7B", 1, False, 357.46365062825083, False),
+        #("CohereForAI/c4ai-command-r-v01", 1, False, 29.50315234651154, False),
         ("tiiuae/falcon-mamba-7b", 1, False, 47.1464839567739, False),
         ("openbmb/MiniCPM3-4B", 1, False, 65.116, False),
         ("baichuan-inc/Baichuan2-7B-Chat", 1, True, 108, False),
@@ -68,13 +68,13 @@
         ("meta-llama/Llama-2-70b-hf", 4, 207, False, 2048, 128, 568.5),
         ("meta-llama/Llama-2-70b-hf", 8, 172, False, 2048, 2048, 4656.2),
         ("mistralai/Mistral-7B-Instruct-v0.2", 1, 896, True, 128, 128, 17068.965283763682),
-        ("mistralai/Mistral-7B-Instruct-v0.2", 1, 120, True, 128, 2048, 6979.225194247115),
-        ("mistralai/Mistral-7B-Instruct-v0.2", 1, 120, True, 2048, 128, 1681.4401450088983),
+        #("mistralai/Mistral-7B-Instruct-v0.2", 1, 120, True, 128, 2048, 6979.225194247115),
+        #("mistralai/Mistral-7B-Instruct-v0.2", 1, 120, True, 2048, 128, 1681.4401450088983),
         ("mistralai/Mistral-7B-Instruct-v0.2", 1, 44, True, 2048, 2048, 3393.149396451692),
         ("mistralai/Mixtral-8x7B-v0.1", 1, 1, True, 128, 128, 40.94),
         ("mistralai/Mixtral-8x7B-v0.1", 2, 768, True, 128, 128, 3428.65),
-        ("mistralai/Mixtral-8x7B-v0.1", 2, 96, True, 128, 2048, 2570.34),
-        ("mistralai/Mixtral-8x7B-v0.1", 2, 96, True, 2048, 128, 379.03),
+        #("mistralai/Mixtral-8x7B-v0.1", 2, 96, True, 128, 2048, 2570.34),
+        #("mistralai/Mixtral-8x7B-v0.1", 2, 96, True, 2048, 128, 379.03),
         ("mistralai/Mixtral-8x7B-v0.1", 2, 48, True, 2048, 2048, 1147.50),
         ("microsoft/phi-2", 1, 1, True, 128, 128, 254.08932787178165),
     ],
@@ -83,7 +83,7 @@
     ],
     "deepspeed": [
         ("bigscience/bloomz", 8, 1, 36.77314954096159),
-        ("meta-llama/Llama-2-70b-hf", 8, 1, 64.10514998902435),
+        #("meta-llama/Llama-2-70b-hf", 8, 1, 64.10514998902435),
         ("meta-llama/Meta-Llama-3-70B-Instruct", 8, 1, 64),
         ("facebook/opt-66b", 2, 1, 28.48069266504111),
         ("google/gemma-2-9b", 8, 1, 110.12610917383735),
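The change above drops cases by commenting them out, so they vanish from the collected test matrix entirely. An alternative, shown purely as an illustrative sketch (assuming a pytest.mark.parametrize-style harness with hypothetical field names, not the repository's actual code), is to keep the tuple but attach a skip marker so the case still appears as skipped in CI reports.

import pytest

# Hypothetical deepspeed case list; tuple fields (model, world size, batch size,
# baseline throughput) are assumed for illustration.
DEEPSPEED_CASES = [
    ("bigscience/bloomz", 8, 1, 36.77314954096159),
    pytest.param(
        "meta-llama/Llama-2-70b-hf", 8, 1, 64.10514998902435,
        # Skipped instead of deleted, so the case remains visible in reports.
        marks=pytest.mark.skip(reason="temporarily dropped from CI"),
    ),
]


@pytest.mark.parametrize("model_name, world_size, batch_size, baseline", DEEPSPEED_CASES)
def test_text_generation_deepspeed(model_name, world_size, batch_size, baseline):
    # Placeholder body; the real test would launch the run and compare throughput.
    assert baseline > 0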
