Skip to content

Commit b15d434

Browse files
Update transformers to fix CalledProcessError in optimum-cli call (#2657)
CVS-156213
Parent: db8f650 · Commit: b15d434

File tree

3 files changed

+20
-18
lines changed

3 files changed

+20
-18
lines changed

notebooks/text-to-image-genai/text-to-image-genai.ipynb

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,7 @@
6060
"\n",
6161
"%pip install -q \"git+https://github.com/huggingface/optimum-intel.git\"\n",
6262
"%pip install -q -U --pre --extra-index-url https://storage.openvinotoolkit.org/simple/wheels/nightly \"openvino>=2024.5\" \"openvino-tokenizers>=2024.5\" \"openvino-genai>=2024.5\"\n",
63+
"%pip install -q -U \"transformers>=4.45\" --extra-index-url https://download.pytorch.org/whl/cpu\n",
6364
"%pip install -q Pillow \"diffusers>=0.30.3\" \"gradio>=4.19\" \"typing_extensions>=4.9\" \"tqdm\"\n",
6465
"if platform.system() == \"Darwin\":\n",
6566
" %pip install -q \"numpy<2.0.0\"\n",

notebooks/whisper-asr-genai/whisper-asr-genai.ipynb

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -75,8 +75,9 @@
7575
"\n",
7676
"\n",
7777
"%pip install -q \"torch>=2.3\" \"torchvision>=0.18.1\" --extra-index-url https://download.pytorch.org/whl/cpu\n",
78-
"%pip install -q \"transformers>=4.45\" \"git+https://github.com/huggingface/optimum-intel.git\" --extra-index-url https://download.pytorch.org/whl/cpu\n",
79-
"%pip install -q -U \"openvino>=2024.5.0\" \"openvino-tokenizers>=2024.5.0\" \"openvino-genai>=2024.5.0\"\n",
78+
"%pip install -q -U \"transformers>=4.45\" --extra-index-url https://download.pytorch.org/whl/cpu\n",
79+
"%pip install -q \"git+https://github.com/huggingface/optimum-intel.git\" --extra-index-url https://download.pytorch.org/whl/cpu\n",
80+
"%pip install --pre -q -U \"openvino>=2024.5.0\" \"openvino-tokenizers>=2024.5.0\" \"openvino-genai>=2024.5.0\"\n",
8081
"%pip install -q datasets \"gradio>=4.0\" \"soundfile>=0.12\" \"librosa\" \"python-ffmpeg<=1.0.16\"\n",
8182
"%pip install -q \"nncf>=2.14.0\" \"jiwer\" \"typing_extensions>=4.9\"\n",
8283
"if platform.system() == \"Darwin\":\n",
@@ -995,7 +996,7 @@
995996
" encoder_calibration_data = []\n",
996997
" decoder_calibration_data = []\n",
997998
" ov_model.encoder.request = InferRequestWrapper(ov_model.encoder.request, encoder_calibration_data, apply_caching=True)\n",
998-
" ov_model.decoder_with_past.request = InferRequestWrapper(ov_model.decoder_with_past.request,\n",
999+
" ov_model.decoder.request = InferRequestWrapper(ov_model.decoder.request,\n",
9991000
" decoder_calibration_data,\n",
10001001
" apply_caching=True)\n",
10011002
"\n",
@@ -1012,7 +1013,7 @@
10121013
" pipe(sample[\"audio\"], return_timestamps=True)\n",
10131014
" finally:\n",
10141015
" ov_model.encoder.request = ov_model.encoder.request.request\n",
1015-
" ov_model.decoder_with_past.request = ov_model.decoder_with_past.request.request\n",
1016+
" ov_model.decoder.request = ov_model.decoder.request.request\n",
10161017
"\n",
10171018
" return encoder_calibration_data, decoder_calibration_data"
10181019
]
@@ -1070,17 +1071,17 @@
10701071
" del encoder_calibration_data\n",
10711072
" gc.collect()\n",
10721073
"\n",
1073-
" print(\"Quantizing decoder with past\")\n",
1074-
" quantized_decoder_with_past = nncf.quantize(\n",
1075-
" ov_model.decoder_with_past.model,\n",
1074+
" print(\"Quantizing decoder\")\n",
1075+
" quantized_decoder = nncf.quantize(\n",
1076+
" ov_model.decoder.model,\n",
10761077
" nncf.Dataset(decoder_calibration_data),\n",
10771078
" subset_size=len(decoder_calibration_data),\n",
10781079
" model_type=nncf.ModelType.TRANSFORMER,\n",
10791080
" # Smooth Quant algorithm reduces activation quantization error; optimal alpha value was obtained through grid search\n",
10801081
" advanced_parameters=nncf.AdvancedQuantizationParameters(smooth_quant_alpha=0.96)\n",
10811082
" )\n",
1082-
" ov.save_model(quantized_decoder_with_past, quantized_model_path / \"openvino_decoder_with_past_model.xml\")\n",
1083-
" del quantized_decoder_with_past\n",
1083+
" ov.save_model(quantized_decoder, quantized_model_path / \"openvino_decoder_model.xml\")\n",
1084+
" del quantized_decoder\n",
10841085
" del decoder_calibration_data\n",
10851086
" gc.collect()\n",
10861087
"\n",

notebooks/whisper-subtitles-generation/whisper-subtitles-generation.ipynb

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -87,8 +87,8 @@
8787
"import importlib.util\n",
8888
"\n",
8989
"%pip install -q \"nncf>=2.14.0\"\n",
90-
"%pip install -q -U \"openvino>=2024.5.0\" \"openvino-tokenizers>=2024.5.0\" \"openvino-genai>=2024.5.0\"\n",
91-
"%pip install -q \"python-ffmpeg<=1.0.16\" \"ffmpeg\" \"moviepy\" \"transformers>=4.45\" \"git+https://github.com/huggingface/optimum-intel.git\" \"torch>=2.1\" --extra-index-url https://download.pytorch.org/whl/cpu\n",
90+
"%pip install --pre -q -U \"openvino>=2024.5.0\" \"openvino-tokenizers>=2024.5.0\" \"openvino-genai>=2024.5.0\"\n",
91+
"%pip install -q -U \"python-ffmpeg<=1.0.16\" \"ffmpeg\" \"moviepy\" \"transformers>=4.45\" \"git+https://github.com/huggingface/optimum-intel.git\" \"torch>=2.1\" --extra-index-url https://download.pytorch.org/whl/cpu\n",
9292
"%pip install -q -U \"yt_dlp>=2024.8.6\" soundfile librosa jiwer packaging\n",
9393
"%pip install -q -U \"gradio>=4.19\" \"typing_extensions>=4.9\"\n",
9494
"\n",
@@ -744,7 +744,7 @@
744744
" encoder_calibration_data = []\n",
745745
" decoder_calibration_data = []\n",
746746
" ov_model.encoder.request = InferRequestWrapper(ov_model.encoder.request, encoder_calibration_data, apply_caching=True)\n",
747-
" ov_model.decoder_with_past.request = InferRequestWrapper(ov_model.decoder_with_past.request,\n",
747+
" ov_model.decoder.request = InferRequestWrapper(ov_model.decoder.request,\n",
748748
" decoder_calibration_data,\n",
749749
" apply_caching=True)\n",
750750
"\n",
@@ -761,7 +761,7 @@
761761
" pipe(sample[\"audio\"], generate_kwargs={\"task\": task.value}, return_timestamps=True)\n",
762762
" finally:\n",
763763
" ov_model.encoder.request = ov_model.encoder.request.request\n",
764-
" ov_model.decoder_with_past.request = ov_model.decoder_with_past.request.request\n",
764+
" ov_model.decoder.request = ov_model.decoder.request.request\n",
765765
"\n",
766766
" return encoder_calibration_data, decoder_calibration_data"
767767
]
@@ -812,17 +812,17 @@
812812
" del encoder_calibration_data\n",
813813
" gc.collect()\n",
814814
"\n",
815-
" print(\"Quantizing decoder with past\")\n",
816-
" quantized_decoder_with_past = nncf.quantize(\n",
817-
" ov_model.decoder_with_past.model,\n",
815+
" print(\"Quantizing decoder\")\n",
816+
" quantized_decoder = nncf.quantize(\n",
817+
" ov_model.decoder.model,\n",
818818
" nncf.Dataset(decoder_calibration_data),\n",
819819
" subset_size=len(decoder_calibration_data),\n",
820820
" model_type=nncf.ModelType.TRANSFORMER,\n",
821821
" # Smooth Quant algorithm reduces activation quantization error; optimal alpha value was obtained through grid search\n",
822822
" advanced_parameters=nncf.AdvancedQuantizationParameters(smooth_quant_alpha=0.96),\n",
823823
" )\n",
824-
" ov.save_model(quantized_decoder_with_past, quantized_model_path / \"openvino_decoder_with_past_model.xml\")\n",
825-
" del quantized_decoder_with_past\n",
824+
" ov.save_model(quantized_decoder, quantized_model_path / \"openvino_decoder_model.xml\")\n",
825+
" del quantized_decoder\n",
826826
" del decoder_calibration_data\n",
827827
" gc.collect()\n",
828828
"\n",

0 commit comments

Comments (0)