
Commit b6e7c2f

committed
fix arm pipelines call
1 parent 7b4831f commit b6e7c2f

File tree

4 files changed: +16 -3 lines changed


notebooks/llm-agent-react/llm-agent-react-langchain.ipynb

Lines changed: 5 additions & 0 deletions
@@ -486,6 +486,11 @@
 "import openvino.properties.hint as hints\n",
 "import openvino.properties.streams as streams\n",
 "\n",
+"import torch\n",
+"\n",
+"if hasattr(torch, \"mps\") and torch.mps.is_available:\n",
+"    torch.mps.is_available = lambda: False\n",
+"\n",
 "\n",
 "class StopSequenceCriteria(StoppingCriteria):\n",
 "    \"\"\"\n",

notebooks/multimodal-rag/multimodal-rag-llamaindex.ipynb

Lines changed: 5 additions & 0 deletions
@@ -605,6 +605,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
+"import torch\n",
+"\n",
+"if hasattr(torch, \"mps\") and torch.mps.is_available:\n",
+"    torch.mps.is_available = lambda: False\n",
+"\n",
 "from llama_index.core.indices import MultiModalVectorStoreIndex\n",
 "from llama_index.vector_stores.qdrant import QdrantVectorStore\n",
 "from llama_index.core import StorageContext, Settings\n",

notebooks/whisper-asr-genai/whisper-asr-genai.ipynb

Lines changed: 5 additions & 2 deletions
@@ -243,6 +243,7 @@
 "source": [
 "from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq, pipeline\n",
 "from transformers.utils import logging\n",
+"import torch\n",
 "\n",
 "processor = AutoProcessor.from_pretrained(model_id.value)\n",
 "\n",
@@ -253,7 +254,7 @@
 "    model=pt_model,\n",
 "    tokenizer=processor.tokenizer,\n",
 "    feature_extractor=processor.feature_extractor,\n",
-"    device=\"cpu\",\n",
+"    device=torch.device(\"cpu\"),\n",
 ")"
 ]
 },
@@ -1017,7 +1018,9 @@
 "    model=ov_model,\n",
 "    chunk_length_s=30,\n",
 "    tokenizer=ov_processor.tokenizer,\n",
-"    feature_extractor=ov_processor.feature_extractor)\n",
+"    feature_extractor=ov_processor.feature_extractor,\n",
+"    device=torch.device(\"cpu\")\n",
+"    )\n",
 "    try:\n",
 "        calibration_dataset = dataset = load_dataset(\"openslr/librispeech_asr\", \"clean\", split=\"validation\", streaming=True, trust_remote_code=True)\n",
 "        for sample in tqdm(islice(calibration_dataset, calibration_dataset_size), desc=\"Collecting calibration data\",\n",

notebooks/whisper-subtitles-generation/whisper-subtitles-generation.ipynb

Lines changed: 1 addition & 1 deletion
@@ -763,7 +763,7 @@
 "    model=ov_model,\n",
 "    chunk_length_s=30,\n",
 "    tokenizer=processor.tokenizer,\n",
-"    feature_extractor=processor.feature_extractor)\n",
+"    feature_extractor=processor.feature_extractor, device=torch.device(\"cpu\"))\n",
 "    try:\n",
 "        calibration_dataset = dataset = load_dataset(\"openslr/librispeech_asr\", \"clean\", split=\"validation\", streaming=True, trust_remote_code=True)\n",
 "        for sample in tqdm(islice(calibration_dataset, calibration_dataset_size), desc=\"Collecting calibration data\",\n",
