1
1
from pathlib import Path
2
2
from typing import Any , Dict , Generic , Iterable , Optional , Tuple , Type , TypeVar , Sequence
3
+ import warnings
3
4
4
5
import numpy as np
5
6
import onnxruntime as ort
@@ -39,14 +40,15 @@ def load_onnx_model(
39
40
providers : Optional [Sequence [OnnxProvider ]] = None ,
40
41
) -> None :
41
42
model_path = model_dir / model_file
42
-
43
43
# List of Execution Providers: https://onnxruntime.ai/docs/execution-providers
44
44
45
45
onnx_providers = ["CPUExecutionProvider" ] if providers is None else list (providers )
46
46
available_providers = ort .get_available_providers ()
47
+ requested_provider_names = []
47
48
for provider in onnx_providers :
48
49
# check providers available
49
50
provider_name = provider if isinstance (provider , str ) else provider [0 ]
51
+ requested_provider_names .append (provider_name )
50
52
if provider_name not in available_providers :
51
53
raise ValueError (
52
54
f"Provider { provider_name } is not available. Available providers: { available_providers } "
@@ -62,6 +64,15 @@ def load_onnx_model(
62
64
self .model = ort .InferenceSession (
63
65
str (model_path ), providers = onnx_providers , sess_options = so
64
66
)
67
+ if "CUDAExecutionProvider" in requested_provider_names :
68
+ current_providers = self .model .get_providers ()
69
+ if "CUDAExecutionProvider" not in current_providers :
70
+ warnings .warn (
71
+ f"Attempt to set CUDAExecutionProvider failed. Current providers: { current_providers } ."
72
+ "If you are using CUDA 12.x, install onnxruntime-gpu via "
73
+ "`pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/`" ,
74
+ RuntimeWarning ,
75
+ )
65
76
66
77
def onnx_embed(self, *args, **kwargs) -> Tuple[np.ndarray, np.ndarray]:
    """Run the ONNX model on the given inputs and return its embeddings.

    Abstract hook: concrete model classes override this with the actual
    tokenize-and-infer logic; the base class provides no implementation.

    Returns:
        Tuple[np.ndarray, np.ndarray]: model outputs (e.g. embeddings and
        attention mask) as produced by the subclass implementation.

    Raises:
        NotImplementedError: always, in this base class.
    """
    raise NotImplementedError("Subclasses must implement this method")
0 commit comments