Skip to content

Commit 04cdb69

Browse files
authored
[TRT-EP] Ignore deprecated warnings for TRT APIs (#25105)
### Description
In TensorRT 10.12, weakly-typed networks and their related APIs have been marked as deprecated. This change suppresses the resulting deprecated-API warnings (MSVC C4996) for the Windows build.
--------- Signed-off-by: Kevin Chen <[email protected]>
1 parent 27cdb5c commit 04cdb69

File tree

1 file changed

+21
-3
lines changed

1 file changed

+21
-3
lines changed

onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc

Lines changed: 21 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2959,6 +2959,10 @@ Status TensorrtExecutionProvider::CreateNodeComputeInfoFromGraph(const GraphView
29592959
}
29602960

29612961
// Force Pow + Reduce ops in layer norm to run in FP32 to avoid overflow
2962+
#if defined(_MSC_VER)
2963+
#pragma warning(push)
2964+
#pragma warning(disable : 4996)
2965+
#endif
29622966
if ((fp16_enable_ || bf16_enable_) && layer_norm_fp32_fallback_) {
29632967
for (auto idx = 1; idx < trt_network->getNbLayers() - 1; ++idx) {
29642968
auto layer = trt_network->getLayer(idx);
@@ -2972,6 +2976,9 @@ Status TensorrtExecutionProvider::CreateNodeComputeInfoFromGraph(const GraphView
29722976
}
29732977
}
29742978
}
2979+
#if defined(_MSC_VER)
2980+
#pragma warning(pop)
2981+
#endif
29752982

29762983
int num_inputs = trt_network->getNbInputs();
29772984
int num_outputs = trt_network->getNbOutputs();
@@ -3146,6 +3153,10 @@ Status TensorrtExecutionProvider::CreateNodeComputeInfoFromGraph(const GraphView
31463153
}
31473154
}
31483155

3156+
#if defined(_MSC_VER)
3157+
#pragma warning(push)
3158+
#pragma warning(disable : 4996)
3159+
#endif
31493160
// Set precision flags
31503161
std::string trt_node_name_with_precision = fused_node.Name();
31513162
if (fp16_enable_) {
@@ -3163,7 +3174,9 @@ Status TensorrtExecutionProvider::CreateNodeComputeInfoFromGraph(const GraphView
31633174
trt_node_name_with_precision += "_int8";
31643175
LOGS_DEFAULT(VERBOSE) << "[TensorRT EP] INT8 mode is enabled";
31653176
}
3166-
3177+
#if defined(_MSC_VER)
3178+
#pragma warning(pop)
3179+
#endif
31673180
// Set DLA
31683181
if (fp16_enable_ || int8_enable_) {
31693182
if (dla_enable_ && dla_core_ >= 0) { // DLA can only run with FP16 and INT8
@@ -3779,7 +3792,10 @@ Status TensorrtExecutionProvider::CreateNodeComputeInfoFromGraph(const GraphView
37793792
return ORT_MAKE_STATUS(ONNXRUNTIME, EP_FAIL, "TensorRT EP failed to set INT8 dynamic range.");
37803793
}
37813794
}
3782-
3795+
#if defined(_MSC_VER)
3796+
#pragma warning(push)
3797+
#pragma warning(disable : 4996)
3798+
#endif
37833799
// Set precision
37843800
if (trt_state->int8_enable) {
37853801
trt_config->setFlag(nvinfer1::BuilderFlag::kINT8);
@@ -3793,7 +3809,9 @@ Status TensorrtExecutionProvider::CreateNodeComputeInfoFromGraph(const GraphView
37933809
trt_config->setFlag(nvinfer1::BuilderFlag::kBF16);
37943810
LOGS_DEFAULT(VERBOSE) << "[TensorRT EP] BF16 mode is enabled";
37953811
}
3796-
3812+
#if defined(_MSC_VER)
3813+
#pragma warning(pop)
3814+
#endif
37973815
// Set DLA (DLA can only run with FP16 or INT8)
37983816
if ((trt_state->fp16_enable || trt_state->int8_enable) && trt_state->dla_enable) {
37993817
LOGS_DEFAULT(VERBOSE) << "[TensorRT EP] use DLA core " << trt_state->dla_core;

0 commit comments

Comments (0)