
Commit 606b234

Michael Andreas Dagitses authored and pytorchmergebot committed
turn on -Werror=unused-function in our Bazel CPU build
Summary: We also fix any existing issues. Note that we only do this for the CPU build: nvcc is treated as a C++ toolchain, but it does not support the same flags, so adding them to the GPU build would cause nvcc errors.

Test Plan: Built locally; relying on CI to confirm.

Reviewers: malfet

Pull Request resolved: pytorch#79154
Approved by: https://github.com/seemethere, https://github.com/osalpekar, https://github.com/albanD
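For context: -Wunused-function fires on functions with internal linkage (static, or in an anonymous namespace) that are defined but never referenced in their translation unit, and -Werror=unused-function promotes exactly that warning to an error. A minimal sketch of the kind of code the CPU build now rejects (file and names hypothetical):

    // demo.cpp -- `g++ -c -Werror=unused-function demo.cpp` fails:
    // `helper` has internal linkage and no caller in this translation unit.
    static int helper(int x) {
      return x * 2;
    }

    // External linkage: the warning does not apply, even if nothing
    // in this file calls it.
    int api_entry(int x) {
      return x + 1;
    }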
1 parent 4aca751 commit 606b234

26 files changed: +83 -373 lines

.bazelrc

+16 -2

@@ -49,5 +49,19 @@ build:cpu-only --@rules_cuda//cuda:enable_cuda=False
 # On the bright side, this means we don't have to more broadly apply
 # the exceptions to an entire target.
 build \
-  --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=type-limits \
-  --per_file_copt=^//.*\.cu$@--compiler-options=-Werror=type-limits
+  --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=type-limits \
+  --per_file_copt=^//.*\.cu$@--compiler-options=-Werror=type-limits \
+  --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=unused-function \
+  --per_file_copt=^//.*\.cu$@--compiler-options=-Werror=unused-function
+
+build \
+  --per_file_copt=//:aten/src/ATen/RegisterCompositeExplicitAutograd.cpp@-Wno-error=unused-function \
+  --per_file_copt=//:aten/src/ATen/RegisterCompositeImplicitAutograd.cpp@-Wno-error=unused-function \
+  --per_file_copt=//:aten/src/ATen/RegisterMkldnnCPU.cpp$@-Wno-error=unused-function \
+  --per_file_copt=//:aten/src/ATen/RegisterNestedTensorCPU.cpp$@-Wno-error=unused-function \
+  --per_file_copt=//:aten/src/ATen/RegisterQuantizedCPU.cpp$@-Wno-error=unused-function \
+  --per_file_copt=//:aten/src/ATen/RegisterSparseCPU.cpp$@-Wno-error=unused-function \
+  --per_file_copt=//:aten/src/ATen/RegisterSparseCsrCPU.cpp$@-Wno-error=unused-function \
+  --per_file_copt=//:aten/src/ATen/RegisterZeroTensor.cpp$@-Wno-error=unused-function \
+  --per_file_copt=//:torch/csrc/lazy/generated/RegisterAutogradLazy.cpp@-Wno-error=unused-function \
+  --per_file_copt=//:torch/csrc/lazy/generated/RegisterLazy.cpp@-Wno-error=unused-function
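A note on the mechanics, since the syntax above is dense: --per_file_copt takes a regex over source paths, an @ separator, and the options to append for files that match. nvcc does not accept -Werror=... itself, so the .cu entries wrap the flag in --compiler-options, which forwards it to the host C++ compiler. The second block then relaxes the error for generated registration files that legitimately contain unused functions. A hedged sketch of that opt-out pattern (path hypothetical):

    # Promote the warning to an error for all C++ sources ...
    build --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=unused-function
    # ... then opt a single generated file back out.
    build --per_file_copt=//:gen/RegisterExampleCPU.cpp@-Wno-error=unused-function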

aten/src/ATen/NamedTensorUtils.cpp

-57

@@ -260,33 +260,6 @@ std::vector<Dimname> compute_diagonal_outnames(
   return outnames;
 }
 
-// tensor_dotted_dim and other_dotted_dim are the dimensions of the two
-// tensors that we contract together. Usually other_dotted_dim is 0
-// and tensor_dotted_dim is the last dim of tensor, but there are some special
-// cases like einsum and tensordot where one can contract arbitrary dims.
-// NOLINTNEXTLINE(clang-diagnostic-unused-function)
-static std::vector<Dimname> compute_dot_product_outnames(
-    DimnameList tensor_names,
-    int64_t tensor_dotted_dim,
-    DimnameList other_names,
-    int64_t other_dotted_dim) {
-  int64_t num_outnames = tensor_names.size() + other_names.size() - 2;
-  if (num_outnames == 0) {
-    return {};
-  }
-  std::vector<Dimname> outnames(num_outnames, Dimname::wildcard());
-  int64_t index = 0;
-  for (const auto j : c10::irange(static_cast<int64_t>(tensor_names.size()))) {
-    if (j == tensor_dotted_dim) continue;
-    outnames[index++] = tensor_names[j];
-  }
-  for (const auto j : c10::irange(static_cast<int64_t>(other_names.size()))) {
-    if (j == other_dotted_dim) continue;
-    outnames[index++] = other_names[j];
-  }
-  return outnames;
-}
-
 static void check_feature_names_are_distinct(
     DimnameList self_names,
     DimnameList other_names,
@@ -306,36 +279,6 @@ static void check_feature_names_are_distinct(
     ". Please rename the input tensors with `Tensor.rename` to prevent this.");
 }
 
-// NOLINTNEXTLINE(clang-diagnostic-unused-function)
-static DimnameList batch_dims(DimnameList names) {
-  if (names.size() <= 2) {
-    return {};
-  }
-  return DimnameList(names.begin(), names.end() - 2);
-}
-
-// NOLINTNEXTLINE(clang-diagnostic-unused-function)
-static DimnameList feature_dims(DimnameList names) {
-  if (names.size() <= 2) {
-    return names;
-  }
-  return DimnameList(names.end() - 2, 2);
-}
-
-// NOLINTNEXTLINE(clang-diagnostic-unused-function)
-static bool are_distinct(DimnameList batch_dims, DimnameList feature_dims) {
-  for (const auto& target : feature_dims) {
-    if (target.isWildcard()) {
-      continue;
-    }
-    if (std::any_of(batch_dims.begin(), batch_dims.end(),
-        [&](const Dimname& dim) { return target == dim; })) {
-      return false;
-    }
-  }
-  return true;
-}
-
 static int64_t num_batch_dims(DimnameList names) {
   if (names.size() <= 2) {
     return 0;
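Worth noting: the deleted helpers already carried NOLINTNEXTLINE(clang-diagnostic-unused-function) annotations, but NOLINT comments only silence clang-tidy; the compiler's own -Wunused-function diagnostic ignores them, so under -Werror=unused-function dead code has to be deleted (or the warning disabled per file) rather than annotated. A small illustration (file hypothetical):

    // nolint_demo.cpp -- the NOLINT comment pacifies clang-tidy, yet
    // `g++ -c -Werror=unused-function nolint_demo.cpp` still fails.
    // NOLINTNEXTLINE(clang-diagnostic-unused-function)
    static int never_called() {
      return 0;
    }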

aten/src/ATen/native/BinaryOps.cpp

-20

@@ -12,26 +12,6 @@
 #include <torch/library.h>
 
 namespace at {
-namespace native {
-
-// These are still needed because we don't have C++ conversions from number
-// types (int, float, etc.) to Tensor (only to Scalar). They're not exposed
-// to Python.
-
-static void check_convert(const Scalar& scalar, ScalarType scalarType) {
-  // Validate that is possible to convert scalar to tensor dtype without
-  // overflow
-  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
-      at::ScalarType::Bool,
-      at::ScalarType::BFloat16,
-      at::ScalarType::Half,
-      at::ScalarType::ComplexHalf,
-      scalarType,
-      "check_convert",
-      [&] { scalar.to<scalar_t>(); });
-}
-
-} // namespace native
 
 namespace meta {
aten/src/ATen/native/ReduceOps.cpp

-12

@@ -1111,18 +1111,6 @@ Tensor nansum(const Tensor& self, IntArrayRef dim, bool keepdim, c10::optional<S
   return at::native::nansum_out(self, dim, keepdim, dtype, result);
 }
 
-static Tensor& prod_out_impl(Tensor& result, const Tensor& self, IntArrayRef dim,
-    bool keepdim, c10::optional<ScalarType> opt_dtype) {
-  ScalarType dtype = get_dtype_from_result(result, opt_dtype);
-  auto iter = make_reduction("prod", result, self, dim, keepdim, dtype);
-  if (iter.numel() == 0) {
-    result.fill_(1);
-  } else {
-    prod_stub(iter.device_type(), iter);
-  }
-  return result;
-}
-
 // NOTE: this could be implemented via diag and sum, but this has perf problems,
 // see https://github.com/pytorch/pytorch/pull/47305,
 Tensor trace_cpu(const Tensor& self) {

aten/src/ATen/native/quantized/AffineQuantizer.cpp

-5

@@ -35,11 +35,6 @@ void checkRoundingMode(const std::string& fn_name) {
   return;
 }
 
-void checkCPUTensor(const std::string& fn_name, const Tensor& t) {
-  TORCH_CHECK(
-      t.device().type() == kCPU, fn_name, " only supports CPU device type.");
-}
-
 void checkFloatTensor(const std::string& fn_name, const Tensor& t) {
   TORCH_CHECK(
       t.scalar_type() == kFloat, fn_name, " expects a Float Tensor, got ",

c10/test/util/exception_test.cpp

+3 -5

@@ -5,9 +5,6 @@
 using c10::Error;
 
 namespace {
-bool throw_func() {
-  throw std::runtime_error("I'm throwing...");
-}
 
 template <class Functor>
 inline void expectThrowsEq(Functor&& functor, const char* expectedMessage) {
@@ -26,9 +23,10 @@ TEST(ExceptionTest, TORCH_INTERNAL_ASSERT_DEBUG_ONLY) {
 #ifdef NDEBUG
   // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
   ASSERT_NO_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false));
-  // Does nothing - `throw_func()` should not be evaluated
+  // Does nothing - `throw ...` should not be evaluated
   // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
-  ASSERT_NO_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(throw_func()));
+  ASSERT_NO_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+      (throw std::runtime_error("I'm throwing..."), true)));
 #else
   ASSERT_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false), c10::Error);
   ASSERT_NO_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(true));
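This rewrite works because a throw-expression has type void and may appear as the left operand of the comma operator, so (throw std::runtime_error(...), true) is a well-formed bool expression that throws if evaluated. The test can therefore drop throw_func, which would be flagged as unused once NDEBUG compiles the macro's argument away, while still checking that the macro never evaluates its condition. A standalone sketch of the idiom (macro hypothetical):

    #include <stdexcept>

    // Release-mode stand-in for an assert-style macro: the condition is
    // discarded unevaluated, as TORCH_INTERNAL_ASSERT_DEBUG_ONLY does
    // under NDEBUG.
    #define CHECK_DEBUG_ONLY(cond) ((void)0)

    int main() {
      // Well-formed bool expression; evaluating it would throw, but the
      // macro never evaluates it, so this returns normally.
      CHECK_DEBUG_ONLY((throw std::runtime_error("unreachable"), true));
      return 0;
    }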

caffe2/ideep/operators/adam_op.cc

-25

@@ -4,31 +4,6 @@ using namespace caffe2;
 
 namespace {
 
-// NOLINTNEXTLINE(clang-diagnostic-unused-function)
-void adam_ideep_update(
-    int N,
-    const float* g,
-    const float* m,
-    const float* v,
-    float* ng,
-    float* nm,
-    float* nv,
-    float beta1,
-    float beta2,
-    float eps_hat,
-    float correction,
-    const float* lr) {
-#ifdef _OPENMP
-#pragma omp parallel for schedule(static)
-#endif
-  for (auto i = 0; i < N; ++i) {
-    float gi = g[i];
-    float mi = nm[i] = m[i] * beta1 + gi * (1 - beta1);
-    float vi = nv[i] = v[i] * beta2 + gi * gi * (1 - beta2);
-    ng[i] = lr[0] * correction * mi / (std::sqrt(vi) + eps_hat);
-  }
-}
-
 void adam_ideep_compute(
     int N,
     const float* w,

caffe2/opt/onnxifi_transformer.cc

-22

@@ -31,28 +31,6 @@ std::unordered_map<std::string, TensorShape> stripShapeInfoMap(
   return shape_map;
 }
 
-// NOLINTNEXTLINE(clang-diagnostic-unused-function)
-uint64_t onnxifiDataType(caffe2::TensorProto::DataType t) {
-#define CAFFE2_TO_ONNXIFI_TYPE(x, y) \
-  case (caffe2::TensorProto::x):     \
-    return y
-  switch (t) {
-    CAFFE2_TO_ONNXIFI_TYPE(FLOAT, ONNXIFI_DATATYPE_FLOAT32);
-    CAFFE2_TO_ONNXIFI_TYPE(INT8, ONNXIFI_DATATYPE_INT8);
-    CAFFE2_TO_ONNXIFI_TYPE(UINT8, ONNXIFI_DATATYPE_UINT8);
-    CAFFE2_TO_ONNXIFI_TYPE(INT16, ONNXIFI_DATATYPE_INT16);
-    CAFFE2_TO_ONNXIFI_TYPE(UINT16, ONNXIFI_DATATYPE_UINT16);
-    CAFFE2_TO_ONNXIFI_TYPE(INT32, ONNXIFI_DATATYPE_INT32);
-    CAFFE2_TO_ONNXIFI_TYPE(INT64, ONNXIFI_DATATYPE_INT64);
-    CAFFE2_TO_ONNXIFI_TYPE(FLOAT16, ONNXIFI_DATATYPE_FLOAT16);
-    default:
-      LOG(WARNING) << "Unsupported Caffe2 tensor type: " << t
-                   << ", fallback to FLOAT";
-      return ONNXIFI_DATATYPE_FLOAT32;
-  }
-#undef CAFFE2_TO_ONNXIFI_TYPE
-}
-
 std::vector<::ONNX_NAMESPACE::ValueInfoProto> convertToValueInfo(
     const std::vector<std::string>& names,
     const std::unordered_map<std::string, TensorShape>& shape_hints,

test/cpp/jit/torch_python_test.cpp

+21 -20

@@ -34,29 +34,30 @@ void testEvalModeForLoadedModule() {
   AT_ASSERT(module.attr("dropout").toModule().is_training());
 }
 
-void testSerializationInterop() {
-  if (isSandcastle()) {
-    // The module file to load is not generated in Sandcastle
-    return;
-  }
+// TODO: this test never ran before and is broken.
+// void testSerializationInterop() {
+//   if (isSandcastle()) {
+//     // The module file to load is not generated in Sandcastle
+//     return;
+//   }
 
-  // This should be generated by `test/cpp/jit/tests_setup.py`
-  std::ifstream input_stream("ivalue.pt");
-  std::vector<char> input;
-  input.insert(
-      input.begin(),
-      std::istream_iterator<char>(input_stream),
-      std::istream_iterator<char>());
-  IValue ivalue = pickle_load(input);
+//   // This should be generated by `test/cpp/jit/tests_setup.py`
+//   std::ifstream input_stream("ivalue.pt");
+//   std::vector<char> input;
+//   input.insert(
+//       input.begin(),
+//       std::istream_iterator<char>(input_stream),
+//       std::istream_iterator<char>());
+//   IValue ivalue = pickle_load(input);
 
-  auto elements = ivalue.toTupleRef().elements();
-  auto ones = torch::ones({2, 2});
-  AT_ASSERT(ones.equal(elements.at(0).toTensor()));
+//   auto elements = ivalue.toTupleRef().elements();
+//   auto ones = torch::ones({2, 2});
+//   AT_ASSERT(ones.equal(elements.at(0).toTensor()));
 
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-  auto twos = torch::ones({3, 5}) * 2;
-  AT_ASSERT(twos.equal(elements.at(1).toTensor()));
-}
+//   // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
+//   auto twos = torch::ones({3, 5}) * 2;
+//   AT_ASSERT(twos.equal(elements.at(1).toTensor()));
+// }
 
 void testTorchSaveError() {
   if (isSandcastle()) {

test/cpp/tensorexpr/test_conv.cpp

+2 -2

@@ -12,14 +12,14 @@ namespace jit {
 namespace te = torch::jit::tensorexpr;
 namespace F = torch::nn::functional;
 
+#ifdef TORCH_ENABLE_LLVM
+
 // Generate test data with few bits of precision, to minimize error
 // accumulation from floating-point reordering.
 static at::Tensor genTestData(c10::IntArrayRef args) {
   return at::trunc(at::randn(args) * 256.0f) / 256.0f;
 }
 
-#ifdef TORCH_ENABLE_LLVM
-
 TEST(Conv, DepthwiseConv2D) {
   constexpr int N = 1, C = 72, H = 56, W = 56;
   constexpr int K = 72, R = 3, S = 3;
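Moving genTestData below the #ifdef is the usual fix when a static helper's only callers are themselves conditionally compiled: in builds without TORCH_ENABLE_LLVM the helper would be defined with no uses and trip -Werror=unused-function. Schematically (names hypothetical):

    #ifdef FEATURE_X
    // Defined only when its callers exist, so builds without FEATURE_X
    // never see an unused static function.
    static int make_fixture() {
      return 42;
    }

    void feature_x_test() {
      (void)make_fixture();
    }
    #endif // FEATURE_X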

test/cpp/tensorexpr/tutorial.cpp

+4

@@ -54,9 +54,13 @@
 
 using namespace torch::jit::tensorexpr;
 
+#ifdef TORCH_ENABLE_LLVM
+
 // Helper function to print a snippet from a big multi-line string
 static void printLinesToFrom(const std::string& input_str, int from, int to);
 
+#endif
+
 int main(int argc, char* argv[]) {
   std::cout << "*** Structure of tensor expressions and statements ***"
             << std::endl;

torch/csrc/DynamicTypes.cpp

-16

@@ -28,22 +28,6 @@ std::array<THPDtype*, static_cast<int>(at::ScalarType::NumOptions)> dtype_regist
 
 std::array<THPLayout*, static_cast<int>(at::Layout::NumOptions)> layout_registry = {};
 
-at::Backend get_backend(bool is_cuda, bool is_sparse) {
-  if (is_cuda) {
-    if (is_sparse){
-      return at::Backend::SparseCUDA;
-    } else {
-      return at::Backend::CUDA;
-    }
-  } else {
-    if (is_sparse){
-      return at::Backend::SparseCPU;
-    } else {
-      return at::Backend::CPU;
-    }
-  }
-}
-
 at::DeprecatedTypeProperties* get_type_properties(at::DeviceType device_type, at::ScalarType scalarType) {
   at::Backend backend;
   if (device_type == at::kCPU) {

torch/csrc/Storage.cpp

-14

@@ -337,20 +337,6 @@ static PyObject * THPStorage_device(THPStorage* self, void *unused) {
   END_HANDLE_TH_ERRORS
 }
 
-static PyObject * THPStorage_dtype(THPStorage *self, void *unused)
-{
-  HANDLE_TH_ERRORS
-  return torch::autograd::utils::wrap(
-      torch::getTHPDtype(at::typeMetaToScalarType(
-#ifdef THQUANTIZED
-          caffe2::TypeMeta::Make<quantized_t>()
-#else
-          caffe2::TypeMeta::Make<uint8_t>()
-#endif
-          )));
-  END_HANDLE_TH_ERRORS
-}
-
 typedef PyObject *(*getter)(PyObject *, void *);
 
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables)

torch/csrc/autograd/init.cpp

-12

@@ -480,18 +480,6 @@ static PyObject * set_autocast_cpu_dtype(PyObject* _unused, PyObject *arg) {
   END_HANDLE_TH_ERRORS
 }
 
-static const char* scalarTypeName(const at::ScalarType type) {
-  switch (type) {
-#define DEFINE_CASE(ctype, name) \
-  case at::ScalarType::name:     \
-    return #ctype;
-    AT_FORAUTOCAST_SCALAR_TYPES(DEFINE_CASE)
-#undef DEFINE_CASE
-    default:
-      throw std::runtime_error("unknown scalar type for autocast");
-  }
-}
-
 static PyObject * get_autocast_gpu_dtype(PyObject* _unused, PyObject *arg){
   HANDLE_TH_ERRORS
   at::ScalarType current_dtype = at::autocast::get_autocast_gpu_dtype();

torch/csrc/jit/codegen/cuda/codegen.cpp

-6

@@ -27,12 +27,6 @@ std::string ptrType(DataType dt) {
   return ss.str();
 }
 
-std::string refType(DataType dt) {
-  std::stringstream ss;
-  ss << dt << "&";
-  return ss.str();
-}
-
 //! Utility class to build an argument list
 class ArgumentBuilder {
  public:
