Skip to content

Commit 1a4f997

Browse files
Mikhail Zolotukhinfacebook-github-bot
Mikhail Zolotukhin
authored and committed
[TensorExpr] Add a class for representing data type. (pytorch#33217)
Summary: Pull Request resolved: pytorch#33217 Test Plan: Imported from OSS Differential Revision: D19848380 Pulled By: ZolotukhinM fbshipit-source-id: d8683f8fc4555d2456cd2a7c827d8e8231915b49
1 parent 089d658 commit 1a4f997

File tree

12 files changed

+435
-0
lines changed

12 files changed

+435
-0
lines changed

caffe2/CMakeLists.txt

+2
Original file line numberDiff line numberDiff line change
@@ -456,6 +456,7 @@ if (NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
456456
${TORCH_SRC_DIR}/csrc/jit/vararg_functions.cpp
457457

458458
${TORCH_SRC_DIR}/csrc/jit/tensorexpr/mem_arena.cpp
459+
${TORCH_SRC_DIR}/csrc/jit/tensorexpr/types.cpp
459460
)
460461

461462
if (NOT INTERN_DISABLE_MOBILE_INTERP)
@@ -757,6 +758,7 @@ ENDIF()
757758

758759
if (BUILD_TEST AND NOT MSVC AND NOT USE_ROCM)
759760
add_subdirectory(${TORCH_ROOT}/test/cpp/jit ${CMAKE_BINARY_DIR}/test_jit)
761+
add_subdirectory(${TORCH_ROOT}/test/cpp/tensorexpr ${CMAKE_BINARY_DIR}/test_tensorexpr)
760762
if (USE_DISTRIBUTED)
761763
add_subdirectory(${TORCH_ROOT}/test/cpp/rpc ${CMAKE_BINARY_DIR}/test_cpp_rpc)
762764
endif()

test/cpp/tensorexpr/CMakeLists.txt

+39
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
# Build configuration for the tensorexpr C++ test binary.
set(TENSOREXPR_TEST_ROOT ${TORCH_ROOT}/test/cpp/tensorexpr)

# Test sources are globbed so new test_*.cpp files are picked up without
# editing this file (a cmake re-run is still required; see README.md).
file(GLOB TENSOREXPR_TEST_SRCS ${TENSOREXPR_TEST_ROOT}/test_*.cpp)
set(TENSOREXPR_TEST_SRCS ${TENSOREXPR_TEST_SRCS} PARENT_SCOPE)

add_executable(test_tensorexpr
  ${TORCH_ROOT}/test/cpp/common/main.cpp
  ${TENSOREXPR_TEST_ROOT}/gtest.cpp
  ${TENSOREXPR_TEST_SRCS})

target_link_libraries(test_tensorexpr PRIVATE torch gtest)
target_include_directories(test_tensorexpr PRIVATE ${ATen_CPU_INCLUDE})

# GPU backends: link the matching runtime libraries and define the macro the
# tests use to conditionally register GPU-only cases.
if (USE_CUDA)
  target_link_libraries(test_tensorexpr PRIVATE
    ${CUDA_LIBRARIES}
    ${CUDA_NVRTC_LIB}
    ${CUDA_CUDA_LIB}
    ${TORCH_CUDA_LIBRARIES})

  target_compile_definitions(test_tensorexpr PRIVATE USE_CUDA)
elseif (USE_ROCM)
  target_link_libraries(test_tensorexpr PRIVATE
    ${ROCM_HIPRTC_LIB}
    ${PYTORCH_HIP_HCC_LIBRARIES}
    ${TORCH_CUDA_LIBRARIES})

  target_link_libraries(test_tensorexpr PRIVATE caffe2_gpu)

  target_compile_definitions(test_tensorexpr PRIVATE USE_ROCM)
endif()

if (INSTALL_TEST)
  install(TARGETS test_tensorexpr DESTINATION bin)
  # Install PDB files for MSVC builds
  if (MSVC AND BUILD_SHARED_LIBS)
    install(FILES $<TARGET_PDB_FILE:test_tensorexpr> DESTINATION bin OPTIONAL)
  endif()
endif()

test/cpp/tensorexpr/README.md

+55
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,55 @@
1+
# TensorExpr C++ Tests
2+
3+
## How to add a new test
4+
First, create a new test file. Test files should have be placed in this
5+
directory, with a name that starts with `test_`, like `test_foo.cpp`.
6+
7+
Here is an example test file you can copy-paste.
8+
```cpp
9+
#include <test/cpp/tensorexpr/test_base.h>
10+
11+
// Tests go in torch::jit
12+
namespace torch {
13+
namespace jit {
14+
15+
// 1. Test cases are void() functions.
16+
// 2. They start with the prefix `test`
17+
void testCaseOne() {
18+
// ...
19+
}
20+
21+
void testCaseTwo() {
22+
// ...
23+
}
24+
}
25+
}
26+
```
27+
28+
Then, register your test in `tests.h`:
29+
```cpp
30+
// Add to TH_FORALL_TESTS_CUDA instead for CUDA-requiring tests
31+
#define TH_FORALL_TESTS(_) \
32+
_(ADFormulas) \
33+
_(Attributes) \
34+
...
35+
_(CaseOne) // note that the `test` prefix is omitted.
36+
_(CaseTwo)
37+
```
38+
39+
We glob all the test files together in `CMakeLists.txt` so that you don't
40+
have to edit it every time you add a test. Unfortunately, this means that in
41+
order to get the build to pick up your new test file, you need to re-run
42+
cmake:
43+
```
44+
python setup.py build --cmake
45+
```
46+
47+
## How do I run the tests?
48+
The following commands assume you are in PyTorch root.
49+
50+
```bash
51+
# (re)build the test binary
52+
ninja build/bin/test_tensorexpr
53+
# run
54+
build/bin/test_tensorexpr --gtest_filter='glob_style_filter*'
55+
```

test/cpp/tensorexpr/__init__.py

Whitespace-only changes.

test/cpp/tensorexpr/gtest.cpp

+16
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
#include <test/cpp/tensorexpr/tests.h>
2+
3+
#include <gtest/gtest.h>
4+
5+
namespace torch {
6+
namespace jit {
7+
8+
#define TENSOREXPR_GTEST(name) \
9+
TEST(TensorExprTest, name) { \
10+
test##name(); \
11+
}
12+
TH_FORALL_TESTS(TENSOREXPR_GTEST)
13+
#undef TENSOREXPR_GTEST
14+
15+
} // namespace jit
16+
} // namespace torch

test/cpp/tensorexpr/test_base.h

+31
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
#pragma once
2+
3+
#include <gtest/gtest.h>
4+
#include <test/cpp/common/support.h>
5+
6+
namespace torch {
7+
namespace jit {
8+
namespace tensorexpr {
9+
10+
// Asserts that v1 and v2 have the same length, then checks elementwise that
// |v1[i] - v2[i]| <= threshold. On failure the element index and `name` are
// appended to the gtest message to identify the offending comparison.
template <typename U, typename V>
void ExpectAllNear(
    const std::vector<U>& v1,
    const std::vector<U>& v2,
    V threshold,
    const std::string& name = "") {
  ASSERT_EQ(v1.size(), v2.size());
  // size_t index avoids the signed/unsigned comparison warning (and the
  // theoretical overflow) that `int i < v1.size()` produces.
  for (size_t i = 0; i < v1.size(); i++) {
    EXPECT_NEAR(v1[i], v2[i], threshold)
        << "element index: " << i << ", name: " << name;
  }
}
22+
23+
// Asserts that every element of `vec` compares equal to `val`.
template <typename T>
static void assertAllEqual(const std::vector<T>& vec, const T& val) {
  for (size_t i = 0; i < vec.size(); ++i) {
    ASSERT_EQ(vec[i], val);
  }
}
29+
} // namespace tensorexpr
30+
} // namespace jit
31+
} // namespace torch

test/cpp/tensorexpr/test_type.cpp

+37
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,37 @@
1+
#include "test/cpp/tensorexpr/test_base.h"
2+
#include "torch/csrc/jit/tensorexpr/mem_arena.h"
3+
#include "torch/csrc/jit/tensorexpr/types.h"
4+
5+
namespace torch {
6+
namespace jit {
7+
using namespace torch::jit::tensorexpr;
8+
9+
void testTypeTest01() {
10+
KernelScope kernel_scope;
11+
{
12+
Dtype dt1 = kInt32;
13+
EXPECT_EQ(dt1, kInt32);
14+
}
15+
{
16+
Dtype dt2_a(kInt32, 8);
17+
Dtype dt2_b(kInt32, 4);
18+
Dtype dt2_c(kInt32, 8);
19+
EXPECT_EQ(dt2_a, dt2_c);
20+
EXPECT_NE(dt2_a, dt2_b);
21+
}
22+
{
23+
EXPECT_EQ(kInt32, ToDtype<int>());
24+
EXPECT_EQ(kFloat32, ToDtype<float>());
25+
}
26+
{
27+
Dtype int32x8(kInt32, 8);
28+
Dtype float32x8(kFloat32, 8);
29+
EXPECT_NE(int32x8, float32x8);
30+
EXPECT_EQ(float32x8, BinaryOpDtype(int32x8, float32x8));
31+
EXPECT_EQ(float32x8, BinaryOpDtype(float32x8, int32x8));
32+
EXPECT_EQ(int32x8, BinaryOpDtype(int32x8, int32x8));
33+
EXPECT_EQ(float32x8, BinaryOpDtype(float32x8, float32x8));
34+
}
35+
}
36+
} // namespace jit
37+
} // namespace torch

test/cpp/tensorexpr/test_utils.h

+14
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
#pragma once
2+
3+
#include <memory>
4+
#include <vector>
5+
6+
#include "test/cpp/tensorexpr/test_base.h"
7+
#include "torch/csrc/jit/testing/file_check.h"
8+
9+
namespace torch {
10+
namespace jit {
11+
using namespace torch::jit::tensorexpr;
12+
13+
} // namespace jit
14+
} // namespace torch

test/cpp/tensorexpr/tests.h

+24
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
#pragma once

/**
 * See README.md for instructions on how to add a new test.
 */
#include <c10/macros/Export.h>
#include <torch/csrc/WindowsTorchApiMacro.h>

namespace torch {
namespace jit {

// Registry of tensorexpr C++ tests. Each entry `_(Name)` corresponds to a
// free function `void testName();` defined in a test_*.cpp file.
// NOTE: the dangling `\` continuations after the last entry of each macro
// were removed — they silently spliced whatever line followed into the macro
// body, which breaks the moment the separating blank line is deleted.
#define TH_FORALL_TESTS(_) \
  _(TypeTest01)

// CUDA-only tests; currently empty.
#define TH_FORALL_TESTS_CUDA(_)

// Forward-declare every registered test function.
#define DECLARE_TENSOREXPR_TEST(name) void test##name();
TH_FORALL_TESTS(DECLARE_TENSOREXPR_TEST)
#ifdef USE_CUDA
TH_FORALL_TESTS_CUDA(DECLARE_TENSOREXPR_TEST)
#endif
#undef DECLARE_TENSOREXPR_TEST

} // namespace jit
} // namespace torch

tools/build_variables.bzl

+2
Original file line numberDiff line numberDiff line change
@@ -191,10 +191,12 @@ libtorch_sources = [
191191
"torch/csrc/jit/mobile/interpreter.cpp",
192192
"torch/csrc/jit/mobile/type_parser.cpp",
193193
"torch/csrc/jit/tensorexpr/mem_arena.cpp",
194+
"torch/csrc/jit/tensorexpr/types.cpp",
194195
"torch/csrc/utils/byte_order.cpp",
195196
"torch/csrc/utils/tensor_flatten.cpp",
196197
"torch/csrc/utils/variadic.cpp",
197198
# NOTE(review): mem_arena.cpp already appears a few entries above, and
# types.cpp is being added twice by this change — deduplicate these entries.
"torch/csrc/jit/tensorexpr/mem_arena.cpp",
199+
"torch/csrc/jit/tensorexpr/types.cpp",
198200
]
199201

200202
libtorch_cuda_sources = [

torch/csrc/jit/tensorexpr/types.cpp

+100
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,100 @@
1+
#include "torch/csrc/jit/tensorexpr/types.h"

#include <cstdint>
#include <sstream>
#include <string>

#include <c10/util/Logging.h>
#include <torch/csrc/WindowsTorchApiMacro.h>
5+
6+
namespace torch {
7+
namespace jit {
8+
namespace tensorexpr {
9+
10+
// Tags distinguishing the scalar element kind stored inside a Dtype.
// Values are written out explicitly (they match the implicit defaults) to
// make clear they must stay in sync with the integer scalar_type_ member
// that Dtype stores and casts back to this enum.
enum ScalarType {
  kScalarUninitialized = 0,
  kScalarHandle = 1,
  kScalarInt32 = 2,
  kScalarFloat32 = 3,
};
16+
17+
// Returns the single-lane Dtype constant corresponding to this dtype's
// scalar kind (i.e. drops the lane count). Aborts on an unknown tag.
Dtype Dtype::scalar_type() const {
  const auto tag = static_cast<ScalarType>(scalar_type_);
  if (tag == kScalarUninitialized) {
    return kUninitialized;
  }
  if (tag == kScalarHandle) {
    return kHandle;
  }
  if (tag == kScalarInt32) {
    return kInt32;
  }
  if (tag == kScalarFloat32) {
    return kFloat32;
  }
  LOG(FATAL) << "invalid scalar type: " << scalar_type_;
  return kUninitialized;
}
32+
33+
// Canonical single-lane dtype instances, exported so tests and user code can
// refer to them directly (declared extern in types.h).
TORCH_API Dtype kInt32{kScalarInt32, 1};
TORCH_API Dtype kFloat32{kScalarFloat32, 1};
TORCH_API Dtype kHandle{kScalarHandle, 1};
TORCH_API Dtype kUninitialized{kScalarUninitialized, 1};
37+
38+
// Streams a human-readable form of `dtype`, e.g. "int32" or "float32x8".
// The "xN" lane suffix is appended only for multi-lane dtypes. Aborts on an
// unknown scalar tag.
TORCH_API std::ostream& operator<<(std::ostream& stream, const Dtype& dtype) {
  switch (static_cast<ScalarType>(dtype.scalar_type_)) {
    case kScalarUninitialized:
      stream << "uninitialized";
      break;
    case kScalarHandle:
      stream << "handle";
      break;
    case kScalarInt32:
      stream << "int32";
      break;
    case kScalarFloat32:
      stream << "float32";
      break;
    default:
      LOG(FATAL) << "invalid scalar type: " << dtype.scalar_type_;
  }
  if (dtype.lanes() > 1) {
    // (A stray empty statement `;` after this line was removed.)
    stream << "x" << dtype.lanes();
  }
  return stream;
}
61+
62+
int Dtype::byte_size() const {
63+
int scalar_size = -1;
64+
switch (scalar_type_) {
65+
case kScalarInt32:
66+
scalar_size = sizeof(int32);
67+
break;
68+
case kScalarFloat32:
69+
scalar_size = sizeof(float);
70+
break;
71+
default:
72+
throw std::runtime_error(
73+
"invalid scalar type; " + std::to_string(scalar_type_));
74+
}
75+
return scalar_size * lanes();
76+
}
77+
78+
std::string Dtype::ToCppString() const {
79+
if (scalar_type_ == kScalarInt32) {
80+
return "int";
81+
} else if (scalar_type_ == kScalarFloat32) {
82+
return "float";
83+
} else {
84+
throw std::runtime_error("Invalid dtype: " + std::to_string(scalar_type_));
85+
}
86+
}
87+
88+
} // namespace tensorexpr
89+
} // namespace jit
90+
} // namespace torch
91+
92+
namespace std {
93+
94+
std::string to_string(const Dtype& dtype) {
95+
std::ostringstream oss;
96+
oss << dtype;
97+
return oss.str();
98+
}
99+
100+
} // namespace std

0 commit comments

Comments
 (0)