Commit f4dcf2a

cyyever authored and pytorchmergebot committed
[1/N] Change #include <c10/util/Optional.h> to #include <optional> (pytorch#128301)
Fixes #ISSUE_NUMBER

Pull Request resolved: pytorch#128301
Approved by: https://github.com/ezyang, https://github.com/r-barnes
1 parent f053be2 commit f4dcf2a
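
Every file in this commit gets the same mechanical substitution: the <c10/util/Optional.h> include is replaced by the standard <optional> header, and c10::nullopt / c10::make_optional become std::nullopt / std::make_optional. A minimal before/after sketch of the pattern (find_label is a hypothetical function used only for illustration, not something from the PyTorch sources):

#include <optional>  // was: #include <c10/util/Optional.h>
#include <string>

// Hypothetical example mirroring the substitution made throughout this commit.
std::optional<std::string> find_label(int id) {
  if (id < 0) {
    return std::nullopt;  // was: return c10::nullopt;
  }
  // was: return c10::make_optional(...);
  return std::make_optional("label-" + std::to_string(id));
}

The spelling change is behavior-preserving given that c10::optional is, at this point in the codebase, an alias for std::optional.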

328 files changed (+1204 -1202)

aten/src/ATen/CPUGeneratorImpl.h (+1 -1)

@@ -3,7 +3,7 @@
 #include <ATen/core/Generator.h>
 #include <ATen/core/MT19937RNGEngine.h>
 #include <c10/core/GeneratorImpl.h>
-#include <c10/util/Optional.h>
+#include <optional>

 namespace at {

aten/src/ATen/InferSize.h (+1 -1)

@@ -4,7 +4,7 @@
 #include <c10/core/ScalarType.h>
 #include <c10/core/SymIntArrayRef.h>
 #include <c10/util/DimVector.h>
-#include <c10/util/Optional.h>
+#include <optional>
 #include <sstream>
 #include <vector>

aten/src/ATen/SavedTensorHooks.cpp (+1 -1)

@@ -32,7 +32,7 @@ void SavedTensorDefaultHooks::disable(const std::string& message) {
 }

 void SavedTensorDefaultHooks::enable() {
-  tls.disabled_error_message = c10::nullopt;
+  tls.disabled_error_message = std::nullopt;
 }

 /* static */ bool SavedTensorDefaultHooks::set_tracing(bool is_tracing) {

aten/src/ATen/SavedTensorHooks.h (+1 -1)

@@ -1,8 +1,8 @@
 #pragma once

 #include <c10/macros/Export.h>
-#include <c10/util/Optional.h>
 #include <c10/util/python_stub.h>
+#include <optional>
 #include <stack>
 #include <string>

aten/src/ATen/TensorIndexing.h (+1 -1)

@@ -5,8 +5,8 @@
 #include <ATen/core/Tensor.h>
 #include <ATen/core/TensorBody.h>
 #include <c10/core/SymInt.h>
-#include <c10/util/Optional.h>
 #include <c10/util/irange.h>
+#include <optional>

 #ifndef AT_PER_OPERATOR_HEADERS
 #include <ATen/Functions.h>

aten/src/ATen/native/BatchLinearAlgebra.h (+1 -1)

@@ -1,6 +1,6 @@
 #pragma once

-#include <c10/util/Optional.h>
+#include <optional>
 #include <c10/util/string_view.h>
 #include <ATen/Config.h>
 #include <ATen/native/DispatchStub.h>

aten/src/ATen/record_function.h (+1 -1)

@@ -3,8 +3,8 @@
 #include <ATen/core/ivalue.h>
 #include <ATen/core/operator_name.h>
 #include <c10/macros/Export.h>
-#include <c10/util/Optional.h>
 #include <c10/util/SmallVector.h>
+#include <optional>

 #include <array>
 #include <functional>

c10/core/ConstantSymNodeImpl.h (+3 -3)

@@ -3,8 +3,8 @@
 #include <c10/core/SymNodeImpl.h>
 #include <c10/macros/Export.h>
 #include <c10/util/Exception.h>
-#include <c10/util/Optional.h>
 #include <cstdint>
+#include <optional>
 #include <string>
 #include <variant>

@@ -73,14 +73,14 @@ class C10_API ConstantSymNodeImpl : public SymNodeImpl {
     if constexpr (is_int_()) {
       return ::std::get<int64_t>(value_);
     } else {
-      return c10::nullopt;
+      return std::nullopt;
     }
   }
   std::optional<bool> constant_bool() override {
     if constexpr (is_bool_()) {
       return ::std::get<bool>(value_);
     } else {
-      return c10::nullopt;
+      return std::nullopt;
     }
   }
   bool is_constant() override {

c10/core/ScalarTypeToTypeMeta.h (+1 -1)

@@ -30,7 +30,7 @@ inline ScalarType typeMetaToScalarType(caffe2::TypeMeta dtype) {
 inline optional<at::ScalarType> optTypeMetaToScalarType(
     optional<caffe2::TypeMeta> type_meta) {
   if (!type_meta.has_value()) {
-    return c10::nullopt;
+    return std::nullopt;
   }
   return type_meta->toScalarType();
 }

c10/core/SymBool.h (+2 -2)

@@ -3,9 +3,9 @@
 #include <c10/core/SymNodeImpl.h>
 #include <c10/macros/Export.h>
 #include <c10/util/Exception.h>
-#include <c10/util/Optional.h>
 #include <c10/util/intrusive_ptr.h>
 #include <cstdint>
+#include <optional>
 #include <ostream>
 #include <utility>

@@ -68,7 +68,7 @@ class C10_API SymBool {

   std::optional<bool> maybe_as_bool() const {
     if (!is_heap_allocated()) {
-      return c10::make_optional(data_);
+      return std::make_optional(data_);
     }
     return toSymNodeImplUnowned()->constant_bool();
   }

c10/core/SymInt.h (+2 -1)

@@ -10,6 +10,7 @@
 #include <cstdint>
 #include <iterator>
 #include <numeric>
+#include <optional>
 #include <ostream>
 #include <type_traits>

@@ -231,7 +232,7 @@ class C10_API SymInt {

   std::optional<int64_t> maybe_as_int() const {
     if (!is_heap_allocated()) {
-      return c10::make_optional(data_);
+      return std::make_optional(data_);
     }
     auto* node = toSymNodeImplUnowned();
     if (auto c = node->constant_int()) {

c10/core/SymIntArrayRef.h (+2 -2)

@@ -4,9 +4,9 @@
 #include <c10/util/ArrayRef.h>
 #include <c10/util/DimVector.h>
 #include <c10/util/Exception.h>
-#include <c10/util/Optional.h>
 #include <c10/util/irange.h>
 #include <cstdint>
+#include <optional>

 namespace c10 {
 using SymIntArrayRef = ArrayRef<SymInt>;

@@ -25,7 +25,7 @@ inline std::optional<at::IntArrayRef> asIntArrayRefSlowOpt(
     c10::SymIntArrayRef ar) {
   for (const c10::SymInt& sci : ar) {
     if (sci.is_heap_allocated()) {
-      return c10::nullopt;
+      return std::nullopt;
     }
   }

c10/core/SymNodeImpl.h (+6 -6)

@@ -3,9 +3,9 @@
 #include <c10/macros/Export.h>
 #include <c10/util/ArrayRef.h>
 #include <c10/util/Exception.h>
-#include <c10/util/Optional.h>
 #include <c10/util/intrusive_ptr.h>
 #include <cstdint>
+#include <optional>
 #include <ostream>
 #include <string>

@@ -207,19 +207,19 @@ class C10_API SymNodeImpl : public c10::intrusive_ptr_target {
     TORCH_CHECK(false, "NYI");
   };
   virtual std::optional<int64_t> nested_int() {
-    return c10::nullopt;
+    return std::nullopt;
   }
   virtual std::optional<int64_t> nested_int_coeff() {
-    return c10::nullopt;
+    return std::nullopt;
   }
   virtual std::optional<int64_t> constant_int() {
-    return c10::nullopt;
+    return std::nullopt;
   }
   virtual std::optional<bool> constant_bool() {
-    return c10::nullopt;
+    return std::nullopt;
   }
   virtual std::optional<int64_t> maybe_as_int() {
-    return c10::nullopt;
+    return std::nullopt;
   }
   virtual bool is_constant() {
     return false;

c10/core/SymbolicShapeMeta.cpp (+2 -2)

@@ -56,7 +56,7 @@ normalize_sym_sizes_strides(SymIntArrayRef sizes, SymIntArrayRef strides) {
     // Couldn't find. Tell the caller to do the normal computation
     // Alternately, if everything is hinted, we want the normal computation
     // too
-    return c10::nullopt;
+    return std::nullopt;
   }
   // Populate the SymNode array
   std::vector<SymNode> size_nodes;

@@ -69,7 +69,7 @@ normalize_sym_sizes_strides(SymIntArrayRef sizes, SymIntArrayRef strides) {
   for (const auto& s : strides) {
     stride_nodes.emplace_back(s.wrap_node(base));
   }
-  return c10::make_optional(
+  return std::make_optional(
       std::tuple<SymNode, std::vector<SymNode>, std::vector<SymNode>>(
           std::move(base), std::move(size_nodes), std::move(stride_nodes)));
 }

c10/core/TensorImpl.cpp (+1 -1)

@@ -8,9 +8,9 @@
 #include <c10/core/impl/PyInterpreter.h>
 #include <c10/core/impl/TorchDispatchModeTLS.h>
 #include <c10/util/Logging.h>
-#include <c10/util/Optional.h>
 #include <c10/util/accumulate.h>
 #include <c10/util/irange.h>
+#include <optional>

 #include <utility>

c10/core/TensorImpl.h (+7 -7)

@@ -24,12 +24,12 @@
 #include <c10/util/DimVector.h>
 #include <c10/util/Exception.h>
 #include <c10/util/Flags.h>
-#include <c10/util/Optional.h>
 #include <c10/util/accumulate.h>
 #include <c10/util/intrusive_ptr.h>
 #include <c10/util/irange.h>
 #include <c10/util/safe_numerics.h>
 #include <c10/util/typeid.h>
+#include <optional>

 #include <algorithm>
 #include <atomic>

@@ -233,8 +233,8 @@ struct C10_API ExtraMeta {
   std::unique_ptr<c10::SymbolicShapeMeta> symbolic_shape_meta_ = nullptr;
   std::unique_ptr<c10::NamedTensorMetaInterface> named_tensor_meta_ = nullptr;
   intrusive_ptr<c10::BackendMeta> backend_meta_ = nullptr;
-  std::optional<std::string> custom_data_ptr_error_msg_ = c10::nullopt;
-  std::optional<std::string> custom_storage_error_msg_ = c10::nullopt;
+  std::optional<std::string> custom_data_ptr_error_msg_ = std::nullopt;
+  std::optional<std::string> custom_storage_error_msg_ = std::nullopt;

   ExtraMeta() = default;
   ExtraMeta(const ExtraMeta& other) {

@@ -260,8 +260,8 @@ struct C10_API ExtraMeta {
       std::unique_ptr<c10::SymbolicShapeMeta> symbolic_shape_meta,
       std::unique_ptr<c10::NamedTensorMetaInterface> named_tensor_meta,
       intrusive_ptr<c10::BackendMeta> backend_meta,
-      std::optional<std::string> custom_data_ptr_error_msg = c10::nullopt,
-      std::optional<std::string> custom_storage_access_error_msg = c10::nullopt)
+      std::optional<std::string> custom_data_ptr_error_msg = std::nullopt,
+      std::optional<std::string> custom_storage_access_error_msg = std::nullopt)
       : symbolic_shape_meta_(std::move(symbolic_shape_meta)),
         named_tensor_meta_(std::move(named_tensor_meta)),
         backend_meta_(std::move(backend_meta)),

@@ -1737,7 +1737,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
   void set_sizes_and_strides(
       c10::SymIntArrayRef sizes,
       c10::SymIntArrayRef strides,
-      std::optional<c10::SymInt> storage_offset = c10::nullopt);
+      std::optional<c10::SymInt> storage_offset = std::nullopt);
   // This is renamed to avoid breaking overload BC
   void generic_set_sizes_contiguous(c10::SymIntArrayRef sizes);
   void generic_set_sizes_contiguous(c10::IntArrayRef sizes) {

@@ -1834,7 +1834,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
   void set_sizes_and_strides(
       IntArrayRef new_size,
       IntArrayRef new_stride,
-      std::optional<int64_t> storage_offset = c10::nullopt) {
+      std::optional<int64_t> storage_offset = std::nullopt) {
     TORCH_CHECK(
         allow_tensor_metadata_change(),
         "set_sizes_and_strides ",

c10/core/TensorOptions.h (+16 -16)

@@ -13,7 +13,7 @@
 #include <c10/macros/Export.h>
 #include <c10/macros/Macros.h>
 #include <c10/util/Exception.h>
-#include <c10/util/Optional.h>
+#include <optional>

 #include <cstdint>
 #include <iosfwd>

@@ -284,10 +284,10 @@ struct C10_API TensorOptions {
     return has_device_;
   }

-  /// Returns the device of the `TensorOptions`, or `c10::nullopt` if
+  /// Returns the device of the `TensorOptions`, or `std::nullopt` if
   /// device is not specified.
   std::optional<Device> device_opt() const noexcept {
-    return has_device_ ? c10::make_optional(device_) : c10::nullopt;
+    return has_device_ ? std::make_optional(device_) : std::nullopt;
   }

   /// Returns the device index of the `TensorOptions`.

@@ -305,10 +305,10 @@ struct C10_API TensorOptions {
     return has_dtype_;
   }

-  /// Returns the dtype of the `TensorOptions`, or `c10::nullopt` if
+  /// Returns the dtype of the `TensorOptions`, or `std::nullopt` if
   /// device is not specified.
   std::optional<caffe2::TypeMeta> dtype_opt() const noexcept {
-    return has_dtype_ ? c10::make_optional(dtype_) : c10::nullopt;
+    return has_dtype_ ? std::make_optional(dtype_) : std::nullopt;
   }

   /// Returns the layout of the `TensorOptions`.

@@ -321,10 +321,10 @@ struct C10_API TensorOptions {
     return has_layout_;
   }

-  /// Returns the layout of the `TensorOptions`, or `c10::nullopt` if
+  /// Returns the layout of the `TensorOptions`, or `std::nullopt` if
   /// layout is not specified.
   std::optional<Layout> layout_opt() const noexcept {
-    return has_layout_ ? c10::make_optional(layout_) : c10::nullopt;
+    return has_layout_ ? std::make_optional(layout_) : std::nullopt;
   }

   /// Returns the `requires_grad` property of the `TensorOptions`.

@@ -338,10 +338,10 @@ struct C10_API TensorOptions {
   }

   /// Returns the `requires_grad` property of the `TensorOptions`, or
-  /// `c10::nullopt` if `requires_grad` is not specified.
+  /// `std::nullopt` if `requires_grad` is not specified.
   std::optional<bool> requires_grad_opt() const noexcept {
-    return has_requires_grad_ ? c10::make_optional(requires_grad_)
-                              : c10::nullopt;
+    return has_requires_grad_ ? std::make_optional(requires_grad_)
+                              : std::nullopt;
   }

   /// Returns the `pinned_memory` property of the `TensorOptions`.

@@ -378,10 +378,10 @@ struct C10_API TensorOptions {
   }

   /// Returns the `pinned_memory` property of the `TensorOptions`, or
-  /// `c10::nullopt` if `pinned_memory` is not specified.
+  /// `std::nullopt` if `pinned_memory` is not specified.
   std::optional<bool> pinned_memory_opt() const noexcept {
-    return has_pinned_memory_ ? c10::make_optional(pinned_memory_)
-                              : c10::nullopt;
+    return has_pinned_memory_ ? std::make_optional(pinned_memory_)
+                              : std::nullopt;
   }

   /// Returns whether the `memory_layout` is specified

@@ -393,10 +393,10 @@ struct C10_API TensorOptions {
   // behavior of memory_format varies from function to function.

   /// Returns the `memory_layout` property of `TensorOptions, or
-  /// `c10::nullopt` if `memory_format` is not specified.
+  /// `std::nullopt` if `memory_format` is not specified.
   std::optional<MemoryFormat> memory_format_opt() const noexcept {
-    return has_memory_format_ ? c10::make_optional(memory_format_)
-                              : c10::nullopt;
+    return has_memory_format_ ? std::make_optional(memory_format_)
+                              : std::nullopt;
   }

   // Resolves the ATen backend specified by the current construction axes.

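The doc comments in the hunk above describe the *_opt() accessors: each returns the stored value when the corresponding option was explicitly set, and std::nullopt otherwise. A short usage sketch, assuming the usual c10::TensorOptions builder API (the .device() setter is not part of this diff and appears here only for illustration):

#include <c10/core/Device.h>
#include <c10/core/TensorOptions.h>
#include <iostream>

// Hypothetical usage sketch; not part of the commit.
int main() {
  c10::TensorOptions opts;  // nothing specified yet
  std::cout << "has device: " << opts.device_opt().has_value() << "\n";  // prints 0

  opts = opts.device(c10::Device(c10::DeviceType::CPU));
  if (auto d = opts.device_opt()) {
    std::cout << "device: " << *d << "\n";  // prints the selected device
  }
  return 0;
}
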
c10/core/UndefinedTensorImpl.cpp (+1 -1)

@@ -5,7 +5,7 @@ namespace c10 {

 // should this use the globalContext? Can it get a context passed in somehow?
 UndefinedTensorImpl::UndefinedTensorImpl()
-    : TensorImpl(DispatchKey::Undefined, caffe2::TypeMeta(), c10::nullopt) {
+    : TensorImpl(DispatchKey::Undefined, caffe2::TypeMeta(), std::nullopt) {
   set_storage_access_should_throw();
   // TODO: accessing the sizes on an undefined tensor is not meaningful
   // and should error too, but empirically it does not!

c10/core/impl/InlineDeviceGuard.h (+2 -2)

@@ -404,15 +404,15 @@ class InlineOptionalDeviceGuard {
   /// Returns the device that was set immediately prior to initialization of
   /// the, guard, or nullopt if the guard is uninitialized.
   optional<Device> original_device() const {
-    return guard_.has_value() ? make_optional(guard_->original_device())
+    return guard_.has_value() ? std::make_optional(guard_->original_device())
                               : nullopt;
   }

   /// Returns the most recent device that was set using this device guard,
   /// either from construction, or via set_device, if the guard is initialized,
   /// or nullopt if the guard is uninitialized.
   optional<Device> current_device() const {
-    return guard_.has_value() ? make_optional(guard_->current_device())
+    return guard_.has_value() ? std::make_optional(guard_->current_device())
                               : nullopt;
   }
