diff --git a/ttnn/cpp/ttnn/core.cpp b/ttnn/cpp/ttnn/core.cpp
index f357770ca1b..bc959f55de9 100644
--- a/ttnn/cpp/ttnn/core.cpp
+++ b/ttnn/cpp/ttnn/core.cpp
@@ -4,8 +4,48 @@
 
 #include "ttnn/core.hpp"
 
+#include <magic_enum.hpp>
+
+namespace ttnn::core {
+
+bool has_storage_type_of(const ttnn::Tensor& tensor, const ttnn::StorageType& storage_type) {
+    return tensor.storage_type() == storage_type;
+}
+
+std::optional<ttnn::MemoryConfig> get_memory_config(const ttnn::Tensor& tensor) {
+    if (not tensor.is_allocated() or not is_tensor_on_device_or_multidevice(tensor)) {
+        return std::nullopt;
+    }
+    return tensor.memory_config();
+}
+
+void set_printoptions(const std::string& profile) {
+    tt::tt_metal::tensor_impl::TTNN_TENSOR_PRINT_PROFILE =
+        magic_enum::enum_cast<tt::tt_metal::tensor_impl::TensorPrintProfile>(profile, [](char lhs, char rhs) {
+            return std::tolower(lhs) == std::tolower(rhs);
+        }).value();
+}
+
+void segfault_handler(int sig) {
+    std::cerr << tt::assert::backtrace_to_string() << std::endl;
+    exit(EXIT_FAILURE);
+}
+
+void dump_stack_trace_on_segfault() {
+    if (std::signal(SIGSEGV, segfault_handler) == SIG_ERR) {
+        std::cerr << "Error: cannot handle SIGSEGV" << std::endl;
+        exit(EXIT_FAILURE);
+    }
+}
+}  // namespace ttnn::core
+
 namespace ttnn {
 
+CoreIDs& CoreIDs::instance() {
+    static CoreIDs instance;
+    return instance;
+}
+
 std::int64_t CoreIDs::get_python_operation_id() { return python_operation_id.load(); }
 void CoreIDs::set_python_operation_id(std::int64_t python_operation_id_) { python_operation_id = python_operation_id_; }
 std::int64_t CoreIDs::fetch_and_increment_python_operation_id() { return python_operation_id.fetch_add(1); }
diff --git a/ttnn/cpp/ttnn/core.hpp b/ttnn/cpp/ttnn/core.hpp
index a484070b507..b68354071fa 100644
--- a/ttnn/cpp/ttnn/core.hpp
+++ b/ttnn/cpp/ttnn/core.hpp
@@ -4,9 +4,10 @@
 #pragma once
 
 #include <csignal>
+#include <cstdint>
 #include <optional>
+#include <string>
 
-#include <magic_enum.hpp>
 #include "ttnn/tensor/tensor.hpp"
 #include "ttnn/tensor/tensor_impl.hpp" // TTNN_TENSOR_PRINT_PROFILE
 #include "ttnn/tensor/types.hpp"
@@ -28,53 +29,25 @@ namespace ttnn {
 
 namespace core {
 
-inline std::uint32_t pad_to_multiple_of_tile_size(std::uint32_t value, std::uint32_t tile_size) {
-    return (value + (tile_size - 1)) / tile_size * tile_size;
-}
-
-inline bool has_storage_type_of(const ttnn::Tensor& tensor, const ttnn::StorageType& storage_type) {
-    return tensor.storage_type() == storage_type;
-}
-
-inline std::optional<ttnn::MemoryConfig> get_memory_config(const ttnn::Tensor& tensor) {
-    if (not tensor.is_allocated() or not is_tensor_on_device_or_multidevice(tensor)) {
-        return std::nullopt;
-    }
-    return tensor.memory_config();
-}
-
-inline void set_printoptions(const std::string& profile) {
-    tt::tt_metal::tensor_impl::TTNN_TENSOR_PRINT_PROFILE =
-        magic_enum::enum_cast<tt::tt_metal::tensor_impl::TensorPrintProfile>(profile, [](char lhs, char rhs) {
-            return std::tolower(lhs) == std::tolower(rhs);
-        }).value();
-}
-
-inline void segfault_handler(int sig) {
-    std::cerr << tt::assert::backtrace_to_string() << std::endl;
-    exit(EXIT_FAILURE);
-}
-
-inline void dump_stack_trace_on_segfault() {
-    if (std::signal(SIGSEGV, segfault_handler) == SIG_ERR) {
-        std::cerr << "Error: cannot handle SIGSEGV" << std::endl;
-        exit(EXIT_FAILURE);
-    }
-}
+bool has_storage_type_of(const ttnn::Tensor& tensor, const ttnn::StorageType& storage_type);
+
+std::optional<ttnn::MemoryConfig> get_memory_config(const ttnn::Tensor& tensor);
+
+void set_printoptions(const std::string& profile);
+
+void segfault_handler(int sig);
+
+void dump_stack_trace_on_segfault();
 
 } // namespace core
 
 using core::get_memory_config;
 using core::has_storage_type_of;
-using core::pad_to_multiple_of_tile_size;
 using core::set_printoptions;
 
 class CoreIDs {
    public:
-    static CoreIDs& instance() {
-        static CoreIDs instance;
-        return instance;
-    }
+    static CoreIDs& instance();
 
     std::int64_t get_python_operation_id();
     void set_python_operation_id(std::int64_t operation_id);
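
For context, here is a minimal caller sketch of the relocated ttnn::core helpers and the CoreIDs singleton whose declarations appear in the patch above. It is not part of the patch: the main() harness and the "full" profile string are illustrative assumptions; only functions visible in the diff are called.

// Illustrative usage sketch (assumption: built and linked against ttnn).
#include <cstdint>
#include <iostream>

#include "ttnn/core.hpp"

int main() {
    // Install the SIGSEGV handler that prints a backtrace before exiting.
    ttnn::core::dump_stack_trace_on_segfault();

    // Select a tensor print profile by name; the comparator in set_printoptions
    // matches the enum value case-insensitively ("full" is an assumed value).
    ttnn::core::set_printoptions("full");

    // Hand out a monotonically increasing operation id via the singleton.
    std::int64_t id = ttnn::CoreIDs::instance().fetch_and_increment_python_operation_id();
    std::cout << "next python operation id: " << id << std::endl;
    return 0;
}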