From 0a2448387bd38a89cf8c4fc86e327fe1b3da34a9 Mon Sep 17 00:00:00 2001 From: "Kadayam, Hari(hkadayam)" Date: Tue, 3 Apr 2018 13:46:59 -0700 Subject: [PATCH 001/385] first commit --- src/flip/README.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 src/flip/README.md diff --git a/src/flip/README.md b/src/flip/README.md new file mode 100644 index 00000000..0cf9c65a --- /dev/null +++ b/src/flip/README.md @@ -0,0 +1 @@ +# Flip From d5e8283fd6155aee364ab32698e8a10150a67d7c Mon Sep 17 00:00:00 2001 From: "Kadayam, Hari(hkadayam)" Date: Wed, 4 Apr 2018 04:04:35 -0700 Subject: [PATCH 002/385] Initial flip library --- src/flip/CMakeLists.txt | 60 ++++++++ src/flip/lib/flip.hpp | 264 +++++++++++++++++++++++++++++++++ src/flip/lib/test_flip.cpp | 48 ++++++ src/flip/proto/flip_spec.proto | 93 ++++++++++++ 4 files changed, 465 insertions(+) create mode 100644 src/flip/CMakeLists.txt create mode 100644 src/flip/lib/flip.hpp create mode 100644 src/flip/lib/test_flip.cpp create mode 100644 src/flip/proto/flip_spec.proto diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt new file mode 100644 index 00000000..3f2250de --- /dev/null +++ b/src/flip/CMakeLists.txt @@ -0,0 +1,60 @@ +cmake_minimum_required(VERSION 3.7) +project(flip) + +set(CMAKE_CXX_STANDARD 17) + +#set (PROJECT_SOURCE_DIR ${CMAKE_SOURCE_DIR}/src) + +if (NOT DEFINED PROTOBUF_PROTOC_EXECUTABLE) + set(PROTOBUF_PROTOC_EXECUTABLE protoc) +endif() + +if (NOT DEFINED PREFIX_DIR) + set(PREFIX_DIR /usr/local/) +endif() + +set(PROTO_DIR ${PROJECT_SOURCE_DIR}/proto) +set(PROTO_GEN_DIR ${CMAKE_BINARY_DIR}/gen_src/proto) +file(MAKE_DIRECTORY ${PROTO_GEN_DIR}) +#set(FLIP_PROTO_FILE proto/flip_spec.proto) + +get_filename_component(PROTO_FULL_PATH proto/flip_spec.proto ABSOLUTE) +file(RELATIVE_PATH PROTO_REL_PATH ${CMAKE_CURRENT_SOURCE_DIR}/proto ${PROTO_FULL_PATH}) +message(STATUS "Processing ${PROTO_FULL_PATH} Relative path ${PROTO_REL_PATH}") + +#set(_GEN_HEADERS src/proto/flip_spec.pb.h) +#list(APPEND _generated_headers "${_GEN_HEADERS}") +#set(_GEN_SOURCES src/proto/flip_spec.pb.cc) + +#target_sources(test_flip PRIVATE ${_GEN_SOURCES}) +#set_source_files_properties(${_GEN_SOURCES} ${_GEN_HEADERS} PROPERTIES GENERATED TRUE) + +add_custom_command( + OUTPUT ${PROTO_GEN_DIR}/flip_spec.pb.cc + COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} + ARGS --cpp_out=${PROTO_GEN_DIR} -I. ${PROTO_REL_PATH} + DEPENDS proto/flip_spec.proto + WORKING_DIRECTORY ${PROTO_DIR} + COMMENT "Running C++ protocol buffer compiler on flip_spec.proto" + VERBATIM +) + +set(FLIP_LIB_FILES + src/flip.hpp + ${PROTO_GEN_DIR}/flip_spec.pb.cc + ) + +set(TEST_FLIP_FILES + ${FLIP_LIB_FILES} + src/test_flip.cpp + ) + +find_library(PROTOBUF_LIBRARY protobuf HINTS ${PREFIX_DIR}/lib) +find_library(GLOG_LIBRARY glog HINTS ${PREFIX_DIR}/lib) +include_directories(${PREFIX_DIR}/include) + +add_library(flip ${FLIP_LIB_FILES}) +target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY}) + +add_executable(test_flip ${TEST_FLIP_FILES}) +target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY}) diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp new file mode 100644 index 00000000..6d5c36ee --- /dev/null +++ b/src/flip/lib/flip.hpp @@ -0,0 +1,264 @@ +// +// Created by Kadayam, Hari on 28/03/18. +// +#ifndef FLIP_FLIP_HPP +#define FLIP_FLIP_HPP + +#include "proto/flip_spec.pb.h" +#include +#include +#include +#include +#include + +namespace flip { +template < + size_t Index = 0, // start iteration at 0 index + typename TTuple, // the tuple type + size_t Size = + std::tuple_size_v< + std::remove_reference_t>, // tuple size + typename TCallable, // the callable to bo invoked for each tuple item + typename... TArgs // other arguments to be passed to the callable +> +void for_each(TTuple&& tuple, TCallable&& callable, TArgs&&... args) +{ + if constexpr (Index < Size) + { + std::invoke(callable, args..., std::get(tuple)); + + if constexpr (Index + 1 < Size) + for_each( + std::forward(tuple), + std::forward(callable), + std::forward(args)...); + } +} + +struct flip_name_compare { + bool operator()(const std::string &lhs, const std::string &rhs) const { + return lhs < rhs; + } +}; + +struct flip_instance { + flip_instance(const FlipSpec &fspec) : + m_fspec(fspec), + m_hit_count(0), + m_exec_count(0) { + } + + flip_instance(const flip_instance &other) { + m_fspec = other.m_fspec; + m_hit_count.store(other.m_hit_count.load()); + m_exec_count.store(other.m_exec_count.load()); + } + + FlipSpec m_fspec; + std::atomic< uint32_t > m_hit_count; + std::atomic< uint32_t > m_exec_count; +}; + +template +struct val_converter { + T operator()(const ParamValue &val) { + return 0; + } +}; + +template <> +struct val_converter { + int operator()(const ParamValue &val) { + return (val.kind_case() == ParamValue::kIntValue) ? val.int_value() : 0; + } +}; + +#if 0 +template <> +struct val_converter { + const int operator()(const ParamValue &val) { + return (val.kind_case() == ParamValue::kIntValue) ? val.int_value() : 0; + } +}; +#endif + +template <> +struct val_converter { + long operator()(const ParamValue &val) { + return (val.kind_case() == ParamValue::kLongValue) ? val.long_value() : 0; + } +}; + +template <> +struct val_converter { + double operator()(const ParamValue &val) { + return (val.kind_case() == ParamValue::kDoubleValue) ? val.double_value() : 0; + } +}; + +template <> +struct val_converter { + std::string operator()(const ParamValue &val) { + return (val.kind_case() == ParamValue::kStringValue) ? val.string_value() : ""; + } +}; + +template <> +struct val_converter { + bool operator()(const ParamValue &val) { + return (val.kind_case() == ParamValue::kBoolValue) ? val.bool_value() : 0; + } +}; + +class Flip { +public: + Flip() { + } + + bool add(const FlipSpec &fspec) { + auto inst = flip_instance(fspec); + + // TODO: Add verification to see if the flip is already scheduled, any errors etc.. + m_flip_specs.emplace(std::pair< std::string, flip_instance >(fspec.flip_name(), inst)); + + LOG(INFO) << "Added new fault flip " << fspec.flip_name() << " to the list of flips"; + return true; + } + + template< class... Args > + bool test_flip(std::string flip_name, Args &&... args) { + auto search = m_flip_specs.find(flip_name); + if (search == m_flip_specs.end()) { + //LOG(INFO) << "Flip " << flip_name << " is not triggered"; + return false; + } + + auto &inst = search->second; + auto fspec = inst.m_fspec; + std::tuple arglist(std::forward(args)...); + + auto i = 0U; + bool matched = true; + for_each(arglist, [this, fspec, &i, &matched](auto &v) { + if (!condition_matches(fspec.conditions()[i++], v)) { + matched = false; + } + }); + + // One or more conditions does not match. + if (!matched) { + //LOG(INFO) << "Flip " << flip_name << " does not match with one of the condition"; + return false; + } + + // Have we already executed this enough times + auto count = fspec.flip_frequency().count(); + if (count && (inst.m_exec_count.load(std::memory_order_acquire) >= count)) { + LOG(INFO) << "Flip " << flip_name << " matches, but it reached max count = " << count; + return false; + } + + if (!handle_hits(fspec.flip_frequency(), inst)) { + LOG(INFO) << "Flip " << flip_name << " matches, but it is rate limited"; + return false; + } + + inst.m_exec_count.fetch_add(1, std::memory_order_acq_rel); + LOG(INFO) << "Flip " << flip_name << " matches and hits"; + return true; + } + + template< typename T, class... Args > + boost::optional< T > get_test_flip(std::string flip_name, Args &&... args) { + auto search = m_flip_specs.find(flip_name); + if (search == m_flip_specs.end()) { + LOG(INFO) << "Flip " << flip_name << " is not triggered"; + return boost::none; + } + + auto &inst = search->second; + auto fspec = inst.m_fspec; + std::tuple arglist(std::forward(args)...); + + auto i = 0U; + bool matched = true; + for_each(arglist, [this, fspec, &i, &matched](auto &v) { + if (!condition_matches(fspec.conditions()[i++], v)) { + matched = false; + } + }); + + // One or more conditions does not match. + if (!matched) { + return boost::none; + } + + // Have we already executed this enough times + if (inst.m_exec_count.load(std::memory_order_acquire) >= fspec.flip_frequency().count()) { + LOG(INFO) << "Flip " << flip_name << " matches, but it reached max count = " + << fspec.flip_frequency().count(); + return boost::none; + } + + if (!handle_hits(fspec.flip_frequency(), inst)) { + LOG(INFO) << "Flip " << flip_name << " matches, but it is rate limited"; + return boost::none; + } + + inst.m_exec_count.fetch_add(1, std::memory_order_acq_rel); + LOG(INFO) << "Flip " << flip_name << " matches and hits"; + + return val_converter< T >()(fspec.returns()); + } + +private: + template< typename T > + bool condition_matches(const FlipCondition &cond, T &comp_val) { + auto val1 = val_converter< T >()(cond.value()); + return compare_val< T >(val1, comp_val, cond.oper()); + } + + bool handle_hits(const FlipFrequency &freq, flip_instance &inst) { + auto hit_count = inst.m_hit_count.fetch_add(1, std::memory_order_release); + if (freq.every_nth() != 0) { + return ((hit_count % freq.every_nth()) == 0); + } else { + return ((rand() % 100) < freq.percent()); + } + } + + template< typename T > + bool compare_val(T &val1, T &val2, Operator oper) { + switch (oper) { + case Operator::DONT_CARE: + return true; + + case Operator::EQUAL: + return (val1 == val2); + + case Operator::NOT_EQUAL: + return (val1 != val2); + + case Operator::GREATER_THAN: + return (val1 > val2); + + case Operator::LESS_THAN: + return (val1 < val2); + + case Operator::GREATER_THAN_OR_EQUAL: + return (val1 >= val2); + + case Operator::LESS_THAN_OR_EQUAL: + return (val1 <= val2); + + default: + return false; + } + } + +private: + std::map< std::string, flip_instance, flip_name_compare > m_flip_specs; +}; + +} // namespace flip +#endif //FLIP_FLIP_HPP diff --git a/src/flip/lib/test_flip.cpp b/src/flip/lib/test_flip.cpp new file mode 100644 index 00000000..09221023 --- /dev/null +++ b/src/flip/lib/test_flip.cpp @@ -0,0 +1,48 @@ +// +// Created by Kadayam, Hari on 28/03/18. +// + +#include "proto/flip_spec.pb.h" +#include "flip.hpp" + +void create_flip_spec(flip::FlipSpec *fspec) { + *(fspec->mutable_flip_name()) = "fail_cmd"; + + // Create a new condition and add it to flip spec + auto cond1 = fspec->mutable_conditions()->Add(); + *cond1->mutable_name() = "cmd_type"; + cond1->set_oper(flip::Operator::EQUAL); + cond1->mutable_value()->set_int_value(1); + + auto cond2 = fspec->mutable_conditions()->Add(); + *cond2->mutable_name() = "coll_name"; + cond2->set_oper(flip::Operator::EQUAL); + cond2->mutable_value()->set_string_value("item_shipping"); + + fspec->mutable_returns()->set_string_value("Error simulated"); + + auto freq = fspec->mutable_flip_frequency(); + freq->set_count(1); + freq->set_percent(100); +} + +int main(int argc, char *argv[]) { + flip::FlipSpec fspec; + create_flip_spec(&fspec); + + flip::Flip flip; + flip.add(fspec); + + int my_cmd = 1; + std::string my_coll = "item_shipping"; + auto result = flip.get_test_flip("fail_cmd", my_cmd, my_coll); + if (result) { + std::cout << "flip returned " << result.get() << "\n"; + } + result = flip.get_test_flip("fail_cmd", my_cmd, my_coll); + if (result) { + std::cout << "flip returned " << result.get(); + } + + return 0; +} \ No newline at end of file diff --git a/src/flip/proto/flip_spec.proto b/src/flip/proto/flip_spec.proto new file mode 100644 index 00000000..b4397e34 --- /dev/null +++ b/src/flip/proto/flip_spec.proto @@ -0,0 +1,93 @@ +syntax = "proto3"; + +package flip; + +option cc_enable_arenas = true; +enum Operator { + // Equal. + EQUAL = 0; + + // Inequality. + NOT_EQUAL = 1; + + // Less than. + LESS_THAN = 2; + + // Less than or equal. + LESS_THAN_OR_EQUAL = 3; + + // Greater than. + GREATER_THAN = 4; + + // Greater than or equal. + GREATER_THAN_OR_EQUAL = 5; + + // Don't care about + DONT_CARE = 6; +} + +enum Frequency { + // Every time generate a fault + ALWAYS = 0; + + // Generate fault alternate attempts + ALTERNATE = 1; + + // Fault on uniform random basis + UNI_RANDOM = 2; +} + +message ParamValue { + // The kind of value. + oneof kind { + bool null_value = 1; + + int32 int_value = 2; + + int64 long_value = 3; + + // Represents a double value. + double double_value = 4; + + // Represents a string value. + string string_value = 6; + + // Represents a boolean value. + bool bool_value = 7; + + bytes binary_value = 10; + + //google.protobuf.Any struct_encoded = 22; + + //This is a wrapper object that contains a single attribute with reserved name `list` + //google.protobuf.Any list_encoded = 23; + } +} + +message FlipCondition { + string name = 1; + Operator oper = 2; + ParamValue value = 3; +} + +message FlipFrequency { + uint32 count = 1; // How many faults to generate, Default no limit + + oneof frequency { + uint32 percent = 2; // Percentage of requests that matches the condition to generate fault + uint32 every_nth = 3; // Generate fault for every nth request + } +} + +message FlipSpec { + string flip_name = 1; + repeated FlipCondition conditions = 2; + ParamValue returns = 3; + FlipFrequency flip_frequency = 4; +} + +message FlipResponse { + bool success = 1; + + map metadata = 200; +} \ No newline at end of file From 37be91aaa8483b3fa42f13e5e94e4d1ca1fa3ce4 Mon Sep 17 00:00:00 2001 From: "Kadayam, Hari(hkadayam)" Date: Wed, 4 Apr 2018 04:14:26 -0700 Subject: [PATCH 003/385] Kickstart a README --- src/flip/README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/flip/README.md b/src/flip/README.md index 0cf9c65a..08db98f2 100644 --- a/src/flip/README.md +++ b/src/flip/README.md @@ -1 +1,6 @@ # Flip + +Flip stands for **F**au**l**t **I**njection **P**oint. Its a generic framework for injecting fault into the code. +It provides a framework, where actual fault could be injected outside the application. + +More info to be added later \ No newline at end of file From b8c841bc5ff4e92539f732719042803d075c3749 Mon Sep 17 00:00:00 2001 From: "Kadayam, Hari(hkadayam)" Date: Wed, 4 Apr 2018 04:25:50 -0700 Subject: [PATCH 004/385] Adjustments to makefile on path --- src/flip/CMakeLists.txt | 10 +++++----- src/flip/lib/flip.hpp | 2 +- src/flip/lib/test_flip.cpp | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 3f2250de..99c26617 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -9,8 +9,8 @@ if (NOT DEFINED PROTOBUF_PROTOC_EXECUTABLE) set(PROTOBUF_PROTOC_EXECUTABLE protoc) endif() -if (NOT DEFINED PREFIX_DIR) - set(PREFIX_DIR /usr/local/) +if (NOT DEFINED CMAKE_PREFIX_PATH) + set(CMAKE_PREFIX_PATH /usr/local/) endif() set(PROTO_DIR ${PROJECT_SOURCE_DIR}/proto) @@ -49,9 +49,9 @@ set(TEST_FLIP_FILES src/test_flip.cpp ) -find_library(PROTOBUF_LIBRARY protobuf HINTS ${PREFIX_DIR}/lib) -find_library(GLOG_LIBRARY glog HINTS ${PREFIX_DIR}/lib) -include_directories(${PREFIX_DIR}/include) +find_library(PROTOBUF_LIBRARY protobuf HINTS ${CMAKE_PREFIX_PATH}/lib) +find_library(GLOG_LIBRARY glog HINTS ${CMAKE_PREFIX_PATH}/lib) +include_directories(${CMAKE_PREFIX_PATH}/include ${PROTO_GEN_DIR}) add_library(flip ${FLIP_LIB_FILES}) target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY}) diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index 6d5c36ee..d3c6c1f1 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -4,7 +4,7 @@ #ifndef FLIP_FLIP_HPP #define FLIP_FLIP_HPP -#include "proto/flip_spec.pb.h" +#include "flip_spec.pb.h" #include #include #include diff --git a/src/flip/lib/test_flip.cpp b/src/flip/lib/test_flip.cpp index 09221023..ec266f65 100644 --- a/src/flip/lib/test_flip.cpp +++ b/src/flip/lib/test_flip.cpp @@ -2,7 +2,7 @@ // Created by Kadayam, Hari on 28/03/18. // -#include "proto/flip_spec.pb.h" +#include "flip_spec.pb.h" #include "flip.hpp" void create_flip_spec(flip::FlipSpec *fspec) { From 7897c41e0b67fcf0d2794d4d41a101e4d0901d97 Mon Sep 17 00:00:00 2001 From: "Kadayam, Hari(hkadayam)" Date: Wed, 4 Apr 2018 04:34:37 -0700 Subject: [PATCH 005/385] Add install targets to flip --- src/flip/CMakeLists.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 99c26617..fae34a87 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -58,3 +58,6 @@ target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY}) add_executable(test_flip ${TEST_FLIP_FILES}) target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY}) + +install(TARGETS flip DESTINATION ${CMAKE_PREFIX_PATH}/lib) +install(FILES src/flip.hpp proto/flip_spec.proto DESTINATION ${CMAKE_PREFIX_PATH}/include/flip) From 39216c4a61419a44dcec75105a60f2afbd1cad90 Mon Sep 17 00:00:00 2001 From: "Kadayam, Hari(hkadayam)" Date: Wed, 4 Apr 2018 05:12:14 -0700 Subject: [PATCH 006/385] Adding gen proto header to install targets --- src/flip/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index fae34a87..fcd6aa42 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -60,4 +60,4 @@ add_executable(test_flip ${TEST_FLIP_FILES}) target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY}) install(TARGETS flip DESTINATION ${CMAKE_PREFIX_PATH}/lib) -install(FILES src/flip.hpp proto/flip_spec.proto DESTINATION ${CMAKE_PREFIX_PATH}/include/flip) +install(FILES src/flip.hpp proto/flip_spec.proto ${PROTO_GEN_DIR}/flip_spec.pb.h DESTINATION ${CMAKE_PREFIX_PATH}/include/flip) From dffb059223d79b2a99a6da838450d4868c051e34 Mon Sep 17 00:00:00 2001 From: "Kadayam, Hari(hkadayam)" Date: Wed, 4 Apr 2018 09:29:39 -0700 Subject: [PATCH 007/385] Added support for multiple types of flip added --- src/flip/lib/flip.hpp | 173 ++++++++++++++++++++++++------------- src/flip/lib/test_flip.cpp | 5 +- 2 files changed, 118 insertions(+), 60 deletions(-) diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index d3c6c1f1..4547539d 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -9,7 +9,9 @@ #include #include #include +#include #include +#include namespace flip { template < @@ -21,17 +23,16 @@ template < typename TCallable, // the callable to bo invoked for each tuple item typename... TArgs // other arguments to be passed to the callable > -void for_each(TTuple&& tuple, TCallable&& callable, TArgs&&... args) -{ - if constexpr (Index < Size) - { +void for_each(TTuple&& tuple, TCallable&& callable, TArgs&&... args) { + if constexpr (Index < Size) { std::invoke(callable, args..., std::get(tuple)); - if constexpr (Index + 1 < Size) - for_each( - std::forward(tuple), - std::forward(callable), - std::forward(args)...); + if constexpr (Index + 1 < Size) { + for_each< Index + 1 >( + std::forward< TTuple >(tuple), + std::forward< TCallable >(callable), + std::forward< TArgs >(args)...); + } } } @@ -45,18 +46,18 @@ struct flip_instance { flip_instance(const FlipSpec &fspec) : m_fspec(fspec), m_hit_count(0), - m_exec_count(0) { + m_remain_exec_count(fspec.flip_frequency().count()) { } flip_instance(const flip_instance &other) { m_fspec = other.m_fspec; m_hit_count.store(other.m_hit_count.load()); - m_exec_count.store(other.m_exec_count.load()); + m_remain_exec_count.store(other.m_remain_exec_count.load()); } FlipSpec m_fspec; std::atomic< uint32_t > m_hit_count; - std::atomic< uint32_t > m_exec_count; + std::atomic< int32_t > m_remain_exec_count; }; template @@ -119,41 +120,27 @@ class Flip { auto inst = flip_instance(fspec); // TODO: Add verification to see if the flip is already scheduled, any errors etc.. + std::unique_lock lock(m_mutex); m_flip_specs.emplace(std::pair< std::string, flip_instance >(fspec.flip_name(), inst)); - LOG(INFO) << "Added new fault flip " << fspec.flip_name() << " to the list of flips"; return true; } template< class... Args > bool test_flip(std::string flip_name, Args &&... args) { - auto search = m_flip_specs.find(flip_name); - if (search == m_flip_specs.end()) { - //LOG(INFO) << "Flip " << flip_name << " is not triggered"; - return false; - } - - auto &inst = search->second; - auto fspec = inst.m_fspec; - std::tuple arglist(std::forward(args)...); - - auto i = 0U; - bool matched = true; - for_each(arglist, [this, fspec, &i, &matched](auto &v) { - if (!condition_matches(fspec.conditions()[i++], v)) { - matched = false; - } - }); +#if 0 + std::shared_lock lock(m_mutex); - // One or more conditions does not match. - if (!matched) { - //LOG(INFO) << "Flip " << flip_name << " does not match with one of the condition"; + auto inst = match_flip(flip_name, std::forward< Args >(args)...); + if (inst == nullptr) { + //LOG(INFO) << "Flip " << flip_name << " either not exist or conditions not match"; return false; } + auto &fspec = inst->m_fspec; // Have we already executed this enough times auto count = fspec.flip_frequency().count(); - if (count && (inst.m_exec_count.load(std::memory_order_acquire) >= count)) { + if (count && (inst->m_remain_exec_count.load(std::memory_order_acquire) >= count)) { LOG(INFO) << "Flip " << flip_name << " matches, but it reached max count = " << count; return false; } @@ -163,38 +150,28 @@ class Flip { return false; } - inst.m_exec_count.fetch_add(1, std::memory_order_acq_rel); + inst->m_remain_exec_count.fetch_add(1, std::memory_order_acq_rel); LOG(INFO) << "Flip " << flip_name << " matches and hits"; return true; +#endif + auto ret = __test_flip(flip_name, std::forward< Args >(args)...); + return (ret != boost::none); } template< typename T, class... Args > boost::optional< T > get_test_flip(std::string flip_name, Args &&... args) { - auto search = m_flip_specs.find(flip_name); - if (search == m_flip_specs.end()) { - LOG(INFO) << "Flip " << flip_name << " is not triggered"; - return boost::none; - } - - auto &inst = search->second; - auto fspec = inst.m_fspec; - std::tuple arglist(std::forward(args)...); - - auto i = 0U; - bool matched = true; - for_each(arglist, [this, fspec, &i, &matched](auto &v) { - if (!condition_matches(fspec.conditions()[i++], v)) { - matched = false; - } - }); +#if 0 + std::shared_lock lock(m_mutex); - // One or more conditions does not match. - if (!matched) { + auto inst = match_flip(flip_name, std::forward< Args >(args)...); + if (inst == nullptr) { + //LOG(INFO) << "Flip " << flip_name << " either not exist or conditions not match"; return boost::none; } + auto &fspec = inst->m_fspec; // Have we already executed this enough times - if (inst.m_exec_count.load(std::memory_order_acquire) >= fspec.flip_frequency().count()) { + if (inst->m_remain_exec_count.load(std::memory_order_acquire) >= fspec.flip_frequency().count()) { LOG(INFO) << "Flip " << flip_name << " matches, but it reached max count = " << fspec.flip_frequency().count(); return boost::none; @@ -205,21 +182,98 @@ class Flip { return boost::none; } - inst.m_exec_count.fetch_add(1, std::memory_order_acq_rel); + inst->m_remain_exec_count.fetch_add(1, std::memory_order_acq_rel); LOG(INFO) << "Flip " << flip_name << " matches and hits"; return val_converter< T >()(fspec.returns()); +#endif + auto ret = __test_flip(flip_name, std::forward< Args >(args)...); + if (ret == boost::none) return boost::none; + return boost::optional(boost::get(ret.get())); } private: + template< typename T, bool ValueNeeded, class... Args > + boost::optional< boost::variant > __test_flip(std::string flip_name, Args &&... args) { + bool exec_completed = false; // If all the exec for the flip is completed. + flip_instance *inst = nullptr; + + { + std::shared_lock lock(m_mutex); + inst = match_flip(flip_name, std::forward< Args >(args)...); + if (inst == nullptr) { + //LOG(INFO) << "Flip " << flip_name << " either not exist or conditions not match"; + return boost::none; + } + auto &fspec = inst->m_fspec; + + // Check if we are subjected to rate limit + if (!handle_hits(fspec.flip_frequency(), inst)) { + LOG(INFO) << "Flip " << flip_name << " matches, but it is rate limited"; + return boost::none; + } + + // Have we already executed this enough times + if (inst->m_remain_exec_count.fetch_sub(1, std::memory_order_acq_rel) == 1) { + exec_completed = true; + } + LOG(INFO) << "Flip " << flip_name << " matches and hits"; + } + + boost::variant val_ret; + if (ValueNeeded) { + val_ret = val_converter< T >()(inst->m_fspec.returns()); + } else { + //static_assert(!std::is_same::value || std::is_same::value, "__test_flip without value should be called with bool as type"); + val_ret = true; + } + + if (exec_completed) { + // If we completed the execution, need to remove them + std::unique_lock lock(m_mutex); + if (inst->m_remain_exec_count.load(std::memory_order_relaxed) == 0) { + m_flip_specs.erase(flip_name); + } + } + return val_ret; + } + + template< class... Args > + flip_instance * match_flip(std::string flip_name, Args &&... args) { + flip_instance *match_inst = nullptr; + + auto search = m_flip_specs.equal_range(flip_name); + for (auto it = search.first; it != search.second; ++it) { + auto inst = &it->second; + auto fspec = inst->m_fspec; + + // Check for all the condition match + std::tuple< Args... > arglist(std::forward< Args >(args)...); + auto i = 0U; + bool matched = true; + for_each(arglist, [this, fspec, &i, &matched](auto &v) { + if (!condition_matches(fspec.conditions()[i++], v)) { + matched = false; + } + }); + + // One or more conditions does not match. + if (matched) { + match_inst = inst; + break; + } + } + return match_inst; + } + template< typename T > bool condition_matches(const FlipCondition &cond, T &comp_val) { auto val1 = val_converter< T >()(cond.value()); return compare_val< T >(val1, comp_val, cond.oper()); } - bool handle_hits(const FlipFrequency &freq, flip_instance &inst) { - auto hit_count = inst.m_hit_count.fetch_add(1, std::memory_order_release); + bool handle_hits(const FlipFrequency &freq, flip_instance *inst) { + auto hit_count = inst->m_hit_count.fetch_add(1, std::memory_order_release); if (freq.every_nth() != 0) { return ((hit_count % freq.every_nth()) == 0); } else { @@ -257,7 +311,8 @@ class Flip { } private: - std::map< std::string, flip_instance, flip_name_compare > m_flip_specs; + std::multimap< std::string, flip_instance, flip_name_compare > m_flip_specs; + std::shared_mutex m_mutex; }; } // namespace flip diff --git a/src/flip/lib/test_flip.cpp b/src/flip/lib/test_flip.cpp index ec266f65..a59fc381 100644 --- a/src/flip/lib/test_flip.cpp +++ b/src/flip/lib/test_flip.cpp @@ -22,7 +22,7 @@ void create_flip_spec(flip::FlipSpec *fspec) { fspec->mutable_returns()->set_string_value("Error simulated"); auto freq = fspec->mutable_flip_frequency(); - freq->set_count(1); + freq->set_count(2); freq->set_percent(100); } @@ -35,6 +35,9 @@ int main(int argc, char *argv[]) { int my_cmd = 1; std::string my_coll = "item_shipping"; + if (flip.test_flip("fail_cmd", my_cmd, my_coll)) { + std::cout << "flip hit " << "\n"; + } auto result = flip.get_test_flip("fail_cmd", my_cmd, my_coll); if (result) { std::cout << "flip returned " << result.get() << "\n"; From 1c52afbdceacf48210301354d42896b0a59f13e9 Mon Sep 17 00:00:00 2001 From: "Kadayam, Hari(hkadayam)" Date: Sun, 8 Apr 2018 13:58:54 -0700 Subject: [PATCH 008/385] Fixed compilation error on linux while building test_flip --- src/flip/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index fcd6aa42..5276a3ec 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -57,7 +57,7 @@ add_library(flip ${FLIP_LIB_FILES}) target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY}) add_executable(test_flip ${TEST_FLIP_FILES}) -target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY}) +target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} pthread) install(TARGETS flip DESTINATION ${CMAKE_PREFIX_PATH}/lib) install(FILES src/flip.hpp proto/flip_spec.proto ${PROTO_GEN_DIR}/flip_spec.pb.h DESTINATION ${CMAKE_PREFIX_PATH}/include/flip) From 992778861e0ae9e7565c91e1498ff3cee81626c4 Mon Sep 17 00:00:00 2001 From: Aditya Marella Date: Wed, 11 Apr 2018 18:03:37 -0700 Subject: [PATCH 009/385] Fix sign compare compile error on linux --- src/flip/lib/flip.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index 4547539d..6b870330 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -277,7 +277,7 @@ class Flip { if (freq.every_nth() != 0) { return ((hit_count % freq.every_nth()) == 0); } else { - return ((rand() % 100) < freq.percent()); + return ((rand() % 100) < (int)freq.percent()); } } From 2b9c5f55fb958243c68636e145b7cd4b1893ec1c Mon Sep 17 00:00:00 2001 From: Celik Date: Sat, 28 Apr 2018 16:00:45 -0700 Subject: [PATCH 010/385] testing CMakeList changes --- src/flip/CMakeLists.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 5276a3ec..53849415 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -3,6 +3,9 @@ project(flip) set(CMAKE_CXX_STANDARD 17) +find_library(GFLAGS_LIBRARY gflags HINTS ${CMAKE_PREFIX_PATH}/lib) +add_library(flip ${FLIP_LIB_FILES}) +target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GFLAGS_LIBRARY} ${GLOG_LIBRARY}) #set (PROJECT_SOURCE_DIR ${CMAKE_SOURCE_DIR}/src) if (NOT DEFINED PROTOBUF_PROTOC_EXECUTABLE) From 3e96ba8377e01eafe9805bd451b0068708f06665 Mon Sep 17 00:00:00 2001 From: Celik Date: Sat, 28 Apr 2018 16:35:12 -0700 Subject: [PATCH 011/385] checking --- src/flip/CMakeLists.txt | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 53849415..13342dc4 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -4,8 +4,6 @@ project(flip) set(CMAKE_CXX_STANDARD 17) find_library(GFLAGS_LIBRARY gflags HINTS ${CMAKE_PREFIX_PATH}/lib) -add_library(flip ${FLIP_LIB_FILES}) -target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GFLAGS_LIBRARY} ${GLOG_LIBRARY}) #set (PROJECT_SOURCE_DIR ${CMAKE_SOURCE_DIR}/src) if (NOT DEFINED PROTOBUF_PROTOC_EXECUTABLE) @@ -57,7 +55,7 @@ find_library(GLOG_LIBRARY glog HINTS ${CMAKE_PREFIX_PATH}/lib) include_directories(${CMAKE_PREFIX_PATH}/include ${PROTO_GEN_DIR}) add_library(flip ${FLIP_LIB_FILES}) -target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY}) +target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GFLAGS_LIBRARY} ${GLOG_LIBRARY}) add_executable(test_flip ${TEST_FLIP_FILES}) target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} pthread) From c484b849c36f19ab2f62401da613b91c15cd0675 Mon Sep 17 00:00:00 2001 From: Hari Kadayam Date: Sat, 28 Apr 2018 17:16:51 -0700 Subject: [PATCH 012/385] [MONSTOR-4374] Added delay fault facility, enhanced test, fixed missing gflags --- src/flip/CMakeLists.txt | 5 +- src/flip/lib/flip.hpp | 58 ++++++++++++-- src/flip/lib/test_flip.cpp | 141 ++++++++++++++++++++++++++++----- src/flip/proto/flip_spec.proto | 18 ++++- 4 files changed, 193 insertions(+), 29 deletions(-) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 5276a3ec..6b04eeec 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -51,13 +51,14 @@ set(TEST_FLIP_FILES find_library(PROTOBUF_LIBRARY protobuf HINTS ${CMAKE_PREFIX_PATH}/lib) find_library(GLOG_LIBRARY glog HINTS ${CMAKE_PREFIX_PATH}/lib) +find_library(GFLAGS_LIBRARY gflags HINTS ${CMAKE_PREFIX_PATH}/lib) include_directories(${CMAKE_PREFIX_PATH}/include ${PROTO_GEN_DIR}) add_library(flip ${FLIP_LIB_FILES}) -target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY}) +target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GFLAGS_LIBRARY} ${GLOG_LIBRARY}) add_executable(test_flip ${TEST_FLIP_FILES}) -target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} pthread) +target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} pthread boost_system) install(TARGETS flip DESTINATION ${CMAKE_PREFIX_PATH}/lib) install(FILES src/flip.hpp proto/flip_spec.proto ${PROTO_GEN_DIR}/flip_spec.pb.h DESTINATION ${CMAKE_PREFIX_PATH}/include/flip) diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index 4547539d..529f5df9 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -12,8 +12,13 @@ #include #include #include +#include +#include +#include namespace flip { +static thread_local boost::asio::io_service g_io; + template < size_t Index = 0, // start iteration at 0 index typename TTuple, // the tuple type @@ -113,10 +118,11 @@ struct val_converter { class Flip { public: - Flip() { + Flip() : m_flip_enabled(false) { } bool add(const FlipSpec &fspec) { + m_flip_enabled = true; auto inst = flip_instance(fspec); // TODO: Add verification to see if the flip is already scheduled, any errors etc.. @@ -154,6 +160,7 @@ class Flip { LOG(INFO) << "Flip " << flip_name << " matches and hits"; return true; #endif + if (!m_flip_enabled) return false; auto ret = __test_flip(flip_name, std::forward< Args >(args)...); return (ret != boost::none); } @@ -187,14 +194,35 @@ class Flip { return val_converter< T >()(fspec.returns()); #endif + if (!m_flip_enabled) return boost::none; + auto ret = __test_flip(flip_name, std::forward< Args >(args)...); if (ret == boost::none) return boost::none; return boost::optional(boost::get(ret.get())); } + template< class... Args > + bool delay_flip(std::string flip_name, const std::function &closure, Args &&... args) { + if (!m_flip_enabled) return false; + + auto ret = __test_flip(flip_name, std::forward< Args >(args)...); + if (ret != boost::none) { + uint64_t delay_usec = boost::get(ret.get()); + auto io = std::make_shared(); + boost::asio::deadline_timer t(*io, boost::posix_time::milliseconds(delay_usec/1000)); + t.async_wait([closure, io](const boost::system::error_code& e) { + closure(); + }); + auto ret = io->run(); + return true; + } else { + return false; + } + } + private: template< typename T, bool ValueNeeded, class... Args > - boost::optional< boost::variant > __test_flip(std::string flip_name, Args &&... args) { + boost::optional< boost::variant > __test_flip(std::string flip_name, Args &&... args) { bool exec_completed = false; // If all the exec for the flip is completed. flip_instance *inst = nullptr; @@ -220,12 +248,27 @@ class Flip { LOG(INFO) << "Flip " << flip_name << " matches and hits"; } - boost::variant val_ret; - if (ValueNeeded) { - val_ret = val_converter< T >()(inst->m_fspec.returns()); - } else { + boost::variant val_ret ; + switch (inst->m_fspec.flip_action().action_case()) { + case FlipAction::kReturns: + if (ValueNeeded) { + val_ret = val_converter< T >()(inst->m_fspec.flip_action().returns().return_()); + } else { + val_ret = true; + } + break; + + case FlipAction::kNoAction: //static_assert(!std::is_same::value || std::is_same::value, "__test_flip without value should be called with bool as type"); val_ret = true; + break; + + case FlipAction::kDelays: + val_ret = inst->m_fspec.flip_action().delays().delay_in_usec(); + break; + + default: + val_ret = true; } if (exec_completed) { @@ -277,7 +320,7 @@ class Flip { if (freq.every_nth() != 0) { return ((hit_count % freq.every_nth()) == 0); } else { - return ((rand() % 100) < freq.percent()); + return ((uint32_t)(rand() % 100) < freq.percent()); } } @@ -313,6 +356,7 @@ class Flip { private: std::multimap< std::string, flip_instance, flip_name_compare > m_flip_specs; std::shared_mutex m_mutex; + bool m_flip_enabled; }; } // namespace flip diff --git a/src/flip/lib/test_flip.cpp b/src/flip/lib/test_flip.cpp index a59fc381..cb21f4bd 100644 --- a/src/flip/lib/test_flip.cpp +++ b/src/flip/lib/test_flip.cpp @@ -4,9 +4,113 @@ #include "flip_spec.pb.h" #include "flip.hpp" +#include -void create_flip_spec(flip::FlipSpec *fspec) { - *(fspec->mutable_flip_name()) = "fail_cmd"; +void create_ret_fspec(flip::FlipSpec *fspec) { + *(fspec->mutable_flip_name()) = "ret_fspec"; + + // Create a new condition and add it to flip spec + auto cond = fspec->mutable_conditions()->Add(); + *cond->mutable_name() = "coll_name"; + cond->set_oper(flip::Operator::EQUAL); + cond->mutable_value()->set_string_value("item_shipping"); + + fspec->mutable_flip_action()->mutable_returns()->mutable_return_()->set_string_value("Error simulated value"); + + auto freq = fspec->mutable_flip_frequency(); + freq->set_count(2); + freq->set_percent(100); +} + +void run_and_validate_ret_flip(flip::Flip *flip) { + std::string my_coll = "item_shipping"; + std::string unknown_coll = "unknown_collection"; + + auto result = flip->get_test_flip("ret_fspec", my_coll); + assert(result); + assert(result.get() == "Error simulated value"); + + result = flip->get_test_flip("ret_fspec", unknown_coll); + assert(!result); + + result = flip->get_test_flip("ret_fspec", my_coll); + assert(result); + assert(result.get() == "Error simulated value"); + + result = flip->get_test_flip("ret_fspec", my_coll); + assert(!result); // Not more than 2 +} + +void create_check_fspec(flip::FlipSpec *fspec) { + *(fspec->mutable_flip_name()) = "check_fspec"; + + auto cond = fspec->mutable_conditions()->Add(); + *cond->mutable_name() = "cmd_type"; + cond->set_oper(flip::Operator::EQUAL); + cond->mutable_value()->set_int_value(1); + + auto freq = fspec->mutable_flip_frequency(); + freq->set_count(2); + freq->set_percent(100); +} + +void run_and_validate_check_flip(flip::Flip *flip) { + int valid_cmd = 1; + int invalid_cmd = -1; + + assert(!flip->test_flip("check_fspec", invalid_cmd)); + assert(flip->test_flip("check_fspec", valid_cmd)); + assert(!flip->test_flip("check_fspec", invalid_cmd)); + assert(flip->test_flip("check_fspec", valid_cmd)); + assert(!flip->test_flip("check_fspec", valid_cmd)); // Not more than 2 +} + +void create_delay_fspec(flip::FlipSpec *fspec) { + *(fspec->mutable_flip_name()) = "delay_fspec"; + + auto cond = fspec->mutable_conditions()->Add(); + *cond->mutable_name() = "cmd_type"; + cond->set_oper(flip::Operator::EQUAL); + cond->mutable_value()->set_int_value(2); + + fspec->mutable_flip_action()->mutable_delays()->set_delay_in_usec(100000); + auto freq = fspec->mutable_flip_frequency(); + freq->set_count(2); + freq->set_percent(100); +} + +void run_and_validate_delay_flip(flip::Flip *flip) { + int valid_cmd = 2; + int invalid_cmd = -1; + std::shared_ptr< std::atomic > closure_calls = std::make_shared>(0); + + assert(flip->delay_flip("delay_fspec", [closure_calls]() { + (*closure_calls)++; + }, valid_cmd)); + + assert(!flip->delay_flip("delay_fspec", [closure_calls]() { + (*closure_calls)++; + }, invalid_cmd)); + + assert(flip->delay_flip("delay_fspec", [closure_calls]() { + (*closure_calls)++; + }, valid_cmd)); + + assert(!flip->delay_flip("delay_fspec", [closure_calls]() { + (*closure_calls)++; + }, invalid_cmd)); + + assert(!flip->delay_flip("delay_fspec", [closure_calls]() { + (*closure_calls)++; + }, valid_cmd)); + + sleep(2); + DCHECK_EQ((*closure_calls).load(), 2); +} + +#if 0 +void create_multi_cond_fspec(flip::FlipSpec *fspec) { + *(fspec->mutable_flip_name()) = "multi_cond1_fspec"; // Create a new condition and add it to flip spec auto cond1 = fspec->mutable_conditions()->Add(); @@ -19,33 +123,32 @@ void create_flip_spec(flip::FlipSpec *fspec) { cond2->set_oper(flip::Operator::EQUAL); cond2->mutable_value()->set_string_value("item_shipping"); - fspec->mutable_returns()->set_string_value("Error simulated"); + fspec->mutable_flip_action()->mutable_returns()->mutable_return_()->set_string_value("Error simulated value"); auto freq = fspec->mutable_flip_frequency(); freq->set_count(2); freq->set_percent(100); } +#endif int main(int argc, char *argv[]) { - flip::FlipSpec fspec; - create_flip_spec(&fspec); + flip::FlipSpec ret_fspec; + create_ret_fspec(&ret_fspec); + + flip::FlipSpec check_fspec; + create_check_fspec(&check_fspec); + + flip::FlipSpec delay_fspec; + create_delay_fspec(&delay_fspec); flip::Flip flip; - flip.add(fspec); + flip.add(ret_fspec); + flip.add(check_fspec); + flip.add(delay_fspec); - int my_cmd = 1; - std::string my_coll = "item_shipping"; - if (flip.test_flip("fail_cmd", my_cmd, my_coll)) { - std::cout << "flip hit " << "\n"; - } - auto result = flip.get_test_flip("fail_cmd", my_cmd, my_coll); - if (result) { - std::cout << "flip returned " << result.get() << "\n"; - } - result = flip.get_test_flip("fail_cmd", my_cmd, my_coll); - if (result) { - std::cout << "flip returned " << result.get(); - } + run_and_validate_ret_flip(&flip); + run_and_validate_check_flip(&flip); + run_and_validate_delay_flip(&flip); return 0; } \ No newline at end of file diff --git a/src/flip/proto/flip_spec.proto b/src/flip/proto/flip_spec.proto index b4397e34..4658d541 100644 --- a/src/flip/proto/flip_spec.proto +++ b/src/flip/proto/flip_spec.proto @@ -70,6 +70,22 @@ message FlipCondition { ParamValue value = 3; } +message FlipAction { + message ActionReturns { + ParamValue return = 1; + } + + message ActionDelays { + uint64 delay_in_usec = 1; + } + + oneof action { + bool no_action = 1; + ActionReturns returns = 2; + ActionDelays delays = 3; + } +} + message FlipFrequency { uint32 count = 1; // How many faults to generate, Default no limit @@ -82,7 +98,7 @@ message FlipFrequency { message FlipSpec { string flip_name = 1; repeated FlipCondition conditions = 2; - ParamValue returns = 3; + FlipAction flip_action = 3; FlipFrequency flip_frequency = 4; } From b0b0849254d93ae6bfc8a7212b7c973ccc43131e Mon Sep 17 00:00:00 2001 From: Celik Date: Sun, 29 Apr 2018 19:39:18 -0700 Subject: [PATCH 013/385] testing --- src/flip/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 13342dc4..d2cae739 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -54,8 +54,8 @@ find_library(PROTOBUF_LIBRARY protobuf HINTS ${CMAKE_PREFIX_PATH}/lib) find_library(GLOG_LIBRARY glog HINTS ${CMAKE_PREFIX_PATH}/lib) include_directories(${CMAKE_PREFIX_PATH}/include ${PROTO_GEN_DIR}) -add_library(flip ${FLIP_LIB_FILES}) -target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GFLAGS_LIBRARY} ${GLOG_LIBRARY}) +#add_library(flip ${FLIP_LIB_FILES}) +#target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GFLAGS_LIBRARY} ${GLOG_LIBRARY}) add_executable(test_flip ${TEST_FLIP_FILES}) target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} pthread) From f8851671d8e43bc3a33b5570a16f7b40ce0b1cd2 Mon Sep 17 00:00:00 2001 From: Celik Date: Sun, 29 Apr 2018 19:51:27 -0700 Subject: [PATCH 014/385] testing --- src/flip/CMakeLists.txt | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index d2cae739..866a5e12 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -3,8 +3,8 @@ project(flip) set(CMAKE_CXX_STANDARD 17) -find_library(GFLAGS_LIBRARY gflags HINTS ${CMAKE_PREFIX_PATH}/lib) #set (PROJECT_SOURCE_DIR ${CMAKE_SOURCE_DIR}/src) +find_library(GFLAGS_LIBRARY gflags HINTS ${CMAKE_PREFIX_PATH}/lib) if (NOT DEFINED PROTOBUF_PROTOC_EXECUTABLE) set(PROTOBUF_PROTOC_EXECUTABLE protoc) @@ -52,13 +52,15 @@ set(TEST_FLIP_FILES find_library(PROTOBUF_LIBRARY protobuf HINTS ${CMAKE_PREFIX_PATH}/lib) find_library(GLOG_LIBRARY glog HINTS ${CMAKE_PREFIX_PATH}/lib) +find_library(GFLAGS_LIBRARY gflags HINTS ${CMAKE_PREFIX_PATH}/lib) include_directories(${CMAKE_PREFIX_PATH}/include ${PROTO_GEN_DIR}) -#add_library(flip ${FLIP_LIB_FILES}) -#target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GFLAGS_LIBRARY} ${GLOG_LIBRARY}) +add_library(flip ${FLIP_LIB_FILES}) +target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GFLAGS_LIBRARY} ${GLOG_LIBRARY}) add_executable(test_flip ${TEST_FLIP_FILES}) -target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} pthread) +target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} pthread boost_system) install(TARGETS flip DESTINATION ${CMAKE_PREFIX_PATH}/lib) install(FILES src/flip.hpp proto/flip_spec.proto ${PROTO_GEN_DIR}/flip_spec.pb.h DESTINATION ${CMAKE_PREFIX_PATH}/include/flip) + From 5934d9696e4d77e7a5387710ed6ed16351c10309 Mon Sep 17 00:00:00 2001 From: Celik Date: Mon, 30 Apr 2018 10:02:30 -0700 Subject: [PATCH 015/385] testing --- src/flip/CMakeLists.txt | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 866a5e12..07410e06 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -3,8 +3,12 @@ project(flip) set(CMAKE_CXX_STANDARD 17) +set (CMAKE_DEPENDENT_MODULES_DIR ../../deps_prefix) +include(find_gflags.cmake) + + #set (PROJECT_SOURCE_DIR ${CMAKE_SOURCE_DIR}/src) -find_library(GFLAGS_LIBRARY gflags HINTS ${CMAKE_PREFIX_PATH}/lib) +#find_library(GFLAGS_LIBRARY gflags HINTS ${CMAKE_PREFIX_PATH}/lib) if (NOT DEFINED PROTOBUF_PROTOC_EXECUTABLE) set(PROTOBUF_PROTOC_EXECUTABLE protoc) @@ -52,7 +56,7 @@ set(TEST_FLIP_FILES find_library(PROTOBUF_LIBRARY protobuf HINTS ${CMAKE_PREFIX_PATH}/lib) find_library(GLOG_LIBRARY glog HINTS ${CMAKE_PREFIX_PATH}/lib) -find_library(GFLAGS_LIBRARY gflags HINTS ${CMAKE_PREFIX_PATH}/lib) +#find_library(GFLAGS_LIBRARY gflags HINTS ${CMAKE_PREFIX_PATH}/lib) include_directories(${CMAKE_PREFIX_PATH}/include ${PROTO_GEN_DIR}) add_library(flip ${FLIP_LIB_FILES}) From ae10ad8f8c9cbe3d4607557614af5ff237f173fd Mon Sep 17 00:00:00 2001 From: Celik Date: Mon, 30 Apr 2018 10:05:38 -0700 Subject: [PATCH 016/385] testing --- src/flip/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 07410e06..e2231570 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -63,7 +63,7 @@ add_library(flip ${FLIP_LIB_FILES}) target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GFLAGS_LIBRARY} ${GLOG_LIBRARY}) add_executable(test_flip ${TEST_FLIP_FILES}) -target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} pthread boost_system) +target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GFLAGS_LIBRARY} ${GLOG_LIBRARY} pthread boost_system) install(TARGETS flip DESTINATION ${CMAKE_PREFIX_PATH}/lib) install(FILES src/flip.hpp proto/flip_spec.proto ${PROTO_GEN_DIR}/flip_spec.pb.h DESTINATION ${CMAKE_PREFIX_PATH}/include/flip) From f202f90d67ec85a8b49497a6f0363034c09d1be6 Mon Sep 17 00:00:00 2001 From: Celik Date: Mon, 30 Apr 2018 10:14:09 -0700 Subject: [PATCH 017/385] testing no find_flags --- src/flip/CMakeLists.txt | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index e2231570..212ead97 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -3,12 +3,7 @@ project(flip) set(CMAKE_CXX_STANDARD 17) -set (CMAKE_DEPENDENT_MODULES_DIR ../../deps_prefix) -include(find_gflags.cmake) - - #set (PROJECT_SOURCE_DIR ${CMAKE_SOURCE_DIR}/src) -#find_library(GFLAGS_LIBRARY gflags HINTS ${CMAKE_PREFIX_PATH}/lib) if (NOT DEFINED PROTOBUF_PROTOC_EXECUTABLE) set(PROTOBUF_PROTOC_EXECUTABLE protoc) @@ -56,7 +51,7 @@ set(TEST_FLIP_FILES find_library(PROTOBUF_LIBRARY protobuf HINTS ${CMAKE_PREFIX_PATH}/lib) find_library(GLOG_LIBRARY glog HINTS ${CMAKE_PREFIX_PATH}/lib) -#find_library(GFLAGS_LIBRARY gflags HINTS ${CMAKE_PREFIX_PATH}/lib) +find_library(GFLAGS_LIBRARY gflags HINTS ${CMAKE_PREFIX_PATH}/lib) include_directories(${CMAKE_PREFIX_PATH}/include ${PROTO_GEN_DIR}) add_library(flip ${FLIP_LIB_FILES}) From 92d43ea052c6141c7358afc6610731400ef69108 Mon Sep 17 00:00:00 2001 From: Hari Kadayam Date: Tue, 1 May 2018 22:57:42 -0700 Subject: [PATCH 018/385] Changed gflags and glog library order --- src/flip/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 212ead97..424c6c6e 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -55,10 +55,10 @@ find_library(GFLAGS_LIBRARY gflags HINTS ${CMAKE_PREFIX_PATH}/lib) include_directories(${CMAKE_PREFIX_PATH}/include ${PROTO_GEN_DIR}) add_library(flip ${FLIP_LIB_FILES}) -target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GFLAGS_LIBRARY} ${GLOG_LIBRARY}) +target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} ${GFLAGS_LIBRARY}) add_executable(test_flip ${TEST_FLIP_FILES}) -target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GFLAGS_LIBRARY} ${GLOG_LIBRARY} pthread boost_system) +target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} ${GFLAGS_LIBRARY} pthread boost_system) install(TARGETS flip DESTINATION ${CMAKE_PREFIX_PATH}/lib) install(FILES src/flip.hpp proto/flip_spec.proto ${PROTO_GEN_DIR}/flip_spec.pb.h DESTINATION ${CMAKE_PREFIX_PATH}/include/flip) From 534f1cf270ffe8e9c18acdf3515aaeee817e87a1 Mon Sep 17 00:00:00 2001 From: Hari Kadayam Date: Wed, 2 May 2018 14:13:52 -0700 Subject: [PATCH 019/385] [MONSTOR-4676] Added one more action - delay + return error --- src/flip/lib/flip.hpp | 86 ++++++++++++++++++++++++++-------- src/flip/lib/test_flip.cpp | 57 ++++++++++++++++++++++ src/flip/proto/flip_spec.proto | 6 +++ 3 files changed, 130 insertions(+), 19 deletions(-) diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index 529f5df9..13064420 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -15,6 +15,7 @@ #include #include #include +#include namespace flip { static thread_local boost::asio::io_service g_io; @@ -116,6 +117,25 @@ struct val_converter { } }; +template< typename T > +struct delayed_return_param { + uint64_t delay_usec; + T val; +}; + +template +struct val_converter> { + delayed_return_param operator()(const ParamValue &val) { + delayed_return_param dummy; + return dummy; + } +}; + +#define TEST_ONLY 0 +#define RETURN_VAL 1 +#define SET_DELAY 2 +#define DELAYED_RETURN 3 + class Flip { public: Flip() : m_flip_enabled(false) { @@ -161,7 +181,7 @@ class Flip { return true; #endif if (!m_flip_enabled) return false; - auto ret = __test_flip(flip_name, std::forward< Args >(args)...); + auto ret = __test_flip(flip_name, std::forward< Args >(args)...); return (ret != boost::none); } @@ -196,7 +216,7 @@ class Flip { #endif if (!m_flip_enabled) return boost::none; - auto ret = __test_flip(flip_name, std::forward< Args >(args)...); + auto ret = __test_flip(flip_name, std::forward< Args >(args)...); if (ret == boost::none) return boost::none; return boost::optional(boost::get(ret.get())); } @@ -205,24 +225,40 @@ class Flip { bool delay_flip(std::string flip_name, const std::function &closure, Args &&... args) { if (!m_flip_enabled) return false; - auto ret = __test_flip(flip_name, std::forward< Args >(args)...); - if (ret != boost::none) { - uint64_t delay_usec = boost::get(ret.get()); - auto io = std::make_shared(); - boost::asio::deadline_timer t(*io, boost::posix_time::milliseconds(delay_usec/1000)); - t.async_wait([closure, io](const boost::system::error_code& e) { - closure(); - }); - auto ret = io->run(); - return true; - } else { - return false; - } + auto ret = __test_flip(flip_name, std::forward< Args >(args)...); + if (ret == boost::none) return false; // Not a hit + + uint64_t delay_usec = boost::get(ret.get()); + auto io = std::make_shared(); + boost::asio::deadline_timer t(*io, boost::posix_time::milliseconds(delay_usec/1000)); + t.async_wait([closure, io](const boost::system::error_code& e) { + closure(); + }); + io->run(); + return true; + } + + template + bool get_delay_flip(std::string flip_name, const std::function &closure, Args &&... args) { + if (!m_flip_enabled) return false; + + auto ret = __test_flip(flip_name, std::forward< Args >(args)...); + if (ret == boost::none) return false; // Not a hit + + auto param = boost::get>(ret.get()); + + auto io = std::make_shared(); + boost::asio::deadline_timer t(*io, boost::posix_time::milliseconds(param.delay_usec/1000)); + t.async_wait([closure, io, param](const boost::system::error_code& e) { + closure(param.val); + }); + io->run(); + return true; } private: - template< typename T, bool ValueNeeded, class... Args > - boost::optional< boost::variant > __test_flip(std::string flip_name, Args &&... args) { + template< typename T, int ActionType, class... Args > + boost::optional< boost::variant> > __test_flip(std::string flip_name, Args &&... args) { bool exec_completed = false; // If all the exec for the flip is completed. flip_instance *inst = nullptr; @@ -248,10 +284,10 @@ class Flip { LOG(INFO) << "Flip " << flip_name << " matches and hits"; } - boost::variant val_ret ; + boost::variant> val_ret ; switch (inst->m_fspec.flip_action().action_case()) { case FlipAction::kReturns: - if (ValueNeeded) { + if (ActionType == RETURN_VAL) { val_ret = val_converter< T >()(inst->m_fspec.flip_action().returns().return_()); } else { val_ret = true; @@ -267,6 +303,18 @@ class Flip { val_ret = inst->m_fspec.flip_action().delays().delay_in_usec(); break; + case FlipAction::kDelayReturns: + if (ActionType == DELAYED_RETURN) { + auto &flip_dr = inst->m_fspec.flip_action().delay_returns(); + delayed_return_param dr; + dr.delay_usec = flip_dr.delay_in_usec(); + dr.val = val_converter< T >()(flip_dr.return_()); + val_ret = dr; + } else { + val_ret = true; + } + break; + default: val_ret = true; } diff --git a/src/flip/lib/test_flip.cpp b/src/flip/lib/test_flip.cpp index cb21f4bd..c344e84a 100644 --- a/src/flip/lib/test_flip.cpp +++ b/src/flip/lib/test_flip.cpp @@ -5,6 +5,7 @@ #include "flip_spec.pb.h" #include "flip.hpp" #include +#include void create_ret_fspec(flip::FlipSpec *fspec) { *(fspec->mutable_flip_name()) = "ret_fspec"; @@ -108,6 +109,57 @@ void run_and_validate_delay_flip(flip::Flip *flip) { DCHECK_EQ((*closure_calls).load(), 2); } +void create_delay_ret_fspec(flip::FlipSpec *fspec) { + *(fspec->mutable_flip_name()) = "delay_ret_fspec"; + + auto cond = fspec->mutable_conditions()->Add(); + *cond->mutable_name() = "cmd_type"; + cond->set_oper(flip::Operator::EQUAL); + cond->mutable_value()->set_int_value(2); + + fspec->mutable_flip_action()->mutable_delay_returns()->set_delay_in_usec(100000); + fspec->mutable_flip_action()->mutable_delay_returns()->mutable_return_()->set_string_value("Delayed error simulated value"); + + auto freq = fspec->mutable_flip_frequency(); + freq->set_count(2); + freq->set_percent(100); +} + +void run_and_validate_delay_return_flip(flip::Flip *flip) { + int valid_cmd = 2; + int invalid_cmd = -1; + std::shared_ptr< std::atomic > closure_calls = std::make_shared>(0); + + assert(flip->get_delay_flip("delay_ret_fspec", [closure_calls](std::string error) { + (*closure_calls)++; + DCHECK_EQ(error, "Delayed error simulated value"); + }, valid_cmd)); + + assert(!flip->get_delay_flip("delay_ret_fspec", [closure_calls](std::string error) { + assert(0); + (*closure_calls)++; + }, invalid_cmd)); + + assert(flip->get_delay_flip("delay_ret_fspec", [closure_calls](std::string error) { + DCHECK_EQ(error, "Delayed error simulated value"); + (*closure_calls)++; + }, valid_cmd)); + + assert(!flip->get_delay_flip("delay_ret_fspec", [closure_calls](std::string error) { + assert(0); + (*closure_calls)++; + }, invalid_cmd)); + + assert(!flip->get_delay_flip("delay_ret_fspec", [closure_calls](std::string error) { + DCHECK_EQ(error, "Delayed error simulated value"); + (*closure_calls)++; + LOG(INFO) << "Called with error = " << error; + }, valid_cmd)); + + sleep(2); + DCHECK_EQ((*closure_calls).load(), 2); +} + #if 0 void create_multi_cond_fspec(flip::FlipSpec *fspec) { *(fspec->mutable_flip_name()) = "multi_cond1_fspec"; @@ -141,14 +193,19 @@ int main(int argc, char *argv[]) { flip::FlipSpec delay_fspec; create_delay_fspec(&delay_fspec); + flip::FlipSpec delay_ret_fspec; + create_delay_ret_fspec(&delay_ret_fspec); + flip::Flip flip; flip.add(ret_fspec); flip.add(check_fspec); flip.add(delay_fspec); + flip.add(delay_ret_fspec); run_and_validate_ret_flip(&flip); run_and_validate_check_flip(&flip); run_and_validate_delay_flip(&flip); + run_and_validate_delay_return_flip(&flip); return 0; } \ No newline at end of file diff --git a/src/flip/proto/flip_spec.proto b/src/flip/proto/flip_spec.proto index 4658d541..b0bdf86b 100644 --- a/src/flip/proto/flip_spec.proto +++ b/src/flip/proto/flip_spec.proto @@ -79,10 +79,16 @@ message FlipAction { uint64 delay_in_usec = 1; } + message ActionDelayedReturns { + uint64 delay_in_usec = 1; + ParamValue return = 2; + } + oneof action { bool no_action = 1; ActionReturns returns = 2; ActionDelays delays = 3; + ActionDelayedReturns delay_returns = 4; } } From fa118c9e58133cc0b50e3d3ca41d59c62d2fab46 Mon Sep 17 00:00:00 2001 From: Hari Kadayam Date: Wed, 9 May 2018 08:54:07 -0700 Subject: [PATCH 020/385] Race condition fix and added const char * comparator * Concurrent flip hits could potentially exceed maximum count set by total num threads, fixed it * Added const char * comparator --- src/flip/lib/flip.hpp | 160 +++++++++++++++++++++++++++--------------- 1 file changed, 105 insertions(+), 55 deletions(-) diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index 13064420..4757021e 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -110,6 +110,13 @@ struct val_converter { } }; +template <> +struct val_converter { + const char *operator()(const ParamValue &val) { + return (val.kind_case() == ParamValue::kStringValue) ? val.string_value().c_str() : nullptr; + } +}; + template <> struct val_converter { bool operator()(const ParamValue &val) { @@ -131,6 +138,68 @@ struct val_converter> { } }; +template< typename T > +struct compare_val { + bool operator()(const T &val1, const T &val2, Operator oper) { + switch (oper) { + case Operator::DONT_CARE: + return true; + + case Operator::EQUAL: + return (val1 == val2); + + case Operator::NOT_EQUAL: + return (val1 != val2); + + case Operator::GREATER_THAN: + return (val1 > val2); + + case Operator::LESS_THAN: + return (val1 < val2); + + case Operator::GREATER_THAN_OR_EQUAL: + return (val1 >= val2); + + case Operator::LESS_THAN_OR_EQUAL: + return (val1 <= val2); + + default: + return false; + } + } +}; + +template<> +struct compare_val { + bool operator()(const char *&val1, const char *&val2, Operator oper) { + switch (oper) { + case Operator::DONT_CARE: + return true; + + case Operator::EQUAL: + return (val1 && val2 && (strcmp(val1, val2) == 0)) || (!val1 && !val2); + + case Operator::NOT_EQUAL: + return (val1 && val2 && (strcmp(val1, val2) != 0)) || (!val1 && val2) || (val1 && !val2); + + case Operator::GREATER_THAN: + return (val1 && val2 && (strcmp(val1, val2) > 0)) || (val1 && !val2); + + case Operator::LESS_THAN: + return (val1 && val2 && (strcmp(val1, val2) < 0)) || (!val1 && val2); + + case Operator::GREATER_THAN_OR_EQUAL: + return (val1 && val2 && (strcmp(val1, val2) >= 0)) || (val1 && !val2) || (!val1 && !val2); + + case Operator::LESS_THAN_OR_EQUAL: + return (val1 && val2 && (strcmp(val1, val2) <= 0)) || (!val1 && val2) || (!val1 && !val2); + + default: + return false; + } + } +}; + #define TEST_ONLY 0 #define RETURN_VAL 1 #define SET_DELAY 2 @@ -154,32 +223,6 @@ class Flip { template< class... Args > bool test_flip(std::string flip_name, Args &&... args) { -#if 0 - std::shared_lock lock(m_mutex); - - auto inst = match_flip(flip_name, std::forward< Args >(args)...); - if (inst == nullptr) { - //LOG(INFO) << "Flip " << flip_name << " either not exist or conditions not match"; - return false; - } - auto &fspec = inst->m_fspec; - - // Have we already executed this enough times - auto count = fspec.flip_frequency().count(); - if (count && (inst->m_remain_exec_count.load(std::memory_order_acquire) >= count)) { - LOG(INFO) << "Flip " << flip_name << " matches, but it reached max count = " << count; - return false; - } - - if (!handle_hits(fspec.flip_frequency(), inst)) { - LOG(INFO) << "Flip " << flip_name << " matches, but it is rate limited"; - return false; - } - - inst->m_remain_exec_count.fetch_add(1, std::memory_order_acq_rel); - LOG(INFO) << "Flip " << flip_name << " matches and hits"; - return true; -#endif if (!m_flip_enabled) return false; auto ret = __test_flip(flip_name, std::forward< Args >(args)...); return (ret != boost::none); @@ -187,33 +230,6 @@ class Flip { template< typename T, class... Args > boost::optional< T > get_test_flip(std::string flip_name, Args &&... args) { -#if 0 - std::shared_lock lock(m_mutex); - - auto inst = match_flip(flip_name, std::forward< Args >(args)...); - if (inst == nullptr) { - //LOG(INFO) << "Flip " << flip_name << " either not exist or conditions not match"; - return boost::none; - } - auto &fspec = inst->m_fspec; - - // Have we already executed this enough times - if (inst->m_remain_exec_count.load(std::memory_order_acquire) >= fspec.flip_frequency().count()) { - LOG(INFO) << "Flip " << flip_name << " matches, but it reached max count = " - << fspec.flip_frequency().count(); - return boost::none; - } - - if (!handle_hits(fspec.flip_frequency(), inst)) { - LOG(INFO) << "Flip " << flip_name << " matches, but it is rate limited"; - return boost::none; - } - - inst->m_remain_exec_count.fetch_add(1, std::memory_order_acq_rel); - LOG(INFO) << "Flip " << flip_name << " matches and hits"; - - return val_converter< T >()(fspec.returns()); -#endif if (!m_flip_enabled) return boost::none; auto ret = __test_flip(flip_name, std::forward< Args >(args)...); @@ -278,8 +294,12 @@ class Flip { } // Have we already executed this enough times - if (inst->m_remain_exec_count.fetch_sub(1, std::memory_order_acq_rel) == 1) { + auto remain_count = inst->m_remain_exec_count.fetch_sub(1, std::memory_order_acq_rel) - 1; + if (remain_count == 0) { exec_completed = true; + } else if (remain_count < 0) { + LOG(INFO) << "Flip " << flip_name << " matches, but reaches max count"; + return boost::none; } LOG(INFO) << "Flip " << flip_name << " matches and hits"; } @@ -360,7 +380,7 @@ class Flip { template< typename T > bool condition_matches(const FlipCondition &cond, T &comp_val) { auto val1 = val_converter< T >()(cond.value()); - return compare_val< T >(val1, comp_val, cond.oper()); + return compare_val< T >()(val1, comp_val, cond.oper()); } bool handle_hits(const FlipFrequency &freq, flip_instance *inst) { @@ -372,6 +392,7 @@ class Flip { } } +#if 0 template< typename T > bool compare_val(T &val1, T &val2, Operator oper) { switch (oper) { @@ -401,6 +422,35 @@ class Flip { } } + template<> + bool compare_val(const char *&val1, const char *&val2, Operator oper) { + switch (oper) { + case Operator::DONT_CARE: + return true; + + case Operator::EQUAL: + return (val1 && val2 && (strcmp(val1, val2) == 0)) || (!val1 && !val2); + + case Operator::NOT_EQUAL: + return (val1 && val2 && (strcmp(val1, val2) != 0)) || (!val1 && val2) || (val1 && !val2); + + case Operator::GREATER_THAN: + return (val1 && val2 && (strcmp(val1, val2) > 0)) || (val1 && !val2); + + case Operator::LESS_THAN: + return (val1 && val2 && (strcmp(val1, val2) < 0)) || (!val1 && val2); + + case Operator::GREATER_THAN_OR_EQUAL: + return (val1 && val2 && (strcmp(val1, val2) >= 0)) || (val1 && !val2) || (!val1 && !val2); + + case Operator::LESS_THAN_OR_EQUAL: + return (val1 && val2 && (strcmp(val1, val2) <= 0)) || (!val1 && val2) || (!val1 && !val2); + + default: + return false; + } + } +#endif private: std::multimap< std::string, flip_instance, flip_name_compare > m_flip_specs; std::shared_mutex m_mutex; From f6f4e63e921f26388c09d18d04ef787b31a39f14 Mon Sep 17 00:00:00 2001 From: lhuang8 Date: Mon, 24 Sep 2018 12:07:02 -0700 Subject: [PATCH 021/385] Add conan/cmake files for building --- CMakeLists.txt | 25 +++++++++++++++++++++++++ tests/CMakeLists.txt | 1 + tests/unit/CMakeLists.txt | 28 ++++++++++++++++++++++++++++ tests/unit/gtest-all.cpp | 17 +++++++++++++++++ 4 files changed, 71 insertions(+) create mode 100644 CMakeLists.txt create mode 100644 tests/CMakeLists.txt create mode 100644 tests/unit/CMakeLists.txt create mode 100644 tests/unit/gtest-all.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 00000000..295c7779 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,25 @@ +set(CMAKE_CXX_STANDARD 17) +set(CPP_WARNINGS "-Wall -Wextra -Werror -Wno-unused-parameter") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CPP_WARNINGS} -DPACKAGE_NAME=${CONAN_PACKAGE_NAME} -DPACKAGE_VERSION=${CONAN_PACKAGE_VERSION}") + +find_package(Boost REQUIRED) +find_package(Threads REQUIRED) +find_package(OpenSSL REQUIRED) + +include_directories(BEFORE include) + +if (${CMAKE_BUILD_TYPE} STREQUAL Debug) + # Remove tcmalloc from debug builds so valgrind etc. work well + list(REMOVE_ITEM CONAN_LIBS tcmalloc tcmalloc_minimal) +endif () + +set(SDS_GRPC_LIBS ${CONAN_LIBS} ${CMAKE_THREAD_LIBS_INIT}) + + +set (SDS_GRPC_SOURCE ) + + +add_library(sds_grpc INTERFACE) + + +add_subdirectory(tests) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt new file mode 100644 index 00000000..269aea0c --- /dev/null +++ b/tests/CMakeLists.txt @@ -0,0 +1 @@ +add_subdirectory(unit) diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt new file mode 100644 index 00000000..55aea008 --- /dev/null +++ b/tests/unit/CMakeLists.txt @@ -0,0 +1,28 @@ + + +add_library(gtestall gtest-all.cpp) + + + +# enable_testing() + +file(GLOB TEST_SRC_FILES **/*.cpp) + + +MESSAGE( STATUS "TEST_SRC_FILES = " ${TEST_SRC_FILES} ) +MESSAGE( STATUS "CMAKE_RUNTIME_OUTPUT_DIRECTORY = " ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} ) + + +# from list of files we'll create tests +foreach(_test_file ${TEST_SRC_FILES}) + get_filename_component(_test_name ${_test_file} NAME_WE) + add_executable(${_test_name} ${_test_file}) + + target_link_libraries (${_test_name} ${CONAN_LIBS_GTEST}) + add_test(NAME ${_test_name} COMMAND ${_test_name} WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) + set_tests_properties(${_test_name} PROPERTIES TIMEOUT 5) +endforeach() + + + + diff --git a/tests/unit/gtest-all.cpp b/tests/unit/gtest-all.cpp new file mode 100644 index 00000000..7b823856 --- /dev/null +++ b/tests/unit/gtest-all.cpp @@ -0,0 +1,17 @@ +/* + * gtest-all.cpp + * + * Created on: Sep 18, 2018 + * Author: lhuang8 + */ + + +#include + + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + int ret = RUN_ALL_TESTS(); + return ret; +} + From 6fb5a0e40dc4e5aa2436b4f2dae434118ae5a05b Mon Sep 17 00:00:00 2001 From: Lei Huang Date: Thu, 27 Sep 2018 00:33:58 +0000 Subject: [PATCH 022/385] SDSTOR-368 AM-OM gRPC common library Move gRPC client and server code from OM to sds_grpc common library. Function tests are in src/tests/function; for unit test, need to refactor code to make it unit test friendly, then add unit test cases. --- CMakeLists.txt | 7 +- include/sds_grpc/client.h | 306 +++++++++++++++++++++++++++ include/sds_grpc/server.h | 310 ++++++++++++++++++++++++++++ include/sds_grpc/utils.h | 19 ++ lib/client.cpp | 11 + lib/utils.cpp | 38 ++++ tests/CMakeLists.txt | 4 +- tests/function/CMakeLists.txt | 23 +++ tests/function/echo_server.cpp | 91 ++++++++ tests/function/echo_sync_client.cpp | 105 ++++++++++ tests/proto/CMakeLists.txt | 13 ++ tests/proto/sds_grpc_test.proto | 16 ++ tests/unit/CMakeLists.txt | 5 +- tests/unit/gtest-all.cpp | 4 +- 14 files changed, 946 insertions(+), 6 deletions(-) create mode 100644 include/sds_grpc/client.h create mode 100644 include/sds_grpc/server.h create mode 100644 include/sds_grpc/utils.h create mode 100644 lib/client.cpp create mode 100644 lib/utils.cpp create mode 100644 tests/function/CMakeLists.txt create mode 100644 tests/function/echo_server.cpp create mode 100644 tests/function/echo_sync_client.cpp create mode 100644 tests/proto/CMakeLists.txt create mode 100644 tests/proto/sds_grpc_test.proto diff --git a/CMakeLists.txt b/CMakeLists.txt index 295c7779..4c1ec569 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,10 +16,13 @@ endif () set(SDS_GRPC_LIBS ${CONAN_LIBS} ${CMAKE_THREAD_LIBS_INIT}) -set (SDS_GRPC_SOURCE ) +set (SDS_GRPC_SOURCE + lib/client.cpp + lib/utils.cpp +) -add_library(sds_grpc INTERFACE) +add_library(sds_grpc ${SDS_GRPC_SOURCE}) add_subdirectory(tests) diff --git a/include/sds_grpc/client.h b/include/sds_grpc/client.h new file mode 100644 index 00000000..9aafde98 --- /dev/null +++ b/include/sds_grpc/client.h @@ -0,0 +1,306 @@ + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "utils.h" + +namespace sds::grpc +{ + +class CallbackHandler; +class ClientCallMethod; + +using ::grpc::Channel; +using ::grpc::ClientAsyncResponseReader; +using ::grpc::ClientContext; +using ::grpc::CompletionQueue; +using ::grpc::Status; + + + +/** + * ClientCallMethod : Stores the callback handler and method name of the rpc + * + * TODO: rename as BaseClientCallData + */ +class ClientCallMethod { +public: + ClientCallMethod(CallbackHandler* handler, const std::string& methodName) : + cb_handler_(handler), method_name_(methodName) + {} + + virtual ~ClientCallMethod() {} + + const std::string& call_method_name() { return method_name_; } + CallbackHandler* cb_handler() { return cb_handler_; } + +protected: + +private: + CallbackHandler* cb_handler_; + std::string method_name_; +}; + + +/** + * The specialized 'ClientCallMethod' per-response type. + * + * + */ +template +class ClientCallData : public ClientCallMethod { +public: + ClientCallData(CallbackHandler* handler, const std::string& methodName, + uint32_t deadlineSeconds) + : ClientCallMethod(handler, methodName) { + std::chrono::system_clock::time_point deadline = std::chrono::system_clock::now() + + std::chrono::seconds(deadlineSeconds); + context_.set_deadline(deadline); + } + + Status& rpc_status() { return rpc_status_; } + + TREPLY& reply() { return reply_; } + ClientContext& context() { return context_; } + + std::unique_ptr<::grpc::ClientAsyncResponseReader>& responder_reader() { + return response_reader_; + } + +private: + TREPLY reply_; + ClientContext context_; + Status rpc_status_; + std::unique_ptr> response_reader_; +}; + + +/** + * A callback interface for handling gRPC response + * + * + */ +class CallbackHandler { +public: + virtual void on_message(ClientCallMethod* cm) = 0; + virtual ~CallbackHandler() {} +}; + + + + +/** + * A gRPC connection, holds a gRPC Service's stub which used to send gRPC request. + * + * it implements CallbackHandler interface. + * + * + */ +template +class GrpcConnection : public CallbackHandler { +public: + + const std::string& server_addr_; + const std::string& target_domain_; + const std::string ssl_cert_; + + uint32_t dead_line_; + std::shared_ptr<::grpc::ChannelInterface> channel_; + CompletionQueue* completion_queue_; + std::unique_ptr stub_; + + + GrpcConnection(const std::string& server_addr, uint32_t dead_line, + CompletionQueue* cq, const std::string& target_domain, + const std::string& ssl_cert) + : server_addr_(server_addr), target_domain_(target_domain), + ssl_cert_(ssl_cert), dead_line_(dead_line), + completion_queue_(cq) + { + + } + + ~GrpcConnection() { } + + std::unique_ptr& stub() { + return stub_; + } + + virtual bool init() + { + if (!init_channel()) { + return false; + } + + init_stub(); + return true; + } + +protected: + + virtual bool init_channel() { + + ::grpc::SslCredentialsOptions ssl_opts; + + if (!ssl_cert_.empty()) { + + if (load_ssl_cert(ssl_cert_, ssl_opts.pem_root_certs)) { + ::grpc::ChannelArguments channel_args; + channel_args.SetSslTargetNameOverride(target_domain_); + channel_ = ::grpc::CreateCustomChannel(server_addr_, + ::grpc::SslCredentials(ssl_opts), + channel_args); + } else { + // TODO: add log -- lhuang8 + return false; + } + } else { + channel_ = ::grpc::CreateChannel(server_addr_, + ::grpc::InsecureChannelCredentials()); + } + + return true; + } + + virtual void init_stub() + { + stub_ = TSERVICE::NewStub(channel_); + } + + + virtual bool load_ssl_cert(const std::string& ssl_cert, std::string content) + { + return ::sds::grpc::get_file_contents(ssl_cert, content);; + } + + virtual bool is_connection_ready() { + if (channel_->GetState(true) == grpc_connectivity_state::GRPC_CHANNEL_READY) + return true; + else + return false; + } + + virtual void wait_for_connection_ready() { + grpc_connectivity_state state; + int count = 0; + while ((state = channel_->GetState(true)) != GRPC_CHANNEL_READY && count++ < 5000) { + usleep(10000); + } + } + + +}; + + +class GrpcConnectionFactory { + +public: + template + static T* Make(const std::string& server_addr, uint32_t dead_line, + CompletionQueue* cq, const std::string& target_domain, + const std::string& ssl_cert) { + + T* ret = new T(server_addr, dead_line, cq, target_domain, ssl_cert); + if (ret->init()) + return ret; + + return nullptr; + } + +}; + + +/** + * TODO: rename to gRPC client worker -- lhuang8 + * TODO: When work as a async responses handling worker, it's can be hidden from + * user of this lib. + * + * + * The gRPC client, it owns a CompletionQueue and one or more threads, it's only + * used for handling asynchronous responses. + * + * The CompletionQueue is used to send asynchronous request, then the + * response will be handled on this client's threads. + * + */ +class GrpcClient { +public: + GrpcClient() : shutdown_(true) {} + + ~GrpcClient() { + shutdown(); + for (auto& it : t_) { + it->join(); + } + } + + void shutdown() { + if (!shutdown_) { + completion_queue_.Shutdown(); + shutdown_ = true; + } + } + + bool run(uint32_t num_threads) { + if (num_threads == 0) { + return false; + } + + shutdown_ = false; + for (uint32_t i = 0; i < num_threads; ++i) { + // TODO: no need to call async_complete_rpc for sync calls; + std::shared_ptr t = std::shared_ptr( + new std::thread(&GrpcClient::async_complete_rpc, this)); + t_.push_back(t); + } + + return true; + } + + CompletionQueue& cq() { return completion_queue_; } + +private: + void sync_complete_rpc() { // TODO: looks unuseful, remove it + + } + void async_complete_rpc() { + void* got_tag; + bool ok = false; + while (completion_queue_.Next(&got_tag, &ok)) { + if (!ok) { + continue; + } + + ClientCallMethod* cm = static_cast(got_tag); + process(cm); + } + } + + virtual void process(ClientCallMethod * cm) { + CallbackHandler* cb = cm->cb_handler(); + cb->on_message(cm); + } + +protected: + CompletionQueue completion_queue_; + +private: + bool shutdown_; + std::list> t_; +}; + + + + +} // end of namespace sds::common::grpc diff --git a/include/sds_grpc/server.h b/include/sds_grpc/server.h new file mode 100644 index 00000000..c4d3a601 --- /dev/null +++ b/include/sds_grpc/server.h @@ -0,0 +1,310 @@ +/* + * Server.h + * + * Created on: Sep 19, 2018 + */ + + +#pragma once + + +#include +#include +#include + +#include + + +namespace sds::grpc +{ + +using ::grpc::Server; +using ::grpc::ServerAsyncResponseWriter; +using ::grpc::ServerBuilder; +using ::grpc::ServerContext; +using ::grpc::ServerCompletionQueue; +using ::grpc::Status; + + + +/** + * ServerCallMethod : Stores the incoming request callback handler and method name of the rpc + * + * TODO: rename as BaseServerCallData + */ +class ServerCallMethod { +public: + enum CallStatus { CREATE, PROCESS, FINISH }; + +public: + ServerCallMethod(const std::string& method_name): + method_name_(method_name), status_(CREATE) { + } + + virtual ~ServerCallMethod(){} + + const std::string& method_name() { return method_name_; } + CallStatus& status() { return status_; } + + void proceed() { + if (status_ == CREATE){ + + do_create(); + status_ = PROCESS; + + } else if (status_ == PROCESS) { + + do_process(); + status_ = FINISH; + } else { + do_finish(); + } + } + + +protected: + + virtual void do_create() = 0; + virtual void do_process() = 0; + + virtual void do_finish(){ + GPR_ASSERT(status_ == FINISH); + // Once in the FINISH state, deallocate ourselves + delete this; + } + + + std::string method_name_; // TODO: looks like not useful -- lhuang8 + CallStatus status_; + +}; + + +/** + * Once a new instance's proceed() method being called, it will begin to wait for + * one request. + * + * Each instance only handles one request, after that it will be destroyed; + * a new instance will be created automatically for handling next request. + * + */ +template +class ServerCallData final : public ServerCallMethod { + + + typedef std::function*, + ::grpc::CompletionQueue*, ::grpc::ServerCompletionQueue*, + void *)> wait_request_cb_t; + + typedef std::function handle_request_cb_t; + + typedef ServerCallData T; + +public: + ServerCallData(TSERVICE * service, + ::grpc::ServerCompletionQueue *cq, + const std::string& method_name, + wait_request_cb_t wait_request, + handle_request_cb_t handle_request): + ServerCallMethod(method_name), + service_(service), cq_(cq), responder_(&context_), + wait_request_cb_(wait_request), handle_request_cb_(handle_request) { + + } + + ServerContext& context() { return context_; } + TREQUEST& request() { return request_; } + TREPLY& reply() { return reply_; } + ::grpc::ServerAsyncResponseWriter& responder() { return responder_; } + +protected: + + + ServerContext context_; + + TSERVICE * service_; + // The producer-consumer queue where for asynchronous server notifications. + ::grpc::ServerCompletionQueue* cq_; + + TREQUEST request_; + TREPLY reply_; + ::grpc::ServerAsyncResponseWriter responder_; + + wait_request_cb_t wait_request_cb_; + handle_request_cb_t handle_request_cb_; + + + + void do_create() + { + wait_request_cb_(service_, &context_, &request_, &responder_, + cq_, cq_, this); + } + + void do_process() + { + (new T(service_, cq_, method_name_, + wait_request_cb_, handle_request_cb_))->proceed(); + //LOGDEBUGMOD(GRPC, "receive {}", request_.GetTypeName()); + + reply_ = handle_request_cb_(request_); + responder_.Finish(reply_, Status::OK, this); + } + + +}; + + + +template +class GrpcServer { +public: + GrpcServer(); + virtual ~GrpcServer(); + + void shutdown(); + bool is_shutdown(); + bool run(const std::string& ssl_key, const std::string& ssl_cert, + const std::string& listen_addr, uint32_t threads = 1); + + virtual void ready() = 0; + virtual void process(ServerCallMethod * cm) = 0; + + + ::grpc::ServerCompletionQueue * completion_queue() { + return completion_queue_.get(); + } + +private: + // This can be run in multiple threads if needed. + void handle_rpcs(); + // TODO: move this function to utils + bool get_file_contents(const std::string& file_name, std::string& contents); + +protected: + std::unique_ptr<::grpc::ServerCompletionQueue> completion_queue_; + std::unique_ptr server_; + TSERVICE service_; + +private: + bool shutdown_; + std::list> threads_; +}; + + +template +GrpcServer::GrpcServer() + :shutdown_(true) +{} + + +template +GrpcServer::~GrpcServer() { + shutdown(); + for (auto& it : threads_) { + it->join(); + } +} + + +template +void GrpcServer::shutdown() { + if (!shutdown_) { + server_->Shutdown(); + completion_queue_->Shutdown(); + shutdown_ = true; + + } +} + +template +bool GrpcServer::is_shutdown() { + return shutdown_; +} + + +template +bool GrpcServer::run(const std::string& ssl_key, const std::string& ssl_cert, + const std::string& listen_addr, uint32_t threads /* = 1 */) { + if (listen_addr.empty() || threads == 0) { + return false; + } + + ServerBuilder builder; + if (!ssl_cert.empty() && !ssl_key.empty()) { + std::string key_contents; + std::string cert_contents; + get_file_contents(ssl_cert, cert_contents); + get_file_contents(ssl_key, key_contents); + + if (cert_contents.empty() || key_contents.empty()) { + return false; + } + + ::grpc::SslServerCredentialsOptions::PemKeyCertPair pkcp = { key_contents, cert_contents }; + ::grpc::SslServerCredentialsOptions ssl_opts; + ssl_opts.pem_root_certs = ""; + ssl_opts.pem_key_cert_pairs.push_back(pkcp); + + builder.AddListeningPort(listen_addr, ::grpc::SslServerCredentials(ssl_opts)); + } else { + builder.AddListeningPort(listen_addr, ::grpc::InsecureServerCredentials()); + } + + builder.RegisterService(&service_); + completion_queue_ = builder.AddCompletionQueue(); + server_ = builder.BuildAndStart(); + //LOGDEBUGMOD(GRPC, "Server listening on {}", listen_addr); + + shutdown_ = false; + ready(); + + for (uint32_t i = 0; i < threads; ++i) { + std::shared_ptr t = + std::shared_ptr(new std::thread(&GrpcServer::handle_rpcs, this)); + threads_.push_back(t); + } + + return true; +} + + +template +bool GrpcServer::get_file_contents(const std::string& file_name, std::string& contents) { + try { + std::ifstream in(file_name.c_str(), std::ios::in); + if (in) { + std::ostringstream t; + t << in.rdbuf(); + in.close(); + + contents = t.str(); + return true; + } + } catch (...) { + + } + + return false; +} + +template +void GrpcServer::handle_rpcs() { + void* tag; + bool ok = false; + + while (completion_queue_->Next(&tag, &ok)) { + if (!ok) { + continue; + } + + ServerCallMethod* cm = static_cast(tag); + process(cm); + } +} + + + +} diff --git a/include/sds_grpc/utils.h b/include/sds_grpc/utils.h new file mode 100644 index 00000000..a88f5015 --- /dev/null +++ b/include/sds_grpc/utils.h @@ -0,0 +1,19 @@ +/* + * utils.h + * + * Created on: Sep 25, 2018 + */ + +#pragma once + +#include + + +namespace sds::grpc +{ + + +bool get_file_contents(const std::string & file_name, std::string & contents); + +} + diff --git a/lib/client.cpp b/lib/client.cpp new file mode 100644 index 00000000..99123073 --- /dev/null +++ b/lib/client.cpp @@ -0,0 +1,11 @@ +/* + * Client.cpp + * + * Created on: Sep 19, 2018 + */ + +#include "sds_grpc/client.h" + + + + diff --git a/lib/utils.cpp b/lib/utils.cpp new file mode 100644 index 00000000..4c3b63d7 --- /dev/null +++ b/lib/utils.cpp @@ -0,0 +1,38 @@ +/* + * utils.cpp + * + * Created on: Sep 25, 2018 + */ + +#include "sds_grpc/utils.h" +#include +#include + +namespace sds::grpc +{ + +bool get_file_contents(const std::string & file_name, std::string & contents) +{ + try { + std::ifstream in(file_name.c_str(), std::ios::in); + if (in) { + std::ostringstream t; + t << in.rdbuf(); + in.close(); + + contents = t.str(); + return true; + } + } catch (...) { + + } + return false; +} + + + + + +} + + diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 269aea0c..155a795c 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1 +1,3 @@ -add_subdirectory(unit) +add_subdirectory(function) +add_subdirectory(proto) + diff --git a/tests/function/CMakeLists.txt b/tests/function/CMakeLists.txt new file mode 100644 index 00000000..207a56aa --- /dev/null +++ b/tests/function/CMakeLists.txt @@ -0,0 +1,23 @@ + +include_directories(${CMAKE_CURRENT_BINARY_DIR}/../proto) + +set(FUNCTION_TEST_LIBS sds_grpc test_proto + ${CONAN_LIBS_GRPC} + ${CONAN_LIBS_PROTOBUF} + ${CONAN_LIBS_OPENSSL} + ${CONAN_LIBS_C-ARES} + ${CONAN_LIBS_ZLIB} ) + + +# build echo_server +add_executable(echo_server echo_server.cpp) +add_dependencies(echo_server sds_grpc test_proto) +target_link_libraries(echo_server ${FUNCTION_TEST_LIBS} ) + + +# build echo_sync_client +add_executable(echo_sync_client echo_sync_client.cpp) +add_dependencies(echo_sync_client sds_grpc test_proto) +target_link_libraries(echo_sync_client ${FUNCTION_TEST_LIBS} ) + + diff --git a/tests/function/echo_server.cpp b/tests/function/echo_server.cpp new file mode 100644 index 00000000..0793ea9b --- /dev/null +++ b/tests/function/echo_server.cpp @@ -0,0 +1,91 @@ +/* + * echo_server.cpp + * + * Created on: Sep 22, 2018 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "sds_grpc/server.h" +#include "sds_grpc_test.grpc.pb.h" + + +using namespace ::grpc; +using namespace ::sds::grpc; +using namespace ::sds_grpc_test; +using namespace std::placeholders; + + +class RequestDispatcher { + +public: + virtual ~RequestDispatcher() = default; + + virtual EchoReply echo_request(EchoRequest& request) { + EchoReply reply; + + reply.set_message(request.message()); + + std::cout << "receive echo request " << request.message() << std::endl; + + return reply; + } +}; + + +class EchoServer : public GrpcServer { + +public: + EchoServer(RequestDispatcher* dispatcher) + : GrpcServer(), + dispatcher_(dispatcher) { + } + + void ready() { + + (new ServerCallData + (&service_, completion_queue_.get(), "echo", + &Echo::AsyncService::RequestEcho, + std::bind(&RequestDispatcher::echo_request, dispatcher_, _1)))->proceed(); + } + + void process(ServerCallMethod * cm) { + cm->proceed(); + } + + RequestDispatcher* dispatcher_; + +}; + + +void RunServer() { + + std::string server_address("0.0.0.0:50051"); + + RequestDispatcher * dispatcher = new RequestDispatcher(); + EchoServer* server = new EchoServer(dispatcher); + server->run("", "", server_address, 4); + std::cout << "Server listening on " << server_address << std::endl; + + while (!server->is_shutdown()) + { + std::this_thread::sleep_for(std::chrono::seconds(1)); + } + +} + + +int main(int arc, char* argv[]) +{ + std::cout << "Start echo server ..." << std::endl; + + RunServer(); + return 0; +} + diff --git a/tests/function/echo_sync_client.cpp b/tests/function/echo_sync_client.cpp new file mode 100644 index 00000000..21ff8068 --- /dev/null +++ b/tests/function/echo_sync_client.cpp @@ -0,0 +1,105 @@ +/* + * echo_sync_client.cpp + * + * Created on: Sep 22, 2018 + */ + + +#include +#include +#include +#include +#include +#include +#include + + + +#include "sds_grpc/client.h" +#include "sds_grpc_test.grpc.pb.h" + + +using namespace ::grpc; +using namespace ::sds::grpc; +using namespace ::sds_grpc_test; +using namespace std::placeholders; + + + +class EchoSyncClient : public GrpcConnection { + +public: + EchoSyncClient(const std::string& server_addr, uint32_t dead_line, + ::grpc::CompletionQueue* cq, + const std::string& target_domain, + const std::string& ssl_cert) + : GrpcConnection(server_addr, dead_line, cq, target_domain, ssl_cert) { + } + + // TODO: sync mode client doesn't need this method, but must define it because it's pure + // virtual -- lhuang8 + void on_message(ClientCallMethod* cm) { + // not needed for sync calls. + } + +}; + + +int RunClient(const std::string& server_address) { + + GrpcClient* fix_this_name = new GrpcClient(); + + EchoSyncClient* client = GrpcConnectionFactory::Make( + server_address, 5, &(fix_this_name->cq()), "", ""); + if (!client) + { + std::cout << "Create echo client failed." << std::endl; + return -1; + } + + int ret = 0; + + for (int i = 0; i < 3; i++){ + ClientContext context; + EchoRequest request; + EchoReply reply; + + request.set_message(std::to_string(i)); + + Status status =client->stub()->Echo(&context, request, &reply); + if (!status.ok()) + { + std::cout << "echo request " << request.message() << + " failed, status " << status.error_code() << + ": " << status.error_message() << std::endl; + continue; + } + + std::cout << "echo request " << request.message() << + " reply " << reply.message() << std::endl; + + if (request.message() == reply.message()) { + ret++; + } + + } + + return ret; +} + +int main(int argc, char** argv) { + + + std::string server_address("0.0.0.0:50051"); + + + return RunClient(server_address); +} + + + + + + + + diff --git a/tests/proto/CMakeLists.txt b/tests/proto/CMakeLists.txt new file mode 100644 index 00000000..cddc95e5 --- /dev/null +++ b/tests/proto/CMakeLists.txt @@ -0,0 +1,13 @@ +find_package(Protobuf REQUIRED) +include(${CMAKE_HOME_DIRECTORY}/cmake/grpc.cmake) + +protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS sds_grpc_test.proto) + +PROTOBUF_GENERATE_GRPC_CPP(GRPC_SRCS GRPC_HDRS sds_grpc_test.proto) + + +MESSAGE( STATUS "PROTO_SRCS = " ${PROTO_SRCS} " " ${PROTO_HDRS}) +MESSAGE( STATUS "GRPC_SRCS = " ${GRPC_SRCS} " " ${GRPC_HDRS}) + + +add_library(test_proto ${PROTO_SRCS} ${PROTO_HDRS} ${GRPC_SRCS} ${GRPC_HDRS}) diff --git a/tests/proto/sds_grpc_test.proto b/tests/proto/sds_grpc_test.proto new file mode 100644 index 00000000..946d39f5 --- /dev/null +++ b/tests/proto/sds_grpc_test.proto @@ -0,0 +1,16 @@ + +syntax = "proto3"; + +package sds_grpc_test; + +service Echo { + rpc Echo (EchoRequest) returns (EchoReply) {} +} + +message EchoRequest { + string message = 1; +} + +message EchoReply { + string message = 1; +} diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index 55aea008..182a2390 100644 --- a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -2,7 +2,7 @@ add_library(gtestall gtest-all.cpp) - +include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../proto) # enable_testing() @@ -18,7 +18,8 @@ foreach(_test_file ${TEST_SRC_FILES}) get_filename_component(_test_name ${_test_file} NAME_WE) add_executable(${_test_name} ${_test_file}) - target_link_libraries (${_test_name} ${CONAN_LIBS_GTEST}) + add_dependencies(${_test_name} sds_grpc ) + target_link_libraries (${_test_name} sds_grpc gtestall ${CONAN_LIBS} ) add_test(NAME ${_test_name} COMMAND ${_test_name} WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) set_tests_properties(${_test_name} PROPERTIES TIMEOUT 5) endforeach() diff --git a/tests/unit/gtest-all.cpp b/tests/unit/gtest-all.cpp index 7b823856..b41b7fc3 100644 --- a/tests/unit/gtest-all.cpp +++ b/tests/unit/gtest-all.cpp @@ -2,14 +2,16 @@ * gtest-all.cpp * * Created on: Sep 18, 2018 - * Author: lhuang8 */ #include +#include +using log_level = spdlog::level::level_enum; int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); int ret = RUN_ALL_TESTS(); return ret; From 5de4ef1ae81a94e1d7b32eb430a068881dd8aecb Mon Sep 17 00:00:00 2001 From: lhuang8 Date: Wed, 10 Oct 2018 16:34:51 -0700 Subject: [PATCH 023/385] SDSTOR-368 AM-OM gRPC common library - add test_package for `conan test` --- include/sds_grpc/server.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/sds_grpc/server.h b/include/sds_grpc/server.h index c4d3a601..85013d48 100644 --- a/include/sds_grpc/server.h +++ b/include/sds_grpc/server.h @@ -7,6 +7,7 @@ #pragma once +#include #include #include From d1a495014abb7a5c8e1b538b5189f54b935fa337 Mon Sep 17 00:00:00 2001 From: Lei Huang Date: Tue, 9 Oct 2018 23:57:09 +0000 Subject: [PATCH 024/385] SDSTOR-432 sds_grpc: refactor async grpc client - simplify async client reponse handling interface - decoupling response handling code from grpc client(reponse handling callbacks are still being called on client work threads) --- include/sds_grpc/client.h | 155 ++++++++++++++------------- include/sds_grpc/server.h | 2 + tests/function/CMakeLists.txt | 5 + tests/function/echo_async_client.cpp | 124 +++++++++++++++++++++ tests/function/echo_server.cpp | 1 + tests/function/echo_sync_client.cpp | 13 +-- 6 files changed, 214 insertions(+), 86 deletions(-) create mode 100644 tests/function/echo_async_client.cpp diff --git a/include/sds_grpc/client.h b/include/sds_grpc/client.h index 9aafde98..668b2d0f 100644 --- a/include/sds_grpc/client.h +++ b/include/sds_grpc/client.h @@ -7,108 +7,95 @@ #include #include #include +#include +#include #include +#include #include #include -#include #include "utils.h" namespace sds::grpc { -class CallbackHandler; -class ClientCallMethod; - using ::grpc::Channel; using ::grpc::ClientAsyncResponseReader; using ::grpc::ClientContext; using ::grpc::CompletionQueue; using ::grpc::Status; - +using namespace ::std::chrono; /** - * ClientCallMethod : Stores the callback handler and method name of the rpc + * ClientCallMethod : Stores the response handler and method name of the rpc * - * TODO: rename as BaseClientCallData */ class ClientCallMethod { public: - ClientCallMethod(CallbackHandler* handler, const std::string& methodName) : - cb_handler_(handler), method_name_(methodName) - {} - virtual ~ClientCallMethod() {} - const std::string& call_method_name() { return method_name_; } - CallbackHandler* cb_handler() { return cb_handler_; } - -protected: - -private: - CallbackHandler* cb_handler_; - std::string method_name_; + virtual void handle_response() = 0; }; /** * The specialized 'ClientCallMethod' per-response type. * - * */ template -class ClientCallData : public ClientCallMethod { +class ClientCallData final : public ClientCallMethod { + + using handle_response_cb_t = std::function< + void(TREPLY&, ::grpc::Status& status)>; + + using ResponseReaderType = std::unique_ptr< + ::grpc::ClientAsyncResponseReaderInterface>; + public: - ClientCallData(CallbackHandler* handler, const std::string& methodName, - uint32_t deadlineSeconds) - : ClientCallMethod(handler, methodName) { - std::chrono::system_clock::time_point deadline = std::chrono::system_clock::now() + - std::chrono::seconds(deadlineSeconds); + ClientCallData(handle_response_cb_t handle_response_cb) + : handle_response_cb_(handle_response_cb) { } + + void set_deadline(uint32_t seconds) + { + system_clock::time_point deadline = system_clock::now() + + std::chrono::seconds(seconds); context_.set_deadline(deadline); } - Status& rpc_status() { return rpc_status_; } - - TREPLY& reply() { return reply_; } - ClientContext& context() { return context_; } - - std::unique_ptr<::grpc::ClientAsyncResponseReader>& responder_reader() { + ResponseReaderType& responder_reader() { return response_reader_; } -private: - TREPLY reply_; - ClientContext context_; - Status rpc_status_; - std::unique_ptr> response_reader_; -}; + Status & status() { return status_; } + TREPLY & reply() { return reply_; } -/** - * A callback interface for handling gRPC response - * - * - */ -class CallbackHandler { -public: - virtual void on_message(ClientCallMethod* cm) = 0; - virtual ~CallbackHandler() {} -}; + ClientContext & context() { return context_; } + + + virtual void handle_response() override + { + handle_response_cb_(reply_, status_); + } +private: + handle_response_cb_t handle_response_cb_; + TREPLY reply_; + ClientContext context_; + Status status_; + ResponseReaderType response_reader_; +}; /** * A gRPC connection, holds a gRPC Service's stub which used to send gRPC request. * - * it implements CallbackHandler interface. - * - * */ template -class GrpcConnection : public CallbackHandler { +class GrpcConnection { public: const std::string& server_addr_; @@ -133,8 +120,8 @@ class GrpcConnection : public CallbackHandler { ~GrpcConnection() { } - std::unique_ptr& stub() { - return stub_; + typename TSERVICE::StubInterface* stub() { + return stub_.get(); } virtual bool init() @@ -147,6 +134,9 @@ class GrpcConnection : public CallbackHandler { return true; } + CompletionQueue* completion_queue() { return completion_queue_; } + + protected: virtual bool init_channel() { @@ -199,34 +189,44 @@ class GrpcConnection : public CallbackHandler { } } - }; +/** + * + * Use GrpcConnectionFactory::Make() to create instance of + * GrpcConnection. + * + * TODO: This factory is not good enough, should be refactored + * with GrpcConnection and GrpcClient later -- lhuang8 + * + */ class GrpcConnectionFactory { public: template - static T* Make(const std::string& server_addr, uint32_t dead_line, + static std::unique_ptr Make( + const std::string& server_addr, uint32_t dead_line, CompletionQueue* cq, const std::string& target_domain, const std::string& ssl_cert) { - T* ret = new T(server_addr, dead_line, cq, target_domain, ssl_cert); - if (ret->init()) - return ret; + std::unique_ptr ret(new T(server_addr, dead_line, cq, + target_domain, ssl_cert)); + if (!ret->init()) { + ret.reset(nullptr); + } - return nullptr; + return ret; } }; /** - * TODO: rename to gRPC client worker -- lhuang8 + * TODO: inherit GrpcConnection and implement as async client -- lhuang8 * TODO: When work as a async responses handling worker, it's can be hidden from * user of this lib. * - * * The gRPC client, it owns a CompletionQueue and one or more threads, it's only * used for handling asynchronous responses. * @@ -240,7 +240,7 @@ class GrpcClient { ~GrpcClient() { shutdown(); - for (auto& it : t_) { + for (auto& it : threads_) { it->join(); } } @@ -262,7 +262,7 @@ class GrpcClient { // TODO: no need to call async_complete_rpc for sync calls; std::shared_ptr t = std::shared_ptr( new std::thread(&GrpcClient::async_complete_rpc, this)); - t_.push_back(t); + threads_.push_back(t); } return true; @@ -271,36 +271,37 @@ class GrpcClient { CompletionQueue& cq() { return completion_queue_; } private: - void sync_complete_rpc() { // TODO: looks unuseful, remove it - } void async_complete_rpc() { - void* got_tag; + void* tag; bool ok = false; - while (completion_queue_.Next(&got_tag, &ok)) { + while (completion_queue_.Next(&tag, &ok)) { if (!ok) { + // Client-side StartCallit not going to the wire. This + // would happen if the channel is either permanently broken or + // transiently broken but with the fail-fast option. continue; } - ClientCallMethod* cm = static_cast(got_tag); - process(cm); + // The tag was set by ::grpc::ClientAsyncResponseReader<>::Finish(), + // it must be a instance of ClientCallMethod. + // + // TODO: user of this lib should not have change to set the tag, + // need to hide tag from user totally -- lhuang8 + ClientCallMethod* cm = static_cast(tag); + cm->handle_response(); } } - virtual void process(ClientCallMethod * cm) { - CallbackHandler* cb = cm->cb_handler(); - cb->on_message(cm); - } - protected: CompletionQueue completion_queue_; private: bool shutdown_; - std::list> t_; + std::list> threads_; }; -} // end of namespace sds::common::grpc +} // end of namespace sds::grpc diff --git a/include/sds_grpc/server.h b/include/sds_grpc/server.h index 85013d48..411e28a3 100644 --- a/include/sds_grpc/server.h +++ b/include/sds_grpc/server.h @@ -298,6 +298,8 @@ void GrpcServer::handle_rpcs() { while (completion_queue_->Next(&tag, &ok)) { if (!ok) { + // the server has been Shutdown before this particular + // call got matched to an incoming RPC. continue; } diff --git a/tests/function/CMakeLists.txt b/tests/function/CMakeLists.txt index 207a56aa..002aef95 100644 --- a/tests/function/CMakeLists.txt +++ b/tests/function/CMakeLists.txt @@ -21,3 +21,8 @@ add_dependencies(echo_sync_client sds_grpc test_proto) target_link_libraries(echo_sync_client ${FUNCTION_TEST_LIBS} ) +# build echo_async_client +add_executable(echo_async_client echo_async_client.cpp) +add_dependencies(echo_async_client sds_grpc test_proto) +target_link_libraries(echo_async_client ${FUNCTION_TEST_LIBS} ) + diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp new file mode 100644 index 00000000..9e9ee94e --- /dev/null +++ b/tests/function/echo_async_client.cpp @@ -0,0 +1,124 @@ +/* + * echo_async_client.cpp + * + * Created on: Oct 9, 2018 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include "sds_grpc/client.h" +#include "sds_grpc_test.grpc.pb.h" + + +using namespace ::grpc; +using namespace ::sds::grpc; +using namespace ::sds_grpc_test; +using namespace std::placeholders; + + +class EchoAsyncClient : public GrpcConnection<::sds_grpc_test::Echo> +{ +public: + EchoAsyncClient(const std::string& server_addr, uint32_t dead_line, + ::grpc::CompletionQueue* cq, + const std::string& target_domain, + const std::string& ssl_cert) + : GrpcConnection<::sds_grpc_test::Echo>( + server_addr, dead_line, cq, target_domain, ssl_cert) + { + + } + + + void Echo(const EchoRequest& request, + std::function callback) + { + auto call = new ClientCallData(callback); + call->set_deadline(dead_line_); + call->responder_reader() = stub()->AsyncEcho( + &call->context(), request, completion_queue()); + call->responder_reader()->Finish(&call->reply(), &call->status(), (void*)call); + } + + +}; + + +std::atomic_int g_counter; + +class Ping +{ +public: + + Ping(int seqno) + { + request_.set_message(std::to_string(seqno)); + } + + void handle_echo_reply(EchoReply& reply, ::grpc::Status& status) + { + if (!status.ok()) + { + std::cout << "echo request " << request_.message() << + " failed, status " << status.error_code() << + ": " << status.error_message() << std::endl; + return; + } + + std::cout << "echo request " << request_.message() << + " reply " << reply.message() << std::endl; + + + assert(request_.message() == reply.message()); + g_counter.fetch_add(1, std::memory_order_relaxed); + } + + EchoRequest request_; +}; + + +int RunClient(const std::string& server_address) +{ + GrpcClient* fix_this_name = new GrpcClient(); + auto client = GrpcConnectionFactory::Make( + server_address, 5, &(fix_this_name->cq()), "", ""); + + if (!client) + { + std::cout << "Create echo async client failed." << std::endl; + return -1; + } + + fix_this_name->run(3); + + for (int i = 0; i < 10; i++) + { + Ping * ping = new Ping(i); + client->Echo(ping->request_, std::bind(&Ping::handle_echo_reply, ping, _1, _2)); + } + + delete fix_this_name; // wait client worker threads terminate + + return g_counter.load(); + +} + + +int main(int argc, char** argv) { + + std::string server_address("0.0.0.0:50051"); + + + return RunClient(server_address); +} + + diff --git a/tests/function/echo_server.cpp b/tests/function/echo_server.cpp index 0793ea9b..1616b1ad 100644 --- a/tests/function/echo_server.cpp +++ b/tests/function/echo_server.cpp @@ -78,6 +78,7 @@ void RunServer() { std::this_thread::sleep_for(std::chrono::seconds(1)); } + delete server; } diff --git a/tests/function/echo_sync_client.cpp b/tests/function/echo_sync_client.cpp index 21ff8068..5880ea17 100644 --- a/tests/function/echo_sync_client.cpp +++ b/tests/function/echo_sync_client.cpp @@ -33,13 +33,8 @@ class EchoSyncClient : public GrpcConnection { ::grpc::CompletionQueue* cq, const std::string& target_domain, const std::string& ssl_cert) - : GrpcConnection(server_addr, dead_line, cq, target_domain, ssl_cert) { - } - - // TODO: sync mode client doesn't need this method, but must define it because it's pure - // virtual -- lhuang8 - void on_message(ClientCallMethod* cm) { - // not needed for sync calls. + : GrpcConnection(server_addr, dead_line, cq, target_domain, ssl_cert) + { } }; @@ -49,7 +44,7 @@ int RunClient(const std::string& server_address) { GrpcClient* fix_this_name = new GrpcClient(); - EchoSyncClient* client = GrpcConnectionFactory::Make( + auto client = GrpcConnectionFactory::Make( server_address, 5, &(fix_this_name->cq()), "", ""); if (!client) { @@ -93,7 +88,7 @@ int main(int argc, char** argv) { std::string server_address("0.0.0.0:50051"); - return RunClient(server_address); + return RunClient(server_address); } From 19fd482ba70aa40b0efe9a86263431efe3d687a8 Mon Sep 17 00:00:00 2001 From: Lei Huang Date: Mon, 22 Oct 2018 23:39:47 +0000 Subject: [PATCH 025/385] SDSTOR-453 sds_grpc: fix crash issue - While GrpcServer handling async requests, ServerCallData's status_ was changed too late, with multiple completion queue worker threads, that causes request being handled twice and grpc abort with error: E1022 23:22:13.746129310 6644 server_cc.cc:662] Fatal: grpc_call_start_batch returned 8 E1022 23:22:13.746148610 6644 server_cc.cc:663] ops[0]: SEND_MESSAGE ptr=0x7f39f0008320 E1022 23:22:13.746158310 6644 server_cc.cc:663] ops[1]: SEND_STATUS_FROM_SERVER status=0 details=(null)(nil) --- include/sds_grpc/server.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/include/sds_grpc/server.h b/include/sds_grpc/server.h index 411e28a3..1d0e8cf9 100644 --- a/include/sds_grpc/server.h +++ b/include/sds_grpc/server.h @@ -8,7 +8,6 @@ #pragma once #include - #include #include #include @@ -49,14 +48,11 @@ class ServerCallMethod { void proceed() { if (status_ == CREATE){ - - do_create(); status_ = PROCESS; - + do_create(); } else if (status_ == PROCESS) { - - do_process(); status_ = FINISH; + do_process(); } else { do_finish(); } From e0506b3836bd9c18062374c2d51cb8d48bdc697d Mon Sep 17 00:00:00 2001 From: Lei Huang Date: Fri, 26 Oct 2018 21:52:33 +0000 Subject: [PATCH 026/385] SDSTOR-463 sds_grpc: hide ServerCallData from GrpcServer interface It's not necessary to let user of libsds_grpc to see ServerCallData, which is a internal object which used by grpc server for handling asynchronized request. --- CMakeLists.txt | 1 + include/sds_grpc/server.h | 178 +++++++++++++++------------ lib/server.cpp | 34 +++++ tests/function/echo_async_client.cpp | 4 +- tests/function/echo_server.cpp | 43 +++---- tests/function/echo_sync_client.cpp | 6 +- tests/proto/sds_grpc_test.proto | 2 +- tests/unit/CMakeLists.txt | 6 +- tests/unit/gtest-all.cpp | 19 --- 9 files changed, 163 insertions(+), 130 deletions(-) create mode 100644 lib/server.cpp delete mode 100644 tests/unit/gtest-all.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 4c1ec569..b0e2f67b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -18,6 +18,7 @@ set(SDS_GRPC_LIBS ${CONAN_LIBS} ${CMAKE_THREAD_LIBS_INIT}) set (SDS_GRPC_SOURCE lib/client.cpp + lib/server.cpp lib/utils.cpp ) diff --git a/include/sds_grpc/server.h b/include/sds_grpc/server.h index 1d0e8cf9..5f8b8f01 100644 --- a/include/sds_grpc/server.h +++ b/include/sds_grpc/server.h @@ -28,129 +28,127 @@ using ::grpc::Status; /** - * ServerCallMethod : Stores the incoming request callback handler and method name of the rpc + * Defines the life cycle of handling a gRPC call. * - * TODO: rename as BaseServerCallData */ -class ServerCallMethod { +class BaseServerCallData { public: enum CallStatus { CREATE, PROCESS, FINISH }; + CallStatus& status() { return status_; } + public: - ServerCallMethod(const std::string& method_name): - method_name_(method_name), status_(CREATE) { - } - - virtual ~ServerCallMethod(){} - - const std::string& method_name() { return method_name_; } - CallStatus& status() { return status_; } - - void proceed() { - if (status_ == CREATE){ - status_ = PROCESS; - do_create(); - } else if (status_ == PROCESS) { - status_ = FINISH; - do_process(); - } else { - do_finish(); - } - } + /** + * During the life cycle of this object, this method should be called + * 3 times with different status: + * - CREATE is the initial status, the object was just created, it request + * that the gRPC server start processing async requests. In this request, + * "this" is used as tag for uniquely identifying the request, so that + * different CallData instances can serve different requests + * concurrently. + * - PROCESS is for handling the request, e.g. the incoming request can be + * routed to a callback function. Once the handling is done, the gRPC + * runtime should be informed, e.g for unary calls, + * ServerAsyncResponseWriter::Finish() should be called. + * - FINISH is for destroy this object, gRPC server has sent the + * appropriate signals to the client to end the call. + */ + void proceed(); protected: - virtual void do_create() = 0; - virtual void do_process() = 0; - - virtual void do_finish(){ - GPR_ASSERT(status_ == FINISH); - // Once in the FINISH state, deallocate ourselves - delete this; + BaseServerCallData() : status_(CREATE) { } + virtual ~BaseServerCallData() {} - std::string method_name_; // TODO: looks like not useful -- lhuang8 - CallStatus status_; + /** + * See BaseServerCallData::proceed() for semantics. + */ + virtual void do_create() = 0; + /** + * See BaseServerCallData::proceed() for semantics. + */ + virtual void do_process() = 0; + + /** + * See BaseServerCallData::proceed() for semantics. + */ + virtual void do_finish(); + + CallStatus status_; }; /** - * Once a new instance's proceed() method being called, it will begin to wait for - * one request. - * - * Each instance only handles one request, after that it will be destroyed; + * Each instance only handles one request, after that it will be destroyed; * a new instance will be created automatically for handling next request. * */ -template -class ServerCallData final : public ServerCallMethod { - +template +class ServerCallData final : public BaseServerCallData { typedef std::function*, - ::grpc::CompletionQueue*, ::grpc::ServerCompletionQueue*, - void *)> wait_request_cb_t; + ::grpc::ServerContext*, + TREQUEST*, + ::grpc::ServerAsyncResponseWriter*, + ::grpc::CompletionQueue*, + ::grpc::ServerCompletionQueue*, + void *)> request_call_func_t; - typedef std::function handle_request_cb_t; + typedef std::function<::grpc::Status(TREQUEST&, TRESPONSE&)> handle_call_func_t; - typedef ServerCallData T; + typedef ServerCallData T; + +private: + template + friend class GrpcServer; -public: ServerCallData(TSERVICE * service, ::grpc::ServerCompletionQueue *cq, - const std::string& method_name, - wait_request_cb_t wait_request, - handle_request_cb_t handle_request): - ServerCallMethod(method_name), + request_call_func_t wait_request, + handle_call_func_t handle_request): + BaseServerCallData(), service_(service), cq_(cq), responder_(&context_), - wait_request_cb_(wait_request), handle_request_cb_(handle_request) { - + wait_request_func_(wait_request), + handle_request_func_(handle_request) { } - ServerContext& context() { return context_; } - TREQUEST& request() { return request_; } - TREPLY& reply() { return reply_; } - ::grpc::ServerAsyncResponseWriter& responder() { return responder_; } + ::grpc::ServerAsyncResponseWriter& responder() { return responder_; } protected: + ServerContext context_; - ServerContext context_; - - TSERVICE * service_; + TSERVICE * service_; // The producer-consumer queue where for asynchronous server notifications. ::grpc::ServerCompletionQueue* cq_; - TREQUEST request_; - TREPLY reply_; - ::grpc::ServerAsyncResponseWriter responder_; - - wait_request_cb_t wait_request_cb_; - handle_request_cb_t handle_request_cb_; - + TREQUEST request_; + TRESPONSE reponse_; + ::grpc::ServerAsyncResponseWriter responder_; + request_call_func_t wait_request_func_; + handle_call_func_t handle_request_func_; void do_create() { - wait_request_cb_(service_, &context_, &request_, &responder_, + wait_request_func_(service_, &context_, &request_, &responder_, cq_, cq_, this); } void do_process() { - (new T(service_, cq_, method_name_, - wait_request_cb_, handle_request_cb_))->proceed(); + (new T(service_, cq_, + wait_request_func_, handle_request_func_))->proceed(); //LOGDEBUGMOD(GRPC, "receive {}", request_.GetTypeName()); - reply_ = handle_request_cb_(request_); - responder_.Finish(reply_, Status::OK, this); + ::grpc::Status status = handle_request_func_(request_, reponse_); + responder_.Finish(reponse_, status, this); } - }; @@ -158,6 +156,9 @@ class ServerCallData final : public ServerCallMethod { template class GrpcServer { public: + + typedef TSERVICE ServiceType; + GrpcServer(); virtual ~GrpcServer(); @@ -166,17 +167,42 @@ class GrpcServer { bool run(const std::string& ssl_key, const std::string& ssl_cert, const std::string& listen_addr, uint32_t threads = 1); + /** + * Currently, user need to inherit GrpcServer and register rpc calls. + * This will be changed by "SDSTOR-464 sds_grpc: make single + * sds_grpc::GrpcServer instance supports multiple gRPC services" + */ virtual void ready() = 0; - virtual void process(ServerCallMethod * cm) = 0; ::grpc::ServerCompletionQueue * completion_queue() { return completion_queue_.get(); } + template + void register_rpc( + std::function< + void(TSVC*, + ::grpc::ServerContext*, + TREQUEST*, + ::grpc::ServerAsyncResponseWriter*, + ::grpc::CompletionQueue*, + ::grpc::ServerCompletionQueue*, + void *)> request_call_func, + std::function<::grpc::Status(TREQUEST&, TRESPONSE&)> handle_request_func){ + + (new ServerCallData ( + &service_, completion_queue_.get(), + request_call_func, + handle_request_func))->proceed(); + } + + private: - // This can be run in multiple threads if needed. + // This can be called by multiple threads void handle_rpcs(); + void process(BaseServerCallData * cm); + // TODO: move this function to utils bool get_file_contents(const std::string& file_name, std::string& contents); @@ -299,8 +325,8 @@ void GrpcServer::handle_rpcs() { continue; } - ServerCallMethod* cm = static_cast(tag); - process(cm); + BaseServerCallData* cm = static_cast(tag); + cm->proceed(); } } diff --git a/lib/server.cpp b/lib/server.cpp new file mode 100644 index 00000000..fa619d71 --- /dev/null +++ b/lib/server.cpp @@ -0,0 +1,34 @@ +/* + * server.cpp + * + * Created on: Oct 24, 2018 + */ + +#include + + +namespace sds::grpc +{ + +void BaseServerCallData::proceed() { + if (status_ == CREATE){ + status_ = PROCESS; + do_create(); + } else if (status_ == PROCESS) { + // status must be changed firstly, otherwise this may + // cause concurrency issue with multi-threads + status_ = FINISH; + do_process(); + } else { + do_finish(); + } +} + + +void BaseServerCallData::do_finish(){ + GPR_ASSERT(status_ == FINISH); + // Once in the FINISH state, this can be destroyed + delete this; +} + +} diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp index 9e9ee94e..c16d5f6f 100644 --- a/tests/function/echo_async_client.cpp +++ b/tests/function/echo_async_client.cpp @@ -25,14 +25,14 @@ using namespace ::sds_grpc_test; using namespace std::placeholders; -class EchoAsyncClient : public GrpcConnection<::sds_grpc_test::Echo> +class EchoAsyncClient : public GrpcConnection<::sds_grpc_test::EchoService> { public: EchoAsyncClient(const std::string& server_addr, uint32_t dead_line, ::grpc::CompletionQueue* cq, const std::string& target_domain, const std::string& ssl_cert) - : GrpcConnection<::sds_grpc_test::Echo>( + : GrpcConnection<::sds_grpc_test::EchoService>( server_addr, dead_line, cq, target_domain, ssl_cert) { diff --git a/tests/function/echo_server.cpp b/tests/function/echo_server.cpp index 1616b1ad..d9272fd3 100644 --- a/tests/function/echo_server.cpp +++ b/tests/function/echo_server.cpp @@ -22,44 +22,39 @@ using namespace ::sds_grpc_test; using namespace std::placeholders; -class RequestDispatcher { +class EchoServiceImpl { public: - virtual ~RequestDispatcher() = default; - - virtual EchoReply echo_request(EchoRequest& request) { - EchoReply reply; - - reply.set_message(request.message()); + virtual ~EchoServiceImpl() = default; + virtual ::grpc::Status echo_request(EchoRequest& request, EchoReply& response) { std::cout << "receive echo request " << request.message() << std::endl; - - return reply; + response.set_message(request.message()); + return ::grpc::Status::OK; } + + }; +using EchoAsyncService = ::sds_grpc_test::EchoService::AsyncService; -class EchoServer : public GrpcServer { +class EchoServer : public GrpcServer { public: - EchoServer(RequestDispatcher* dispatcher) - : GrpcServer(), - dispatcher_(dispatcher) { + EchoServer(EchoServiceImpl* impl) + : GrpcServer(), + impl_(impl) { } void ready() { - (new ServerCallData - (&service_, completion_queue_.get(), "echo", - &Echo::AsyncService::RequestEcho, - std::bind(&RequestDispatcher::echo_request, dispatcher_, _1)))->proceed(); - } - - void process(ServerCallMethod * cm) { - cm->proceed(); + std::cout << "register rpc calls" << std::endl; + register_rpc( + &EchoAsyncService::RequestEcho, + std::bind(&EchoServiceImpl::echo_request, impl_, _1, _2)); } - RequestDispatcher* dispatcher_; + EchoServiceImpl* impl_; }; @@ -68,8 +63,8 @@ void RunServer() { std::string server_address("0.0.0.0:50051"); - RequestDispatcher * dispatcher = new RequestDispatcher(); - EchoServer* server = new EchoServer(dispatcher); + EchoServiceImpl * impl = new EchoServiceImpl(); + EchoServer* server = new EchoServer(impl); server->run("", "", server_address, 4); std::cout << "Server listening on " << server_address << std::endl; diff --git a/tests/function/echo_sync_client.cpp b/tests/function/echo_sync_client.cpp index 5880ea17..fa146af7 100644 --- a/tests/function/echo_sync_client.cpp +++ b/tests/function/echo_sync_client.cpp @@ -26,14 +26,14 @@ using namespace std::placeholders; -class EchoSyncClient : public GrpcConnection { +class EchoSyncClient : public GrpcConnection { public: EchoSyncClient(const std::string& server_addr, uint32_t dead_line, ::grpc::CompletionQueue* cq, const std::string& target_domain, const std::string& ssl_cert) - : GrpcConnection(server_addr, dead_line, cq, target_domain, ssl_cert) + : GrpcConnection(server_addr, dead_line, cq, target_domain, ssl_cert) { } @@ -61,7 +61,7 @@ int RunClient(const std::string& server_address) { request.set_message(std::to_string(i)); - Status status =client->stub()->Echo(&context, request, &reply); + Status status = client->stub()->Echo(&context, request, &reply); if (!status.ok()) { std::cout << "echo request " << request.message() << diff --git a/tests/proto/sds_grpc_test.proto b/tests/proto/sds_grpc_test.proto index 946d39f5..8902356c 100644 --- a/tests/proto/sds_grpc_test.proto +++ b/tests/proto/sds_grpc_test.proto @@ -3,7 +3,7 @@ syntax = "proto3"; package sds_grpc_test; -service Echo { +service EchoService { rpc Echo (EchoRequest) returns (EchoReply) {} } diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index 182a2390..8fa7cde9 100644 --- a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -1,14 +1,10 @@ - -add_library(gtestall gtest-all.cpp) - include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../proto) # enable_testing() file(GLOB TEST_SRC_FILES **/*.cpp) - MESSAGE( STATUS "TEST_SRC_FILES = " ${TEST_SRC_FILES} ) MESSAGE( STATUS "CMAKE_RUNTIME_OUTPUT_DIRECTORY = " ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} ) @@ -19,7 +15,7 @@ foreach(_test_file ${TEST_SRC_FILES}) add_executable(${_test_name} ${_test_file}) add_dependencies(${_test_name} sds_grpc ) - target_link_libraries (${_test_name} sds_grpc gtestall ${CONAN_LIBS} ) + target_link_libraries (${_test_name} sds_grpc ${CONAN_LIBS} ) add_test(NAME ${_test_name} COMMAND ${_test_name} WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) set_tests_properties(${_test_name} PROPERTIES TIMEOUT 5) endforeach() diff --git a/tests/unit/gtest-all.cpp b/tests/unit/gtest-all.cpp deleted file mode 100644 index b41b7fc3..00000000 --- a/tests/unit/gtest-all.cpp +++ /dev/null @@ -1,19 +0,0 @@ -/* - * gtest-all.cpp - * - * Created on: Sep 18, 2018 - */ - - -#include -#include - -using log_level = spdlog::level::level_enum; - -int main(int argc, char** argv) { - - ::testing::InitGoogleTest(&argc, argv); - int ret = RUN_ALL_TESTS(); - return ret; -} - From 2f6b0f82603547f19ab2ca6310845f06725166b8 Mon Sep 17 00:00:00 2001 From: Hari Kadayam Date: Mon, 29 Oct 2018 07:38:37 -0700 Subject: [PATCH 027/385] Following changes are added: 1. Local flip client which injects from same executable as appln being tested (in which case grpc is not needed) 2. Truly async delay instead of sync delay (using thread) 3. Decent initial README --- src/flip/CMakeLists.txt | 8 ++ src/flip/README.md | 128 +++++++++++++++++- src/flip/lib/flip.hpp | 218 ++++++++++++++++++++++++++++-- src/flip/lib/test_flip_client.cpp | 133 ++++++++++++++++++ 4 files changed, 472 insertions(+), 15 deletions(-) create mode 100644 src/flip/lib/test_flip_client.cpp diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 424c6c6e..71dd6cb1 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -49,6 +49,11 @@ set(TEST_FLIP_FILES src/test_flip.cpp ) +set(TEST_FLIP_CLIENT_FILES + ${FLIP_LIB_FILES} + src/test_flip_client.cpp + ) + find_library(PROTOBUF_LIBRARY protobuf HINTS ${CMAKE_PREFIX_PATH}/lib) find_library(GLOG_LIBRARY glog HINTS ${CMAKE_PREFIX_PATH}/lib) find_library(GFLAGS_LIBRARY gflags HINTS ${CMAKE_PREFIX_PATH}/lib) @@ -60,6 +65,9 @@ target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} ${GFLAGS_LIBRARY} add_executable(test_flip ${TEST_FLIP_FILES}) target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} ${GFLAGS_LIBRARY} pthread boost_system) +add_executable(test_flip_client ${TEST_FLIP_CLIENT_FILES}) +target_link_libraries(test_flip_client ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} ${GFLAGS_LIBRARY} pthread boost_system) + install(TARGETS flip DESTINATION ${CMAKE_PREFIX_PATH}/lib) install(FILES src/flip.hpp proto/flip_spec.proto ${PROTO_GEN_DIR}/flip_spec.pb.h DESTINATION ${CMAKE_PREFIX_PATH}/include/flip) diff --git a/src/flip/README.md b/src/flip/README.md index 08db98f2..46cd67ef 100644 --- a/src/flip/README.md +++ b/src/flip/README.md @@ -3,4 +3,130 @@ Flip stands for **F**au**l**t **I**njection **P**oint. Its a generic framework for injecting fault into the code. It provides a framework, where actual fault could be injected outside the application. -More info to be added later \ No newline at end of file +# Fault Injection + +To induce the system to take an alternate path of the code, we explicitly inject faults. One typical way to achieve +this is by explicitly writing a injected fault and make it compile time option to trigger, something like. + +```c++ +io_status IO::submit(packet_t *pkt) { + ... +#ifdef INJECT_SUBMIT_FAULT + return io_status::error; +#endif + .... + return real_submit(pkt); +} +``` + +Unfortunately this is extremely limited because every time a fault needs to triggered it needs to be recompiled. Another +limitation to this approach is that it hits on every call to a method irrespective of any condition. This makes it lot of +hand holding and affect automation of the test cases. Flip tries to eliminate these issues and provide a generic framework +to ease out fault injection. It provides the following: + +**Multiple fault injection points:** Flip supports multiple fault injection points in a single program. +Each fault injection point is associated with an unique name. Example: + +```c++ +Flip flip; +io_status IO::submit(packet_t *pkt) { + if (flip.test_flip("fail_all_writes")) { + // Do your failure generation here + return io_status::error; + } + return real_submit(pkt); +} +``` + +**Trigger fault externally:** One of the major design goal is the ability to trigger these faults externally. Flip provides a +protobuf based serialization, which can be used by any RPC mechanism. + +**Parameterized faults:** In the above example, if there should be a provision for different types of packets to be injected at +different instances, there would be a fault injection for every type. This will quickly become unscalabale approach as more and +more packet types could be added and generic framework idea will be lost. Flip provides parameterized definition of faults. The +above example could be extended to + +```c++ +Flip flip; +io_status IO::submit(packet_t *pkt) { + if (flip.test_flip("fail_specific_writes", pkt->op_code)) { + // Do your failure generation here + return io_status::error; + } + return real_submit(pkt); +} +``` + +Here the _pkt->op_code_ is the parameter which could be controlled externally in-order to inject the fault. Flip supports filtering +various conditions (not just ==) to the value of the parameter. Hence, while triggering in the above example one can trigger +all OP_TYPE_CREATE or anything but OP_TYPE_CREATE etc. + +There are no limits to number of parameters, but the 2 conditions will be anded. The above example could be expanded to +```c++ +Flip flip; +io_status IO::submit(packet_t *pkt) { + if (flip.test_flip("fail_specific_writes", pkt->op_code, pkt->size)) { + // Do your failure generation here + return io_status::error; + } + return real_submit(pkt); +} +``` + +**Return specific values:** It is useful to inject an alternate path, but what will be more useful is whats the error it should +generate as configurable. This will avoid multiple flip points for different types of error generation. Extending above example, +if one wants to simulate return of different errors from IO::submit, one can write similar _flip.test_flip("")_ for all possible +error codes, but it will become too verbose. Flip supports parameterized return as well. Hence it one could do the following +```c++ +Flip flip; +io_status IO::submit(packet_t *pkt) { + auto ret = flip.test_flip("fail_specific_writes", pkt->op_code, pkt->size); + if (ret) { + // Do your failure generation here + return ret.get(); + } + return real_submit(pkt); +} +``` + +Now fault injection externally can make flip return specific errors, not just io_status::error, but say io_status::corrupted_data +etc.. + +**Async delay injection:** One more typical use of fault injection is to delay execution or simulate delays to test timeout code +paths etc.. In a sync code, it is easy to put a sleep to reproduce the delay. However, more and more applications do async operation +or code being completely async. In these cases, there needs a timer routine to keep track of the delay. Flip covers this and +creates a simple async delay injection framework. + +```c++ +Flip flip; +void IO::process_response(packet_t *pkt) { + if (flip.delay_flip("delay_response", [this, pkt]() { + IO::real_process_response(pkt); + }, pkt->op_code)) { + return; + } + IO::real_process_response(pkt); +``` + +Above example, provide the fault injection to delay specific opcode. After the configured delay (externally controllable) it calls +the closure. As always number of parameters are unlimited and it is exactly similar to the other types of fault injections explained above. +Also like other faults, it can be controlled externally on how many times and how frequent the faults have to be triggered. + +Flip supports combining delay_flip and simulated_value generation flip, so it can generate a specific value after imparting +delay. This will be useful, since after delay an application typically return a timeout error or other types of errors which +will have different behavior in apps, which needed to be tested. + +```c++ +Flip flip; +void IO::process_response(packet_t *pkt) { + if (flip.get_delay_flip("delay_response", [this, pkt](io_status generated_error) { + pkt->status = generated_error; + IO::real_process_response(pkt); + }, pkt->op_code)) { + return; + } + IO::real_process_response(pkt); +``` + +In the above example after delay the value injected externally is passed to the closure, which could be used to simulate +various error scenarios. \ No newline at end of file diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index 4757021e..8042be77 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -66,6 +66,7 @@ struct flip_instance { std::atomic< int32_t > m_remain_exec_count; }; +/****************************** Proto Param to Value converter ******************************/ template struct val_converter { T operator()(const ParamValue &val) { @@ -138,6 +139,65 @@ struct val_converter> { } }; +/******************************************** Value to Proto converter ****************************************/ +template +struct to_proto_converter { + void operator()(const T& val, ParamValue* out_pval) { + } +}; + +template <> +struct to_proto_converter { + void operator()(const int& val, ParamValue* out_pval) { + out_pval->set_int_value(val); + } +}; + +#if 0 +template <> +struct val_converter { + const int operator()(const ParamValue &val) { + return (val.kind_case() == ParamValue::kIntValue) ? val.int_value() : 0; + } +}; +#endif + +template <> +struct to_proto_converter { + void operator()(const long& val, ParamValue* out_pval) { + out_pval->set_long_value(val); + } +}; + +template <> +struct to_proto_converter { + void operator()(const double& val, ParamValue* out_pval) { + out_pval->set_double_value(val); + } +}; + +template <> +struct to_proto_converter { + void operator()(const std::string& val, ParamValue* out_pval) { + out_pval->set_string_value(val); + } +}; + +template <> +struct to_proto_converter { + void operator()(const char*& val, ParamValue* out_pval) { + out_pval->set_string_value(val); + } +}; + +template <> +struct to_proto_converter { + void operator()(const bool& val, ParamValue* out_pval) { + out_pval->set_bool_value(val); + } +}; + +/******************************************* Comparators *************************************/ template< typename T > struct compare_val { bool operator()(const T &val1, const T &val2, Operator oper) { @@ -200,6 +260,50 @@ struct compare_val { } }; +using io_service = boost::asio::io_service; +using deadline_timer = boost::asio::deadline_timer; +using io_work = boost::asio::io_service::work; + +class FlipTimer { +public: + FlipTimer() : m_timer_count(0) {} + ~FlipTimer() { + if (m_timer_thread != nullptr) { + m_work.reset(); + m_timer_thread->join(); + } + } + + void schedule(boost::posix_time::time_duration delay_us, const std::function& closure) { + std::unique_lock lk(m_thr_mutex); + ++m_timer_count; + if (m_work == nullptr) { + m_work = std::make_unique(m_svc); + m_timer_thread = std::make_unique(std::bind(&FlipTimer::timer_thr, this)); + } + + auto t = std::make_shared(m_svc, delay_us); + t->async_wait([this, closure, t](const boost::system::error_code& e){ + if (e) { LOG(ERROR) << "Error in timer routine, message " << e.message(); } + else { closure(); } + std::unique_lock lk(m_thr_mutex); + --m_timer_count; + }); + } + + void timer_thr() { + size_t executed = 0; + executed = m_svc.run(); + } + +private: + io_service m_svc; + std::unique_ptr m_work; + std::mutex m_thr_mutex; + int32_t m_timer_count; + std::unique_ptr< std::thread >m_timer_thread; +}; + #define TEST_ONLY 0 #define RETURN_VAL 1 #define SET_DELAY 2 @@ -214,6 +318,8 @@ class Flip { m_flip_enabled = true; auto inst = flip_instance(fspec); + LOG(INFO) << "Fpsec: " << fspec.DebugString(); + // TODO: Add verification to see if the flip is already scheduled, any errors etc.. std::unique_lock lock(m_mutex); m_flip_specs.emplace(std::pair< std::string, flip_instance >(fspec.flip_name(), inst)); @@ -221,6 +327,26 @@ class Flip { return true; } +#if 0 + bool add_flip(std::string flip_name, std::vector conditions, FlipAction& action, + uint32_t count, uint8_t percent) { + FlipSpec fspec; + *(fspec.mutable_flip_name()) = "delay_ret_fspec"; + + auto cond = fspec->mutable_conditions()->Add(); + *cond->mutable_name() = "cmd_type"; + cond->set_oper(flip::Operator::EQUAL); + cond->mutable_value()->set_int_value(2); + + fspec->mutable_flip_action()->mutable_delay_returns()->set_delay_in_usec(100000); + fspec->mutable_flip_action()->mutable_delay_returns()->mutable_return_()->set_string_value("Delayed error simulated value"); + + auto freq = fspec->mutable_flip_frequency(); + freq->set_count(2); + freq->set_percent(100); + } +#endif + template< class... Args > bool test_flip(std::string flip_name, Args &&... args) { if (!m_flip_enabled) return false; @@ -245,12 +371,7 @@ class Flip { if (ret == boost::none) return false; // Not a hit uint64_t delay_usec = boost::get(ret.get()); - auto io = std::make_shared(); - boost::asio::deadline_timer t(*io, boost::posix_time::milliseconds(delay_usec/1000)); - t.async_wait([closure, io](const boost::system::error_code& e) { - closure(); - }); - io->run(); + m_timer.schedule(boost::posix_time::microseconds(delay_usec), closure); return true; } @@ -262,13 +383,10 @@ class Flip { if (ret == boost::none) return false; // Not a hit auto param = boost::get>(ret.get()); - - auto io = std::make_shared(); - boost::asio::deadline_timer t(*io, boost::posix_time::milliseconds(param.delay_usec/1000)); - t.async_wait([closure, io, param](const boost::system::error_code& e) { + LOG(INFO) << "Returned param delay = " << param.delay_usec << " val = " << param.val; + m_timer.schedule(boost::posix_time::microseconds(param.delay_usec), [closure, param]() { closure(param.val); }); - io->run(); return true; } @@ -363,7 +481,7 @@ class Flip { auto i = 0U; bool matched = true; for_each(arglist, [this, fspec, &i, &matched](auto &v) { - if (!condition_matches(fspec.conditions()[i++], v)) { + if (!condition_matches(v, fspec.conditions()[i++])) { matched = false; } }); @@ -378,9 +496,9 @@ class Flip { } template< typename T > - bool condition_matches(const FlipCondition &cond, T &comp_val) { + bool condition_matches(T &comp_val, const FlipCondition &cond) { auto val1 = val_converter< T >()(cond.value()); - return compare_val< T >()(val1, comp_val, cond.oper()); + return compare_val< T >()(comp_val, val1, cond.oper()); } bool handle_hits(const FlipFrequency &freq, flip_instance *inst) { @@ -455,6 +573,78 @@ class Flip { std::multimap< std::string, flip_instance, flip_name_compare > m_flip_specs; std::shared_mutex m_mutex; bool m_flip_enabled; + FlipTimer m_timer; +}; + +class FlipClient { +public: + explicit FlipClient(Flip *f) : m_flip(f) {} + + template< typename T> + void create_condition(const std::string& param_name, flip::Operator oper, const T& value, FlipCondition *out_condition) { + *(out_condition->mutable_name()) = param_name; + out_condition->set_oper(oper); + to_proto_converter()(value, out_condition->mutable_value()); + } + + bool inject_noreturn_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, const FlipFrequency &freq) { + FlipSpec fspec; + + _create_flip_spec(flip_name, conditions, freq, fspec); + fspec.mutable_flip_action()->set_no_action(true); + + m_flip->add(fspec); + return true; + } + + template + bool inject_retval_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, + const FlipFrequency& freq, const T& retval) { + FlipSpec fspec; + + _create_flip_spec(flip_name, conditions, freq, fspec); + to_proto_converter()(retval, fspec.mutable_flip_action()->mutable_returns()->mutable_return_()); + + m_flip->add(fspec); + return true; + } + + bool inject_delay_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, + const FlipFrequency& freq, uint64_t delay_usec) { + FlipSpec fspec; + + _create_flip_spec(flip_name, conditions, freq, fspec); + fspec.mutable_flip_action()->mutable_delays()->set_delay_in_usec(delay_usec); + + m_flip->add(fspec); + return true; + } + + template + bool inject_delay_and_retval_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, + const FlipFrequency &freq, uint64_t delay_usec, const T& retval) { + FlipSpec fspec; + + _create_flip_spec(flip_name, conditions, freq, fspec); + fspec.mutable_flip_action()->mutable_delays()->set_delay_in_usec(delay_usec); + to_proto_converter()(retval, fspec.mutable_flip_action()->mutable_delay_returns()->mutable_return_()); + + m_flip->add(fspec); + return true; + } + +private: + void _create_flip_spec(std::string flip_name, const std::vector< FlipCondition >& conditions, + const FlipFrequency& freq, FlipSpec& out_fspec) { + *(out_fspec.mutable_flip_name()) = flip_name; + for (auto &c: conditions) { + *(out_fspec.mutable_conditions()->Add()) = c; + } + *(out_fspec.mutable_flip_frequency()) = freq; + } + +private: + Flip* m_flip; }; } // namespace flip diff --git a/src/flip/lib/test_flip_client.cpp b/src/flip/lib/test_flip_client.cpp new file mode 100644 index 00000000..69f166f1 --- /dev/null +++ b/src/flip/lib/test_flip_client.cpp @@ -0,0 +1,133 @@ +// +// Created by Kadayam, Hari on 28/03/18. +// + +#include "flip_spec.pb.h" +#include "flip.hpp" +#include +#include + +using namespace flip; + +Flip g_flip; + +void run_and_validate_noret_flip() { + int valid_cmd = 1; + int invalid_cmd = -1; + + assert(!g_flip.test_flip("noret_flip", invalid_cmd)); + assert(g_flip.test_flip("noret_flip", valid_cmd)); + assert(!g_flip.test_flip("noret_flip", invalid_cmd)); + assert(g_flip.test_flip("noret_flip", valid_cmd)); + assert(!g_flip.test_flip("noret_flip", valid_cmd)); // Not more than 2 +} + +void run_and_validate_ret_flip() { + std::string my_vol = "vol1"; + std::string unknown_vol = "unknown_vol"; + + auto result = g_flip.get_test_flip("simval_flip", my_vol); + assert(result); + assert(result.get() == "Simulated error value"); + + result = g_flip.get_test_flip("simval_flip", unknown_vol); + assert(!result); + + result = g_flip.get_test_flip("simval_flip", my_vol); + assert(result); + assert(result.get() == "Simulated error value"); + + result = g_flip.get_test_flip("simval_flip", my_vol); + assert(!result); // Not more than 2 +} + +void run_and_validate_delay_flip() { + int valid_cmd = 1; + long valid_size_bytes1 = 2047; + long valid_size_bytes2 = 2048; + int invalid_cmd = -1; + long invalid_size_bytes = 4096; + std::shared_ptr< std::atomic > closure_calls = std::make_shared>(0); + + assert(g_flip.delay_flip("delay_flip", [closure_calls]() {(*closure_calls)++;}, valid_cmd, valid_size_bytes1)); + assert(!g_flip.delay_flip("delay_flip", [closure_calls]() {(*closure_calls)++;}, invalid_cmd, valid_size_bytes1)); + assert(g_flip.delay_flip("delay_flip", [closure_calls]() { (*closure_calls)++;}, valid_cmd, valid_size_bytes2)); + assert(!g_flip.delay_flip("delay_flip", [closure_calls]() {(*closure_calls)++;}, valid_cmd, invalid_size_bytes)); + assert(!g_flip.delay_flip("delay_flip", [closure_calls]() {(*closure_calls)++;}, valid_cmd, valid_size_bytes1)); + + sleep(2); + DCHECK_EQ((*closure_calls).load(), 2); +} + +void run_and_validate_delay_return_flip() { + double valid_double = 2.0; + double invalid_double = 1.85; + std::shared_ptr< std::atomic > closure_calls = std::make_shared>(0); + + assert(g_flip.get_delay_flip("delay_simval_flip", [closure_calls](std::string error) { + (*closure_calls)++; + DCHECK_EQ(error, "Simulated delayed errval"); + }, valid_double)); + + assert(!g_flip.get_delay_flip("delay_simval_flip", [closure_calls](std::string error) { + assert(0); + (*closure_calls)++; + }, invalid_double)); + + assert(g_flip.get_delay_flip("delay_simval_flip", [closure_calls](std::string error) { + DCHECK_EQ(error, "Simulated delayed errval"); + (*closure_calls)++; + }, valid_double)); + + assert(!g_flip.get_delay_flip("delay_simval_flip", [closure_calls](std::string error) { + assert(0); + (*closure_calls)++; + }, invalid_double)); + + assert(!g_flip.get_delay_flip("delay_simval_flip", [closure_calls](std::string error) { + DCHECK_EQ(error, "Simulated delayed errval"); + (*closure_calls)++; + LOG(INFO) << "Called with error = " << error; + }, valid_double)); + + sleep(2); + DCHECK_EQ((*closure_calls).load(), 2); +} + +int main(int argc, char *argv[]) { + FlipClient fclient(&g_flip); + FlipFrequency freq; + + /* Inject a no return action flip */ + FlipCondition cond1; + fclient.create_condition("cmd_type", flip::Operator::EQUAL, (int)1, &cond1); + freq.set_count(2); freq.set_percent(100); + fclient.inject_noreturn_flip("noret_flip", {cond1}, freq); + + /* Inject a invalid return action flip */ + FlipCondition cond2; + fclient.create_condition("vol_name", flip::Operator::EQUAL, "vol1", &cond2); + freq.set_count(2); freq.set_percent(100); + fclient.inject_retval_flip("simval_flip", {cond2}, freq, "Simulated error value"); + + /* Inject a delay of 100ms action flip */ + FlipCondition cond3, cond4; + fclient.create_condition("cmd_type", flip::Operator::EQUAL, (int)1, &cond3); + fclient.create_condition("size_bytes", flip::Operator::LESS_THAN_OR_EQUAL, (long)2048, &cond4); + freq.set_count(2); freq.set_percent(100); + fclient.inject_delay_flip("delay_flip", {cond3, cond4}, freq, 100000); + + /* Inject a delay of 1second and return a value action flip */ + FlipCondition cond5; + fclient.create_condition("double_val", flip::Operator::NOT_EQUAL, (double)1.85, &cond5); + freq.set_count(2); freq.set_percent(100); + fclient.inject_delay_and_retval_flip("delay_simval_flip", {cond5}, freq, 1000000, "Simulated delayed errval"); + + /* Now execute the flip and validate that they are correct */ + run_and_validate_noret_flip(); + run_and_validate_ret_flip(); + run_and_validate_delay_flip(); + run_and_validate_delay_return_flip(); + + return 0; +} \ No newline at end of file From 6f1483070e88cfd651a867a6b3affb4e0c1eba8c Mon Sep 17 00:00:00 2001 From: Hari Kadayam Date: Mon, 29 Oct 2018 10:01:31 -0700 Subject: [PATCH 028/385] API definition, examples etc.. added to README --- src/flip/README.md | 215 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 206 insertions(+), 9 deletions(-) diff --git a/src/flip/README.md b/src/flip/README.md index 46cd67ef..ba6dc642 100644 --- a/src/flip/README.md +++ b/src/flip/README.md @@ -5,7 +5,7 @@ It provides a framework, where actual fault could be injected outside the applic # Fault Injection -To induce the system to take an alternate path of the code, we explicitly inject faults. One typical way to achieve +To induce the system to take an alternate path of the code, we simulate faults. One typical way to achieve this is by explicitly writing a injected fault and make it compile time option to trigger, something like. ```c++ @@ -22,9 +22,11 @@ io_status IO::submit(packet_t *pkt) { Unfortunately this is extremely limited because every time a fault needs to triggered it needs to be recompiled. Another limitation to this approach is that it hits on every call to a method irrespective of any condition. This makes it lot of hand holding and affect automation of the test cases. Flip tries to eliminate these issues and provide a generic framework -to ease out fault injection. It provides the following: +to ease out fault injection. -**Multiple fault injection points:** Flip supports multiple fault injection points in a single program. +Following are some of the important features of Flip: + +* **Multiple fault injection points:** Flip supports multiple fault injection points in a single program. Each fault injection point is associated with an unique name. Example: ```c++ @@ -38,10 +40,10 @@ io_status IO::submit(packet_t *pkt) { } ``` -**Trigger fault externally:** One of the major design goal is the ability to trigger these faults externally. Flip provides a +* **Trigger fault externally:** One of the major design goal is the ability to trigger these faults externally. Flip provides a protobuf based serialization, which can be used by any RPC mechanism. -**Parameterized faults:** In the above example, if there should be a provision for different types of packets to be injected at +* **Parameterized faults:** In the above example, if there should be a provision for different types of packets to be injected at different instances, there would be a fault injection for every type. This will quickly become unscalabale approach as more and more packet types could be added and generic framework idea will be lost. Flip provides parameterized definition of faults. The above example could be extended to @@ -73,14 +75,14 @@ io_status IO::submit(packet_t *pkt) { } ``` -**Return specific values:** It is useful to inject an alternate path, but what will be more useful is whats the error it should +* **Return specific values:** It is useful to inject an alternate path, but what will be more useful is whats the error it should generate as configurable. This will avoid multiple flip points for different types of error generation. Extending above example, if one wants to simulate return of different errors from IO::submit, one can write similar _flip.test_flip("")_ for all possible error codes, but it will become too verbose. Flip supports parameterized return as well. Hence it one could do the following ```c++ Flip flip; io_status IO::submit(packet_t *pkt) { - auto ret = flip.test_flip("fail_specific_writes", pkt->op_code, pkt->size); + auto ret = flip.get_test_flip("fail_specific_writes", pkt->op_code, pkt->size); if (ret) { // Do your failure generation here return ret.get(); @@ -92,7 +94,7 @@ io_status IO::submit(packet_t *pkt) { Now fault injection externally can make flip return specific errors, not just io_status::error, but say io_status::corrupted_data etc.. -**Async delay injection:** One more typical use of fault injection is to delay execution or simulate delays to test timeout code +* **Async delay injection:** One more typical use of fault injection is to delay execution or simulate delays to test timeout code paths etc.. In a sync code, it is easy to put a sleep to reproduce the delay. However, more and more applications do async operation or code being completely async. In these cases, there needs a timer routine to keep track of the delay. Flip covers this and creates a simple async delay injection framework. @@ -129,4 +131,199 @@ void IO::process_response(packet_t *pkt) { ``` In the above example after delay the value injected externally is passed to the closure, which could be used to simulate -various error scenarios. \ No newline at end of file +various error scenarios. + +# How to use Flip + +Flip usage has 2 phases + +* **Definition phase:** This is the phase where the declaration of which place and what action needs to be taken in application code +with the fault. It needs to be written before compiling the application code. +* **Injection phase:** This is the phase where the faults are injected or triggered either through local flip client or external client. + +There are **4** important parameters that needs to be determined for a fault point. The proto file _proto/flip_spec.proto_ defines +these parameters: + +**Flip name**: Unique name identifies this flip point. There can be multiple instances (with different parameters) for same flip +point, say "fail_writes" flip for opcode=1, opcode=2 can coexist (with the same name) in above examples. The name needs to be +declared during definition phase and addressed with that during injection phase. + +**Flip Parameters**: The list of parameters that needs to filter out a flip. During definition phase, application code needs to +decide what are the possible filtering attributes to control with. Thus it is advisable to write a flip in possible common portion +of the code and let injection phase decide what it needs to filter on. In above example **_flip.get_test_flip("fail_specific_writes", pkt->op_code, pkt->size)_** +allows the injection phase to filter on opcode and pkt_size. Note that if there are multiple parameters each filter conditions are and'd. + +During injection phase, user can supply values and operator for each parameter. Flip as of now only supports primitive types (int, long, +double, bool, uint64) and string, const char* as parameter types. It supports all operators (==, >, <, >=, <=, !=) and one more called "*" +to ignore the check for this parameter. + +**Flip Action**: If the fault is triggered what action the application should take. Flip supports 4 types of action + +* **No explicit action**: Flip does not take any other action other than returning the fault is hit. Application code will then +write the error simulation code. +* **Return a value action**: A value decided during injection phase (of type determined during definition phase) will be returned +as part of flip hit. +* **Delay action**: Introduce a time delay determined during the injection phase. +* **Delay and return a value action**: Combining above 2. + +**Flip frequency**: This is actioned only during injection phase, which determines how frequently and how much the flip has to hit +or trigger the fault. + +Count: How many times it needs to hit. +Frequency: + either percentage of times it needs to hit (to randomly hit for this much percentage) or + every nth time + +## Flip APIs +Flip can be initialized with default constructor. In future it will accept parameters like having application own timer routine +and also specific grpc instance. As of now flip when called with default constructor creates/uses its own timer and thread for +delay and does not provide any RPC service to call. + +### test_flip +```c++ +template< class... Args > +bool test_flip(std::string flip_name, Args &&... args); +``` +Test if flip is triggered or not. Parameters are +* flip_name: Name of the flip +* args: variable list of arguments to filter. Arguments can be of primitive types or std::string or const char * + +Returns: If flip is hit or not. Flip is hit only if it matches the filter criteria and frequency of injection criteria. This API +can be called on any of the 4 types of flip. + +### get_test_flip +```c++ +template< typename T, class... Args > +boost::optional< T > get_test_flip(std::string flip_name, Args &&... args); +``` +Test if flip is triggered and if triggered, returns injected value. +* flip_name: Name of the flip +* args: variable list of arguments to filter. Arguments can be of primitive types or std::string or const char * + +Returns: If flip is not hit, returns boost::none, otherwise returns the injected value. The injected value cannot be of one of the +primitive types or std::string. This API is only valid for "return a value action" flip type. + +### delay_flip +```c++ +template< class... Args > +bool delay_flip(std::string flip_name, const std::function &closure, Args &&... args); +``` +Test if flip is triggered and if triggered, calls the supplied closure callback after injected delay time in microseconds. +* flip_name: Name of the flip +* closure: The callback closure to call after the delay, if flip is hit +* args: variable list of arguments to filter. Arguments can be of primitive types or std::string or const char * + +Returns: If the flip is hit or not. Whether flip is hit or not is immediately known. + +### get_delay_flip +```c++ +template +bool get_delay_flip(std::string flip_name, const std::function &closure, Args &&... args); +``` +Test if flip is triggered and if triggered, calls the supplied closure callback after injected delay time in microseconds with +the injected value as a parameter. +* flip_name: Name of the flip +* closure: The callback closure to call after the delay, if flip is hit. The closure should accept the parameter of type which +the fault could be injected with. +* args: variable list of arguments to filter. Arguments can be of primitive types or std::string or const char * + +Returns: If the flip is hit or not. Whether flip is hit or not is immediately known. + +## Integration with Application + +Flip is a header only framework and hence will be included and compiled along with application binary. It uses a protobuf to +serialize the message about how faults can be triggered. The protobuf could be used against any RPCs the application provide. +If application uses GRPC, the grpc definition needs to add the following RPC call to the grpc service proto +```c++ +// Inject a fault rpc +rpc InjectFault (flip.FlipSpec) returns (flip.FlipResponse); +``` + +**TODO:** Future work will provide a mechanism to start its own grpc server if needed, instead of relying on application rpc mechanism. + +# Flip Client + +Flip needs a client to trigger the faults externally. The exact client depends on which RPC it is integrated with application. + +## GRPC Client +There is a current implementation using GRPC for a project called "NuData/MonstorDB.git" which has nodejs client to inject the fault. Example of +grpc service is provided in path "MonstorDB/nodejs-test/test/support/monstor_client/inject_fault.js" and examples of how to use is +in "MonstorDB/nodejst-test/test/support/run_grpc_client.js" + +Example: +```javascript +await test.do_inject_fault( + "op_error_in_bson", + [{name : "op_type", oper : FlipOperator.EQUAL.ordinal, value : {string_value : "INSERT"} }], // Conditions} + { returns : { return : { int_value : 6 } } }, // Returns BSON_DOCUMENT_CORRUPTED + 1, // count + 100 // percentage +) +``` + +**TODO:** Write a standalone client which can be used to trigger various faults on different languages. + +## Local Client +If the code that needs to be fault injected and tested is a library in itself and that there is separate unit tests which runs +the code, we don't need an RPC, but local calls to trigger the faults. For example, if the tested code runs with GTest or other +unit test framework, the test code runs in the same context as actual code. Flip provides a FlipClient class to trigger faults. + +Following are the APIs for FlipClient +### FlipClient() +```c++ +FlipClient(Flip *f) +``` +Constructs a flipclient providing the flip instances of actual code. Typically FlipClient and Flip could be singleton instances. + +### create_condition +```c++ +template< typename T> +void create_condition(const std::string& param_name, flip::Operator oper, const T& value, FlipCondition *out_condition); +``` +Parameters are: +* param_name: This is not used for any cross verification but just for logging purposes +* oper: One of the flip operators (==, >, <, >=, <= !=, *) +* value: Value parameter of the filter criteria +* condition: Returns the FlipCondition that will be passed in subsequent APIs. + +### Inject APIs +```c++ +bool inject_noreturn_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, const FlipFrequency &freq); + +template +bool inject_retval_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, + const FlipFrequency& freq, const T& retval); + +bool inject_delay_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, + const FlipFrequency& freq, uint64_t delay_usec); +template +bool inject_delay_and_retval_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, + const FlipFrequency &freq, uint64_t delay_usec, const T& retval); +``` + +Parameters are: +* flip_name: Name of the flip to inject the fault to +* conditions: Vector of conditions which will be and'd. Each condition can be created using create_condition API +* freq: Flip frequency determines how much and how frequent. Can be constructed using _FlipFrequency::set_count(), +FlipFrequency::set_percent(), FlipFrequency::set_every_nth()_ +* retval: (for _inject_retval_flip_ and _inject_delay_and_retval_flip_): What is the injected value to be returned or called back respecitvely +* delay_usec: (for _inject_delay_flip_ and _inject_delay_and_retval_flip_): How much delay to inject + +Returns: +* If successfully injected the fault or not. + +Example code +```c++ + Flip flip; + FlipClient fclient(&flip); + ... + FlipCondition cond1, cond2; + fclient.create_condition("cmd_type", flip::Operator::EQUAL, (int)1, &cond1); + fclient.create_condition("size_bytes", flip::Operator::LESS_THAN_OR_EQUAL, (long)2048, &cond2); + + FlipFrequency freq; + freq.set_count(2); freq.set_percent(100); + fclient.inject_delay_flip("delay_flip", {cond1, cond2}, freq, 100000 /* delay in usec */); +``` +Above examples, trigger a flip called delay flip to inject a delay of 100ms if cmd_type == 1 and size_bytes <= 2048 + From e24a9025aefcfc1504c96bb51562beb4f41a3431 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 29 Oct 2018 19:13:32 +0000 Subject: [PATCH 029/385] Conanize the Flip project for SDS This is a fork of the NuData/Flip project with a conanized version of the library and client. --- src/flip/CMakeLists.txt | 89 +++------ src/flip/cmake/CodeCoverage.cmake | 303 ++++++++++++++++++++++++++++++ src/flip/cmake/debug_flags.cmake | 67 +++++++ src/flip/proto/CMakeLists.txt | 15 ++ 4 files changed, 413 insertions(+), 61 deletions(-) create mode 100644 src/flip/cmake/CodeCoverage.cmake create mode 100644 src/flip/cmake/debug_flags.cmake create mode 100644 src/flip/proto/CMakeLists.txt diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 71dd6cb1..022a91d7 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -1,73 +1,40 @@ -cmake_minimum_required(VERSION 3.7) +cmake_minimum_required(VERSION 3.10) project(flip) set(CMAKE_CXX_STANDARD 17) -#set (PROJECT_SOURCE_DIR ${CMAKE_SOURCE_DIR}/src) - -if (NOT DEFINED PROTOBUF_PROTOC_EXECUTABLE) - set(PROTOBUF_PROTOC_EXECUTABLE protoc) -endif() - -if (NOT DEFINED CMAKE_PREFIX_PATH) - set(CMAKE_PREFIX_PATH /usr/local/) +if(EXISTS ${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) + include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) + conan_basic_setup() +else() + message(WARNING "The file conanbuildinfo.cmake doesn't exist, you have to run conan install first") + return() endif() -set(PROTO_DIR ${PROJECT_SOURCE_DIR}/proto) -set(PROTO_GEN_DIR ${CMAKE_BINARY_DIR}/gen_src/proto) -file(MAKE_DIRECTORY ${PROTO_GEN_DIR}) -#set(FLIP_PROTO_FILE proto/flip_spec.proto) - -get_filename_component(PROTO_FULL_PATH proto/flip_spec.proto ABSOLUTE) -file(RELATIVE_PATH PROTO_REL_PATH ${CMAKE_CURRENT_SOURCE_DIR}/proto ${PROTO_FULL_PATH}) -message(STATUS "Processing ${PROTO_FULL_PATH} Relative path ${PROTO_REL_PATH}") - -#set(_GEN_HEADERS src/proto/flip_spec.pb.h) -#list(APPEND _generated_headers "${_GEN_HEADERS}") -#set(_GEN_SOURCES src/proto/flip_spec.pb.cc) - -#target_sources(test_flip PRIVATE ${_GEN_SOURCES}) -#set_source_files_properties(${_GEN_SOURCES} ${_GEN_HEADERS} PROPERTIES GENERATED TRUE) - -add_custom_command( - OUTPUT ${PROTO_GEN_DIR}/flip_spec.pb.cc - COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} - ARGS --cpp_out=${PROTO_GEN_DIR} -I. ${PROTO_REL_PATH} - DEPENDS proto/flip_spec.proto - WORKING_DIRECTORY ${PROTO_DIR} - COMMENT "Running C++ protocol buffer compiler on flip_spec.proto" - VERBATIM -) - -set(FLIP_LIB_FILES - src/flip.hpp - ${PROTO_GEN_DIR}/flip_spec.pb.cc - ) - -set(TEST_FLIP_FILES - ${FLIP_LIB_FILES} - src/test_flip.cpp - ) - -set(TEST_FLIP_CLIENT_FILES - ${FLIP_LIB_FILES} - src/test_flip_client.cpp - ) +if (${CMAKE_BUILD_TYPE} STREQUAL Debug) + if (NOT ${CONAN_SETTINGS_COMPILER} STREQUAL "clang" AND NOT ${CONAN_SETTINGS_COMPILER} STREQUAL "apple-clang") + include (cmake/debug_flags.cmake) + endif () +endif () +if (${MEMORY_SANITIZER_ON}) + include (cmake/mem_sanitizer.cmake) +endif () -find_library(PROTOBUF_LIBRARY protobuf HINTS ${CMAKE_PREFIX_PATH}/lib) -find_library(GLOG_LIBRARY glog HINTS ${CMAKE_PREFIX_PATH}/lib) -find_library(GFLAGS_LIBRARY gflags HINTS ${CMAKE_PREFIX_PATH}/lib) -include_directories(${CMAKE_PREFIX_PATH}/include ${PROTO_GEN_DIR}) +find_program(CCACHE_FOUND ccache) +if (CCACHE_FOUND) + set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) + set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache) +endif () -add_library(flip ${FLIP_LIB_FILES}) -target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} ${GFLAGS_LIBRARY}) +find_package(Protobuf REQUIRED) -add_executable(test_flip ${TEST_FLIP_FILES}) -target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} ${GFLAGS_LIBRARY} pthread boost_system) +add_subdirectory(proto) +message("Here: ${PROTO_PATH}") -add_executable(test_flip_client ${TEST_FLIP_CLIENT_FILES}) -target_link_libraries(test_flip_client ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} ${GFLAGS_LIBRARY} pthread boost_system) +include_directories(BEFORE include ${PROTO_PATH}) -install(TARGETS flip DESTINATION ${CMAKE_PREFIX_PATH}/lib) -install(FILES src/flip.hpp proto/flip_spec.proto ${PROTO_GEN_DIR}/flip_spec.pb.h DESTINATION ${CMAKE_PREFIX_PATH}/include/flip) +add_executable(test_flip src/test_flip.cpp) +target_link_libraries(test_flip flip) +add_executable(test_flip_client src/test_flip_client.cpp) +target_link_libraries(test_flip_client flip) diff --git a/src/flip/cmake/CodeCoverage.cmake b/src/flip/cmake/CodeCoverage.cmake new file mode 100644 index 00000000..932c3d06 --- /dev/null +++ b/src/flip/cmake/CodeCoverage.cmake @@ -0,0 +1,303 @@ +# Copyright (c) 2012 - 2017, Lars Bilke +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# CHANGES: +# +# 2012-01-31, Lars Bilke +# - Enable Code Coverage +# +# 2013-09-17, Joakim Söderberg +# - Added support for Clang. +# - Some additional usage instructions. +# +# 2016-02-03, Lars Bilke +# - Refactored functions to use named parameters +# +# 2017-06-02, Lars Bilke +# - Merged with modified version from github.com/ufz/ogs +# +# +# USAGE: +# +# 1. Copy this file into your cmake modules path. +# +# 2. Add the following line to your CMakeLists.txt: +# include(CodeCoverage) +# +# 3. Append necessary compiler flags: +# APPEND_COVERAGE_COMPILER_FLAGS() +# +# 4. If you need to exclude additional directories from the report, specify them +# using the COVERAGE_LCOV_EXCLUDES variable before calling SETUP_TARGET_FOR_COVERAGE_LCOV. +# Example: +# set(COVERAGE_LCOV_EXCLUDES 'dir1/*' 'dir2/*') +# +# 5. Use the functions described below to create a custom make target which +# runs your test executable and produces a code coverage report. +# +# 6. Build a Debug build: +# cmake -DCMAKE_BUILD_TYPE=Debug .. +# make +# make my_coverage_target +# + +include(CMakeParseArguments) + +# Check prereqs +find_program( GCOV_PATH gcov ) +find_program( LCOV_PATH NAMES lcov lcov.bat lcov.exe lcov.perl) +find_program( GENHTML_PATH NAMES genhtml genhtml.perl genhtml.bat ) +find_program( GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/scripts/test) +find_program( SIMPLE_PYTHON_EXECUTABLE python ) + +if(NOT GCOV_PATH) + message(FATAL_ERROR "gcov not found! Aborting...") +endif() # NOT GCOV_PATH + +if("${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang") + if("${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS 3) + message(FATAL_ERROR "Clang version must be 3.0.0 or greater! Aborting...") + endif() +elseif(NOT CMAKE_COMPILER_IS_GNUCXX) + message(FATAL_ERROR "Compiler is not GNU gcc! Aborting...") +endif() + +set(COVERAGE_COMPILER_FLAGS "-g -O0 --coverage -fprofile-arcs -ftest-coverage" + CACHE INTERNAL "") + +set(CMAKE_CXX_FLAGS_COVERAGE + ${COVERAGE_COMPILER_FLAGS} + CACHE STRING "Flags used by the C++ compiler during coverage builds." + FORCE ) +set(CMAKE_C_FLAGS_COVERAGE + ${COVERAGE_COMPILER_FLAGS} + CACHE STRING "Flags used by the C compiler during coverage builds." + FORCE ) +set(CMAKE_EXE_LINKER_FLAGS_COVERAGE + "" + CACHE STRING "Flags used for linking binaries during coverage builds." + FORCE ) +set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE + "" + CACHE STRING "Flags used by the shared libraries linker during coverage builds." + FORCE ) +mark_as_advanced( + CMAKE_CXX_FLAGS_COVERAGE + CMAKE_C_FLAGS_COVERAGE + CMAKE_EXE_LINKER_FLAGS_COVERAGE + CMAKE_SHARED_LINKER_FLAGS_COVERAGE ) + +if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug") + message(WARNING "Code coverage results with an optimised (non-Debug) build may be misleading") +endif() # NOT CMAKE_BUILD_TYPE STREQUAL "Debug" + +if(CMAKE_C_COMPILER_ID STREQUAL "GNU") + link_libraries(gcov) +else() + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --coverage") +endif() + +# Defines a target for running and collection code coverage information +# Builds dependencies, runs the given executable and outputs reports. +# NOTE! The executable should always have a ZERO as exit code otherwise +# the coverage generation will not complete. +# +# SETUP_TARGET_FOR_COVERAGE_LCOV( +# NAME testrunner_coverage # New target name +# EXECUTABLE testrunner -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR +# DEPENDENCIES testrunner # Dependencies to build first +# ) +function(SETUP_TARGET_FOR_COVERAGE_LCOV) + + set(options NONE) + set(oneValueArgs NAME) + set(multiValueArgs EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES) + cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + if(NOT LCOV_PATH) + message(FATAL_ERROR "lcov not found! Aborting...") + endif() # NOT LCOV_PATH + + if(NOT GENHTML_PATH) + message(FATAL_ERROR "genhtml not found! Aborting...") + endif() # NOT GENHTML_PATH + + # Setup target + add_custom_target(${Coverage_NAME} + + # Cleanup lcov + COMMAND ${LCOV_PATH} --directory . --zerocounters + # Create baseline to make sure untouched files show up in the report + COMMAND ${LCOV_PATH} -c -i -d . -o ${Coverage_NAME}.base + + # Run tests + COMMAND ${Coverage_EXECUTABLE} + + # Capturing lcov counters and generating report + COMMAND ${LCOV_PATH} --directory . --capture --output-file ${Coverage_NAME}.info + # add baseline counters + COMMAND ${LCOV_PATH} -a ${Coverage_NAME}.base -a ${Coverage_NAME}.info --output-file ${Coverage_NAME}.total + COMMAND ${LCOV_PATH} --remove ${Coverage_NAME}.total ${COVERAGE_LCOV_EXCLUDES} --output-file ${PROJECT_BINARY_DIR}/${Coverage_NAME}.info.cleaned + COMMAND ${GENHTML_PATH} -o ${Coverage_NAME} ${PROJECT_BINARY_DIR}/${Coverage_NAME}.info.cleaned + COMMAND ${CMAKE_COMMAND} -E remove ${Coverage_NAME}.base ${Coverage_NAME}.total ${PROJECT_BINARY_DIR}/${Coverage_NAME}.info.cleaned + + WORKING_DIRECTORY ${PROJECT_BINARY_DIR} + DEPENDS ${Coverage_DEPENDENCIES} + COMMENT "Resetting code coverage counters to zero.\nProcessing code coverage counters and generating report." + ) + + # Show where to find the lcov info report + add_custom_command(TARGET ${Coverage_NAME} POST_BUILD + COMMAND ; + COMMENT "Lcov code coverage info report saved in ${Coverage_NAME}.info." + ) + + # Show info where to find the report + add_custom_command(TARGET ${Coverage_NAME} POST_BUILD + COMMAND ; + COMMENT "Open ./${Coverage_NAME}/index.html in your browser to view the coverage report." + ) + +endfunction() # SETUP_TARGET_FOR_COVERAGE_LCOV + +# Defines a target for running and collection code coverage information +# Builds dependencies, runs the given executable and outputs reports. +# NOTE! The executable should always have a ZERO as exit code otherwise +# the coverage generation will not complete. +# +# SETUP_TARGET_FOR_COVERAGE_GCOVR_XML( +# NAME ctest_coverage # New target name +# EXECUTABLE ctest -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR +# DEPENDENCIES executable_target # Dependencies to build first +# ) +function(SETUP_TARGET_FOR_COVERAGE_GCOVR_XML) + + set(options NONE) + set(oneValueArgs NAME) + set(multiValueArgs EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES) + cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + if(NOT SIMPLE_PYTHON_EXECUTABLE) + message(FATAL_ERROR "python not found! Aborting...") + endif() # NOT SIMPLE_PYTHON_EXECUTABLE + + if(NOT GCOVR_PATH) + message(FATAL_ERROR "gcovr not found! Aborting...") + endif() # NOT GCOVR_PATH + + # Combine excludes to several -e arguments + set(GCOVR_EXCLUDES "") + foreach(EXCLUDE ${COVERAGE_GCOVR_EXCLUDES}) + list(APPEND GCOVR_EXCLUDES "-e") + list(APPEND GCOVR_EXCLUDES "${EXCLUDE}") + endforeach() + + add_custom_target(${Coverage_NAME} + # Run tests + ${Coverage_EXECUTABLE} + + # Running gcovr + COMMAND ${GCOVR_PATH} --xml + -r ${PROJECT_SOURCE_DIR} ${GCOVR_EXCLUDES} + --object-directory=${PROJECT_BINARY_DIR} + -o ${Coverage_NAME}.xml + WORKING_DIRECTORY ${PROJECT_BINARY_DIR} + DEPENDS ${Coverage_DEPENDENCIES} + COMMENT "Running gcovr to produce Cobertura code coverage report." + ) + + # Show info where to find the report + add_custom_command(TARGET ${Coverage_NAME} POST_BUILD + COMMAND ; + COMMENT "Cobertura code coverage report saved in ${Coverage_NAME}.xml." + ) + +endfunction() # SETUP_TARGET_FOR_COVERAGE_GCOVR_XML + +# Defines a target for running and collection code coverage information +# Builds dependencies, runs the given executable and outputs reports. +# NOTE! The executable should always have a ZERO as exit code otherwise +# the coverage generation will not complete. +# +# SETUP_TARGET_FOR_COVERAGE_GCOVR_HTML( +# NAME ctest_coverage # New target name +# EXECUTABLE ctest -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR +# DEPENDENCIES executable_target # Dependencies to build first +# ) +function(SETUP_TARGET_FOR_COVERAGE_GCOVR_HTML) + + set(options NONE) + set(oneValueArgs NAME) + set(multiValueArgs EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES) + cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + if(NOT SIMPLE_PYTHON_EXECUTABLE) + message(FATAL_ERROR "python not found! Aborting...") + endif() # NOT SIMPLE_PYTHON_EXECUTABLE + + if(NOT GCOVR_PATH) + message(FATAL_ERROR "gcovr not found! Aborting...") + endif() # NOT GCOVR_PATH + + # Combine excludes to several -e arguments + set(GCOVR_EXCLUDES "") + foreach(EXCLUDE ${COVERAGE_GCOVR_EXCLUDES}) + list(APPEND GCOVR_EXCLUDES "-e") + list(APPEND GCOVR_EXCLUDES "${EXCLUDE}") + endforeach() + + add_custom_target(${Coverage_NAME} + # Run tests + ${Coverage_EXECUTABLE} + + # Create folder + COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_BINARY_DIR}/${Coverage_NAME} + + # Running gcovr + COMMAND ${GCOVR_PATH} --html --html-details + -r ${PROJECT_SOURCE_DIR} ${GCOVR_EXCLUDES} + --object-directory=${PROJECT_BINARY_DIR} + -o ${Coverage_NAME}/index.html + WORKING_DIRECTORY ${PROJECT_BINARY_DIR} + DEPENDS ${Coverage_DEPENDENCIES} + COMMENT "Running gcovr to produce HTML code coverage report." + ) + + # Show info where to find the report + add_custom_command(TARGET ${Coverage_NAME} POST_BUILD + COMMAND ; + COMMENT "Open ./${Coverage_NAME}/index.html in your browser to view the coverage report." + ) + +endfunction() # SETUP_TARGET_FOR_COVERAGE_GCOVR_HTML + +function(APPEND_COVERAGE_COMPILER_FLAGS) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE) + message(STATUS "Appending code coverage compiler flags: ${COVERAGE_COMPILER_FLAGS}") +endfunction() # APPEND_COVERAGE_COMPILER_FLAGS diff --git a/src/flip/cmake/debug_flags.cmake b/src/flip/cmake/debug_flags.cmake new file mode 100644 index 00000000..3b8d3db8 --- /dev/null +++ b/src/flip/cmake/debug_flags.cmake @@ -0,0 +1,67 @@ +# This list is generated from the output of: +# +# gcc -Q --help=optimizers -O0 +# +# with GCC 4.8.4 (Ubuntu 4.8.4-2ubuntu1-14.04.3). Yes, every one of these flags +# is on even with -O0 specified, and nothing changes when you add debugging +# options (-g/-g3/-gdwarf-4/etc.) in there. This should be updated every time +# the version of GCC used to compile changes. +# +# If you add an option here, it is your responsibility to comment it, with the +# following convention (feel free to add your own if there's not one suitable). +# DO YOUR RESEARCH. +# +# CBWITPOB: Can be wrong in the presence of bugs. When are you usually +# debugging? When there's a bug. Optimizations that can be wrong +# in the presence of bugs mean that, for example, you won't see +# a variable be modified when it actually happens--if it's +# modified due to the bug, as far as the debugger is concerned, +# it wasn't modified by the program, and things like conditional +# breakpoints won't work right, unless maybe it's a volatile +# variable. +# Inlining: Although GDB claims to track this correctly with -g3 and inject +# the code while you're stepping, it does not. You'll either be +# missing stack frames, or unable to view locals when you step +# to that frame--even if those locals exist nowhere else (i.e. +# not a function argument or tail return value). +# Eliding: Behavior may not change, but who knows where the values come +# from. +# Hoisting: Your program is not running instructions in the order of the +# code. Again, GDB claims to handle this, but it does not, or at +# least not well. +# Vectorizing: Great optimization, but the simulation of going through for +# loops is far from perfect, especially when you're dealing +# with bugs. +# +# And yes, these optimizations severely effect the quality of the debugging +# experience. Without these, you're lucky to be able to step into 80% of the +# stack, and of that 80%, you'll see anywhere from 50% to 100% of locals +# missing values. With these, I've never seen a stack frame I couldn't step +# into, and never seen when I look at a local. +# +set (REALLY_NO_OPTIMIZATION_FLAGS "-fno-short-enums" )# Binary-incompatible with code compiled otherwise. +set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-aggressive-loop-optimizations" ) # Changes behavior on overflow. +set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-branch-count-reg" )# Changes CPU instructions used. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-dce )# Can be wrong in the presence of bugs (CBWITPOB). set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-delete-null-pointer-checks )# CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-dse )# CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-early-inlining )# NO INLINING! Because... set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-gcse-lm )# Changes CPU instructions used. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-inline )# ...inlining also does things like elide locals. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-ira-hoist-pressure )# Might be irrelevant, but NO HOISTING! set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-ivopts )# Elides and changes instructions. CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-jump-tables )# Changes CPU instructions for switch statements. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-move-loop-invariants )# NO HOISTING! set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-peephole )# Exploiting CPU quirks. CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-prefetch-loop-arrays )# Changes CPU instructions, even GCC manual is ambivalent. +set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-rename-registers" )# Maybe wrong in the presence of bugs? +set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-toplevel-reorder" )# Elides unused static variable, reorders globals. +set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-coalesce-vars" )# Elides temporaries. CBWITPOB. +set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-cselim" )# Reorders, violates C++ mem model, CBWITPOB. +set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-forwprop" )# Reorders and changes instructions. CBWITPOB. +set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-loop-if-convert" )# Reorders and changes instructions. CBWITPOB. +set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-loop-im" )# Reorders and changes instructions. CBWITPOB. +set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-loop-optimize" )# Reorders and changes instructions. CBWITPOB. +set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-phiprop" )# NO HOISTING! Reorders and changes. CBWITPOB. +set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-pta" )# Less analysis means maybe less interference. +set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-reassoc" )# Elides and vectories. +set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-scev-cprop" )# Elides and changes instructions. +set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-vect-loop-version" )# E&C. +set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-web" )# E&C. +set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-slp-vectorize" )# E&C. +set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fthreadsafe-statics" )# Slightly smaller in code that doesn't need to be TS. + +if (${CONAN_BUILD_COVERAGE}) + include (cmake/CodeCoverage.cmake) + APPEND_COVERAGE_COMPILER_FLAGS() + SETUP_TARGET_FOR_COVERAGE_GCOVR_XML(NAME coverage EXECUTABLE ctest DEPENDENCIES ) +endif () +set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${REALLY_NO_OPTIMIZATION_FLAGS}") diff --git a/src/flip/proto/CMakeLists.txt b/src/flip/proto/CMakeLists.txt new file mode 100644 index 00000000..525157c2 --- /dev/null +++ b/src/flip/proto/CMakeLists.txt @@ -0,0 +1,15 @@ + +file(GLOB PROTO_IDLS *.proto) +PROTOBUF_GENERATE_CPP(PROTO_SRCS PROTO_HDRS ${PROTO_IDLS}) + +list(GET PROTO_HDRS 0 FIRST_PROTO) +get_filename_component(PROTO_DIR ${FIRST_PROTO} DIRECTORY) +set(PROTO_PATH ${PROTO_DIR} PARENT_SCOPE) + +set(FLIP_LIB_FILES + ${PROTO_SRCS} + ${PROTO_HDRS} + ) +add_library(flip ${FLIP_LIB_FILES}) +target_link_libraries(flip ${CONAN_LIBS}) +message("Here: ${PROTO_PATH}") From c568e224e2eb3bfbcc28a10400db7a3a4ebd5559 Mon Sep 17 00:00:00 2001 From: lhuang8 Date: Mon, 26 Nov 2018 17:05:17 -0800 Subject: [PATCH 030/385] fix coding style --- include/sds_grpc/client.h | 95 ++++++++++++++-------------- include/sds_grpc/server.h | 75 +++++++++++----------- include/sds_grpc/utils.h | 3 +- lib/server.cpp | 7 +- lib/utils.cpp | 36 +++++------ tests/function/echo_async_client.cpp | 50 ++++++--------- tests/function/echo_server.cpp | 14 ++-- tests/function/echo_sync_client.cpp | 27 ++++---- 8 files changed, 146 insertions(+), 161 deletions(-) diff --git a/include/sds_grpc/client.h b/include/sds_grpc/client.h index 668b2d0f..cc21cb1b 100644 --- a/include/sds_grpc/client.h +++ b/include/sds_grpc/client.h @@ -17,8 +17,7 @@ #include "utils.h" -namespace sds::grpc -{ +namespace sds::grpc { using ::grpc::Channel; using ::grpc::ClientAsyncResponseReader; @@ -33,7 +32,7 @@ using namespace ::std::chrono; * */ class ClientCallMethod { -public: + public: virtual ~ClientCallMethod() {} virtual void handle_response() = 0; @@ -48,39 +47,43 @@ template class ClientCallData final : public ClientCallMethod { using handle_response_cb_t = std::function< - void(TREPLY&, ::grpc::Status& status)>; + void(TREPLY&, ::grpc::Status& status)>; using ResponseReaderType = std::unique_ptr< - ::grpc::ClientAsyncResponseReaderInterface>; + ::grpc::ClientAsyncResponseReaderInterface>; -public: + public: ClientCallData(handle_response_cb_t handle_response_cb) : handle_response_cb_(handle_response_cb) { } - void set_deadline(uint32_t seconds) - { + void set_deadline(uint32_t seconds) { system_clock::time_point deadline = system_clock::now() + - std::chrono::seconds(seconds); + std::chrono::seconds(seconds); context_.set_deadline(deadline); } ResponseReaderType& responder_reader() { - return response_reader_; + return response_reader_; } - Status & status() { return status_; } + Status & status() { + return status_; + } - TREPLY & reply() { return reply_; } + TREPLY & reply() { + return reply_; + } - ClientContext & context() { return context_; } + ClientContext & context() { + return context_; + } - virtual void handle_response() override - { + virtual void handle_response() override { handle_response_cb_(reply_, status_); } -private: + private: handle_response_cb_t handle_response_cb_; TREPLY reply_; ClientContext context_; @@ -96,7 +99,7 @@ class ClientCallData final : public ClientCallMethod { */ template class GrpcConnection { -public: + public: const std::string& server_addr_; const std::string& target_domain_; @@ -109,12 +112,11 @@ class GrpcConnection { GrpcConnection(const std::string& server_addr, uint32_t dead_line, - CompletionQueue* cq, const std::string& target_domain, - const std::string& ssl_cert) - : server_addr_(server_addr), target_domain_(target_domain), - ssl_cert_(ssl_cert), dead_line_(dead_line), - completion_queue_(cq) - { + CompletionQueue* cq, const std::string& target_domain, + const std::string& ssl_cert) + : server_addr_(server_addr), target_domain_(target_domain), + ssl_cert_(ssl_cert), dead_line_(dead_line), + completion_queue_(cq) { } @@ -124,8 +126,7 @@ class GrpcConnection { return stub_.get(); } - virtual bool init() - { + virtual bool init() { if (!init_channel()) { return false; } @@ -134,10 +135,12 @@ class GrpcConnection { return true; } - CompletionQueue* completion_queue() { return completion_queue_; } + CompletionQueue* completion_queue() { + return completion_queue_; + } -protected: + protected: virtual bool init_channel() { @@ -149,35 +152,33 @@ class GrpcConnection { ::grpc::ChannelArguments channel_args; channel_args.SetSslTargetNameOverride(target_domain_); channel_ = ::grpc::CreateCustomChannel(server_addr_, - ::grpc::SslCredentials(ssl_opts), - channel_args); + ::grpc::SslCredentials(ssl_opts), + channel_args); } else { // TODO: add log -- lhuang8 return false; } } else { channel_ = ::grpc::CreateChannel(server_addr_, - ::grpc::InsecureChannelCredentials()); + ::grpc::InsecureChannelCredentials()); } return true; } - virtual void init_stub() - { + virtual void init_stub() { stub_ = TSERVICE::NewStub(channel_); } - virtual bool load_ssl_cert(const std::string& ssl_cert, std::string content) - { + virtual bool load_ssl_cert(const std::string& ssl_cert, std::string content) { return ::sds::grpc::get_file_contents(ssl_cert, content);; } virtual bool is_connection_ready() { - if (channel_->GetState(true) == grpc_connectivity_state::GRPC_CHANNEL_READY) + if (channel_->GetState(true) == grpc_connectivity_state::GRPC_CHANNEL_READY) return true; - else + else return false; } @@ -203,12 +204,12 @@ class GrpcConnection { */ class GrpcConnectionFactory { -public: + public: template static std::unique_ptr Make( - const std::string& server_addr, uint32_t dead_line, - CompletionQueue* cq, const std::string& target_domain, - const std::string& ssl_cert) { + const std::string& server_addr, uint32_t dead_line, + CompletionQueue* cq, const std::string& target_domain, + const std::string& ssl_cert) { std::unique_ptr ret(new T(server_addr, dead_line, cq, target_domain, ssl_cert)); @@ -235,7 +236,7 @@ class GrpcConnectionFactory { * */ class GrpcClient { -public: + public: GrpcClient() : shutdown_(true) {} ~GrpcClient() { @@ -261,16 +262,18 @@ class GrpcClient { for (uint32_t i = 0; i < num_threads; ++i) { // TODO: no need to call async_complete_rpc for sync calls; std::shared_ptr t = std::shared_ptr( - new std::thread(&GrpcClient::async_complete_rpc, this)); + new std::thread(&GrpcClient::async_complete_rpc, this)); threads_.push_back(t); } return true; } - CompletionQueue& cq() { return completion_queue_; } + CompletionQueue& cq() { + return completion_queue_; + } -private: + private: void async_complete_rpc() { void* tag; @@ -293,10 +296,10 @@ class GrpcClient { } } -protected: + protected: CompletionQueue completion_queue_; -private: + private: bool shutdown_; std::list> threads_; }; diff --git a/include/sds_grpc/server.h b/include/sds_grpc/server.h index 5f8b8f01..61034f8a 100644 --- a/include/sds_grpc/server.h +++ b/include/sds_grpc/server.h @@ -15,8 +15,7 @@ #include -namespace sds::grpc -{ +namespace sds::grpc { using ::grpc::Server; using ::grpc::ServerAsyncResponseWriter; @@ -32,12 +31,14 @@ using ::grpc::Status; * */ class BaseServerCallData { -public: - enum CallStatus { CREATE, PROCESS, FINISH }; + public: + enum CallStatus { CREATE, PROCESS, FINISH }; - CallStatus& status() { return status_; } + CallStatus& status() { + return status_; + } -public: + public: /** * During the life cycle of this object, this method should be called @@ -56,7 +57,7 @@ class BaseServerCallData { */ void proceed(); -protected: + protected: BaseServerCallData() : status_(CREATE) { } @@ -78,7 +79,7 @@ class BaseServerCallData { */ virtual void do_finish(); - CallStatus status_; + CallStatus status_; }; @@ -91,18 +92,18 @@ template class ServerCallData final : public BaseServerCallData { typedef std::function*, - ::grpc::CompletionQueue*, - ::grpc::ServerCompletionQueue*, - void *)> request_call_func_t; + ::grpc::ServerContext*, + TREQUEST*, + ::grpc::ServerAsyncResponseWriter*, + ::grpc::CompletionQueue*, + ::grpc::ServerCompletionQueue*, + void *)> request_call_func_t; typedef std::function<::grpc::Status(TREQUEST&, TRESPONSE&)> handle_call_func_t; typedef ServerCallData T; -private: + private: template friend class GrpcServer; @@ -116,9 +117,11 @@ class ServerCallData final : public BaseServerCallData { handle_request_func_(handle_request) { } - ::grpc::ServerAsyncResponseWriter& responder() { return responder_; } + ::grpc::ServerAsyncResponseWriter& responder() { + return responder_; + } -protected: + protected: ServerContext context_; @@ -133,14 +136,12 @@ class ServerCallData final : public BaseServerCallData { request_call_func_t wait_request_func_; handle_call_func_t handle_request_func_; - void do_create() - { + void do_create() { wait_request_func_(service_, &context_, &request_, &responder_, - cq_, cq_, this); + cq_, cq_, this); } - void do_process() - { + void do_process() { (new T(service_, cq_, wait_request_func_, handle_request_func_))->proceed(); //LOGDEBUGMOD(GRPC, "receive {}", request_.GetTypeName()); @@ -155,7 +156,7 @@ class ServerCallData final : public BaseServerCallData { template class GrpcServer { -public: + public: typedef TSERVICE ServiceType; @@ -165,7 +166,7 @@ class GrpcServer { void shutdown(); bool is_shutdown(); bool run(const std::string& ssl_key, const std::string& ssl_cert, - const std::string& listen_addr, uint32_t threads = 1); + const std::string& listen_addr, uint32_t threads = 1); /** * Currently, user need to inherit GrpcServer and register rpc calls. @@ -181,15 +182,15 @@ class GrpcServer { template void register_rpc( - std::function< - void(TSVC*, - ::grpc::ServerContext*, - TREQUEST*, - ::grpc::ServerAsyncResponseWriter*, - ::grpc::CompletionQueue*, - ::grpc::ServerCompletionQueue*, - void *)> request_call_func, - std::function<::grpc::Status(TREQUEST&, TRESPONSE&)> handle_request_func){ + std::function< + void(TSVC*, + ::grpc::ServerContext*, + TREQUEST*, + ::grpc::ServerAsyncResponseWriter*, + ::grpc::CompletionQueue*, + ::grpc::ServerCompletionQueue*, + void *)> request_call_func, + std::function<::grpc::Status(TREQUEST&, TRESPONSE&)> handle_request_func) { (new ServerCallData ( &service_, completion_queue_.get(), @@ -198,7 +199,7 @@ class GrpcServer { } -private: + private: // This can be called by multiple threads void handle_rpcs(); void process(BaseServerCallData * cm); @@ -206,12 +207,12 @@ class GrpcServer { // TODO: move this function to utils bool get_file_contents(const std::string& file_name, std::string& contents); -protected: + protected: std::unique_ptr<::grpc::ServerCompletionQueue> completion_queue_; std::unique_ptr server_; TSERVICE service_; -private: + private: bool shutdown_; std::list> threads_; }; @@ -250,7 +251,7 @@ bool GrpcServer::is_shutdown() { template bool GrpcServer::run(const std::string& ssl_key, const std::string& ssl_cert, - const std::string& listen_addr, uint32_t threads /* = 1 */) { + const std::string& listen_addr, uint32_t threads /* = 1 */) { if (listen_addr.empty() || threads == 0) { return false; } diff --git a/include/sds_grpc/utils.h b/include/sds_grpc/utils.h index a88f5015..a6387b8e 100644 --- a/include/sds_grpc/utils.h +++ b/include/sds_grpc/utils.h @@ -9,8 +9,7 @@ #include -namespace sds::grpc -{ +namespace sds::grpc { bool get_file_contents(const std::string & file_name, std::string & contents); diff --git a/lib/server.cpp b/lib/server.cpp index fa619d71..2c3acd70 100644 --- a/lib/server.cpp +++ b/lib/server.cpp @@ -7,11 +7,10 @@ #include -namespace sds::grpc -{ +namespace sds::grpc { void BaseServerCallData::proceed() { - if (status_ == CREATE){ + if (status_ == CREATE) { status_ = PROCESS; do_create(); } else if (status_ == PROCESS) { @@ -25,7 +24,7 @@ void BaseServerCallData::proceed() { } -void BaseServerCallData::do_finish(){ +void BaseServerCallData::do_finish() { GPR_ASSERT(status_ == FINISH); // Once in the FINISH state, this can be destroyed delete this; diff --git a/lib/utils.cpp b/lib/utils.cpp index 4c3b63d7..f705af65 100644 --- a/lib/utils.cpp +++ b/lib/utils.cpp @@ -8,25 +8,23 @@ #include #include -namespace sds::grpc -{ - -bool get_file_contents(const std::string & file_name, std::string & contents) -{ - try { - std::ifstream in(file_name.c_str(), std::ios::in); - if (in) { - std::ostringstream t; - t << in.rdbuf(); - in.close(); - - contents = t.str(); - return true; - } - } catch (...) { - - } - return false; +namespace sds::grpc { + +bool get_file_contents(const std::string & file_name, std::string & contents) { + try { + std::ifstream in(file_name.c_str(), std::ios::in); + if (in) { + std::ostringstream t; + t << in.rdbuf(); + in.close(); + + contents = t.str(); + return true; + } + } catch (...) { + + } + return false; } diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp index c16d5f6f..86b7fffe 100644 --- a/tests/function/echo_async_client.cpp +++ b/tests/function/echo_async_client.cpp @@ -25,27 +25,24 @@ using namespace ::sds_grpc_test; using namespace std::placeholders; -class EchoAsyncClient : public GrpcConnection<::sds_grpc_test::EchoService> -{ -public: +class EchoAsyncClient : public GrpcConnection<::sds_grpc_test::EchoService> { + public: EchoAsyncClient(const std::string& server_addr, uint32_t dead_line, - ::grpc::CompletionQueue* cq, - const std::string& target_domain, - const std::string& ssl_cert) + ::grpc::CompletionQueue* cq, + const std::string& target_domain, + const std::string& ssl_cert) : GrpcConnection<::sds_grpc_test::EchoService>( - server_addr, dead_line, cq, target_domain, ssl_cert) - { + server_addr, dead_line, cq, target_domain, ssl_cert) { } void Echo(const EchoRequest& request, - std::function callback) - { + std::function callback) { auto call = new ClientCallData(callback); call->set_deadline(dead_line_); call->responder_reader() = stub()->AsyncEcho( - &call->context(), request, completion_queue()); + &call->context(), request, completion_queue()); call->responder_reader()->Finish(&call->reply(), &call->status(), (void*)call); } @@ -55,27 +52,23 @@ class EchoAsyncClient : public GrpcConnection<::sds_grpc_test::EchoService> std::atomic_int g_counter; -class Ping -{ -public: +class Ping { + public: - Ping(int seqno) - { + Ping(int seqno) { request_.set_message(std::to_string(seqno)); } - void handle_echo_reply(EchoReply& reply, ::grpc::Status& status) - { - if (!status.ok()) - { + void handle_echo_reply(EchoReply& reply, ::grpc::Status& status) { + if (!status.ok()) { std::cout << "echo request " << request_.message() << - " failed, status " << status.error_code() << - ": " << status.error_message() << std::endl; + " failed, status " << status.error_code() << + ": " << status.error_message() << std::endl; return; } std::cout << "echo request " << request_.message() << - " reply " << reply.message() << std::endl; + " reply " << reply.message() << std::endl; assert(request_.message() == reply.message()); @@ -86,22 +79,19 @@ class Ping }; -int RunClient(const std::string& server_address) -{ +int RunClient(const std::string& server_address) { GrpcClient* fix_this_name = new GrpcClient(); auto client = GrpcConnectionFactory::Make( - server_address, 5, &(fix_this_name->cq()), "", ""); + server_address, 5, &(fix_this_name->cq()), "", ""); - if (!client) - { + if (!client) { std::cout << "Create echo async client failed." << std::endl; return -1; } fix_this_name->run(3); - for (int i = 0; i < 10; i++) - { + for (int i = 0; i < 10; i++) { Ping * ping = new Ping(i); client->Echo(ping->request_, std::bind(&Ping::handle_echo_reply, ping, _1, _2)); } diff --git a/tests/function/echo_server.cpp b/tests/function/echo_server.cpp index d9272fd3..4166fd9b 100644 --- a/tests/function/echo_server.cpp +++ b/tests/function/echo_server.cpp @@ -24,7 +24,7 @@ using namespace std::placeholders; class EchoServiceImpl { -public: + public: virtual ~EchoServiceImpl() = default; virtual ::grpc::Status echo_request(EchoRequest& request, EchoReply& response) { @@ -40,7 +40,7 @@ using EchoAsyncService = ::sds_grpc_test::EchoService::AsyncService; class EchoServer : public GrpcServer { -public: + public: EchoServer(EchoServiceImpl* impl) : GrpcServer(), impl_(impl) { @@ -50,8 +50,8 @@ class EchoServer : public GrpcServer { std::cout << "register rpc calls" << std::endl; register_rpc( - &EchoAsyncService::RequestEcho, - std::bind(&EchoServiceImpl::echo_request, impl_, _1, _2)); + &EchoAsyncService::RequestEcho, + std::bind(&EchoServiceImpl::echo_request, impl_, _1, _2)); } EchoServiceImpl* impl_; @@ -68,8 +68,7 @@ void RunServer() { server->run("", "", server_address, 4); std::cout << "Server listening on " << server_address << std::endl; - while (!server->is_shutdown()) - { + while (!server->is_shutdown()) { std::this_thread::sleep_for(std::chrono::seconds(1)); } @@ -77,8 +76,7 @@ void RunServer() { } -int main(int arc, char* argv[]) -{ +int main(int arc, char* argv[]) { std::cout << "Start echo server ..." << std::endl; RunServer(); diff --git a/tests/function/echo_sync_client.cpp b/tests/function/echo_sync_client.cpp index fa146af7..69ca942a 100644 --- a/tests/function/echo_sync_client.cpp +++ b/tests/function/echo_sync_client.cpp @@ -28,13 +28,12 @@ using namespace std::placeholders; class EchoSyncClient : public GrpcConnection { -public: + public: EchoSyncClient(const std::string& server_addr, uint32_t dead_line, - ::grpc::CompletionQueue* cq, - const std::string& target_domain, - const std::string& ssl_cert) - : GrpcConnection(server_addr, dead_line, cq, target_domain, ssl_cert) - { + ::grpc::CompletionQueue* cq, + const std::string& target_domain, + const std::string& ssl_cert) + : GrpcConnection(server_addr, dead_line, cq, target_domain, ssl_cert) { } }; @@ -45,16 +44,15 @@ int RunClient(const std::string& server_address) { GrpcClient* fix_this_name = new GrpcClient(); auto client = GrpcConnectionFactory::Make( - server_address, 5, &(fix_this_name->cq()), "", ""); - if (!client) - { + server_address, 5, &(fix_this_name->cq()), "", ""); + if (!client) { std::cout << "Create echo client failed." << std::endl; return -1; } int ret = 0; - for (int i = 0; i < 3; i++){ + for (int i = 0; i < 3; i++) { ClientContext context; EchoRequest request; EchoReply reply; @@ -62,16 +60,15 @@ int RunClient(const std::string& server_address) { request.set_message(std::to_string(i)); Status status = client->stub()->Echo(&context, request, &reply); - if (!status.ok()) - { + if (!status.ok()) { std::cout << "echo request " << request.message() << - " failed, status " << status.error_code() << - ": " << status.error_message() << std::endl; + " failed, status " << status.error_code() << + ": " << status.error_message() << std::endl; continue; } std::cout << "echo request " << request.message() << - " reply " << reply.message() << std::endl; + " reply " << reply.message() << std::endl; if (request.message() == reply.message()) { ret++; From dc607fae17f54065b8beda175367d2fc2fc73bd2 Mon Sep 17 00:00:00 2001 From: lhuang8 Date: Fri, 16 Nov 2018 19:06:22 -0800 Subject: [PATCH 031/385] SDSTOR-464 sds_grpc: make GrpcServer supports multiple gRPC services Old GrpcServer implementation only supports one gRPC service, which is not convenient for hosting multiple gRPC services in one application: - init grpc server for each service - each grpc server need to listen on a TCP port This change makes GrpcServer to support multiple gRPC services by registering services on a single grpc server. --- CMakeLists.txt | 3 + include/sds_grpc/client.h | 4 +- include/sds_grpc/server.h | 240 ++++++++++----------------- lib/server.cpp | 115 +++++++++++++ tests/function/echo_async_client.cpp | 91 ++++++++-- tests/function/echo_server.cpp | 75 +++++++-- tests/function/echo_sync_client.cpp | 20 +-- tests/proto/CMakeLists.txt | 2 - tests/proto/sds_grpc_test.proto | 21 +++ 9 files changed, 378 insertions(+), 193 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b0e2f67b..286b3906 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -5,6 +5,9 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CPP_WARNINGS} -DPACKAGE_NAME=${CONAN_P find_package(Boost REQUIRED) find_package(Threads REQUIRED) find_package(OpenSSL REQUIRED) +find_package(Protobuf REQUIRED) + +include(${CMAKE_HOME_DIRECTORY}/cmake/grpc.cmake) include_directories(BEFORE include) diff --git a/include/sds_grpc/client.h b/include/sds_grpc/client.h index cc21cb1b..b8e19b13 100644 --- a/include/sds_grpc/client.h +++ b/include/sds_grpc/client.h @@ -120,7 +120,7 @@ class GrpcConnection { } - ~GrpcConnection() { } + virtual ~GrpcConnection() { } typename TSERVICE::StubInterface* stub() { return stub_.get(); @@ -239,7 +239,7 @@ class GrpcClient { public: GrpcClient() : shutdown_(true) {} - ~GrpcClient() { + virtual ~GrpcClient() { shutdown(); for (auto& it : threads_) { it->join(); diff --git a/include/sds_grpc/server.h b/include/sds_grpc/server.h index 61034f8a..df3bf9ad 100644 --- a/include/sds_grpc/server.h +++ b/include/sds_grpc/server.h @@ -13,6 +13,10 @@ #include #include +#include +#include +#include +#include "utils.h" namespace sds::grpc { @@ -91,20 +95,21 @@ class BaseServerCallData { template class ServerCallData final : public BaseServerCallData { - typedef std::function*, - ::grpc::CompletionQueue*, - ::grpc::ServerCompletionQueue*, - void *)> request_call_func_t; + using request_call_func_t = std::function< + void(TSERVICE*, + ::grpc::ServerContext*, + TREQUEST*, + ::grpc::ServerAsyncResponseWriter*, + ::grpc::CompletionQueue*, + ::grpc::ServerCompletionQueue*, + void *)>; - typedef std::function<::grpc::Status(TREQUEST&, TRESPONSE&)> handle_call_func_t; + using handle_call_func_t = std::function< + ::grpc::Status(TREQUEST&, TRESPONSE&)>; - typedef ServerCallData T; + using T = ServerCallData; private: - template friend class GrpcServer; ServerCallData(TSERVICE * service, @@ -154,36 +159,70 @@ class ServerCallData final : public BaseServerCallData { -template -class GrpcServer { - public: +class GrpcServer : private boost::noncopyable { - typedef TSERVICE ServiceType; + enum State { + VOID, + INITED, + RUNNING, + SHUTTING_DOWN, + TERMINATED + }; + private: GrpcServer(); - virtual ~GrpcServer(); - void shutdown(); - bool is_shutdown(); - bool run(const std::string& ssl_key, const std::string& ssl_cert, - const std::string& listen_addr, uint32_t threads = 1); + bool init(const std::string& listen_addr, uint32_t threads, + const std::string& ssl_key, const std::string& ssl_cert); + + public: + virtual ~GrpcServer(); /** - * Currently, user need to inherit GrpcServer and register rpc calls. - * This will be changed by "SDSTOR-464 sds_grpc: make single - * sds_grpc::GrpcServer instance supports multiple gRPC services" + * Create a new GrpcServer instance and initialize it. */ - virtual void ready() = 0; + static GrpcServer* make(const std::string& listen_addr, + uint32_t threads=1, + const std::string& ssl_key="", + const std::string& ssl_cert=""); + + bool run(); + + void shutdown(); + bool is_terminated() { + return state_ == State::TERMINATED; + } ::grpc::ServerCompletionQueue * completion_queue() { - return completion_queue_.get(); + return cq_.get(); + } + + template + bool register_async_service() { + + BOOST_ASSERT_MSG(State::INITED == state_, + "register service in non-INITED state"); + + auto name = TSVC::service_full_name(); + + BOOST_ASSERT_MSG(services_.find(name) == services_.end(), + "Double register async service."); + if (services_.find(name) != services_.end()) { + return false; + } + + auto svc = new typename TSVC::AsyncService(); + builder_.RegisterService(svc); + services_.insert({name, svc}); + + return true; } template - void register_rpc( + bool register_rpc( std::function< - void(TSVC*, + void(typename TSVC::AsyncService*, ::grpc::ServerContext*, TREQUEST*, ::grpc::ServerAsyncResponseWriter*, @@ -192,145 +231,46 @@ class GrpcServer { void *)> request_call_func, std::function<::grpc::Status(TREQUEST&, TRESPONSE&)> handle_request_func) { - (new ServerCallData ( - &service_, completion_queue_.get(), - request_call_func, - handle_request_func))->proceed(); - } - - - private: - // This can be called by multiple threads - void handle_rpcs(); - void process(BaseServerCallData * cm); - - // TODO: move this function to utils - bool get_file_contents(const std::string& file_name, std::string& contents); - - protected: - std::unique_ptr<::grpc::ServerCompletionQueue> completion_queue_; - std::unique_ptr server_; - TSERVICE service_; - - private: - bool shutdown_; - std::list> threads_; -}; - + BOOST_ASSERT_MSG(State::RUNNING == state_, + "register service in non-INITED state"); -template -GrpcServer::GrpcServer() - :shutdown_(true) -{} - - -template -GrpcServer::~GrpcServer() { - shutdown(); - for (auto& it : threads_) { - it->join(); - } -} - - -template -void GrpcServer::shutdown() { - if (!shutdown_) { - server_->Shutdown(); - completion_queue_->Shutdown(); - shutdown_ = true; - - } -} - -template -bool GrpcServer::is_shutdown() { - return shutdown_; -} - - -template -bool GrpcServer::run(const std::string& ssl_key, const std::string& ssl_cert, - const std::string& listen_addr, uint32_t threads /* = 1 */) { - if (listen_addr.empty() || threads == 0) { - return false; - } - - ServerBuilder builder; - if (!ssl_cert.empty() && !ssl_key.empty()) { - std::string key_contents; - std::string cert_contents; - get_file_contents(ssl_cert, cert_contents); - get_file_contents(ssl_key, key_contents); - - if (cert_contents.empty() || key_contents.empty()) { + auto it = services_.find(TSVC::service_full_name()); + if (it == services_.end()) { + BOOST_ASSERT_MSG(false, "service not registered"); return false; } - ::grpc::SslServerCredentialsOptions::PemKeyCertPair pkcp = { key_contents, cert_contents }; - ::grpc::SslServerCredentialsOptions ssl_opts; - ssl_opts.pem_root_certs = ""; - ssl_opts.pem_key_cert_pairs.push_back(pkcp); - - builder.AddListeningPort(listen_addr, ::grpc::SslServerCredentials(ssl_opts)); - } else { - builder.AddListeningPort(listen_addr, ::grpc::InsecureServerCredentials()); - } - - builder.RegisterService(&service_); - completion_queue_ = builder.AddCompletionQueue(); - server_ = builder.BuildAndStart(); - //LOGDEBUGMOD(GRPC, "Server listening on {}", listen_addr); - - shutdown_ = false; - ready(); + auto svc = static_cast(it->second); + (new ServerCallData ( + svc, cq_.get(), + request_call_func, + handle_request_func))->proceed(); - for (uint32_t i = 0; i < threads; ++i) { - std::shared_ptr t = - std::shared_ptr(new std::thread(&GrpcServer::handle_rpcs, this)); - threads_.push_back(t); + return true; } - return true; -} + private: -template -bool GrpcServer::get_file_contents(const std::string& file_name, std::string& contents) { - try { - std::ifstream in(file_name.c_str(), std::ios::in); - if (in) { - std::ostringstream t; - t << in.rdbuf(); - in.close(); - - contents = t.str(); - return true; - } - } catch (...) { + /* + * This can be called by multiple threads + */ + void handle_rpcs(); - } + void process(BaseServerCallData * cm); - return false; -} + State state_ = State::VOID; -template -void GrpcServer::handle_rpcs() { - void* tag; - bool ok = false; + uint32_t thread_num_ = 0; - while (completion_queue_->Next(&tag, &ok)) { - if (!ok) { - // the server has been Shutdown before this particular - // call got matched to an incoming RPC. - continue; - } + ServerBuilder builder_; - BaseServerCallData* cm = static_cast(tag); - cm->proceed(); - } -} + std::unique_ptr<::grpc::ServerCompletionQueue> cq_; + std::unique_ptr server_; + std::list> threads_; + std::unordered_map services_; +}; } diff --git a/lib/server.cpp b/lib/server.cpp index 2c3acd70..29f836a7 100644 --- a/lib/server.cpp +++ b/lib/server.cpp @@ -30,4 +30,119 @@ void BaseServerCallData::do_finish() { delete this; } + +GrpcServer::GrpcServer() { + +} + + +GrpcServer::~GrpcServer() { + shutdown(); +} + + +bool GrpcServer::init(const std::string& listen_addr, uint32_t threads, + const std::string& ssl_key, const std::string& ssl_cert) { + + BOOST_ASSERT(State::VOID == state_); + + if (listen_addr.empty() || threads == 0) { + return false; + } + + thread_num_ = threads; + + if (!ssl_cert.empty() && !ssl_key.empty()) { + std::string key_contents; + std::string cert_contents; + get_file_contents(ssl_cert, cert_contents); + get_file_contents(ssl_key, key_contents); + + if (cert_contents.empty() || key_contents.empty()) { + return false; + } + + ::grpc::SslServerCredentialsOptions::PemKeyCertPair pkcp = { key_contents, cert_contents }; + ::grpc::SslServerCredentialsOptions ssl_opts; + ssl_opts.pem_root_certs = ""; + ssl_opts.pem_key_cert_pairs.push_back(pkcp); + + builder_.AddListeningPort(listen_addr, ::grpc::SslServerCredentials(ssl_opts)); + } else { + builder_.AddListeningPort(listen_addr, ::grpc::InsecureServerCredentials()); + } + + cq_ = builder_.AddCompletionQueue(); + + state_ = State::INITED; + return true; +} + + +GrpcServer* GrpcServer::make(const std::string& listen_addr, + uint32_t threads, + const std::string& ssl_key, + const std::string& ssl_cert) { + auto ret = new GrpcServer(); + if (!ret->init(listen_addr, threads, ssl_key, ssl_cert)) { + delete ret; + return nullptr; + } + + return ret; +} + + +bool GrpcServer::run() { + + BOOST_ASSERT(State::INITED == state_); + + server_ = builder_.BuildAndStart(); + + for (uint32_t i = 0; i < thread_num_; ++i) { + auto t = std::shared_ptr( + new std::thread(&GrpcServer::handle_rpcs, this)); + threads_.push_back(t); + } + + state_ = State::RUNNING; + return true; +} + + +void GrpcServer::handle_rpcs() { + void* tag; + bool ok = false; + + while (cq_->Next(&tag, &ok)) { + if (!ok) { + // the server has been Shutdown before this particular + // call got matched to an incoming RPC. + continue; + } + + BaseServerCallData* cm = static_cast(tag); + cm->proceed(); + } +} + +void GrpcServer::shutdown() { + if (state_ == State::RUNNING) { + server_->Shutdown(); + cq_->Shutdown(); // Always *after* the associated server's Shutdown()! + state_ = State::SHUTTING_DOWN; + + // drain the cq_ + for (auto& it : threads_) { + it->join(); + } + + state_ = State::TERMINATED; + } + + return; +} + + + } diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp index 86b7fffe..6054283c 100644 --- a/tests/function/echo_async_client.cpp +++ b/tests/function/echo_async_client.cpp @@ -45,17 +45,38 @@ class EchoAsyncClient : public GrpcConnection<::sds_grpc_test::EchoService> { &call->context(), request, completion_queue()); call->responder_reader()->Finish(&call->reply(), &call->status(), (void*)call); } +}; +class PingAsyncClient : public GrpcConnection<::sds_grpc_test::PingService> { + public: + PingAsyncClient(const std::string& server_addr, uint32_t dead_line, + ::grpc::CompletionQueue* cq, + const std::string& target_domain, + const std::string& ssl_cert) + : GrpcConnection<::sds_grpc_test::PingService>( + server_addr, dead_line, cq, target_domain, ssl_cert) { + } + + + void Ping(const PingRequest& request, + std::function callback) { + auto call = new ClientCallData(callback); + call->set_deadline(dead_line_); + call->responder_reader() = stub()->AsyncPing( + &call->context(), request, completion_queue()); + call->responder_reader()->Finish(&call->reply(), &call->status(), (void*)call); + } }; -std::atomic_int g_counter; +std::atomic_int g_echo_counter; +std::atomic_int g_ping_counter; -class Ping { +class Echo { public: - Ping(int seqno) { + Echo(int seqno) { request_.set_message(std::to_string(seqno)); } @@ -72,34 +93,74 @@ class Ping { assert(request_.message() == reply.message()); - g_counter.fetch_add(1, std::memory_order_relaxed); + g_echo_counter.fetch_add(1, std::memory_order_relaxed); } EchoRequest request_; }; +class Ping { + public: + + Ping(int seqno) { + request_.set_seqno(seqno); + } + + void handle_ping_reply(PingReply& reply, ::grpc::Status& status) { + if (!status.ok()) { + std::cout << "ping request " << request_.seqno() << + " failed, status " << status.error_code() << + ": " << status.error_message() << std::endl; + return; + } + + std::cout << "ping request " << request_.seqno() << + " reply " << reply.seqno() << std::endl; + + + assert(request_.seqno() == reply.seqno()); + g_ping_counter.fetch_add(1, std::memory_order_relaxed); + } + + PingRequest request_; +}; + +#define GRPC_CALL_COUNT 10 + int RunClient(const std::string& server_address) { GrpcClient* fix_this_name = new GrpcClient(); - auto client = GrpcConnectionFactory::Make( - server_address, 5, &(fix_this_name->cq()), "", ""); + auto echo_client = GrpcConnectionFactory::Make( + server_address, 5, &(fix_this_name->cq()), "", ""); - if (!client) { + if (!echo_client) { std::cout << "Create echo async client failed." << std::endl; return -1; } + auto ping_client = GrpcConnectionFactory::Make( + server_address, 5, &(fix_this_name->cq()), "", ""); + + if (!ping_client) { + std::cout << "Create ping async client failed." << std::endl; + return -1; + } + fix_this_name->run(3); - for (int i = 0; i < 10; i++) { - Ping * ping = new Ping(i); - client->Echo(ping->request_, std::bind(&Ping::handle_echo_reply, ping, _1, _2)); + for (int i = 0; i < GRPC_CALL_COUNT; i++) { + if (i % 2 == 0) { + Echo * echo = new Echo(i); + echo_client->Echo(echo->request_, std::bind(&Echo::handle_echo_reply, echo, _1, _2)); + } else { + Ping * ping = new Ping(i); + ping_client->Ping(ping->request_, std::bind(&Ping::handle_ping_reply, ping, _1, _2)); + } } delete fix_this_name; // wait client worker threads terminate - return g_counter.load(); - + return g_echo_counter.load() + g_ping_counter.load(); } @@ -107,8 +168,12 @@ int main(int argc, char** argv) { std::string server_address("0.0.0.0:50051"); + if (RunClient(server_address) != GRPC_CALL_COUNT) { + std::cerr << "Only " << GRPC_CALL_COUNT << " calls are successful" << std::endl; + return 1; + } - return RunClient(server_address); + return 0; } diff --git a/tests/function/echo_server.cpp b/tests/function/echo_server.cpp index 4166fd9b..c3f36477 100644 --- a/tests/function/echo_server.cpp +++ b/tests/function/echo_server.cpp @@ -33,28 +33,64 @@ class EchoServiceImpl { return ::grpc::Status::OK; } + bool register_service(GrpcServer* server) { + + if (!server->register_async_service()) { + std::cout << "register service failed" << std::endl; + return false; + } + + return true; + } + + bool register_rpcs(GrpcServer* server) { + std::cout << "register rpc calls" << std::endl; + if (!server->register_rpc( + &EchoService::AsyncService::RequestEcho, + std::bind(&EchoServiceImpl::echo_request, this, _1, _2))) { + std::cout << "register rpc failed" << std::endl; + return false; + } + + return true; + } }; -using EchoAsyncService = ::sds_grpc_test::EchoService::AsyncService; -class EchoServer : public GrpcServer { + +class PingServiceImpl { public: - EchoServer(EchoServiceImpl* impl) - : GrpcServer(), - impl_(impl) { + virtual ~PingServiceImpl() = default; + + virtual ::grpc::Status ping_request(PingRequest& request, PingReply& response) { + std::cout << "receive ping request " << request.seqno() << std::endl; + response.set_seqno(request.seqno()); + return ::grpc::Status::OK; } - void ready() { + bool register_service(GrpcServer* server) { - std::cout << "register rpc calls" << std::endl; - register_rpc( - &EchoAsyncService::RequestEcho, - std::bind(&EchoServiceImpl::echo_request, impl_, _1, _2)); + if (!server->register_async_service()) { + std::cout << "register ping service failed" << std::endl; + return false; + } + + return true; } - EchoServiceImpl* impl_; + bool register_rpcs(GrpcServer* server) { + std::cout << "register rpc calls" << std::endl; + if (!server->register_rpc( + &PingService::AsyncService::RequestPing, + std::bind(&PingServiceImpl::ping_request, this, _1, _2))) { + std::cout << "register ping rpc failed" << std::endl; + return false; + } + + return true; + } }; @@ -63,12 +99,21 @@ void RunServer() { std::string server_address("0.0.0.0:50051"); - EchoServiceImpl * impl = new EchoServiceImpl(); - EchoServer* server = new EchoServer(impl); - server->run("", "", server_address, 4); + auto server = GrpcServer::make(server_address, 4, "", ""); + + EchoServiceImpl * echo_impl = new EchoServiceImpl(); + echo_impl->register_service(server); + + PingServiceImpl * ping_impl = new PingServiceImpl(); + ping_impl->register_service(server); + + server->run(); std::cout << "Server listening on " << server_address << std::endl; - while (!server->is_shutdown()) { + echo_impl->register_rpcs(server); + ping_impl->register_rpcs(server); + + while (!server->is_terminated()) { std::this_thread::sleep_for(std::chrono::seconds(1)); } diff --git a/tests/function/echo_sync_client.cpp b/tests/function/echo_sync_client.cpp index 69ca942a..d9afaa11 100644 --- a/tests/function/echo_sync_client.cpp +++ b/tests/function/echo_sync_client.cpp @@ -39,6 +39,9 @@ class EchoSyncClient : public GrpcConnection { }; +#define GRPC_CALL_COUNT 10 + + int RunClient(const std::string& server_address) { GrpcClient* fix_this_name = new GrpcClient(); @@ -52,7 +55,7 @@ int RunClient(const std::string& server_address) { int ret = 0; - for (int i = 0; i < 3; i++) { + for (int i = 0; i < GRPC_CALL_COUNT; i++) { ClientContext context; EchoRequest request; EchoReply reply; @@ -81,17 +84,12 @@ int RunClient(const std::string& server_address) { int main(int argc, char** argv) { - std::string server_address("0.0.0.0:50051"); + if (RunClient(server_address) != GRPC_CALL_COUNT) { + std::cerr << "Only " << GRPC_CALL_COUNT << " calls are successful" << std::endl; + return 1; + } - return RunClient(server_address); + return 0; } - - - - - - - - diff --git a/tests/proto/CMakeLists.txt b/tests/proto/CMakeLists.txt index cddc95e5..ea405a50 100644 --- a/tests/proto/CMakeLists.txt +++ b/tests/proto/CMakeLists.txt @@ -1,5 +1,3 @@ -find_package(Protobuf REQUIRED) -include(${CMAKE_HOME_DIRECTORY}/cmake/grpc.cmake) protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS sds_grpc_test.proto) diff --git a/tests/proto/sds_grpc_test.proto b/tests/proto/sds_grpc_test.proto index 8902356c..09d4238e 100644 --- a/tests/proto/sds_grpc_test.proto +++ b/tests/proto/sds_grpc_test.proto @@ -5,6 +5,13 @@ package sds_grpc_test; service EchoService { rpc Echo (EchoRequest) returns (EchoReply) {} + + rpc EchoLongReply (EchoRequest) returns (stream EchoReply) {} + + rpc LongEcho (stream EchoRequest) returns (EchoReply) {} + + rpc LongEchoLongReply (stream EchoRequest) returns (stream EchoReply) {} + } message EchoRequest { @@ -14,3 +21,17 @@ message EchoRequest { message EchoReply { string message = 1; } + + +service PingService { + rpc Ping (PingRequest) returns (PingReply) {} +} + +message PingRequest { + uint32 seqno = 1; +} + +message PingReply { + uint32 seqno = 1; +} + From fc4ee16bd6e3abc7fae1c733826023c74330342d Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 29 Nov 2018 17:29:00 -0700 Subject: [PATCH 032/385] Need to link all conan dependencies --- tests/function/CMakeLists.txt | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/function/CMakeLists.txt b/tests/function/CMakeLists.txt index 002aef95..43de1108 100644 --- a/tests/function/CMakeLists.txt +++ b/tests/function/CMakeLists.txt @@ -2,11 +2,7 @@ include_directories(${CMAKE_CURRENT_BINARY_DIR}/../proto) set(FUNCTION_TEST_LIBS sds_grpc test_proto - ${CONAN_LIBS_GRPC} - ${CONAN_LIBS_PROTOBUF} - ${CONAN_LIBS_OPENSSL} - ${CONAN_LIBS_C-ARES} - ${CONAN_LIBS_ZLIB} ) + ${CONAN_LIBS}) # build echo_server From 0e4dd33956145fe5febc5fc498cdc041296d3544 Mon Sep 17 00:00:00 2001 From: Lei Huang Date: Tue, 9 Oct 2018 23:57:09 +0000 Subject: [PATCH 033/385] SDSTOR-576 sds_grpc: refactor GrpcConnection and GrpcClient Changes: - sds_grpc defines sync client and async client separately. - User can create a client worker(`GrpcAyncClientWorker`) by `GrpcAyncClientWorker::create_worker(const char * name, int num_thread)` each GrpcAyncClientWorker owns a CompletionQueue and a thread pool. - For creating an AsyncStub, user need to specify a client worker. All responses of grpc async calls made on it will be handled on the GrpcAyncClientWorker's threads. --- include/sds_grpc/client.h | 384 +++++++++++++++------------ lib/client.cpp | 158 +++++++++++ tests/function/echo_async_client.cpp | 139 +++++----- tests/function/echo_sync_client.cpp | 96 ++++--- 4 files changed, 506 insertions(+), 271 deletions(-) diff --git a/include/sds_grpc/client.h b/include/sds_grpc/client.h index b8e19b13..e4c336fe 100644 --- a/include/sds_grpc/client.h +++ b/include/sds_grpc/client.h @@ -7,10 +7,14 @@ #include #include #include -#include +#include #include +#include +#include +#include -#include +#include +#include #include #include #include @@ -28,10 +32,9 @@ using ::grpc::Status; using namespace ::std::chrono; /** - * ClientCallMethod : Stores the response handler and method name of the rpc - * + * A interface for handling gRPC async response */ -class ClientCallMethod { +class ClientCallMethod : private boost::noncopyable { public: virtual ~ClientCallMethod() {} @@ -40,7 +43,8 @@ class ClientCallMethod { /** - * The specialized 'ClientCallMethod' per-response type. + * The specialized 'ClientCallMethod' per gRPC call, it stores + * the response handler function * */ template @@ -52,10 +56,17 @@ class ClientCallData final : public ClientCallMethod { using ResponseReaderType = std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface>; - public: + private: + + /* Sllow GrpcAsyncClient and its inner classes to use + * ClientCallData. + */ + friend class GrpcAsyncClient; + ClientCallData(handle_response_cb_t handle_response_cb) : handle_response_cb_(handle_response_cb) { } + // TODO: support time in any time unit -- lhuang8 void set_deadline(uint32_t seconds) { system_clock::time_point deadline = system_clock::now() + std::chrono::seconds(seconds); @@ -78,7 +89,6 @@ class ClientCallData final : public ClientCallMethod { return context_; } - virtual void handle_response() override { handle_response_cb_(reply_, status_); } @@ -89,222 +99,260 @@ class ClientCallData final : public ClientCallMethod { ClientContext context_; Status status_; ResponseReaderType response_reader_; - }; /** - * A gRPC connection, holds a gRPC Service's stub which used to send gRPC request. + * A GrpcBaseClient takes care of establish a channel to grpc + * server. The channel can be used by any number of grpc + * generated stubs. * */ -template -class GrpcConnection { - public: - - const std::string& server_addr_; - const std::string& target_domain_; +class GrpcBaseClient { + protected: + const std::string server_addr_; + const std::string target_domain_; const std::string ssl_cert_; - uint32_t dead_line_; std::shared_ptr<::grpc::ChannelInterface> channel_; - CompletionQueue* completion_queue_; - std::unique_ptr stub_; + public: + GrpcBaseClient(const std::string& server_addr, + const std::string& target_domain = "", + const std::string& ssl_cert = "") + : server_addr_(server_addr), + target_domain_(target_domain), + ssl_cert_(ssl_cert) { + } - GrpcConnection(const std::string& server_addr, uint32_t dead_line, - CompletionQueue* cq, const std::string& target_domain, - const std::string& ssl_cert) - : server_addr_(server_addr), target_domain_(target_domain), - ssl_cert_(ssl_cert), dead_line_(dead_line), - completion_queue_(cq) { + virtual ~GrpcBaseClient() {}; - } + virtual bool init(); + virtual bool is_connection_ready(); - virtual ~GrpcConnection() { } + private: + virtual bool init_channel(); - typename TSERVICE::StubInterface* stub() { - return stub_.get(); - } + virtual bool load_ssl_cert(const std::string& ssl_cert, std::string& content); +}; - virtual bool init() { - if (!init_channel()) { - return false; - } - init_stub(); - return true; - } +class GrpcSyncClient : public GrpcBaseClient { + public: - CompletionQueue* completion_queue() { - return completion_queue_; + using GrpcBaseClient::GrpcBaseClient; + + template + std::unique_ptr MakeStub() { + return TSERVICE::NewStub(channel_); } +}; - protected: - virtual bool init_channel() { - - ::grpc::SslCredentialsOptions ssl_opts; - - if (!ssl_cert_.empty()) { - - if (load_ssl_cert(ssl_cert_, ssl_opts.pem_root_certs)) { - ::grpc::ChannelArguments channel_args; - channel_args.SetSslTargetNameOverride(target_domain_); - channel_ = ::grpc::CreateCustomChannel(server_addr_, - ::grpc::SslCredentials(ssl_opts), - channel_args); - } else { - // TODO: add log -- lhuang8 - return false; - } - } else { - channel_ = ::grpc::CreateChannel(server_addr_, - ::grpc::InsecureChannelCredentials()); - } +/** + * One GrpcBaseClient can have multiple stub + * + * The gRPC client worker, it owns a CompletionQueue and one or more threads, + * it's only used for handling asynchronous responses. + * + * The CompletionQueue is used to send asynchronous request, then the + * response will be handled on worker threads. + * + */ +class GrpcAyncClientWorker final { - return true; - } + enum class State { + VOID, + INIT, + RUNNING, + SHUTTING_DOWN, + TERMINATED + }; - virtual void init_stub() { - stub_ = TSERVICE::NewStub(channel_); - } + public: + using UPtr = std::unique_ptr; - virtual bool load_ssl_cert(const std::string& ssl_cert, std::string content) { - return ::sds::grpc::get_file_contents(ssl_cert, content);; - } + GrpcAyncClientWorker(); + ~GrpcAyncClientWorker(); - virtual bool is_connection_ready() { - if (channel_->GetState(true) == grpc_connectivity_state::GRPC_CHANNEL_READY) - return true; - else - return false; - } - virtual void wait_for_connection_ready() { - grpc_connectivity_state state; - int count = 0; - while ((state = channel_->GetState(true)) != GRPC_CHANNEL_READY && count++ < 5000) { - usleep(10000); - } + bool run(uint32_t num_threads); + + CompletionQueue& cq() { + return completion_queue_; } -}; + /** + * Create a GrpcAyncClientWorker. + * + */ + static bool create_worker(const char * name, int num_thread); + /** + * + * Get a pointer of GrpcAyncClientWorker by name. + */ + static GrpcAyncClientWorker * get_worker(const char * name); -/** - * - * Use GrpcConnectionFactory::Make() to create instance of - * GrpcConnection. - * - * TODO: This factory is not good enough, should be refactored - * with GrpcConnection and GrpcClient later -- lhuang8 - * - */ -class GrpcConnectionFactory { + /** + * Must be called explicitly before program exit if any worker created. + */ + static void shutdown_all(); - public: - template - static std::unique_ptr Make( - const std::string& server_addr, uint32_t dead_line, - CompletionQueue* cq, const std::string& target_domain, - const std::string& ssl_cert) { - - std::unique_ptr ret(new T(server_addr, dead_line, cq, - target_domain, ssl_cert)); - if (!ret->init()) { - ret.reset(nullptr); - } + private: - return ret; - } + /* + * Shutdown CompletionQueue and threads. + * + * For now, workers can only by shutdown by + * GrpcAyncClientWorker::shutdown_all(). + */ + void shutdown(); + + void async_complete_rpc(); + + static std::mutex mutex_workers; + static std::unordered_map workers; + + State state_ = State::VOID; + CompletionQueue completion_queue_; + std::list> threads_; }; -/** - * TODO: inherit GrpcConnection and implement as async client -- lhuang8 - * TODO: When work as a async responses handling worker, it's can be hidden from - * user of this lib. - * - * The gRPC client, it owns a CompletionQueue and one or more threads, it's only - * used for handling asynchronous responses. - * - * The CompletionQueue is used to send asynchronous request, then the - * response will be handled on this client's threads. - * - */ -class GrpcClient { +class GrpcAsyncClient : public GrpcBaseClient { public: - GrpcClient() : shutdown_(true) {} - virtual ~GrpcClient() { - shutdown(); - for (auto& it : threads_) { - it->join(); + template + using StubPtr = std::unique_ptr; + + + /** + * AsyncStub is a wrapper of generated service stub. + * + * An AsyncStub is created with a GrpcAyncClientWorker, all responses + * of grpc async calls made on it will be handled on the + * GrpcAyncClientWorker's threads. + * + * Please use GrpcAsyncClient::make_stub() to create AsyncStub. + * + */ + template + struct AsyncStub { + using UPtr = std::unique_ptr; + + AsyncStub(StubPtr stub, GrpcAyncClientWorker * worker) : + stub_(std::move(stub)), worker_(worker) { } - } - void shutdown() { - if (!shutdown_) { - completion_queue_.Shutdown(); - shutdown_ = true; + using stub_t = typename TSERVICE::StubInterface; + + /* unary call helper */ + template + using unary_call_return_t = + std::unique_ptr< + ::grpc::ClientAsyncResponseReaderInterface>; + + template + using unary_call_t = + unary_call_return_t (stub_t::*) ( + ::grpc::ClientContext*, + const TREQUEST&, + ::grpc::CompletionQueue*); + + template + using unary_callback_t = + std::function; + + /** + * Make a unary call. + * + * @param request - a request of this unary call. + * @param call - a pointer to a member function in grpc service stub + * which used to make an aync call. If service name is + * "EchoService" and an unary rpc is defined as: + * ` rpc Echo (EchoRequest) returns (EchoReply) {}` + * then the member function used here should be: + * `EchoService::StubInterface::AsyncEcho`. + * @param callback - the response handler function, which will be + * called after response received asynchronously. + * + */ + template + void call_unary( + const TREQUEST& request, + unary_call_t call, + unary_callback_t callback) { + + auto data = new ClientCallData(callback); + data->responder_reader() = (stub_.get()->*call)(&data->context(), request, cq()); + data->responder_reader()->Finish(&data->reply(), &data->status(), (void*)data); + + return; } - } - bool run(uint32_t num_threads) { - if (num_threads == 0) { - return false; + + StubPtr stub_; + GrpcAyncClientWorker * worker_; + + const StubPtr& stub() { + return stub_; } - shutdown_ = false; - for (uint32_t i = 0; i < num_threads; ++i) { - // TODO: no need to call async_complete_rpc for sync calls; - std::shared_ptr t = std::shared_ptr( - new std::thread(&GrpcClient::async_complete_rpc, this)); - threads_.push_back(t); + CompletionQueue* cq() { + return &worker_->cq(); } - return true; - } + }; - CompletionQueue& cq() { - return completion_queue_; - } - private: + template + static auto make(Ts&&... params) { + std::unique_ptr ret; - void async_complete_rpc() { - void* tag; - bool ok = false; - while (completion_queue_.Next(&tag, &ok)) { - if (!ok) { - // Client-side StartCallit not going to the wire. This - // would happen if the channel is either permanently broken or - // transiently broken but with the fail-fast option. - continue; - } - - // The tag was set by ::grpc::ClientAsyncResponseReader<>::Finish(), - // it must be a instance of ClientCallMethod. - // - // TODO: user of this lib should not have change to set the tag, - // need to hide tag from user totally -- lhuang8 - ClientCallMethod* cm = static_cast(tag); - cm->handle_response(); + if (!std::is_base_of::value) { + return ret; + } + + ret = std::make_unique(std::forward(params)...); + if (!ret->init()) { + ret.reset(nullptr); + return ret; } + + return ret; } - protected: - CompletionQueue completion_queue_; + template + auto make_stub(const char * worker) { - private: - bool shutdown_; - std::list> threads_; -}; + typename AsyncStub::UPtr ret; + auto w = GrpcAyncClientWorker::get_worker(worker); + BOOST_ASSERT(w); + if (!w) { + return ret; // null + } + auto stub = TSERVICE::NewStub(channel_); + ret = std::make_unique>(std::move(stub), w); + return ret; + } + GrpcAsyncClient( + const std::string& server_addr, + const std::string& target_domain = "", + const std::string& ssl_cert = "") + : GrpcBaseClient(server_addr, target_domain, ssl_cert) { + } + + virtual ~GrpcAsyncClient() { + } + + +}; } // end of namespace sds::grpc diff --git a/lib/client.cpp b/lib/client.cpp index 99123073..c4ddd58d 100644 --- a/lib/client.cpp +++ b/lib/client.cpp @@ -8,4 +8,162 @@ +namespace sds::grpc { + + +bool GrpcBaseClient::init() { + if (!init_channel()) { + return false; + } + + return true; +} + + +bool GrpcBaseClient::init_channel() { + + ::grpc::SslCredentialsOptions ssl_opts; + + if (!ssl_cert_.empty()) { + + if (load_ssl_cert(ssl_cert_, ssl_opts.pem_root_certs)) { + ::grpc::ChannelArguments channel_args; + channel_args.SetSslTargetNameOverride(target_domain_); + channel_ = ::grpc::CreateCustomChannel(server_addr_, + ::grpc::SslCredentials(ssl_opts), + channel_args); + } else { + return false; + } + } else { + channel_ = ::grpc::CreateChannel(server_addr_, + ::grpc::InsecureChannelCredentials()); + } + + return true; +} + +bool GrpcBaseClient::load_ssl_cert(const std::string& ssl_cert, std::string& content) { + return ::sds::grpc::get_file_contents(ssl_cert, content);; +} + + +bool GrpcBaseClient::is_connection_ready() { + return (channel_->GetState(true) == + grpc_connectivity_state::GRPC_CHANNEL_READY); +} + + +std::mutex GrpcAyncClientWorker::mutex_workers; +std::unordered_map GrpcAyncClientWorker::workers; + +GrpcAyncClientWorker::GrpcAyncClientWorker() { + state_ = State::INIT; +} + + +GrpcAyncClientWorker::~GrpcAyncClientWorker() { + shutdown(); +} + +void GrpcAyncClientWorker::shutdown() { + if (state_ == State::RUNNING) { + completion_queue_.Shutdown(); + state_ = State::SHUTTING_DOWN; + + for (auto& it : threads_) { + it->join(); + } + + state_ = State::TERMINATED; + } + + return; +} + + +bool GrpcAyncClientWorker::run(uint32_t num_threads) { + BOOST_ASSERT(State::INIT == state_); + + if (num_threads == 0) { + return false; + } + + for (uint32_t i = 0; i < num_threads; ++i) { + std::shared_ptr t = std::shared_ptr( + new std::thread(&GrpcAyncClientWorker::async_complete_rpc, this)); + threads_.push_back(t); + } + + state_ = State::RUNNING; + return true; +} + + +void GrpcAyncClientWorker::async_complete_rpc() { + void* tag; + bool ok = false; + while (completion_queue_.Next(&tag, &ok)) { + if (!ok) { + // Client-side StartCallit not going to the wire. This + // would happen if the channel is either permanently broken or + // transiently broken but with the fail-fast option. + continue; + } + + ClientCallMethod* cm = static_cast(tag); + cm->handle_response(); + delete cm; + } +} + + +bool GrpcAyncClientWorker::create_worker(const char * name, int num_thread) { + std::lock_guard lock(mutex_workers); + + if (auto it = workers.find(name); it != workers.end()) { + return true; + } + + auto worker = std::make_unique(); + if (!worker->run(num_thread)) { + return false; + } + + workers.insert(std::make_pair(name, std::move(worker))); + return true; +} + + +GrpcAyncClientWorker * GrpcAyncClientWorker::get_worker(const char * name) { + std::lock_guard lock(mutex_workers); + + auto it = workers.find(name); + if (it == workers.end()) { + return nullptr; + } + + return it->second.get(); +} + + +void GrpcAyncClientWorker::shutdown_all() { + std::lock_guard lock(mutex_workers); + + for (auto& it : workers) { + it.second->shutdown(); + // release worker, the completion queue holds by it need to + // be destroyed before grpc lib internal object + // g_core_codegen_interface + it.second.reset(); + } + + +} + +} + + + + diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp index 6054283c..cd000553 100644 --- a/tests/function/echo_async_client.cpp +++ b/tests/function/echo_async_client.cpp @@ -24,55 +24,54 @@ using namespace ::sds::grpc; using namespace ::sds_grpc_test; using namespace std::placeholders; +#define WORKER_NAME "worker-1" + + +class EchoAndPingAsyncClient : GrpcAsyncClient { -class EchoAsyncClient : public GrpcConnection<::sds_grpc_test::EchoService> { public: - EchoAsyncClient(const std::string& server_addr, uint32_t dead_line, - ::grpc::CompletionQueue* cq, - const std::string& target_domain, - const std::string& ssl_cert) - : GrpcConnection<::sds_grpc_test::EchoService>( - server_addr, dead_line, cq, target_domain, ssl_cert) { + using GrpcAsyncClient::GrpcAsyncClient; + + virtual bool init() { + if (!GrpcAsyncClient::init()) { + return false; + } + + echo_stub_ = make_stub(WORKER_NAME); + ping_stub_ = make_stub(WORKER_NAME); + + return true; } void Echo(const EchoRequest& request, std::function callback) { - auto call = new ClientCallData(callback); - call->set_deadline(dead_line_); - call->responder_reader() = stub()->AsyncEcho( - &call->context(), request, completion_queue()); - call->responder_reader()->Finish(&call->reply(), &call->status(), (void*)call); - } -}; - -class PingAsyncClient : public GrpcConnection<::sds_grpc_test::PingService> { - public: - PingAsyncClient(const std::string& server_addr, uint32_t dead_line, - ::grpc::CompletionQueue* cq, - const std::string& target_domain, - const std::string& ssl_cert) - : GrpcConnection<::sds_grpc_test::PingService>( - server_addr, dead_line, cq, target_domain, ssl_cert) { + echo_stub_->call_unary(request, + &EchoService::StubInterface::AsyncEcho, + callback); } - void Ping(const PingRequest& request, std::function callback) { - auto call = new ClientCallData(callback); - call->set_deadline(dead_line_); - call->responder_reader() = stub()->AsyncPing( - &call->context(), request, completion_queue()); - call->responder_reader()->Finish(&call->reply(), &call->status(), (void*)call); + + ping_stub_->call_unary(request, + &PingService::StubInterface::AsyncPing, + callback); } + + AsyncStub::UPtr echo_stub_; + AsyncStub::UPtr ping_stub_; }; std::atomic_int g_echo_counter; std::atomic_int g_ping_counter; +/** + * Echo implements async response handler. + */ class Echo { public: @@ -100,65 +99,59 @@ class Echo { }; -class Ping { - public: - - Ping(int seqno) { - request_.set_seqno(seqno); - } - - void handle_ping_reply(PingReply& reply, ::grpc::Status& status) { - if (!status.ok()) { - std::cout << "ping request " << request_.seqno() << - " failed, status " << status.error_code() << - ": " << status.error_message() << std::endl; - return; - } - - std::cout << "ping request " << request_.seqno() << - " reply " << reply.seqno() << std::endl; - - - assert(request_.seqno() == reply.seqno()); - g_ping_counter.fetch_add(1, std::memory_order_relaxed); - } - - PingRequest request_; -}; - #define GRPC_CALL_COUNT 10 int RunClient(const std::string& server_address) { - GrpcClient* fix_this_name = new GrpcClient(); - auto echo_client = GrpcConnectionFactory::Make( - server_address, 5, &(fix_this_name->cq()), "", ""); - if (!echo_client) { - std::cout << "Create echo async client failed." << std::endl; - return -1; - } + GrpcAyncClientWorker::create_worker(WORKER_NAME, 4); - auto ping_client = GrpcConnectionFactory::Make( - server_address, 5, &(fix_this_name->cq()), "", ""); - - if (!ping_client) { - std::cout << "Create ping async client failed." << std::endl; + auto client = GrpcAsyncClient::make(server_address, "", ""); + if (!client) { + std::cout << "Create async client failed." << std::endl; return -1; } - fix_this_name->run(3); - for (int i = 0; i < GRPC_CALL_COUNT; i++) { if (i % 2 == 0) { + // Async response handling logic can be put in a class's member + // function, then use a lambda to wrap it. Echo * echo = new Echo(i); - echo_client->Echo(echo->request_, std::bind(&Echo::handle_echo_reply, echo, _1, _2)); + client->Echo(echo->request_, + [echo] (EchoReply& reply, ::grpc::Status& status) { + echo->handle_echo_reply(reply, status); + delete echo; + }); + + // std::bind() can also be used, but need to take care releasing + // 'echo' additionally: + // std::bind(&Echo::handle_echo_reply, echo, _1, _2); + } else { - Ping * ping = new Ping(i); - ping_client->Ping(ping->request_, std::bind(&Ping::handle_ping_reply, ping, _1, _2)); + PingRequest* request = new PingRequest; + request->set_seqno(i); + + // response can be handled with lambda directly + client->Ping(*request, + [request] (PingReply& reply, ::grpc::Status& status) { + + if (!status.ok()) { + std::cout << "ping request " << request->seqno() << + " failed, status " << status.error_code() << + ": " << status.error_message() << std::endl; + return; + } + + std::cout << "ping request " << request->seqno() << + " reply " << reply.seqno() << std::endl; + + assert(request->seqno() == reply.seqno()); + g_ping_counter.fetch_add(1, std::memory_order_relaxed); + delete request; + }); } } - delete fix_this_name; // wait client worker threads terminate + GrpcAyncClientWorker::shutdown_all(); return g_echo_counter.load() + g_ping_counter.load(); } diff --git a/tests/function/echo_sync_client.cpp b/tests/function/echo_sync_client.cpp index d9afaa11..5b3391ab 100644 --- a/tests/function/echo_sync_client.cpp +++ b/tests/function/echo_sync_client.cpp @@ -25,56 +25,91 @@ using namespace ::sds_grpc_test; using namespace std::placeholders; - -class EchoSyncClient : public GrpcConnection { +class EchoAndPingClient : public GrpcSyncClient { public: - EchoSyncClient(const std::string& server_addr, uint32_t dead_line, - ::grpc::CompletionQueue* cq, - const std::string& target_domain, - const std::string& ssl_cert) - : GrpcConnection(server_addr, dead_line, cq, target_domain, ssl_cert) { + + using GrpcSyncClient::GrpcSyncClient; + + virtual bool init() { + if (!GrpcSyncClient::init()) { + return false; + } + + echo_stub_ = MakeStub(); + ping_stub_ = MakeStub(); + + return true; + } + + const std::unique_ptr& echo_stub() { + return echo_stub_; + } + + const std::unique_ptr& ping_stub() { + return ping_stub_; } + private: + + std::unique_ptr echo_stub_; + std::unique_ptr ping_stub_; + }; #define GRPC_CALL_COUNT 10 - int RunClient(const std::string& server_address) { - GrpcClient* fix_this_name = new GrpcClient(); - - auto client = GrpcConnectionFactory::Make( - server_address, 5, &(fix_this_name->cq()), "", ""); - if (!client) { - std::cout << "Create echo client failed." << std::endl; + auto client = std::make_unique(server_address, "", ""); + if (!client || !client->init()) { + std::cout << "Create grpc sync client failed." << std::endl; return -1; } int ret = 0; - for (int i = 0; i < GRPC_CALL_COUNT; i++) { ClientContext context; - EchoRequest request; - EchoReply reply; - request.set_message(std::to_string(i)); + if (i % 2 == 0) { + EchoRequest request; + EchoReply reply; - Status status = client->stub()->Echo(&context, request, &reply); - if (!status.ok()) { - std::cout << "echo request " << request.message() << - " failed, status " << status.error_code() << - ": " << status.error_message() << std::endl; - continue; - } - - std::cout << "echo request " << request.message() << - " reply " << reply.message() << std::endl; + request.set_message(std::to_string(i)); + Status status = client->echo_stub()->Echo(&context, request, &reply); + if (!status.ok()) { + std::cout << "echo request " << request.message() << + " failed, status " << status.error_code() << + ": " << status.error_message() << std::endl; + continue; + } - if (request.message() == reply.message()) { - ret++; + std::cout << "echo request " << request.message() << + " reply " << reply.message() << std::endl; + + if (request.message() == reply.message()) { + ret++; + } + } else { + PingRequest request; + PingReply reply; + + request.set_seqno(i); + Status status = client->ping_stub()->Ping(&context, request, &reply); + if (!status.ok()) { + std::cout << "ping request " << request.seqno() << + " failed, status " << status.error_code() << + ": " << status.error_message() << std::endl; + continue; + } + + std::cout << "ping request " << request.seqno() << + " reply " << reply.seqno() << std::endl; + + if (request.seqno() == reply.seqno()) { + ret++; + } } } @@ -82,6 +117,7 @@ int RunClient(const std::string& server_address) { return ret; } + int main(int argc, char** argv) { std::string server_address("0.0.0.0:50051"); From eaeaae155e6a2f55adecb78fd32038781bcdf920 Mon Sep 17 00:00:00 2001 From: Woonhak Kang Date: Tue, 4 Dec 2018 10:42:54 +0900 Subject: [PATCH 034/385] Fix boost include/linking issue --- src/flip/CMakeLists.txt | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 71dd6cb1..3516baa7 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -59,14 +59,21 @@ find_library(GLOG_LIBRARY glog HINTS ${CMAKE_PREFIX_PATH}/lib) find_library(GFLAGS_LIBRARY gflags HINTS ${CMAKE_PREFIX_PATH}/lib) include_directories(${CMAKE_PREFIX_PATH}/include ${PROTO_GEN_DIR}) +#boost +set(Boost_USE_STATIC_LIBS ON) +find_package(Boost 1.68 REQUIRED COMPONENTS system) +include_directories(${Boost_INCLUDE_DIRS}) + add_library(flip ${FLIP_LIB_FILES}) target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} ${GFLAGS_LIBRARY}) add_executable(test_flip ${TEST_FLIP_FILES}) -target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} ${GFLAGS_LIBRARY} pthread boost_system) +target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} ${GFLAGS_LIBRARY} + pthread Boost::system) add_executable(test_flip_client ${TEST_FLIP_CLIENT_FILES}) -target_link_libraries(test_flip_client ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} ${GFLAGS_LIBRARY} pthread boost_system) +target_link_libraries(test_flip_client ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} + ${GFLAGS_LIBRARY} pthread Boost::system) install(TARGETS flip DESTINATION ${CMAKE_PREFIX_PATH}/lib) install(FILES src/flip.hpp proto/flip_spec.proto ${PROTO_GEN_DIR}/flip_spec.pb.h DESTINATION ${CMAKE_PREFIX_PATH}/include/flip) From 309a596d35b6cf61c5f0c2c2643cf9049258c9bd Mon Sep 17 00:00:00 2001 From: Woonhak Kang Date: Tue, 4 Dec 2018 16:51:46 +0900 Subject: [PATCH 035/385] [MONSTOR-8101] Fix compile issues on MacOSX Majave - Add dummy op to suppress compiler warning --- src/flip/lib/flip.hpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index 8042be77..c493ee3b 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -294,6 +294,8 @@ class FlipTimer { void timer_thr() { size_t executed = 0; executed = m_svc.run(); + // To suppress compiler warning + (void) executed; } private: From 1ee672b50c586020c5ad261ea096218c172207d8 Mon Sep 17 00:00:00 2001 From: woonhak Date: Tue, 4 Dec 2018 23:25:14 -0800 Subject: [PATCH 036/385] Change boost version - CI building issue with boost 1.68 --- src/flip/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 3516baa7..ab65eac0 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -61,7 +61,7 @@ include_directories(${CMAKE_PREFIX_PATH}/include ${PROTO_GEN_DIR}) #boost set(Boost_USE_STATIC_LIBS ON) -find_package(Boost 1.68 REQUIRED COMPONENTS system) +find_package(Boost 1.67 REQUIRED COMPONENTS system) include_directories(${Boost_INCLUDE_DIRS}) add_library(flip ${FLIP_LIB_FILES}) From dc42e817d382017afb73a8c83d46b2e358c47919 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 4 Jan 2019 12:53:06 -0700 Subject: [PATCH 037/385] Init logging in tests. SDSTOR-711 --- tests/function/echo_async_client.cpp | 1 + tests/function/echo_server.cpp | 1 + tests/function/echo_sync_client.cpp | 1 + 3 files changed, 3 insertions(+) diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp index cd000553..d2c311a2 100644 --- a/tests/function/echo_async_client.cpp +++ b/tests/function/echo_async_client.cpp @@ -156,6 +156,7 @@ int RunClient(const std::string& server_address) { return g_echo_counter.load() + g_ping_counter.load(); } +SDS_LOGGING_INIT() int main(int argc, char** argv) { diff --git a/tests/function/echo_server.cpp b/tests/function/echo_server.cpp index c3f36477..29306982 100644 --- a/tests/function/echo_server.cpp +++ b/tests/function/echo_server.cpp @@ -120,6 +120,7 @@ void RunServer() { delete server; } +SDS_LOGGING_INIT() int main(int arc, char* argv[]) { std::cout << "Start echo server ..." << std::endl; diff --git a/tests/function/echo_sync_client.cpp b/tests/function/echo_sync_client.cpp index 5b3391ab..d18f9dd2 100644 --- a/tests/function/echo_sync_client.cpp +++ b/tests/function/echo_sync_client.cpp @@ -117,6 +117,7 @@ int RunClient(const std::string& server_address) { return ret; } +SDS_LOGGING_INIT() int main(int argc, char** argv) { From 84ae9a582b4b8d48b1db5971644af0312a9693ee Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 4 Jan 2019 13:43:37 -0700 Subject: [PATCH 038/385] CLang fixes. CLang on Mojave is not happy without these symbols being defined. SDSTOR-711 --- tests/function/echo_async_client.cpp | 3 ++- tests/function/echo_server.cpp | 4 +++- tests/function/echo_sync_client.cpp | 2 ++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp index d2c311a2..f2777e18 100644 --- a/tests/function/echo_async_client.cpp +++ b/tests/function/echo_async_client.cpp @@ -157,9 +157,10 @@ int RunClient(const std::string& server_address) { } SDS_LOGGING_INIT() +SDS_OPTIONS_ENABLE(logging) int main(int argc, char** argv) { - + SDS_OPTIONS_LOAD(argc, argv, logging) std::string server_address("0.0.0.0:50051"); if (RunClient(server_address) != GRPC_CALL_COUNT) { diff --git a/tests/function/echo_server.cpp b/tests/function/echo_server.cpp index 29306982..25051f8d 100644 --- a/tests/function/echo_server.cpp +++ b/tests/function/echo_server.cpp @@ -121,8 +121,10 @@ void RunServer() { } SDS_LOGGING_INIT() +SDS_OPTIONS_ENABLE(logging) -int main(int arc, char* argv[]) { +int main(int argc, char* argv[]) { + SDS_OPTIONS_LOAD(argc, argv, logging) std::cout << "Start echo server ..." << std::endl; RunServer(); diff --git a/tests/function/echo_sync_client.cpp b/tests/function/echo_sync_client.cpp index d18f9dd2..a2f36aa9 100644 --- a/tests/function/echo_sync_client.cpp +++ b/tests/function/echo_sync_client.cpp @@ -118,8 +118,10 @@ int RunClient(const std::string& server_address) { } SDS_LOGGING_INIT() +SDS_OPTIONS_ENABLE(logging) int main(int argc, char** argv) { + SDS_OPTIONS_LOAD(argc, argv, logging) std::string server_address("0.0.0.0:50051"); From 26e189ec9f559709e0da9477b27ae7d44f6f11c9 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 4 Jan 2019 21:06:24 +0000 Subject: [PATCH 039/385] Use standard logging output for tests. SDSTOR-711 --- tests/function/echo_async_client.cpp | 26 +++++++++++++------------- tests/function/echo_server.cpp | 21 +++++++++++---------- tests/function/echo_sync_client.cpp | 25 +++++++++++++------------ 3 files changed, 37 insertions(+), 35 deletions(-) diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp index f2777e18..22f79d83 100644 --- a/tests/function/echo_async_client.cpp +++ b/tests/function/echo_async_client.cpp @@ -81,15 +81,14 @@ class Echo { void handle_echo_reply(EchoReply& reply, ::grpc::Status& status) { if (!status.ok()) { - std::cout << "echo request " << request_.message() << - " failed, status " << status.error_code() << - ": " << status.error_message() << std::endl; + LOGERROR("echo request {} failed, status {}: {}", + request_.message(), + status.error_code(), + status.error_message()); return; } - std::cout << "echo request " << request_.message() << - " reply " << reply.message() << std::endl; - + LOGINFO("echo request {} reply {}", request_.message(), reply.message()); assert(request_.message() == reply.message()); g_echo_counter.fetch_add(1, std::memory_order_relaxed); @@ -107,7 +106,7 @@ int RunClient(const std::string& server_address) { auto client = GrpcAsyncClient::make(server_address, "", ""); if (!client) { - std::cout << "Create async client failed." << std::endl; + LOGCRITICAL("Create async client failed."); return -1; } @@ -135,14 +134,14 @@ int RunClient(const std::string& server_address) { [request] (PingReply& reply, ::grpc::Status& status) { if (!status.ok()) { - std::cout << "ping request " << request->seqno() << - " failed, status " << status.error_code() << - ": " << status.error_message() << std::endl; + LOGERROR("ping request {} failed, status {}: {}", + request->seqno(), + status.error_code(), + status.error_message()); return; } - std::cout << "ping request " << request->seqno() << - " reply " << reply.seqno() << std::endl; + LOGINFO("ping request {} reply {}", request->seqno(), reply.seqno()); assert(request->seqno() == reply.seqno()); g_ping_counter.fetch_add(1, std::memory_order_relaxed); @@ -161,10 +160,11 @@ SDS_OPTIONS_ENABLE(logging) int main(int argc, char** argv) { SDS_OPTIONS_LOAD(argc, argv, logging) + sds_logging::SetLogger("async_client"); std::string server_address("0.0.0.0:50051"); if (RunClient(server_address) != GRPC_CALL_COUNT) { - std::cerr << "Only " << GRPC_CALL_COUNT << " calls are successful" << std::endl; + LOGERROR("Only {} calls are successful", GRPC_CALL_COUNT); return 1; } diff --git a/tests/function/echo_server.cpp b/tests/function/echo_server.cpp index 25051f8d..002435d3 100644 --- a/tests/function/echo_server.cpp +++ b/tests/function/echo_server.cpp @@ -28,7 +28,7 @@ class EchoServiceImpl { virtual ~EchoServiceImpl() = default; virtual ::grpc::Status echo_request(EchoRequest& request, EchoReply& response) { - std::cout << "receive echo request " << request.message() << std::endl; + LOGINFO("receive echo request {}", request.message()); response.set_message(request.message()); return ::grpc::Status::OK; } @@ -36,7 +36,7 @@ class EchoServiceImpl { bool register_service(GrpcServer* server) { if (!server->register_async_service()) { - std::cout << "register service failed" << std::endl; + LOGERROR("register service failed"); return false; } @@ -44,11 +44,11 @@ class EchoServiceImpl { } bool register_rpcs(GrpcServer* server) { - std::cout << "register rpc calls" << std::endl; + LOGINFO("register rpc calls"); if (!server->register_rpc( &EchoService::AsyncService::RequestEcho, std::bind(&EchoServiceImpl::echo_request, this, _1, _2))) { - std::cout << "register rpc failed" << std::endl; + LOGERROR("register rpc failed"); return false; } @@ -65,7 +65,7 @@ class PingServiceImpl { virtual ~PingServiceImpl() = default; virtual ::grpc::Status ping_request(PingRequest& request, PingReply& response) { - std::cout << "receive ping request " << request.seqno() << std::endl; + LOGINFO("receive ping request {}", request.seqno()); response.set_seqno(request.seqno()); return ::grpc::Status::OK; } @@ -73,7 +73,7 @@ class PingServiceImpl { bool register_service(GrpcServer* server) { if (!server->register_async_service()) { - std::cout << "register ping service failed" << std::endl; + LOGERROR("register ping service failed"); return false; } @@ -81,11 +81,11 @@ class PingServiceImpl { } bool register_rpcs(GrpcServer* server) { - std::cout << "register rpc calls" << std::endl; + LOGINFO("register rpc calls"); if (!server->register_rpc( &PingService::AsyncService::RequestPing, std::bind(&PingServiceImpl::ping_request, this, _1, _2))) { - std::cout << "register ping rpc failed" << std::endl; + LOGERROR("register ping rpc failed"); return false; } @@ -108,7 +108,7 @@ void RunServer() { ping_impl->register_service(server); server->run(); - std::cout << "Server listening on " << server_address << std::endl; + LOGINFO("Server listening on {}", server_address); echo_impl->register_rpcs(server); ping_impl->register_rpcs(server); @@ -125,7 +125,8 @@ SDS_OPTIONS_ENABLE(logging) int main(int argc, char* argv[]) { SDS_OPTIONS_LOAD(argc, argv, logging) - std::cout << "Start echo server ..." << std::endl; + sds_logging::SetLogger("echo_server"); + LOGINFO("Start echo server ..."); RunServer(); return 0; diff --git a/tests/function/echo_sync_client.cpp b/tests/function/echo_sync_client.cpp index a2f36aa9..c1326c9e 100644 --- a/tests/function/echo_sync_client.cpp +++ b/tests/function/echo_sync_client.cpp @@ -64,7 +64,7 @@ int RunClient(const std::string& server_address) { auto client = std::make_unique(server_address, "", ""); if (!client || !client->init()) { - std::cout << "Create grpc sync client failed." << std::endl; + LOGERROR("Create grpc sync client failed."); return -1; } @@ -79,14 +79,14 @@ int RunClient(const std::string& server_address) { request.set_message(std::to_string(i)); Status status = client->echo_stub()->Echo(&context, request, &reply); if (!status.ok()) { - std::cout << "echo request " << request.message() << - " failed, status " << status.error_code() << - ": " << status.error_message() << std::endl; + LOGERROR("echo request {} failed, status {}: {}", + request.message(), + status.error_code(), + status.error_message()); continue; } - std::cout << "echo request " << request.message() << - " reply " << reply.message() << std::endl; + LOGINFO("echo request {} reply {}", request.message(), reply.message()); if (request.message() == reply.message()) { ret++; @@ -98,14 +98,14 @@ int RunClient(const std::string& server_address) { request.set_seqno(i); Status status = client->ping_stub()->Ping(&context, request, &reply); if (!status.ok()) { - std::cout << "ping request " << request.seqno() << - " failed, status " << status.error_code() << - ": " << status.error_message() << std::endl; + LOGERROR("ping request {} failed, status {}: {}", + request.seqno(), + status.error_code(), + status.error_message()); continue; } - std::cout << "ping request " << request.seqno() << - " reply " << reply.seqno() << std::endl; + LOGINFO("ping request {} reply {}", request.seqno(), reply.seqno()); if (request.seqno() == reply.seqno()) { ret++; @@ -122,11 +122,12 @@ SDS_OPTIONS_ENABLE(logging) int main(int argc, char** argv) { SDS_OPTIONS_LOAD(argc, argv, logging) + sds_logging::SetLogger("sync_client"); std::string server_address("0.0.0.0:50051"); if (RunClient(server_address) != GRPC_CALL_COUNT) { - std::cerr << "Only " << GRPC_CALL_COUNT << " calls are successful" << std::endl; + LOGERROR("Only {} calls are successful", GRPC_CALL_COUNT); return 1; } From f1c224682b624f627473abd94d38dbef90493d50 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 28 Jan 2019 15:15:11 +0000 Subject: [PATCH 040/385] v1.1.0 - Upgrade gRPC to 1.18.0 (SDSTOR-829) --- include/sds_grpc/client.h | 3 +-- include/sds_grpc/server.h | 3 ++- tests/function/echo_async_client.cpp | 2 ++ tests/function/echo_server.cpp | 4 +++- tests/function/echo_sync_client.cpp | 3 ++- 5 files changed, 10 insertions(+), 5 deletions(-) diff --git a/include/sds_grpc/client.h b/include/sds_grpc/client.h index e4c336fe..b317171b 100644 --- a/include/sds_grpc/client.h +++ b/include/sds_grpc/client.h @@ -17,7 +17,6 @@ #include #include #include -#include #include "utils.h" @@ -125,7 +124,7 @@ class GrpcBaseClient { ssl_cert_(ssl_cert) { } - virtual ~GrpcBaseClient() {}; + virtual ~GrpcBaseClient() = default; virtual bool init(); virtual bool is_connection_ready(); diff --git a/include/sds_grpc/server.h b/include/sds_grpc/server.h index df3bf9ad..1532dcf4 100644 --- a/include/sds_grpc/server.h +++ b/include/sds_grpc/server.h @@ -8,9 +8,10 @@ #pragma once #include +#include +#include #include #include -#include #include #include diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp index 22f79d83..49ce5e28 100644 --- a/tests/function/echo_async_client.cpp +++ b/tests/function/echo_async_client.cpp @@ -14,6 +14,8 @@ #include #include +#include +#include #include "sds_grpc/client.h" #include "sds_grpc_test.grpc.pb.h" diff --git a/tests/function/echo_server.cpp b/tests/function/echo_server.cpp index 002435d3..fd328e18 100644 --- a/tests/function/echo_server.cpp +++ b/tests/function/echo_server.cpp @@ -12,10 +12,12 @@ #include #include +#include +#include + #include "sds_grpc/server.h" #include "sds_grpc_test.grpc.pb.h" - using namespace ::grpc; using namespace ::sds::grpc; using namespace ::sds_grpc_test; diff --git a/tests/function/echo_sync_client.cpp b/tests/function/echo_sync_client.cpp index c1326c9e..b0a3adf6 100644 --- a/tests/function/echo_sync_client.cpp +++ b/tests/function/echo_sync_client.cpp @@ -13,7 +13,8 @@ #include #include - +#include +#include #include "sds_grpc/client.h" #include "sds_grpc_test.grpc.pb.h" From b24a6cb2f616b33ef94cd0618b43c28f83786cda Mon Sep 17 00:00:00 2001 From: lhuang8 Date: Thu, 31 Jan 2019 19:04:54 -0800 Subject: [PATCH 041/385] SDSTOR-827 sds_grpc: fix memory leak For server-side, CompletionQueue::Next() may set ok to false, call data should be released. ::sds::grpc::GrpcServer should release registered services in dtor. --- include/sds_grpc/client.h | 17 +++++++--- include/sds_grpc/server.h | 2 +- lib/client.cpp | 11 ++----- lib/server.cpp | 51 ++++++++++++++++++++++++++---- tests/function/echo_server.cpp | 58 +++++++++++++++++++++++++--------- 5 files changed, 104 insertions(+), 35 deletions(-) diff --git a/include/sds_grpc/client.h b/include/sds_grpc/client.h index b317171b..9f18b728 100644 --- a/include/sds_grpc/client.h +++ b/include/sds_grpc/client.h @@ -37,7 +37,7 @@ class ClientCallMethod : private boost::noncopyable { public: virtual ~ClientCallMethod() {} - virtual void handle_response() = 0; + virtual void handle_response(bool ok=true) = 0; }; @@ -57,7 +57,7 @@ class ClientCallData final : public ClientCallMethod { private: - /* Sllow GrpcAsyncClient and its inner classes to use + /* Allow GrpcAsyncClient and its inner classes to use * ClientCallData. */ friend class GrpcAsyncClient; @@ -88,7 +88,9 @@ class ClientCallData final : public ClientCallMethod { return context_; } - virtual void handle_response() override { + virtual void handle_response([[maybe_unused]] bool ok=true) override { + // For unary call, ok is always true, `status_` will indicate error + // if there are any. handle_response_cb_(reply_, status_); } @@ -277,7 +279,12 @@ class GrpcAsyncClient : public GrpcBaseClient { * then the member function used here should be: * `EchoService::StubInterface::AsyncEcho`. * @param callback - the response handler function, which will be - * called after response received asynchronously. + * called after response received asynchronously or call failed(which + * would happen if the channel is either permanently broken or + * transiently broken, or call timeout). + * The callback function must check if `::grpc::Status` argument is + * OK before handling the response. If call failed, `::grpc::Status` + * indicates the error code and error message. * */ template @@ -287,7 +294,9 @@ class GrpcAsyncClient : public GrpcBaseClient { unary_callback_t callback) { auto data = new ClientCallData(callback); + // Note that async unary RPCs don't post a CQ tag in call data->responder_reader() = (stub_.get()->*call)(&data->context(), request, cq()); + // CQ tag posted here data->responder_reader()->Finish(&data->reply(), &data->status(), (void*)data); return; diff --git a/include/sds_grpc/server.h b/include/sds_grpc/server.h index 1532dcf4..427548e9 100644 --- a/include/sds_grpc/server.h +++ b/include/sds_grpc/server.h @@ -60,7 +60,7 @@ class BaseServerCallData { * - FINISH is for destroy this object, gRPC server has sent the * appropriate signals to the client to end the call. */ - void proceed(); + void proceed(bool ok=true); protected: diff --git a/lib/client.cpp b/lib/client.cpp index c4ddd58d..5ad246e9 100644 --- a/lib/client.cpp +++ b/lib/client.cpp @@ -104,15 +104,10 @@ void GrpcAyncClientWorker::async_complete_rpc() { void* tag; bool ok = false; while (completion_queue_.Next(&tag, &ok)) { - if (!ok) { - // Client-side StartCallit not going to the wire. This - // would happen if the channel is either permanently broken or - // transiently broken but with the fail-fast option. - continue; - } - + // For client-side unary call, `ok` is always true, + // even server is not running ClientCallMethod* cm = static_cast(tag); - cm->handle_response(); + cm->handle_response(ok); delete cm; } } diff --git a/lib/server.cpp b/lib/server.cpp index 29f836a7..195edb60 100644 --- a/lib/server.cpp +++ b/lib/server.cpp @@ -5,11 +5,28 @@ */ #include +#include namespace sds::grpc { -void BaseServerCallData::proceed() { +void BaseServerCallData::proceed(bool ok) { + if (!ok && status_ != FINISH) { + // for unary call, there are two cases ok can be false in server-side: + // - Server-side RPC request: the server has been Shutdown + // before this particular call got matched to an incoming RPC. + // Call data should be released in this case. + // - Server-side Finish: response not going to the wire because + // the call is already dead (i.e., canceled, deadline expired, + // other side dropped the channel, etc) + // In this case, not only this call data should be released, + // server-side may need to handle the error, e.g roll back the + // grpc call's operation. This version sds_grpc doesn't expose + // API for handling this case, such API will be provided in next + // version of this library. + status_ = FINISH; + } + if (status_ == CREATE) { status_ = PROCESS; do_create(); @@ -38,6 +55,13 @@ GrpcServer::GrpcServer() { GrpcServer::~GrpcServer() { shutdown(); + + for (auto [k, v] : services_) { + (void)k; + delete v; + } + + services_.clear(); } @@ -115,14 +139,27 @@ void GrpcServer::handle_rpcs() { bool ok = false; while (cq_->Next(&tag, &ok)) { - if (!ok) { - // the server has been Shutdown before this particular - // call got matched to an incoming RPC. - continue; - } + + // `ok` is true if read a successful event, false otherwise. + // Success here means that this operation completed in the normal + // valid manner. + + // This version of sds_grpc only support unary grpc call, so only + // two cases need to be considered: + // + // Server-side RPC request: \a ok indicates that the RPC has indeed + // been started. If it is false, the server has been Shutdown + // before this particular call got matched to an incoming RPC. + // + // Server-side Finish: ok means that the data/metadata/status/etc is + // going to go to the wire. + // If it is false, it not going to the wire because the call + // is already dead (i.e., canceled, deadline expired, other side + // dropped the channel, etc). + BaseServerCallData* cm = static_cast(tag); - cm->proceed(); + cm->proceed(ok); } } diff --git a/tests/function/echo_server.cpp b/tests/function/echo_server.cpp index fd328e18..a1ffa48f 100644 --- a/tests/function/echo_server.cpp +++ b/tests/function/echo_server.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -97,40 +98,67 @@ class PingServiceImpl { }; -void RunServer() { +GrpcServer* g_grpc_server = nullptr; +EchoServiceImpl * g_echo_impl = nullptr; +PingServiceImpl * g_ping_impl = nullptr; + +void sighandler(int signum, siginfo_t *info, void *ptr) +{ + LOGINFO("Received signal {}", signum); + + if (signum == SIGTERM) { + // shutdown server gracefully for check memory leak + LOGINFO("Shutdown grpc server"); + g_grpc_server->shutdown(); + } +} + +void StartServer() { std::string server_address("0.0.0.0:50051"); - auto server = GrpcServer::make(server_address, 4, "", ""); + g_grpc_server = GrpcServer::make(server_address, 4, "", ""); - EchoServiceImpl * echo_impl = new EchoServiceImpl(); - echo_impl->register_service(server); + g_echo_impl = new EchoServiceImpl(); + g_echo_impl->register_service(g_grpc_server); - PingServiceImpl * ping_impl = new PingServiceImpl(); - ping_impl->register_service(server); + g_ping_impl = new PingServiceImpl(); + g_ping_impl->register_service(g_grpc_server); - server->run(); + g_grpc_server->run(); LOGINFO("Server listening on {}", server_address); - echo_impl->register_rpcs(server); - ping_impl->register_rpcs(server); + g_echo_impl->register_rpcs(g_grpc_server); + g_ping_impl->register_rpcs(g_grpc_server); - while (!server->is_terminated()) { - std::this_thread::sleep_for(std::chrono::seconds(1)); - } - - delete server; } + SDS_LOGGING_INIT() SDS_OPTIONS_ENABLE(logging) + int main(int argc, char* argv[]) { SDS_OPTIONS_LOAD(argc, argv, logging) sds_logging::SetLogger("echo_server"); LOGINFO("Start echo server ..."); - RunServer(); + StartServer(); + + struct sigaction act; + memset(&act, 0, sizeof(act)); + act.sa_sigaction = sighandler; + + sigaction(SIGTERM, &act, NULL); + + while (!g_grpc_server->is_terminated()) { + std::this_thread::sleep_for(std::chrono::seconds(1)); + } + + delete g_grpc_server; + delete g_echo_impl; + delete g_ping_impl; + return 0; } From 2ec5ec4faec8b5f1b30ca57f10834887b38689f8 Mon Sep 17 00:00:00 2001 From: Hari Kadayam Date: Thu, 28 Feb 2019 09:45:23 -0800 Subject: [PATCH 042/385] Disabling printing debug message of protobuf --- src/flip/lib/flip.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index c493ee3b..d812ed54 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -320,7 +320,7 @@ class Flip { m_flip_enabled = true; auto inst = flip_instance(fspec); - LOG(INFO) << "Fpsec: " << fspec.DebugString(); + //LOG(INFO) << "Fpsec: " << fspec.DebugString(); // TODO: Add verification to see if the flip is already scheduled, any errors etc.. std::unique_lock lock(m_mutex); From 2346fe8ba18c14a49aa4e0108cbbaa1b34c9414f Mon Sep 17 00:00:00 2001 From: Yaming Kuang Date: Sun, 10 Mar 2019 18:53:16 -0700 Subject: [PATCH 043/385] SDSTOR-1038 Fix a "set-but-unused" warning Add Flip package HomeStore and fix "set-but-unused" parameter warning (warning treated as error). --- src/flip/lib/flip.hpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index 8042be77..d5668224 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -292,8 +292,7 @@ class FlipTimer { } void timer_thr() { - size_t executed = 0; - executed = m_svc.run(); + m_svc.run(); } private: From 687bd5174c881a32a0dbbc77db521f1324604ff6 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 14 Mar 2019 07:31:30 -1000 Subject: [PATCH 044/385] Fix CLang build by adding missing header --- include/sds_grpc/client.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/sds_grpc/client.h b/include/sds_grpc/client.h index 9f18b728..ec8413f8 100644 --- a/include/sds_grpc/client.h +++ b/include/sds_grpc/client.h @@ -11,6 +11,7 @@ #include #include #include +#include #include #include From 57b5327e20272ddf9b44cdd51c30477c5ff415f3 Mon Sep 17 00:00:00 2001 From: Hari Kadayam Date: Tue, 23 Apr 2019 19:57:05 -0700 Subject: [PATCH 045/385] Added singleton instance for flip --- src/flip/lib/flip.hpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index d812ed54..c21cf743 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -18,7 +18,6 @@ #include namespace flip { -static thread_local boost::asio::io_service g_io; template < size_t Index = 0, // start iteration at 0 index @@ -313,7 +312,11 @@ class FlipTimer { class Flip { public: - Flip() : m_flip_enabled(false) { + Flip() : m_flip_enabled(false) {} + + static Flip& instance() { + static Flip s_instance; + return s_instance; } bool add(const FlipSpec &fspec) { From 2d5be912653b02bb0fd72c75f05fd2c17e25f90b Mon Sep 17 00:00:00 2001 From: Hari Kadayam Date: Fri, 10 May 2019 10:35:16 -0700 Subject: [PATCH 046/385] CMake changes to prevent compilation error in Mac --- src/flip/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index ab65eac0..8f176b03 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -69,11 +69,11 @@ target_link_libraries(flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} ${GFLAGS_LIBRARY} add_executable(test_flip ${TEST_FLIP_FILES}) target_link_libraries(test_flip ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} ${GFLAGS_LIBRARY} - pthread Boost::system) + pthread ${Boost_LIBRARIES}) add_executable(test_flip_client ${TEST_FLIP_CLIENT_FILES}) target_link_libraries(test_flip_client ${PROTOBUF_LIBRARY} ${GLOG_LIBRARY} - ${GFLAGS_LIBRARY} pthread Boost::system) + ${GFLAGS_LIBRARY} pthread ${Boost_LIBRARIES}) install(TARGETS flip DESTINATION ${CMAKE_PREFIX_PATH}/lib) install(FILES src/flip.hpp proto/flip_spec.proto ${PROTO_GEN_DIR}/flip_spec.pb.h DESTINATION ${CMAKE_PREFIX_PATH}/include/flip) From 8823df3c238e5c58d451e7da5430a6b031f6ffe0 Mon Sep 17 00:00:00 2001 From: Hari Kadayam Date: Thu, 13 Jun 2019 16:09:02 -0700 Subject: [PATCH 047/385] Emdedded GRPC server and client * Added GRPC server which can be optionally initialized and interacted with * New python based client library to interact with flip * New methods to get list of flips or details about specific flip --- src/flip/CMakeLists.txt | 31 +- .../local/test_flip_local_client.cpp} | 0 src/flip/client/python/flip_client_example.py | 49 ++ src/flip/client/python/flip_rpc_client.py | 111 +++++ src/flip/client/python/setup_python_client.sh | 2 + src/flip/lib/.clang-format | 147 ++++++ src/flip/lib/flip.hpp | 425 ++++++++++-------- src/flip/lib/test_flip.cpp | 5 +- src/flip/proto/CMakeLists.txt | 13 + src/flip/proto/flip_server.proto | 21 + src/flip/proto/flip_spec.proto | 4 +- .../flip_rpc_server.cpp/test_flip_server.cpp | 13 + 12 files changed, 614 insertions(+), 207 deletions(-) rename src/flip/{lib/test_flip_client.cpp => client/local/test_flip_local_client.cpp} (100%) create mode 100644 src/flip/client/python/flip_client_example.py create mode 100644 src/flip/client/python/flip_rpc_client.py create mode 100755 src/flip/client/python/setup_python_client.sh create mode 100644 src/flip/lib/.clang-format create mode 100644 src/flip/proto/flip_server.proto create mode 100644 src/flip/server/flip_rpc_server.cpp/test_flip_server.cpp diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 13a85ea0..5de8ce77 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -1,5 +1,6 @@ cmake_minimum_required(VERSION 3.10) project(flip) +include(CMakeScripts/grpc.cmake) set(CMAKE_CXX_STANDARD 17) @@ -28,11 +29,33 @@ endif () find_package(Protobuf REQUIRED) -add_subdirectory(proto) -include_directories(BEFORE include ${PROTO_PATH}) +file(GLOB PROTO_IDLS proto/*.proto) +PROTOBUF_GENERATE_CPP(PROTO_SRCS PROTO_HDRS ${PROTO_IDLS}) + +file(GLOB GRPC_IDLS proto/flip_server.proto) +PROTOBUF_GENERATE_GRPC_CPP(GRPC_SRCS GRPC_HDRS ${GRPC_IDLS}) + +list(GET PROTO_HDRS 0 FIRST_PROTO) +get_filename_component(PROTO_DIR ${FIRST_PROTO} DIRECTORY) +set(PROTO_PATH ${PROTO_DIR}) + +include_directories(BEFORE include ${PROTO_DIR} src) +set(FLIP_LIB_FILES + ${PROTO_SRCS} + ${PROTO_HDRS} + ${GRPC_SRCS} + ${GRPC_HDRS} + src/flip_rpc_server.cpp + ) + +add_library(flip ${FLIP_LIB_FILES}) +target_link_libraries(flip ${CONAN_LIBS}) add_executable(test_flip src/test_flip.cpp) target_link_libraries(test_flip flip) -add_executable(test_flip_client src/test_flip_client.cpp) -target_link_libraries(test_flip_client flip) +add_executable(test_flip_local_client src/test_flip_local_client.cpp) +target_link_libraries(test_flip_local_client flip) + +add_executable(test_flip_server src/test_flip_server.cpp) +target_link_libraries(test_flip_server flip) \ No newline at end of file diff --git a/src/flip/lib/test_flip_client.cpp b/src/flip/client/local/test_flip_local_client.cpp similarity index 100% rename from src/flip/lib/test_flip_client.cpp rename to src/flip/client/local/test_flip_local_client.cpp diff --git a/src/flip/client/python/flip_client_example.py b/src/flip/client/python/flip_client_example.py new file mode 100644 index 00000000..95f5288d --- /dev/null +++ b/src/flip/client/python/flip_client_example.py @@ -0,0 +1,49 @@ +from __future__ import print_function + +import random +import logging +from flip_rpc_client import * + +if __name__ == '__main__': + logging.basicConfig() + fclient = FlipRPCClient('localhost:50051') + + fclient.inject_test_flip("flip1", + fspec.FlipFrequency(count=4, percent=100), + [ + fspec.FlipCondition(oper=fspec.Operator.NOT_EQUAL, value=fspec.ParamValue(int_value=5)), + fspec.FlipCondition(oper=fspec.Operator.DONT_CARE) + ]) + fclient.inject_test_flip("flip2", + fspec.FlipFrequency(count=2, every_nth=5), + []) + + fclient.inject_ret_flip("flip3", + fspec.FlipFrequency(count=2, percent=100), + [ + fspec.FlipCondition(oper=fspec.Operator.NOT_EQUAL, value=fspec.ParamValue(int_value=5)), + ], + fspec.ParamValue(string_value="Simulated corruption") + ) + + fclient.inject_delay_flip("flip4", + fspec.FlipFrequency(count=10000, percent=100), + [ + fspec.FlipCondition(oper=fspec.Operator.GREATER_THAN_OR_EQUAL, + value=fspec.ParamValue(long_value=50000)), + ], + 1000 + ) + + fclient.inject_delay_ret_flip("flip5", + fspec.FlipFrequency(count=1, percent=50), + [ + fspec.FlipCondition(oper=fspec.Operator.LESS_THAN_OR_EQUAL, + value=fspec.ParamValue(double_value=800.15)), + ], + 1000, + fspec.ParamValue(bool_value=False) + ) + + fclient.flip_details("flip2") + fclient.all_flip_details() \ No newline at end of file diff --git a/src/flip/client/python/flip_rpc_client.py b/src/flip/client/python/flip_rpc_client.py new file mode 100644 index 00000000..1de7c5cc --- /dev/null +++ b/src/flip/client/python/flip_rpc_client.py @@ -0,0 +1,111 @@ +from __future__ import print_function + +import random +import logging +import sys +sys.path.append("gen_src") + +import grpc +import flip_spec_pb2 as fspec +import flip_spec_pb2_grpc +import flip_server_pb2 +import flip_server_pb2_grpc + +class FlipRPCClient: + def __init__(self, server_address): + self.channel = grpc.insecure_channel(server_address) + self.stub = flip_server_pb2_grpc.FlipServerStub(self.channel) + + def inject_fault(self, name, freq, conds, action): + self.stub.InjectFault(fspec.FlipSpec(flip_name=name, conditions=conds, flip_action=action, flip_frequency=freq)) + + def inject_test_flip(self, name, freq, conds): + print("------ Inject test flip", name, "-------------") + self.inject_fault(name, freq, conds, fspec.FlipAction(no_action=1)) + + def inject_ret_flip(self, name, freq, conds, retval): + print("------ Inject ret flip", name, "-------------") + self.inject_fault(name, freq, conds, fspec.FlipAction(returns=fspec.FlipAction.ActionReturns(retval=retval))) + + def inject_delay_flip(self, name, freq, conds, delay_usec): + print("------ Inject delay flip", name, "-------------") + self.inject_fault(name, freq, conds, + fspec.FlipAction(delays=fspec.FlipAction.ActionDelays(delay_in_usec=delay_usec))) + + def inject_delay_ret_flip(self, name, freq, conds, delay_usec, retval): + print("------ Inject delay and then ret flip", name, "-------------") + self.inject_fault(name, freq, conds, + fspec.FlipAction(delay_returns=fspec.FlipAction.ActionDelayedReturns( + delay_in_usec=delay_usec, + retval=retval))) + + def flip_details(self, name): + list_response = self.stub.GetFaults(flip_server_pb2.FlipNameRequest(name=name)) + for finfo in list_response.infos: + print(finfo.info) + + def all_flip_details(self): + list_response = self.stub.GetFaults(flip_server_pb2.FlipNameRequest(name=None)) + for finfo in list_response.infos: + print(finfo.info) + +""" def run(): + with grpc.insecure_channel('localhost:50051') as channel: + stub = flip_server_pb2_grpc.FlipServerStub(channel) + print("------ Inject Fault -------------") + #stub.InjectFault(flip_spec_pb2.FlipSpec(flip_name="xyz", flip_action=flip_spec_pb2.FlipAction.action(no_action = true))) + #cond = flip_spec_pb2.FlipCondition(name = "my_id", oper = flip_spec_pb2.Operator(EQUAL), value = flip_spec_pb2.ParamValue(kind = flip_spec_pb2.ParamValue.kind(null_value=true) )) + action = flip_spec_pb2.FlipAction(no_action=1) + freq = flip_spec_pb2.FlipFrequency(count=2) + stub.InjectFault( + flip_spec_pb2.FlipSpec(flip_name="xyz", + flip_action=action, + flip_frequency=freq)) + + +def connect(): + with grpc.insecure_channel('localhost:50051') as channel: + global stub + stub = flip_server_pb2_grpc.FlipServerStub(channel) + print("Stub = ", stub) + +if __name__ == '__main__': + logging.basicConfig() + fclient = FlipRPCClient('localhost:50051') + + fclient.inject_test_flip("flip1", + fspec.FlipFrequency(count=4, percent=100), + [ + fspec.FlipCondition(oper=fspec.Operator.NOT_EQUAL, value=fspec.ParamValue(int_value=5)), + fspec.FlipCondition(oper=fspec.Operator.DONT_CARE) + ]) + fclient.inject_test_flip("flip2", + fspec.FlipFrequency(count=2, every_nth=5), + []) + + fclient.inject_ret_flip("flip3", + fspec.FlipFrequency(count=2, percent=100), + [ + fspec.FlipCondition(oper=fspec.Operator.NOT_EQUAL, value=fspec.ParamValue(int_value=5)), + ], + fspec.ParamValue(string_value="Simulated corruption") + ) + + fclient.inject_delay_flip("flip4", + fspec.FlipFrequency(count=10000, percent=100), + [ + fspec.FlipCondition(oper=fspec.Operator.GREATER_THAN_OR_EQUAL, + value=fspec.ParamValue(long_value=50000)), + ], + 1000 + ) + + fclient.inject_delay_ret_flip("flip5", + fspec.FlipFrequency(count=1, percent=50), + [ + fspec.FlipCondition(oper=fspec.Operator.LESS_THAN_OR_EQUAL, + value=fspec.ParamValue(double_value=800.15)), + ], + 1000, + fspec.ParamValue(bool_value=False) + ) """ \ No newline at end of file diff --git a/src/flip/client/python/setup_python_client.sh b/src/flip/client/python/setup_python_client.sh new file mode 100755 index 00000000..b3ff5386 --- /dev/null +++ b/src/flip/client/python/setup_python_client.sh @@ -0,0 +1,2 @@ +pip install grpcio-tools +python3 -m grpc_tools.protoc -I proto/ --python_out=src/client/python/gen_src --grpc_python_out=src/client/python/gen_src/ proto/*.proto diff --git a/src/flip/lib/.clang-format b/src/flip/lib/.clang-format new file mode 100644 index 00000000..6f3aa76a --- /dev/null +++ b/src/flip/lib/.clang-format @@ -0,0 +1,147 @@ +--- +# We'll use defaults from the LLVM style, but with 4 columns indentation. +BasedOnStyle: LLVM +IndentWidth: 4 +--- +Language: Cpp +# Force pointers to the type for C++. +DerivePointerAlignment: false +PointerAlignment: Left +ColumnLimit: 120 + +AccessModifierOffset: -4 +AlignAfterOpenBracket: Align +AlignConsecutiveAssignments: false +AlignConsecutiveDeclarations: true +AlignEscapedNewlines: Right +AlignOperands: false +AlignTrailingComments: true +AllowShortBlocksOnASingleLine: true +AllowShortIfStatementsOnASingleLine: false +AllowShortBlocksOnASingleLine: true +AllowShortCaseLabelsOnASingleLine: true +# AllowShortFunctionsOnASingleLine: InlineOnly +# AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterReturnType: None +AlwaysBreakTemplateDeclarations: true + +BinPackArguments: true +BinPackParameters: true +BreakConstructorInitializersBeforeComma: true +BreakConstructorInitializers: AfterColon + +ConstructorInitializerAllOnOneLineOrOnePerLine: true +ConstructorInitializerIndentWidth: 8 + +IndentCaseLabels: false +SortIncludes: false +#IndentWrappedFunctionNames: true +#SpaceAfterTemplateKeyword: true +#SpaceBeforeAssignmentOperators: true +SpaceBeforeParens: ControlStatements +SpacesInAngles : true + +--- +Language: JavaScript +# Use 140 columns for JS. +ColumnLimit: 140 +... + + +##################### +# --- +# Language: Cpp +# AccessModifierOffset: -1 +# AlignAfterOpenBracket: Align +# AlignConsecutiveAssignments: false +# AlignConsecutiveDeclarations: false +# AlignEscapedNewlinesLeft: true +# AlignOperands: true +# AlignTrailingComments: true +# AllowAllParametersOfDeclarationOnNextLine: false +# AllowShortBlocksOnASingleLine: false +# AllowShortCaseLabelsOnASingleLine: false +# AllowShortFunctionsOnASingleLine: Inline +# AllowShortIfStatementsOnASingleLine: false +# AllowShortLoopsOnASingleLine: false +# AlwaysBreakAfterDefinitionReturnType: None +# AlwaysBreakAfterReturnType: None +# AlwaysBreakBeforeMultilineStrings: true +# AlwaysBreakTemplateDeclarations: true +# BinPackArguments: true +# BinPackParameters: false +# BraceWrapping: +# AfterClass: false +# AfterControlStatement: false +# AfterEnum: false +# AfterFunction: false +# AfterNamespace: false +# AfterObjCDeclaration: false +# AfterStruct: false +# AfterUnion: false +# BeforeCatch: false +# BeforeElse: false +# IndentBraces: false +# BreakBeforeBinaryOperators: None +# BreakBeforeBraces: Attach +# BreakBeforeInheritanceComma: false +# BreakBeforeTernaryOperators: true +# BreakAfterJavaFieldAnnotations: false +# BreakConstructorInitializersBeforeComma: false +# BreakStringLiterals: true +# ColumnLimit: 80 +# CommentPragmas: '^ IWYU pragma:' +# ConstructorInitializerAllOnOneLineOrOnePerLine: true +# ConstructorInitializerIndentWidth: 4 +# ContinuationIndentWidth: 4 +# Cpp11BracedListStyle: true +# DerivePointerAlignment: false +# DisableFormat: false +# ExperimentalAutoDetectBinPacking: false +# FixNamespaceComments: true +# ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] +# IncludeCategories: +# - Regex: '^<.*\.h>' +# Priority: 1 +# - Regex: '^<.*' +# Priority: 2 +# - Regex: '.*' +# Priority: 3 +# IncludeIsMainRegex: '([-_](test|unittest))?$' +# IndentCaseLabels: true +# IndentWidth: 2 +# IndentWrappedFunctionNames: false +# JavaScriptQuotes: Leave +# JavaScriptWrapImports: true +# KeepEmptyLinesAtTheStartOfBlocks: false +# MacroBlockBegin: '' +# MacroBlockEnd: '' +# MaxEmptyLinesToKeep: 1 +# NamespaceIndentation: None +# ObjCBlockIndentWidth: 2 +# ObjCSpaceAfterProperty: true +# ObjCSpaceBeforeProtocolList: false +# PenaltyBreakBeforeFirstCallParameter: 1 +# PenaltyBreakComment: 300 +# PenaltyBreakFirstLessLess: 120 +# PenaltyBreakString: 1000 +# PenaltyExcessCharacter: 1000000 +# PenaltyReturnTypeOnItsOwnLine: 200 +# PointerAlignment: Right +# ReflowComments: true +# SortIncludes: false +# SpaceAfterCStyleCast: false +# SpaceAfterTemplateKeyword: true +# SpaceBeforeAssignmentOperators: true +# SpaceBeforeParens: ControlStatements +# SpaceInEmptyParentheses: false +# SpacesBeforeTrailingComments: 2 +# SpacesInAngles: false +# SpacesInContainerLiterals: true +# SpacesInCStyleCastParentheses: false +# SpacesInParentheses: false +# SpacesInSquareBrackets: false +# Standard: Auto +# TabWidth: 8 +# UseTab: Never +# ... diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index c21cf743..3be2855a 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -5,6 +5,7 @@ #define FLIP_FLIP_HPP #include "flip_spec.pb.h" +#include "flip_rpc_server.hpp" #include #include #include @@ -19,65 +20,71 @@ namespace flip { -template < - size_t Index = 0, // start iteration at 0 index - typename TTuple, // the tuple type - size_t Size = - std::tuple_size_v< - std::remove_reference_t>, // tuple size - typename TCallable, // the callable to bo invoked for each tuple item - typename... TArgs // other arguments to be passed to the callable -> +template < size_t Index = 0, // start iteration at 0 index + typename TTuple, // the tuple type + size_t Size = std::tuple_size_v< std::remove_reference_t< TTuple > >, // tuple size + typename TCallable, // the callable to bo invoked for each tuple item + typename... TArgs // other arguments to be passed to the callable + > void for_each(TTuple&& tuple, TCallable&& callable, TArgs&&... args) { if constexpr (Index < Size) { - std::invoke(callable, args..., std::get(tuple)); + std::invoke(callable, args..., std::get< Index >(tuple)); if constexpr (Index + 1 < Size) { - for_each< Index + 1 >( - std::forward< TTuple >(tuple), - std::forward< TCallable >(callable), - std::forward< TArgs >(args)...); + for_each< Index + 1 >(std::forward< TTuple >(tuple), std::forward< TCallable >(callable), + std::forward< TArgs >(args)...); } } } struct flip_name_compare { - bool operator()(const std::string &lhs, const std::string &rhs) const { - return lhs < rhs; - } + bool operator()(const std::string& lhs, const std::string& rhs) const { return lhs < rhs; } }; struct flip_instance { - flip_instance(const FlipSpec &fspec) : + flip_instance(const FlipSpec& fspec) : m_fspec(fspec), m_hit_count(0), - m_remain_exec_count(fspec.flip_frequency().count()) { - } + m_remain_exec_count(fspec.flip_frequency().count()) {} - flip_instance(const flip_instance &other) { + flip_instance(const flip_instance& other) { m_fspec = other.m_fspec; m_hit_count.store(other.m_hit_count.load()); m_remain_exec_count.store(other.m_remain_exec_count.load()); } - FlipSpec m_fspec; + std::string to_string() const { + std::stringstream ss; + ss << "\n---------------------------" << m_fspec.flip_name() << "-----------------------\n"; + ss << "Hitcount: " << m_hit_count << "\n"; + ss << "Remaining count: " << m_remain_exec_count << "\n"; + ss << m_fspec.flip_frequency().DebugString(); + ss << m_fspec.flip_action().DebugString(); + ss << "Conditions: [\n"; + auto i = 1; + for (const auto& cond: m_fspec.conditions()) { + ss << std::to_string(i) << ") " << Operator_Name(cond.oper()) << " => " << cond.value().DebugString(); + ++i; + } + ss << "]"; + ss << "\n-------------------------------------------------------------------\n"; + return ss.str(); + } + + FlipSpec m_fspec; std::atomic< uint32_t > m_hit_count; - std::atomic< int32_t > m_remain_exec_count; + std::atomic< int32_t > m_remain_exec_count; }; /****************************** Proto Param to Value converter ******************************/ -template +template < typename T > struct val_converter { - T operator()(const ParamValue &val) { - return 0; - } + T operator()(const ParamValue& val) { return 0; } }; template <> -struct val_converter { - int operator()(const ParamValue &val) { - return (val.kind_case() == ParamValue::kIntValue) ? val.int_value() : 0; - } +struct val_converter< int > { + int operator()(const ParamValue& val) { return (val.kind_case() == ParamValue::kIntValue) ? val.int_value() : 0; } }; #if 0 @@ -90,66 +97,63 @@ struct val_converter { #endif template <> -struct val_converter { - long operator()(const ParamValue &val) { +struct val_converter< long > { + long operator()(const ParamValue& val) { return (val.kind_case() == ParamValue::kLongValue) ? val.long_value() : 0; } }; template <> -struct val_converter { - double operator()(const ParamValue &val) { +struct val_converter< double > { + double operator()(const ParamValue& val) { return (val.kind_case() == ParamValue::kDoubleValue) ? val.double_value() : 0; } }; template <> -struct val_converter { - std::string operator()(const ParamValue &val) { +struct val_converter< std::string > { + std::string operator()(const ParamValue& val) { return (val.kind_case() == ParamValue::kStringValue) ? val.string_value() : ""; } }; template <> -struct val_converter { - const char *operator()(const ParamValue &val) { +struct val_converter< const char* > { + const char* operator()(const ParamValue& val) { return (val.kind_case() == ParamValue::kStringValue) ? val.string_value().c_str() : nullptr; } }; template <> -struct val_converter { - bool operator()(const ParamValue &val) { +struct val_converter< bool > { + bool operator()(const ParamValue& val) { return (val.kind_case() == ParamValue::kBoolValue) ? val.bool_value() : 0; } }; -template< typename T > +template < typename T > struct delayed_return_param { uint64_t delay_usec; T val; }; -template -struct val_converter> { - delayed_return_param operator()(const ParamValue &val) { - delayed_return_param dummy; +template < typename T > +struct val_converter< delayed_return_param< T > > { + delayed_return_param< T > operator()(const ParamValue& val) { + delayed_return_param< T > dummy; return dummy; } }; /******************************************** Value to Proto converter ****************************************/ -template +template < typename T > struct to_proto_converter { - void operator()(const T& val, ParamValue* out_pval) { - } + void operator()(const T& val, ParamValue* out_pval) {} }; template <> -struct to_proto_converter { - void operator()(const int& val, ParamValue* out_pval) { - out_pval->set_int_value(val); - } +struct to_proto_converter< int > { + void operator()(const int& val, ParamValue* out_pval) { out_pval->set_int_value(val); } }; #if 0 @@ -162,90 +166,68 @@ struct val_converter { #endif template <> -struct to_proto_converter { - void operator()(const long& val, ParamValue* out_pval) { - out_pval->set_long_value(val); - } +struct to_proto_converter< long > { + void operator()(const long& val, ParamValue* out_pval) { out_pval->set_long_value(val); } }; template <> -struct to_proto_converter { - void operator()(const double& val, ParamValue* out_pval) { - out_pval->set_double_value(val); - } +struct to_proto_converter< double > { + void operator()(const double& val, ParamValue* out_pval) { out_pval->set_double_value(val); } }; template <> -struct to_proto_converter { - void operator()(const std::string& val, ParamValue* out_pval) { - out_pval->set_string_value(val); - } +struct to_proto_converter< std::string > { + void operator()(const std::string& val, ParamValue* out_pval) { out_pval->set_string_value(val); } }; template <> -struct to_proto_converter { - void operator()(const char*& val, ParamValue* out_pval) { - out_pval->set_string_value(val); - } +struct to_proto_converter< const char* > { + void operator()(const char*& val, ParamValue* out_pval) { out_pval->set_string_value(val); } }; template <> -struct to_proto_converter { - void operator()(const bool& val, ParamValue* out_pval) { - out_pval->set_bool_value(val); - } +struct to_proto_converter< bool > { + void operator()(const bool& val, ParamValue* out_pval) { out_pval->set_bool_value(val); } }; /******************************************* Comparators *************************************/ -template< typename T > +template < typename T > struct compare_val { - bool operator()(const T &val1, const T &val2, Operator oper) { + bool operator()(const T& val1, const T& val2, Operator oper) { switch (oper) { - case Operator::DONT_CARE: - return true; + case Operator::DONT_CARE: return true; - case Operator::EQUAL: - return (val1 == val2); + case Operator::EQUAL: return (val1 == val2); - case Operator::NOT_EQUAL: - return (val1 != val2); + case Operator::NOT_EQUAL: return (val1 != val2); - case Operator::GREATER_THAN: - return (val1 > val2); + case Operator::GREATER_THAN: return (val1 > val2); - case Operator::LESS_THAN: - return (val1 < val2); + case Operator::LESS_THAN: return (val1 < val2); - case Operator::GREATER_THAN_OR_EQUAL: - return (val1 >= val2); + case Operator::GREATER_THAN_OR_EQUAL: return (val1 >= val2); - case Operator::LESS_THAN_OR_EQUAL: - return (val1 <= val2); + case Operator::LESS_THAN_OR_EQUAL: return (val1 <= val2); - default: - return false; + default: return false; } } }; -template<> -struct compare_val { - bool operator()(const char *&val1, const char *&val2, Operator oper) { +template <> +struct compare_val< const char* > { + bool operator()(const char*& val1, const char*& val2, Operator oper) { switch (oper) { - case Operator::DONT_CARE: - return true; + case Operator::DONT_CARE: return true; - case Operator::EQUAL: - return (val1 && val2 && (strcmp(val1, val2) == 0)) || (!val1 && !val2); + case Operator::EQUAL: return (val1 && val2 && (strcmp(val1, val2) == 0)) || (!val1 && !val2); case Operator::NOT_EQUAL: return (val1 && val2 && (strcmp(val1, val2) != 0)) || (!val1 && val2) || (val1 && !val2); - case Operator::GREATER_THAN: - return (val1 && val2 && (strcmp(val1, val2) > 0)) || (val1 && !val2); + case Operator::GREATER_THAN: return (val1 && val2 && (strcmp(val1, val2) > 0)) || (val1 && !val2); - case Operator::LESS_THAN: - return (val1 && val2 && (strcmp(val1, val2) < 0)) || (!val1 && val2); + case Operator::LESS_THAN: return (val1 && val2 && (strcmp(val1, val2) < 0)) || (!val1 && val2); case Operator::GREATER_THAN_OR_EQUAL: return (val1 && val2 && (strcmp(val1, val2) >= 0)) || (val1 && !val2) || (!val1 && !val2); @@ -253,15 +235,14 @@ struct compare_val { case Operator::LESS_THAN_OR_EQUAL: return (val1 && val2 && (strcmp(val1, val2) <= 0)) || (!val1 && val2) || (!val1 && !val2); - default: - return false; + default: return false; } } }; -using io_service = boost::asio::io_service; -using deadline_timer = boost::asio::deadline_timer; -using io_work = boost::asio::io_service::work; +using io_service = boost::asio::io_service; +using deadline_timer = boost::asio::deadline_timer; +using io_work = boost::asio::io_service::work; class FlipTimer { public: @@ -273,19 +254,22 @@ class FlipTimer { } } - void schedule(boost::posix_time::time_duration delay_us, const std::function& closure) { - std::unique_lock lk(m_thr_mutex); + void schedule(boost::posix_time::time_duration delay_us, const std::function< void() >& closure) { + std::unique_lock< std::mutex > lk(m_thr_mutex); ++m_timer_count; if (m_work == nullptr) { - m_work = std::make_unique(m_svc); - m_timer_thread = std::make_unique(std::bind(&FlipTimer::timer_thr, this)); + m_work = std::make_unique< io_work >(m_svc); + m_timer_thread = std::make_unique< std::thread >(std::bind(&FlipTimer::timer_thr, this)); } - auto t = std::make_shared(m_svc, delay_us); - t->async_wait([this, closure, t](const boost::system::error_code& e){ - if (e) { LOG(ERROR) << "Error in timer routine, message " << e.message(); } - else { closure(); } - std::unique_lock lk(m_thr_mutex); + auto t = std::make_shared< deadline_timer >(m_svc, delay_us); + t->async_wait([this, closure, t](const boost::system::error_code& e) { + if (e) { + LOG(ERROR) << "Error in timer routine, message " << e.message(); + } else { + closure(); + } + std::unique_lock< std::mutex > lk(m_thr_mutex); --m_timer_count; }); } @@ -294,20 +278,20 @@ class FlipTimer { size_t executed = 0; executed = m_svc.run(); // To suppress compiler warning - (void) executed; + (void)executed; } private: - io_service m_svc; - std::unique_ptr m_work; - std::mutex m_thr_mutex; - int32_t m_timer_count; - std::unique_ptr< std::thread >m_timer_thread; + io_service m_svc; + std::unique_ptr< io_work > m_work; + std::mutex m_thr_mutex; + int32_t m_timer_count; + std::unique_ptr< std::thread > m_timer_thread; }; -#define TEST_ONLY 0 -#define RETURN_VAL 1 -#define SET_DELAY 2 +#define TEST_ONLY 0 +#define RETURN_VAL 1 +#define SET_DELAY 2 #define DELAYED_RETURN 3 class Flip { @@ -319,19 +303,55 @@ class Flip { return s_instance; } - bool add(const FlipSpec &fspec) { + void start_rpc_server() { + m_flip_server_thread = std::unique_ptr< std::thread >(new std::thread(FlipRPCServer::rpc_thread)); + m_flip_server_thread->detach(); + } + + bool add(const FlipSpec& fspec) { m_flip_enabled = true; auto inst = flip_instance(fspec); - //LOG(INFO) << "Fpsec: " << fspec.DebugString(); + // LOG(INFO) << "Fpsec: " << fspec.DebugString(); // TODO: Add verification to see if the flip is already scheduled, any errors etc.. - std::unique_lock lock(m_mutex); + std::unique_lock< std::shared_mutex > lock(m_mutex); m_flip_specs.emplace(std::pair< std::string, flip_instance >(fspec.flip_name(), inst)); LOG(INFO) << "Added new fault flip " << fspec.flip_name() << " to the list of flips"; + //LOG(INFO) << "Flip details:" << inst.to_string(); return true; } + std::vector< std::string > get(const std::string& flip_name) { + std::shared_lock< std::shared_mutex > lock(m_mutex); + std::vector< std::string > res; + + auto search = m_flip_specs.equal_range(flip_name); + for (auto it = search.first; it != search.second; ++it) { + const auto& inst = it->second; + res.emplace_back(inst.to_string()); + } + + return res; + } + + std::vector< std::string > get_all() { + std::shared_lock< std::shared_mutex > lock(m_mutex); + std::vector< std::string > res; + + for (const auto& it : m_flip_specs) { + const auto& inst = it.second; + res.emplace_back(inst.to_string()); +#if 0 + for (auto it = inst_range.first; it != inst_range.second; ++it) { + const auto& inst = inst_range->second; + res.emplace_back(inst.to_string()); + } +#endif + } + + return res; + } #if 0 bool add_flip(std::string flip_name, std::vector conditions, FlipAction& action, uint32_t count, uint8_t percent) { @@ -352,63 +372,69 @@ class Flip { } #endif - template< class... Args > - bool test_flip(std::string flip_name, Args &&... args) { - if (!m_flip_enabled) return false; - auto ret = __test_flip(flip_name, std::forward< Args >(args)...); + template < class... Args > + bool test_flip(std::string flip_name, Args&&... args) { + if (!m_flip_enabled) + return false; + auto ret = __test_flip< bool, TEST_ONLY >(flip_name, std::forward< Args >(args)...); return (ret != boost::none); } - template< typename T, class... Args > - boost::optional< T > get_test_flip(std::string flip_name, Args &&... args) { - if (!m_flip_enabled) return boost::none; + template < typename T, class... Args > + boost::optional< T > get_test_flip(std::string flip_name, Args&&... args) { + if (!m_flip_enabled) + return boost::none; - auto ret = __test_flip(flip_name, std::forward< Args >(args)...); - if (ret == boost::none) return boost::none; - return boost::optional(boost::get(ret.get())); + auto ret = __test_flip< T, RETURN_VAL >(flip_name, std::forward< Args >(args)...); + if (ret == boost::none) + return boost::none; + return boost::optional< T >(boost::get< T >(ret.get())); } - template< class... Args > - bool delay_flip(std::string flip_name, const std::function &closure, Args &&... args) { - if (!m_flip_enabled) return false; + template < class... Args > + bool delay_flip(std::string flip_name, const std::function< void() >& closure, Args&&... args) { + if (!m_flip_enabled) + return false; - auto ret = __test_flip(flip_name, std::forward< Args >(args)...); - if (ret == boost::none) return false; // Not a hit + auto ret = __test_flip< bool, SET_DELAY >(flip_name, std::forward< Args >(args)...); + if (ret == boost::none) + return false; // Not a hit - uint64_t delay_usec = boost::get(ret.get()); + uint64_t delay_usec = boost::get< uint64_t >(ret.get()); m_timer.schedule(boost::posix_time::microseconds(delay_usec), closure); return true; } - template - bool get_delay_flip(std::string flip_name, const std::function &closure, Args &&... args) { - if (!m_flip_enabled) return false; + template < typename T, class... Args > + bool get_delay_flip(std::string flip_name, const std::function< void(T) >& closure, Args&&... args) { + if (!m_flip_enabled) + return false; - auto ret = __test_flip(flip_name, std::forward< Args >(args)...); - if (ret == boost::none) return false; // Not a hit + auto ret = __test_flip< T, DELAYED_RETURN >(flip_name, std::forward< Args >(args)...); + if (ret == boost::none) + return false; // Not a hit - auto param = boost::get>(ret.get()); + auto param = boost::get< delayed_return_param< T > >(ret.get()); LOG(INFO) << "Returned param delay = " << param.delay_usec << " val = " << param.val; - m_timer.schedule(boost::posix_time::microseconds(param.delay_usec), [closure, param]() { - closure(param.val); - }); + m_timer.schedule(boost::posix_time::microseconds(param.delay_usec), [closure, param]() { closure(param.val); }); return true; } private: - template< typename T, int ActionType, class... Args > - boost::optional< boost::variant> > __test_flip(std::string flip_name, Args &&... args) { - bool exec_completed = false; // If all the exec for the flip is completed. - flip_instance *inst = nullptr; + template < typename T, int ActionType, class... Args > + boost::optional< boost::variant< T, bool, uint64_t, delayed_return_param< T > > > __test_flip(std::string flip_name, + Args&&... args) { + bool exec_completed = false; // If all the exec for the flip is completed. + flip_instance* inst = nullptr; { - std::shared_lock lock(m_mutex); + std::shared_lock< std::shared_mutex > lock(m_mutex); inst = match_flip(flip_name, std::forward< Args >(args)...); if (inst == nullptr) { - //LOG(INFO) << "Flip " << flip_name << " either not exist or conditions not match"; + // LOG(INFO) << "Flip " << flip_name << " either not exist or conditions not match"; return boost::none; } - auto &fspec = inst->m_fspec; + auto& fspec = inst->m_fspec; // Check if we are subjected to rate limit if (!handle_hits(fspec.flip_frequency(), inst)) { @@ -427,44 +453,42 @@ class Flip { LOG(INFO) << "Flip " << flip_name << " matches and hits"; } - boost::variant> val_ret ; + boost::variant< T, bool, uint64_t, delayed_return_param< T > > val_ret; switch (inst->m_fspec.flip_action().action_case()) { case FlipAction::kReturns: if (ActionType == RETURN_VAL) { - val_ret = val_converter< T >()(inst->m_fspec.flip_action().returns().return_()); + val_ret = val_converter< T >()(inst->m_fspec.flip_action().returns().retval()); } else { val_ret = true; } break; case FlipAction::kNoAction: - //static_assert(!std::is_same::value || std::is_same::value, "__test_flip without value should be called with bool as type"); + // static_assert(!std::is_same::value || std::is_same::value, "__test_flip + // without value should be called with bool as type"); val_ret = true; break; - case FlipAction::kDelays: - val_ret = inst->m_fspec.flip_action().delays().delay_in_usec(); - break; + case FlipAction::kDelays: val_ret = inst->m_fspec.flip_action().delays().delay_in_usec(); break; case FlipAction::kDelayReturns: if (ActionType == DELAYED_RETURN) { - auto &flip_dr = inst->m_fspec.flip_action().delay_returns(); - delayed_return_param dr; + auto& flip_dr = inst->m_fspec.flip_action().delay_returns(); + delayed_return_param< T > dr; dr.delay_usec = flip_dr.delay_in_usec(); - dr.val = val_converter< T >()(flip_dr.return_()); + dr.val = val_converter< T >()(flip_dr.retval()); val_ret = dr; } else { val_ret = true; } break; - default: - val_ret = true; + default: val_ret = true; } if (exec_completed) { // If we completed the execution, need to remove them - std::unique_lock lock(m_mutex); + std::unique_lock< std::shared_mutex > lock(m_mutex); if (inst->m_remain_exec_count.load(std::memory_order_relaxed) == 0) { m_flip_specs.erase(flip_name); } @@ -472,9 +496,9 @@ class Flip { return val_ret; } - template< class... Args > - flip_instance * match_flip(std::string flip_name, Args &&... args) { - flip_instance *match_inst = nullptr; + template < class... Args > + flip_instance* match_flip(std::string flip_name, Args&&... args) { + flip_instance* match_inst = nullptr; auto search = m_flip_specs.equal_range(flip_name); for (auto it = search.first; it != search.second; ++it) { @@ -483,9 +507,9 @@ class Flip { // Check for all the condition match std::tuple< Args... > arglist(std::forward< Args >(args)...); - auto i = 0U; - bool matched = true; - for_each(arglist, [this, fspec, &i, &matched](auto &v) { + auto i = 0U; + bool matched = true; + for_each(arglist, [this, fspec, &i, &matched](auto& v) { if (!condition_matches(v, fspec.conditions()[i++])) { matched = false; } @@ -500,13 +524,13 @@ class Flip { return match_inst; } - template< typename T > - bool condition_matches(T &comp_val, const FlipCondition &cond) { + template < typename T > + bool condition_matches(T& comp_val, const FlipCondition& cond) { auto val1 = val_converter< T >()(cond.value()); return compare_val< T >()(comp_val, val1, cond.oper()); } - bool handle_hits(const FlipFrequency &freq, flip_instance *inst) { + bool handle_hits(const FlipFrequency& freq, flip_instance* inst) { auto hit_count = inst->m_hit_count.fetch_add(1, std::memory_order_release); if (freq.every_nth() != 0) { return ((hit_count % freq.every_nth()) == 0); @@ -576,23 +600,26 @@ class Flip { #endif private: std::multimap< std::string, flip_instance, flip_name_compare > m_flip_specs; - std::shared_mutex m_mutex; - bool m_flip_enabled; - FlipTimer m_timer; + std::shared_mutex m_mutex; + bool m_flip_enabled; + FlipTimer m_timer; + std::unique_ptr< std::thread > m_flip_server_thread; }; class FlipClient { public: - explicit FlipClient(Flip *f) : m_flip(f) {} + explicit FlipClient(Flip* f) : m_flip(f) {} - template< typename T> - void create_condition(const std::string& param_name, flip::Operator oper, const T& value, FlipCondition *out_condition) { + template < typename T > + void create_condition(const std::string& param_name, flip::Operator oper, const T& value, + FlipCondition* out_condition) { *(out_condition->mutable_name()) = param_name; out_condition->set_oper(oper); - to_proto_converter()(value, out_condition->mutable_value()); + to_proto_converter< T >()(value, out_condition->mutable_value()); } - bool inject_noreturn_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, const FlipFrequency &freq) { + bool inject_noreturn_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, + const FlipFrequency& freq) { FlipSpec fspec; _create_flip_spec(flip_name, conditions, freq, fspec); @@ -602,20 +629,20 @@ class FlipClient { return true; } - template + template < typename T > bool inject_retval_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, - const FlipFrequency& freq, const T& retval) { + const FlipFrequency& freq, const T& retval) { FlipSpec fspec; _create_flip_spec(flip_name, conditions, freq, fspec); - to_proto_converter()(retval, fspec.mutable_flip_action()->mutable_returns()->mutable_return_()); + to_proto_converter< T >()(retval, fspec.mutable_flip_action()->mutable_returns()->mutable_retval()); m_flip->add(fspec); return true; } bool inject_delay_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, - const FlipFrequency& freq, uint64_t delay_usec) { + const FlipFrequency& freq, uint64_t delay_usec) { FlipSpec fspec; _create_flip_spec(flip_name, conditions, freq, fspec); @@ -625,14 +652,14 @@ class FlipClient { return true; } - template + template < typename T > bool inject_delay_and_retval_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, - const FlipFrequency &freq, uint64_t delay_usec, const T& retval) { + const FlipFrequency& freq, uint64_t delay_usec, const T& retval) { FlipSpec fspec; _create_flip_spec(flip_name, conditions, freq, fspec); fspec.mutable_flip_action()->mutable_delays()->set_delay_in_usec(delay_usec); - to_proto_converter()(retval, fspec.mutable_flip_action()->mutable_delay_returns()->mutable_return_()); + to_proto_converter< T >()(retval, fspec.mutable_flip_action()->mutable_delay_returns()->mutable_retval()); m_flip->add(fspec); return true; @@ -640,9 +667,9 @@ class FlipClient { private: void _create_flip_spec(std::string flip_name, const std::vector< FlipCondition >& conditions, - const FlipFrequency& freq, FlipSpec& out_fspec) { + const FlipFrequency& freq, FlipSpec& out_fspec) { *(out_fspec.mutable_flip_name()) = flip_name; - for (auto &c: conditions) { + for (auto& c : conditions) { *(out_fspec.mutable_conditions()->Add()) = c; } *(out_fspec.mutable_flip_frequency()) = freq; @@ -653,4 +680,4 @@ class FlipClient { }; } // namespace flip -#endif //FLIP_FLIP_HPP +#endif // FLIP_FLIP_HPP diff --git a/src/flip/lib/test_flip.cpp b/src/flip/lib/test_flip.cpp index c344e84a..ac814f5e 100644 --- a/src/flip/lib/test_flip.cpp +++ b/src/flip/lib/test_flip.cpp @@ -16,7 +16,7 @@ void create_ret_fspec(flip::FlipSpec *fspec) { cond->set_oper(flip::Operator::EQUAL); cond->mutable_value()->set_string_value("item_shipping"); - fspec->mutable_flip_action()->mutable_returns()->mutable_return_()->set_string_value("Error simulated value"); + fspec->mutable_flip_action()->mutable_returns()->mutable_retval()->set_string_value("Error simulated value"); auto freq = fspec->mutable_flip_frequency(); freq->set_count(2); @@ -118,7 +118,7 @@ void create_delay_ret_fspec(flip::FlipSpec *fspec) { cond->mutable_value()->set_int_value(2); fspec->mutable_flip_action()->mutable_delay_returns()->set_delay_in_usec(100000); - fspec->mutable_flip_action()->mutable_delay_returns()->mutable_return_()->set_string_value("Delayed error simulated value"); + fspec->mutable_flip_action()->mutable_delay_returns()->mutable_retval()->set_string_value("Delayed error simulated value"); auto freq = fspec->mutable_flip_frequency(); freq->set_count(2); @@ -197,6 +197,7 @@ int main(int argc, char *argv[]) { create_delay_ret_fspec(&delay_ret_fspec); flip::Flip flip; + flip.start_rpc_server(); flip.add(ret_fspec); flip.add(check_fspec); flip.add(delay_fspec); diff --git a/src/flip/proto/CMakeLists.txt b/src/flip/proto/CMakeLists.txt index 8aa7da0e..67014cad 100644 --- a/src/flip/proto/CMakeLists.txt +++ b/src/flip/proto/CMakeLists.txt @@ -1,14 +1,27 @@ +include(../CMakeScripts/grpc.cmake) file(GLOB PROTO_IDLS *.proto) +message("PROTO_IDLS = " ${PROTO_IDLS}) PROTOBUF_GENERATE_CPP(PROTO_SRCS PROTO_HDRS ${PROTO_IDLS}) +file(GLOB GRPC_IDLS flip_server.proto) +message("GRPC_IDLS = " ${GRPC_IDLS}) +PROTOBUF_GENERATE_GRPC_CPP(GRPC_SRCS GRPC_HDRS ${GRPC_IDLS}) + list(GET PROTO_HDRS 0 FIRST_PROTO) get_filename_component(PROTO_DIR ${FIRST_PROTO} DIRECTORY) set(PROTO_PATH ${PROTO_DIR} PARENT_SCOPE) +include_directories(BEFORE include ${PROTO_DIR}) set(FLIP_LIB_FILES + ${FLIP_LIB_FILES} ${PROTO_SRCS} ${PROTO_HDRS} + ${GRPC_SRCS} + ${GRPC_HDRS} ) + +message("FLIP_LIB_FILES = " ${FLIP_LIB_FILES}) +message("PROTO_DIR = " ${PROTO_DIR}) add_library(flip ${FLIP_LIB_FILES}) target_link_libraries(flip ${CONAN_LIBS}) diff --git a/src/flip/proto/flip_server.proto b/src/flip/proto/flip_server.proto new file mode 100644 index 00000000..79e3ae71 --- /dev/null +++ b/src/flip/proto/flip_server.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package flip; + +import "flip_spec.proto"; + +message FlipListResponse { + message FlipInfo { string info = 1; } + + repeated FlipInfo infos = 1; +} + +message FlipNameRequest { string name = 1; } + +service FlipServer { + // Inject a fault rpc + rpc InjectFault(flip.FlipSpec) returns (flip.FlipResponse); + + // Get details about one or all faults + rpc GetFaults(FlipNameRequest) returns (FlipListResponse); +} \ No newline at end of file diff --git a/src/flip/proto/flip_spec.proto b/src/flip/proto/flip_spec.proto index b0bdf86b..e58e84e9 100644 --- a/src/flip/proto/flip_spec.proto +++ b/src/flip/proto/flip_spec.proto @@ -72,7 +72,7 @@ message FlipCondition { message FlipAction { message ActionReturns { - ParamValue return = 1; + ParamValue retval = 1; } message ActionDelays { @@ -81,7 +81,7 @@ message FlipAction { message ActionDelayedReturns { uint64 delay_in_usec = 1; - ParamValue return = 2; + ParamValue retval = 2; } oneof action { diff --git a/src/flip/server/flip_rpc_server.cpp/test_flip_server.cpp b/src/flip/server/flip_rpc_server.cpp/test_flip_server.cpp new file mode 100644 index 00000000..f8bc7459 --- /dev/null +++ b/src/flip/server/flip_rpc_server.cpp/test_flip_server.cpp @@ -0,0 +1,13 @@ +// +// Created by Kadayam, Hari on 28/03/18. +// + +#include "flip.hpp" + +int main(int argc, char *argv[]) { + flip::Flip f; + f.start_rpc_server(); + + sleep(1000); + return 0; +} \ No newline at end of file From f2a3eae13744fc503d69a782ac8540bd2d6acee4 Mon Sep 17 00:00:00 2001 From: Hari Kadayam Date: Thu, 13 Jun 2019 18:58:54 -0700 Subject: [PATCH 048/385] Removed old code and fixed build failure --- src/flip/README.md | 6 ++- src/flip/client/python/flip_rpc_client.py | 61 ----------------------- 2 files changed, 5 insertions(+), 62 deletions(-) diff --git a/src/flip/README.md b/src/flip/README.md index ba6dc642..7d9fd064 100644 --- a/src/flip/README.md +++ b/src/flip/README.md @@ -238,8 +238,11 @@ If application uses GRPC, the grpc definition needs to add the following RPC cal // Inject a fault rpc rpc InjectFault (flip.FlipSpec) returns (flip.FlipResponse); ``` +Flip also supports optional GRPC server which can be started using -**TODO:** Future work will provide a mechanism to start its own grpc server if needed, instead of relying on application rpc mechanism. +```c++ +Flip::start_rpc_server() +``` # Flip Client @@ -261,6 +264,7 @@ await test.do_inject_fault( ) ``` +### Python Client **TODO:** Write a standalone client which can be used to trigger various faults on different languages. ## Local Client diff --git a/src/flip/client/python/flip_rpc_client.py b/src/flip/client/python/flip_rpc_client.py index 1de7c5cc..25bb6747 100644 --- a/src/flip/client/python/flip_rpc_client.py +++ b/src/flip/client/python/flip_rpc_client.py @@ -48,64 +48,3 @@ def all_flip_details(self): list_response = self.stub.GetFaults(flip_server_pb2.FlipNameRequest(name=None)) for finfo in list_response.infos: print(finfo.info) - -""" def run(): - with grpc.insecure_channel('localhost:50051') as channel: - stub = flip_server_pb2_grpc.FlipServerStub(channel) - print("------ Inject Fault -------------") - #stub.InjectFault(flip_spec_pb2.FlipSpec(flip_name="xyz", flip_action=flip_spec_pb2.FlipAction.action(no_action = true))) - #cond = flip_spec_pb2.FlipCondition(name = "my_id", oper = flip_spec_pb2.Operator(EQUAL), value = flip_spec_pb2.ParamValue(kind = flip_spec_pb2.ParamValue.kind(null_value=true) )) - action = flip_spec_pb2.FlipAction(no_action=1) - freq = flip_spec_pb2.FlipFrequency(count=2) - stub.InjectFault( - flip_spec_pb2.FlipSpec(flip_name="xyz", - flip_action=action, - flip_frequency=freq)) - - -def connect(): - with grpc.insecure_channel('localhost:50051') as channel: - global stub - stub = flip_server_pb2_grpc.FlipServerStub(channel) - print("Stub = ", stub) - -if __name__ == '__main__': - logging.basicConfig() - fclient = FlipRPCClient('localhost:50051') - - fclient.inject_test_flip("flip1", - fspec.FlipFrequency(count=4, percent=100), - [ - fspec.FlipCondition(oper=fspec.Operator.NOT_EQUAL, value=fspec.ParamValue(int_value=5)), - fspec.FlipCondition(oper=fspec.Operator.DONT_CARE) - ]) - fclient.inject_test_flip("flip2", - fspec.FlipFrequency(count=2, every_nth=5), - []) - - fclient.inject_ret_flip("flip3", - fspec.FlipFrequency(count=2, percent=100), - [ - fspec.FlipCondition(oper=fspec.Operator.NOT_EQUAL, value=fspec.ParamValue(int_value=5)), - ], - fspec.ParamValue(string_value="Simulated corruption") - ) - - fclient.inject_delay_flip("flip4", - fspec.FlipFrequency(count=10000, percent=100), - [ - fspec.FlipCondition(oper=fspec.Operator.GREATER_THAN_OR_EQUAL, - value=fspec.ParamValue(long_value=50000)), - ], - 1000 - ) - - fclient.inject_delay_ret_flip("flip5", - fspec.FlipFrequency(count=1, percent=50), - [ - fspec.FlipCondition(oper=fspec.Operator.LESS_THAN_OR_EQUAL, - value=fspec.ParamValue(double_value=800.15)), - ], - 1000, - fspec.ParamValue(bool_value=False) - ) """ \ No newline at end of file From 57b5fd27641e78cdf04a5d880cab154074d24d44 Mon Sep 17 00:00:00 2001 From: Hari Kadayam Date: Fri, 14 Jun 2019 10:01:14 -0700 Subject: [PATCH 049/385] Moved CMakeScripts to cmake path --- src/flip/CMakeLists.txt | 6 +-- src/flip/cmake/grpc.cmake | 57 ++++++++++++++++++++++++++ src/flip/cmake/protobuf.cmake | 77 +++++++++++++++++++++++++++++++++++ 3 files changed, 137 insertions(+), 3 deletions(-) create mode 100644 src/flip/cmake/grpc.cmake create mode 100644 src/flip/cmake/protobuf.cmake diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 5de8ce77..c11bf8e2 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -1,7 +1,5 @@ cmake_minimum_required(VERSION 3.10) project(flip) -include(CMakeScripts/grpc.cmake) - set(CMAKE_CXX_STANDARD 17) if(EXISTS ${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) @@ -21,6 +19,8 @@ if (${MEMORY_SANITIZER_ON}) include (cmake/mem_sanitizer.cmake) endif () +include(cmake/grpc.cmake) + find_program(CCACHE_FOUND ccache) if (CCACHE_FOUND) set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) @@ -58,4 +58,4 @@ add_executable(test_flip_local_client src/test_flip_local_client.cpp) target_link_libraries(test_flip_local_client flip) add_executable(test_flip_server src/test_flip_server.cpp) -target_link_libraries(test_flip_server flip) \ No newline at end of file +target_link_libraries(test_flip_server flip) diff --git a/src/flip/cmake/grpc.cmake b/src/flip/cmake/grpc.cmake new file mode 100644 index 00000000..14df71de --- /dev/null +++ b/src/flip/cmake/grpc.cmake @@ -0,0 +1,57 @@ +find_program(GRPC_CPP_PLUGIN grpc_cpp_plugin) # Get full path to plugin + +function(PROTOBUF_GENERATE_GRPC_CPP SRCS HDRS) + if(NOT ARGN) + message(SEND_ERROR "Error: PROTOBUF_GENERATE_GRPC_CPP() called without any proto files") + return() + endif() + + if(PROTOBUF_GENERATE_CPP_APPEND_PATH) # This variable is common for all types of output. + # Create an include path for each file specified + foreach(FIL ${ARGN}) + get_filename_component(ABS_FIL ${FIL} ABSOLUTE) + get_filename_component(ABS_PATH ${ABS_FIL} PATH) + list(FIND _protobuf_include_path ${ABS_PATH} _contains_already) + if(${_contains_already} EQUAL -1) + list(APPEND _protobuf_include_path -I ${ABS_PATH}) + endif() + endforeach() + else() + set(_protobuf_include_path -I ${CMAKE_CURRENT_SOURCE_DIR}) + endif() + + if(DEFINED PROTOBUF_IMPORT_DIRS) + foreach(DIR ${Protobuf_IMPORT_DIRS}) + get_filename_component(ABS_PATH ${DIR} ABSOLUTE) + list(FIND _protobuf_include_path ${ABS_PATH} _contains_already) + if(${_contains_already} EQUAL -1) + list(APPEND _protobuf_include_path -I ${ABS_PATH}) + endif() + endforeach() + endif() + + set(${SRCS}) + set(${HDRS}) + foreach(FIL ${ARGN}) + get_filename_component(ABS_FIL ${FIL} ABSOLUTE) + get_filename_component(FIL_WE ${FIL} NAME_WE) + + list(APPEND ${SRCS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.cc") + list(APPEND ${HDRS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.h") + + add_custom_command( + OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.cc" + "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.h" + COMMAND ${Protobuf_PROTOC_EXECUTABLE} + ARGS --grpc_out=${CMAKE_CURRENT_BINARY_DIR} + --plugin=protoc-gen-grpc=${GRPC_CPP_PLUGIN} + ${_protobuf_include_path} ${ABS_FIL} + DEPENDS ${ABS_FIL} ${Protobuf_PROTOC_EXECUTABLE} + COMMENT "Running gRPC C++ protocol buffer compiler on ${FIL}" + VERBATIM) + endforeach() + + set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE) + set(${SRCS} ${${SRCS}} PARENT_SCOPE) + set(${HDRS} ${${HDRS}} PARENT_SCOPE) +endfunction() diff --git a/src/flip/cmake/protobuf.cmake b/src/flip/cmake/protobuf.cmake new file mode 100644 index 00000000..a507eaee --- /dev/null +++ b/src/flip/cmake/protobuf.cmake @@ -0,0 +1,77 @@ + +# protobuf_generate +# -------------------------- +# +# Add custom commands to process ``.proto`` files to C++ using protoc and +# GRPC plugin: +# +# protobuf_generate( <*.proto files>) +# +# ``ARGN`` +# ``.proto`` files +# +macro(m_protobuf_generate _target) + message(STATUS "inside protobuf_generate_grpc_cpp") + if(NOT TARGET ${_target}) + message(SEND_ERROR "protobuf_generate requires target as first argument") + return() + endif() + if(NOT ${ARGC} GREATER 1) + message(SEND_ERROR "Error: PROTOBUF_GENERATE_GRPC_CPP() called without any proto files as arguments") + return() + endif() + if(NOT _generated_headers) + set(_generated_headers) + endif() + # set(_protobuf_include_path -I . ) + foreach(FIL ${ARGN}) + get_filename_component(ABS_FIL ${FIL} ABSOLUTE) + message(STATUS "protobuf_generate_grpc_cpp: processing ${ABS_FIL}") + get_filename_component(FIL_WE ${FIL} NAME_WE) + file(RELATIVE_PATH REL_FIL ${CMAKE_CURRENT_SOURCE_DIR}/proto ${ABS_FIL}) + get_filename_component(REL_DIR ${REL_FIL} DIRECTORY) + set(RELFIL_WE "${REL_DIR}/${FIL_WE}") + + set(_GEN_HEADERS + ${_gRPC_PROTO_GENS_DIR}/${RELFIL_WE}.grpc.pb.h; + ${_gRPC_PROTO_GENS_DIR}/${RELFIL_WE}.pb.h +# ${_gRPC_PROTO_GENS_DIR}/${RELFIL_WE}_mock.grpc.pb.h + ) + list(APPEND _generated_headers "${_GEN_HEADERS}") + set(_GEN_SOURCES + ${_gRPC_PROTO_GENS_DIR}/${RELFIL_WE}.grpc.pb.cc + ${_gRPC_PROTO_GENS_DIR}/${RELFIL_WE}.pb.cc + ) + #add generated sources to the sources of _target + target_sources(${_target} PRIVATE ${_GEN_SOURCES}) + set_source_files_properties(${_GEN_SOURCES} ${_GEN_HEADERS} PROPERTIES GENERATED TRUE) + + add_custom_command( + OUTPUT ${_GEN_SOURCES} ${_GEN_HEADERS} + COMMAND ${deps_prefix}/bin/protoc + ARGS --grpc_out=generate_mock_code=false:${_gRPC_PROTO_GENS_DIR} + --cpp_out=${_gRPC_PROTO_GENS_DIR} + --plugin=protoc-gen-grpc=${deps_prefix}/bin/grpc_cpp_plugin + -I . + ${REL_FIL} + DEPENDS ${FIL} + WORKING_DIRECTORY ${_PROTO_IMPORT_DIR} + COMMENT "Running gRPC C++ protocol buffer compiler on ${FIL}" + VERBATIM + ) + message(STATUS "protoc will generate ${_GEN_SOURCES} and ${_GEN_HEADERS}") + #since some of the headers generated by this command are also included in hand-written sources make this command run before the _target + get_property(_sources TARGET ${_target} PROPERTY SOURCES) + set_source_files_properties(${_sources} PROPERTIES OBJECT_DEPENDS "${_GEN_HEADERS}") + + # foreach(_source ${_sources}) + # message(STATUS "setting files ${_source} to depend on ${_GEN_HEADERS}") + # set_source_files_properties(${_source} PROPERTIES OBJECT_DEPENDS "${_GEN_HEADERS}") + # endforeach() + + # #since some of the headers generated by this command also included in hand-written sources make this command run before the _target + # set(_custom_target_name "touch-protoc-${FIL_WE}.proto") + # add_custom_target(${_custom_target_name} touch ${_custom_target_name} DEPENDS ${_GEN_SRCS_CC}) + # add_dependencies(${_target} ${_custom_target_name}) + endforeach() +endmacro() From 2374862c513895ec3bb899cae55dbdb035763135 Mon Sep 17 00:00:00 2001 From: Hari Kadayam Date: Fri, 14 Jun 2019 12:38:51 -0700 Subject: [PATCH 050/385] Updated README with GRPC client/server details --- src/flip/README.md | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/src/flip/README.md b/src/flip/README.md index 7d9fd064..ecfb6b95 100644 --- a/src/flip/README.md +++ b/src/flip/README.md @@ -233,22 +233,41 @@ Returns: If the flip is hit or not. Whether flip is hit or not is immediately kn Flip is a header only framework and hence will be included and compiled along with application binary. It uses a protobuf to serialize the message about how faults can be triggered. The protobuf could be used against any RPCs the application provide. -If application uses GRPC, the grpc definition needs to add the following RPC call to the grpc service proto + +Flip supports an optional GRPC server which can be started using + ```c++ -// Inject a fault rpc -rpc InjectFault (flip.FlipSpec) returns (flip.FlipResponse); +Flip::start_rpc_server() ``` -Flip also supports optional GRPC server which can be started using +If application uses GRPC and if another GRPC server creation is to be avoided, then application can instead add the +following grpc definition to its RPC call to the grpc service proto. ```c++ -Flip::start_rpc_server() +// Inject a fault rpc +rpc InjectFault (flip.FlipSpec) returns (flip.FlipResponse); ``` # Flip Client -Flip needs a client to trigger the faults externally. The exact client depends on which RPC it is integrated with application. +Flip needs a client to trigger the faults externally. At present there are 2 forms of flip client, one GRPC client, which +allows flip faults can be injected remotely from external application. Second form is local flip client, which means +flip will be triggered by the same application binary which is to be fault tested. ## GRPC Client + +### Python GRPC Client +There is python grpc client library through which it can be triggered remotely. To setup the python client, execute +``` +bash ./setup_python_client.sh +``` + +Python library is available under ***src/client/python/flip_rpc_client.py*** + +Libraries are defined in class **FlipRPCClient**. Examples of how to use library is provided by the python script +***src/client/python/flip_client_example.py*** + +### Nodejs GRPC Client + There is a current implementation using GRPC for a project called "NuData/MonstorDB.git" which has nodejs client to inject the fault. Example of grpc service is provided in path "MonstorDB/nodejs-test/test/support/monstor_client/inject_fault.js" and examples of how to use is in "MonstorDB/nodejst-test/test/support/run_grpc_client.js" @@ -264,8 +283,6 @@ await test.do_inject_fault( ) ``` -### Python Client -**TODO:** Write a standalone client which can be used to trigger various faults on different languages. ## Local Client If the code that needs to be fault injected and tested is a library in itself and that there is separate unit tests which runs From 7101dbb1e00a0d1e7bfdf033cf8850b522b79dd1 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 27 Aug 2019 11:12:25 -0700 Subject: [PATCH 051/385] Namespace change in 1.23.0 of grpc. --- include/sds_grpc/server.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/include/sds_grpc/server.h b/include/sds_grpc/server.h index 427548e9..a21c94e4 100644 --- a/include/sds_grpc/server.h +++ b/include/sds_grpc/server.h @@ -23,7 +23,7 @@ namespace sds::grpc { using ::grpc::Server; -using ::grpc::ServerAsyncResponseWriter; +using ::grpc_impl::ServerAsyncResponseWriter; using ::grpc::ServerBuilder; using ::grpc::ServerContext; using ::grpc::ServerCompletionQueue; @@ -100,7 +100,7 @@ class ServerCallData final : public BaseServerCallData { void(TSERVICE*, ::grpc::ServerContext*, TREQUEST*, - ::grpc::ServerAsyncResponseWriter*, + ::grpc_impl::ServerAsyncResponseWriter*, ::grpc::CompletionQueue*, ::grpc::ServerCompletionQueue*, void *)>; @@ -123,7 +123,7 @@ class ServerCallData final : public BaseServerCallData { handle_request_func_(handle_request) { } - ::grpc::ServerAsyncResponseWriter& responder() { + ::grpc_impl::ServerAsyncResponseWriter& responder() { return responder_; } @@ -137,7 +137,7 @@ class ServerCallData final : public BaseServerCallData { TREQUEST request_; TRESPONSE reponse_; - ::grpc::ServerAsyncResponseWriter responder_; + ::grpc_impl::ServerAsyncResponseWriter responder_; request_call_func_t wait_request_func_; handle_call_func_t handle_request_func_; @@ -226,7 +226,7 @@ class GrpcServer : private boost::noncopyable { void(typename TSVC::AsyncService*, ::grpc::ServerContext*, TREQUEST*, - ::grpc::ServerAsyncResponseWriter*, + ::grpc_impl::ServerAsyncResponseWriter*, ::grpc::CompletionQueue*, ::grpc::ServerCompletionQueue*, void *)> request_call_func, From 21a80d9a0ac8b1b43f9524f4f89c48e1ea3f99fb Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 20 Sep 2019 08:30:14 -0700 Subject: [PATCH 052/385] v0.2.0 --- .../client/local/test_flip_local_client.cpp | 20 +++++--- src/flip/lib/flip.hpp | 51 +++++++++++++++++-- src/flip/proto/flip_spec.proto | 5 +- 3 files changed, 64 insertions(+), 12 deletions(-) diff --git a/src/flip/client/local/test_flip_local_client.cpp b/src/flip/client/local/test_flip_local_client.cpp index 69f166f1..0664c59d 100644 --- a/src/flip/client/local/test_flip_local_client.cpp +++ b/src/flip/client/local/test_flip_local_client.cpp @@ -24,20 +24,25 @@ void run_and_validate_noret_flip() { void run_and_validate_ret_flip() { std::string my_vol = "vol1"; + std::string valid_dev_name = "/dev/sda"; std::string unknown_vol = "unknown_vol"; + std::string invalid_dev_name = "/boot/sda"; - auto result = g_flip.get_test_flip("simval_flip", my_vol); + auto result = g_flip.get_test_flip("simval_flip", my_vol, valid_dev_name); assert(result); assert(result.get() == "Simulated error value"); - result = g_flip.get_test_flip("simval_flip", unknown_vol); + result = g_flip.get_test_flip("simval_flip", unknown_vol, valid_dev_name); assert(!result); - result = g_flip.get_test_flip("simval_flip", my_vol); + result = g_flip.get_test_flip("simval_flip", my_vol, invalid_dev_name); + assert(!result); + + result = g_flip.get_test_flip("simval_flip", my_vol, valid_dev_name); assert(result); assert(result.get() == "Simulated error value"); - result = g_flip.get_test_flip("simval_flip", my_vol); + result = g_flip.get_test_flip("simval_flip", my_vol, valid_dev_name); assert(!result); // Not more than 2 } @@ -105,10 +110,11 @@ int main(int argc, char *argv[]) { fclient.inject_noreturn_flip("noret_flip", {cond1}, freq); /* Inject a invalid return action flip */ - FlipCondition cond2; + FlipCondition cond2, cond6; fclient.create_condition("vol_name", flip::Operator::EQUAL, "vol1", &cond2); + fclient.create_condition("dev_name", flip::Operator::REG_EX, "\\/dev\\/", &cond6); freq.set_count(2); freq.set_percent(100); - fclient.inject_retval_flip("simval_flip", {cond2}, freq, "Simulated error value"); + fclient.inject_retval_flip("simval_flip", {cond2, cond6}, freq, "Simulated error value"); /* Inject a delay of 100ms action flip */ FlipCondition cond3, cond4; @@ -130,4 +136,4 @@ int main(int argc, char *argv[]) { run_and_validate_delay_return_flip(); return 0; -} \ No newline at end of file +} diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index 3be2855a..be390735 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -17,6 +17,7 @@ #include #include #include +#include namespace flip { @@ -214,9 +215,44 @@ struct compare_val { } }; -template <> -struct compare_val< const char* > { - bool operator()(const char*& val1, const char*& val2, Operator oper) { +template<> +struct compare_val { + bool operator()(const std::string& val1, const std::string& val2, Operator oper) { + switch (oper) { + case Operator::DONT_CARE: + return true; + + case Operator::EQUAL: + return (val1 == val2); + + case Operator::NOT_EQUAL: + return (val1 != val2); + + case Operator::GREATER_THAN: + return (val1 > val2); + + case Operator::LESS_THAN: + return (val1 < val2); + + case Operator::GREATER_THAN_OR_EQUAL: + return (val1 >= val2); + + case Operator::LESS_THAN_OR_EQUAL: + return (val1 <= val2); + + case Operator::REG_EX: { + const std::regex re(val2); + return (std::sregex_iterator(val1.begin(), val1.end(), re) != std::sregex_iterator()); + } + + default: + return false; + } + } +}; +template<> +struct compare_val { + bool operator()(const char *&val1, const char *&val2, Operator oper) { switch (oper) { case Operator::DONT_CARE: return true; @@ -235,7 +271,14 @@ struct compare_val< const char* > { case Operator::LESS_THAN_OR_EQUAL: return (val1 && val2 && (strcmp(val1, val2) <= 0)) || (!val1 && val2) || (!val1 && !val2); - default: return false; + case Operator::REG_EX: { + const std::regex re(val2); + const std::string v(val1); + return (std::sregex_iterator(v.begin(), v.end(), re) != std::sregex_iterator()); + } + + default: + return false; } } }; diff --git a/src/flip/proto/flip_spec.proto b/src/flip/proto/flip_spec.proto index e58e84e9..e191994f 100644 --- a/src/flip/proto/flip_spec.proto +++ b/src/flip/proto/flip_spec.proto @@ -24,6 +24,9 @@ enum Operator { // Don't care about DONT_CARE = 6; + + // RegEx Pattern + REG_EX = 7; } enum Frequency { @@ -112,4 +115,4 @@ message FlipResponse { bool success = 1; map metadata = 200; -} \ No newline at end of file +} From 05f52dba20566eceeef2fefff75bb9ebccd0d731 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 25 Oct 2019 14:45:37 -0700 Subject: [PATCH 053/385] v0.2.3 - Remove glog dependency --- .../client/local/test_flip_local_client.cpp | 21 ++- src/flip/lib/flip.hpp | 16 +- src/flip/lib/test_flip.cpp | 138 ++++++++++-------- .../flip_rpc_server.cpp/test_flip_server.cpp | 12 +- 4 files changed, 109 insertions(+), 78 deletions(-) diff --git a/src/flip/client/local/test_flip_local_client.cpp b/src/flip/client/local/test_flip_local_client.cpp index 0664c59d..0ef1888e 100644 --- a/src/flip/client/local/test_flip_local_client.cpp +++ b/src/flip/client/local/test_flip_local_client.cpp @@ -7,8 +7,13 @@ #include #include +#include + using namespace flip; +SDS_LOGGING_INIT(flip) +SDS_OPTIONS_ENABLE(logging) + Flip g_flip; void run_and_validate_noret_flip() { @@ -61,7 +66,7 @@ void run_and_validate_delay_flip() { assert(!g_flip.delay_flip("delay_flip", [closure_calls]() {(*closure_calls)++;}, valid_cmd, valid_size_bytes1)); sleep(2); - DCHECK_EQ((*closure_calls).load(), 2); + DEBUG_ASSERT_EQ((*closure_calls).load(), 2); } void run_and_validate_delay_return_flip() { @@ -71,7 +76,7 @@ void run_and_validate_delay_return_flip() { assert(g_flip.get_delay_flip("delay_simval_flip", [closure_calls](std::string error) { (*closure_calls)++; - DCHECK_EQ(error, "Simulated delayed errval"); + DEBUG_ASSERT_EQ(error, "Simulated delayed errval"); }, valid_double)); assert(!g_flip.get_delay_flip("delay_simval_flip", [closure_calls](std::string error) { @@ -80,7 +85,7 @@ void run_and_validate_delay_return_flip() { }, invalid_double)); assert(g_flip.get_delay_flip("delay_simval_flip", [closure_calls](std::string error) { - DCHECK_EQ(error, "Simulated delayed errval"); + DEBUG_ASSERT_EQ(error, "Simulated delayed errval"); (*closure_calls)++; }, valid_double)); @@ -90,16 +95,20 @@ void run_and_validate_delay_return_flip() { }, invalid_double)); assert(!g_flip.get_delay_flip("delay_simval_flip", [closure_calls](std::string error) { - DCHECK_EQ(error, "Simulated delayed errval"); + DEBUG_ASSERT_EQ(error, "Simulated delayed errval"); (*closure_calls)++; - LOG(INFO) << "Called with error = " << error; + LOGINFO("Called with error = {}", error); }, valid_double)); sleep(2); - DCHECK_EQ((*closure_calls).load(), 2); + DEBUG_ASSERT_EQ((*closure_calls).load(), 2); } int main(int argc, char *argv[]) { + SDS_OPTIONS_LOAD(argc, argv, logging) + sds_logging::SetLogger(std::string(argv[0])); + spdlog::set_pattern("[%D %T%z] [%^%l%$] [%n] [%t] %v"); + FlipClient fclient(&g_flip); FlipFrequency freq; diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index be390735..f04486f3 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include @@ -19,6 +19,8 @@ #include #include +SDS_LOGGING_DECL(flip) + namespace flip { template < size_t Index = 0, // start iteration at 0 index @@ -308,7 +310,7 @@ class FlipTimer { auto t = std::make_shared< deadline_timer >(m_svc, delay_us); t->async_wait([this, closure, t](const boost::system::error_code& e) { if (e) { - LOG(ERROR) << "Error in timer routine, message " << e.message(); + LOGERRORMOD(flip, "Error in timer routine, message {}", e.message()); } else { closure(); } @@ -360,7 +362,7 @@ class Flip { // TODO: Add verification to see if the flip is already scheduled, any errors etc.. std::unique_lock< std::shared_mutex > lock(m_mutex); m_flip_specs.emplace(std::pair< std::string, flip_instance >(fspec.flip_name(), inst)); - LOG(INFO) << "Added new fault flip " << fspec.flip_name() << " to the list of flips"; + LOGINFOMOD(flip, "Added new fault flip {} to the list of flips", fspec.flip_name()); //LOG(INFO) << "Flip details:" << inst.to_string(); return true; } @@ -458,7 +460,7 @@ class Flip { return false; // Not a hit auto param = boost::get< delayed_return_param< T > >(ret.get()); - LOG(INFO) << "Returned param delay = " << param.delay_usec << " val = " << param.val; + LOGINFOMOD(flip, "Returned param delay = {} val = {}", param.delay_usec, param.val); m_timer.schedule(boost::posix_time::microseconds(param.delay_usec), [closure, param]() { closure(param.val); }); return true; } @@ -481,7 +483,7 @@ class Flip { // Check if we are subjected to rate limit if (!handle_hits(fspec.flip_frequency(), inst)) { - LOG(INFO) << "Flip " << flip_name << " matches, but it is rate limited"; + LOGINFOMOD(flip, "Flip {} matches, but it is rate limited", flip_name); return boost::none; } @@ -490,10 +492,10 @@ class Flip { if (remain_count == 0) { exec_completed = true; } else if (remain_count < 0) { - LOG(INFO) << "Flip " << flip_name << " matches, but reaches max count"; + LOGINFOMOD(flip, "Flip {} matches, but reaches max count", flip_name); return boost::none; } - LOG(INFO) << "Flip " << flip_name << " matches and hits"; + LOGINFOMOD(flip, "Flip {} matches and hits", flip_name); } boost::variant< T, bool, uint64_t, delayed_return_param< T > > val_ret; diff --git a/src/flip/lib/test_flip.cpp b/src/flip/lib/test_flip.cpp index ac814f5e..7321437f 100644 --- a/src/flip/lib/test_flip.cpp +++ b/src/flip/lib/test_flip.cpp @@ -7,7 +7,12 @@ #include #include -void create_ret_fspec(flip::FlipSpec *fspec) { +#include + +SDS_LOGGING_INIT(flip) +SDS_OPTIONS_ENABLE(logging) + +void create_ret_fspec(flip::FlipSpec* fspec) { *(fspec->mutable_flip_name()) = "ret_fspec"; // Create a new condition and add it to flip spec @@ -23,26 +28,26 @@ void create_ret_fspec(flip::FlipSpec *fspec) { freq->set_percent(100); } -void run_and_validate_ret_flip(flip::Flip *flip) { +void run_and_validate_ret_flip(flip::Flip* flip) { std::string my_coll = "item_shipping"; std::string unknown_coll = "unknown_collection"; - auto result = flip->get_test_flip("ret_fspec", my_coll); + auto result = flip->get_test_flip< std::string >("ret_fspec", my_coll); assert(result); assert(result.get() == "Error simulated value"); - result = flip->get_test_flip("ret_fspec", unknown_coll); + result = flip->get_test_flip< std::string >("ret_fspec", unknown_coll); assert(!result); - result = flip->get_test_flip("ret_fspec", my_coll); + result = flip->get_test_flip< std::string >("ret_fspec", my_coll); assert(result); assert(result.get() == "Error simulated value"); - result = flip->get_test_flip("ret_fspec", my_coll); + result = flip->get_test_flip< std::string >("ret_fspec", my_coll); assert(!result); // Not more than 2 } -void create_check_fspec(flip::FlipSpec *fspec) { +void create_check_fspec(flip::FlipSpec* fspec) { *(fspec->mutable_flip_name()) = "check_fspec"; auto cond = fspec->mutable_conditions()->Add(); @@ -55,7 +60,7 @@ void create_check_fspec(flip::FlipSpec *fspec) { freq->set_percent(100); } -void run_and_validate_check_flip(flip::Flip *flip) { +void run_and_validate_check_flip(flip::Flip* flip) { int valid_cmd = 1; int invalid_cmd = -1; @@ -66,7 +71,7 @@ void run_and_validate_check_flip(flip::Flip *flip) { assert(!flip->test_flip("check_fspec", valid_cmd)); // Not more than 2 } -void create_delay_fspec(flip::FlipSpec *fspec) { +void create_delay_fspec(flip::FlipSpec* fspec) { *(fspec->mutable_flip_name()) = "delay_fspec"; auto cond = fspec->mutable_conditions()->Add(); @@ -80,36 +85,26 @@ void create_delay_fspec(flip::FlipSpec *fspec) { freq->set_percent(100); } -void run_and_validate_delay_flip(flip::Flip *flip) { - int valid_cmd = 2; - int invalid_cmd = -1; - std::shared_ptr< std::atomic > closure_calls = std::make_shared>(0); +void run_and_validate_delay_flip(flip::Flip* flip) { + int valid_cmd = 2; + int invalid_cmd = -1; + std::shared_ptr< std::atomic< int > > closure_calls = std::make_shared< std::atomic< int > >(0); - assert(flip->delay_flip("delay_fspec", [closure_calls]() { - (*closure_calls)++; - }, valid_cmd)); + assert(flip->delay_flip("delay_fspec", [closure_calls]() { (*closure_calls)++; }, valid_cmd)); - assert(!flip->delay_flip("delay_fspec", [closure_calls]() { - (*closure_calls)++; - }, invalid_cmd)); + assert(!flip->delay_flip("delay_fspec", [closure_calls]() { (*closure_calls)++; }, invalid_cmd)); - assert(flip->delay_flip("delay_fspec", [closure_calls]() { - (*closure_calls)++; - }, valid_cmd)); + assert(flip->delay_flip("delay_fspec", [closure_calls]() { (*closure_calls)++; }, valid_cmd)); - assert(!flip->delay_flip("delay_fspec", [closure_calls]() { - (*closure_calls)++; - }, invalid_cmd)); + assert(!flip->delay_flip("delay_fspec", [closure_calls]() { (*closure_calls)++; }, invalid_cmd)); - assert(!flip->delay_flip("delay_fspec", [closure_calls]() { - (*closure_calls)++; - }, valid_cmd)); + assert(!flip->delay_flip("delay_fspec", [closure_calls]() { (*closure_calls)++; }, valid_cmd)); sleep(2); - DCHECK_EQ((*closure_calls).load(), 2); + DEBUG_ASSERT_EQ((*closure_calls).load(), 2); } -void create_delay_ret_fspec(flip::FlipSpec *fspec) { +void create_delay_ret_fspec(flip::FlipSpec* fspec) { *(fspec->mutable_flip_name()) = "delay_ret_fspec"; auto cond = fspec->mutable_conditions()->Add(); @@ -118,46 +113,57 @@ void create_delay_ret_fspec(flip::FlipSpec *fspec) { cond->mutable_value()->set_int_value(2); fspec->mutable_flip_action()->mutable_delay_returns()->set_delay_in_usec(100000); - fspec->mutable_flip_action()->mutable_delay_returns()->mutable_retval()->set_string_value("Delayed error simulated value"); + fspec->mutable_flip_action()->mutable_delay_returns()->mutable_retval()->set_string_value( + "Delayed error simulated value"); auto freq = fspec->mutable_flip_frequency(); freq->set_count(2); freq->set_percent(100); } -void run_and_validate_delay_return_flip(flip::Flip *flip) { - int valid_cmd = 2; - int invalid_cmd = -1; - std::shared_ptr< std::atomic > closure_calls = std::make_shared>(0); - - assert(flip->get_delay_flip("delay_ret_fspec", [closure_calls](std::string error) { - (*closure_calls)++; - DCHECK_EQ(error, "Delayed error simulated value"); - }, valid_cmd)); - - assert(!flip->get_delay_flip("delay_ret_fspec", [closure_calls](std::string error) { - assert(0); - (*closure_calls)++; - }, invalid_cmd)); - - assert(flip->get_delay_flip("delay_ret_fspec", [closure_calls](std::string error) { - DCHECK_EQ(error, "Delayed error simulated value"); - (*closure_calls)++; - }, valid_cmd)); - - assert(!flip->get_delay_flip("delay_ret_fspec", [closure_calls](std::string error) { - assert(0); - (*closure_calls)++; - }, invalid_cmd)); - - assert(!flip->get_delay_flip("delay_ret_fspec", [closure_calls](std::string error) { - DCHECK_EQ(error, "Delayed error simulated value"); - (*closure_calls)++; - LOG(INFO) << "Called with error = " << error; - }, valid_cmd)); +void run_and_validate_delay_return_flip(flip::Flip* flip) { + int valid_cmd = 2; + int invalid_cmd = -1; + std::shared_ptr< std::atomic< int > > closure_calls = std::make_shared< std::atomic< int > >(0); + + assert(flip->get_delay_flip< std::string >("delay_ret_fspec", + [closure_calls](std::string error) { + (*closure_calls)++; + DEBUG_ASSERT_EQ(error, "Delayed error simulated value"); + }, + valid_cmd)); + + assert(!flip->get_delay_flip< std::string >("delay_ret_fspec", + [closure_calls](std::string error) { + assert(0); + (*closure_calls)++; + }, + invalid_cmd)); + + assert(flip->get_delay_flip< std::string >("delay_ret_fspec", + [closure_calls](std::string error) { + DEBUG_ASSERT_EQ(error, "Delayed error simulated value"); + (*closure_calls)++; + }, + valid_cmd)); + + assert(!flip->get_delay_flip< std::string >("delay_ret_fspec", + [closure_calls](std::string error) { + assert(0); + (*closure_calls)++; + }, + invalid_cmd)); + + assert(!flip->get_delay_flip< std::string >("delay_ret_fspec", + [closure_calls](std::string error) { + DEBUG_ASSERT_EQ(error, "Delayed error simulated value"); + (*closure_calls)++; + LOGINFO("Called with error = {}", error); + }, + valid_cmd)); sleep(2); - DCHECK_EQ((*closure_calls).load(), 2); + DEBUG_ASSERT_EQ((*closure_calls).load(), 2); } #if 0 @@ -183,7 +189,11 @@ void create_multi_cond_fspec(flip::FlipSpec *fspec) { } #endif -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { + SDS_OPTIONS_LOAD(argc, argv, logging) + sds_logging::SetLogger(std::string(argv[0])); + spdlog::set_pattern("[%D %T%z] [%^%l%$] [%n] [%t] %v"); + flip::FlipSpec ret_fspec; create_ret_fspec(&ret_fspec); @@ -209,4 +219,4 @@ int main(int argc, char *argv[]) { run_and_validate_delay_return_flip(&flip); return 0; -} \ No newline at end of file +} diff --git a/src/flip/server/flip_rpc_server.cpp/test_flip_server.cpp b/src/flip/server/flip_rpc_server.cpp/test_flip_server.cpp index f8bc7459..79d4a86c 100644 --- a/src/flip/server/flip_rpc_server.cpp/test_flip_server.cpp +++ b/src/flip/server/flip_rpc_server.cpp/test_flip_server.cpp @@ -4,10 +4,20 @@ #include "flip.hpp" +#include + +SDS_LOGGING_INIT(flip) + +SDS_OPTIONS_ENABLE(logging) + int main(int argc, char *argv[]) { + SDS_OPTIONS_LOAD(argc, argv, logging) + sds_logging::SetLogger(std::string(argv[0])); + spdlog::set_pattern("[%D %T%z] [%^%l%$] [%n] [%t] %v"); + flip::Flip f; f.start_rpc_server(); sleep(1000); return 0; -} \ No newline at end of file +} From 749e2923e952cea3be54003e017b988cb1227af0 Mon Sep 17 00:00:00 2001 From: rishabh mittal Date: Fri, 1 Nov 2019 05:03:25 +0530 Subject: [PATCH 054/385] change log type to some messages --- src/flip/lib/flip.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index f04486f3..5c3efbe1 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -483,7 +483,7 @@ class Flip { // Check if we are subjected to rate limit if (!handle_hits(fspec.flip_frequency(), inst)) { - LOGINFOMOD(flip, "Flip {} matches, but it is rate limited", flip_name); + LOGDEBUGMOD(flip, "Flip {} matches, but it is rate limited", flip_name); return boost::none; } @@ -492,7 +492,7 @@ class Flip { if (remain_count == 0) { exec_completed = true; } else if (remain_count < 0) { - LOGINFOMOD(flip, "Flip {} matches, but reaches max count", flip_name); + LOGDEBUGMOD(flip, "Flip {} matches, but reaches max count", flip_name); return boost::none; } LOGINFOMOD(flip, "Flip {} matches and hits", flip_name); From e5ce549807fb7a3c0838165d0cf75412e57ae093 Mon Sep 17 00:00:00 2001 From: rishabh mittal Date: Fri, 1 Nov 2019 05:24:08 +0530 Subject: [PATCH 055/385] change messages to logdebug --- src/flip/lib/flip.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index f04486f3..5c3efbe1 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -483,7 +483,7 @@ class Flip { // Check if we are subjected to rate limit if (!handle_hits(fspec.flip_frequency(), inst)) { - LOGINFOMOD(flip, "Flip {} matches, but it is rate limited", flip_name); + LOGDEBUGMOD(flip, "Flip {} matches, but it is rate limited", flip_name); return boost::none; } @@ -492,7 +492,7 @@ class Flip { if (remain_count == 0) { exec_completed = true; } else if (remain_count < 0) { - LOGINFOMOD(flip, "Flip {} matches, but reaches max count", flip_name); + LOGDEBUGMOD(flip, "Flip {} matches, but reaches max count", flip_name); return boost::none; } LOGINFOMOD(flip, "Flip {} matches and hits", flip_name); From 289ada933dd1fecdafdb348aef47ba17bce3c33e Mon Sep 17 00:00:00 2001 From: Hari Kadayam Date: Wed, 2 Sep 2020 21:59:59 -0700 Subject: [PATCH 056/385] Changed log messages to debug, updated latest clang format --- src/flip/lib/.clang-format | 6 +- src/flip/lib/flip.hpp | 124 +++++++++++++++++++------------------ 2 files changed, 67 insertions(+), 63 deletions(-) diff --git a/src/flip/lib/.clang-format b/src/flip/lib/.clang-format index 6f3aa76a..2f771200 100644 --- a/src/flip/lib/.clang-format +++ b/src/flip/lib/.clang-format @@ -12,14 +12,14 @@ ColumnLimit: 120 AccessModifierOffset: -4 AlignAfterOpenBracket: Align AlignConsecutiveAssignments: false -AlignConsecutiveDeclarations: true +AlignConsecutiveDeclarations: false AlignEscapedNewlines: Right AlignOperands: false AlignTrailingComments: true AllowShortBlocksOnASingleLine: true -AllowShortIfStatementsOnASingleLine: false +AllowShortIfStatementsOnASingleLine: true AllowShortBlocksOnASingleLine: true -AllowShortCaseLabelsOnASingleLine: true +AllowShortCaseLabelsOnASingleLine: false # AllowShortFunctionsOnASingleLine: InlineOnly # AllowShortLoopsOnASingleLine: false AlwaysBreakAfterReturnType: None diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index 5c3efbe1..54a71948 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -58,14 +58,14 @@ struct flip_instance { std::string to_string() const { std::stringstream ss; - ss << "\n---------------------------" << m_fspec.flip_name() << "-----------------------\n"; + ss << "\n---------------------------" << m_fspec.flip_name() << "-----------------------\n"; ss << "Hitcount: " << m_hit_count << "\n"; ss << "Remaining count: " << m_remain_exec_count << "\n"; ss << m_fspec.flip_frequency().DebugString(); ss << m_fspec.flip_action().DebugString(); ss << "Conditions: [\n"; auto i = 1; - for (const auto& cond: m_fspec.conditions()) { + for (const auto& cond : m_fspec.conditions()) { ss << std::to_string(i) << ") " << Operator_Name(cond.oper()) << " => " << cond.value().DebugString(); ++i; } @@ -74,9 +74,9 @@ struct flip_instance { return ss.str(); } - FlipSpec m_fspec; + FlipSpec m_fspec; std::atomic< uint32_t > m_hit_count; - std::atomic< int32_t > m_remain_exec_count; + std::atomic< int32_t > m_remain_exec_count; }; /****************************** Proto Param to Value converter ******************************/ @@ -137,7 +137,7 @@ struct val_converter< bool > { template < typename T > struct delayed_return_param { uint64_t delay_usec; - T val; + T val; }; template < typename T > @@ -198,27 +198,35 @@ template < typename T > struct compare_val { bool operator()(const T& val1, const T& val2, Operator oper) { switch (oper) { - case Operator::DONT_CARE: return true; + case Operator::DONT_CARE: + return true; - case Operator::EQUAL: return (val1 == val2); + case Operator::EQUAL: + return (val1 == val2); - case Operator::NOT_EQUAL: return (val1 != val2); + case Operator::NOT_EQUAL: + return (val1 != val2); - case Operator::GREATER_THAN: return (val1 > val2); + case Operator::GREATER_THAN: + return (val1 > val2); - case Operator::LESS_THAN: return (val1 < val2); + case Operator::LESS_THAN: + return (val1 < val2); - case Operator::GREATER_THAN_OR_EQUAL: return (val1 >= val2); + case Operator::GREATER_THAN_OR_EQUAL: + return (val1 >= val2); - case Operator::LESS_THAN_OR_EQUAL: return (val1 <= val2); + case Operator::LESS_THAN_OR_EQUAL: + return (val1 <= val2); - default: return false; + default: + return false; } } }; -template<> -struct compare_val { +template <> +struct compare_val< std::string > { bool operator()(const std::string& val1, const std::string& val2, Operator oper) { switch (oper) { case Operator::DONT_CARE: @@ -252,20 +260,24 @@ struct compare_val { } } }; -template<> -struct compare_val { - bool operator()(const char *&val1, const char *&val2, Operator oper) { +template <> +struct compare_val< const char* > { + bool operator()(const char*& val1, const char*& val2, Operator oper) { switch (oper) { - case Operator::DONT_CARE: return true; + case Operator::DONT_CARE: + return true; - case Operator::EQUAL: return (val1 && val2 && (strcmp(val1, val2) == 0)) || (!val1 && !val2); + case Operator::EQUAL: + return (val1 && val2 && (strcmp(val1, val2) == 0)) || (!val1 && !val2); case Operator::NOT_EQUAL: return (val1 && val2 && (strcmp(val1, val2) != 0)) || (!val1 && val2) || (val1 && !val2); - case Operator::GREATER_THAN: return (val1 && val2 && (strcmp(val1, val2) > 0)) || (val1 && !val2); + case Operator::GREATER_THAN: + return (val1 && val2 && (strcmp(val1, val2) > 0)) || (val1 && !val2); - case Operator::LESS_THAN: return (val1 && val2 && (strcmp(val1, val2) < 0)) || (!val1 && val2); + case Operator::LESS_THAN: + return (val1 && val2 && (strcmp(val1, val2) < 0)) || (!val1 && val2); case Operator::GREATER_THAN_OR_EQUAL: return (val1 && val2 && (strcmp(val1, val2) >= 0)) || (val1 && !val2) || (!val1 && !val2); @@ -327,10 +339,10 @@ class FlipTimer { } private: - io_service m_svc; - std::unique_ptr< io_work > m_work; - std::mutex m_thr_mutex; - int32_t m_timer_count; + io_service m_svc; + std::unique_ptr< io_work > m_work; + std::mutex m_thr_mutex; + int32_t m_timer_count; std::unique_ptr< std::thread > m_timer_thread; }; @@ -362,8 +374,8 @@ class Flip { // TODO: Add verification to see if the flip is already scheduled, any errors etc.. std::unique_lock< std::shared_mutex > lock(m_mutex); m_flip_specs.emplace(std::pair< std::string, flip_instance >(fspec.flip_name(), inst)); - LOGINFOMOD(flip, "Added new fault flip {} to the list of flips", fspec.flip_name()); - //LOG(INFO) << "Flip details:" << inst.to_string(); + LOGDEBUGMOD(flip, "Added new fault flip {} to the list of flips", fspec.flip_name()); + // LOG(INFO) << "Flip details:" << inst.to_string(); return true; } @@ -419,31 +431,26 @@ class Flip { template < class... Args > bool test_flip(std::string flip_name, Args&&... args) { - if (!m_flip_enabled) - return false; + if (!m_flip_enabled) return false; auto ret = __test_flip< bool, TEST_ONLY >(flip_name, std::forward< Args >(args)...); return (ret != boost::none); } template < typename T, class... Args > boost::optional< T > get_test_flip(std::string flip_name, Args&&... args) { - if (!m_flip_enabled) - return boost::none; + if (!m_flip_enabled) return boost::none; auto ret = __test_flip< T, RETURN_VAL >(flip_name, std::forward< Args >(args)...); - if (ret == boost::none) - return boost::none; + if (ret == boost::none) return boost::none; return boost::optional< T >(boost::get< T >(ret.get())); } template < class... Args > bool delay_flip(std::string flip_name, const std::function< void() >& closure, Args&&... args) { - if (!m_flip_enabled) - return false; + if (!m_flip_enabled) return false; auto ret = __test_flip< bool, SET_DELAY >(flip_name, std::forward< Args >(args)...); - if (ret == boost::none) - return false; // Not a hit + if (ret == boost::none) return false; // Not a hit uint64_t delay_usec = boost::get< uint64_t >(ret.get()); m_timer.schedule(boost::posix_time::microseconds(delay_usec), closure); @@ -452,15 +459,13 @@ class Flip { template < typename T, class... Args > bool get_delay_flip(std::string flip_name, const std::function< void(T) >& closure, Args&&... args) { - if (!m_flip_enabled) - return false; + if (!m_flip_enabled) return false; auto ret = __test_flip< T, DELAYED_RETURN >(flip_name, std::forward< Args >(args)...); - if (ret == boost::none) - return false; // Not a hit + if (ret == boost::none) return false; // Not a hit auto param = boost::get< delayed_return_param< T > >(ret.get()); - LOGINFOMOD(flip, "Returned param delay = {} val = {}", param.delay_usec, param.val); + LOGDEBUGMOD(flip, "Returned param delay = {} val = {}", param.delay_usec, param.val); m_timer.schedule(boost::posix_time::microseconds(param.delay_usec), [closure, param]() { closure(param.val); }); return true; } @@ -469,7 +474,7 @@ class Flip { template < typename T, int ActionType, class... Args > boost::optional< boost::variant< T, bool, uint64_t, delayed_return_param< T > > > __test_flip(std::string flip_name, Args&&... args) { - bool exec_completed = false; // If all the exec for the flip is completed. + bool exec_completed = false; // If all the exec for the flip is completed. flip_instance* inst = nullptr; { @@ -495,7 +500,7 @@ class Flip { LOGDEBUGMOD(flip, "Flip {} matches, but reaches max count", flip_name); return boost::none; } - LOGINFOMOD(flip, "Flip {} matches and hits", flip_name); + LOGDEBUGMOD(flip, "Flip {} matches and hits", flip_name); } boost::variant< T, bool, uint64_t, delayed_return_param< T > > val_ret; @@ -514,11 +519,13 @@ class Flip { val_ret = true; break; - case FlipAction::kDelays: val_ret = inst->m_fspec.flip_action().delays().delay_in_usec(); break; + case FlipAction::kDelays: + val_ret = inst->m_fspec.flip_action().delays().delay_in_usec(); + break; case FlipAction::kDelayReturns: if (ActionType == DELAYED_RETURN) { - auto& flip_dr = inst->m_fspec.flip_action().delay_returns(); + auto& flip_dr = inst->m_fspec.flip_action().delay_returns(); delayed_return_param< T > dr; dr.delay_usec = flip_dr.delay_in_usec(); dr.val = val_converter< T >()(flip_dr.retval()); @@ -528,15 +535,14 @@ class Flip { } break; - default: val_ret = true; + default: + val_ret = true; } if (exec_completed) { // If we completed the execution, need to remove them std::unique_lock< std::shared_mutex > lock(m_mutex); - if (inst->m_remain_exec_count.load(std::memory_order_relaxed) == 0) { - m_flip_specs.erase(flip_name); - } + if (inst->m_remain_exec_count.load(std::memory_order_relaxed) == 0) { m_flip_specs.erase(flip_name); } } return val_ret; } @@ -552,12 +558,10 @@ class Flip { // Check for all the condition match std::tuple< Args... > arglist(std::forward< Args >(args)...); - auto i = 0U; - bool matched = true; + auto i = 0U; + bool matched = true; for_each(arglist, [this, fspec, &i, &matched](auto& v) { - if (!condition_matches(v, fspec.conditions()[i++])) { - matched = false; - } + if (!condition_matches(v, fspec.conditions()[i++])) { matched = false; } }); // One or more conditions does not match. @@ -645,10 +649,10 @@ class Flip { #endif private: std::multimap< std::string, flip_instance, flip_name_compare > m_flip_specs; - std::shared_mutex m_mutex; - bool m_flip_enabled; - FlipTimer m_timer; - std::unique_ptr< std::thread > m_flip_server_thread; + std::shared_mutex m_mutex; + bool m_flip_enabled; + FlipTimer m_timer; + std::unique_ptr< std::thread > m_flip_server_thread; }; class FlipClient { From de34328bb9352177750f7dbd1a036aff9fe87dd6 Mon Sep 17 00:00:00 2001 From: Hari Kadayam Date: Mon, 28 Sep 2020 22:50:51 -0700 Subject: [PATCH 057/385] SDSTOR-3331 Allow consumer of flip to define their own timer if need be --- src/flip/lib/flip.hpp | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index 54a71948..2147be9a 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -301,22 +301,27 @@ using io_service = boost::asio::io_service; using deadline_timer = boost::asio::deadline_timer; using io_work = boost::asio::io_service::work; -class FlipTimer { +class FlipTimerBase { public: - FlipTimer() : m_timer_count(0) {} - ~FlipTimer() { + virtual void schedule(boost::posix_time::time_duration delay_us, const std::function< void() >& closure) = 0; +}; + +class FlipTimerAsio : public FlipTimerBase { +public: + FlipTimerAsio() : m_timer_count(0) {} + ~FlipTimerAsio() { if (m_timer_thread != nullptr) { m_work.reset(); m_timer_thread->join(); } } - void schedule(boost::posix_time::time_duration delay_us, const std::function< void() >& closure) { + void schedule(boost::posix_time::time_duration delay_us, const std::function< void() >& closure) override { std::unique_lock< std::mutex > lk(m_thr_mutex); ++m_timer_count; if (m_work == nullptr) { m_work = std::make_unique< io_work >(m_svc); - m_timer_thread = std::make_unique< std::thread >(std::bind(&FlipTimer::timer_thr, this)); + m_timer_thread = std::make_unique< std::thread >(std::bind(&FlipTimerAsio::timer_thr, this)); } auto t = std::make_shared< deadline_timer >(m_svc, delay_us); @@ -373,6 +378,12 @@ class Flip { // TODO: Add verification to see if the flip is already scheduled, any errors etc.. std::unique_lock< std::shared_mutex > lock(m_mutex); + + // Create a timer instance only when we have delays/delayreturns flip added + auto action_type = fspec.flip_action().action_case(); + if ((action_type == FlipAction::kDelays) || (action_type == FlipAction::kDelayReturns)) { + if (m_timer == nullptr) { m_timer = std::make_unique< FlipTimerAsio >(); } + } m_flip_specs.emplace(std::pair< std::string, flip_instance >(fspec.flip_name(), inst)); LOGDEBUGMOD(flip, "Added new fault flip {} to the list of flips", fspec.flip_name()); // LOG(INFO) << "Flip details:" << inst.to_string(); @@ -453,7 +464,7 @@ class Flip { if (ret == boost::none) return false; // Not a hit uint64_t delay_usec = boost::get< uint64_t >(ret.get()); - m_timer.schedule(boost::posix_time::microseconds(delay_usec), closure); + get_timer().schedule(boost::posix_time::microseconds(delay_usec), closure); return true; } @@ -466,10 +477,16 @@ class Flip { auto param = boost::get< delayed_return_param< T > >(ret.get()); LOGDEBUGMOD(flip, "Returned param delay = {} val = {}", param.delay_usec, param.val); - m_timer.schedule(boost::posix_time::microseconds(param.delay_usec), [closure, param]() { closure(param.val); }); + get_timer().schedule(boost::posix_time::microseconds(param.delay_usec), + [closure, param]() { closure(param.val); }); return true; } + void override_timer(std::unique_ptr< FlipTimerBase > t) { + std::unique_lock< std::shared_mutex > lock(m_mutex); + m_timer = std::move(t); + } + private: template < typename T, int ActionType, class... Args > boost::optional< boost::variant< T, bool, uint64_t, delayed_return_param< T > > > __test_flip(std::string flip_name, @@ -588,6 +605,8 @@ class Flip { } } + FlipTimerBase& get_timer() { return *m_timer; } + #if 0 template< typename T > bool compare_val(T &val1, T &val2, Operator oper) { @@ -651,7 +670,7 @@ class Flip { std::multimap< std::string, flip_instance, flip_name_compare > m_flip_specs; std::shared_mutex m_mutex; bool m_flip_enabled; - FlipTimer m_timer; + std::unique_ptr< FlipTimerBase > m_timer; std::unique_ptr< std::thread > m_flip_server_thread; }; From 20b82afceb7802bdb9816a09995af48485ff4e22 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 7 Apr 2021 23:38:13 +0000 Subject: [PATCH 058/385] Start v2.x with updated grpc --- include/sds_grpc/server.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/include/sds_grpc/server.h b/include/sds_grpc/server.h index a21c94e4..427548e9 100644 --- a/include/sds_grpc/server.h +++ b/include/sds_grpc/server.h @@ -23,7 +23,7 @@ namespace sds::grpc { using ::grpc::Server; -using ::grpc_impl::ServerAsyncResponseWriter; +using ::grpc::ServerAsyncResponseWriter; using ::grpc::ServerBuilder; using ::grpc::ServerContext; using ::grpc::ServerCompletionQueue; @@ -100,7 +100,7 @@ class ServerCallData final : public BaseServerCallData { void(TSERVICE*, ::grpc::ServerContext*, TREQUEST*, - ::grpc_impl::ServerAsyncResponseWriter*, + ::grpc::ServerAsyncResponseWriter*, ::grpc::CompletionQueue*, ::grpc::ServerCompletionQueue*, void *)>; @@ -123,7 +123,7 @@ class ServerCallData final : public BaseServerCallData { handle_request_func_(handle_request) { } - ::grpc_impl::ServerAsyncResponseWriter& responder() { + ::grpc::ServerAsyncResponseWriter& responder() { return responder_; } @@ -137,7 +137,7 @@ class ServerCallData final : public BaseServerCallData { TREQUEST request_; TRESPONSE reponse_; - ::grpc_impl::ServerAsyncResponseWriter responder_; + ::grpc::ServerAsyncResponseWriter responder_; request_call_func_t wait_request_func_; handle_call_func_t handle_request_func_; @@ -226,7 +226,7 @@ class GrpcServer : private boost::noncopyable { void(typename TSVC::AsyncService*, ::grpc::ServerContext*, TREQUEST*, - ::grpc_impl::ServerAsyncResponseWriter*, + ::grpc::ServerAsyncResponseWriter*, ::grpc::CompletionQueue*, ::grpc::ServerCompletionQueue*, void *)> request_call_func, From 0716cb1f0c93438a52f9ec2383a271031b053023 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 10 May 2021 19:50:50 +0000 Subject: [PATCH 059/385] Set thread name for gRPC client and server. --- include/sds_grpc/client.h | 199 +++++++++------------------ include/sds_grpc/server.h | 175 ++++++++--------------- include/sds_grpc/utils.h | 4 +- lib/client.cpp | 93 +++++-------- lib/server.cpp | 64 ++++----- lib/utils.cpp | 13 +- tests/function/echo_async_client.cpp | 63 +++------ tests/function/echo_server.cpp | 34 ++--- tests/function/echo_sync_client.cpp | 54 +++----- 9 files changed, 243 insertions(+), 456 deletions(-) diff --git a/include/sds_grpc/client.h b/include/sds_grpc/client.h index ec8413f8..bf8f472e 100644 --- a/include/sds_grpc/client.h +++ b/include/sds_grpc/client.h @@ -35,67 +35,53 @@ using namespace ::std::chrono; * A interface for handling gRPC async response */ class ClientCallMethod : private boost::noncopyable { - public: +public: virtual ~ClientCallMethod() {} - virtual void handle_response(bool ok=true) = 0; + virtual void handle_response(bool ok = true) = 0; }; - /** * The specialized 'ClientCallMethod' per gRPC call, it stores * the response handler function * */ -template +template < typename TREQUEST, typename TREPLY > class ClientCallData final : public ClientCallMethod { - using handle_response_cb_t = std::function< - void(TREPLY&, ::grpc::Status& status)>; - - using ResponseReaderType = std::unique_ptr< - ::grpc::ClientAsyncResponseReaderInterface>; + using handle_response_cb_t = std::function< void(TREPLY&, ::grpc::Status& status) >; - private: + using ResponseReaderType = std::unique_ptr<::grpc::ClientAsyncResponseReaderInterface< TREPLY > >; +private: /* Allow GrpcAsyncClient and its inner classes to use * ClientCallData. */ friend class GrpcAsyncClient; - ClientCallData(handle_response_cb_t handle_response_cb) - : handle_response_cb_(handle_response_cb) { } + ClientCallData(handle_response_cb_t handle_response_cb) : handle_response_cb_(handle_response_cb) {} // TODO: support time in any time unit -- lhuang8 void set_deadline(uint32_t seconds) { - system_clock::time_point deadline = system_clock::now() + - std::chrono::seconds(seconds); + system_clock::time_point deadline = system_clock::now() + std::chrono::seconds(seconds); context_.set_deadline(deadline); } - ResponseReaderType& responder_reader() { - return response_reader_; - } + ResponseReaderType& responder_reader() { return response_reader_; } - Status & status() { - return status_; - } + Status& status() { return status_; } - TREPLY & reply() { - return reply_; - } + TREPLY& reply() { return reply_; } - ClientContext & context() { - return context_; - } + ClientContext& context() { return context_; } - virtual void handle_response([[maybe_unused]] bool ok=true) override { + virtual void handle_response([[maybe_unused]] bool ok = true) override { // For unary call, ok is always true, `status_` will indicate error // if there are any. handle_response_cb_(reply_, status_); } - private: +private: handle_response_cb_t handle_response_cb_; TREPLY reply_; ClientContext context_; @@ -103,7 +89,6 @@ class ClientCallData final : public ClientCallMethod { ResponseReaderType response_reader_; }; - /** * A GrpcBaseClient takes care of establish a channel to grpc * server. The channel can be used by any number of grpc @@ -111,47 +96,39 @@ class ClientCallData final : public ClientCallMethod { * */ class GrpcBaseClient { - protected: +protected: const std::string server_addr_; const std::string target_domain_; const std::string ssl_cert_; - std::shared_ptr<::grpc::ChannelInterface> channel_; + std::shared_ptr<::grpc::ChannelInterface > channel_; - public: - GrpcBaseClient(const std::string& server_addr, - const std::string& target_domain = "", - const std::string& ssl_cert = "") - : server_addr_(server_addr), - target_domain_(target_domain), - ssl_cert_(ssl_cert) { - } +public: + GrpcBaseClient(const std::string& server_addr, const std::string& target_domain = "", + const std::string& ssl_cert = "") : + server_addr_(server_addr), target_domain_(target_domain), ssl_cert_(ssl_cert) {} virtual ~GrpcBaseClient() = default; virtual bool init(); virtual bool is_connection_ready(); - private: +private: virtual bool init_channel(); virtual bool load_ssl_cert(const std::string& ssl_cert, std::string& content); }; - class GrpcSyncClient : public GrpcBaseClient { - public: - +public: using GrpcBaseClient::GrpcBaseClient; - template - std::unique_ptr MakeStub() { + template < typename TSERVICE > + std::unique_ptr< typename TSERVICE::StubInterface > MakeStub() { return TSERVICE::NewStub(channel_); } }; - - /** * One GrpcBaseClient can have multiple stub * @@ -164,47 +141,36 @@ class GrpcSyncClient : public GrpcBaseClient { */ class GrpcAyncClientWorker final { - enum class State { - VOID, - INIT, - RUNNING, - SHUTTING_DOWN, - TERMINATED - }; - - public: + enum class State { VOID, INIT, RUNNING, SHUTTING_DOWN, TERMINATED }; - using UPtr = std::unique_ptr; +public: + using UPtr = std::unique_ptr< GrpcAyncClientWorker >; GrpcAyncClientWorker(); ~GrpcAyncClientWorker(); - bool run(uint32_t num_threads); - CompletionQueue& cq() { - return completion_queue_; - } + CompletionQueue& cq() { return completion_queue_; } /** * Create a GrpcAyncClientWorker. * */ - static bool create_worker(const char * name, int num_thread); + static bool create_worker(const char* name, int num_thread); /** * * Get a pointer of GrpcAyncClientWorker by name. */ - static GrpcAyncClientWorker * get_worker(const char * name); + static GrpcAyncClientWorker* get_worker(const char* name); /** * Must be called explicitly before program exit if any worker created. */ static void shutdown_all(); - private: - +private: /* * Shutdown CompletionQueue and threads. * @@ -216,21 +182,17 @@ class GrpcAyncClientWorker final { void async_complete_rpc(); static std::mutex mutex_workers; - static std::unordered_map workers; + static std::unordered_map< const char*, GrpcAyncClientWorker::UPtr > workers; State state_ = State::VOID; CompletionQueue completion_queue_; - std::list> threads_; - + std::list< std::shared_ptr< std::thread > > threads_; }; - class GrpcAsyncClient : public GrpcBaseClient { - public: - - template - using StubPtr = std::unique_ptr; - +public: + template < typename TSERVICE > + using StubPtr = std::unique_ptr< typename TSERVICE::StubInterface >; /** * AsyncStub is a wrapper of generated service stub. @@ -242,32 +204,24 @@ class GrpcAsyncClient : public GrpcBaseClient { * Please use GrpcAsyncClient::make_stub() to create AsyncStub. * */ - template + template < typename TSERVICE > struct AsyncStub { - using UPtr = std::unique_ptr; + using UPtr = std::unique_ptr< AsyncStub >; - AsyncStub(StubPtr stub, GrpcAyncClientWorker * worker) : - stub_(std::move(stub)), worker_(worker) { - } + AsyncStub(StubPtr< TSERVICE > stub, GrpcAyncClientWorker* worker) : stub_(std::move(stub)), worker_(worker) {} using stub_t = typename TSERVICE::StubInterface; /* unary call helper */ - template - using unary_call_return_t = - std::unique_ptr< - ::grpc::ClientAsyncResponseReaderInterface>; - - template - using unary_call_t = - unary_call_return_t (stub_t::*) ( - ::grpc::ClientContext*, - const TREQUEST&, - ::grpc::CompletionQueue*); - - template - using unary_callback_t = - std::function; + template < typename TRESPONSE > + using unary_call_return_t = std::unique_ptr<::grpc::ClientAsyncResponseReaderInterface< TRESPONSE > >; + + template < typename TREQUEST, typename TRESPONSE > + using unary_call_t = unary_call_return_t< TRESPONSE > (stub_t::*)(::grpc::ClientContext*, const TREQUEST&, + ::grpc::CompletionQueue*); + + template < typename TREQUEST, typename TRESPONSE > + using unary_callback_t = std::function< void(TRESPONSE&, ::grpc::Status& status) >; /** * Make a unary call. @@ -288,13 +242,11 @@ class GrpcAsyncClient : public GrpcBaseClient { * indicates the error code and error message. * */ - template - void call_unary( - const TREQUEST& request, - unary_call_t call, - unary_callback_t callback) { + template < typename TREQUEST, typename TRESPONSE > + void call_unary(const TREQUEST& request, unary_call_t< TREQUEST, TRESPONSE > call, + unary_callback_t< TREQUEST, TRESPONSE > callback) { - auto data = new ClientCallData(callback); + auto data = new ClientCallData< TREQUEST, TRESPONSE >(callback); // Note that async unary RPCs don't post a CQ tag in call data->responder_reader() = (stub_.get()->*call)(&data->context(), request, cq()); // CQ tag posted here @@ -303,30 +255,21 @@ class GrpcAsyncClient : public GrpcBaseClient { return; } + StubPtr< TSERVICE > stub_; + GrpcAyncClientWorker* worker_; - StubPtr stub_; - GrpcAyncClientWorker * worker_; - - const StubPtr& stub() { - return stub_; - } - - CompletionQueue* cq() { - return &worker_->cq(); - } + const StubPtr< TSERVICE >& stub() { return stub_; } + CompletionQueue* cq() { return &worker_->cq(); } }; - - template + template < typename T, typename... Ts > static auto make(Ts&&... params) { - std::unique_ptr ret; + std::unique_ptr< T > ret; - if (!std::is_base_of::value) { - return ret; - } + if (!std::is_base_of< GrpcAsyncClient, T >::value) { return ret; } - ret = std::make_unique(std::forward(params)...); + ret = std::make_unique< T >(std::forward< Ts >(params)...); if (!ret->init()) { ret.reset(nullptr); return ret; @@ -335,10 +278,10 @@ class GrpcAsyncClient : public GrpcBaseClient { return ret; } - template - auto make_stub(const char * worker) { + template < typename TSERVICE > + auto make_stub(const char* worker) { - typename AsyncStub::UPtr ret; + typename AsyncStub< TSERVICE >::UPtr ret; auto w = GrpcAyncClientWorker::get_worker(worker); BOOST_ASSERT(w); @@ -347,21 +290,15 @@ class GrpcAsyncClient : public GrpcBaseClient { } auto stub = TSERVICE::NewStub(channel_); - ret = std::make_unique>(std::move(stub), w); + ret = std::make_unique< AsyncStub< TSERVICE > >(std::move(stub), w); return ret; } - GrpcAsyncClient( - const std::string& server_addr, - const std::string& target_domain = "", - const std::string& ssl_cert = "") - : GrpcBaseClient(server_addr, target_domain, ssl_cert) { - } - - virtual ~GrpcAsyncClient() { - } - + GrpcAsyncClient(const std::string& server_addr, const std::string& target_domain = "", + const std::string& ssl_cert = "") : + GrpcBaseClient(server_addr, target_domain, ssl_cert) {} + virtual ~GrpcAsyncClient() {} }; } // end of namespace sds::grpc diff --git a/include/sds_grpc/server.h b/include/sds_grpc/server.h index 427548e9..5c5b6d79 100644 --- a/include/sds_grpc/server.h +++ b/include/sds_grpc/server.h @@ -4,7 +4,6 @@ * Created on: Sep 19, 2018 */ - #pragma once #include @@ -19,32 +18,26 @@ #include #include "utils.h" - namespace sds::grpc { using ::grpc::Server; using ::grpc::ServerAsyncResponseWriter; using ::grpc::ServerBuilder; -using ::grpc::ServerContext; using ::grpc::ServerCompletionQueue; +using ::grpc::ServerContext; using ::grpc::Status; - - /** * Defines the life cycle of handling a gRPC call. * */ class BaseServerCallData { - public: +public: enum CallStatus { CREATE, PROCESS, FINISH }; - CallStatus& status() { - return status_; - } - - public: + CallStatus& status() { return status_; } +public: /** * During the life cycle of this object, this method should be called * 3 times with different status: @@ -60,12 +53,10 @@ class BaseServerCallData { * - FINISH is for destroy this object, gRPC server has sent the * appropriate signals to the client to end the call. */ - void proceed(bool ok=true); - - protected: + void proceed(bool ok = true); - BaseServerCallData() : status_(CREATE) { - } +protected: + BaseServerCallData() : status_(CREATE) {} virtual ~BaseServerCallData() {} @@ -87,131 +78,97 @@ class BaseServerCallData { CallStatus status_; }; - /** * Each instance only handles one request, after that it will be destroyed; * a new instance will be created automatically for handling next request. * */ -template +template < typename TSERVICE, typename TREQUEST, typename TRESPONSE > class ServerCallData final : public BaseServerCallData { - using request_call_func_t = std::function< - void(TSERVICE*, - ::grpc::ServerContext*, - TREQUEST*, - ::grpc::ServerAsyncResponseWriter*, - ::grpc::CompletionQueue*, - ::grpc::ServerCompletionQueue*, - void *)>; + using request_call_func_t = std::function< void(TSERVICE*, ::grpc::ServerContext*, TREQUEST*, + ::grpc::ServerAsyncResponseWriter< TRESPONSE >*, + ::grpc::CompletionQueue*, ::grpc::ServerCompletionQueue*, void*) >; - using handle_call_func_t = std::function< - ::grpc::Status(TREQUEST&, TRESPONSE&)>; + using handle_call_func_t = std::function<::grpc::Status(TREQUEST&, TRESPONSE&) >; - using T = ServerCallData; + using T = ServerCallData< TSERVICE, TREQUEST, TRESPONSE >; - private: +private: friend class GrpcServer; - ServerCallData(TSERVICE * service, - ::grpc::ServerCompletionQueue *cq, - request_call_func_t wait_request, - handle_call_func_t handle_request): - BaseServerCallData(), - service_(service), cq_(cq), responder_(&context_), - wait_request_func_(wait_request), - handle_request_func_(handle_request) { - } - - ::grpc::ServerAsyncResponseWriter& responder() { - return responder_; - } + ServerCallData(TSERVICE* service, ::grpc::ServerCompletionQueue* cq, request_call_func_t wait_request, + handle_call_func_t handle_request) : + BaseServerCallData(), + service_(service), + cq_(cq), + responder_(&context_), + wait_request_func_(wait_request), + handle_request_func_(handle_request) {} - protected: + ::grpc::ServerAsyncResponseWriter< TRESPONSE >& responder() { return responder_; } +protected: ServerContext context_; - TSERVICE * service_; + TSERVICE* service_; // The producer-consumer queue where for asynchronous server notifications. ::grpc::ServerCompletionQueue* cq_; TREQUEST request_; TRESPONSE reponse_; - ::grpc::ServerAsyncResponseWriter responder_; + ::grpc::ServerAsyncResponseWriter< TRESPONSE > responder_; request_call_func_t wait_request_func_; handle_call_func_t handle_request_func_; - void do_create() { - wait_request_func_(service_, &context_, &request_, &responder_, - cq_, cq_, this); - } + void do_create() { wait_request_func_(service_, &context_, &request_, &responder_, cq_, cq_, this); } void do_process() { - (new T(service_, cq_, - wait_request_func_, handle_request_func_))->proceed(); - //LOGDEBUGMOD(GRPC, "receive {}", request_.GetTypeName()); + (new T(service_, cq_, wait_request_func_, handle_request_func_))->proceed(); + // LOGDEBUGMOD(GRPC, "receive {}", request_.GetTypeName()); ::grpc::Status status = handle_request_func_(request_, reponse_); responder_.Finish(reponse_, status, this); } - }; - - class GrpcServer : private boost::noncopyable { - enum State { - VOID, - INITED, - RUNNING, - SHUTTING_DOWN, - TERMINATED - }; + enum State { VOID, INITED, RUNNING, SHUTTING_DOWN, TERMINATED }; - private: +private: GrpcServer(); - bool init(const std::string& listen_addr, uint32_t threads, - const std::string& ssl_key, const std::string& ssl_cert); + bool init(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, + const std::string& ssl_cert); - public: +public: virtual ~GrpcServer(); /** * Create a new GrpcServer instance and initialize it. */ - static GrpcServer* make(const std::string& listen_addr, - uint32_t threads=1, - const std::string& ssl_key="", - const std::string& ssl_cert=""); + static GrpcServer* make(const std::string& listen_addr, uint32_t threads = 1, const std::string& ssl_key = "", + const std::string& ssl_cert = ""); bool run(); void shutdown(); - bool is_terminated() { - return state_ == State::TERMINATED; - } + bool is_terminated() { return state_ == State::TERMINATED; } - ::grpc::ServerCompletionQueue * completion_queue() { - return cq_.get(); - } + ::grpc::ServerCompletionQueue* completion_queue() { return cq_.get(); } - template + template < typename TSVC > bool register_async_service() { - BOOST_ASSERT_MSG(State::INITED == state_, - "register service in non-INITED state"); + BOOST_ASSERT_MSG(State::INITED == state_, "register service in non-INITED state"); auto name = TSVC::service_full_name(); - BOOST_ASSERT_MSG(services_.find(name) == services_.end(), - "Double register async service."); - if (services_.find(name) != services_.end()) { - return false; - } + BOOST_ASSERT_MSG(services_.find(name) == services_.end(), "Double register async service."); + if (services_.find(name) != services_.end()) { return false; } auto svc = new typename TSVC::AsyncService(); builder_.RegisterService(svc); @@ -220,20 +177,14 @@ class GrpcServer : private boost::noncopyable { return true; } - template - bool register_rpc( - std::function< - void(typename TSVC::AsyncService*, - ::grpc::ServerContext*, - TREQUEST*, - ::grpc::ServerAsyncResponseWriter*, - ::grpc::CompletionQueue*, - ::grpc::ServerCompletionQueue*, - void *)> request_call_func, - std::function<::grpc::Status(TREQUEST&, TRESPONSE&)> handle_request_func) { - - BOOST_ASSERT_MSG(State::RUNNING == state_, - "register service in non-INITED state"); + template < typename TSVC, typename TREQUEST, typename TRESPONSE > + bool register_rpc(std::function< void(typename TSVC::AsyncService*, ::grpc::ServerContext*, TREQUEST*, + ::grpc::ServerAsyncResponseWriter< TRESPONSE >*, ::grpc::CompletionQueue*, + ::grpc::ServerCompletionQueue*, void*) > + request_call_func, + std::function<::grpc::Status(TREQUEST&, TRESPONSE&) > handle_request_func) { + + BOOST_ASSERT_MSG(State::RUNNING == state_, "register service in non-INITED state"); auto it = services_.find(TSVC::service_full_name()); if (it == services_.end()) { @@ -241,24 +192,21 @@ class GrpcServer : private boost::noncopyable { return false; } - auto svc = static_cast(it->second); - (new ServerCallData ( - svc, cq_.get(), - request_call_func, - handle_request_func))->proceed(); + auto svc = static_cast< typename TSVC::AsyncService* >(it->second); + (new ServerCallData< typename TSVC::AsyncService, TREQUEST, TRESPONSE >(svc, cq_.get(), request_call_func, + handle_request_func)) + ->proceed(); return true; } - - private: - +private: /* * This can be called by multiple threads */ void handle_rpcs(); - void process(BaseServerCallData * cm); + void process(BaseServerCallData* cm); State state_ = State::VOID; @@ -266,12 +214,11 @@ class GrpcServer : private boost::noncopyable { ServerBuilder builder_; - std::unique_ptr<::grpc::ServerCompletionQueue> cq_; - std::unique_ptr server_; - std::list> threads_; + std::unique_ptr<::grpc::ServerCompletionQueue > cq_; + std::unique_ptr< Server > server_; + std::list< std::shared_ptr< std::thread > > threads_; - std::unordered_map services_; + std::unordered_map< const char*, ::grpc::Service* > services_; }; - -} +} // namespace sds::grpc diff --git a/include/sds_grpc/utils.h b/include/sds_grpc/utils.h index a6387b8e..4e071dd2 100644 --- a/include/sds_grpc/utils.h +++ b/include/sds_grpc/utils.h @@ -8,11 +8,9 @@ #include - namespace sds::grpc { - -bool get_file_contents(const std::string & file_name, std::string & contents); +bool get_file_contents(const std::string& file_name, std::string& contents); } diff --git a/lib/client.cpp b/lib/client.cpp index 5ad246e9..f062c001 100644 --- a/lib/client.cpp +++ b/lib/client.cpp @@ -6,20 +6,22 @@ #include "sds_grpc/client.h" - +#ifdef _POSIX_THREADS +#ifndef __APPLE__ +extern "C" { +#include +} +#endif +#endif namespace sds::grpc { - bool GrpcBaseClient::init() { - if (!init_channel()) { - return false; - } + if (!init_channel()) { return false; } return true; } - bool GrpcBaseClient::init_channel() { ::grpc::SslCredentialsOptions ssl_opts; @@ -29,42 +31,32 @@ bool GrpcBaseClient::init_channel() { if (load_ssl_cert(ssl_cert_, ssl_opts.pem_root_certs)) { ::grpc::ChannelArguments channel_args; channel_args.SetSslTargetNameOverride(target_domain_); - channel_ = ::grpc::CreateCustomChannel(server_addr_, - ::grpc::SslCredentials(ssl_opts), - channel_args); + channel_ = ::grpc::CreateCustomChannel(server_addr_, ::grpc::SslCredentials(ssl_opts), channel_args); } else { return false; } } else { - channel_ = ::grpc::CreateChannel(server_addr_, - ::grpc::InsecureChannelCredentials()); + channel_ = ::grpc::CreateChannel(server_addr_, ::grpc::InsecureChannelCredentials()); } return true; } bool GrpcBaseClient::load_ssl_cert(const std::string& ssl_cert, std::string& content) { - return ::sds::grpc::get_file_contents(ssl_cert, content);; + return ::sds::grpc::get_file_contents(ssl_cert, content); + ; } - bool GrpcBaseClient::is_connection_ready() { - return (channel_->GetState(true) == - grpc_connectivity_state::GRPC_CHANNEL_READY); + return (channel_->GetState(true) == grpc_connectivity_state::GRPC_CHANNEL_READY); } - std::mutex GrpcAyncClientWorker::mutex_workers; -std::unordered_map GrpcAyncClientWorker::workers; - -GrpcAyncClientWorker::GrpcAyncClientWorker() { - state_ = State::INIT; -} +std::unordered_map< const char*, GrpcAyncClientWorker::UPtr > GrpcAyncClientWorker::workers; +GrpcAyncClientWorker::GrpcAyncClientWorker() { state_ = State::INIT; } -GrpcAyncClientWorker::~GrpcAyncClientWorker() { - shutdown(); -} +GrpcAyncClientWorker::~GrpcAyncClientWorker() { shutdown(); } void GrpcAyncClientWorker::shutdown() { if (state_ == State::RUNNING) { @@ -81,17 +73,20 @@ void GrpcAyncClientWorker::shutdown() { return; } - bool GrpcAyncClientWorker::run(uint32_t num_threads) { BOOST_ASSERT(State::INIT == state_); - if (num_threads == 0) { - return false; - } + if (num_threads == 0) { return false; } for (uint32_t i = 0; i < num_threads; ++i) { - std::shared_ptr t = std::shared_ptr( - new std::thread(&GrpcAyncClientWorker::async_complete_rpc, this)); + std::shared_ptr< std::thread > t = + std::shared_ptr< std::thread >(new std::thread(&GrpcAyncClientWorker::async_complete_rpc, this)); +#ifdef _POSIX_THREADS +#ifndef __APPLE__ + auto tname = std::string("grpc_client").substr(0, 15); + pthread_setname_np(t->native_handle(), tname.c_str()); +#endif /* __APPLE__ */ +#endif /* _POSIX_THREADS */ threads_.push_back(t); } @@ -99,51 +94,41 @@ bool GrpcAyncClientWorker::run(uint32_t num_threads) { return true; } - void GrpcAyncClientWorker::async_complete_rpc() { void* tag; bool ok = false; while (completion_queue_.Next(&tag, &ok)) { // For client-side unary call, `ok` is always true, // even server is not running - ClientCallMethod* cm = static_cast(tag); + ClientCallMethod* cm = static_cast< ClientCallMethod* >(tag); cm->handle_response(ok); delete cm; } } +bool GrpcAyncClientWorker::create_worker(const char* name, int num_thread) { + std::lock_guard< std::mutex > lock(mutex_workers); -bool GrpcAyncClientWorker::create_worker(const char * name, int num_thread) { - std::lock_guard lock(mutex_workers); + if (auto it = workers.find(name); it != workers.end()) { return true; } - if (auto it = workers.find(name); it != workers.end()) { - return true; - } - - auto worker = std::make_unique(); - if (!worker->run(num_thread)) { - return false; - } + auto worker = std::make_unique< GrpcAyncClientWorker >(); + if (!worker->run(num_thread)) { return false; } workers.insert(std::make_pair(name, std::move(worker))); return true; } - -GrpcAyncClientWorker * GrpcAyncClientWorker::get_worker(const char * name) { - std::lock_guard lock(mutex_workers); +GrpcAyncClientWorker* GrpcAyncClientWorker::get_worker(const char* name) { + std::lock_guard< std::mutex > lock(mutex_workers); auto it = workers.find(name); - if (it == workers.end()) { - return nullptr; - } + if (it == workers.end()) { return nullptr; } return it->second.get(); } - void GrpcAyncClientWorker::shutdown_all() { - std::lock_guard lock(mutex_workers); + std::lock_guard< std::mutex > lock(mutex_workers); for (auto& it : workers) { it.second->shutdown(); @@ -152,13 +137,7 @@ void GrpcAyncClientWorker::shutdown_all() { // g_core_codegen_interface it.second.reset(); } - - -} - } - - - +} // namespace sds::grpc diff --git a/lib/server.cpp b/lib/server.cpp index 195edb60..d0732c76 100644 --- a/lib/server.cpp +++ b/lib/server.cpp @@ -5,8 +5,16 @@ */ #include -#include +#ifdef _POSIX_THREADS +#ifndef __APPLE__ +extern "C" { +#include +} +#endif +#endif + +#include namespace sds::grpc { @@ -40,18 +48,13 @@ void BaseServerCallData::proceed(bool ok) { } } - void BaseServerCallData::do_finish() { GPR_ASSERT(status_ == FINISH); // Once in the FINISH state, this can be destroyed delete this; } - -GrpcServer::GrpcServer() { - -} - +GrpcServer::GrpcServer() {} GrpcServer::~GrpcServer() { shutdown(); @@ -64,29 +67,23 @@ GrpcServer::~GrpcServer() { services_.clear(); } - -bool GrpcServer::init(const std::string& listen_addr, uint32_t threads, - const std::string& ssl_key, const std::string& ssl_cert) { - +bool GrpcServer::init(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, + const std::string& ssl_cert) { BOOST_ASSERT(State::VOID == state_); - if (listen_addr.empty() || threads == 0) { - return false; - } + if (listen_addr.empty() || threads == 0) { return false; } thread_num_ = threads; if (!ssl_cert.empty() && !ssl_key.empty()) { - std::string key_contents; - std::string cert_contents; + std::string key_contents; + std::string cert_contents; get_file_contents(ssl_cert, cert_contents); get_file_contents(ssl_key, key_contents); - if (cert_contents.empty() || key_contents.empty()) { - return false; - } + if (cert_contents.empty() || key_contents.empty()) { return false; } - ::grpc::SslServerCredentialsOptions::PemKeyCertPair pkcp = { key_contents, cert_contents }; + ::grpc::SslServerCredentialsOptions::PemKeyCertPair pkcp = {key_contents, cert_contents}; ::grpc::SslServerCredentialsOptions ssl_opts; ssl_opts.pem_root_certs = ""; ssl_opts.pem_key_cert_pairs.push_back(pkcp); @@ -102,10 +99,7 @@ bool GrpcServer::init(const std::string& listen_addr, uint32_t threads, return true; } - -GrpcServer* GrpcServer::make(const std::string& listen_addr, - uint32_t threads, - const std::string& ssl_key, +GrpcServer* GrpcServer::make(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, const std::string& ssl_cert) { auto ret = new GrpcServer(); if (!ret->init(listen_addr, threads, ssl_key, ssl_cert)) { @@ -116,16 +110,19 @@ GrpcServer* GrpcServer::make(const std::string& listen_addr, return ret; } - bool GrpcServer::run() { - BOOST_ASSERT(State::INITED == state_); server_ = builder_.BuildAndStart(); - for (uint32_t i = 0; i < thread_num_; ++i) { - auto t = std::shared_ptr( - new std::thread(&GrpcServer::handle_rpcs, this)); + for (uint32_t i = 0; i < thread_num_; ++i) { + auto t = std::shared_ptr< std::thread >(new std::thread(&GrpcServer::handle_rpcs, this)); +#ifdef _POSIX_THREADS +#ifndef __APPLE__ + auto tname = std::string("grpc_server").substr(0, 15); + pthread_setname_np(t->native_handle(), tname.c_str()); +#endif /* __APPLE__ */ +#endif /* _POSIX_THREADS */ threads_.push_back(t); } @@ -133,13 +130,11 @@ bool GrpcServer::run() { return true; } - void GrpcServer::handle_rpcs() { void* tag; bool ok = false; while (cq_->Next(&tag, &ok)) { - // `ok` is true if read a successful event, false otherwise. // Success here means that this operation completed in the normal // valid manner. @@ -157,8 +152,7 @@ void GrpcServer::handle_rpcs() { // is already dead (i.e., canceled, deadline expired, other side // dropped the channel, etc). - - BaseServerCallData* cm = static_cast(tag); + BaseServerCallData* cm = static_cast< BaseServerCallData* >(tag); cm->proceed(ok); } } @@ -180,6 +174,4 @@ void GrpcServer::shutdown() { return; } - - -} +} // namespace sds::grpc diff --git a/lib/utils.cpp b/lib/utils.cpp index f705af65..0251b436 100644 --- a/lib/utils.cpp +++ b/lib/utils.cpp @@ -10,7 +10,7 @@ namespace sds::grpc { -bool get_file_contents(const std::string & file_name, std::string & contents) { +bool get_file_contents(const std::string& file_name, std::string& contents) { try { std::ifstream in(file_name.c_str(), std::ios::in); if (in) { @@ -21,16 +21,9 @@ bool get_file_contents(const std::string & file_name, std::string & contents) { contents = t.str(); return true; } - } catch (...) { - - } + } catch (...) {} return false; } - - - - -} - +} // namespace sds::grpc diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp index 49ce5e28..dae3c388 100644 --- a/tests/function/echo_async_client.cpp +++ b/tests/function/echo_async_client.cpp @@ -20,54 +20,41 @@ #include "sds_grpc/client.h" #include "sds_grpc_test.grpc.pb.h" - using namespace ::grpc; using namespace ::sds::grpc; using namespace ::sds_grpc_test; using namespace std::placeholders; -#define WORKER_NAME "worker-1" - +#define WORKER_NAME "worker-1" class EchoAndPingAsyncClient : GrpcAsyncClient { - public: - +public: using GrpcAsyncClient::GrpcAsyncClient; virtual bool init() { - if (!GrpcAsyncClient::init()) { - return false; - } + if (!GrpcAsyncClient::init()) { return false; } - echo_stub_ = make_stub(WORKER_NAME); - ping_stub_ = make_stub(WORKER_NAME); + echo_stub_ = make_stub< EchoService >(WORKER_NAME); + ping_stub_ = make_stub< PingService >(WORKER_NAME); return true; } + void Echo(const EchoRequest& request, std::function< void(EchoReply&, ::grpc::Status& status) > callback) { - void Echo(const EchoRequest& request, - std::function callback) { - - echo_stub_->call_unary(request, - &EchoService::StubInterface::AsyncEcho, - callback); + echo_stub_->call_unary< EchoRequest, EchoReply >(request, &EchoService::StubInterface::AsyncEcho, callback); } - void Ping(const PingRequest& request, - std::function callback) { + void Ping(const PingRequest& request, std::function< void(PingReply&, ::grpc::Status& status) > callback) { - ping_stub_->call_unary(request, - &PingService::StubInterface::AsyncPing, - callback); + ping_stub_->call_unary< PingRequest, PingReply >(request, &PingService::StubInterface::AsyncPing, callback); } - AsyncStub::UPtr echo_stub_; - AsyncStub::UPtr ping_stub_; + AsyncStub< EchoService >::UPtr echo_stub_; + AsyncStub< PingService >::UPtr ping_stub_; }; - std::atomic_int g_echo_counter; std::atomic_int g_ping_counter; @@ -75,17 +62,12 @@ std::atomic_int g_ping_counter; * Echo implements async response handler. */ class Echo { - public: - - Echo(int seqno) { - request_.set_message(std::to_string(seqno)); - } +public: + Echo(int seqno) { request_.set_message(std::to_string(seqno)); } void handle_echo_reply(EchoReply& reply, ::grpc::Status& status) { if (!status.ok()) { - LOGERROR("echo request {} failed, status {}: {}", - request_.message(), - status.error_code(), + LOGERROR("echo request {} failed, status {}: {}", request_.message(), status.error_code(), status.error_message()); return; } @@ -99,14 +81,13 @@ class Echo { EchoRequest request_; }; - #define GRPC_CALL_COUNT 10 int RunClient(const std::string& server_address) { GrpcAyncClientWorker::create_worker(WORKER_NAME, 4); - auto client = GrpcAsyncClient::make(server_address, "", ""); + auto client = GrpcAsyncClient::make< EchoAndPingAsyncClient >(server_address, "", ""); if (!client) { LOGCRITICAL("Create async client failed."); return -1; @@ -116,9 +97,8 @@ int RunClient(const std::string& server_address) { if (i % 2 == 0) { // Async response handling logic can be put in a class's member // function, then use a lambda to wrap it. - Echo * echo = new Echo(i); - client->Echo(echo->request_, - [echo] (EchoReply& reply, ::grpc::Status& status) { + Echo* echo = new Echo(i); + client->Echo(echo->request_, [echo](EchoReply& reply, ::grpc::Status& status) { echo->handle_echo_reply(reply, status); delete echo; }); @@ -132,13 +112,9 @@ int RunClient(const std::string& server_address) { request->set_seqno(i); // response can be handled with lambda directly - client->Ping(*request, - [request] (PingReply& reply, ::grpc::Status& status) { - + client->Ping(*request, [request](PingReply& reply, ::grpc::Status& status) { if (!status.ok()) { - LOGERROR("ping request {} failed, status {}: {}", - request->seqno(), - status.error_code(), + LOGERROR("ping request {} failed, status {}: {}", request->seqno(), status.error_code(), status.error_message()); return; } @@ -173,4 +149,3 @@ int main(int argc, char** argv) { return 0; } - diff --git a/tests/function/echo_server.cpp b/tests/function/echo_server.cpp index a1ffa48f..f770a03f 100644 --- a/tests/function/echo_server.cpp +++ b/tests/function/echo_server.cpp @@ -24,10 +24,9 @@ using namespace ::sds::grpc; using namespace ::sds_grpc_test; using namespace std::placeholders; - class EchoServiceImpl { - public: +public: virtual ~EchoServiceImpl() = default; virtual ::grpc::Status echo_request(EchoRequest& request, EchoReply& response) { @@ -38,7 +37,7 @@ class EchoServiceImpl { bool register_service(GrpcServer* server) { - if (!server->register_async_service()) { + if (!server->register_async_service< EchoService >()) { LOGERROR("register service failed"); return false; } @@ -48,23 +47,19 @@ class EchoServiceImpl { bool register_rpcs(GrpcServer* server) { LOGINFO("register rpc calls"); - if (!server->register_rpc( - &EchoService::AsyncService::RequestEcho, - std::bind(&EchoServiceImpl::echo_request, this, _1, _2))) { + if (!server->register_rpc< EchoService, EchoRequest, EchoReply >( + &EchoService::AsyncService::RequestEcho, std::bind(&EchoServiceImpl::echo_request, this, _1, _2))) { LOGERROR("register rpc failed"); return false; } return true; } - }; - - class PingServiceImpl { - public: +public: virtual ~PingServiceImpl() = default; virtual ::grpc::Status ping_request(PingRequest& request, PingReply& response) { @@ -75,7 +70,7 @@ class PingServiceImpl { bool register_service(GrpcServer* server) { - if (!server->register_async_service()) { + if (!server->register_async_service< PingService >()) { LOGERROR("register ping service failed"); return false; } @@ -85,25 +80,21 @@ class PingServiceImpl { bool register_rpcs(GrpcServer* server) { LOGINFO("register rpc calls"); - if (!server->register_rpc( - &PingService::AsyncService::RequestPing, - std::bind(&PingServiceImpl::ping_request, this, _1, _2))) { + if (!server->register_rpc< PingService, PingRequest, PingReply >( + &PingService::AsyncService::RequestPing, std::bind(&PingServiceImpl::ping_request, this, _1, _2))) { LOGERROR("register ping rpc failed"); return false; } return true; } - }; - GrpcServer* g_grpc_server = nullptr; -EchoServiceImpl * g_echo_impl = nullptr; -PingServiceImpl * g_ping_impl = nullptr; +EchoServiceImpl* g_echo_impl = nullptr; +PingServiceImpl* g_ping_impl = nullptr; -void sighandler(int signum, siginfo_t *info, void *ptr) -{ +void sighandler(int signum, siginfo_t* info, void* ptr) { LOGINFO("Received signal {}", signum); if (signum == SIGTERM) { @@ -130,14 +121,11 @@ void StartServer() { g_echo_impl->register_rpcs(g_grpc_server); g_ping_impl->register_rpcs(g_grpc_server); - } - SDS_LOGGING_INIT() SDS_OPTIONS_ENABLE(logging) - int main(int argc, char* argv[]) { SDS_OPTIONS_LOAD(argc, argv, logging) sds_logging::SetLogger("echo_server"); diff --git a/tests/function/echo_sync_client.cpp b/tests/function/echo_sync_client.cpp index b0a3adf6..0173f0f1 100644 --- a/tests/function/echo_sync_client.cpp +++ b/tests/function/echo_sync_client.cpp @@ -4,7 +4,6 @@ * Created on: Sep 22, 2018 */ - #include #include #include @@ -19,51 +18,39 @@ #include "sds_grpc/client.h" #include "sds_grpc_test.grpc.pb.h" - using namespace ::grpc; using namespace ::sds::grpc; using namespace ::sds_grpc_test; using namespace std::placeholders; - class EchoAndPingClient : public GrpcSyncClient { - public: - +public: using GrpcSyncClient::GrpcSyncClient; virtual bool init() { - if (!GrpcSyncClient::init()) { - return false; - } + if (!GrpcSyncClient::init()) { return false; } - echo_stub_ = MakeStub(); - ping_stub_ = MakeStub(); + echo_stub_ = MakeStub< EchoService >(); + ping_stub_ = MakeStub< PingService >(); return true; } - const std::unique_ptr& echo_stub() { - return echo_stub_; - } + const std::unique_ptr< EchoService::StubInterface >& echo_stub() { return echo_stub_; } - const std::unique_ptr& ping_stub() { - return ping_stub_; - } - - private: - - std::unique_ptr echo_stub_; - std::unique_ptr ping_stub_; + const std::unique_ptr< PingService::StubInterface >& ping_stub() { return ping_stub_; } +private: + std::unique_ptr< EchoService::StubInterface > echo_stub_; + std::unique_ptr< PingService::StubInterface > ping_stub_; }; - #define GRPC_CALL_COUNT 10 int RunClient(const std::string& server_address) { - auto client = std::make_unique(server_address, "", ""); + auto client = std::make_unique< EchoAndPingClient >(server_address, "", ""); if (!client || !client->init()) { LOGERROR("Create grpc sync client failed."); return -1; @@ -74,45 +61,36 @@ int RunClient(const std::string& server_address) { ClientContext context; if (i % 2 == 0) { - EchoRequest request; + EchoRequest request; EchoReply reply; request.set_message(std::to_string(i)); Status status = client->echo_stub()->Echo(&context, request, &reply); if (!status.ok()) { - LOGERROR("echo request {} failed, status {}: {}", - request.message(), - status.error_code(), + LOGERROR("echo request {} failed, status {}: {}", request.message(), status.error_code(), status.error_message()); continue; } LOGINFO("echo request {} reply {}", request.message(), reply.message()); - if (request.message() == reply.message()) { - ret++; - } + if (request.message() == reply.message()) { ret++; } } else { - PingRequest request; + PingRequest request; PingReply reply; request.set_seqno(i); Status status = client->ping_stub()->Ping(&context, request, &reply); if (!status.ok()) { - LOGERROR("ping request {} failed, status {}: {}", - request.seqno(), - status.error_code(), + LOGERROR("ping request {} failed, status {}: {}", request.seqno(), status.error_code(), status.error_message()); continue; } LOGINFO("ping request {} reply {}", request.seqno(), reply.seqno()); - if (request.seqno() == reply.seqno()) { - ret++; - } + if (request.seqno() == reply.seqno()) { ret++; } } - } return ret; From dda1f54580228ab116330a10ce55854f34eed13e Mon Sep 17 00:00:00 2001 From: Ravi Nagarjun Akella Date: Fri, 25 Jun 2021 14:05:00 -0700 Subject: [PATCH 060/385] SDSTOR-4956: Add deadline option to call_unary --- include/sds_grpc/client.h | 4 +++- tests/function/echo_async_client.cpp | 5 ++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/include/sds_grpc/client.h b/include/sds_grpc/client.h index bf8f472e..2c1187fb 100644 --- a/include/sds_grpc/client.h +++ b/include/sds_grpc/client.h @@ -240,13 +240,15 @@ class GrpcAsyncClient : public GrpcBaseClient { * The callback function must check if `::grpc::Status` argument is * OK before handling the response. If call failed, `::grpc::Status` * indicates the error code and error message. + * @param deadline - deadline in seconds * */ template < typename TREQUEST, typename TRESPONSE > void call_unary(const TREQUEST& request, unary_call_t< TREQUEST, TRESPONSE > call, - unary_callback_t< TREQUEST, TRESPONSE > callback) { + unary_callback_t< TREQUEST, TRESPONSE > callback, uint32_t deadline) { auto data = new ClientCallData< TREQUEST, TRESPONSE >(callback); + data->set_deadline(deadline); // Note that async unary RPCs don't post a CQ tag in call data->responder_reader() = (stub_.get()->*call)(&data->context(), request, cq()); // CQ tag posted here diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp index dae3c388..9c507bb0 100644 --- a/tests/function/echo_async_client.cpp +++ b/tests/function/echo_async_client.cpp @@ -43,12 +43,12 @@ class EchoAndPingAsyncClient : GrpcAsyncClient { void Echo(const EchoRequest& request, std::function< void(EchoReply&, ::grpc::Status& status) > callback) { - echo_stub_->call_unary< EchoRequest, EchoReply >(request, &EchoService::StubInterface::AsyncEcho, callback); + echo_stub_->call_unary< EchoRequest, EchoReply >(request, &EchoService::StubInterface::AsyncEcho, callback, 1); } void Ping(const PingRequest& request, std::function< void(PingReply&, ::grpc::Status& status) > callback) { - ping_stub_->call_unary< PingRequest, PingReply >(request, &PingService::StubInterface::AsyncPing, callback); + ping_stub_->call_unary< PingRequest, PingReply >(request, &PingService::StubInterface::AsyncPing, callback, 1); } AsyncStub< EchoService >::UPtr echo_stub_; @@ -148,4 +148,3 @@ int main(int argc, char** argv) { return 0; } - From 34c655aafff6a489d0dd53db45d5027f2fb2ae3e Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Fri, 3 Sep 2021 20:01:25 -0700 Subject: [PATCH 061/385] Forked off and build different version of RPC Server and Client where it supports async server, more context information, more logging. --- CMakeLists.txt | 12 +- include/grpc_helper/rpc_call.hpp | 395 +++++++++++++++++++++++++++ include/grpc_helper/rpc_client.hpp | 292 ++++++++++++++++++++ include/grpc_helper/rpc_common.hpp | 8 + include/grpc_helper/rpc_server.hpp | 99 +++++++ include/sds_grpc/client.h | 306 --------------------- include/sds_grpc/server.h | 224 --------------- include/sds_grpc/utils.h | 16 -- lib/client.cpp | 143 ---------- lib/rpc_client.cpp | 107 ++++++++ lib/rpc_server.cpp | 127 +++++++++ lib/server.cpp | 177 ------------ lib/utils.cpp | 29 -- tests/function/CMakeLists.txt | 12 +- tests/function/echo_async_client.cpp | 272 +++++++++++------- tests/function/echo_server.cpp | 59 ++-- tests/proto/CMakeLists.txt | 4 +- tests/proto/grpc_helper_test.proto | 26 ++ tests/proto/sds_grpc_test.proto | 37 --- tests/unit/CMakeLists.txt | 4 +- 20 files changed, 1264 insertions(+), 1085 deletions(-) create mode 100644 include/grpc_helper/rpc_call.hpp create mode 100644 include/grpc_helper/rpc_client.hpp create mode 100644 include/grpc_helper/rpc_common.hpp create mode 100644 include/grpc_helper/rpc_server.hpp delete mode 100644 include/sds_grpc/client.h delete mode 100644 include/sds_grpc/server.h delete mode 100644 include/sds_grpc/utils.h delete mode 100644 lib/client.cpp create mode 100644 lib/rpc_client.cpp create mode 100644 lib/rpc_server.cpp delete mode 100644 lib/server.cpp delete mode 100644 lib/utils.cpp create mode 100644 tests/proto/grpc_helper_test.proto delete mode 100644 tests/proto/sds_grpc_test.proto diff --git a/CMakeLists.txt b/CMakeLists.txt index 286b3906..17ab7cf3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,17 +16,15 @@ if (${CMAKE_BUILD_TYPE} STREQUAL Debug) list(REMOVE_ITEM CONAN_LIBS tcmalloc tcmalloc_minimal) endif () -set(SDS_GRPC_LIBS ${CONAN_LIBS} ${CMAKE_THREAD_LIBS_INIT}) +set(GRPC_HELPER_LIBS ${CONAN_LIBS} ${CMAKE_THREAD_LIBS_INIT}) -set (SDS_GRPC_SOURCE - lib/client.cpp - lib/server.cpp - lib/utils.cpp +set (GRPC_HELPER_SOURCE + lib/rpc_server.cpp + lib/rpc_client.cpp ) -add_library(sds_grpc ${SDS_GRPC_SOURCE}) - +add_library(grpc_helper ${GRPC_HELPER_SOURCE}) add_subdirectory(tests) diff --git a/include/grpc_helper/rpc_call.hpp b/include/grpc_helper/rpc_call.hpp new file mode 100644 index 00000000..872e1634 --- /dev/null +++ b/include/grpc_helper/rpc_call.hpp @@ -0,0 +1,395 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include "rpc_common.hpp" + +SDS_LOGGING_DECL(grpc_server) + +#define RPC_SERVER_LOG(level, msg, ...) \ + LOG##level##MOD_FMT(grpc_server, ([&](fmt::memory_buffer& buf, const char* __m, auto&&... args) -> bool { \ + fmt::format_to(buf, "[{}:{}] [RPC={} id={}] ", file_name(__FILE__), __LINE__, \ + m_rpc_info->m_rpc_name, request_id()); \ + fmt::format_to(buf, __m, std::forward< decltype(args) >(args)...); \ + return true; \ + }), \ + msg, ##__VA_ARGS__); + +namespace grpc_helper { +class RpcDataAbstract : public boost::intrusive_ref_counter< RpcDataAbstract, boost::thread_safe_counter > { +public: + RpcDataAbstract(size_t queue_idx) : + m_queue_idx{queue_idx}, m_request_id(s_glob_request_id.fetch_add(1, std::memory_order_relaxed)) {} + + virtual ~RpcDataAbstract() = default; + virtual size_t get_rpc_idx() const = 0; + + ::grpc::ServerContext& server_context() noexcept { return m_ctx; } + uint64_t request_id() const { return m_request_id; } + bool canceled() const { return m_is_canceled; } + + // enqueues this call to be matched with incoming rpc requests + virtual void enqueue_call_request(::grpc::ServerCompletionQueue& cq) = 0; + + // the grpc queue index on which this request is to be enqueued + size_t const m_queue_idx; + +protected: + // ref counter of this instance + RpcDataAbstract* ref() { + intrusive_ptr_add_ref(this); + return this; + } + void unref() { intrusive_ptr_release(this); } + virtual RpcDataAbstract* create_new() = 0; + friend class RpcTag; + + uint64_t const m_request_id; + grpc::ServerContext m_ctx; + std::atomic_bool m_is_canceled{false}; + static inline std::atomic< uint64_t > s_glob_request_id = 0; +}; + +// Associates a tag in a `::grpc::CompletionQueue` with a callback +// for an incoming RPC. An active Tag owns a reference on the corresponding +// RpcData object. +class RpcTag { +public: + RpcTag(RpcDataAbstract* rpc) : m_rpc_data{rpc} {} + RpcTag* ref() { + m_rpc_data->ref(); + return this; + } + // Calls the callback associated with this tag. + // The callback takes ownership of `this->call_`. + // @return if not null - a replacement of this call for registration with the server; null otherwise + RpcDataAbstract* process(bool ok) { + RpcDataAbstract* ret = do_process(ok); + m_rpc_data->unref(); // undo ref() acquired when tag handed over to grpc. + return ret; + } + +protected: + virtual RpcDataAbstract* do_process(bool ok) = 0; + RpcDataAbstract* const m_rpc_data; // `this` owns one reference. +}; + +class RpcStaticInfoBase { +public: + virtual ~RpcStaticInfoBase() = default; +}; + +template < typename ServiceT, typename ReqT, typename RespT, bool streaming > +class RpcData; + +template < typename ServiceT, typename ReqT, typename RespT > +using AsyncRpcDataPtr = boost::intrusive_ptr< RpcData< ServiceT, ReqT, RespT, false > >; + +template < typename ServiceT, typename ReqT, typename RespT > +using StreamRpcDataPtr = boost::intrusive_ptr< RpcData< ServiceT, ReqT, RespT, true > >; + +#define RPC_DATA_PTR_SPEC boost::intrusive_ptr< RpcData< ServiceT, ReqT, RespT, streaming > > +#define request_call_cb_t \ + std::function< void(typename ServiceT::AsyncService*, ::grpc::ServerContext*, ReqT*, \ + ::grpc::ServerAsyncResponseWriter< RespT >*, ::grpc::CompletionQueue*, \ + ::grpc::ServerCompletionQueue*, void*) > +#define rpc_handler_cb_t std::function< bool(const RPC_DATA_PTR_SPEC& rpc_call) > +#define rpc_completed_cb_t std::function< void(const RPC_DATA_PTR_SPEC& rpc_call) > +#define rpc_call_static_info_t RpcStaticInfo< ServiceT, ReqT, RespT, streaming > + +// This class represents all static information related to a specific RpcData, so these information does not need to be +// built for every RPC +template < typename ServiceT, typename ReqT, typename RespT, bool streaming = false > +class RpcStaticInfo : public RpcStaticInfoBase { +public: + RpcStaticInfo(GrpcServer* server, typename ServiceT::AsyncService& svc, const request_call_cb_t& call_cb, + const rpc_handler_cb_t& rpc_cb, const rpc_completed_cb_t& comp_cb, size_t idx, + const std::string& name) : + m_server{server}, + m_svc{svc}, + m_req_call_cb{call_cb}, + m_handler_cb{rpc_cb}, + m_comp_cb{comp_cb}, + m_rpc_idx{idx}, + m_rpc_name{name} {} + + GrpcServer* m_server; + typename ServiceT::AsyncService& m_svc; + request_call_cb_t m_req_call_cb; + rpc_handler_cb_t m_handler_cb; + rpc_completed_cb_t m_comp_cb; + size_t m_rpc_idx; + std::string m_rpc_name; +}; + +/** + * This class represents an incoming request and its associated context + * Template argument 'streaming' should be understood as server streaming. If we later want + * client/bidirectional streaming then we can restructure this code + */ +template < typename ServiceT, typename ReqT, typename RespT, bool streaming = false > +class RpcData : public RpcDataAbstract, sisl::ObjLifeCounter< RpcData< ServiceT, ReqT, RespT, streaming > > { +public: + static RpcDataAbstract* make(rpc_call_static_info_t* rpc_info, size_t queue_idx) { + return new RpcData(rpc_info, queue_idx); + } + + RpcDataAbstract* create_new() override { return new RpcData(m_rpc_info, m_queue_idx); } + ~RpcData() override = default; + + const ReqT& request() const { return *m_request; } + + template < bool mode = streaming > + std::enable_if_t< !mode, RespT& > response() { + return *m_response; + } + + // invoked by the application completion flow when the response payload `m_response` is formed + //@param is_last - true to indicate that this is the last chunk in a streaming response (where + // applicable) + // NOTE: this function MUST `unref()` this call + template < bool mode = streaming > + std::enable_if_t< !mode, void > send_response(bool is_last = true) { + do_non_streaming_send(); + } + + /** + * @param response, the response should own by m_arena_resp + * @param is_last + * @return return false, when we can't send send_response anymore. + * The reasons includes: + * 1. last streaming response has sent + * + * 2. the rpc call has canceled. + * 3. ok == false in ResponseSent + * 4. e.t.c + * Note: We must call send_response with is_last = true once even when the call return false at + * last time to indicate use will not hold the RpcData anymore. + */ + template < bool mode = streaming > + std::enable_if_t< mode, bool > send_response(std::unique_ptr< RespT > response, bool is_last) { + std::lock_guard< std::mutex > lock{m_streaming_mutex}; + if (is_last && !m_last) { + m_last = true; + // ses comment in _start_request_processing + unref(); + } + if (m_streaming_disable_enqueue) { return false; } + if (m_last) { m_streaming_disable_enqueue = true; } + + RPC_SERVER_LOG(DEBUG, "ENQUEUE STREAMING RESPONSE, is_last={}", is_last); + RPC_SERVER_LOG(TRACE, "resp. payload={}", response->DebugString()); + + m_pending_streaming_responses.push(std::move(response)); + do_streaming_send_if_needed(); + return !m_streaming_disable_enqueue; + } + + ::grpc::string get_peer_info() { return m_ctx.peer(); } + std::string get_client_req_context() { + /*if (m_client_req_context.empty()) { + std::string* client_id_str = google::protobuf::Arena::Create< std::string >( + &m_arena_req, m_ctx.peer() + "_" + std::to_string(request_id())); + m_client_req_context = grpc::string_ref(*client_id_str); + } + return m_client_req_context; */ + return fmt::format("{}_{}", m_ctx.peer(), request_id()); + } + size_t get_rpc_idx() const { return m_rpc_info->m_rpc_idx; } + + RpcData(rpc_call_static_info_t* rpc_info, size_t queue_idx) : + RpcDataAbstract{queue_idx}, + m_rpc_info{rpc_info}, + m_request{google::protobuf::Arena::CreateMessage< ReqT >(&m_arena_req)}, + m_response{google::protobuf::Arena::CreateMessage< RespT >(&m_arena_resp)}, + // m_rpc_context{google::protobuf::Arena::Create< context_t >(&m_arena_req, *this)}, + m_responder(&m_ctx), + m_streaming_responder(&m_ctx) {} + +private: + // The implementation of this method should dispatch the request for processing by calling + // do_start_request_processing One reference on `this` is transferred to the callee, and the + // callee is responsible for releasing it (typically via `RpcData::send_response(..)`). + // + // `ok` is true if the request was received is a "regular event", otherwise false. + // @return a new instance of the same class for enqueueing as a replacement of this call + RpcDataAbstract* on_request_received(bool ok) { + bool in_shutdown = RPCHelper::has_server_shutdown(m_rpc_info->m_server); + RPC_SERVER_LOG(TRACE, "request received with is_ok={} is_shutdown={}", ok, in_shutdown); + + if (ok) { + ref(); // we now own one ref since we are starting the processing + + RPC_SERVER_LOG(DEBUG, "Received client_req_context={}, from peer={}", get_client_req_context(), + get_peer_info()); + RPC_SERVER_LOG(TRACE, "req. payload={}", request().DebugString()); + + if constexpr (streaming) { + // In no-streaming mode, we call ref() to inc the ref count for keep the RpcData live + // before users finish their work and send responses in RequestReceived. + // But in streaming mode, The time user finishes their work may be different to + // the time grpc finsihes the grpc call. E.g.: + // 1) The user queues the last streaming resposne. At that time. We can't unref the RpcData and + // must do it after it sends all responses. + // 2) The user queues a no-last streaming response, then RpcData find the call was canceled. + // We can't unref the call, because users don't know it, they will send next responses. + // So instead of using only one ref in no-streaming mode. We use two ref to make lifecyle clear: + // 1) first one in RequestReceived and unref after grpc call finished. + // 2) second one in here and unref after called send_response with is_last = true; + ref(); + } + if (m_rpc_info->m_handler_cb(RPC_DATA_PTR_SPEC{this})) { send_response(); } + } + + return in_shutdown ? nullptr : create_new(); + } + + // This method will be called in response to one of `m_responder.Finish*` flavours + RpcDataAbstract* on_response_sent(bool ok) { + RPC_SERVER_LOG(TRACE, "response sent with is_ok={}", ok); + + if constexpr (streaming) { + if (ok) { + std::lock_guard< std::mutex > lock{m_streaming_mutex}; + m_write_pending = false; + do_streaming_send_if_needed(); + } else { + m_streaming_disable_enqueue = true; + // The ResponseSent can be triggered by Write, WriteAndFinish and Finish. + // Only when it triggered by Write, we should call unref() + if (!m_streaming_disable_send) { unref(); } + } + } + return nullptr; + } + + // This method will be called either (i) when the server is notified that the request has been canceled, or (ii) + // when the request completes normally. The implementation should distinguish these cases by querying the + // grpc::ServerContext associated with the request. + RpcDataAbstract* on_request_completed(bool ok) { + RPC_SERVER_LOG(TRACE, "request completed with is_ok={}", ok); + if (m_ctx.IsCancelled()) { + m_is_canceled.store(true, std::memory_order_release); + RPC_SERVER_LOG(DEBUG, "request is CANCELLED by the caller"); + } + if (m_rpc_info->m_comp_cb) { m_rpc_info->m_comp_cb(RPC_DATA_PTR_SPEC{this}); } + return nullptr; + } + + void enqueue_call_request(::grpc::ServerCompletionQueue& cq) override { + RPC_SERVER_LOG(TRACE, "enqueue new call request"); + + if (m_rpc_info->m_comp_cb) { + // Creates a completion queue tag for handling cancellation by the client. + // NOTE: This method must be called before this call is enqueued on a completion queue. + m_ctx.AsyncNotifyWhenDone(m_completed_tag.ref()); + } + + m_rpc_info->m_req_call_cb(&m_rpc_info->m_svc, &m_ctx, m_request, &m_responder, &cq, &cq, + m_request_received_tag.ref()); + } + + // actual sending of the response via grpc + // MUST unref() after send is enqueued + void do_non_streaming_send() { + if (!m_is_canceled.load(std::memory_order_relaxed)) { + RPC_SERVER_LOG(DEBUG, "SENDING RESPONSE"); + RPC_SERVER_LOG(TRACE, "resp. payload={}", m_response->DebugString()); + + if (m_retstatus.ok()) { + m_responder.Finish(*m_response, grpc::Status::OK, m_response_sent_tag.ref()); + } else { + m_responder.FinishWithError(m_retstatus, m_response_sent_tag.ref()); + } + } + unref(); // because we have enqueued response for this call and not longer own it + } + + // MUST be called in streaming mode and under m_streaming_mutex. + void do_streaming_send_if_needed() { + if (m_streaming_disable_send) { return; } + + if (m_is_canceled.load(std::memory_order_relaxed)) { + m_streaming_disable_enqueue = true; + m_streaming_disable_send = true; + unref(); + return; + } + + if (m_write_pending) { return; } + + if (!m_retstatus.ok()) { + m_streaming_responder.Finish(m_retstatus, m_response_sent_tag.ref()); + m_streaming_disable_enqueue = true; + m_streaming_disable_send = true; + unref(); + return; + } + + if (m_pending_streaming_responses.empty()) { return; } + auto response = std::move(m_pending_streaming_responses.front()); + m_pending_streaming_responses.pop(); + if (m_pending_streaming_responses.empty() && m_streaming_disable_enqueue) { + RPC_SERVER_LOG(DEBUG, "SENDING LAST STREAMING RESPONSE"); + RPC_SERVER_LOG(TRACE, "resp. payload={}", m_response->DebugString()); + + m_streaming_responder.WriteAndFinish(*response, grpc::WriteOptions(), grpc::Status::OK, + m_response_sent_tag.ref()); + m_streaming_disable_send = true; + unref(); + } else { + RPC_SERVER_LOG(DEBUG, "SENDING STREAMING RESPONSE"); + RPC_SERVER_LOG(TRACE, "resp. payload={}", m_response->DebugString()); + m_streaming_responder.Write(*response, grpc::WriteOptions(), m_response_sent_tag.ref()); + m_write_pending = true; + } + } + +private: + rpc_call_static_info_t* m_rpc_info; + ::google::protobuf::Arena m_arena_req, m_arena_resp; + ReqT* const m_request; + RespT* const m_response; + + // this field is used when there is a high level grpc-level request error + grpc::Status m_retstatus{grpc::Status::OK}; + + grpc::ServerAsyncResponseWriter< RespT > m_responder; + grpc::ServerAsyncWriter< RespT > m_streaming_responder; + + std::mutex m_streaming_mutex; + bool m_last{false}; + bool m_write_pending{false}; + bool m_streaming_disable_enqueue{false}; + bool m_streaming_disable_send{false}; + std::queue< std::unique_ptr< RespT > > m_pending_streaming_responses; + + // implements abstract method `_process() by delegating to registered pointer to member function + struct RpcTagImpl : public RpcTag { + using callback_type = RpcDataAbstract* (RpcData::*)(bool ok); + RpcTagImpl(RpcData* rpc, callback_type cb) : RpcTag{rpc}, m_callback{cb} {} + + RpcDataAbstract* do_process(bool ok) override { return (static_cast< RpcData* >(m_rpc_data)->*m_callback)(ok); } + + callback_type m_callback; + }; + + // Used as void* completion markers from grpc to indicate different events of interest for a + // Call. + RpcTagImpl m_request_received_tag{this, &RpcData::on_request_received}; + RpcTagImpl m_response_sent_tag{this, &RpcData::on_response_sent}; + RpcTagImpl m_completed_tag{this, &RpcData::on_request_completed}; +}; + +} // namespace grpc_helper diff --git a/include/grpc_helper/rpc_client.hpp b/include/grpc_helper/rpc_client.hpp new file mode 100644 index 00000000..3d16afba --- /dev/null +++ b/include/grpc_helper/rpc_client.hpp @@ -0,0 +1,292 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +namespace grpc_helper { + +/** + * A interface for handling gRPC async response + */ +class ClientRpcDataAbstract : private boost::noncopyable { +public: + virtual ~ClientRpcDataAbstract() = default; + virtual void handle_response(bool ok = true) = 0; +}; + +template < typename ReqT, typename RespT > +class ClientRpcData; + +template < typename ReqT, typename RespT > +using rpc_comp_cb_t = std::function< void(ClientRpcData< ReqT, RespT >& cd) >; + +template < typename ReqT > +using req_builder_cb_t = std::function< void(ReqT&) >; + +template < typename RespT > +using unary_callback_t = std::function< void(RespT&, ::grpc::Status& status) >; + +/** + * The specialized 'ClientRpcDataInternal' per gRPC call, it stores + * the response handler function + * + */ +template < typename ReqT, typename RespT > +class ClientRpcDataInternal : public ClientRpcDataAbstract { +public: + using ResponseReaderPtr = std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< RespT > >; + + /* Allow GrpcAsyncClient and its inner classes to use + * ClientCallData. + */ + friend class GrpcAsyncClient; + + ClientRpcDataInternal() = default; + ClientRpcDataInternal(const unary_callback_t< RespT >& cb) : m_cb{cb} {} + virtual ~ClientRpcDataInternal() = default; + + // TODO: support time in any time unit -- lhuang8 + void set_deadline(uint32_t seconds) { + std::chrono::system_clock::time_point deadline = + std::chrono::system_clock::now() + std::chrono::seconds(seconds); + m_context.set_deadline(deadline); + } + + ResponseReaderPtr& responder_reader() { return m_resp_reader_ptr; } + ::grpc::Status& status() { return m_status; } + RespT& reply() { return m_reply; } + ::grpc::ClientContext& context() { return m_context; } + + virtual void handle_response([[maybe_unused]] bool ok = true) override { + // For unary call, ok is always true, `status_` will indicate error if there are any. + m_cb(m_reply, m_status); + } + + unary_callback_t< RespT > m_cb; + RespT m_reply; + ::grpc::ClientContext m_context; + ::grpc::Status m_status; + ResponseReaderPtr m_resp_reader_ptr; +}; + +template < typename ReqT, typename RespT > +class ClientRpcData : public ClientRpcDataInternal< ReqT, RespT > { +public: + ClientRpcData(const rpc_comp_cb_t< ReqT, RespT >& comp_cb) : m_comp_cb{comp_cb} {} + virtual ~ClientRpcData() = default; + + virtual void handle_response([[maybe_unused]] bool ok = true) override { + // For unary call, ok is always true, `status_` will indicate error if there are any. + m_comp_cb(*this); + // Caller could delete this pointer and thus don't acccess anything after this. + } + + const ReqT& req() { return m_req; } + + rpc_comp_cb_t< ReqT, RespT > m_comp_cb; + ReqT m_req; +}; + +/** + * A GrpcBaseClient takes care of establish a channel to grpc + * server. The channel can be used by any number of grpc + * generated stubs. + * + */ +class GrpcBaseClient { +protected: + const std::string m_server_addr; + const std::string m_target_domain; + const std::string m_ssl_cert; + + std::shared_ptr< ::grpc::ChannelInterface > m_channel; + +public: + GrpcBaseClient(const std::string& server_addr, const std::string& target_domain = "", + const std::string& ssl_cert = ""); + virtual ~GrpcBaseClient() = default; + virtual bool is_connection_ready() const; + +private: + // virtual bool load_ssl_cert(const std::string& ssl_cert, std::string& content); +}; + +class GrpcSyncClient : public GrpcBaseClient { +public: + using GrpcBaseClient::GrpcBaseClient; + + template < typename ServiceT > + std::unique_ptr< typename ServiceT::StubInterface > MakeStub() { + return ServiceT::NewStub(m_channel); + } +}; + +ENUM(ClientState, uint8_t, VOID, INIT, RUNNING, SHUTTING_DOWN, TERMINATED); + +/** + * One GrpcBaseClient can have multiple stub + * + * The gRPC client worker, it owns a CompletionQueue and one or more threads, + * it's only used for handling asynchronous responses. + * + * The CompletionQueue is used to send asynchronous request, then the + * response will be handled on worker threads. + * + */ +class GrpcAsyncClientWorker final { +public: + using UPtr = std::unique_ptr< GrpcAsyncClientWorker >; + + GrpcAsyncClientWorker() = default; + ~GrpcAsyncClientWorker(); + + void run(uint32_t num_threads); + + ::grpc::CompletionQueue& cq() { return m_cq; } + + static void create_worker(const std::string& name, int num_threads); + static GrpcAsyncClientWorker* get_worker(const std::string& name); + + /** + * Must be called explicitly before program exit if any worker created. + */ + static void shutdown_all(); + +private: + /* + * Shutdown CompletionQueue and threads. + * + * For now, workers can only by shutdown by + * GrpcAsyncClientWorker::shutdown_all(). + */ + void shutdown(); + void client_loop(); + +private: + static std::mutex s_workers_mtx; + static std::unordered_map< std::string, GrpcAsyncClientWorker::UPtr > s_workers; + + ClientState m_state{ClientState::INIT}; + ::grpc::CompletionQueue m_cq; + std::vector< std::thread > m_threads; +}; + +class GrpcAsyncClient : public GrpcBaseClient { +public: + template < typename ServiceT > + using StubPtr = std::unique_ptr< typename ServiceT::StubInterface >; + + GrpcAsyncClient(const std::string& server_addr, const std::string& target_domain = "", + const std::string& ssl_cert = "") : + GrpcBaseClient(server_addr, target_domain, ssl_cert) {} + + virtual ~GrpcAsyncClient() {} + + /** + * AsyncStub is a wrapper of generated service stub. + * + * An AsyncStub is created with a GrpcAsyncClientWorker, all responses + * of grpc async calls made on it will be handled on the + * GrpcAsyncClientWorker's threads. + * + * Please use GrpcAsyncClient::make_stub() to create AsyncStub. + * + */ + template < typename ServiceT > + struct AsyncStub { + using UPtr = std::unique_ptr< AsyncStub >; + + AsyncStub(StubPtr< ServiceT > stub, GrpcAsyncClientWorker* worker) : + m_stub(std::move(stub)), m_worker(worker) {} + + using stub_t = typename ServiceT::StubInterface; + + /* unary call helper */ + template < typename RespT > + using unary_call_return_t = std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< RespT > >; + + template < typename ReqT, typename RespT > + using unary_call_t = unary_call_return_t< RespT > (stub_t::*)(::grpc::ClientContext*, const ReqT&, + ::grpc::CompletionQueue*); + + // using unary_callback_t = std::function< void(RespT&, ::grpc::Status& status) >; + + /** + * Make a unary call. + * + * @param request - a request of this unary call. + * @param call - a pointer to a member function in grpc service stub + * which used to make an aync call. If service name is + * "EchoService" and an unary rpc is defined as: + * ` rpc Echo (EchoRequest) returns (EchoReply) {}` + * then the member function used here should be: + * `EchoService::StubInterface::AsyncEcho`. + * @param callback - the response handler function, which will be + * called after response received asynchronously or call failed(which + * would happen if the channel is either permanently broken or + * transiently broken, or call timeout). + * The callback function must check if `::grpc::Status` argument is + * OK before handling the response. If call failed, `::grpc::Status` + * indicates the error code and error message. + * @param deadline - deadline in seconds + * + */ + template < typename ReqT, typename RespT > + void call_unary(const ReqT& request, const unary_call_t< ReqT, RespT >& method, + const unary_callback_t< RespT >& callback, uint32_t deadline) { + auto data = new ClientRpcDataInternal< ReqT, RespT >(callback); + data->set_deadline(deadline); + // Note that async unary RPCs don't post a CQ tag in call + data->m_resp_reader_ptr = (m_stub.get()->*method)(&data->context(), request, cq()); + // CQ tag posted here + data->m_resp_reader_ptr->Finish(&data->reply(), &data->status(), (void*)data); + return; + } + + template < typename ReqT, typename RespT > + void call_rpc(const req_builder_cb_t< ReqT >& builder_cb, const unary_call_t< ReqT, RespT >& method, + const rpc_comp_cb_t< ReqT, RespT >& done_cb, uint32_t deadline) { + auto cd = new ClientRpcData< ReqT, RespT >(done_cb); + builder_cb(cd->m_req); + cd->set_deadline(deadline); + cd->m_resp_reader_ptr = (m_stub.get()->*method)(&cd->context(), cd->m_req, cq()); + cd->m_resp_reader_ptr->Finish(&cd->reply(), &cd->status(), (void*)cd); + } + + StubPtr< ServiceT > m_stub; + GrpcAsyncClientWorker* m_worker; + + const StubPtr< ServiceT >& stub() { return m_stub; } + + ::grpc::CompletionQueue* cq() { return &m_worker->cq(); } + }; + + template < typename T, typename... Ts > + static auto make(Ts&&... params) { + return std::make_unique< T >(std::forward< Ts >(params)...); + } + + template < typename ServiceT > + auto make_stub(const std::string& worker) { + auto w = GrpcAsyncClientWorker::get_worker(worker); + if (w == nullptr) { throw std::runtime_error("worker thread not available"); } + + return std::make_unique< AsyncStub< ServiceT > >(ServiceT::NewStub(m_channel), w); + } +}; + +} // namespace grpc_helper diff --git a/include/grpc_helper/rpc_common.hpp b/include/grpc_helper/rpc_common.hpp new file mode 100644 index 00000000..b07b649b --- /dev/null +++ b/include/grpc_helper/rpc_common.hpp @@ -0,0 +1,8 @@ +#pragma once + +namespace grpc_helper { +class GrpcServer; +struct RPCHelper { + static bool has_server_shutdown(const GrpcServer* server); +}; +} // namespace grpc_helper diff --git a/include/grpc_helper/rpc_server.hpp b/include/grpc_helper/rpc_server.hpp new file mode 100644 index 00000000..5f34c882 --- /dev/null +++ b/include/grpc_helper/rpc_server.hpp @@ -0,0 +1,99 @@ +#include +#include +#include + +#include +#include +#include +#include +#include "rpc_call.hpp" + +namespace grpc_helper { + +using rpc_thread_start_cb_t = std::function< void(uint32_t) >; + +ENUM(ServerState, uint8_t, VOID, INITED, RUNNING, SHUTTING_DOWN, TERMINATED); + +class GrpcServer : private boost::noncopyable { + friend class RPCHelper; + +public: + GrpcServer(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, + const std::string& ssl_cert); + virtual ~GrpcServer(); + + /** + * Create a new GrpcServer instance and initialize it. + */ + static GrpcServer* make(const std::string& listen_addr, uint32_t threads = 1, const std::string& ssl_key = "", + const std::string& ssl_cert = ""); + + void run(const rpc_thread_start_cb_t& thread_start_cb = nullptr); + void shutdown(); + bool is_terminated() const { return m_state.load(std::memory_order_acquire) == ServerState::TERMINATED; } + + template < typename ServiceT > + bool register_async_service() { + DEBUG_ASSERT_EQ(ServerState::INITED, m_state, "register service in non-INITED state"); + + auto name = ServiceT::service_full_name(); + if (m_services.find(name) != m_services.end()) { + LOGMSG_ASSERT(false, "Duplicate register async service"); + return false; + } + + auto svc = new typename ServiceT::AsyncService(); + m_builder.RegisterService(svc); + m_services.insert({name, svc}); + + return true; + } + + template < typename ServiceT, typename ReqT, typename RespT, bool streaming = false > + bool register_rpc(const std::string& name, const request_call_cb_t& request_call_cb, + const rpc_handler_cb_t& rpc_handler, const rpc_completed_cb_t& done_handler = nullptr) { + DEBUG_ASSERT_EQ(ServerState::RUNNING, m_state, "register service in non-INITED state"); + + auto it = m_services.find(ServiceT::service_full_name()); + if (it == m_services.end()) { + LOGMSG_ASSERT(false, "RPC registration attempted before service is registered"); + return false; + } + + auto svc = static_cast< typename ServiceT::AsyncService* >(it->second); + + size_t rpc_idx; + { + std::unique_lock lg(m_rpc_registry_mtx); + rpc_idx = m_rpc_registry.size(); + m_rpc_registry.emplace_back(new RpcStaticInfo< ServiceT, ReqT, RespT, false >( + this, *svc, request_call_cb, rpc_handler, done_handler, rpc_idx, name)); + + // Register one call per cq. + for (auto i = 0u; i < m_cqs.size(); ++i) { + auto rpc_call = RpcData< ServiceT, ReqT, RespT, false >::make( + (rpc_call_static_info_t*)m_rpc_registry[rpc_idx].get(), i); + rpc_call->enqueue_call_request(*m_cqs[i]); + } + } + + return true; + } + +private: + void handle_rpcs(uint32_t thread_num, const rpc_thread_start_cb_t& thread_start_cb); + +private: + std::atomic< ServerState > m_state{ServerState::VOID}; + uint32_t m_num_threads{0}; + ::grpc::ServerBuilder m_builder; + + std::unique_ptr< ::grpc::Server > m_server; + std::vector< std::shared_ptr< std::thread > > m_threads; + std::vector< std::unique_ptr< ::grpc::ServerCompletionQueue > > m_cqs; + + std::unordered_map< const char*, ::grpc::Service* > m_services; + std::mutex m_rpc_registry_mtx; + std::vector< std::unique_ptr< RpcStaticInfoBase > > m_rpc_registry; +}; +} // namespace grpc_helper diff --git a/include/sds_grpc/client.h b/include/sds_grpc/client.h deleted file mode 100644 index 2c1187fb..00000000 --- a/include/sds_grpc/client.h +++ /dev/null @@ -1,306 +0,0 @@ - -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "utils.h" - -namespace sds::grpc { - -using ::grpc::Channel; -using ::grpc::ClientAsyncResponseReader; -using ::grpc::ClientContext; -using ::grpc::CompletionQueue; -using ::grpc::Status; - -using namespace ::std::chrono; - -/** - * A interface for handling gRPC async response - */ -class ClientCallMethod : private boost::noncopyable { -public: - virtual ~ClientCallMethod() {} - - virtual void handle_response(bool ok = true) = 0; -}; - -/** - * The specialized 'ClientCallMethod' per gRPC call, it stores - * the response handler function - * - */ -template < typename TREQUEST, typename TREPLY > -class ClientCallData final : public ClientCallMethod { - - using handle_response_cb_t = std::function< void(TREPLY&, ::grpc::Status& status) >; - - using ResponseReaderType = std::unique_ptr<::grpc::ClientAsyncResponseReaderInterface< TREPLY > >; - -private: - /* Allow GrpcAsyncClient and its inner classes to use - * ClientCallData. - */ - friend class GrpcAsyncClient; - - ClientCallData(handle_response_cb_t handle_response_cb) : handle_response_cb_(handle_response_cb) {} - - // TODO: support time in any time unit -- lhuang8 - void set_deadline(uint32_t seconds) { - system_clock::time_point deadline = system_clock::now() + std::chrono::seconds(seconds); - context_.set_deadline(deadline); - } - - ResponseReaderType& responder_reader() { return response_reader_; } - - Status& status() { return status_; } - - TREPLY& reply() { return reply_; } - - ClientContext& context() { return context_; } - - virtual void handle_response([[maybe_unused]] bool ok = true) override { - // For unary call, ok is always true, `status_` will indicate error - // if there are any. - handle_response_cb_(reply_, status_); - } - -private: - handle_response_cb_t handle_response_cb_; - TREPLY reply_; - ClientContext context_; - Status status_; - ResponseReaderType response_reader_; -}; - -/** - * A GrpcBaseClient takes care of establish a channel to grpc - * server. The channel can be used by any number of grpc - * generated stubs. - * - */ -class GrpcBaseClient { -protected: - const std::string server_addr_; - const std::string target_domain_; - const std::string ssl_cert_; - - std::shared_ptr<::grpc::ChannelInterface > channel_; - -public: - GrpcBaseClient(const std::string& server_addr, const std::string& target_domain = "", - const std::string& ssl_cert = "") : - server_addr_(server_addr), target_domain_(target_domain), ssl_cert_(ssl_cert) {} - - virtual ~GrpcBaseClient() = default; - - virtual bool init(); - virtual bool is_connection_ready(); - -private: - virtual bool init_channel(); - - virtual bool load_ssl_cert(const std::string& ssl_cert, std::string& content); -}; - -class GrpcSyncClient : public GrpcBaseClient { -public: - using GrpcBaseClient::GrpcBaseClient; - - template < typename TSERVICE > - std::unique_ptr< typename TSERVICE::StubInterface > MakeStub() { - return TSERVICE::NewStub(channel_); - } -}; - -/** - * One GrpcBaseClient can have multiple stub - * - * The gRPC client worker, it owns a CompletionQueue and one or more threads, - * it's only used for handling asynchronous responses. - * - * The CompletionQueue is used to send asynchronous request, then the - * response will be handled on worker threads. - * - */ -class GrpcAyncClientWorker final { - - enum class State { VOID, INIT, RUNNING, SHUTTING_DOWN, TERMINATED }; - -public: - using UPtr = std::unique_ptr< GrpcAyncClientWorker >; - - GrpcAyncClientWorker(); - ~GrpcAyncClientWorker(); - - bool run(uint32_t num_threads); - - CompletionQueue& cq() { return completion_queue_; } - - /** - * Create a GrpcAyncClientWorker. - * - */ - static bool create_worker(const char* name, int num_thread); - - /** - * - * Get a pointer of GrpcAyncClientWorker by name. - */ - static GrpcAyncClientWorker* get_worker(const char* name); - - /** - * Must be called explicitly before program exit if any worker created. - */ - static void shutdown_all(); - -private: - /* - * Shutdown CompletionQueue and threads. - * - * For now, workers can only by shutdown by - * GrpcAyncClientWorker::shutdown_all(). - */ - void shutdown(); - - void async_complete_rpc(); - - static std::mutex mutex_workers; - static std::unordered_map< const char*, GrpcAyncClientWorker::UPtr > workers; - - State state_ = State::VOID; - CompletionQueue completion_queue_; - std::list< std::shared_ptr< std::thread > > threads_; -}; - -class GrpcAsyncClient : public GrpcBaseClient { -public: - template < typename TSERVICE > - using StubPtr = std::unique_ptr< typename TSERVICE::StubInterface >; - - /** - * AsyncStub is a wrapper of generated service stub. - * - * An AsyncStub is created with a GrpcAyncClientWorker, all responses - * of grpc async calls made on it will be handled on the - * GrpcAyncClientWorker's threads. - * - * Please use GrpcAsyncClient::make_stub() to create AsyncStub. - * - */ - template < typename TSERVICE > - struct AsyncStub { - using UPtr = std::unique_ptr< AsyncStub >; - - AsyncStub(StubPtr< TSERVICE > stub, GrpcAyncClientWorker* worker) : stub_(std::move(stub)), worker_(worker) {} - - using stub_t = typename TSERVICE::StubInterface; - - /* unary call helper */ - template < typename TRESPONSE > - using unary_call_return_t = std::unique_ptr<::grpc::ClientAsyncResponseReaderInterface< TRESPONSE > >; - - template < typename TREQUEST, typename TRESPONSE > - using unary_call_t = unary_call_return_t< TRESPONSE > (stub_t::*)(::grpc::ClientContext*, const TREQUEST&, - ::grpc::CompletionQueue*); - - template < typename TREQUEST, typename TRESPONSE > - using unary_callback_t = std::function< void(TRESPONSE&, ::grpc::Status& status) >; - - /** - * Make a unary call. - * - * @param request - a request of this unary call. - * @param call - a pointer to a member function in grpc service stub - * which used to make an aync call. If service name is - * "EchoService" and an unary rpc is defined as: - * ` rpc Echo (EchoRequest) returns (EchoReply) {}` - * then the member function used here should be: - * `EchoService::StubInterface::AsyncEcho`. - * @param callback - the response handler function, which will be - * called after response received asynchronously or call failed(which - * would happen if the channel is either permanently broken or - * transiently broken, or call timeout). - * The callback function must check if `::grpc::Status` argument is - * OK before handling the response. If call failed, `::grpc::Status` - * indicates the error code and error message. - * @param deadline - deadline in seconds - * - */ - template < typename TREQUEST, typename TRESPONSE > - void call_unary(const TREQUEST& request, unary_call_t< TREQUEST, TRESPONSE > call, - unary_callback_t< TREQUEST, TRESPONSE > callback, uint32_t deadline) { - - auto data = new ClientCallData< TREQUEST, TRESPONSE >(callback); - data->set_deadline(deadline); - // Note that async unary RPCs don't post a CQ tag in call - data->responder_reader() = (stub_.get()->*call)(&data->context(), request, cq()); - // CQ tag posted here - data->responder_reader()->Finish(&data->reply(), &data->status(), (void*)data); - - return; - } - - StubPtr< TSERVICE > stub_; - GrpcAyncClientWorker* worker_; - - const StubPtr< TSERVICE >& stub() { return stub_; } - - CompletionQueue* cq() { return &worker_->cq(); } - }; - - template < typename T, typename... Ts > - static auto make(Ts&&... params) { - std::unique_ptr< T > ret; - - if (!std::is_base_of< GrpcAsyncClient, T >::value) { return ret; } - - ret = std::make_unique< T >(std::forward< Ts >(params)...); - if (!ret->init()) { - ret.reset(nullptr); - return ret; - } - - return ret; - } - - template < typename TSERVICE > - auto make_stub(const char* worker) { - - typename AsyncStub< TSERVICE >::UPtr ret; - - auto w = GrpcAyncClientWorker::get_worker(worker); - BOOST_ASSERT(w); - if (!w) { - return ret; // null - } - - auto stub = TSERVICE::NewStub(channel_); - ret = std::make_unique< AsyncStub< TSERVICE > >(std::move(stub), w); - return ret; - } - - GrpcAsyncClient(const std::string& server_addr, const std::string& target_domain = "", - const std::string& ssl_cert = "") : - GrpcBaseClient(server_addr, target_domain, ssl_cert) {} - - virtual ~GrpcAsyncClient() {} -}; - -} // end of namespace sds::grpc diff --git a/include/sds_grpc/server.h b/include/sds_grpc/server.h deleted file mode 100644 index 5c5b6d79..00000000 --- a/include/sds_grpc/server.h +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Server.h - * - * Created on: Sep 19, 2018 - */ - -#pragma once - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include "utils.h" - -namespace sds::grpc { - -using ::grpc::Server; -using ::grpc::ServerAsyncResponseWriter; -using ::grpc::ServerBuilder; -using ::grpc::ServerCompletionQueue; -using ::grpc::ServerContext; -using ::grpc::Status; - -/** - * Defines the life cycle of handling a gRPC call. - * - */ -class BaseServerCallData { -public: - enum CallStatus { CREATE, PROCESS, FINISH }; - - CallStatus& status() { return status_; } - -public: - /** - * During the life cycle of this object, this method should be called - * 3 times with different status: - * - CREATE is the initial status, the object was just created, it request - * that the gRPC server start processing async requests. In this request, - * "this" is used as tag for uniquely identifying the request, so that - * different CallData instances can serve different requests - * concurrently. - * - PROCESS is for handling the request, e.g. the incoming request can be - * routed to a callback function. Once the handling is done, the gRPC - * runtime should be informed, e.g for unary calls, - * ServerAsyncResponseWriter::Finish() should be called. - * - FINISH is for destroy this object, gRPC server has sent the - * appropriate signals to the client to end the call. - */ - void proceed(bool ok = true); - -protected: - BaseServerCallData() : status_(CREATE) {} - - virtual ~BaseServerCallData() {} - - /** - * See BaseServerCallData::proceed() for semantics. - */ - virtual void do_create() = 0; - - /** - * See BaseServerCallData::proceed() for semantics. - */ - virtual void do_process() = 0; - - /** - * See BaseServerCallData::proceed() for semantics. - */ - virtual void do_finish(); - - CallStatus status_; -}; - -/** - * Each instance only handles one request, after that it will be destroyed; - * a new instance will be created automatically for handling next request. - * - */ -template < typename TSERVICE, typename TREQUEST, typename TRESPONSE > -class ServerCallData final : public BaseServerCallData { - - using request_call_func_t = std::function< void(TSERVICE*, ::grpc::ServerContext*, TREQUEST*, - ::grpc::ServerAsyncResponseWriter< TRESPONSE >*, - ::grpc::CompletionQueue*, ::grpc::ServerCompletionQueue*, void*) >; - - using handle_call_func_t = std::function<::grpc::Status(TREQUEST&, TRESPONSE&) >; - - using T = ServerCallData< TSERVICE, TREQUEST, TRESPONSE >; - -private: - friend class GrpcServer; - - ServerCallData(TSERVICE* service, ::grpc::ServerCompletionQueue* cq, request_call_func_t wait_request, - handle_call_func_t handle_request) : - BaseServerCallData(), - service_(service), - cq_(cq), - responder_(&context_), - wait_request_func_(wait_request), - handle_request_func_(handle_request) {} - - ::grpc::ServerAsyncResponseWriter< TRESPONSE >& responder() { return responder_; } - -protected: - ServerContext context_; - - TSERVICE* service_; - // The producer-consumer queue where for asynchronous server notifications. - ::grpc::ServerCompletionQueue* cq_; - - TREQUEST request_; - TRESPONSE reponse_; - ::grpc::ServerAsyncResponseWriter< TRESPONSE > responder_; - - request_call_func_t wait_request_func_; - handle_call_func_t handle_request_func_; - - void do_create() { wait_request_func_(service_, &context_, &request_, &responder_, cq_, cq_, this); } - - void do_process() { - (new T(service_, cq_, wait_request_func_, handle_request_func_))->proceed(); - // LOGDEBUGMOD(GRPC, "receive {}", request_.GetTypeName()); - - ::grpc::Status status = handle_request_func_(request_, reponse_); - responder_.Finish(reponse_, status, this); - } -}; - -class GrpcServer : private boost::noncopyable { - - enum State { VOID, INITED, RUNNING, SHUTTING_DOWN, TERMINATED }; - -private: - GrpcServer(); - - bool init(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, - const std::string& ssl_cert); - -public: - virtual ~GrpcServer(); - - /** - * Create a new GrpcServer instance and initialize it. - */ - static GrpcServer* make(const std::string& listen_addr, uint32_t threads = 1, const std::string& ssl_key = "", - const std::string& ssl_cert = ""); - - bool run(); - - void shutdown(); - - bool is_terminated() { return state_ == State::TERMINATED; } - - ::grpc::ServerCompletionQueue* completion_queue() { return cq_.get(); } - - template < typename TSVC > - bool register_async_service() { - - BOOST_ASSERT_MSG(State::INITED == state_, "register service in non-INITED state"); - - auto name = TSVC::service_full_name(); - - BOOST_ASSERT_MSG(services_.find(name) == services_.end(), "Double register async service."); - if (services_.find(name) != services_.end()) { return false; } - - auto svc = new typename TSVC::AsyncService(); - builder_.RegisterService(svc); - services_.insert({name, svc}); - - return true; - } - - template < typename TSVC, typename TREQUEST, typename TRESPONSE > - bool register_rpc(std::function< void(typename TSVC::AsyncService*, ::grpc::ServerContext*, TREQUEST*, - ::grpc::ServerAsyncResponseWriter< TRESPONSE >*, ::grpc::CompletionQueue*, - ::grpc::ServerCompletionQueue*, void*) > - request_call_func, - std::function<::grpc::Status(TREQUEST&, TRESPONSE&) > handle_request_func) { - - BOOST_ASSERT_MSG(State::RUNNING == state_, "register service in non-INITED state"); - - auto it = services_.find(TSVC::service_full_name()); - if (it == services_.end()) { - BOOST_ASSERT_MSG(false, "service not registered"); - return false; - } - - auto svc = static_cast< typename TSVC::AsyncService* >(it->second); - (new ServerCallData< typename TSVC::AsyncService, TREQUEST, TRESPONSE >(svc, cq_.get(), request_call_func, - handle_request_func)) - ->proceed(); - - return true; - } - -private: - /* - * This can be called by multiple threads - */ - void handle_rpcs(); - - void process(BaseServerCallData* cm); - - State state_ = State::VOID; - - uint32_t thread_num_ = 0; - - ServerBuilder builder_; - - std::unique_ptr<::grpc::ServerCompletionQueue > cq_; - std::unique_ptr< Server > server_; - std::list< std::shared_ptr< std::thread > > threads_; - - std::unordered_map< const char*, ::grpc::Service* > services_; -}; - -} // namespace sds::grpc diff --git a/include/sds_grpc/utils.h b/include/sds_grpc/utils.h deleted file mode 100644 index 4e071dd2..00000000 --- a/include/sds_grpc/utils.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * utils.h - * - * Created on: Sep 25, 2018 - */ - -#pragma once - -#include - -namespace sds::grpc { - -bool get_file_contents(const std::string& file_name, std::string& contents); - -} - diff --git a/lib/client.cpp b/lib/client.cpp deleted file mode 100644 index f062c001..00000000 --- a/lib/client.cpp +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Client.cpp - * - * Created on: Sep 19, 2018 - */ - -#include "sds_grpc/client.h" - -#ifdef _POSIX_THREADS -#ifndef __APPLE__ -extern "C" { -#include -} -#endif -#endif - -namespace sds::grpc { - -bool GrpcBaseClient::init() { - if (!init_channel()) { return false; } - - return true; -} - -bool GrpcBaseClient::init_channel() { - - ::grpc::SslCredentialsOptions ssl_opts; - - if (!ssl_cert_.empty()) { - - if (load_ssl_cert(ssl_cert_, ssl_opts.pem_root_certs)) { - ::grpc::ChannelArguments channel_args; - channel_args.SetSslTargetNameOverride(target_domain_); - channel_ = ::grpc::CreateCustomChannel(server_addr_, ::grpc::SslCredentials(ssl_opts), channel_args); - } else { - return false; - } - } else { - channel_ = ::grpc::CreateChannel(server_addr_, ::grpc::InsecureChannelCredentials()); - } - - return true; -} - -bool GrpcBaseClient::load_ssl_cert(const std::string& ssl_cert, std::string& content) { - return ::sds::grpc::get_file_contents(ssl_cert, content); - ; -} - -bool GrpcBaseClient::is_connection_ready() { - return (channel_->GetState(true) == grpc_connectivity_state::GRPC_CHANNEL_READY); -} - -std::mutex GrpcAyncClientWorker::mutex_workers; -std::unordered_map< const char*, GrpcAyncClientWorker::UPtr > GrpcAyncClientWorker::workers; - -GrpcAyncClientWorker::GrpcAyncClientWorker() { state_ = State::INIT; } - -GrpcAyncClientWorker::~GrpcAyncClientWorker() { shutdown(); } - -void GrpcAyncClientWorker::shutdown() { - if (state_ == State::RUNNING) { - completion_queue_.Shutdown(); - state_ = State::SHUTTING_DOWN; - - for (auto& it : threads_) { - it->join(); - } - - state_ = State::TERMINATED; - } - - return; -} - -bool GrpcAyncClientWorker::run(uint32_t num_threads) { - BOOST_ASSERT(State::INIT == state_); - - if (num_threads == 0) { return false; } - - for (uint32_t i = 0; i < num_threads; ++i) { - std::shared_ptr< std::thread > t = - std::shared_ptr< std::thread >(new std::thread(&GrpcAyncClientWorker::async_complete_rpc, this)); -#ifdef _POSIX_THREADS -#ifndef __APPLE__ - auto tname = std::string("grpc_client").substr(0, 15); - pthread_setname_np(t->native_handle(), tname.c_str()); -#endif /* __APPLE__ */ -#endif /* _POSIX_THREADS */ - threads_.push_back(t); - } - - state_ = State::RUNNING; - return true; -} - -void GrpcAyncClientWorker::async_complete_rpc() { - void* tag; - bool ok = false; - while (completion_queue_.Next(&tag, &ok)) { - // For client-side unary call, `ok` is always true, - // even server is not running - ClientCallMethod* cm = static_cast< ClientCallMethod* >(tag); - cm->handle_response(ok); - delete cm; - } -} - -bool GrpcAyncClientWorker::create_worker(const char* name, int num_thread) { - std::lock_guard< std::mutex > lock(mutex_workers); - - if (auto it = workers.find(name); it != workers.end()) { return true; } - - auto worker = std::make_unique< GrpcAyncClientWorker >(); - if (!worker->run(num_thread)) { return false; } - - workers.insert(std::make_pair(name, std::move(worker))); - return true; -} - -GrpcAyncClientWorker* GrpcAyncClientWorker::get_worker(const char* name) { - std::lock_guard< std::mutex > lock(mutex_workers); - - auto it = workers.find(name); - if (it == workers.end()) { return nullptr; } - - return it->second.get(); -} - -void GrpcAyncClientWorker::shutdown_all() { - std::lock_guard< std::mutex > lock(mutex_workers); - - for (auto& it : workers) { - it.second->shutdown(); - // release worker, the completion queue holds by it need to - // be destroyed before grpc lib internal object - // g_core_codegen_interface - it.second.reset(); - } -} - -} // namespace sds::grpc - diff --git a/lib/rpc_client.cpp b/lib/rpc_client.cpp new file mode 100644 index 00000000..88c168f5 --- /dev/null +++ b/lib/rpc_client.cpp @@ -0,0 +1,107 @@ +#include "grpc_helper/rpc_client.hpp" + +namespace grpc_helper { + +GrpcBaseClient::GrpcBaseClient(const std::string& server_addr, const std::string& target_domain, + const std::string& ssl_cert) : + m_server_addr(server_addr), m_target_domain(target_domain), m_ssl_cert(ssl_cert) { + ::grpc::SslCredentialsOptions ssl_opts; + if (!m_ssl_cert.empty()) { +#if 0 + if (load_ssl_cert(ssl_cert_, ssl_opts.pem_root_certs)) { + ::grpc::ChannelArguments channel_args; + channel_args.SetSslTargetNameOverride(m_target_domain); + m_channel = ::grpc::CreateCustomChannel(m_server_addr, ::grpc::SslCredentials(ssl_opts), channel_args); + } else { + throw std::runtime_error("Unable to load ssl certification for grpc client"); + } +#endif + } else { + m_channel = ::grpc::CreateChannel(m_server_addr, ::grpc::InsecureChannelCredentials()); + } +} + +#if 0 +bool GrpcBaseClient::load_ssl_cert(const std::string& ssl_cert, std::string& content) { + return ::sds::grpc::get_file_contents(ssl_cert, content); +} +#endif + +bool GrpcBaseClient::is_connection_ready() const { + return (m_channel->GetState(true) == grpc_connectivity_state::GRPC_CHANNEL_READY); +} + +std::mutex GrpcAsyncClientWorker::s_workers_mtx; +std::unordered_map< std::string, GrpcAsyncClientWorker::UPtr > GrpcAsyncClientWorker::s_workers; + +GrpcAsyncClientWorker::~GrpcAsyncClientWorker() { shutdown(); } +void GrpcAsyncClientWorker::shutdown() { + if (m_state == ClientState::RUNNING) { + m_cq.Shutdown(); + m_state = ClientState::SHUTTING_DOWN; + + for (auto& thr : m_threads) { + thr.join(); + } + + m_state = ClientState::TERMINATED; + } + + return; +} + +void GrpcAsyncClientWorker::run(uint32_t num_threads) { + LOGMSG_ASSERT_EQ(ClientState::INIT, m_state); + + if (num_threads == 0) { throw(std::invalid_argument("Need atleast one worker thread")); } + for (uint32_t i = 0u; i < num_threads; ++i) { + m_threads.emplace_back(&GrpcAsyncClientWorker::client_loop, this); + } + + m_state = ClientState::RUNNING; +} + +void GrpcAsyncClientWorker::client_loop() { +#ifdef _POSIX_THREADS +#ifndef __APPLE__ + auto tname = std::string("grpc_client").substr(0, 15); + pthread_setname_np(pthread_self(), tname.c_str()); +#endif /* __APPLE__ */ +#endif /* _POSIX_THREADS */ + + void* tag; + bool ok = false; + while (m_cq.Next(&tag, &ok)) { + // For client-side unary call, `ok` is always true, even server is not running + auto cm = static_cast< ClientRpcDataAbstract* >(tag); + cm->handle_response(ok); + delete cm; + } +} + +void GrpcAsyncClientWorker::create_worker(const std::string& name, int num_threads) { + std::lock_guard< std::mutex > lock(s_workers_mtx); + if (s_workers.find(name) != s_workers.end()) { return; } + + auto worker = std::make_unique< GrpcAsyncClientWorker >(); + worker->run(num_threads); + s_workers.insert(std::make_pair(name, std::move(worker))); +} + +GrpcAsyncClientWorker* GrpcAsyncClientWorker::get_worker(const std::string& name) { + std::lock_guard< std::mutex > lock(s_workers_mtx); + auto it = s_workers.find(name); + if (it == s_workers.end()) { return nullptr; } + return it->second.get(); +} + +void GrpcAsyncClientWorker::shutdown_all() { + std::lock_guard< std::mutex > lock(s_workers_mtx); + for (auto& it : s_workers) { + it.second->shutdown(); + // release worker, the completion queue holds by it need to be destroyed before grpc lib internal object + // g_core_codegen_interface + it.second.reset(); + } +} +} // namespace grpc_helper diff --git a/lib/rpc_server.cpp b/lib/rpc_server.cpp new file mode 100644 index 00000000..fc927a91 --- /dev/null +++ b/lib/rpc_server.cpp @@ -0,0 +1,127 @@ +/* + * server.cpp + * + * Created on: Oct 24, 2018 + */ + +#include + +#ifdef _POSIX_THREADS +#ifndef __APPLE__ +extern "C" { +#include +} +#endif +#endif + +#include + +namespace grpc_helper { + +GrpcServer::GrpcServer(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, + const std::string& ssl_cert) : + m_num_threads{threads} { + if (listen_addr.empty() || threads == 0) { throw std::invalid_argument("Invalid parameter to start grpc server"); } + +#if 0 + if (!ssl_cert.empty() && !ssl_key.empty()) { + std::string key_contents; + std::string cert_contents; + get_file_contents(ssl_cert, cert_contents); + get_file_contents(ssl_key, key_contents); + + if (cert_contents.empty() || key_contents.empty()) { return false; } + + ::grpc::SslServerCredentialsOptions::PemKeyCertPair pkcp = {key_contents, cert_contents}; + ::grpc::SslServerCredentialsOptions ssl_opts; + ssl_opts.pem_root_certs = ""; + ssl_opts.pem_key_cert_pairs.push_back(pkcp); + + m_builder.AddListeningPort(listen_addr, ::grpc::SslServerCredentials(ssl_opts)); + } else { + m_builder.AddListeningPort(listen_addr, ::grpc::InsecureServerCredentials()); + } +#else + m_builder.AddListeningPort(listen_addr, ::grpc::InsecureServerCredentials()); +#endif + + // Create one cq per thread + for (auto i = 0u; i < threads; ++i) { + m_cqs.emplace_back(m_builder.AddCompletionQueue()); + } + + m_state.store(ServerState::INITED); +} + +GrpcServer::~GrpcServer() { + shutdown(); + for (auto& [k, v] : m_services) { + (void)k; + delete v; + } +} + +GrpcServer* GrpcServer::make(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, + const std::string& ssl_cert) { + return new GrpcServer(listen_addr, threads, ssl_key, ssl_cert); +} + +void GrpcServer::run(const rpc_thread_start_cb_t& thread_start_cb) { + LOGMSG_ASSERT_EQ(m_state.load(std::memory_order_relaxed), ServerState::INITED, "Grpcserver duplicate run?"); + + m_server = m_builder.BuildAndStart(); + + for (uint32_t i = 0; i < m_num_threads; ++i) { + auto t = std::make_shared< std::thread >(&GrpcServer::handle_rpcs, this, i, thread_start_cb); +#ifdef _POSIX_THREADS +#ifndef __APPLE__ + auto tname = std::string("grpc_server").substr(0, 15); + pthread_setname_np(t->native_handle(), tname.c_str()); +#endif /* __APPLE__ */ +#endif /* _POSIX_THREADS */ + m_threads.push_back(t); + } + + m_state.store(ServerState::RUNNING); +} + +void GrpcServer::handle_rpcs(uint32_t thread_num, const rpc_thread_start_cb_t& thread_start_cb) { + void* tag; + bool ok = false; + + if (thread_start_cb) { thread_start_cb(thread_num); } + + while (m_cqs[thread_num]->Next(&tag, &ok)) { + // `ok` is true if read a successful event, false otherwise. + [[likely]] if (tag != nullptr) { + // Process the rpc and refill the cq with a new rpc call + auto new_rpc_call = static_cast< RpcTag* >(tag)->process(ok); + if (new_rpc_call != nullptr) { new_rpc_call->enqueue_call_request(*m_cqs[new_rpc_call->m_queue_idx]); } + } + } +} + +void GrpcServer::shutdown() { + if (m_state.load() == ServerState::RUNNING) { + m_state.store(ServerState::SHUTTING_DOWN); + + m_server->Shutdown(); + for (auto& cq : m_cqs) { + cq->Shutdown(); // Always *after* the associated server's Shutdown()! + } + + m_server->Wait(); + // drain the cq_ + for (auto& thr : m_threads) { + if (thr->joinable()) thr->join(); + } + + m_state.store(ServerState::TERMINATED); + } +} + +bool RPCHelper::has_server_shutdown(const GrpcServer* server) { + return (server->m_state.load(std::memory_order_acquire) != ServerState::RUNNING); +} + +} // namespace grpc_helper diff --git a/lib/server.cpp b/lib/server.cpp deleted file mode 100644 index d0732c76..00000000 --- a/lib/server.cpp +++ /dev/null @@ -1,177 +0,0 @@ -/* - * server.cpp - * - * Created on: Oct 24, 2018 - */ - -#include - -#ifdef _POSIX_THREADS -#ifndef __APPLE__ -extern "C" { -#include -} -#endif -#endif - -#include - -namespace sds::grpc { - -void BaseServerCallData::proceed(bool ok) { - if (!ok && status_ != FINISH) { - // for unary call, there are two cases ok can be false in server-side: - // - Server-side RPC request: the server has been Shutdown - // before this particular call got matched to an incoming RPC. - // Call data should be released in this case. - // - Server-side Finish: response not going to the wire because - // the call is already dead (i.e., canceled, deadline expired, - // other side dropped the channel, etc) - // In this case, not only this call data should be released, - // server-side may need to handle the error, e.g roll back the - // grpc call's operation. This version sds_grpc doesn't expose - // API for handling this case, such API will be provided in next - // version of this library. - status_ = FINISH; - } - - if (status_ == CREATE) { - status_ = PROCESS; - do_create(); - } else if (status_ == PROCESS) { - // status must be changed firstly, otherwise this may - // cause concurrency issue with multi-threads - status_ = FINISH; - do_process(); - } else { - do_finish(); - } -} - -void BaseServerCallData::do_finish() { - GPR_ASSERT(status_ == FINISH); - // Once in the FINISH state, this can be destroyed - delete this; -} - -GrpcServer::GrpcServer() {} - -GrpcServer::~GrpcServer() { - shutdown(); - - for (auto [k, v] : services_) { - (void)k; - delete v; - } - - services_.clear(); -} - -bool GrpcServer::init(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, - const std::string& ssl_cert) { - BOOST_ASSERT(State::VOID == state_); - - if (listen_addr.empty() || threads == 0) { return false; } - - thread_num_ = threads; - - if (!ssl_cert.empty() && !ssl_key.empty()) { - std::string key_contents; - std::string cert_contents; - get_file_contents(ssl_cert, cert_contents); - get_file_contents(ssl_key, key_contents); - - if (cert_contents.empty() || key_contents.empty()) { return false; } - - ::grpc::SslServerCredentialsOptions::PemKeyCertPair pkcp = {key_contents, cert_contents}; - ::grpc::SslServerCredentialsOptions ssl_opts; - ssl_opts.pem_root_certs = ""; - ssl_opts.pem_key_cert_pairs.push_back(pkcp); - - builder_.AddListeningPort(listen_addr, ::grpc::SslServerCredentials(ssl_opts)); - } else { - builder_.AddListeningPort(listen_addr, ::grpc::InsecureServerCredentials()); - } - - cq_ = builder_.AddCompletionQueue(); - - state_ = State::INITED; - return true; -} - -GrpcServer* GrpcServer::make(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, - const std::string& ssl_cert) { - auto ret = new GrpcServer(); - if (!ret->init(listen_addr, threads, ssl_key, ssl_cert)) { - delete ret; - return nullptr; - } - - return ret; -} - -bool GrpcServer::run() { - BOOST_ASSERT(State::INITED == state_); - - server_ = builder_.BuildAndStart(); - - for (uint32_t i = 0; i < thread_num_; ++i) { - auto t = std::shared_ptr< std::thread >(new std::thread(&GrpcServer::handle_rpcs, this)); -#ifdef _POSIX_THREADS -#ifndef __APPLE__ - auto tname = std::string("grpc_server").substr(0, 15); - pthread_setname_np(t->native_handle(), tname.c_str()); -#endif /* __APPLE__ */ -#endif /* _POSIX_THREADS */ - threads_.push_back(t); - } - - state_ = State::RUNNING; - return true; -} - -void GrpcServer::handle_rpcs() { - void* tag; - bool ok = false; - - while (cq_->Next(&tag, &ok)) { - // `ok` is true if read a successful event, false otherwise. - // Success here means that this operation completed in the normal - // valid manner. - - // This version of sds_grpc only support unary grpc call, so only - // two cases need to be considered: - // - // Server-side RPC request: \a ok indicates that the RPC has indeed - // been started. If it is false, the server has been Shutdown - // before this particular call got matched to an incoming RPC. - // - // Server-side Finish: ok means that the data/metadata/status/etc is - // going to go to the wire. - // If it is false, it not going to the wire because the call - // is already dead (i.e., canceled, deadline expired, other side - // dropped the channel, etc). - - BaseServerCallData* cm = static_cast< BaseServerCallData* >(tag); - cm->proceed(ok); - } -} - -void GrpcServer::shutdown() { - if (state_ == State::RUNNING) { - server_->Shutdown(); - cq_->Shutdown(); // Always *after* the associated server's Shutdown()! - state_ = State::SHUTTING_DOWN; - - // drain the cq_ - for (auto& it : threads_) { - it->join(); - } - - state_ = State::TERMINATED; - } - - return; -} - -} // namespace sds::grpc diff --git a/lib/utils.cpp b/lib/utils.cpp deleted file mode 100644 index 0251b436..00000000 --- a/lib/utils.cpp +++ /dev/null @@ -1,29 +0,0 @@ -/* - * utils.cpp - * - * Created on: Sep 25, 2018 - */ - -#include "sds_grpc/utils.h" -#include -#include - -namespace sds::grpc { - -bool get_file_contents(const std::string& file_name, std::string& contents) { - try { - std::ifstream in(file_name.c_str(), std::ios::in); - if (in) { - std::ostringstream t; - t << in.rdbuf(); - in.close(); - - contents = t.str(); - return true; - } - } catch (...) {} - return false; -} - -} // namespace sds::grpc - diff --git a/tests/function/CMakeLists.txt b/tests/function/CMakeLists.txt index 43de1108..b9991bff 100644 --- a/tests/function/CMakeLists.txt +++ b/tests/function/CMakeLists.txt @@ -1,24 +1,24 @@ include_directories(${CMAKE_CURRENT_BINARY_DIR}/../proto) -set(FUNCTION_TEST_LIBS sds_grpc test_proto +set(FUNCTION_TEST_LIBS grpc_helper test_proto ${CONAN_LIBS}) # build echo_server add_executable(echo_server echo_server.cpp) -add_dependencies(echo_server sds_grpc test_proto) +add_dependencies(echo_server grpc_helper test_proto) target_link_libraries(echo_server ${FUNCTION_TEST_LIBS} ) # build echo_sync_client -add_executable(echo_sync_client echo_sync_client.cpp) -add_dependencies(echo_sync_client sds_grpc test_proto) -target_link_libraries(echo_sync_client ${FUNCTION_TEST_LIBS} ) +#add_executable(echo_sync_client echo_sync_client.cpp) +#add_dependencies(echo_sync_client grpc_helper test_proto) +#target_link_libraries(echo_sync_client ${FUNCTION_TEST_LIBS} ) # build echo_async_client add_executable(echo_async_client echo_async_client.cpp) -add_dependencies(echo_async_client sds_grpc test_proto) +add_dependencies(echo_async_client grpc_helper test_proto) target_link_libraries(echo_async_client ${FUNCTION_TEST_LIBS} ) diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp index 9c507bb0..4a24b729 100644 --- a/tests/function/echo_async_client.cpp +++ b/tests/function/echo_async_client.cpp @@ -1,150 +1,220 @@ -/* - * echo_async_client.cpp - * - * Created on: Oct 9, 2018 - */ - -#include #include #include -#include #include #include #include -#include -#include +#include #include #include -#include "sds_grpc/client.h" -#include "sds_grpc_test.grpc.pb.h" +#include "grpc_helper/rpc_client.hpp" +#include "grpc_helper/rpc_server.hpp" +#include "grpc_helper_test.grpc.pb.h" -using namespace ::grpc; -using namespace ::sds::grpc; -using namespace ::sds_grpc_test; +using namespace grpc_helper; +using namespace ::grpc_helper_test; using namespace std::placeholders; -#define WORKER_NAME "worker-1" - -class EchoAndPingAsyncClient : GrpcAsyncClient { - +class TestClient { public: - using GrpcAsyncClient::GrpcAsyncClient; - - virtual bool init() { - if (!GrpcAsyncClient::init()) { return false; } - - echo_stub_ = make_stub< EchoService >(WORKER_NAME); - ping_stub_ = make_stub< PingService >(WORKER_NAME); - - return true; + static constexpr int GRPC_CALL_COUNT = 100; + const std::string WORKER_NAME{"Worker-1"}; + + void validate_echo_reply(const EchoRequest& req, EchoReply& reply, ::grpc::Status& status) { + RELEASE_ASSERT_EQ(status.ok(), true, "echo request {} failed, status {}: {}", req.message(), + status.error_code(), status.error_message()); + LOGDEBUGMOD(grpc_server, "echo request {} reply {}", req.message(), reply.message()); + RELEASE_ASSERT_EQ(req.message(), reply.message()); + { + std::unique_lock lk(m_wait_mtx); + if (--m_echo_counter == 0) { m_cv.notify_all(); } + } } - void Echo(const EchoRequest& request, std::function< void(EchoReply&, ::grpc::Status& status) > callback) { - - echo_stub_->call_unary< EchoRequest, EchoReply >(request, &EchoService::StubInterface::AsyncEcho, callback, 1); + void validate_ping_reply(const PingRequest& req, PingReply& reply, ::grpc::Status& status) { + RELEASE_ASSERT_EQ(status.ok(), true, "ping request {} failed, status {}: {}", req.seqno(), status.error_code(), + status.error_message()); + LOGDEBUGMOD(grpc_server, "ping request {} reply {}", req.seqno(), reply.seqno()); + RELEASE_ASSERT_EQ(req.seqno(), reply.seqno()); + { + std::unique_lock lk(m_wait_mtx); + if (--m_ping_counter == 0) { m_cv.notify_all(); } + } } - void Ping(const PingRequest& request, std::function< void(PingReply&, ::grpc::Status& status) > callback) { + void run(const std::string& server_address) { + auto client = std::make_unique< GrpcAsyncClient >(server_address, "", ""); + GrpcAsyncClientWorker::create_worker(WORKER_NAME, 4); + + auto echo_stub = client->make_stub< EchoService >(WORKER_NAME); + auto ping_stub = client->make_stub< PingService >(WORKER_NAME); + + m_ping_counter = GRPC_CALL_COUNT; + m_echo_counter = GRPC_CALL_COUNT; + for (int i = 1; i <= GRPC_CALL_COUNT * 2; ++i) { + if ((i % 2) == 0) { + if ((i % 4) == 0) { + EchoRequest req; + req.set_message(std::to_string(i)); + echo_stub->call_unary< EchoRequest, EchoReply >( + req, &EchoService::StubInterface::AsyncEcho, + [req, this](EchoReply& reply, ::grpc::Status& status) { + validate_echo_reply(req, reply, status); + }, + 1); + } else { + echo_stub->call_rpc< EchoRequest, EchoReply >( + [i](EchoRequest& req) { req.set_message(std::to_string(i)); }, + &EchoService::StubInterface::AsyncEcho, + [this](ClientRpcData< EchoRequest, EchoReply >& cd) { + validate_echo_reply(cd.req(), cd.reply(), cd.status()); + }, + 1); + } + } else { + if ((i % 3) == 0) { + PingRequest req; + req.set_seqno(i); + ping_stub->call_unary< PingRequest, PingReply >( + req, &PingService::StubInterface::AsyncPing, + [req, this](PingReply& reply, ::grpc::Status& status) { + validate_ping_reply(req, reply, status); + }, + 1); + } else { + ping_stub->call_rpc< PingRequest, PingReply >( + [i](PingRequest& req) { req.set_seqno(i); }, &PingService::StubInterface::AsyncPing, + [this](ClientRpcData< PingRequest, PingReply >& cd) { + validate_ping_reply(cd.req(), cd.reply(), cd.status()); + }, + 1); + } + } + } + } - ping_stub_->call_unary< PingRequest, PingReply >(request, &PingService::StubInterface::AsyncPing, callback, 1); + void wait() { + std::unique_lock lk(m_wait_mtx); + m_cv.wait(lk, [this]() { return ((m_echo_counter == 0) && (m_ping_counter == 0)); }); + GrpcAsyncClientWorker::shutdown_all(); } - AsyncStub< EchoService >::UPtr echo_stub_; - AsyncStub< PingService >::UPtr ping_stub_; +private: + int m_echo_counter; + int m_ping_counter; + std::mutex m_wait_mtx; + std::condition_variable m_cv; }; -std::atomic_int g_echo_counter; -std::atomic_int g_ping_counter; - -/** - * Echo implements async response handler. - */ -class Echo { +class TestServer { public: - Echo(int seqno) { request_.set_message(std::to_string(seqno)); } + class EchoServiceImpl { + public: + virtual ~EchoServiceImpl() = default; + + virtual bool echo_request(const AsyncRpcDataPtr< EchoService, EchoRequest, EchoReply >& rpc_data) { + LOGDEBUGMOD(grpc_server, "receive echo request {}", rpc_data->request().message()); + rpc_data->response().set_message(rpc_data->request().message()); + return true; + } + + bool register_service(GrpcServer* server) { + if (!server->register_async_service< EchoService >()) { + LOGERROR("register service failed"); + return false; + } - void handle_echo_reply(EchoReply& reply, ::grpc::Status& status) { - if (!status.ok()) { - LOGERROR("echo request {} failed, status {}: {}", request_.message(), status.error_code(), - status.error_message()); - return; + return true; } - LOGINFO("echo request {} reply {}", request_.message(), reply.message()); + bool register_rpcs(GrpcServer* server) { + LOGINFO("register rpc calls"); + if (!server->register_rpc< EchoService, EchoRequest, EchoReply, false >( + "Echo", &EchoService::AsyncService::RequestEcho, + std::bind(&EchoServiceImpl::echo_request, this, _1))) { + LOGERROR("register rpc failed"); + return false; + } - assert(request_.message() == reply.message()); - g_echo_counter.fetch_add(1, std::memory_order_relaxed); - } + return true; + } + }; - EchoRequest request_; -}; + class PingServiceImpl { + public: + virtual ~PingServiceImpl() = default; -#define GRPC_CALL_COUNT 10 + virtual bool ping_request(const AsyncRpcDataPtr< PingService, PingRequest, PingReply >& rpc_data) { + LOGDEBUGMOD(grpc_server, "receive ping request {}", rpc_data->request().seqno()); + rpc_data->response().set_seqno(rpc_data->request().seqno()); + return true; + } -int RunClient(const std::string& server_address) { + bool register_service(GrpcServer* server) { + if (!server->register_async_service< PingService >()) { + LOGERROR("register ping service failed"); + return false; + } + return true; + } - GrpcAyncClientWorker::create_worker(WORKER_NAME, 4); + bool register_rpcs(GrpcServer* server) { + LOGINFO("register rpc calls"); + if (!server->register_rpc< PingService, PingRequest, PingReply, false >( + "Ping", &PingService::AsyncService::RequestPing, + std::bind(&PingServiceImpl::ping_request, this, _1))) { + LOGERROR("register ping rpc failed"); + return false; + } - auto client = GrpcAsyncClient::make< EchoAndPingAsyncClient >(server_address, "", ""); - if (!client) { - LOGCRITICAL("Create async client failed."); - return -1; - } + return true; + } + }; - for (int i = 0; i < GRPC_CALL_COUNT; i++) { - if (i % 2 == 0) { - // Async response handling logic can be put in a class's member - // function, then use a lambda to wrap it. - Echo* echo = new Echo(i); - client->Echo(echo->request_, [echo](EchoReply& reply, ::grpc::Status& status) { - echo->handle_echo_reply(reply, status); - delete echo; - }); - - // std::bind() can also be used, but need to take care releasing - // 'echo' additionally: - // std::bind(&Echo::handle_echo_reply, echo, _1, _2); - - } else { - PingRequest* request = new PingRequest; - request->set_seqno(i); - - // response can be handled with lambda directly - client->Ping(*request, [request](PingReply& reply, ::grpc::Status& status) { - if (!status.ok()) { - LOGERROR("ping request {} failed, status {}: {}", request->seqno(), status.error_code(), - status.error_message()); - return; - } + void start(const std::string& server_address) { + LOGINFO("Start echo and ping server on {}...", server_address); + m_grpc_server = GrpcServer::make(server_address, 4, "", ""); + m_echo_impl = new EchoServiceImpl(); + m_echo_impl->register_service(m_grpc_server); - LOGINFO("ping request {} reply {}", request->seqno(), reply.seqno()); + m_ping_impl = new PingServiceImpl(); + m_ping_impl->register_service(m_grpc_server); - assert(request->seqno() == reply.seqno()); - g_ping_counter.fetch_add(1, std::memory_order_relaxed); - delete request; - }); - } + m_grpc_server->run(); + LOGINFO("Server listening on {}", server_address); + + m_echo_impl->register_rpcs(m_grpc_server); + m_ping_impl->register_rpcs(m_grpc_server); } - GrpcAyncClientWorker::shutdown_all(); + void shutdown() { + LOGINFO("Shutting down grpc server"); + m_grpc_server->shutdown(); + } - return g_echo_counter.load() + g_ping_counter.load(); -} +private: + GrpcServer* m_grpc_server = nullptr; + EchoServiceImpl* m_echo_impl = nullptr; + PingServiceImpl* m_ping_impl = nullptr; +}; -SDS_LOGGING_INIT() +SDS_LOGGING_INIT(logging, grpc_server) SDS_OPTIONS_ENABLE(logging) +THREAD_BUFFER_INIT int main(int argc, char** argv) { SDS_OPTIONS_LOAD(argc, argv, logging) sds_logging::SetLogger("async_client"); + + TestServer server; std::string server_address("0.0.0.0:50051"); + server.start(server_address); - if (RunClient(server_address) != GRPC_CALL_COUNT) { - LOGERROR("Only {} calls are successful", GRPC_CALL_COUNT); - return 1; - } + TestClient client; + client.run(server_address); + client.wait(); + server.shutdown(); return 0; } diff --git a/tests/function/echo_server.cpp b/tests/function/echo_server.cpp index f770a03f..68992c91 100644 --- a/tests/function/echo_server.cpp +++ b/tests/function/echo_server.cpp @@ -16,27 +16,25 @@ #include #include -#include "sds_grpc/server.h" -#include "sds_grpc_test.grpc.pb.h" +#include "grpc_helper/rpc_server.hpp" +#include "grpc_helper_test.grpc.pb.h" using namespace ::grpc; -using namespace ::sds::grpc; -using namespace ::sds_grpc_test; +using namespace grpc_helper; +using namespace ::grpc_helper_test; using namespace std::placeholders; class EchoServiceImpl { - public: virtual ~EchoServiceImpl() = default; - virtual ::grpc::Status echo_request(EchoRequest& request, EchoReply& response) { - LOGINFO("receive echo request {}", request.message()); - response.set_message(request.message()); - return ::grpc::Status::OK; + virtual bool echo_request(const AsyncRpcDataPtr< EchoService, EchoRequest, EchoReply >& rpc_data) { + LOGINFO("receive echo request {}", rpc_data->request().message()); + rpc_data->response().set_message(rpc_data->request().message()); + return true; } bool register_service(GrpcServer* server) { - if (!server->register_async_service< EchoService >()) { LOGERROR("register service failed"); return false; @@ -47,8 +45,8 @@ class EchoServiceImpl { bool register_rpcs(GrpcServer* server) { LOGINFO("register rpc calls"); - if (!server->register_rpc< EchoService, EchoRequest, EchoReply >( - &EchoService::AsyncService::RequestEcho, std::bind(&EchoServiceImpl::echo_request, this, _1, _2))) { + if (!server->register_rpc< EchoService, EchoRequest, EchoReply, false >( + "Echo", &EchoService::AsyncService::RequestEcho, std::bind(&EchoServiceImpl::echo_request, this, _1))) { LOGERROR("register rpc failed"); return false; } @@ -62,26 +60,24 @@ class PingServiceImpl { public: virtual ~PingServiceImpl() = default; - virtual ::grpc::Status ping_request(PingRequest& request, PingReply& response) { - LOGINFO("receive ping request {}", request.seqno()); - response.set_seqno(request.seqno()); - return ::grpc::Status::OK; + virtual bool ping_request(const AsyncRpcDataPtr< PingService, PingRequest, PingReply >& rpc_data) { + LOGINFO("receive ping request {}", rpc_data->request().seqno()); + rpc_data->response().set_seqno(rpc_data->request().seqno()); + return true; } bool register_service(GrpcServer* server) { - if (!server->register_async_service< PingService >()) { LOGERROR("register ping service failed"); return false; } - return true; } bool register_rpcs(GrpcServer* server) { LOGINFO("register rpc calls"); - if (!server->register_rpc< PingService, PingRequest, PingReply >( - &PingService::AsyncService::RequestPing, std::bind(&PingServiceImpl::ping_request, this, _1, _2))) { + if (!server->register_rpc< PingService, PingRequest, PingReply, false >( + "Ping", &PingService::AsyncService::RequestPing, std::bind(&PingServiceImpl::ping_request, this, _1))) { LOGERROR("register ping rpc failed"); return false; } @@ -94,14 +90,11 @@ GrpcServer* g_grpc_server = nullptr; EchoServiceImpl* g_echo_impl = nullptr; PingServiceImpl* g_ping_impl = nullptr; -void sighandler(int signum, siginfo_t* info, void* ptr) { - LOGINFO("Received signal {}", signum); +void waiter_thread() { + std::this_thread::sleep_for(std::chrono::seconds(5)); - if (signum == SIGTERM) { - // shutdown server gracefully for check memory leak - LOGINFO("Shutdown grpc server"); - g_grpc_server->shutdown(); - } + LOGINFO("Shutting down grpc server"); + g_grpc_server->shutdown(); } void StartServer() { @@ -123,8 +116,9 @@ void StartServer() { g_ping_impl->register_rpcs(g_grpc_server); } -SDS_LOGGING_INIT() +SDS_LOGGING_INIT(logging, grpc_server) SDS_OPTIONS_ENABLE(logging) +THREAD_BUFFER_INIT int main(int argc, char* argv[]) { SDS_OPTIONS_LOAD(argc, argv, logging) @@ -133,20 +127,15 @@ int main(int argc, char* argv[]) { StartServer(); - struct sigaction act; - memset(&act, 0, sizeof(act)); - act.sa_sigaction = sighandler; - - sigaction(SIGTERM, &act, NULL); - + auto t = std::thread(waiter_thread); while (!g_grpc_server->is_terminated()) { std::this_thread::sleep_for(std::chrono::seconds(1)); } + t.join(); delete g_grpc_server; delete g_echo_impl; delete g_ping_impl; return 0; } - diff --git a/tests/proto/CMakeLists.txt b/tests/proto/CMakeLists.txt index ea405a50..6f9fde1c 100644 --- a/tests/proto/CMakeLists.txt +++ b/tests/proto/CMakeLists.txt @@ -1,7 +1,7 @@ -protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS sds_grpc_test.proto) +protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS grpc_helper_test.proto) -PROTOBUF_GENERATE_GRPC_CPP(GRPC_SRCS GRPC_HDRS sds_grpc_test.proto) +PROTOBUF_GENERATE_GRPC_CPP(GRPC_SRCS GRPC_HDRS grpc_helper_test.proto) MESSAGE( STATUS "PROTO_SRCS = " ${PROTO_SRCS} " " ${PROTO_HDRS}) diff --git a/tests/proto/grpc_helper_test.proto b/tests/proto/grpc_helper_test.proto new file mode 100644 index 00000000..500816ae --- /dev/null +++ b/tests/proto/grpc_helper_test.proto @@ -0,0 +1,26 @@ + +syntax = "proto3"; + +package grpc_helper_test; + +service EchoService { + rpc Echo(EchoRequest) returns (EchoReply) {} + + rpc EchoLongReply(EchoRequest) returns (stream EchoReply) {} + + rpc LongEcho(stream EchoRequest) returns (EchoReply) {} + + rpc LongEchoLongReply(stream EchoRequest) returns (stream EchoReply) {} +} + +message EchoRequest { string message = 1; } + +message EchoReply { string message = 1; } + +service PingService { + rpc Ping(PingRequest) returns (PingReply) {} +} + +message PingRequest { uint32 seqno = 1; } + +message PingReply { uint32 seqno = 1; } diff --git a/tests/proto/sds_grpc_test.proto b/tests/proto/sds_grpc_test.proto deleted file mode 100644 index 09d4238e..00000000 --- a/tests/proto/sds_grpc_test.proto +++ /dev/null @@ -1,37 +0,0 @@ - -syntax = "proto3"; - -package sds_grpc_test; - -service EchoService { - rpc Echo (EchoRequest) returns (EchoReply) {} - - rpc EchoLongReply (EchoRequest) returns (stream EchoReply) {} - - rpc LongEcho (stream EchoRequest) returns (EchoReply) {} - - rpc LongEchoLongReply (stream EchoRequest) returns (stream EchoReply) {} - -} - -message EchoRequest { - string message = 1; -} - -message EchoReply { - string message = 1; -} - - -service PingService { - rpc Ping (PingRequest) returns (PingReply) {} -} - -message PingRequest { - uint32 seqno = 1; -} - -message PingReply { - uint32 seqno = 1; -} - diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index 8fa7cde9..0b6f4299 100644 --- a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -14,8 +14,8 @@ foreach(_test_file ${TEST_SRC_FILES}) get_filename_component(_test_name ${_test_file} NAME_WE) add_executable(${_test_name} ${_test_file}) - add_dependencies(${_test_name} sds_grpc ) - target_link_libraries (${_test_name} sds_grpc ${CONAN_LIBS} ) + add_dependencies(${_test_name} grpc_helper ) + target_link_libraries (${_test_name} grpc_helper ${CONAN_LIBS} ) add_test(NAME ${_test_name} COMMAND ${_test_name} WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) set_tests_properties(${_test_name} PROPERTIES TIMEOUT 5) endforeach() From 1266124e17c9a69866f7b821693c585f69d184cc Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Tue, 7 Sep 2021 09:25:38 -0700 Subject: [PATCH 062/385] Added sync server rpc registration and then used latest sisl --- include/grpc_helper/rpc_call.hpp | 9 ++++++--- include/grpc_helper/rpc_client.hpp | 5 +++-- include/grpc_helper/rpc_server.hpp | 13 ++++++++++++- lib/rpc_client.cpp | 4 +++- tests/function/CMakeLists.txt | 6 +++--- tests/function/echo_async_client.cpp | 2 +- tests/function/echo_server.cpp | 1 - tests/unit/CMakeLists.txt | 2 +- 8 files changed, 29 insertions(+), 13 deletions(-) diff --git a/include/grpc_helper/rpc_call.hpp b/include/grpc_helper/rpc_call.hpp index 872e1634..d5920df6 100644 --- a/include/grpc_helper/rpc_call.hpp +++ b/include/grpc_helper/rpc_call.hpp @@ -10,9 +10,9 @@ #include #include -#include -#include -#include +#include +#include +#include #include "rpc_common.hpp" SDS_LOGGING_DECL(grpc_server) @@ -107,6 +107,7 @@ using StreamRpcDataPtr = boost::intrusive_ptr< RpcData< ServiceT, ReqT, RespT, t #define rpc_handler_cb_t std::function< bool(const RPC_DATA_PTR_SPEC& rpc_call) > #define rpc_completed_cb_t std::function< void(const RPC_DATA_PTR_SPEC& rpc_call) > #define rpc_call_static_info_t RpcStaticInfo< ServiceT, ReqT, RespT, streaming > +#define rpc_sync_handler_cb_t std::function< ::grpc::Status(const ReqT&, RespT&) > // This class represents all static information related to a specific RpcData, so these information does not need to be // built for every RPC @@ -155,6 +156,8 @@ class RpcData : public RpcDataAbstract, sisl::ObjLifeCounter< RpcData< ServiceT, return *m_response; } + void set_status(grpc::Status status) { m_retstatus = status; } + // invoked by the application completion flow when the response payload `m_response` is formed //@param is_last - true to indicate that this is the last chunk in a streaming response (where // applicable) diff --git a/include/grpc_helper/rpc_client.hpp b/include/grpc_helper/rpc_client.hpp index 3d16afba..983fd747 100644 --- a/include/grpc_helper/rpc_client.hpp +++ b/include/grpc_helper/rpc_client.hpp @@ -14,8 +14,8 @@ #include #include -#include -#include +#include +#include namespace grpc_helper { @@ -120,6 +120,7 @@ class GrpcBaseClient { const std::string& ssl_cert = ""); virtual ~GrpcBaseClient() = default; virtual bool is_connection_ready() const; + virtual void init(); private: // virtual bool load_ssl_cert(const std::string& ssl_cert, std::string& content); diff --git a/include/grpc_helper/rpc_server.hpp b/include/grpc_helper/rpc_server.hpp index 5f34c882..613fbe0b 100644 --- a/include/grpc_helper/rpc_server.hpp +++ b/include/grpc_helper/rpc_server.hpp @@ -1,3 +1,5 @@ +#pragma once + #include #include #include @@ -5,7 +7,7 @@ #include #include #include -#include +#include #include "rpc_call.hpp" namespace grpc_helper { @@ -80,6 +82,15 @@ class GrpcServer : private boost::noncopyable { return true; } + template < typename ServiceT, typename ReqT, typename RespT, bool streaming = false > + bool register_sync_rpc(const std::string& name, const request_call_cb_t& request_call_cb, + const rpc_sync_handler_cb_t& handler) { + return register_rpc(name, request_call_cb, [handler](const RPC_DATA_PTR_SPEC& rpc_data) -> bool { + rpc_data->set_status(handler(rpc_data->request(), rpc_data->response())); + return true; + }); + } + private: void handle_rpcs(uint32_t thread_num, const rpc_thread_start_cb_t& thread_start_cb); diff --git a/lib/rpc_client.cpp b/lib/rpc_client.cpp index 88c168f5..a87d3e56 100644 --- a/lib/rpc_client.cpp +++ b/lib/rpc_client.cpp @@ -4,7 +4,9 @@ namespace grpc_helper { GrpcBaseClient::GrpcBaseClient(const std::string& server_addr, const std::string& target_domain, const std::string& ssl_cert) : - m_server_addr(server_addr), m_target_domain(target_domain), m_ssl_cert(ssl_cert) { + m_server_addr(server_addr), m_target_domain(target_domain), m_ssl_cert(ssl_cert) {} + +void GrpcBaseClient::init() { ::grpc::SslCredentialsOptions ssl_opts; if (!m_ssl_cert.empty()) { #if 0 diff --git a/tests/function/CMakeLists.txt b/tests/function/CMakeLists.txt index b9991bff..bdbdb8cb 100644 --- a/tests/function/CMakeLists.txt +++ b/tests/function/CMakeLists.txt @@ -4,21 +4,21 @@ include_directories(${CMAKE_CURRENT_BINARY_DIR}/../proto) set(FUNCTION_TEST_LIBS grpc_helper test_proto ${CONAN_LIBS}) +enable_testing() # build echo_server add_executable(echo_server echo_server.cpp) add_dependencies(echo_server grpc_helper test_proto) target_link_libraries(echo_server ${FUNCTION_TEST_LIBS} ) - +add_test(NAME Echo_Ping_Server COMMAND echo_server) # build echo_sync_client #add_executable(echo_sync_client echo_sync_client.cpp) #add_dependencies(echo_sync_client grpc_helper test_proto) #target_link_libraries(echo_sync_client ${FUNCTION_TEST_LIBS} ) - # build echo_async_client add_executable(echo_async_client echo_async_client.cpp) add_dependencies(echo_async_client grpc_helper test_proto) target_link_libraries(echo_async_client ${FUNCTION_TEST_LIBS} ) - +add_test(NAME Echo_Ping_Async_Client_Server COMMAND echo_async_client) diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp index 4a24b729..5b2e4a58 100644 --- a/tests/function/echo_async_client.cpp +++ b/tests/function/echo_async_client.cpp @@ -45,6 +45,7 @@ class TestClient { void run(const std::string& server_address) { auto client = std::make_unique< GrpcAsyncClient >(server_address, "", ""); + client->init(); GrpcAsyncClientWorker::create_worker(WORKER_NAME, 4); auto echo_stub = client->make_stub< EchoService >(WORKER_NAME); @@ -201,7 +202,6 @@ class TestServer { SDS_LOGGING_INIT(logging, grpc_server) SDS_OPTIONS_ENABLE(logging) -THREAD_BUFFER_INIT int main(int argc, char** argv) { SDS_OPTIONS_LOAD(argc, argv, logging) diff --git a/tests/function/echo_server.cpp b/tests/function/echo_server.cpp index 68992c91..e748cc9e 100644 --- a/tests/function/echo_server.cpp +++ b/tests/function/echo_server.cpp @@ -118,7 +118,6 @@ void StartServer() { SDS_LOGGING_INIT(logging, grpc_server) SDS_OPTIONS_ENABLE(logging) -THREAD_BUFFER_INIT int main(int argc, char* argv[]) { SDS_OPTIONS_LOAD(argc, argv, logging) diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index 0b6f4299..69352947 100644 --- a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -1,7 +1,7 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../proto) -# enable_testing() +#enable_testing() file(GLOB TEST_SRC_FILES **/*.cpp) From c6d4ba02dd2ff4c08dc2e24b7c0aed8a9cdf3fbc Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Tue, 7 Sep 2021 11:47:30 -0700 Subject: [PATCH 063/385] Make tests run sequentially --- tests/function/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/function/CMakeLists.txt b/tests/function/CMakeLists.txt index bdbdb8cb..79eabb60 100644 --- a/tests/function/CMakeLists.txt +++ b/tests/function/CMakeLists.txt @@ -22,3 +22,4 @@ add_executable(echo_async_client echo_async_client.cpp) add_dependencies(echo_async_client grpc_helper test_proto) target_link_libraries(echo_async_client ${FUNCTION_TEST_LIBS} ) add_test(NAME Echo_Ping_Async_Client_Server COMMAND echo_async_client) +SET_TESTS_PROPERTIES(Echo_Ping_Async_Client_Server PROPERTIES DEPENDS TestHttpSanity) From b4a75d545f848bd3f6faf673a9c0bc462d7f0344 Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Tue, 7 Sep 2021 12:04:57 -0700 Subject: [PATCH 064/385] Fixed test leaks --- tests/function/echo_async_client.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp index 5b2e4a58..6f37fa3b 100644 --- a/tests/function/echo_async_client.cpp +++ b/tests/function/echo_async_client.cpp @@ -192,6 +192,9 @@ class TestServer { void shutdown() { LOGINFO("Shutting down grpc server"); m_grpc_server->shutdown(); + delete m_grpc_server; + delete m_echo_impl; + delete m_ping_impl; } private: From 6d540f443f1e7791a24eab2455ab1fde526adaec Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Tue, 14 Dec 2021 16:29:39 -0800 Subject: [PATCH 065/385] Use sisl 7.x latest --- .../client/local/test_flip_local_client.cpp | 131 +++++++++++------- src/flip/lib/flip.hpp | 8 +- src/flip/lib/test_flip.cpp | 108 ++++++++------- .../flip_rpc_server.cpp/test_flip_server.cpp | 12 +- 4 files changed, 146 insertions(+), 113 deletions(-) diff --git a/src/flip/client/local/test_flip_local_client.cpp b/src/flip/client/local/test_flip_local_client.cpp index 0ef1888e..8b1b5ca8 100644 --- a/src/flip/client/local/test_flip_local_client.cpp +++ b/src/flip/client/local/test_flip_local_client.cpp @@ -7,12 +7,12 @@ #include #include -#include +#include using namespace flip; -SDS_LOGGING_INIT(flip) -SDS_OPTIONS_ENABLE(logging) +SISL_LOGGING_INIT(flip) +SISL_OPTIONS_ENABLE(logging) Flip g_flip; @@ -33,21 +33,21 @@ void run_and_validate_ret_flip() { std::string unknown_vol = "unknown_vol"; std::string invalid_dev_name = "/boot/sda"; - auto result = g_flip.get_test_flip("simval_flip", my_vol, valid_dev_name); + auto result = g_flip.get_test_flip< std::string >("simval_flip", my_vol, valid_dev_name); assert(result); assert(result.get() == "Simulated error value"); - result = g_flip.get_test_flip("simval_flip", unknown_vol, valid_dev_name); + result = g_flip.get_test_flip< std::string >("simval_flip", unknown_vol, valid_dev_name); assert(!result); - result = g_flip.get_test_flip("simval_flip", my_vol, invalid_dev_name); + result = g_flip.get_test_flip< std::string >("simval_flip", my_vol, invalid_dev_name); assert(!result); - result = g_flip.get_test_flip("simval_flip", my_vol, valid_dev_name); + result = g_flip.get_test_flip< std::string >("simval_flip", my_vol, valid_dev_name); assert(result); assert(result.get() == "Simulated error value"); - result = g_flip.get_test_flip("simval_flip", my_vol, valid_dev_name); + result = g_flip.get_test_flip< std::string >("simval_flip", my_vol, valid_dev_name); assert(!result); // Not more than 2 } @@ -57,13 +57,18 @@ void run_and_validate_delay_flip() { long valid_size_bytes2 = 2048; int invalid_cmd = -1; long invalid_size_bytes = 4096; - std::shared_ptr< std::atomic > closure_calls = std::make_shared>(0); - - assert(g_flip.delay_flip("delay_flip", [closure_calls]() {(*closure_calls)++;}, valid_cmd, valid_size_bytes1)); - assert(!g_flip.delay_flip("delay_flip", [closure_calls]() {(*closure_calls)++;}, invalid_cmd, valid_size_bytes1)); - assert(g_flip.delay_flip("delay_flip", [closure_calls]() { (*closure_calls)++;}, valid_cmd, valid_size_bytes2)); - assert(!g_flip.delay_flip("delay_flip", [closure_calls]() {(*closure_calls)++;}, valid_cmd, invalid_size_bytes)); - assert(!g_flip.delay_flip("delay_flip", [closure_calls]() {(*closure_calls)++;}, valid_cmd, valid_size_bytes1)); + std::shared_ptr< std::atomic< int > > closure_calls = std::make_shared< std::atomic< int > >(0); + + assert(g_flip.delay_flip( + "delay_flip", [closure_calls]() { (*closure_calls)++; }, valid_cmd, valid_size_bytes1)); + assert(!g_flip.delay_flip( + "delay_flip", [closure_calls]() { (*closure_calls)++; }, invalid_cmd, valid_size_bytes1)); + assert(g_flip.delay_flip( + "delay_flip", [closure_calls]() { (*closure_calls)++; }, valid_cmd, valid_size_bytes2)); + assert(!g_flip.delay_flip( + "delay_flip", [closure_calls]() { (*closure_calls)++; }, valid_cmd, invalid_size_bytes)); + assert(!g_flip.delay_flip( + "delay_flip", [closure_calls]() { (*closure_calls)++; }, valid_cmd, valid_size_bytes1)); sleep(2); DEBUG_ASSERT_EQ((*closure_calls).load(), 2); @@ -72,41 +77,56 @@ void run_and_validate_delay_flip() { void run_and_validate_delay_return_flip() { double valid_double = 2.0; double invalid_double = 1.85; - std::shared_ptr< std::atomic > closure_calls = std::make_shared>(0); - - assert(g_flip.get_delay_flip("delay_simval_flip", [closure_calls](std::string error) { - (*closure_calls)++; - DEBUG_ASSERT_EQ(error, "Simulated delayed errval"); - }, valid_double)); - - assert(!g_flip.get_delay_flip("delay_simval_flip", [closure_calls](std::string error) { - assert(0); - (*closure_calls)++; - }, invalid_double)); - - assert(g_flip.get_delay_flip("delay_simval_flip", [closure_calls](std::string error) { - DEBUG_ASSERT_EQ(error, "Simulated delayed errval"); - (*closure_calls)++; - }, valid_double)); - - assert(!g_flip.get_delay_flip("delay_simval_flip", [closure_calls](std::string error) { - assert(0); - (*closure_calls)++; - }, invalid_double)); - - assert(!g_flip.get_delay_flip("delay_simval_flip", [closure_calls](std::string error) { - DEBUG_ASSERT_EQ(error, "Simulated delayed errval"); - (*closure_calls)++; - LOGINFO("Called with error = {}", error); - }, valid_double)); + std::shared_ptr< std::atomic< int > > closure_calls = std::make_shared< std::atomic< int > >(0); + + assert(g_flip.get_delay_flip< std::string >( + "delay_simval_flip", + [closure_calls](std::string error) { + (*closure_calls)++; + DEBUG_ASSERT_EQ(error, "Simulated delayed errval"); + }, + valid_double)); + + assert(!g_flip.get_delay_flip< std::string >( + "delay_simval_flip", + [closure_calls](std::string error) { + assert(0); + (*closure_calls)++; + }, + invalid_double)); + + assert(g_flip.get_delay_flip< std::string >( + "delay_simval_flip", + [closure_calls](std::string error) { + DEBUG_ASSERT_EQ(error, "Simulated delayed errval"); + (*closure_calls)++; + }, + valid_double)); + + assert(!g_flip.get_delay_flip< std::string >( + "delay_simval_flip", + [closure_calls](std::string error) { + assert(0); + (*closure_calls)++; + }, + invalid_double)); + + assert(!g_flip.get_delay_flip< std::string >( + "delay_simval_flip", + [closure_calls](std::string error) { + DEBUG_ASSERT_EQ(error, "Simulated delayed errval"); + (*closure_calls)++; + LOGINFO("Called with error = {}", error); + }, + valid_double)); sleep(2); DEBUG_ASSERT_EQ((*closure_calls).load(), 2); } -int main(int argc, char *argv[]) { - SDS_OPTIONS_LOAD(argc, argv, logging) - sds_logging::SetLogger(std::string(argv[0])); +int main(int argc, char* argv[]) { + SISL_OPTIONS_LOAD(argc, argv, logging) + sisl::logging::SetLogger(std::string(argv[0])); spdlog::set_pattern("[%D %T%z] [%^%l%$] [%n] [%t] %v"); FlipClient fclient(&g_flip); @@ -115,28 +135,33 @@ int main(int argc, char *argv[]) { /* Inject a no return action flip */ FlipCondition cond1; fclient.create_condition("cmd_type", flip::Operator::EQUAL, (int)1, &cond1); - freq.set_count(2); freq.set_percent(100); + freq.set_count(2); + freq.set_percent(100); fclient.inject_noreturn_flip("noret_flip", {cond1}, freq); /* Inject a invalid return action flip */ FlipCondition cond2, cond6; - fclient.create_condition("vol_name", flip::Operator::EQUAL, "vol1", &cond2); - fclient.create_condition("dev_name", flip::Operator::REG_EX, "\\/dev\\/", &cond6); - freq.set_count(2); freq.set_percent(100); - fclient.inject_retval_flip("simval_flip", {cond2, cond6}, freq, "Simulated error value"); + fclient.create_condition< std::string >("vol_name", flip::Operator::EQUAL, "vol1", &cond2); + fclient.create_condition< std::string >("dev_name", flip::Operator::REG_EX, "\\/dev\\/", &cond6); + freq.set_count(2); + freq.set_percent(100); + fclient.inject_retval_flip< std::string >("simval_flip", {cond2, cond6}, freq, "Simulated error value"); /* Inject a delay of 100ms action flip */ FlipCondition cond3, cond4; fclient.create_condition("cmd_type", flip::Operator::EQUAL, (int)1, &cond3); fclient.create_condition("size_bytes", flip::Operator::LESS_THAN_OR_EQUAL, (long)2048, &cond4); - freq.set_count(2); freq.set_percent(100); + freq.set_count(2); + freq.set_percent(100); fclient.inject_delay_flip("delay_flip", {cond3, cond4}, freq, 100000); /* Inject a delay of 1second and return a value action flip */ FlipCondition cond5; fclient.create_condition("double_val", flip::Operator::NOT_EQUAL, (double)1.85, &cond5); - freq.set_count(2); freq.set_percent(100); - fclient.inject_delay_and_retval_flip("delay_simval_flip", {cond5}, freq, 1000000, "Simulated delayed errval"); + freq.set_count(2); + freq.set_percent(100); + fclient.inject_delay_and_retval_flip< std::string >("delay_simval_flip", {cond5}, freq, 1000000, + "Simulated delayed errval"); /* Now execute the flip and validate that they are correct */ run_and_validate_noret_flip(); diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index 2147be9a..af17c5bc 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include @@ -19,7 +19,7 @@ #include #include -SDS_LOGGING_DECL(flip) +SISL_LOGGING_DECL(flip) namespace flip { @@ -46,9 +46,7 @@ struct flip_name_compare { struct flip_instance { flip_instance(const FlipSpec& fspec) : - m_fspec(fspec), - m_hit_count(0), - m_remain_exec_count(fspec.flip_frequency().count()) {} + m_fspec(fspec), m_hit_count(0), m_remain_exec_count(fspec.flip_frequency().count()) {} flip_instance(const flip_instance& other) { m_fspec = other.m_fspec; diff --git a/src/flip/lib/test_flip.cpp b/src/flip/lib/test_flip.cpp index 7321437f..9fd6965a 100644 --- a/src/flip/lib/test_flip.cpp +++ b/src/flip/lib/test_flip.cpp @@ -7,10 +7,10 @@ #include #include -#include +#include -SDS_LOGGING_INIT(flip) -SDS_OPTIONS_ENABLE(logging) +SISL_LOGGING_INIT(flip) +SISL_OPTIONS_ENABLE(logging) void create_ret_fspec(flip::FlipSpec* fspec) { *(fspec->mutable_flip_name()) = "ret_fspec"; @@ -86,19 +86,24 @@ void create_delay_fspec(flip::FlipSpec* fspec) { } void run_and_validate_delay_flip(flip::Flip* flip) { - int valid_cmd = 2; - int invalid_cmd = -1; + int valid_cmd = 2; + int invalid_cmd = -1; std::shared_ptr< std::atomic< int > > closure_calls = std::make_shared< std::atomic< int > >(0); - assert(flip->delay_flip("delay_fspec", [closure_calls]() { (*closure_calls)++; }, valid_cmd)); + assert(flip->delay_flip( + "delay_fspec", [closure_calls]() { (*closure_calls)++; }, valid_cmd)); - assert(!flip->delay_flip("delay_fspec", [closure_calls]() { (*closure_calls)++; }, invalid_cmd)); + assert(!flip->delay_flip( + "delay_fspec", [closure_calls]() { (*closure_calls)++; }, invalid_cmd)); - assert(flip->delay_flip("delay_fspec", [closure_calls]() { (*closure_calls)++; }, valid_cmd)); + assert(flip->delay_flip( + "delay_fspec", [closure_calls]() { (*closure_calls)++; }, valid_cmd)); - assert(!flip->delay_flip("delay_fspec", [closure_calls]() { (*closure_calls)++; }, invalid_cmd)); + assert(!flip->delay_flip( + "delay_fspec", [closure_calls]() { (*closure_calls)++; }, invalid_cmd)); - assert(!flip->delay_flip("delay_fspec", [closure_calls]() { (*closure_calls)++; }, valid_cmd)); + assert(!flip->delay_flip( + "delay_fspec", [closure_calls]() { (*closure_calls)++; }, valid_cmd)); sleep(2); DEBUG_ASSERT_EQ((*closure_calls).load(), 2); @@ -122,45 +127,50 @@ void create_delay_ret_fspec(flip::FlipSpec* fspec) { } void run_and_validate_delay_return_flip(flip::Flip* flip) { - int valid_cmd = 2; - int invalid_cmd = -1; + int valid_cmd = 2; + int invalid_cmd = -1; std::shared_ptr< std::atomic< int > > closure_calls = std::make_shared< std::atomic< int > >(0); - assert(flip->get_delay_flip< std::string >("delay_ret_fspec", - [closure_calls](std::string error) { - (*closure_calls)++; - DEBUG_ASSERT_EQ(error, "Delayed error simulated value"); - }, - valid_cmd)); - - assert(!flip->get_delay_flip< std::string >("delay_ret_fspec", - [closure_calls](std::string error) { - assert(0); - (*closure_calls)++; - }, - invalid_cmd)); - - assert(flip->get_delay_flip< std::string >("delay_ret_fspec", - [closure_calls](std::string error) { - DEBUG_ASSERT_EQ(error, "Delayed error simulated value"); - (*closure_calls)++; - }, - valid_cmd)); - - assert(!flip->get_delay_flip< std::string >("delay_ret_fspec", - [closure_calls](std::string error) { - assert(0); - (*closure_calls)++; - }, - invalid_cmd)); - - assert(!flip->get_delay_flip< std::string >("delay_ret_fspec", - [closure_calls](std::string error) { - DEBUG_ASSERT_EQ(error, "Delayed error simulated value"); - (*closure_calls)++; - LOGINFO("Called with error = {}", error); - }, - valid_cmd)); + assert(flip->get_delay_flip< std::string >( + "delay_ret_fspec", + [closure_calls](std::string error) { + (*closure_calls)++; + DEBUG_ASSERT_EQ(error, "Delayed error simulated value"); + }, + valid_cmd)); + + assert(!flip->get_delay_flip< std::string >( + "delay_ret_fspec", + [closure_calls](std::string error) { + assert(0); + (*closure_calls)++; + }, + invalid_cmd)); + + assert(flip->get_delay_flip< std::string >( + "delay_ret_fspec", + [closure_calls](std::string error) { + DEBUG_ASSERT_EQ(error, "Delayed error simulated value"); + (*closure_calls)++; + }, + valid_cmd)); + + assert(!flip->get_delay_flip< std::string >( + "delay_ret_fspec", + [closure_calls](std::string error) { + assert(0); + (*closure_calls)++; + }, + invalid_cmd)); + + assert(!flip->get_delay_flip< std::string >( + "delay_ret_fspec", + [closure_calls](std::string error) { + DEBUG_ASSERT_EQ(error, "Delayed error simulated value"); + (*closure_calls)++; + LOGINFO("Called with error = {}", error); + }, + valid_cmd)); sleep(2); DEBUG_ASSERT_EQ((*closure_calls).load(), 2); @@ -190,8 +200,8 @@ void create_multi_cond_fspec(flip::FlipSpec *fspec) { #endif int main(int argc, char* argv[]) { - SDS_OPTIONS_LOAD(argc, argv, logging) - sds_logging::SetLogger(std::string(argv[0])); + SISL_OPTIONS_LOAD(argc, argv, logging) + sisl::logging::SetLogger(std::string(argv[0])); spdlog::set_pattern("[%D %T%z] [%^%l%$] [%n] [%t] %v"); flip::FlipSpec ret_fspec; diff --git a/src/flip/server/flip_rpc_server.cpp/test_flip_server.cpp b/src/flip/server/flip_rpc_server.cpp/test_flip_server.cpp index 79d4a86c..e2b45075 100644 --- a/src/flip/server/flip_rpc_server.cpp/test_flip_server.cpp +++ b/src/flip/server/flip_rpc_server.cpp/test_flip_server.cpp @@ -4,15 +4,15 @@ #include "flip.hpp" -#include +#include -SDS_LOGGING_INIT(flip) +SISL_LOGGING_INIT(flip) -SDS_OPTIONS_ENABLE(logging) +SISL_OPTIONS_ENABLE(logging) -int main(int argc, char *argv[]) { - SDS_OPTIONS_LOAD(argc, argv, logging) - sds_logging::SetLogger(std::string(argv[0])); +int main(int argc, char* argv[]) { + SISL_OPTIONS_LOAD(argc, argv, logging) + sisl::logging::SetLogger(std::string(argv[0])); spdlog::set_pattern("[%D %T%z] [%^%l%$] [%n] [%t] %v"); flip::Flip f; From d2f66c257ca391c7a986b1329c0080d9fdf2973f Mon Sep 17 00:00:00 2001 From: hkadayam Date: Wed, 15 Dec 2021 15:50:50 -0800 Subject: [PATCH 066/385] Using sisl 7.x and logging/options changes as a result (#2) --- include/grpc_helper/rpc_call.hpp | 4 ++-- include/grpc_helper/rpc_client.hpp | 2 +- include/grpc_helper/rpc_server.hpp | 2 +- tests/function/echo_async_client.cpp | 12 ++++++------ tests/function/echo_server.cpp | 12 ++++++------ tests/function/echo_sync_client.cpp | 12 ++++++------ 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/include/grpc_helper/rpc_call.hpp b/include/grpc_helper/rpc_call.hpp index d5920df6..173712fd 100644 --- a/include/grpc_helper/rpc_call.hpp +++ b/include/grpc_helper/rpc_call.hpp @@ -9,13 +9,13 @@ #include #include -#include +#include #include #include #include #include "rpc_common.hpp" -SDS_LOGGING_DECL(grpc_server) +SISL_LOGGING_DECL(grpc_server) #define RPC_SERVER_LOG(level, msg, ...) \ LOG##level##MOD_FMT(grpc_server, ([&](fmt::memory_buffer& buf, const char* __m, auto&&... args) -> bool { \ diff --git a/include/grpc_helper/rpc_client.hpp b/include/grpc_helper/rpc_client.hpp index 983fd747..b0597361 100644 --- a/include/grpc_helper/rpc_client.hpp +++ b/include/grpc_helper/rpc_client.hpp @@ -13,7 +13,7 @@ #include #include -#include +#include #include #include diff --git a/include/grpc_helper/rpc_server.hpp b/include/grpc_helper/rpc_server.hpp index 613fbe0b..bd4d8863 100644 --- a/include/grpc_helper/rpc_server.hpp +++ b/include/grpc_helper/rpc_server.hpp @@ -6,7 +6,7 @@ #include #include -#include +#include #include #include "rpc_call.hpp" diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp index 6f37fa3b..f57b0b16 100644 --- a/tests/function/echo_async_client.cpp +++ b/tests/function/echo_async_client.cpp @@ -5,8 +5,8 @@ #include #include -#include -#include +#include +#include #include "grpc_helper/rpc_client.hpp" #include "grpc_helper/rpc_server.hpp" @@ -203,12 +203,12 @@ class TestServer { PingServiceImpl* m_ping_impl = nullptr; }; -SDS_LOGGING_INIT(logging, grpc_server) -SDS_OPTIONS_ENABLE(logging) +SISL_LOGGING_INIT(logging, grpc_server) +SISL_OPTIONS_ENABLE(logging) int main(int argc, char** argv) { - SDS_OPTIONS_LOAD(argc, argv, logging) - sds_logging::SetLogger("async_client"); + SISL_OPTIONS_LOAD(argc, argv, logging) + sisl::logging::SetLogger("async_client"); TestServer server; std::string server_address("0.0.0.0:50051"); diff --git a/tests/function/echo_server.cpp b/tests/function/echo_server.cpp index e748cc9e..5762f3eb 100644 --- a/tests/function/echo_server.cpp +++ b/tests/function/echo_server.cpp @@ -13,8 +13,8 @@ #include #include -#include -#include +#include +#include #include "grpc_helper/rpc_server.hpp" #include "grpc_helper_test.grpc.pb.h" @@ -116,12 +116,12 @@ void StartServer() { g_ping_impl->register_rpcs(g_grpc_server); } -SDS_LOGGING_INIT(logging, grpc_server) -SDS_OPTIONS_ENABLE(logging) +SISL_LOGGING_INIT(logging, grpc_server) +SISL_OPTIONS_ENABLE(logging) int main(int argc, char* argv[]) { - SDS_OPTIONS_LOAD(argc, argv, logging) - sds_logging::SetLogger("echo_server"); + SISL_OPTIONS_LOAD(argc, argv, logging) + sisl::logging::SetLogger("echo_server"); LOGINFO("Start echo server ..."); StartServer(); diff --git a/tests/function/echo_sync_client.cpp b/tests/function/echo_sync_client.cpp index 0173f0f1..93e4e9bd 100644 --- a/tests/function/echo_sync_client.cpp +++ b/tests/function/echo_sync_client.cpp @@ -12,8 +12,8 @@ #include #include -#include -#include +#include +#include #include "sds_grpc/client.h" #include "sds_grpc_test.grpc.pb.h" @@ -96,12 +96,12 @@ int RunClient(const std::string& server_address) { return ret; } -SDS_LOGGING_INIT() -SDS_OPTIONS_ENABLE(logging) +SISL_LOGGING_INIT() +SISL_OPTIONS_ENABLE(logging) int main(int argc, char** argv) { - SDS_OPTIONS_LOAD(argc, argv, logging) - sds_logging::SetLogger("sync_client"); + SISL_OPTIONS_LOAD(argc, argv, logging) + sisl::logging::SetLogger("sync_client"); std::string server_address("0.0.0.0:50051"); From 7819b9f8e844424f8f8478f0a9147639c8c14f2b Mon Sep 17 00:00:00 2001 From: Ravi Nagarjun Akella Date: Tue, 9 Nov 2021 16:56:16 -0700 Subject: [PATCH 067/385] SDSTOR-5926: Add tls encryption to grpc server and client --- include/grpc_helper/rpc_client.hpp | 2 +- include/utils.hpp | 18 ++++++++++++++++++ lib/rpc_client.cpp | 20 +++++++++++--------- lib/rpc_server.cpp | 14 +++++++------- 4 files changed, 37 insertions(+), 17 deletions(-) create mode 100644 include/utils.hpp diff --git a/include/grpc_helper/rpc_client.hpp b/include/grpc_helper/rpc_client.hpp index b0597361..e3c543be 100644 --- a/include/grpc_helper/rpc_client.hpp +++ b/include/grpc_helper/rpc_client.hpp @@ -123,7 +123,7 @@ class GrpcBaseClient { virtual void init(); private: - // virtual bool load_ssl_cert(const std::string& ssl_cert, std::string& content); + virtual bool load_ssl_cert(const std::string& ssl_cert, std::string& content); }; class GrpcSyncClient : public GrpcBaseClient { diff --git a/include/utils.hpp b/include/utils.hpp new file mode 100644 index 00000000..7668bf5c --- /dev/null +++ b/include/utils.hpp @@ -0,0 +1,18 @@ +#pragma once + +#include +#include + +namespace grpc_helper { + +static bool get_file_contents(const std::string& file_name, std::string& contents) { + try { + std::ifstream f(file_name); + std::string buffer(std::istreambuf_iterator< char >{f}, std::istreambuf_iterator< char >{}); + contents = buffer; + return !content.empty(); + } catch (...) {} + return false; +} + +} // namespace grpc_helper \ No newline at end of file diff --git a/lib/rpc_client.cpp b/lib/rpc_client.cpp index a87d3e56..1d426b64 100644 --- a/lib/rpc_client.cpp +++ b/lib/rpc_client.cpp @@ -1,4 +1,5 @@ #include "grpc_helper/rpc_client.hpp" +#include "utils.hpp" namespace grpc_helper { @@ -9,25 +10,26 @@ GrpcBaseClient::GrpcBaseClient(const std::string& server_addr, const std::string void GrpcBaseClient::init() { ::grpc::SslCredentialsOptions ssl_opts; if (!m_ssl_cert.empty()) { -#if 0 - if (load_ssl_cert(ssl_cert_, ssl_opts.pem_root_certs)) { - ::grpc::ChannelArguments channel_args; - channel_args.SetSslTargetNameOverride(m_target_domain); - m_channel = ::grpc::CreateCustomChannel(m_server_addr, ::grpc::SslCredentials(ssl_opts), channel_args); + if (load_ssl_cert(m_ssl_cert, ssl_opts.pem_root_certs)) { + if (!m_target_domain.empty()) { + ::grpc::ChannelArguments channel_args; + channel_args.SetSslTargetNameOverride(m_target_domain); + m_channel = ::grpc::CreateCustomChannel(m_server_addr, ::grpc::SslCredentials(ssl_opts), channel_args); + } else { + m_channel = ::grpc::CreateChannel(m_server_addr, ::grpc::SslCredentials(ssl_opts)); + } + } else { throw std::runtime_error("Unable to load ssl certification for grpc client"); } -#endif } else { m_channel = ::grpc::CreateChannel(m_server_addr, ::grpc::InsecureChannelCredentials()); } } -#if 0 bool GrpcBaseClient::load_ssl_cert(const std::string& ssl_cert, std::string& content) { - return ::sds::grpc::get_file_contents(ssl_cert, content); + return get_file_contents(ssl_cert, content); } -#endif bool GrpcBaseClient::is_connection_ready() const { return (m_channel->GetState(true) == grpc_connectivity_state::GRPC_CHANNEL_READY); diff --git a/lib/rpc_server.cpp b/lib/rpc_server.cpp index fc927a91..87db12f3 100644 --- a/lib/rpc_server.cpp +++ b/lib/rpc_server.cpp @@ -5,6 +5,7 @@ */ #include +#include "utils.hpp" #ifdef _POSIX_THREADS #ifndef __APPLE__ @@ -23,14 +24,16 @@ GrpcServer::GrpcServer(const std::string& listen_addr, uint32_t threads, const s m_num_threads{threads} { if (listen_addr.empty() || threads == 0) { throw std::invalid_argument("Invalid parameter to start grpc server"); } -#if 0 if (!ssl_cert.empty() && !ssl_key.empty()) { std::string key_contents; std::string cert_contents; - get_file_contents(ssl_cert, cert_contents); - get_file_contents(ssl_key, key_contents); - if (cert_contents.empty() || key_contents.empty()) { return false; } + if (!get_file_contents(ssl_cert, cert_contents)) { + throw std::runtime_error("Unable to load ssl certification for grpc server"); + } + if (!get_file_contents(ssl_key, key_contents)) { + throw std::runtime_error("Unable to load ssl key for grpc server"); + } ::grpc::SslServerCredentialsOptions::PemKeyCertPair pkcp = {key_contents, cert_contents}; ::grpc::SslServerCredentialsOptions ssl_opts; @@ -41,9 +44,6 @@ GrpcServer::GrpcServer(const std::string& listen_addr, uint32_t threads, const s } else { m_builder.AddListeningPort(listen_addr, ::grpc::InsecureServerCredentials()); } -#else - m_builder.AddListeningPort(listen_addr, ::grpc::InsecureServerCredentials()); -#endif // Create one cq per thread for (auto i = 0u; i < threads; ++i) { From ac42f5a31bfa570c443a5f69d2e6bb45455b94bb Mon Sep 17 00:00:00 2001 From: raakella Date: Mon, 15 Nov 2021 00:49:20 +0530 Subject: [PATCH 068/385] SDSTOR-5926: jwt token autherization, add auth for client and server. Add unit tests --- include/grpc_helper/rpc_call.hpp | 78 +++++-- include/grpc_helper/rpc_client.hpp | 21 +- include/grpc_helper/rpc_common.hpp | 19 ++ include/grpc_helper/rpc_server.hpp | 9 +- include/utils.hpp | 2 +- lib/rpc_client.cpp | 4 +- lib/rpc_server.cpp | 8 +- tests/CMakeLists.txt | 1 + tests/function/echo_sync_client.cpp | 11 +- tests/unit/CMakeLists.txt | 26 +-- tests/unit/auth_test.cpp | 310 ++++++++++++++++++++++++++++ tests/unit/test_token.hpp | 72 +++++++ 12 files changed, 505 insertions(+), 56 deletions(-) create mode 100644 tests/unit/auth_test.cpp create mode 100644 tests/unit/test_token.hpp diff --git a/include/grpc_helper/rpc_call.hpp b/include/grpc_helper/rpc_call.hpp index 173712fd..004f1271 100644 --- a/include/grpc_helper/rpc_call.hpp +++ b/include/grpc_helper/rpc_call.hpp @@ -13,6 +13,7 @@ #include #include #include +#include #include "rpc_common.hpp" SISL_LOGGING_DECL(grpc_server) @@ -116,14 +117,15 @@ class RpcStaticInfo : public RpcStaticInfoBase { public: RpcStaticInfo(GrpcServer* server, typename ServiceT::AsyncService& svc, const request_call_cb_t& call_cb, const rpc_handler_cb_t& rpc_cb, const rpc_completed_cb_t& comp_cb, size_t idx, - const std::string& name) : + const std::string& name, sisl::AuthManager* auth_mgr) : m_server{server}, m_svc{svc}, m_req_call_cb{call_cb}, m_handler_cb{rpc_cb}, m_comp_cb{comp_cb}, m_rpc_idx{idx}, - m_rpc_name{name} {} + m_rpc_name{name}, + m_auth_mgr{auth_mgr} {} GrpcServer* m_server; typename ServiceT::AsyncService& m_svc; @@ -132,6 +134,7 @@ class RpcStaticInfo : public RpcStaticInfoBase { rpc_completed_cb_t m_comp_cb; size_t m_rpc_idx; std::string m_rpc_name; + sisl::AuthManager* m_auth_mgr; }; /** @@ -221,6 +224,39 @@ class RpcData : public RpcDataAbstract, sisl::ObjLifeCounter< RpcData< ServiceT, m_streaming_responder(&m_ctx) {} private: + bool do_autherization() { + bool ret{true}; + // Auth is enabled if auth mgr is not null + if (m_rpc_info->m_auth_mgr) { + auto& client_headers = m_ctx.client_metadata(); + if (auto it = client_headers.find("authorization"); it != client_headers.end()) { + const std::string bearer{"Bearer "}; + if (it->second.starts_with(bearer)) { + auto token_ref = it->second.substr(bearer.size()); + std::string raw_token{token_ref.begin(), token_ref.end()}; + std::string msg; + m_retstatus = grpc::Status( + RPCHelper::to_grpc_statuscode(m_rpc_info->m_auth_mgr->verify(raw_token, msg)), msg); + ret = m_retstatus.error_code() == grpc::StatusCode::OK; + } else { + m_retstatus = + grpc::Status(grpc::StatusCode::UNAUTHENTICATED, + grpc::string("authorization header value does not start with 'Bearer '")); + RPC_SERVER_LOG(ERROR, + "authorization header value does not start with Bearer, client_req_context={}, " + "from peer={}", + get_client_req_context(), get_peer_info()); + } + } else { + m_retstatus = + grpc::Status(grpc::StatusCode::UNAUTHENTICATED, grpc::string("missing header authorization")); + ret = false; + RPC_SERVER_LOG(ERROR, "missing header authorization, client_req_context={}, from peer={}", + get_client_req_context(), get_peer_info()); + } + } + return ret; + } // The implementation of this method should dispatch the request for processing by calling // do_start_request_processing One reference on `this` is transferred to the callee, and the // callee is responsible for releasing it (typically via `RpcData::send_response(..)`). @@ -238,21 +274,31 @@ class RpcData : public RpcDataAbstract, sisl::ObjLifeCounter< RpcData< ServiceT, get_peer_info()); RPC_SERVER_LOG(TRACE, "req. payload={}", request().DebugString()); - if constexpr (streaming) { - // In no-streaming mode, we call ref() to inc the ref count for keep the RpcData live - // before users finish their work and send responses in RequestReceived. - // But in streaming mode, The time user finishes their work may be different to - // the time grpc finsihes the grpc call. E.g.: - // 1) The user queues the last streaming resposne. At that time. We can't unref the RpcData and - // must do it after it sends all responses. - // 2) The user queues a no-last streaming response, then RpcData find the call was canceled. - // We can't unref the call, because users don't know it, they will send next responses. - // So instead of using only one ref in no-streaming mode. We use two ref to make lifecyle clear: - // 1) first one in RequestReceived and unref after grpc call finished. - // 2) second one in here and unref after called send_response with is_last = true; - ref(); + // Autherization + if (auto auth_success = do_autherization(); !auth_success) { + if constexpr (streaming) { + std::lock_guard< std::mutex > lock{m_streaming_mutex}; + do_streaming_send_if_needed(); + } else { + send_response(); + } + } else { + if constexpr (streaming) { + // In no-streaming mode, we call ref() to inc the ref count for keep the RpcData live + // before users finish their work and send responses in RequestReceived. + // But in streaming mode, The time user finishes their work may be different to + // the time grpc finsihes the grpc call. E.g.: + // 1) The user queues the last streaming resposne. At that time. We can't unref the RpcData and + // must do it after it sends all responses. + // 2) The user queues a no-last streaming response, then RpcData find the call was canceled. + // We can't unref the call, because users don't know it, they will send next responses. + // So instead of using only one ref in no-streaming mode. We use two ref to make lifecyle clear: + // 1) first one in RequestReceived and unref after grpc call finished. + // 2) second one in here and unref after called send_response with is_last = true; + ref(); + } + if (m_rpc_info->m_handler_cb(RPC_DATA_PTR_SPEC{this})) { send_response(); } } - if (m_rpc_info->m_handler_cb(RPC_DATA_PTR_SPEC{this})) { send_response(); } } return in_shutdown ? nullptr : create_new(); diff --git a/include/grpc_helper/rpc_client.hpp b/include/grpc_helper/rpc_client.hpp index e3c543be..d931ddfe 100644 --- a/include/grpc_helper/rpc_client.hpp +++ b/include/grpc_helper/rpc_client.hpp @@ -16,6 +16,7 @@ #include #include #include +#include namespace grpc_helper { @@ -76,6 +77,10 @@ class ClientRpcDataInternal : public ClientRpcDataAbstract { m_cb(m_reply, m_status); } + void add_metadata(const std::string& meta_key, const std::string& meta_value) { + m_context.AddMetadata(meta_key, meta_value); + } + unary_callback_t< RespT > m_cb; RespT m_reply; ::grpc::ClientContext m_context; @@ -114,10 +119,11 @@ class GrpcBaseClient { const std::string m_ssl_cert; std::shared_ptr< ::grpc::ChannelInterface > m_channel; + std::shared_ptr< sisl::TrfClient > m_trf_client; public: GrpcBaseClient(const std::string& server_addr, const std::string& target_domain = "", - const std::string& ssl_cert = ""); + const std::string& ssl_cert = "", const std::shared_ptr< sisl::TrfClient > trf_client = nullptr); virtual ~GrpcBaseClient() = default; virtual bool is_connection_ready() const; virtual void init(); @@ -192,8 +198,8 @@ class GrpcAsyncClient : public GrpcBaseClient { using StubPtr = std::unique_ptr< typename ServiceT::StubInterface >; GrpcAsyncClient(const std::string& server_addr, const std::string& target_domain = "", - const std::string& ssl_cert = "") : - GrpcBaseClient(server_addr, target_domain, ssl_cert) {} + const std::string& ssl_cert = "", const std::shared_ptr< sisl::TrfClient > trf_client = nullptr) : + GrpcBaseClient(server_addr, target_domain, ssl_cert, trf_client) {} virtual ~GrpcAsyncClient() {} @@ -211,8 +217,8 @@ class GrpcAsyncClient : public GrpcBaseClient { struct AsyncStub { using UPtr = std::unique_ptr< AsyncStub >; - AsyncStub(StubPtr< ServiceT > stub, GrpcAsyncClientWorker* worker) : - m_stub(std::move(stub)), m_worker(worker) {} + AsyncStub(StubPtr< ServiceT > stub, GrpcAsyncClientWorker* worker, sisl::TrfClient* trf_client) : + m_stub(std::move(stub)), m_worker(worker), m_trf_client(trf_client) {} using stub_t = typename ServiceT::StubInterface; @@ -251,6 +257,7 @@ class GrpcAsyncClient : public GrpcBaseClient { const unary_callback_t< RespT >& callback, uint32_t deadline) { auto data = new ClientRpcDataInternal< ReqT, RespT >(callback); data->set_deadline(deadline); + if (m_trf_client) { data->add_metadata("authorization", m_trf_client->get_typed_token()); } // Note that async unary RPCs don't post a CQ tag in call data->m_resp_reader_ptr = (m_stub.get()->*method)(&data->context(), request, cq()); // CQ tag posted here @@ -264,12 +271,14 @@ class GrpcAsyncClient : public GrpcBaseClient { auto cd = new ClientRpcData< ReqT, RespT >(done_cb); builder_cb(cd->m_req); cd->set_deadline(deadline); + if (m_trf_client) { cd->add_metadata("authorization", m_trf_client->get_typed_token()); } cd->m_resp_reader_ptr = (m_stub.get()->*method)(&cd->context(), cd->m_req, cq()); cd->m_resp_reader_ptr->Finish(&cd->reply(), &cd->status(), (void*)cd); } StubPtr< ServiceT > m_stub; GrpcAsyncClientWorker* m_worker; + sisl::TrfClient* m_trf_client; const StubPtr< ServiceT >& stub() { return m_stub; } @@ -286,7 +295,7 @@ class GrpcAsyncClient : public GrpcBaseClient { auto w = GrpcAsyncClientWorker::get_worker(worker); if (w == nullptr) { throw std::runtime_error("worker thread not available"); } - return std::make_unique< AsyncStub< ServiceT > >(ServiceT::NewStub(m_channel), w); + return std::make_unique< AsyncStub< ServiceT > >(ServiceT::NewStub(m_channel), w, m_trf_client.get()); } }; diff --git a/include/grpc_helper/rpc_common.hpp b/include/grpc_helper/rpc_common.hpp index b07b649b..ba00a45a 100644 --- a/include/grpc_helper/rpc_common.hpp +++ b/include/grpc_helper/rpc_common.hpp @@ -4,5 +4,24 @@ namespace grpc_helper { class GrpcServer; struct RPCHelper { static bool has_server_shutdown(const GrpcServer* server); + + static grpc::StatusCode to_grpc_statuscode(const sisl::AuthVerifyStatus status) { + grpc::StatusCode ret; + switch (status) { + case sisl::AuthVerifyStatus::OK: + ret = grpc::StatusCode::OK; + break; + case sisl::AuthVerifyStatus::UNAUTH: + ret = grpc::StatusCode::UNAUTHENTICATED; + break; + case sisl::AuthVerifyStatus::FORBIDDEN: + ret = grpc::StatusCode::PERMISSION_DENIED; + break; + default: + ret = grpc::StatusCode::UNKNOWN; + break; + } + return ret; + } }; } // namespace grpc_helper diff --git a/include/grpc_helper/rpc_server.hpp b/include/grpc_helper/rpc_server.hpp index bd4d8863..1507ab06 100644 --- a/include/grpc_helper/rpc_server.hpp +++ b/include/grpc_helper/rpc_server.hpp @@ -8,6 +8,7 @@ #include #include #include +#include #include "rpc_call.hpp" namespace grpc_helper { @@ -21,14 +22,15 @@ class GrpcServer : private boost::noncopyable { public: GrpcServer(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, - const std::string& ssl_cert); + const std::string& ssl_cert, const std::shared_ptr< sisl::AuthManager > auth_mgr = nullptr); virtual ~GrpcServer(); /** * Create a new GrpcServer instance and initialize it. */ static GrpcServer* make(const std::string& listen_addr, uint32_t threads = 1, const std::string& ssl_key = "", - const std::string& ssl_cert = ""); + const std::string& ssl_cert = "", + const std::shared_ptr< sisl::AuthManager > auth_mgr = nullptr); void run(const rpc_thread_start_cb_t& thread_start_cb = nullptr); void shutdown(); @@ -69,7 +71,7 @@ class GrpcServer : private boost::noncopyable { std::unique_lock lg(m_rpc_registry_mtx); rpc_idx = m_rpc_registry.size(); m_rpc_registry.emplace_back(new RpcStaticInfo< ServiceT, ReqT, RespT, false >( - this, *svc, request_call_cb, rpc_handler, done_handler, rpc_idx, name)); + this, *svc, request_call_cb, rpc_handler, done_handler, rpc_idx, name, m_auth_mgr.get())); // Register one call per cq. for (auto i = 0u; i < m_cqs.size(); ++i) { @@ -106,5 +108,6 @@ class GrpcServer : private boost::noncopyable { std::unordered_map< const char*, ::grpc::Service* > m_services; std::mutex m_rpc_registry_mtx; std::vector< std::unique_ptr< RpcStaticInfoBase > > m_rpc_registry; + std::shared_ptr< sisl::AuthManager > m_auth_mgr; }; } // namespace grpc_helper diff --git a/include/utils.hpp b/include/utils.hpp index 7668bf5c..d579fe41 100644 --- a/include/utils.hpp +++ b/include/utils.hpp @@ -10,7 +10,7 @@ static bool get_file_contents(const std::string& file_name, std::string& content std::ifstream f(file_name); std::string buffer(std::istreambuf_iterator< char >{f}, std::istreambuf_iterator< char >{}); contents = buffer; - return !content.empty(); + return !contents.empty(); } catch (...) {} return false; } diff --git a/lib/rpc_client.cpp b/lib/rpc_client.cpp index 1d426b64..db163573 100644 --- a/lib/rpc_client.cpp +++ b/lib/rpc_client.cpp @@ -4,8 +4,8 @@ namespace grpc_helper { GrpcBaseClient::GrpcBaseClient(const std::string& server_addr, const std::string& target_domain, - const std::string& ssl_cert) : - m_server_addr(server_addr), m_target_domain(target_domain), m_ssl_cert(ssl_cert) {} + const std::string& ssl_cert, const std::shared_ptr< sisl::TrfClient > trf_client) : + m_server_addr(server_addr), m_target_domain(target_domain), m_ssl_cert(ssl_cert), m_trf_client(trf_client) {} void GrpcBaseClient::init() { ::grpc::SslCredentialsOptions ssl_opts; diff --git a/lib/rpc_server.cpp b/lib/rpc_server.cpp index 87db12f3..940fa205 100644 --- a/lib/rpc_server.cpp +++ b/lib/rpc_server.cpp @@ -20,8 +20,8 @@ extern "C" { namespace grpc_helper { GrpcServer::GrpcServer(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, - const std::string& ssl_cert) : - m_num_threads{threads} { + const std::string& ssl_cert, const std::shared_ptr< sisl::AuthManager > auth_mgr) : + m_num_threads{threads}, m_auth_mgr{auth_mgr} { if (listen_addr.empty() || threads == 0) { throw std::invalid_argument("Invalid parameter to start grpc server"); } if (!ssl_cert.empty() && !ssl_key.empty()) { @@ -62,8 +62,8 @@ GrpcServer::~GrpcServer() { } GrpcServer* GrpcServer::make(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, - const std::string& ssl_cert) { - return new GrpcServer(listen_addr, threads, ssl_key, ssl_cert); + const std::string& ssl_cert, const std::shared_ptr< sisl::AuthManager > auth_mgr) { + return new GrpcServer(listen_addr, threads, ssl_key, ssl_cert, auth_mgr); } void GrpcServer::run(const rpc_thread_start_cb_t& thread_start_cb) { diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 155a795c..414418f8 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,3 +1,4 @@ add_subdirectory(function) add_subdirectory(proto) +add_subdirectory(unit) diff --git a/tests/function/echo_sync_client.cpp b/tests/function/echo_sync_client.cpp index 93e4e9bd..12664991 100644 --- a/tests/function/echo_sync_client.cpp +++ b/tests/function/echo_sync_client.cpp @@ -15,7 +15,7 @@ #include #include -#include "sds_grpc/client.h" +#include "grpc_helper/rpc_client.hpp" #include "sds_grpc_test.grpc.pb.h" using namespace ::grpc; @@ -28,13 +28,11 @@ class EchoAndPingClient : public GrpcSyncClient { public: using GrpcSyncClient::GrpcSyncClient; - virtual bool init() { - if (!GrpcSyncClient::init()) { return false; } + virtual void init() { + GrpcSyncClient::init(); echo_stub_ = MakeStub< EchoService >(); ping_stub_ = MakeStub< PingService >(); - - return true; } const std::unique_ptr< EchoService::StubInterface >& echo_stub() { return echo_stub_; } @@ -51,10 +49,11 @@ class EchoAndPingClient : public GrpcSyncClient { int RunClient(const std::string& server_address) { auto client = std::make_unique< EchoAndPingClient >(server_address, "", ""); - if (!client || !client->init()) { + if (!client) { LOGERROR("Create grpc sync client failed."); return -1; } + client->init(); int ret = 0; for (int i = 0; i < GRPC_CALL_COUNT; i++) { diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index 69352947..93b95e03 100644 --- a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -1,25 +1,15 @@ -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../proto) +include_directories(${CMAKE_CURRENT_BINARY_DIR}/../proto) -#enable_testing() +set(UNIT_TEST_LIBS grpc_helper test_proto sisl + ${CONAN_LIBS}) -file(GLOB TEST_SRC_FILES **/*.cpp) - -MESSAGE( STATUS "TEST_SRC_FILES = " ${TEST_SRC_FILES} ) -MESSAGE( STATUS "CMAKE_RUNTIME_OUTPUT_DIRECTORY = " ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} ) - - -# from list of files we'll create tests -foreach(_test_file ${TEST_SRC_FILES}) - get_filename_component(_test_name ${_test_file} NAME_WE) - add_executable(${_test_name} ${_test_file}) - - add_dependencies(${_test_name} grpc_helper ) - target_link_libraries (${_test_name} grpc_helper ${CONAN_LIBS} ) - add_test(NAME ${_test_name} COMMAND ${_test_name} WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) - set_tests_properties(${_test_name} PROPERTIES TIMEOUT 5) -endforeach() +enable_testing() +add_executable(auth_test auth_test.cpp) +#add_dependencies(auth_test grpc_helper test_proto) +target_link_libraries(auth_test ${UNIT_TEST_LIBS} ) +add_test(NAME Auth_Test COMMAND auth_test) diff --git a/tests/unit/auth_test.cpp b/tests/unit/auth_test.cpp new file mode 100644 index 00000000..dbd700fa --- /dev/null +++ b/tests/unit/auth_test.cpp @@ -0,0 +1,310 @@ +#include +#include +#include +#include + +#include +#include +#include + +#if defined __clang__ or defined __GNUC__ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-function-type" +#endif +#include +#if defined __clang__ or defined __GNUC__ +#pragma GCC diagnostic pop +#endif + +#include "grpc_helper/rpc_client.hpp" +#include "grpc_helper/rpc_server.hpp" +#include "grpc_helper_test.grpc.pb.h" +#include "test_token.hpp" + +SDS_LOGGING_INIT(logging, grpc_server, httpserver_lmod) +SDS_OPTIONS_ENABLE(logging) + +namespace grpc_helper::testing { +using namespace sisl; +using namespace ::grpc_helper_test; +using namespace ::testing; + +static const std::string grpc_server_addr{"0.0.0.0:12345"}; +static const std::string trf_token_server_ip{"127.0.0.1"}; +static const uint32_t trf_token_server_port{12346}; + +class EchoServiceImpl { +public: + virtual ~EchoServiceImpl() = default; + + virtual bool echo_request(const AsyncRpcDataPtr< EchoService, EchoRequest, EchoReply >& rpc_data) { + LOGDEBUG("receive echo request {}", rpc_data->request().message()); + rpc_data->response().set_message(rpc_data->request().message()); + return true; + } + + bool register_service(GrpcServer* server) { + if (!server->register_async_service< EchoService >()) { + LOGERROR("register service failed"); + return false; + } + + return true; + } + + bool register_rpcs(GrpcServer* server) { + LOGINFO("register rpc calls"); + if (!server->register_rpc< EchoService, EchoRequest, EchoReply, false >( + "Echo", &EchoService::AsyncService::RequestEcho, + std::bind(&EchoServiceImpl::echo_request, this, std::placeholders::_1))) { + LOGERROR("register rpc failed"); + return false; + } + + return true; + } +}; + +class AuthBaseTest : public ::testing::Test { +public: + virtual void SetUp() {} + + virtual void TearDown() {} + + void grpc_server_start(const std::string& server_address, std::shared_ptr< AuthManager > auth_mgr) { + LOGINFO("Start echo and ping server on {}...", server_address); + m_grpc_server = GrpcServer::make(server_address, 4, "", "", auth_mgr); + m_echo_impl = new EchoServiceImpl(); + m_echo_impl->register_service(m_grpc_server); + m_grpc_server->run(); + LOGINFO("Server listening on {}", server_address); + m_echo_impl->register_rpcs(m_grpc_server); + } + + void process_echo_reply() { + m_echo_received.store(true); + m_cv.notify_all(); + } + + void call_async_echo(EchoRequest& req, EchoReply& reply, grpc::Status& status) { + m_echo_stub->call_unary< EchoRequest, EchoReply >( + req, &EchoService::StubInterface::AsyncEcho, + [&reply, &status, this](EchoReply& reply_, grpc::Status& status_) { + reply = reply_; + status = status_; + process_echo_reply(); + }, + 1); + { + std::unique_lock lk(m_wait_mtx); + m_cv.wait(lk, [this]() { return m_echo_received.load(); }); + } + } + +protected: + std::shared_ptr< AuthManager > m_auth_mgr; + EchoServiceImpl* m_echo_impl = nullptr; + GrpcServer* m_grpc_server = nullptr; + std::unique_ptr< GrpcAsyncClient > m_async_grpc_client; + std::unique_ptr< GrpcAsyncClient::AsyncStub< EchoService > > m_echo_stub; + std::atomic_bool m_echo_received{false}; + std::mutex m_wait_mtx; + std::condition_variable m_cv; +}; + +class AuthDisableTest : public AuthBaseTest { +public: + virtual void SetUp() { + // start grpc server without auth + grpc_server_start(grpc_server_addr, nullptr); + + // Client without auth + m_async_grpc_client = std::make_unique< GrpcAsyncClient >(grpc_server_addr, "", ""); + m_async_grpc_client->init(); + GrpcAsyncClientWorker::create_worker("worker-1", 4); + m_echo_stub = m_async_grpc_client->make_stub< EchoService >("worker-1"); + } + + virtual void TearDown() { m_grpc_server->shutdown(); } +}; + +TEST_F(AuthDisableTest, allow_on_disabled_mode) { + EchoRequest req; + // server sets the same message as response + req.set_message("dummy_msg"); + EchoReply reply; + grpc::Status status; + call_async_echo(req, reply, status); + EXPECT_TRUE(status.ok()); + EXPECT_EQ(req.message(), reply.message()); +} + +class AuthServerOnlyTest : public AuthBaseTest { +public: + virtual void SetUp() { + // start grpc server with auth + AuthMgrConfig auth_cfg; + auth_cfg.tf_token_url = "http://127.0.0.1"; + auth_cfg.auth_allowed_apps = "app1, testapp, app2"; + auth_cfg.auth_exp_leeway = 0; + m_auth_mgr = std::shared_ptr< AuthManager >(new AuthManager()); + m_auth_mgr->set_config(auth_cfg); + grpc_server_start(grpc_server_addr, m_auth_mgr); + + // Client without auth + m_async_grpc_client = std::make_unique< GrpcAsyncClient >(grpc_server_addr, "", ""); + m_async_grpc_client->init(); + GrpcAsyncClientWorker::create_worker("worker-1", 4); + m_echo_stub = m_async_grpc_client->make_stub< EchoService >("worker-1"); + } + + virtual void TearDown() { m_grpc_server->shutdown(); } +}; + +TEST_F(AuthServerOnlyTest, fail_on_no_client_auth) { + EchoRequest req; + // server sets the same message as response + req.set_message("dummy_msg"); + EchoReply reply; + grpc::Status status; + call_async_echo(req, reply, status); + EXPECT_FALSE(status.ok()); + EXPECT_EQ(status.error_code(), grpc::UNAUTHENTICATED); + EXPECT_EQ(status.error_message(), "missing header authorization"); +} + +static std::string get_cur_file_dir() { + const std::string cur_file_path{__FILE__}; + auto last_slash_pos = cur_file_path.rfind('/'); + if (last_slash_pos == std::string::npos) { return ""; } + return std::string{cur_file_path.substr(0, last_slash_pos + 1)}; +} + +static const std::string cur_file_dir{get_cur_file_dir()}; + +class AuthEnableTest : public AuthBaseTest { +public: + virtual void SetUp() { + // start grpc server with auth + AuthMgrConfig auth_cfg; + auth_cfg.tf_token_url = "http://127.0.0.1"; + auth_cfg.auth_allowed_apps = "app1, testapp, app2"; + auth_cfg.auth_exp_leeway = 0; + auth_cfg.issuer = "trustfabric"; + auth_cfg.verify = false; + m_auth_mgr = std::shared_ptr< AuthManager >(new AuthManager()); + m_auth_mgr->set_config(auth_cfg); + grpc_server_start(grpc_server_addr, m_auth_mgr); + + // start token server + HttpServerConfig http_cfg; + http_cfg.is_tls_enabled = false; + http_cfg.bind_address = trf_token_server_ip; + http_cfg.server_port = trf_token_server_port; + http_cfg.read_write_timeout_secs = 10; + http_cfg.is_auth_enabled = false; + m_token_server = std::unique_ptr< HttpServer >( + new HttpServer(http_cfg, + {handler_info("/token", AuthEnableTest::get_token, (void*)this), + handler_info("/download_key", AuthEnableTest::download_key, (void*)this)})); + m_token_server->start(); + + // Client with auth + TrfClientConfig trf_cfg; + trf_cfg.leeway = 0; + trf_cfg.server = fmt::format("{}:{}/token", trf_token_server_ip, trf_token_server_port); + trf_cfg.verify = false; + trf_cfg.grant_path = fmt::format("{}/dummy_grant.cg", cur_file_dir); + std::ofstream outfile(trf_cfg.grant_path); + outfile << "dummy cg contents\n"; + outfile.close(); + m_trf_client = std::make_shared< TrfClient >(trf_cfg); + m_async_grpc_client = std::make_unique< GrpcAsyncClient >(grpc_server_addr, "", "", m_trf_client); + m_async_grpc_client->init(); + GrpcAsyncClientWorker::create_worker("worker-1", 4); + m_echo_stub = m_async_grpc_client->make_stub< EchoService >("worker-1"); + } + + virtual void TearDown() { + m_token_server->stop(); + m_grpc_server->shutdown(); + } + + static void get_token(HttpCallData cd) { + std::string msg; + std::cout << "sending token to client" << std::endl; + pThis(cd)->m_token_server->respond_OK(cd, EVHTP_RES_OK, m_token_response); + } + + static void download_key(HttpCallData cd) { + std::string msg; + pThis(cd)->m_token_server->respond_OK(cd, EVHTP_RES_OK, rsa_pub_key); + } + + static void set_token_response(const std::string& raw_token) { + m_token_response = "{\n" + " \"access_token\": \"" + + raw_token + + "\",\n" + " \"token_type\": \"Bearer\",\n" + " \"expires_in\": \"2000\",\n" + " \"refresh_token\": \"dummy_refresh_token\"\n" + "}"; + } + +protected: + std::unique_ptr< HttpServer > m_token_server; + std::shared_ptr< TrfClient > m_trf_client; + static AuthEnableTest* pThis(HttpCallData cd) { return (AuthEnableTest*)cd->cookie(); } + static std::string m_token_response; +}; +std::string AuthEnableTest::m_token_response; + +TEST_F(AuthEnableTest, allow_with_auth) { + auto raw_token = TestToken().sign_rs256(); + AuthEnableTest::set_token_response(raw_token); + EchoRequest req; + req.set_message("dummy_msg"); + EchoReply reply; + grpc::Status status; + call_async_echo(req, reply, status); + EXPECT_TRUE(status.ok()); + EXPECT_EQ(req.message(), reply.message()); +} + +// sync client +class EchoAndPingClient : public GrpcSyncClient { + +public: + using GrpcSyncClient::GrpcSyncClient; + virtual void init() { + GrpcSyncClient::init(); + echo_stub_ = MakeStub< EchoService >(); + } + + const std::unique_ptr< EchoService::StubInterface >& echo_stub() { return echo_stub_; } + +private: + std::unique_ptr< EchoService::StubInterface > echo_stub_; +}; + +TEST_F(AuthEnableTest, allow_sync_client_with_auth) { + auto sync_client = std::make_unique< EchoAndPingClient >(grpc_server_addr, "", ""); + sync_client->init(); + EchoRequest req; + EchoReply reply; + req.set_message("dummy_sync_msg"); + ::grpc::ClientContext context; + context.AddMetadata("authorization", m_trf_client->get_typed_token()); + auto status = sync_client->echo_stub()->Echo(&context, req, &reply); + EXPECT_TRUE(status.ok()); + EXPECT_EQ(req.message(), reply.message()); +} + +} // namespace grpc_helper::testing + +int main(int argc, char* argv[]) { + ::testing::InitGoogleMock(&argc, argv); + SDS_OPTIONS_LOAD(argc, argv, logging) + return RUN_ALL_TESTS(); +} diff --git a/tests/unit/test_token.hpp b/tests/unit/test_token.hpp new file mode 100644 index 00000000..416c962c --- /dev/null +++ b/tests/unit/test_token.hpp @@ -0,0 +1,72 @@ +#pragma once + +namespace grpc_helper::testing { +// public and private keys for unit test + +static const std::string rsa_pub_key = "-----BEGIN PUBLIC KEY-----\n" + "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuGbXWiK3dQTyCbX5xdE4\n" + "yCuYp0AF2d15Qq1JSXT/lx8CEcXb9RbDddl8jGDv+spi5qPa8qEHiK7FwV2KpRE9\n" + "83wGPnYsAm9BxLFb4YrLYcDFOIGULuk2FtrPS512Qea1bXASuvYXEpQNpGbnTGVs\n" + "WXI9C+yjHztqyL2h8P6mlThPY9E9ue2fCqdgixfTFIF9Dm4SLHbphUS2iw7w1JgT\n" + "69s7of9+I9l5lsJ9cozf1rxrXX4V1u/SotUuNB3Fp8oB4C1fLBEhSlMcUJirz1E8\n" + "AziMCxS+VrRPDM+zfvpIJg3JljAh3PJHDiLu902v9w+Iplu1WyoB2aPfitxEhRN0\n" + "YwIDAQAB\n" + "-----END PUBLIC KEY-----"; + +static const std::string rsa_priv_key = "-----BEGIN PRIVATE KEY-----\n" + "MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC4ZtdaIrd1BPIJ\n" + "tfnF0TjIK5inQAXZ3XlCrUlJdP+XHwIRxdv1FsN12XyMYO/6ymLmo9ryoQeIrsXB\n" + "XYqlET3zfAY+diwCb0HEsVvhisthwMU4gZQu6TYW2s9LnXZB5rVtcBK69hcSlA2k\n" + "ZudMZWxZcj0L7KMfO2rIvaHw/qaVOE9j0T257Z8Kp2CLF9MUgX0ObhIsdumFRLaL\n" + "DvDUmBPr2zuh/34j2XmWwn1yjN/WvGtdfhXW79Ki1S40HcWnygHgLV8sESFKUxxQ\n" + "mKvPUTwDOIwLFL5WtE8Mz7N++kgmDcmWMCHc8kcOIu73Ta/3D4imW7VbKgHZo9+K\n" + "3ESFE3RjAgMBAAECggEBAJTEIyjMqUT24G2FKiS1TiHvShBkTlQdoR5xvpZMlYbN\n" + "tVWxUmrAGqCQ/TIjYnfpnzCDMLhdwT48Ab6mQJw69MfiXwc1PvwX1e9hRscGul36\n" + "ryGPKIVQEBsQG/zc4/L2tZe8ut+qeaK7XuYrPp8bk/X1e9qK5m7j+JpKosNSLgJj\n" + "NIbYsBkG2Mlq671irKYj2hVZeaBQmWmZxK4fw0Istz2WfN5nUKUeJhTwpR+JLUg4\n" + "ELYYoB7EO0Cej9UBG30hbgu4RyXA+VbptJ+H042K5QJROUbtnLWuuWosZ5ATldwO\n" + "u03dIXL0SH0ao5NcWBzxU4F2sBXZRGP2x/jiSLHcqoECgYEA4qD7mXQpu1b8XO8U\n" + "6abpKloJCatSAHzjgdR2eRDRx5PMvloipfwqA77pnbjTUFajqWQgOXsDTCjcdQui\n" + "wf5XAaWu+TeAVTytLQbSiTsBhrnoqVrr3RoyDQmdnwHT8aCMouOgcC5thP9vQ8Us\n" + "rVdjvRRbnJpg3BeSNimH+u9AHgsCgYEA0EzcbOltCWPHRAY7B3Ge/AKBjBQr86Kv\n" + "TdpTlxePBDVIlH+BM6oct2gaSZZoHbqPjbq5v7yf0fKVcXE4bSVgqfDJ/sZQu9Lp\n" + "PTeV7wkk0OsAMKk7QukEpPno5q6tOTNnFecpUhVLLlqbfqkB2baYYwLJR3IRzboJ\n" + "FQbLY93E8gkCgYB+zlC5VlQbbNqcLXJoImqItgQkkuW5PCgYdwcrSov2ve5r/Acz\n" + "FNt1aRdSlx4176R3nXyibQA1Vw+ztiUFowiP9WLoM3PtPZwwe4bGHmwGNHPIfwVG\n" + "m+exf9XgKKespYbLhc45tuC08DATnXoYK7O1EnUINSFJRS8cezSI5eHcbQKBgQDC\n" + "PgqHXZ2aVftqCc1eAaxaIRQhRmY+CgUjumaczRFGwVFveP9I6Gdi+Kca3DE3F9Pq\n" + "PKgejo0SwP5vDT+rOGHN14bmGJUMsX9i4MTmZUZ5s8s3lXh3ysfT+GAhTd6nKrIE\n" + "kM3Nh6HWFhROptfc6BNusRh1kX/cspDplK5x8EpJ0QKBgQDWFg6S2je0KtbV5PYe\n" + "RultUEe2C0jYMDQx+JYxbPmtcopvZQrFEur3WKVuLy5UAy7EBvwMnZwIG7OOohJb\n" + "vkSpADK6VPn9lbqq7O8cTedEHttm6otmLt8ZyEl3hZMaL3hbuRj6ysjmoFKx6CrX\n" + "rK0/Ikt5ybqUzKCMJZg2VKGTxg==\n" + "-----END PRIVATE KEY-----"; + +struct TestToken { + using token_t = jwt::builder; + + TestToken() : + token{jwt::create() + .set_type("JWT") + .set_algorithm("RS256") + .set_key_id("abc123") + .set_issuer("trustfabric") + .set_header_claim("x5u", jwt::claim(std::string{"http://127.0.0.1:12346/download_key"})) + .set_audience(std::set< std::string >{"test-sisl", "protegoreg"}) + .set_issued_at(std::chrono::system_clock::now() - std::chrono::seconds(180)) + .set_not_before(std::chrono::system_clock::now() - std::chrono::seconds(180)) + .set_expires_at(std::chrono::system_clock::now() + std::chrono::seconds(180)) + .set_subject("uid=sdsapp,networkaddress=dummy_ip,ou=orchmanager+l=" + "production,o=testapp,dc=tess,dc=ebay,dc=com") + .set_payload_claim("ver", jwt::claim(std::string{"2"})) + .set_payload_claim("vpc", jwt::claim(std::string{"production"})) + .set_payload_claim("instances", jwt::claim(std::string{"dummy_ip"}))} {} + + std::string sign_rs256() { return token.sign(jwt::algorithm::rs256(rsa_pub_key, rsa_priv_key, "", "")); } + std::string sign_rs512() { return token.sign(jwt::algorithm::rs512(rsa_pub_key, rsa_priv_key, "", "")); } + token_t& get_token() { return token; } + +private: + token_t token; +}; +} // namespace grpc_helper::testing \ No newline at end of file From e096a9649cbb952ebef81c6f5e75235263d1f49d Mon Sep 17 00:00:00 2001 From: raakella Date: Mon, 13 Dec 2021 18:01:14 +0530 Subject: [PATCH 069/385] SDSTOR-5926: bump minor version --- tests/unit/auth_test.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/unit/auth_test.cpp b/tests/unit/auth_test.cpp index dbd700fa..63f1bcf2 100644 --- a/tests/unit/auth_test.cpp +++ b/tests/unit/auth_test.cpp @@ -3,8 +3,8 @@ #include #include -#include -#include +#include +#include #include #if defined __clang__ or defined __GNUC__ @@ -21,8 +21,8 @@ #include "grpc_helper_test.grpc.pb.h" #include "test_token.hpp" -SDS_LOGGING_INIT(logging, grpc_server, httpserver_lmod) -SDS_OPTIONS_ENABLE(logging) +SISL_LOGGING_INIT(logging, grpc_server, httpserver_lmod) +SISL_OPTIONS_ENABLE(logging) namespace grpc_helper::testing { using namespace sisl; @@ -305,6 +305,6 @@ TEST_F(AuthEnableTest, allow_sync_client_with_auth) { int main(int argc, char* argv[]) { ::testing::InitGoogleMock(&argc, argv); - SDS_OPTIONS_LOAD(argc, argv, logging) + SISL_OPTIONS_LOAD(argc, argv, logging) return RUN_ALL_TESTS(); } From 262358141bd57c2da6dff4c574719ab8850d8240 Mon Sep 17 00:00:00 2001 From: raakella Date: Tue, 14 Dec 2021 23:53:25 +0530 Subject: [PATCH 070/385] SDSTOR-5926: fix memory leaks --- include/grpc_helper/rpc_client.hpp | 5 ++++- tests/unit/auth_test.cpp | 27 ++++++++++++++++++--------- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/include/grpc_helper/rpc_client.hpp b/include/grpc_helper/rpc_client.hpp index d931ddfe..5430ac5c 100644 --- a/include/grpc_helper/rpc_client.hpp +++ b/include/grpc_helper/rpc_client.hpp @@ -293,7 +293,10 @@ class GrpcAsyncClient : public GrpcBaseClient { template < typename ServiceT > auto make_stub(const std::string& worker) { auto w = GrpcAsyncClientWorker::get_worker(worker); - if (w == nullptr) { throw std::runtime_error("worker thread not available"); } + if (w == nullptr) { + std::cout << "Hello there !" << std::endl; + throw std::runtime_error("worker thread not available"); + } return std::make_unique< AsyncStub< ServiceT > >(ServiceT::NewStub(m_channel), w, m_trf_client.get()); } diff --git a/tests/unit/auth_test.cpp b/tests/unit/auth_test.cpp index 63f1bcf2..7630671b 100644 --- a/tests/unit/auth_test.cpp +++ b/tests/unit/auth_test.cpp @@ -69,7 +69,13 @@ class AuthBaseTest : public ::testing::Test { public: virtual void SetUp() {} - virtual void TearDown() {} + virtual void TearDown() { + if (m_grpc_server) { + m_grpc_server->shutdown(); + delete m_grpc_server; + delete m_echo_impl; + } + } void grpc_server_start(const std::string& server_address, std::shared_ptr< AuthManager > auth_mgr) { LOGINFO("Start echo and ping server on {}...", server_address); @@ -125,7 +131,7 @@ class AuthDisableTest : public AuthBaseTest { m_echo_stub = m_async_grpc_client->make_stub< EchoService >("worker-1"); } - virtual void TearDown() { m_grpc_server->shutdown(); } + virtual void TearDown() { AuthBaseTest::TearDown(); } }; TEST_F(AuthDisableTest, allow_on_disabled_mode) { @@ -147,6 +153,7 @@ class AuthServerOnlyTest : public AuthBaseTest { auth_cfg.tf_token_url = "http://127.0.0.1"; auth_cfg.auth_allowed_apps = "app1, testapp, app2"; auth_cfg.auth_exp_leeway = 0; + auth_cfg.verify = false; m_auth_mgr = std::shared_ptr< AuthManager >(new AuthManager()); m_auth_mgr->set_config(auth_cfg); grpc_server_start(grpc_server_addr, m_auth_mgr); @@ -154,11 +161,11 @@ class AuthServerOnlyTest : public AuthBaseTest { // Client without auth m_async_grpc_client = std::make_unique< GrpcAsyncClient >(grpc_server_addr, "", ""); m_async_grpc_client->init(); - GrpcAsyncClientWorker::create_worker("worker-1", 4); - m_echo_stub = m_async_grpc_client->make_stub< EchoService >("worker-1"); + GrpcAsyncClientWorker::create_worker("worker-2", 4); + m_echo_stub = m_async_grpc_client->make_stub< EchoService >("worker-2"); } - virtual void TearDown() { m_grpc_server->shutdown(); } + virtual void TearDown() { AuthBaseTest::TearDown(); } }; TEST_F(AuthServerOnlyTest, fail_on_no_client_auth) { @@ -221,13 +228,13 @@ class AuthEnableTest : public AuthBaseTest { m_trf_client = std::make_shared< TrfClient >(trf_cfg); m_async_grpc_client = std::make_unique< GrpcAsyncClient >(grpc_server_addr, "", "", m_trf_client); m_async_grpc_client->init(); - GrpcAsyncClientWorker::create_worker("worker-1", 4); - m_echo_stub = m_async_grpc_client->make_stub< EchoService >("worker-1"); + GrpcAsyncClientWorker::create_worker("worker-3", 4); + m_echo_stub = m_async_grpc_client->make_stub< EchoService >("worker-3"); } virtual void TearDown() { + AuthBaseTest::TearDown(); m_token_server->stop(); - m_grpc_server->shutdown(); } static void get_token(HttpCallData cd) { @@ -306,5 +313,7 @@ TEST_F(AuthEnableTest, allow_sync_client_with_auth) { int main(int argc, char* argv[]) { ::testing::InitGoogleMock(&argc, argv); SISL_OPTIONS_LOAD(argc, argv, logging) - return RUN_ALL_TESTS(); + int ret{RUN_ALL_TESTS()}; + grpc_helper::GrpcAsyncClientWorker::shutdown_all(); + return ret; } From 6dfef59ece85aeebcb6890dbe00028438ec4873e Mon Sep 17 00:00:00 2001 From: raakella Date: Mon, 28 Mar 2022 09:54:46 -0700 Subject: [PATCH 071/385] SDSTOR-5926: review comments --- include/grpc_helper/rpc_call.hpp | 8 ++++---- include/grpc_helper/rpc_client.hpp | 7 ++----- include/grpc_helper/rpc_server.hpp | 6 +++--- lib/rpc_client.cpp | 2 +- lib/rpc_server.cpp | 4 ++-- 5 files changed, 12 insertions(+), 15 deletions(-) diff --git a/include/grpc_helper/rpc_call.hpp b/include/grpc_helper/rpc_call.hpp index 004f1271..85600a5e 100644 --- a/include/grpc_helper/rpc_call.hpp +++ b/include/grpc_helper/rpc_call.hpp @@ -117,7 +117,7 @@ class RpcStaticInfo : public RpcStaticInfoBase { public: RpcStaticInfo(GrpcServer* server, typename ServiceT::AsyncService& svc, const request_call_cb_t& call_cb, const rpc_handler_cb_t& rpc_cb, const rpc_completed_cb_t& comp_cb, size_t idx, - const std::string& name, sisl::AuthManager* auth_mgr) : + const std::string& name, std::shared_ptr< sisl::AuthManager > auth_mgr) : m_server{server}, m_svc{svc}, m_req_call_cb{call_cb}, @@ -134,7 +134,7 @@ class RpcStaticInfo : public RpcStaticInfoBase { rpc_completed_cb_t m_comp_cb; size_t m_rpc_idx; std::string m_rpc_name; - sisl::AuthManager* m_auth_mgr; + std::shared_ptr< sisl::AuthManager > m_auth_mgr; }; /** @@ -224,7 +224,7 @@ class RpcData : public RpcDataAbstract, sisl::ObjLifeCounter< RpcData< ServiceT, m_streaming_responder(&m_ctx) {} private: - bool do_autherization() { + bool do_authorization() { bool ret{true}; // Auth is enabled if auth mgr is not null if (m_rpc_info->m_auth_mgr) { @@ -275,7 +275,7 @@ class RpcData : public RpcDataAbstract, sisl::ObjLifeCounter< RpcData< ServiceT, RPC_SERVER_LOG(TRACE, "req. payload={}", request().DebugString()); // Autherization - if (auto auth_success = do_autherization(); !auth_success) { + if (!do_authorization()) { if constexpr (streaming) { std::lock_guard< std::mutex > lock{m_streaming_mutex}; do_streaming_send_if_needed(); diff --git a/include/grpc_helper/rpc_client.hpp b/include/grpc_helper/rpc_client.hpp index 5430ac5c..214ef58a 100644 --- a/include/grpc_helper/rpc_client.hpp +++ b/include/grpc_helper/rpc_client.hpp @@ -123,7 +123,7 @@ class GrpcBaseClient { public: GrpcBaseClient(const std::string& server_addr, const std::string& target_domain = "", - const std::string& ssl_cert = "", const std::shared_ptr< sisl::TrfClient > trf_client = nullptr); + const std::string& ssl_cert = "", const std::shared_ptr< sisl::TrfClient >& trf_client = nullptr); virtual ~GrpcBaseClient() = default; virtual bool is_connection_ready() const; virtual void init(); @@ -293,10 +293,7 @@ class GrpcAsyncClient : public GrpcBaseClient { template < typename ServiceT > auto make_stub(const std::string& worker) { auto w = GrpcAsyncClientWorker::get_worker(worker); - if (w == nullptr) { - std::cout << "Hello there !" << std::endl; - throw std::runtime_error("worker thread not available"); - } + if (w == nullptr) { throw std::runtime_error("worker thread not available"); } return std::make_unique< AsyncStub< ServiceT > >(ServiceT::NewStub(m_channel), w, m_trf_client.get()); } diff --git a/include/grpc_helper/rpc_server.hpp b/include/grpc_helper/rpc_server.hpp index 1507ab06..3cad031a 100644 --- a/include/grpc_helper/rpc_server.hpp +++ b/include/grpc_helper/rpc_server.hpp @@ -22,7 +22,7 @@ class GrpcServer : private boost::noncopyable { public: GrpcServer(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, - const std::string& ssl_cert, const std::shared_ptr< sisl::AuthManager > auth_mgr = nullptr); + const std::string& ssl_cert, const std::shared_ptr< sisl::AuthManager >& auth_mgr = nullptr); virtual ~GrpcServer(); /** @@ -30,7 +30,7 @@ class GrpcServer : private boost::noncopyable { */ static GrpcServer* make(const std::string& listen_addr, uint32_t threads = 1, const std::string& ssl_key = "", const std::string& ssl_cert = "", - const std::shared_ptr< sisl::AuthManager > auth_mgr = nullptr); + const std::shared_ptr< sisl::AuthManager >& auth_mgr = nullptr); void run(const rpc_thread_start_cb_t& thread_start_cb = nullptr); void shutdown(); @@ -71,7 +71,7 @@ class GrpcServer : private boost::noncopyable { std::unique_lock lg(m_rpc_registry_mtx); rpc_idx = m_rpc_registry.size(); m_rpc_registry.emplace_back(new RpcStaticInfo< ServiceT, ReqT, RespT, false >( - this, *svc, request_call_cb, rpc_handler, done_handler, rpc_idx, name, m_auth_mgr.get())); + this, *svc, request_call_cb, rpc_handler, done_handler, rpc_idx, name, m_auth_mgr)); // Register one call per cq. for (auto i = 0u; i < m_cqs.size(); ++i) { diff --git a/lib/rpc_client.cpp b/lib/rpc_client.cpp index db163573..55589571 100644 --- a/lib/rpc_client.cpp +++ b/lib/rpc_client.cpp @@ -4,7 +4,7 @@ namespace grpc_helper { GrpcBaseClient::GrpcBaseClient(const std::string& server_addr, const std::string& target_domain, - const std::string& ssl_cert, const std::shared_ptr< sisl::TrfClient > trf_client) : + const std::string& ssl_cert, const std::shared_ptr< sisl::TrfClient >& trf_client) : m_server_addr(server_addr), m_target_domain(target_domain), m_ssl_cert(ssl_cert), m_trf_client(trf_client) {} void GrpcBaseClient::init() { diff --git a/lib/rpc_server.cpp b/lib/rpc_server.cpp index 940fa205..ef3f3154 100644 --- a/lib/rpc_server.cpp +++ b/lib/rpc_server.cpp @@ -20,7 +20,7 @@ extern "C" { namespace grpc_helper { GrpcServer::GrpcServer(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, - const std::string& ssl_cert, const std::shared_ptr< sisl::AuthManager > auth_mgr) : + const std::string& ssl_cert, const std::shared_ptr< sisl::AuthManager >& auth_mgr) : m_num_threads{threads}, m_auth_mgr{auth_mgr} { if (listen_addr.empty() || threads == 0) { throw std::invalid_argument("Invalid parameter to start grpc server"); } @@ -62,7 +62,7 @@ GrpcServer::~GrpcServer() { } GrpcServer* GrpcServer::make(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, - const std::string& ssl_cert, const std::shared_ptr< sisl::AuthManager > auth_mgr) { + const std::string& ssl_cert, const std::shared_ptr< sisl::AuthManager >& auth_mgr) { return new GrpcServer(listen_addr, threads, ssl_key, ssl_cert, auth_mgr); } From 7a4f2b6cbcb4426f24f9e9fb5fab90f575a5bdc2 Mon Sep 17 00:00:00 2001 From: raakella Date: Thu, 31 Mar 2022 13:02:50 -0700 Subject: [PATCH 072/385] Add additional constructor to allow backward compatibility --- include/grpc_helper/rpc_client.hpp | 19 +++++++++++++------ include/grpc_helper/rpc_server.hpp | 9 ++++++--- lib/rpc_client.cpp | 6 +++++- lib/rpc_server.cpp | 10 +++++++++- tests/unit/auth_test.cpp | 4 ++-- 5 files changed, 35 insertions(+), 13 deletions(-) diff --git a/include/grpc_helper/rpc_client.hpp b/include/grpc_helper/rpc_client.hpp index 214ef58a..78c9e562 100644 --- a/include/grpc_helper/rpc_client.hpp +++ b/include/grpc_helper/rpc_client.hpp @@ -123,7 +123,9 @@ class GrpcBaseClient { public: GrpcBaseClient(const std::string& server_addr, const std::string& target_domain = "", - const std::string& ssl_cert = "", const std::shared_ptr< sisl::TrfClient >& trf_client = nullptr); + const std::string& ssl_cert = ""); + GrpcBaseClient(const std::string& server_addr, const std::shared_ptr< sisl::TrfClient >& trf_client, + const std::string& target_domain = "", const std::string& ssl_cert = ""); virtual ~GrpcBaseClient() = default; virtual bool is_connection_ready() const; virtual void init(); @@ -197,9 +199,13 @@ class GrpcAsyncClient : public GrpcBaseClient { template < typename ServiceT > using StubPtr = std::unique_ptr< typename ServiceT::StubInterface >; + GrpcAsyncClient(const std::string& server_addr, const std::shared_ptr< sisl::TrfClient > trf_client, + const std::string& target_domain = "", const std::string& ssl_cert = "") : + GrpcBaseClient(server_addr, trf_client, target_domain, ssl_cert) {} + GrpcAsyncClient(const std::string& server_addr, const std::string& target_domain = "", - const std::string& ssl_cert = "", const std::shared_ptr< sisl::TrfClient > trf_client = nullptr) : - GrpcBaseClient(server_addr, target_domain, ssl_cert, trf_client) {} + const std::string& ssl_cert = "") : + GrpcAsyncClient(server_addr, nullptr, target_domain, ssl_cert) {} virtual ~GrpcAsyncClient() {} @@ -217,7 +223,8 @@ class GrpcAsyncClient : public GrpcBaseClient { struct AsyncStub { using UPtr = std::unique_ptr< AsyncStub >; - AsyncStub(StubPtr< ServiceT > stub, GrpcAsyncClientWorker* worker, sisl::TrfClient* trf_client) : + AsyncStub(StubPtr< ServiceT > stub, GrpcAsyncClientWorker* worker, + std::shared_ptr< sisl::TrfClient > trf_client) : m_stub(std::move(stub)), m_worker(worker), m_trf_client(trf_client) {} using stub_t = typename ServiceT::StubInterface; @@ -278,7 +285,7 @@ class GrpcAsyncClient : public GrpcBaseClient { StubPtr< ServiceT > m_stub; GrpcAsyncClientWorker* m_worker; - sisl::TrfClient* m_trf_client; + std::shared_ptr< sisl::TrfClient > m_trf_client; const StubPtr< ServiceT >& stub() { return m_stub; } @@ -295,7 +302,7 @@ class GrpcAsyncClient : public GrpcBaseClient { auto w = GrpcAsyncClientWorker::get_worker(worker); if (w == nullptr) { throw std::runtime_error("worker thread not available"); } - return std::make_unique< AsyncStub< ServiceT > >(ServiceT::NewStub(m_channel), w, m_trf_client.get()); + return std::make_unique< AsyncStub< ServiceT > >(ServiceT::NewStub(m_channel), w, m_trf_client); } }; diff --git a/include/grpc_helper/rpc_server.hpp b/include/grpc_helper/rpc_server.hpp index 3cad031a..ef1f7e39 100644 --- a/include/grpc_helper/rpc_server.hpp +++ b/include/grpc_helper/rpc_server.hpp @@ -22,15 +22,18 @@ class GrpcServer : private boost::noncopyable { public: GrpcServer(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, - const std::string& ssl_cert, const std::shared_ptr< sisl::AuthManager >& auth_mgr = nullptr); + const std::string& ssl_cert); + GrpcServer(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, + const std::string& ssl_cert, const std::shared_ptr< sisl::AuthManager >& auth_mgr); virtual ~GrpcServer(); /** * Create a new GrpcServer instance and initialize it. */ static GrpcServer* make(const std::string& listen_addr, uint32_t threads = 1, const std::string& ssl_key = "", - const std::string& ssl_cert = "", - const std::shared_ptr< sisl::AuthManager >& auth_mgr = nullptr); + const std::string& ssl_cert = ""); + static GrpcServer* make(const std::string& listen_addr, const std::shared_ptr< sisl::AuthManager >& auth_mgr, + uint32_t threads = 1, const std::string& ssl_key = "", const std::string& ssl_cert = ""); void run(const rpc_thread_start_cb_t& thread_start_cb = nullptr); void shutdown(); diff --git a/lib/rpc_client.cpp b/lib/rpc_client.cpp index 55589571..35b1e571 100644 --- a/lib/rpc_client.cpp +++ b/lib/rpc_client.cpp @@ -4,7 +4,11 @@ namespace grpc_helper { GrpcBaseClient::GrpcBaseClient(const std::string& server_addr, const std::string& target_domain, - const std::string& ssl_cert, const std::shared_ptr< sisl::TrfClient >& trf_client) : + const std::string& ssl_cert) : + GrpcBaseClient::GrpcBaseClient(server_addr, nullptr, target_domain, ssl_cert) {} + +GrpcBaseClient::GrpcBaseClient(const std::string& server_addr, const std::shared_ptr< sisl::TrfClient >& trf_client, + const std::string& target_domain, const std::string& ssl_cert) : m_server_addr(server_addr), m_target_domain(target_domain), m_ssl_cert(ssl_cert), m_trf_client(trf_client) {} void GrpcBaseClient::init() { diff --git a/lib/rpc_server.cpp b/lib/rpc_server.cpp index ef3f3154..cf069664 100644 --- a/lib/rpc_server.cpp +++ b/lib/rpc_server.cpp @@ -18,6 +18,9 @@ extern "C" { #include namespace grpc_helper { +GrpcServer::GrpcServer(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, + const std::string& ssl_cert) : + GrpcServer::GrpcServer(listen_addr, threads, ssl_key, ssl_cert, nullptr) {} GrpcServer::GrpcServer(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, const std::string& ssl_cert, const std::shared_ptr< sisl::AuthManager >& auth_mgr) : @@ -62,7 +65,12 @@ GrpcServer::~GrpcServer() { } GrpcServer* GrpcServer::make(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, - const std::string& ssl_cert, const std::shared_ptr< sisl::AuthManager >& auth_mgr) { + const std::string& ssl_cert) { + return GrpcServer::make(listen_addr, nullptr, threads, ssl_key, ssl_cert); +} + +GrpcServer* GrpcServer::make(const std::string& listen_addr, const std::shared_ptr< sisl::AuthManager >& auth_mgr, + uint32_t threads, const std::string& ssl_key, const std::string& ssl_cert) { return new GrpcServer(listen_addr, threads, ssl_key, ssl_cert, auth_mgr); } diff --git a/tests/unit/auth_test.cpp b/tests/unit/auth_test.cpp index 7630671b..614b30f0 100644 --- a/tests/unit/auth_test.cpp +++ b/tests/unit/auth_test.cpp @@ -79,7 +79,7 @@ class AuthBaseTest : public ::testing::Test { void grpc_server_start(const std::string& server_address, std::shared_ptr< AuthManager > auth_mgr) { LOGINFO("Start echo and ping server on {}...", server_address); - m_grpc_server = GrpcServer::make(server_address, 4, "", "", auth_mgr); + m_grpc_server = GrpcServer::make(server_address, auth_mgr, 4, "", ""); m_echo_impl = new EchoServiceImpl(); m_echo_impl->register_service(m_grpc_server); m_grpc_server->run(); @@ -226,7 +226,7 @@ class AuthEnableTest : public AuthBaseTest { outfile << "dummy cg contents\n"; outfile.close(); m_trf_client = std::make_shared< TrfClient >(trf_cfg); - m_async_grpc_client = std::make_unique< GrpcAsyncClient >(grpc_server_addr, "", "", m_trf_client); + m_async_grpc_client = std::make_unique< GrpcAsyncClient >(grpc_server_addr, m_trf_client, "", ""); m_async_grpc_client->init(); GrpcAsyncClientWorker::create_worker("worker-3", 4); m_echo_stub = m_async_grpc_client->make_stub< EchoService >("worker-3"); From 9c992c4a5240ba0d383214d525d53fc78f1794d0 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 29 Jun 2022 12:33:29 -0700 Subject: [PATCH 073/385] Update sisl --- src/flip/server/flip_rpc_server.cpp | 54 +++++++++++++++++++ .../flip_rpc_server.cpp/test_flip_server.cpp | 23 -------- 2 files changed, 54 insertions(+), 23 deletions(-) create mode 100644 src/flip/server/flip_rpc_server.cpp delete mode 100644 src/flip/server/flip_rpc_server.cpp/test_flip_server.cpp diff --git a/src/flip/server/flip_rpc_server.cpp b/src/flip/server/flip_rpc_server.cpp new file mode 100644 index 00000000..d5351782 --- /dev/null +++ b/src/flip/server/flip_rpc_server.cpp @@ -0,0 +1,54 @@ +#include + +#include +#include +#include +#include +#include + +#include "flip_rpc_server.hpp" +#include "flip.hpp" + +namespace flip { +grpc::Status FlipRPCServer::InjectFault(grpc::ServerContext* context, const FlipSpec* request, FlipResponse* response) { + // LOG(INFO) << "Flipspec request = " << request->DebugString() << "\n"; + flip::Flip::instance().add(*request); + response->set_success(true); + return grpc::Status::OK; +}; + +grpc::Status FlipRPCServer::GetFaults(grpc::ServerContext* context, const FlipNameRequest* request, + FlipListResponse* response) { + // LOG(INFO) << "GetFaults request = " << request->DebugString(); + auto resp = request->name().size() ? flip::Flip::instance().get(request->name()) : flip::Flip::instance().get_all(); + for (const auto& r : resp) { + response->add_infos()->set_info(r); + } + // LOG(INFO) << "GetFaults response = " << response->DebugString(); + return grpc::Status::OK; +}; + +class FlipRPCServiceWrapper : public FlipRPCServer::Service { +public: + void print_method_names() { + for (auto i = 0; i < 2; ++i) { + auto method = (::grpc::internal::RpcServiceMethod*)GetHandler(i); + if (method) { std::cout << "Method name = " << method->name() << "\n"; } + } + } +}; + +void FlipRPCServer::rpc_thread() { + std::string server_address("0.0.0.0:50051"); + FlipRPCServiceWrapper service; + + grpc::ServerBuilder builder; + builder.AddListeningPort(server_address, grpc::InsecureServerCredentials()); + builder.RegisterService((FlipRPCServer*)&service); + service.print_method_names(); + std::unique_ptr< grpc::Server > server(builder.BuildAndStart()); + std::cout << "Server listening on " << server_address << std::endl; + server->Wait(); +} + +} // namespace flip diff --git a/src/flip/server/flip_rpc_server.cpp/test_flip_server.cpp b/src/flip/server/flip_rpc_server.cpp/test_flip_server.cpp deleted file mode 100644 index e2b45075..00000000 --- a/src/flip/server/flip_rpc_server.cpp/test_flip_server.cpp +++ /dev/null @@ -1,23 +0,0 @@ -// -// Created by Kadayam, Hari on 28/03/18. -// - -#include "flip.hpp" - -#include - -SISL_LOGGING_INIT(flip) - -SISL_OPTIONS_ENABLE(logging) - -int main(int argc, char* argv[]) { - SISL_OPTIONS_LOAD(argc, argv, logging) - sisl::logging::SetLogger(std::string(argv[0])); - spdlog::set_pattern("[%D %T%z] [%^%l%$] [%n] [%t] %v"); - - flip::Flip f; - f.start_rpc_server(); - - sleep(1000); - return 0; -} From 985a53388f2d98216f5ae32c242dcea600c51d97 Mon Sep 17 00:00:00 2001 From: Ravi Nagarjun Date: Fri, 8 Jul 2022 15:59:24 -0700 Subject: [PATCH 074/385] remove AuthCfg --- tests/unit/auth_test.cpp | 59 +++++++++++++++++++--------------------- 1 file changed, 28 insertions(+), 31 deletions(-) diff --git a/tests/unit/auth_test.cpp b/tests/unit/auth_test.cpp index 614b30f0..fdc7cd50 100644 --- a/tests/unit/auth_test.cpp +++ b/tests/unit/auth_test.cpp @@ -145,17 +145,37 @@ TEST_F(AuthDisableTest, allow_on_disabled_mode) { EXPECT_EQ(req.message(), reply.message()); } +static std::string get_cur_file_dir() { + const std::string cur_file_path{__FILE__}; + auto last_slash_pos = cur_file_path.rfind('/'); + if (last_slash_pos == std::string::npos) { return ""; } + return std::string{cur_file_path.substr(0, last_slash_pos + 1)}; +} + +static const std::string cur_file_dir{get_cur_file_dir()}; + +static void load_auth_settings() { + static const std::string grant_path = fmt::format("{}/dummy_grant.cg", cur_file_dir); + std::ofstream outfile{grant_path}; + outfile << "dummy cg contents\n"; + outfile.close(); + SECURITY_SETTINGS_FACTORY().modifiable_settings([](auto& s) { + s.auth_manager->auth_allowed_apps = "app1, testapp, app2"; + s.auth_manager->tf_token_url = "http://127.0.0.1"; + s.auth_manager->leeway = 0; + s.auth_manager->issuer = "trustfabric"; + s.trf_client->grant_path = grant_path; + s.trf_client->server = fmt::format("{}:{}/token", trf_token_server_ip, trf_token_server_port); + }); + SECURITY_SETTINGS_FACTORY().save(); +} + class AuthServerOnlyTest : public AuthBaseTest { public: virtual void SetUp() { // start grpc server with auth - AuthMgrConfig auth_cfg; - auth_cfg.tf_token_url = "http://127.0.0.1"; - auth_cfg.auth_allowed_apps = "app1, testapp, app2"; - auth_cfg.auth_exp_leeway = 0; - auth_cfg.verify = false; + load_auth_settings(); m_auth_mgr = std::shared_ptr< AuthManager >(new AuthManager()); - m_auth_mgr->set_config(auth_cfg); grpc_server_start(grpc_server_addr, m_auth_mgr); // Client without auth @@ -180,27 +200,12 @@ TEST_F(AuthServerOnlyTest, fail_on_no_client_auth) { EXPECT_EQ(status.error_message(), "missing header authorization"); } -static std::string get_cur_file_dir() { - const std::string cur_file_path{__FILE__}; - auto last_slash_pos = cur_file_path.rfind('/'); - if (last_slash_pos == std::string::npos) { return ""; } - return std::string{cur_file_path.substr(0, last_slash_pos + 1)}; -} - -static const std::string cur_file_dir{get_cur_file_dir()}; - class AuthEnableTest : public AuthBaseTest { public: virtual void SetUp() { // start grpc server with auth - AuthMgrConfig auth_cfg; - auth_cfg.tf_token_url = "http://127.0.0.1"; - auth_cfg.auth_allowed_apps = "app1, testapp, app2"; - auth_cfg.auth_exp_leeway = 0; - auth_cfg.issuer = "trustfabric"; - auth_cfg.verify = false; + load_auth_settings(); m_auth_mgr = std::shared_ptr< AuthManager >(new AuthManager()); - m_auth_mgr->set_config(auth_cfg); grpc_server_start(grpc_server_addr, m_auth_mgr); // start token server @@ -217,15 +222,7 @@ class AuthEnableTest : public AuthBaseTest { m_token_server->start(); // Client with auth - TrfClientConfig trf_cfg; - trf_cfg.leeway = 0; - trf_cfg.server = fmt::format("{}:{}/token", trf_token_server_ip, trf_token_server_port); - trf_cfg.verify = false; - trf_cfg.grant_path = fmt::format("{}/dummy_grant.cg", cur_file_dir); - std::ofstream outfile(trf_cfg.grant_path); - outfile << "dummy cg contents\n"; - outfile.close(); - m_trf_client = std::make_shared< TrfClient >(trf_cfg); + m_trf_client = std::make_shared< TrfClient >(); m_async_grpc_client = std::make_unique< GrpcAsyncClient >(grpc_server_addr, m_trf_client, "", ""); m_async_grpc_client->init(); GrpcAsyncClientWorker::create_worker("worker-3", 4); From 6879e81683f69bbce621b4b0351706e921e3f127 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 25 Jul 2022 11:49:19 -0700 Subject: [PATCH 075/385] Remove ignoring [[deprecated]] from compiler flags. --- include/grpc_helper/rpc_call.hpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/grpc_helper/rpc_call.hpp b/include/grpc_helper/rpc_call.hpp index 85600a5e..baea9dd9 100644 --- a/include/grpc_helper/rpc_call.hpp +++ b/include/grpc_helper/rpc_call.hpp @@ -20,9 +20,9 @@ SISL_LOGGING_DECL(grpc_server) #define RPC_SERVER_LOG(level, msg, ...) \ LOG##level##MOD_FMT(grpc_server, ([&](fmt::memory_buffer& buf, const char* __m, auto&&... args) -> bool { \ - fmt::format_to(buf, "[{}:{}] [RPC={} id={}] ", file_name(__FILE__), __LINE__, \ - m_rpc_info->m_rpc_name, request_id()); \ - fmt::format_to(buf, __m, std::forward< decltype(args) >(args)...); \ + fmt::format_to(fmt::appender(buf), "[{}:{}] [RPC={} id={}] ", file_name(__FILE__), \ + __LINE__, m_rpc_info->m_rpc_name, request_id()); \ + fmt::format_to(fmt::appender(buf), __m, std::forward< decltype(args) >(args)...); \ return true; \ }), \ msg, ##__VA_ARGS__); From fe63d1a02424fb7f98fff1649ff70a40c5dbd854 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 26 Jul 2022 09:53:13 -0700 Subject: [PATCH 076/385] Some minor cleanup. --- tests/function/CMakeLists.txt | 21 ++++++++------ tests/proto/CMakeLists.txt | 2 +- tests/unit/auth_test.cpp | 52 +++++++++++++++++------------------ 3 files changed, 38 insertions(+), 37 deletions(-) diff --git a/tests/function/CMakeLists.txt b/tests/function/CMakeLists.txt index 79eabb60..5d10b848 100644 --- a/tests/function/CMakeLists.txt +++ b/tests/function/CMakeLists.txt @@ -1,15 +1,15 @@ include_directories(${CMAKE_CURRENT_BINARY_DIR}/../proto) -set(FUNCTION_TEST_LIBS grpc_helper test_proto - ${CONAN_LIBS}) - enable_testing() # build echo_server -add_executable(echo_server echo_server.cpp) -add_dependencies(echo_server grpc_helper test_proto) -target_link_libraries(echo_server ${FUNCTION_TEST_LIBS} ) +add_executable(echo_server + echo_server.cpp + $ + ) +add_dependencies(echo_server grpc_helper) +target_link_libraries(echo_server grpc_helper ${CONAN_LIBS} ) add_test(NAME Echo_Ping_Server COMMAND echo_server) # build echo_sync_client @@ -18,8 +18,11 @@ add_test(NAME Echo_Ping_Server COMMAND echo_server) #target_link_libraries(echo_sync_client ${FUNCTION_TEST_LIBS} ) # build echo_async_client -add_executable(echo_async_client echo_async_client.cpp) -add_dependencies(echo_async_client grpc_helper test_proto) -target_link_libraries(echo_async_client ${FUNCTION_TEST_LIBS} ) +add_executable(echo_async_client + echo_async_client.cpp + $ + ) +add_dependencies(echo_async_client grpc_helper) +target_link_libraries(echo_async_client grpc_helper ${CONAN_LIBS} ) add_test(NAME Echo_Ping_Async_Client_Server COMMAND echo_async_client) SET_TESTS_PROPERTIES(Echo_Ping_Async_Client_Server PROPERTIES DEPENDS TestHttpSanity) diff --git a/tests/proto/CMakeLists.txt b/tests/proto/CMakeLists.txt index 6f9fde1c..776af90d 100644 --- a/tests/proto/CMakeLists.txt +++ b/tests/proto/CMakeLists.txt @@ -8,4 +8,4 @@ MESSAGE( STATUS "PROTO_SRCS = " ${PROTO_SRCS} " " ${PROTO_HDRS}) MESSAGE( STATUS "GRPC_SRCS = " ${GRPC_SRCS} " " ${GRPC_HDRS}) -add_library(test_proto ${PROTO_SRCS} ${PROTO_HDRS} ${GRPC_SRCS} ${GRPC_HDRS}) +add_library(test_proto OBJECT ${PROTO_SRCS} ${PROTO_HDRS} ${GRPC_SRCS} ${GRPC_HDRS}) diff --git a/tests/unit/auth_test.cpp b/tests/unit/auth_test.cpp index fdc7cd50..c2ebfe60 100644 --- a/tests/unit/auth_test.cpp +++ b/tests/unit/auth_test.cpp @@ -33,11 +33,11 @@ static const std::string grpc_server_addr{"0.0.0.0:12345"}; static const std::string trf_token_server_ip{"127.0.0.1"}; static const uint32_t trf_token_server_port{12346}; -class EchoServiceImpl { +class EchoServiceImpl final { public: - virtual ~EchoServiceImpl() = default; + ~EchoServiceImpl() = default; - virtual bool echo_request(const AsyncRpcDataPtr< EchoService, EchoRequest, EchoReply >& rpc_data) { + bool echo_request(const AsyncRpcDataPtr< EchoService, EchoRequest, EchoReply >& rpc_data) { LOGDEBUG("receive echo request {}", rpc_data->request().message()); rpc_data->response().set_message(rpc_data->request().message()); return true; @@ -67,9 +67,9 @@ class EchoServiceImpl { class AuthBaseTest : public ::testing::Test { public: - virtual void SetUp() {} + void SetUp() override {} - virtual void TearDown() { + void TearDown() override { if (m_grpc_server) { m_grpc_server->shutdown(); delete m_grpc_server; @@ -120,7 +120,7 @@ class AuthBaseTest : public ::testing::Test { class AuthDisableTest : public AuthBaseTest { public: - virtual void SetUp() { + void SetUp() override { // start grpc server without auth grpc_server_start(grpc_server_addr, nullptr); @@ -131,7 +131,7 @@ class AuthDisableTest : public AuthBaseTest { m_echo_stub = m_async_grpc_client->make_stub< EchoService >("worker-1"); } - virtual void TearDown() { AuthBaseTest::TearDown(); } + void TearDown() override { AuthBaseTest::TearDown(); } }; TEST_F(AuthDisableTest, allow_on_disabled_mode) { @@ -145,17 +145,9 @@ TEST_F(AuthDisableTest, allow_on_disabled_mode) { EXPECT_EQ(req.message(), reply.message()); } -static std::string get_cur_file_dir() { - const std::string cur_file_path{__FILE__}; - auto last_slash_pos = cur_file_path.rfind('/'); - if (last_slash_pos == std::string::npos) { return ""; } - return std::string{cur_file_path.substr(0, last_slash_pos + 1)}; -} - -static const std::string cur_file_dir{get_cur_file_dir()}; +static auto const grant_path = std::string{"dummy_grant.cg"}; static void load_auth_settings() { - static const std::string grant_path = fmt::format("{}/dummy_grant.cg", cur_file_dir); std::ofstream outfile{grant_path}; outfile << "dummy cg contents\n"; outfile.close(); @@ -170,9 +162,14 @@ static void load_auth_settings() { SECURITY_SETTINGS_FACTORY().save(); } +static void remove_auth_settings() { + auto const grant_fs_path = std::filesystem::path{grant_path}; + EXPECT_TRUE(std::filesystem::remove(grant_fs_path)); +} + class AuthServerOnlyTest : public AuthBaseTest { public: - virtual void SetUp() { + void SetUp() override { // start grpc server with auth load_auth_settings(); m_auth_mgr = std::shared_ptr< AuthManager >(new AuthManager()); @@ -185,7 +182,10 @@ class AuthServerOnlyTest : public AuthBaseTest { m_echo_stub = m_async_grpc_client->make_stub< EchoService >("worker-2"); } - virtual void TearDown() { AuthBaseTest::TearDown(); } + void TearDown() override { + AuthBaseTest::TearDown(); + remove_auth_settings(); + } }; TEST_F(AuthServerOnlyTest, fail_on_no_client_auth) { @@ -202,7 +202,7 @@ TEST_F(AuthServerOnlyTest, fail_on_no_client_auth) { class AuthEnableTest : public AuthBaseTest { public: - virtual void SetUp() { + void SetUp() override { // start grpc server with auth load_auth_settings(); m_auth_mgr = std::shared_ptr< AuthManager >(new AuthManager()); @@ -229,21 +229,18 @@ class AuthEnableTest : public AuthBaseTest { m_echo_stub = m_async_grpc_client->make_stub< EchoService >("worker-3"); } - virtual void TearDown() { + void TearDown() override { AuthBaseTest::TearDown(); m_token_server->stop(); + remove_auth_settings(); } static void get_token(HttpCallData cd) { - std::string msg; - std::cout << "sending token to client" << std::endl; + LOGINFO("Sending token to client"); pThis(cd)->m_token_server->respond_OK(cd, EVHTP_RES_OK, m_token_response); } - static void download_key(HttpCallData cd) { - std::string msg; - pThis(cd)->m_token_server->respond_OK(cd, EVHTP_RES_OK, rsa_pub_key); - } + static void download_key(HttpCallData cd) { pThis(cd)->m_token_server->respond_OK(cd, EVHTP_RES_OK, rsa_pub_key); } static void set_token_response(const std::string& raw_token) { m_token_response = "{\n" @@ -281,7 +278,7 @@ class EchoAndPingClient : public GrpcSyncClient { public: using GrpcSyncClient::GrpcSyncClient; - virtual void init() { + void init() override { GrpcSyncClient::init(); echo_stub_ = MakeStub< EchoService >(); } @@ -310,6 +307,7 @@ TEST_F(AuthEnableTest, allow_sync_client_with_auth) { int main(int argc, char* argv[]) { ::testing::InitGoogleMock(&argc, argv); SISL_OPTIONS_LOAD(argc, argv, logging) + sisl::logging::SetLogger("auth_test"); int ret{RUN_ALL_TESTS()}; grpc_helper::GrpcAsyncClientWorker::shutdown_all(); return ret; From 8eb611afaab18b94cc9fd5c13301bc4a82413c37 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 26 Jul 2022 10:23:12 -0700 Subject: [PATCH 077/385] Actually make Async test use ASYNC server. --- tests/function/echo_async_client.cpp | 74 +++++++++++++--------------- 1 file changed, 33 insertions(+), 41 deletions(-) diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp index f57b0b16..cdf4bf0a 100644 --- a/tests/function/echo_async_client.cpp +++ b/tests/function/echo_async_client.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include @@ -110,66 +111,57 @@ class TestClient { class TestServer { public: - class EchoServiceImpl { + class EchoServiceImpl final { public: - virtual ~EchoServiceImpl() = default; + ~EchoServiceImpl() = default; - virtual bool echo_request(const AsyncRpcDataPtr< EchoService, EchoRequest, EchoReply >& rpc_data) { + bool echo_request(const AsyncRpcDataPtr< EchoService, EchoRequest, EchoReply >& rpc_data) { LOGDEBUGMOD(grpc_server, "receive echo request {}", rpc_data->request().message()); - rpc_data->response().set_message(rpc_data->request().message()); - return true; + auto t = std::thread([rpc = rpc_data] { + rpc->response().set_message(rpc->request().message()); + rpc->send_response(); + }); + t.detach(); + return false; } - bool register_service(GrpcServer* server) { - if (!server->register_async_service< EchoService >()) { - LOGERROR("register service failed"); - return false; - } - - return true; + void register_service(GrpcServer* server) { + auto const res = server->register_async_service< EchoService >(); + RELEASE_ASSERT(res, "Failed to Register Service"); } - bool register_rpcs(GrpcServer* server) { + void register_rpcs(GrpcServer* server) { LOGINFO("register rpc calls"); - if (!server->register_rpc< EchoService, EchoRequest, EchoReply, false >( - "Echo", &EchoService::AsyncService::RequestEcho, - std::bind(&EchoServiceImpl::echo_request, this, _1))) { - LOGERROR("register rpc failed"); - return false; - } - - return true; + auto const res = server->register_rpc< EchoService, EchoRequest, EchoReply, false >( + "Echo", &EchoService::AsyncService::RequestEcho, std::bind(&EchoServiceImpl::echo_request, this, _1)); + RELEASE_ASSERT(res, "register rpc failed"); } }; - class PingServiceImpl { + class PingServiceImpl final { public: - virtual ~PingServiceImpl() = default; + ~PingServiceImpl() = default; - virtual bool ping_request(const AsyncRpcDataPtr< PingService, PingRequest, PingReply >& rpc_data) { + bool ping_request(const AsyncRpcDataPtr< PingService, PingRequest, PingReply >& rpc_data) { LOGDEBUGMOD(grpc_server, "receive ping request {}", rpc_data->request().seqno()); - rpc_data->response().set_seqno(rpc_data->request().seqno()); - return true; + auto t = std::thread([rpc = rpc_data] { + rpc->response().set_seqno(rpc->request().seqno()); + rpc->send_response(); + }); + t.detach(); + return false; } - bool register_service(GrpcServer* server) { - if (!server->register_async_service< PingService >()) { - LOGERROR("register ping service failed"); - return false; - } - return true; + void register_service(GrpcServer* server) { + auto const res = server->register_async_service< PingService >(); + RELEASE_ASSERT(res, "Failed to Register Service"); } - bool register_rpcs(GrpcServer* server) { + void register_rpcs(GrpcServer* server) { LOGINFO("register rpc calls"); - if (!server->register_rpc< PingService, PingRequest, PingReply, false >( - "Ping", &PingService::AsyncService::RequestPing, - std::bind(&PingServiceImpl::ping_request, this, _1))) { - LOGERROR("register ping rpc failed"); - return false; - } - - return true; + auto const res = server->register_rpc< PingService, PingRequest, PingReply, false >( + "Ping", &PingService::AsyncService::RequestPing, std::bind(&PingServiceImpl::ping_request, this, _1)); + RELEASE_ASSERT(res, "register ping rpc failed"); } }; From 364751ee15698e5e09213e383e0257510bf071d7 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 26 Jul 2022 12:24:46 -0700 Subject: [PATCH 078/385] Test both sync/async response --- tests/function/echo_async_client.cpp | 42 ++++++++++++++++++---------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp index cdf4bf0a..3e69d5f5 100644 --- a/tests/function/echo_async_client.cpp +++ b/tests/function/echo_async_client.cpp @@ -112,17 +112,24 @@ class TestClient { class TestServer { public: class EchoServiceImpl final { + std::atomic< uint32_t > num_calls = 0ul; + public: ~EchoServiceImpl() = default; bool echo_request(const AsyncRpcDataPtr< EchoService, EchoRequest, EchoReply >& rpc_data) { - LOGDEBUGMOD(grpc_server, "receive echo request {}", rpc_data->request().message()); - auto t = std::thread([rpc = rpc_data] { - rpc->response().set_message(rpc->request().message()); - rpc->send_response(); - }); - t.detach(); - return false; + if ((++num_calls % 2) == 0) { + LOGDEBUGMOD(grpc_server, "respond async echo request {}", rpc_data->request().message()); + auto t = std::thread([rpc = rpc_data] { + rpc->response().set_message(rpc->request().message()); + rpc->send_response(); + }); + t.detach(); + return false; + } + LOGDEBUGMOD(grpc_server, "respond sync echo request {}", rpc_data->request().message()); + rpc_data->response().set_message(rpc_data->request().message()); + return true; } void register_service(GrpcServer* server) { @@ -139,17 +146,24 @@ class TestServer { }; class PingServiceImpl final { + std::atomic< uint32_t > num_calls = 0ul; + public: ~PingServiceImpl() = default; bool ping_request(const AsyncRpcDataPtr< PingService, PingRequest, PingReply >& rpc_data) { - LOGDEBUGMOD(grpc_server, "receive ping request {}", rpc_data->request().seqno()); - auto t = std::thread([rpc = rpc_data] { - rpc->response().set_seqno(rpc->request().seqno()); - rpc->send_response(); - }); - t.detach(); - return false; + if ((++num_calls % 2) == 0) { + LOGDEBUGMOD(grpc_server, "respond async ping request {}", rpc_data->request().seqno()); + auto t = std::thread([rpc = rpc_data] { + rpc->response().set_seqno(rpc->request().seqno()); + rpc->send_response(); + }); + t.detach(); + return false; + } + LOGDEBUGMOD(grpc_server, "respond sync ping request {}", rpc_data->request().seqno()); + rpc_data->response().set_seqno(rpc_data->request().seqno()); + return true; } void register_service(GrpcServer* server) { From e227c99c5d7f311ed1ceafec1d74ef3017bec623 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 29 Jul 2022 12:31:51 -0600 Subject: [PATCH 079/385] Added build badge. --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 46570a4a..56026bd7 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ # SymbiosisLib (sisl) +[![Build Status](https://app.travis-ci.com/eBay/sisl.svg?branch=master)](https://app.travis-ci.com/eBay/sisl) This repo provides a symbiosis of libraries (thus named sisl - pronounced like sizzle) mostly for very high performance data structures and utilities. This is mostly on top of folly, boost, STL and other good well known libraries. Thus its not trying From afce17522255c6eed01bace9c7f74efcc428fed1 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 1 Aug 2022 08:59:13 -0700 Subject: [PATCH 080/385] Add building SISL project itself. --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index ccb53a50..f9119251 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,3 +18,4 @@ install: script: # Download dependencies and build project - conan install --build missing . + - conan build . From b71371db74675c2d23efcc7e4ab9bbe893fae341 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 3 Aug 2022 11:25:16 -0700 Subject: [PATCH 081/385] Recipe cleanup --- CMakeLists.txt | 25 ++- LICENSE | 202 +++++++++++++++++++++++++ conanfile.py | 76 +++++----- src/async_http/CMakeLists.txt | 2 + src/auth_manager/CMakeLists.txt | 4 +- src/cache/CMakeLists.txt | 2 + src/fds/CMakeLists.txt | 2 + src/file_watcher/CMakeLists.txt | 2 + src/file_watcher/file_watcher.cpp | 2 +- src/file_watcher/file_watcher.hpp | 2 +- src/file_watcher/file_watcher_test.cpp | 2 +- src/logging/CMakeLists.txt | 2 + src/metrics/CMakeLists.txt | 2 + src/metrics/README.md | 2 +- src/options/CMakeLists.txt | 2 + src/settings/CMakeLists.txt | 4 + src/sisl_version/CMakeLists.txt | 2 + src/utility/CMakeLists.txt | 2 + src/utility/tests/test_enum.cpp | 2 +- src/wisr/CMakeLists.txt | 2 + src/wisr/README.md | 2 +- 21 files changed, 283 insertions(+), 60 deletions(-) create mode 100644 LICENSE diff --git a/CMakeLists.txt b/CMakeLists.txt index f2de3e62..43621acd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -48,20 +48,19 @@ if (NOT ${CMAKE_SYSTEM_NAME} STREQUAL "Windows") set(CMAKE_THREAD_PREFER_PTHREAD TRUE) endif() -find_package(Benchmark REQUIRED CONFIG) -find_package(Boost REQUIRED CONFIG) -find_package(cpr REQUIRED CONFIG) -find_package(cxxopts REQUIRED CONFIG) -find_package(Flatbuffers REQUIRED CONFIG) -find_package(Folly REQUIRED CONFIG) -find_package(GTest REQUIRED CONFIG) -find_package(jwt-cpp REQUIRED CONFIG) -find_package(nlohmann_json REQUIRED CONFIG) -find_package(prometheus-cpp REQUIRED CONFIG) -find_package(semver.c REQUIRED CONFIG) -find_package(spdlog REQUIRED CONFIG) +find_package(benchmark REQUIRED) +find_package(Boost REQUIRED) +find_package(cpr REQUIRED) +find_package(cxxopts REQUIRED) +find_package(folly REQUIRED) +find_package(GTest REQUIRED) +find_package(jwt-cpp REQUIRED) +find_package(nlohmann_json REQUIRED) +find_package(prometheus-cpp REQUIRED) +find_package(semver.c REQUIRED) +find_package(spdlog REQUIRED) find_package (Threads REQUIRED) -find_package(userspace-rcu REQUIRED CONFIG) +find_package(userspace-rcu REQUIRED) set(COMMON_DEPS Boost::headers diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/conanfile.py b/conanfile.py index 6ab70892..0b4e7994 100644 --- a/conanfile.py +++ b/conanfile.py @@ -1,16 +1,14 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- from conans import ConanFile, CMake, tools import os class MetricsConan(ConanFile): name = "sisl" version = "8.0.1" - - license = "Apache" - url = "https://github.corp.ebay.com/Symbiosis/sisl" + homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" - revision_mode = "scm" + topics = ("ebay", "components", "core", "efficiency") + url = "https://github.com/eBay/sisl" + license = "Apache-2.0" settings = "arch", "os", "compiler", "build_type" options = { @@ -22,37 +20,15 @@ class MetricsConan(ConanFile): 'prerelease' : ['True', 'False'], 'with_evhtp' : ['True', 'False'], } - default_options = ( - 'shared=False', - 'fPIC=True', - 'coverage=False', - 'sanitize=False', - 'malloc_impl=libc', - 'prerelease=True', - 'with_evhtp=False', - ) - - requires = ( - # Custom packages - "prometheus-cpp/1.0.0", - - # Generic packages (conan-center) - "boost/1.79.0", - "cpr/1.8.1", - "cxxopts/2.2.1", - "flatbuffers/1.12.0", - "folly/2022.01.31.00", - "jwt-cpp/0.4.0", - "nlohmann_json/3.10.5", - "semver.c/1.0.0", - "spdlog/1.10.0", - "userspace-rcu/0.11.4", - ("fmt/8.1.1", "override"), - ("libevent/2.1.12", "override"), - ("openssl/1.1.1q", "override"), - ("xz_utils/5.2.5", "override"), - ("zlib/1.2.12", "override"), - ) + default_options = { + 'shared': False, + 'fPIC': True, + 'coverage': False, + 'sanitize': False, + 'malloc_impl': 'libc', + 'prerelease': True, + 'with_evhtp': False, + } build_requires = ( # Generic packages (conan-center) @@ -60,8 +36,8 @@ class MetricsConan(ConanFile): "gtest/1.11.0", ) - generators = "cmake", "cmake_find_package_multi" - exports_sources = ("CMakeLists.txt", "cmake/*", "src/*") + generators = "cmake", "cmake_find_package" + exports_sources = ("CMakeLists.txt", "cmake/*", "src/*", "LICENSE") def config_options(self): if self.settings.build_type != "Debug": @@ -80,6 +56,25 @@ def configure(self): del self.options.fPIC def requirements(self): + # Custom packages + self.requires("prometheus-cpp/1.0.0") + + # Generic packages (conan-center) + self.requires("boost/1.79.0") + self.requires("cpr/1.8.1") + self.requires("cxxopts/2.2.1") + self.requires("flatbuffers/1.12.0") + self.requires("folly/2022.01.31.00") + self.requires("jwt-cpp/0.4.0") + self.requires("nlohmann_json/3.10.5") + self.requires("semver.c/1.0.0") + self.requires("spdlog/1.10.0") + self.requires("userspace-rcu/0.11.4") + self.requires("fmt/8.1.1", override=True) + self.requires("libevent/2.1.12", override=True) + self.requires("openssl/1.1.1q", override=True) + self.requires("xz_utils/5.2.5", override=True) + self.requires("zlib/1.2.12", override=True) if self.options.malloc_impl == "jemalloc": self.requires("jemalloc/5.2.1") elif self.options.malloc_impl == "tcmalloc": @@ -113,6 +108,7 @@ def build(self): cmake.test(target=test_target) def package(self): + self.copy(pattern="LICENSE*", dst="licenses") self.copy("*.hpp", src="src/", dst="include/sisl", keep_path=True) self.copy("*.h", src="src/", dst="include/sisl", keep_path=True) self.copy("*.a", dst="lib/", keep_path=False) @@ -123,7 +119,7 @@ def package(self): self.copy("*.cmake", dst="cmake/", keep_path=False) def package_info(self): - self.cpp_info.libs = tools.collect_libs(self) + self.cpp_info.libs = ["sisl"] self.cpp_info.cppflags.append("-Wno-unused-local-typedefs") self.cpp_info.cppflags.append("-fconcepts") self.cpp_info.includedirs = ["include", "include/sisl/"] diff --git a/src/async_http/CMakeLists.txt b/src/async_http/CMakeLists.txt index 61e398ec..d8d8378c 100644 --- a/src/async_http/CMakeLists.txt +++ b/src/async_http/CMakeLists.txt @@ -1,3 +1,5 @@ +cmake_minimum_required (VERSION 3.10) + add_flags("-Wno-unused-parameter -Wno-cast-function-type") include_directories(BEFORE ..) diff --git a/src/auth_manager/CMakeLists.txt b/src/auth_manager/CMakeLists.txt index d249492b..41132d4a 100644 --- a/src/auth_manager/CMakeLists.txt +++ b/src/auth_manager/CMakeLists.txt @@ -1,9 +1,11 @@ +cmake_minimum_required (VERSION 3.10) + add_flags("-Wno-unused-parameter") include_directories(BEFORE ..) include_directories(BEFORE .) -find_package(Flatbuffers REQUIRED CONFIG) +find_package(FlatBuffers REQUIRED) set(AUTH_MGR_SOURCE_FILES auth_manager.cpp diff --git a/src/cache/CMakeLists.txt b/src/cache/CMakeLists.txt index 675d68c9..d7961930 100644 --- a/src/cache/CMakeLists.txt +++ b/src/cache/CMakeLists.txt @@ -1,3 +1,5 @@ +cmake_minimum_required (VERSION 3.10) + if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")) add_flags("-Wno-unused-parameter -Wno-cast-function-type") endif() diff --git a/src/fds/CMakeLists.txt b/src/fds/CMakeLists.txt index c30893f6..7455e22b 100644 --- a/src/fds/CMakeLists.txt +++ b/src/fds/CMakeLists.txt @@ -1,3 +1,5 @@ +cmake_minimum_required (VERSION 3.10) + if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")) add_flags("-Wno-unused-parameter -Wno-cast-function-type") endif() diff --git a/src/file_watcher/CMakeLists.txt b/src/file_watcher/CMakeLists.txt index 05b29f03..8efb5f48 100644 --- a/src/file_watcher/CMakeLists.txt +++ b/src/file_watcher/CMakeLists.txt @@ -1,3 +1,5 @@ +cmake_minimum_required (VERSION 3.10) + add_flags("-Wno-unused-parameter") include_directories(BEFORE ..) diff --git a/src/file_watcher/file_watcher.cpp b/src/file_watcher/file_watcher.cpp index 026ed003..7bd8db42 100644 --- a/src/file_watcher/file_watcher.cpp +++ b/src/file_watcher/file_watcher.cpp @@ -254,4 +254,4 @@ void FileWatcher::get_fileinfo(const int wd, FileInfo& file_info) const { LOGWARN("wd {} not found!", wd); } -} // namespace sisl \ No newline at end of file +} // namespace sisl diff --git a/src/file_watcher/file_watcher.hpp b/src/file_watcher/file_watcher.hpp index 87ad38cc..6e7b79c9 100644 --- a/src/file_watcher/file_watcher.hpp +++ b/src/file_watcher/file_watcher.hpp @@ -47,4 +47,4 @@ class FileWatcher { int m_pipefd[2] = {-1, -1}; }; -} // namespace sisl \ No newline at end of file +} // namespace sisl diff --git a/src/file_watcher/file_watcher_test.cpp b/src/file_watcher/file_watcher_test.cpp index e84ce0cf..eac9d031 100644 --- a/src/file_watcher/file_watcher_test.cpp +++ b/src/file_watcher/file_watcher_test.cpp @@ -80,4 +80,4 @@ int main(int argc, char* argv[]) { sisl::logging::SetLogger("test_file_watcher"); spdlog::set_pattern("[%D %T%z] [%^%l%$] [%n] [%t] %v"); return RUN_ALL_TESTS(); -} \ No newline at end of file +} diff --git a/src/logging/CMakeLists.txt b/src/logging/CMakeLists.txt index 7ff7e2a1..680c2ce7 100644 --- a/src/logging/CMakeLists.txt +++ b/src/logging/CMakeLists.txt @@ -1,3 +1,5 @@ +cmake_minimum_required (VERSION 3.10) + if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")) add_flags("-Wno-unused-parameter -Wno-cast-function-type") endif() diff --git a/src/metrics/CMakeLists.txt b/src/metrics/CMakeLists.txt index 3ec32cd0..0720f949 100644 --- a/src/metrics/CMakeLists.txt +++ b/src/metrics/CMakeLists.txt @@ -1,3 +1,5 @@ +cmake_minimum_required (VERSION 3.10) + if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")) add_flags("-Wno-attributes") # needed for C++ 20 folly compilation endif() diff --git a/src/metrics/README.md b/src/metrics/README.md index 01ab1bdd..f028c253 100644 --- a/src/metrics/README.md +++ b/src/metrics/README.md @@ -54,4 +54,4 @@ inconsistency on sample, shouldn't skew the results and in general its an accept ### WISR RCU Framework - \ No newline at end of file + diff --git a/src/options/CMakeLists.txt b/src/options/CMakeLists.txt index d20dd196..f181f962 100644 --- a/src/options/CMakeLists.txt +++ b/src/options/CMakeLists.txt @@ -1,3 +1,5 @@ +cmake_minimum_required (VERSION 3.10) + include_directories(BEFORE include) file(GLOB API_HEADERS include/*.h) diff --git a/src/settings/CMakeLists.txt b/src/settings/CMakeLists.txt index 790c5dce..4c5999a4 100644 --- a/src/settings/CMakeLists.txt +++ b/src/settings/CMakeLists.txt @@ -1,3 +1,5 @@ +cmake_minimum_required (VERSION 3.10) + if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")) add_flags("-Wno-attributes") # needed for C++ 20 folly compilation endif() @@ -6,6 +8,8 @@ include_directories(BEFORE ..) include_directories(BEFORE .) include_directories(BEFORE . ${CMAKE_CURRENT_BINARY_DIR}/generated/) +find_package(FlatBuffers REQUIRED) + set(SETTINGS_SOURCE_FILES settings.cpp ) diff --git a/src/sisl_version/CMakeLists.txt b/src/sisl_version/CMakeLists.txt index 4d4d8ee8..d39b0d61 100644 --- a/src/sisl_version/CMakeLists.txt +++ b/src/sisl_version/CMakeLists.txt @@ -1,3 +1,5 @@ +cmake_minimum_required (VERSION 3.10) + if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")) add_flags("-Wno-attributes") # needed for C++ 20 folly compilation endif() diff --git a/src/utility/CMakeLists.txt b/src/utility/CMakeLists.txt index 5dccb2df..2453b1d5 100644 --- a/src/utility/CMakeLists.txt +++ b/src/utility/CMakeLists.txt @@ -1,3 +1,5 @@ +cmake_minimum_required (VERSION 3.10) + add_flags("-Wno-unused-parameter") include_directories(BEFORE ..) diff --git a/src/utility/tests/test_enum.cpp b/src/utility/tests/test_enum.cpp index 1b5471e8..3155b179 100644 --- a/src/utility/tests/test_enum.cpp +++ b/src/utility/tests/test_enum.cpp @@ -205,4 +205,4 @@ TEST_F(EnumTest, enum_unsigned_test_bit_ops) { int main(int argc, char* argv[]) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); -} \ No newline at end of file +} diff --git a/src/wisr/CMakeLists.txt b/src/wisr/CMakeLists.txt index 54d1079f..b3d601fe 100644 --- a/src/wisr/CMakeLists.txt +++ b/src/wisr/CMakeLists.txt @@ -1,3 +1,5 @@ +cmake_minimum_required (VERSION 3.10) + add_flags("-Wno-unused-parameter") include_directories(BEFORE ..) diff --git a/src/wisr/README.md b/src/wisr/README.md index a5e3171b..0a86f52e 100644 --- a/src/wisr/README.md +++ b/src/wisr/README.md @@ -87,4 +87,4 @@ test_locked_vector_insert/iterations:100/threads:1 15143 ns 15143 ns test_wisr_vector_insert/iterations:100/threads:1 2101 ns 2103 ns 100 test_locked_vector_read/iterations:100/threads:1 83536 ns 83540 ns 100 test_wisr_vector_read/iterations:100/threads:1 25274 ns 25275 ns 100 -``` \ No newline at end of file +``` From f93f4ca9545494d584ea88a08f37b3042cfc7186 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 3 Aug 2022 14:23:20 -0700 Subject: [PATCH 082/385] Remove CONAN_CHANNEL from build --- .jenkins/Jenkinsfile | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index f795db11..54cb5642 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -3,7 +3,6 @@ pipeline { environment { ARTIFACTORY_PASS = credentials('ARTIFACTORY_PASS') - CONAN_USER = 'sisl' TARGET_BRANCH = 'master' TESTING_BRANCH = 'testing/v*' STABLE_BRANCH = 'stable/v*' @@ -39,8 +38,7 @@ pipeline { script { PROJECT = sh(script: "grep -m 1 'name =' conanfile.py | awk '{print \$3}' | tr -d '\n' | tr -d '\"'", returnStdout: true) VER = sh(script: "grep -m 1 'version =' conanfile.py | awk '{print \$3}' | tr -d '\n' | tr -d '\"'", returnStdout: true) - CONAN_CHANNEL = sh(script: "echo ${BRANCH_NAME} | sed -E 's,(\\w+-?\\d*)/.*,\\1,' | sed -E 's,-,_,' | tr -d '\n'", returnStdout: true) - TAG = "${VER}@${CONAN_USER}/${CONAN_CHANNEL}" + TAG = "${VER}@" slackSend color: '#0063D1', channel: '#sds-ci', message: "*${PROJECT}/${TAG}* is building." } } From ff4dbec466902ea3bc679d9b7a5ee6f69bdb9ed9 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 5 Aug 2022 09:40:47 -0600 Subject: [PATCH 083/385] Test building with GitHub Actions. --- .github/workflows/build_with_conan.yml | 48 ++++++++++++++++++++++++++ .gitignore | 1 - 2 files changed, 48 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/build_with_conan.yml diff --git a/.github/workflows/build_with_conan.yml b/.github/workflows/build_with_conan.yml new file mode 100644 index 00000000..8f649601 --- /dev/null +++ b/.github/workflows/build_with_conan.yml @@ -0,0 +1,48 @@ +name: Conan + +on: + push: + branches: [ $default-branch ] + pull_request: + branches: [ $default-branch ] + +#env: + # Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.) + #BUILD_TYPE: Release + +jobs: + build: + # The CMake configure and build commands are platform agnostic and should work equally well on Windows or Mac. + # You can convert this to a matrix build if you need cross-platform coverage. + # See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + matrix: + python-version: ["3.8"] + + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Install Conan + run: | + python -m pip install --upgrade pip + python -m pip install conan + + - name: Configure Conan + # Configure conan profiles for build runner + run: | + conan user + + - name: Install dependencies + # Build your program with the given configuration + run: | + conan install --build missing . + + - name: Build + # Build your program with the given configuration + run: | + conan build . diff --git a/.gitignore b/.gitignore index aa529589..2216d9b6 100644 --- a/.gitignore +++ b/.gitignore @@ -2,7 +2,6 @@ bin/ # Compiled Object files -build* *.slo *.lo *.o From 3950e331d3c0d0f465f0b45cde9080b0085c358f Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 5 Aug 2022 09:46:47 -0600 Subject: [PATCH 084/385] Add build-type --- .github/workflows/build_with_conan.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build_with_conan.yml b/.github/workflows/build_with_conan.yml index 8f649601..318806d2 100644 --- a/.github/workflows/build_with_conan.yml +++ b/.github/workflows/build_with_conan.yml @@ -20,6 +20,7 @@ jobs: fail-fast: false matrix: python-version: ["3.8"] + build-type: ["Debug", "Release"] steps: - uses: actions/checkout@v3 @@ -40,7 +41,7 @@ jobs: - name: Install dependencies # Build your program with the given configuration run: | - conan install --build missing . + conan install -s build_type=${{ matrix.build-type }} --build missing . - name: Build # Build your program with the given configuration From f73f0224f468ffd20705b3ef9340f663b32681b8 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 5 Aug 2022 09:56:57 -0600 Subject: [PATCH 085/385] Change branch trigger. --- .github/workflows/build_with_conan.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_with_conan.yml b/.github/workflows/build_with_conan.yml index 8f649601..e5a67d1e 100644 --- a/.github/workflows/build_with_conan.yml +++ b/.github/workflows/build_with_conan.yml @@ -2,9 +2,9 @@ name: Conan on: push: - branches: [ $default-branch ] + branches: [ master ] pull_request: - branches: [ $default-branch ] + branches: [ master ] #env: # Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.) From 3967700d47fa785eecd56de52ba5996f09b5dde7 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 5 Aug 2022 10:10:51 -0600 Subject: [PATCH 086/385] Disable travis and add github actions badge. --- .github/workflows/build_with_conan.yml | 2 +- .travis.yml => .travis.yml.bak | 0 README.md | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) rename .travis.yml => .travis.yml.bak (100%) diff --git a/.github/workflows/build_with_conan.yml b/.github/workflows/build_with_conan.yml index 318806d2..e3f584ae 100644 --- a/.github/workflows/build_with_conan.yml +++ b/.github/workflows/build_with_conan.yml @@ -1,4 +1,4 @@ -name: Conan +name: Conan Build on: push: diff --git a/.travis.yml b/.travis.yml.bak similarity index 100% rename from .travis.yml rename to .travis.yml.bak diff --git a/README.md b/README.md index 56026bd7..425ddde1 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ # SymbiosisLib (sisl) -[![Build Status](https://app.travis-ci.com/eBay/sisl.svg?branch=master)](https://app.travis-ci.com/eBay/sisl) +[![Conan Build](https://github.com/eBay/sisl/actions/workflows/build_with_conan.yml/badge.svg?branch=master)](https://github.com/eBay/sisl/actions/workflows/build_with_conan.yml) This repo provides a symbiosis of libraries (thus named sisl - pronounced like sizzle) mostly for very high performance data structures and utilities. This is mostly on top of folly, boost, STL and other good well known libraries. Thus its not trying From b6b0e35c35622c8737935f553bfbbeac0ea1c079 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 8 Aug 2022 13:17:16 -0700 Subject: [PATCH 087/385] Use built-in protobuf and grpc generators. --- CMakeLists.txt | 48 ++++++++++++++++++++--------------- tests/CMakeLists.txt | 12 +++++++-- tests/function/CMakeLists.txt | 35 +++++++++++-------------- tests/proto/CMakeLists.txt | 24 +++++++++++------- tests/unit/CMakeLists.txt | 26 +++++++++---------- 5 files changed, 80 insertions(+), 65 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 17ab7cf3..cde3bd89 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,30 +1,38 @@ set(CMAKE_CXX_STANDARD 17) -set(CPP_WARNINGS "-Wall -Wextra -Werror -Wno-unused-parameter") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CPP_WARNINGS} -DPACKAGE_NAME=${CONAN_PACKAGE_NAME} -DPACKAGE_VERSION=${CONAN_PACKAGE_VERSION}") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-declarations") -find_package(Boost REQUIRED) -find_package(Threads REQUIRED) -find_package(OpenSSL REQUIRED) -find_package(Protobuf REQUIRED) - -include(${CMAKE_HOME_DIRECTORY}/cmake/grpc.cmake) - -include_directories(BEFORE include) - -if (${CMAKE_BUILD_TYPE} STREQUAL Debug) - # Remove tcmalloc from debug builds so valgrind etc. work well - list(REMOVE_ITEM CONAN_LIBS tcmalloc tcmalloc_minimal) +set(PACKAGE_REVISION "0.0.0+unknown") +if (DEFINED CONAN_PACKAGE_NAME) + set(PACKAGE_REVISION "${CONAN_PACKAGE_VERSION}") endif () +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CPP_WARNINGS} -DPACKAGE_NAME=${PROJECT_NAME} -DPACKAGE_VERSION=${PACKAGE_REVISION}") -set(GRPC_HELPER_LIBS ${CONAN_LIBS} ${CMAKE_THREAD_LIBS_INIT}) +find_package(Threads REQUIRED) +find_package(sisl CONFIG REQUIRED) +find_package(gRPC CONFIG REQUIRED) +include_directories(BEFORE "include") -set (GRPC_HELPER_SOURCE +add_library(${PROJECT_NAME} lib/rpc_server.cpp lib/rpc_client.cpp -) - - -add_library(grpc_helper ${GRPC_HELPER_SOURCE}) + ) +target_link_libraries(${PROJECT_NAME} + gRPC::grpc++ + sisl::sisl + Boost::Boost + ) add_subdirectory(tests) + +# build info +string(TOUPPER "${CMAKE_BUILD_TYPE}" UC_CMAKE_BUILD_TYPE) +message(STATUS "Build configuration: ${CMAKE_BUILD_TYPE}") +message(STATUS "C compiler info: ${CMAKE_C_COMPILER_ID} ${CMAKE_C_COMPILER_VERSION} ${CMAKE_C_COMPILER_EXTERNAL_TOOLCHAIN}") +message(STATUS "C++ compiler info: ${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION} ${CMAKE_CXX_COMPILER_EXTERNAL_TOOLCHAIN}") +message(STATUS "C flags: ${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${UC_CMAKE_BUILD_TYPE}}") +message(STATUS "C++ flags: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${UC_CMAKE_BUILD_TYPE}}") +message(STATUS "Linker flags (executable): ${CMAKE_EXE_LINKER_FLAGS} ${CMAKE_EXE_LINKER_FLAGS_${UC_CMAKE_BUILD_TYPE}}") +message(STATUS "Linker flags (shared): ${CMAKE_SHARED_LINKER_FLAGS} ${CMAKE_SHARED_LINKER_FLAGS_${UC_CMAKE_BUILD_TYPE}}") +message(STATUS "Linker flags (module): ${CMAKE_MODULE_LINKER_FLAGS} ${CMAKE_MODULE_LINKER_FLAGS_${UC_CMAKE_BUILD_TYPE}}") +message(STATUS "Linker flags (static): ${CMAKE_STATIC_LINKER_FLAGS} ${CMAKE_STATIC_LINKER_FLAGS_${UC_CMAKE_BUILD_TYPE}}") diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 414418f8..26303545 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,4 +1,12 @@ -add_subdirectory(function) +cmake_minimum_required (VERSION 3.11) + +find_package(GTest CONFIG REQUIRED) + add_subdirectory(proto) -add_subdirectory(unit) +enable_testing() + +include_directories(BEFORE "${CMAKE_CURRENT_BINARY_DIR}/proto") + +add_subdirectory(function) +add_subdirectory(unit) diff --git a/tests/function/CMakeLists.txt b/tests/function/CMakeLists.txt index 5d10b848..598722fd 100644 --- a/tests/function/CMakeLists.txt +++ b/tests/function/CMakeLists.txt @@ -1,28 +1,23 @@ - -include_directories(${CMAKE_CURRENT_BINARY_DIR}/../proto) - -enable_testing() +cmake_minimum_required (VERSION 3.11) # build echo_server add_executable(echo_server - echo_server.cpp - $ - ) -add_dependencies(echo_server grpc_helper) -target_link_libraries(echo_server grpc_helper ${CONAN_LIBS} ) + echo_server.cpp + $ + ) +target_link_libraries(echo_server + grpc_helper + GTest::gtest + ) add_test(NAME Echo_Ping_Server COMMAND echo_server) -# build echo_sync_client -#add_executable(echo_sync_client echo_sync_client.cpp) -#add_dependencies(echo_sync_client grpc_helper test_proto) -#target_link_libraries(echo_sync_client ${FUNCTION_TEST_LIBS} ) - # build echo_async_client add_executable(echo_async_client - echo_async_client.cpp - $ - ) -add_dependencies(echo_async_client grpc_helper) -target_link_libraries(echo_async_client grpc_helper ${CONAN_LIBS} ) + echo_async_client.cpp + $ + ) +target_link_libraries(echo_async_client + grpc_helper + GTest::gtest + ) add_test(NAME Echo_Ping_Async_Client_Server COMMAND echo_async_client) -SET_TESTS_PROPERTIES(Echo_Ping_Async_Client_Server PROPERTIES DEPENDS TestHttpSanity) diff --git a/tests/proto/CMakeLists.txt b/tests/proto/CMakeLists.txt index 776af90d..d36a2a63 100644 --- a/tests/proto/CMakeLists.txt +++ b/tests/proto/CMakeLists.txt @@ -1,11 +1,17 @@ +cmake_minimum_required(VERSION 3.11) -protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS grpc_helper_test.proto) +add_library(test_proto OBJECT + grpc_helper_test.proto + ) +target_link_libraries(test_proto + protobuf::libprotobuf + gRPC::grpc++ + ) -PROTOBUF_GENERATE_GRPC_CPP(GRPC_SRCS GRPC_HDRS grpc_helper_test.proto) - - -MESSAGE( STATUS "PROTO_SRCS = " ${PROTO_SRCS} " " ${PROTO_HDRS}) -MESSAGE( STATUS "GRPC_SRCS = " ${GRPC_SRCS} " " ${GRPC_HDRS}) - - -add_library(test_proto OBJECT ${PROTO_SRCS} ${PROTO_HDRS} ${GRPC_SRCS} ${GRPC_HDRS}) +protobuf_generate(LANGUAGE cpp TARGET test_proto PROTOS grpc_helper_test.proto) +protobuf_generate( + TARGET test_proto + LANGUAGE grpc + GENERATE_EXTENSIONS .grpc.pb.h .grpc.pb.cc + PLUGIN protoc-gen-grpc=$ +) diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index 93b95e03..b7f8d7a9 100644 --- a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -1,15 +1,13 @@ - -include_directories(${CMAKE_CURRENT_BINARY_DIR}/../proto) - -set(UNIT_TEST_LIBS grpc_helper test_proto sisl - ${CONAN_LIBS}) - -enable_testing() - -add_executable(auth_test auth_test.cpp) -#add_dependencies(auth_test grpc_helper test_proto) -target_link_libraries(auth_test ${UNIT_TEST_LIBS} ) +cmake_minimum_required (VERSION 3.10) + +add_executable(auth_test + auth_test.cpp + $ + ) +target_link_libraries(auth_test + grpc_helper + sisl::sisl + gRPC::grpc++ + GTest::gmock + ) add_test(NAME Auth_Test COMMAND auth_test) - - - From ed6dee451e136c51b12c33d9932f877a7bc25f8e Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 8 Aug 2022 15:59:54 -0700 Subject: [PATCH 088/385] Cleanup recipe --- src/flip/CMakeLists.txt | 41 +++++++++++-------------------- src/flip/cmake/debug_flags.cmake | 10 +++++--- src/flip/proto/CMakeLists.txt | 42 +++++++++++++------------------- 3 files changed, 37 insertions(+), 56 deletions(-) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index c11bf8e2..0689816b 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -4,52 +4,37 @@ set(CMAKE_CXX_STANDARD 17) if(EXISTS ${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) - conan_basic_setup() else() - message(WARNING "The file conanbuildinfo.cmake doesn't exist, you have to run conan install first") - return() + message(WARNING "Conan Build file does not exist, trying to build without!") endif() if (${CMAKE_BUILD_TYPE} STREQUAL Debug) - if (NOT ${CONAN_SETTINGS_COMPILER} STREQUAL "clang" AND NOT ${CONAN_SETTINGS_COMPILER} STREQUAL "apple-clang") - include (cmake/debug_flags.cmake) - endif () + include (cmake/debug_flags.cmake) endif () if (${MEMORY_SANITIZER_ON}) include (cmake/mem_sanitizer.cmake) endif () -include(cmake/grpc.cmake) - find_program(CCACHE_FOUND ccache) if (CCACHE_FOUND) set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache) endif () -find_package(Protobuf REQUIRED) - -file(GLOB PROTO_IDLS proto/*.proto) -PROTOBUF_GENERATE_CPP(PROTO_SRCS PROTO_HDRS ${PROTO_IDLS}) - -file(GLOB GRPC_IDLS proto/flip_server.proto) -PROTOBUF_GENERATE_GRPC_CPP(GRPC_SRCS GRPC_HDRS ${GRPC_IDLS}) +find_package(gRPC CONFIG REQUIRED) +find_package(sisl CONFIG REQUIRED) -list(GET PROTO_HDRS 0 FIRST_PROTO) -get_filename_component(PROTO_DIR ${FIRST_PROTO} DIRECTORY) -set(PROTO_PATH ${PROTO_DIR}) +include_directories(BEFORE include) +include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/proto) -include_directories(BEFORE include ${PROTO_DIR} src) -set(FLIP_LIB_FILES - ${PROTO_SRCS} - ${PROTO_HDRS} - ${GRPC_SRCS} - ${GRPC_HDRS} +add_library(flip src/flip_rpc_server.cpp + $ + ) +target_link_libraries(flip + sisl::sisl + gRPC::grpc++ ) - -add_library(flip ${FLIP_LIB_FILES}) -target_link_libraries(flip ${CONAN_LIBS}) add_executable(test_flip src/test_flip.cpp) target_link_libraries(test_flip flip) @@ -59,3 +44,5 @@ target_link_libraries(test_flip_local_client flip) add_executable(test_flip_server src/test_flip_server.cpp) target_link_libraries(test_flip_server flip) + +add_subdirectory (proto) diff --git a/src/flip/cmake/debug_flags.cmake b/src/flip/cmake/debug_flags.cmake index 3b8d3db8..a0ad3d3f 100644 --- a/src/flip/cmake/debug_flags.cmake +++ b/src/flip/cmake/debug_flags.cmake @@ -59,9 +59,11 @@ set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-web" set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-slp-vectorize" )# E&C. set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fthreadsafe-statics" )# Slightly smaller in code that doesn't need to be TS. -if (${CONAN_BUILD_COVERAGE}) - include (cmake/CodeCoverage.cmake) - APPEND_COVERAGE_COMPILER_FLAGS() - SETUP_TARGET_FOR_COVERAGE_GCOVR_XML(NAME coverage EXECUTABLE ctest DEPENDENCIES ) +if (DEFINED ${BUILD_COVERAGE}) + if (${BUILD_COVERAGE}) + include (cmake/CodeCoverage.cmake) + APPEND_COVERAGE_COMPILER_FLAGS() + SETUP_TARGET_FOR_COVERAGE_GCOVR_XML(NAME coverage EXECUTABLE ctest DEPENDENCIES ) + endif () endif () set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${REALLY_NO_OPTIMIZATION_FLAGS}") diff --git a/src/flip/proto/CMakeLists.txt b/src/flip/proto/CMakeLists.txt index 67014cad..43e990e1 100644 --- a/src/flip/proto/CMakeLists.txt +++ b/src/flip/proto/CMakeLists.txt @@ -1,27 +1,19 @@ -include(../CMakeScripts/grpc.cmake) +cmake_minimum_required(VERSION 3.11) -file(GLOB PROTO_IDLS *.proto) -message("PROTO_IDLS = " ${PROTO_IDLS}) -PROTOBUF_GENERATE_CPP(PROTO_SRCS PROTO_HDRS ${PROTO_IDLS}) +add_library(${PROJECT_NAME}_proto OBJECT + flip_server.proto + flip_spec.proto + ) +target_link_libraries(${PROJECT_NAME}_proto + protobuf::libprotobuf + gRPC::grpc++ + ) -file(GLOB GRPC_IDLS flip_server.proto) -message("GRPC_IDLS = " ${GRPC_IDLS}) -PROTOBUF_GENERATE_GRPC_CPP(GRPC_SRCS GRPC_HDRS ${GRPC_IDLS}) - -list(GET PROTO_HDRS 0 FIRST_PROTO) -get_filename_component(PROTO_DIR ${FIRST_PROTO} DIRECTORY) -set(PROTO_PATH ${PROTO_DIR} PARENT_SCOPE) - -include_directories(BEFORE include ${PROTO_DIR}) -set(FLIP_LIB_FILES - ${FLIP_LIB_FILES} - ${PROTO_SRCS} - ${PROTO_HDRS} - ${GRPC_SRCS} - ${GRPC_HDRS} - ) - -message("FLIP_LIB_FILES = " ${FLIP_LIB_FILES}) -message("PROTO_DIR = " ${PROTO_DIR}) -add_library(flip ${FLIP_LIB_FILES}) -target_link_libraries(flip ${CONAN_LIBS}) +protobuf_generate(LANGUAGE cpp TARGET ${PROJECT_NAME}_proto PROTOS flip_spec.proto) +protobuf_generate(LANGUAGE cpp TARGET ${PROJECT_NAME}_proto PROTOS flip_server.proto) +protobuf_generate( + TARGET ${PROJECT_NAME}_proto + LANGUAGE grpc + GENERATE_EXTENSIONS .grpc.pb.h .grpc.pb.cc + PLUGIN protoc-gen-grpc=$ +) From 92d296c35b5033530d2ce3908a8c67d1fd7062f8 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 9 Aug 2022 10:47:16 -0700 Subject: [PATCH 089/385] Enable evhtp by default --- conanfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index 0b4e7994..67e90acf 100644 --- a/conanfile.py +++ b/conanfile.py @@ -27,7 +27,7 @@ class MetricsConan(ConanFile): 'sanitize': False, 'malloc_impl': 'libc', 'prerelease': True, - 'with_evhtp': False, + 'with_evhtp': True, } build_requires = ( From 35935d38bc2d4b0ddae2b818eb94fce4c795ec9b Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 9 Aug 2022 10:48:45 -0700 Subject: [PATCH 090/385] Add back user/channel --- .jenkins/Jenkinsfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 54cb5642..f795db11 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -3,6 +3,7 @@ pipeline { environment { ARTIFACTORY_PASS = credentials('ARTIFACTORY_PASS') + CONAN_USER = 'sisl' TARGET_BRANCH = 'master' TESTING_BRANCH = 'testing/v*' STABLE_BRANCH = 'stable/v*' @@ -38,7 +39,8 @@ pipeline { script { PROJECT = sh(script: "grep -m 1 'name =' conanfile.py | awk '{print \$3}' | tr -d '\n' | tr -d '\"'", returnStdout: true) VER = sh(script: "grep -m 1 'version =' conanfile.py | awk '{print \$3}' | tr -d '\n' | tr -d '\"'", returnStdout: true) - TAG = "${VER}@" + CONAN_CHANNEL = sh(script: "echo ${BRANCH_NAME} | sed -E 's,(\\w+-?\\d*)/.*,\\1,' | sed -E 's,-,_,' | tr -d '\n'", returnStdout: true) + TAG = "${VER}@${CONAN_USER}/${CONAN_CHANNEL}" slackSend color: '#0063D1', channel: '#sds-ci', message: "*${PROJECT}/${TAG}* is building." } } From 1dfd126116f7e26b7c4cb7413feecd21c84df18e Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 9 Aug 2022 11:01:11 -0700 Subject: [PATCH 091/385] Fix async tests. --- src/async_http/CMakeLists.txt | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/src/async_http/CMakeLists.txt b/src/async_http/CMakeLists.txt index d8d8378c..fb18f2c2 100644 --- a/src/async_http/CMakeLists.txt +++ b/src/async_http/CMakeLists.txt @@ -1,20 +1,31 @@ cmake_minimum_required (VERSION 3.10) +find_package(FlatBuffers REQUIRED) +find_package(evhtp REQUIRED) + add_flags("-Wno-unused-parameter -Wno-cast-function-type") include_directories(BEFORE ..) include_directories(BEFORE .) +set(AUTH_DEPS + sisl + ${COMMON_DEPS} + evhtp::evhtp + cpr::cpr + flatbuffers::flatbuffers + jwt-cpp::jwt-cpp + GTest::gmock + ) + set(TEST_HTTP_SERVER_SOURCES tests/test_http_server.cpp ) add_executable(test_http_server ${TEST_HTTP_SERVER_SOURCES}) -target_link_libraries(test_http_server sisl) +target_link_libraries(test_http_server ${AUTH_DEPS}) -set(TEST_HTTP_SERVER_AUTH_SOURCES +add_executable(test_http_server_auth tests/AuthTest.cpp - ) -add_executable(test_http_server_auth ${TEST_HTTP_SERVER_AUTH_SOURCES}) -target_link_libraries(test_http_server_auth sisl) + ) +target_link_libraries(test_http_server_auth ${AUTH_DEPS}) add_test(NAME test_http_server_auth COMMAND test_http_server_auth) - From 19f39bf8872047fafd4a4ed4f9a99d42fcf781de Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 9 Aug 2022 11:07:32 -0700 Subject: [PATCH 092/385] Update sisl. --- src/flip/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 0689816b..d6b3ffe7 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -21,8 +21,8 @@ if (CCACHE_FOUND) set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache) endif () -find_package(gRPC CONFIG REQUIRED) -find_package(sisl CONFIG REQUIRED) +find_package(gRPC REQUIRED) +find_package(sisl REQUIRED) include_directories(BEFORE include) include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/proto) From 5cee4adb1373346e5550d58189b5333bfbf3ac4c Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 9 Aug 2022 11:08:52 -0700 Subject: [PATCH 093/385] Update sisl --- CMakeLists.txt | 4 ++-- tests/CMakeLists.txt | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index cde3bd89..bc3486ac 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -8,8 +8,8 @@ endif () set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CPP_WARNINGS} -DPACKAGE_NAME=${PROJECT_NAME} -DPACKAGE_VERSION=${PACKAGE_REVISION}") find_package(Threads REQUIRED) -find_package(sisl CONFIG REQUIRED) -find_package(gRPC CONFIG REQUIRED) +find_package(sisl REQUIRED) +find_package(gRPC REQUIRED) include_directories(BEFORE "include") diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 26303545..451ab4e0 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,6 +1,6 @@ cmake_minimum_required (VERSION 3.11) -find_package(GTest CONFIG REQUIRED) +find_package(GTest REQUIRED) add_subdirectory(proto) From 7a6813bb4dcf2676115fefaa8ba2d38a543af6cb Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 9 Aug 2022 11:54:35 -0700 Subject: [PATCH 094/385] Default allocator back to tcmalloc --- conanfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index 67e90acf..c7c057f3 100644 --- a/conanfile.py +++ b/conanfile.py @@ -25,7 +25,7 @@ class MetricsConan(ConanFile): 'fPIC': True, 'coverage': False, 'sanitize': False, - 'malloc_impl': 'libc', + 'malloc_impl': 'tcmalloc', 'prerelease': True, 'with_evhtp': True, } From f67c6c7ab2f52eb44daf9c0a7f705719ec27d47c Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 9 Aug 2022 18:43:56 -0700 Subject: [PATCH 095/385] Adjust fPIC based on options. --- CMakeLists.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 441a112c..db51963d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -13,6 +13,10 @@ enable_testing() if(EXISTS ${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) + conan_output_dirs_setup() + conan_set_rpath() + conan_set_std() + conan_set_fpic() else () message("The file conanbuildinfo.cmake doesn't exist, some properties will be unavailable") endif () From 9fd5f38f98c0607099a219b4b510eb9e3d5819db Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 9 Aug 2022 18:44:52 -0700 Subject: [PATCH 096/385] Adjust fPIC based on options. --- src/flip/CMakeLists.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index d6b3ffe7..bec29e21 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -4,6 +4,10 @@ set(CMAKE_CXX_STANDARD 17) if(EXISTS ${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) + conan_output_dirs_setup() + conan_set_rpath() + conan_set_std() + conan_set_fpic() else() message(WARNING "Conan Build file does not exist, trying to build without!") endif() From 8d3bb8f0c7af8af59a7cafe52dffa1f8146b95bf Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 10 Aug 2022 11:16:06 -0700 Subject: [PATCH 097/385] Provide way to cleanup versions --- CMakeLists.txt | 5 +---- src/sisl_version/tests/test_version.cpp | 2 ++ src/sisl_version/version.cpp | 13 ++++++++++--- src/version.hpp | 1 + 4 files changed, 14 insertions(+), 7 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index db51963d..e3d0e4e0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -13,10 +13,7 @@ enable_testing() if(EXISTS ${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) - conan_output_dirs_setup() - conan_set_rpath() - conan_set_std() - conan_set_fpic() + conan_basic_setup(TARGETS) else () message("The file conanbuildinfo.cmake doesn't exist, some properties will be unavailable") endif () diff --git a/src/sisl_version/tests/test_version.cpp b/src/sisl_version/tests/test_version.cpp index c787bd0b..8d2026ca 100644 --- a/src/sisl_version/tests/test_version.cpp +++ b/src/sisl_version/tests/test_version.cpp @@ -32,6 +32,8 @@ TEST(entryTest, entry) { auto versions{sisl::VersionMgr::getVersions()}; EXPECT_EQ((int)versions.size(), 2); + + sisl::VersionMgr::clear(); } int main(int argc, char* argv[]) { diff --git a/src/sisl_version/version.cpp b/src/sisl_version/version.cpp index 9ca27bbd..e5baf631 100644 --- a/src/sisl_version/version.cpp +++ b/src/sisl_version/version.cpp @@ -24,9 +24,7 @@ VersionMgr* VersionMgr::m_instance = nullptr; std::once_flag VersionMgr::m_init_flag; VersionMgr::~VersionMgr() { - for (auto it = m_version_map.begin(); m_version_map.end() != it; ++it) { - semver_free(&it->second); - } + clear(); } void VersionMgr::createAndInit() { @@ -41,6 +39,15 @@ VersionMgr* VersionMgr::getInstance() { return m_instance; } +void VersionMgr::clear() { + auto ver_info{VersionMgr::getInstance()}; + std::unique_lock l{ver_info->m_mutex}; + for (auto it = ver_info->m_version_map.begin(); ver_info->m_version_map.end() != it; ++it) { + semver_free(&it->second); + } + ver_info->m_version_map.clear(); +} + semver_t* VersionMgr::getVersion(const std::string& name) { auto ver_info{VersionMgr::getInstance()}; std::unique_lock l{ver_info->m_mutex}; diff --git a/src/version.hpp b/src/version.hpp index 3d15c8c6..7c4a30fb 100644 --- a/src/version.hpp +++ b/src/version.hpp @@ -41,6 +41,7 @@ class VersionMgr { void operator=(VersionMgr const&) = delete; static VersionMgr* getInstance(); + static void clear(); static semver_t* getVersion(const std::string& name); static std::vector< modinfo > getVersions(); static void addVersion(const std::string& name, const semver_t& ver); From b45f98c38a76dba4d0e20288b4ec722a77794ea8 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 10 Aug 2022 11:39:00 -0700 Subject: [PATCH 098/385] Cleanup cmake --- CMakeLists.txt | 5 ++++- tests/function/CMakeLists.txt | 6 ++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index bc3486ac..402e82c0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,3 +1,5 @@ +cmake_minimum_required (VERSION 3.11) + set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-declarations") @@ -13,7 +15,8 @@ find_package(gRPC REQUIRED) include_directories(BEFORE "include") -add_library(${PROJECT_NAME} +add_library(${PROJECT_NAME}) +target_sources(${PROJECT_NAME} PRIVATE lib/rpc_server.cpp lib/rpc_client.cpp ) diff --git a/tests/function/CMakeLists.txt b/tests/function/CMakeLists.txt index 598722fd..ed65373e 100644 --- a/tests/function/CMakeLists.txt +++ b/tests/function/CMakeLists.txt @@ -1,7 +1,8 @@ cmake_minimum_required (VERSION 3.11) # build echo_server -add_executable(echo_server +add_executable(echo_server) +target_sources(echo_server PRIVATE echo_server.cpp $ ) @@ -12,7 +13,8 @@ target_link_libraries(echo_server add_test(NAME Echo_Ping_Server COMMAND echo_server) # build echo_async_client -add_executable(echo_async_client +add_executable(echo_async_client) +target_sources(echo_async_client PRIVATE echo_async_client.cpp $ ) From ea8a36e3d0a68bf898b8ada27a5da258f33d0608 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 11 Aug 2022 08:43:30 -0700 Subject: [PATCH 099/385] Revert changes to version parsing. --- CMakeLists.txt | 2 +- conanfile.py | 2 +- src/sisl_version/CMakeLists.txt | 4 ++-- src/sisl_version/tests/test_version.cpp | 15 ++++----------- src/sisl_version/version.cpp | 25 +++++-------------------- src/version.hpp | 12 +++++------- 6 files changed, 18 insertions(+), 42 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index e3d0e4e0..96af1229 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -58,7 +58,7 @@ find_package(GTest REQUIRED) find_package(jwt-cpp REQUIRED) find_package(nlohmann_json REQUIRED) find_package(prometheus-cpp REQUIRED) -find_package(semver.c REQUIRED) +find_package(semver REQUIRED) find_package(spdlog REQUIRED) find_package (Threads REQUIRED) find_package(userspace-rcu REQUIRED) diff --git a/conanfile.py b/conanfile.py index c7c057f3..93d6ee76 100644 --- a/conanfile.py +++ b/conanfile.py @@ -67,7 +67,7 @@ def requirements(self): self.requires("folly/2022.01.31.00") self.requires("jwt-cpp/0.4.0") self.requires("nlohmann_json/3.10.5") - self.requires("semver.c/1.0.0") + self.requires("semver/1.1.0") self.requires("spdlog/1.10.0") self.requires("userspace-rcu/0.11.4") self.requires("fmt/8.1.1", override=True) diff --git a/src/sisl_version/CMakeLists.txt b/src/sisl_version/CMakeLists.txt index d39b0d61..0d1ee8d1 100644 --- a/src/sisl_version/CMakeLists.txt +++ b/src/sisl_version/CMakeLists.txt @@ -11,12 +11,12 @@ set(VERSION_SOURCE_FILES version.cpp ) add_library(sisl_version OBJECT ${VERSION_SOURCE_FILES}) -target_link_libraries(sisl_version ${COMMON_DEPS} semver.c::semver.c) +target_link_libraries(sisl_version ${COMMON_DEPS} semver::semver) target_include_directories(sisl_version PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) set(TEST_VERSION_SOURCE_FILES tests/test_version.cpp ) add_executable(test_version ${TEST_VERSION_SOURCE_FILES}) -target_link_libraries(test_version sisl ${COMMON_DEPS} semver.c::semver.c GTest::gtest) +target_link_libraries(test_version sisl ${COMMON_DEPS} semver::semver GTest::gtest) add_test(NAME VersionTest COMMAND test_version) diff --git a/src/sisl_version/tests/test_version.cpp b/src/sisl_version/tests/test_version.cpp index 8d2026ca..a432801f 100644 --- a/src/sisl_version/tests/test_version.cpp +++ b/src/sisl_version/tests/test_version.cpp @@ -10,30 +10,23 @@ SISL_OPTIONS_ENABLE(logging) SISL_LOGGING_INIT(test_version) void entry() { - semver_t new_version = {}; - RELEASE_ASSERT_EQ(0, semver_parse(PACKAGE_VERSION, &new_version), "Could not parse version: {}", PACKAGE_VERSION); - sisl::VersionMgr::addVersion("dummy", new_version); + auto ver{version::Semver200_version(PACKAGE_VERSION)}; + sisl::VersionMgr::addVersion("dummy", ver); } TEST(entryTest, entry) { entry(); - char temp_c_string[100] = {'\0'}; - semver_render(sisl::VersionMgr::getVersion("dummy"), temp_c_string); - const std::string dummy_ver{fmt::format("{0}", temp_c_string)}; + const std::string dummy_ver{fmt::format("{0}", sisl::VersionMgr::getVersion("dummy"))}; LOGINFO("Dummy ver. {}", dummy_ver); - temp_c_string[0] = '\0'; - semver_render(sisl::VersionMgr::getVersion("sisl"), temp_c_string); - const std::string sisl_ver{fmt::format("{0}", temp_c_string)}; + const std::string sisl_ver{fmt::format("{0}", sisl::VersionMgr::getVersion("sisl"))}; LOGINFO("SISL ver. {}", sisl_ver); EXPECT_EQ(dummy_ver, sisl_ver); auto versions{sisl::VersionMgr::getVersions()}; EXPECT_EQ((int)versions.size(), 2); - - sisl::VersionMgr::clear(); } int main(int argc, char* argv[]) { diff --git a/src/sisl_version/version.cpp b/src/sisl_version/version.cpp index e5baf631..4326b94b 100644 --- a/src/sisl_version/version.cpp +++ b/src/sisl_version/version.cpp @@ -15,7 +15,6 @@ * *********************************************************************************/ #include "version.hpp" -#include "logging/logging.h" #include namespace sisl { @@ -23,15 +22,10 @@ namespace sisl { VersionMgr* VersionMgr::m_instance = nullptr; std::once_flag VersionMgr::m_init_flag; -VersionMgr::~VersionMgr() { - clear(); -} - void VersionMgr::createAndInit() { m_instance = new VersionMgr(); - auto& version = m_instance->m_version_map["sisl"]; - auto const ret = semver_parse(PACKAGE_VERSION, &version); - RELEASE_ASSERT_EQ(0, ret, "Version could not be parsed: {}", PACKAGE_VERSION); + auto ver{version::Semver200_version(PACKAGE_VERSION)}; + m_instance->m_version_map["sisl"] = ver; } VersionMgr* VersionMgr::getInstance() { @@ -39,21 +33,12 @@ VersionMgr* VersionMgr::getInstance() { return m_instance; } -void VersionMgr::clear() { - auto ver_info{VersionMgr::getInstance()}; - std::unique_lock l{ver_info->m_mutex}; - for (auto it = ver_info->m_version_map.begin(); ver_info->m_version_map.end() != it; ++it) { - semver_free(&it->second); - } - ver_info->m_version_map.clear(); -} - -semver_t* VersionMgr::getVersion(const std::string& name) { +version::Semver200_version VersionMgr::getVersion(const std::string& name) { auto ver_info{VersionMgr::getInstance()}; std::unique_lock l{ver_info->m_mutex}; auto it{ver_info->m_version_map.find(name)}; assert(it != ver_info->m_version_map.end()); - return &it->second; + return it->second; } std::vector< modinfo > VersionMgr::getVersions() { @@ -64,7 +49,7 @@ std::vector< modinfo > VersionMgr::getVersions() { return res; } -void VersionMgr::addVersion(const std::string& name, const semver_t& ver) { +void VersionMgr::addVersion(const std::string& name, const version::Semver200_version& ver) { auto ver_info{VersionMgr::getInstance()}; std::unique_lock l{ver_info->m_mutex}; auto it{ver_info->m_version_map.find(name)}; diff --git a/src/version.hpp b/src/version.hpp index 7c4a30fb..97d1629a 100644 --- a/src/version.hpp +++ b/src/version.hpp @@ -15,22 +15,21 @@ * *********************************************************************************/ #pragma once -#include +#include #include #include #include namespace sisl { -typedef std::pair< std::string, semver_t > modinfo; +typedef std::pair< std::string, version::Semver200_version > modinfo; class VersionMgr { private: mutable std::mutex m_mutex; - std::unordered_map< std::string, semver_t > m_version_map; + std::unordered_map< std::string, version::Semver200_version > m_version_map; VersionMgr() = default; - ~VersionMgr(); static VersionMgr* m_instance; static std::once_flag m_init_flag; @@ -41,10 +40,9 @@ class VersionMgr { void operator=(VersionMgr const&) = delete; static VersionMgr* getInstance(); - static void clear(); - static semver_t* getVersion(const std::string& name); + static version::Semver200_version getVersion(const std::string& name); static std::vector< modinfo > getVersions(); - static void addVersion(const std::string& name, const semver_t& ver); + static void addVersion(const std::string& name, const version::Semver200_version& ver); }; } // namespace sisl From 15c5abc4eb8e51e86e132f34ef77e17bb1cfd91e Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 11 Aug 2022 16:08:55 -0700 Subject: [PATCH 100/385] Remove "JEMALLOC_EXPORT" test, folly seems to set it regardless. --- conanfile.py | 4 ++-- src/fds/malloc_helper.hpp | 24 ++++++++++++------------ src/fds/tests/test_jemalloc_helper.cpp | 2 +- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/conanfile.py b/conanfile.py index 93d6ee76..438b4cf3 100644 --- a/conanfile.py +++ b/conanfile.py @@ -1,7 +1,7 @@ from conans import ConanFile, CMake, tools import os -class MetricsConan(ConanFile): +class SISLConan(ConanFile): name = "sisl" version = "8.0.1" homepage = "https://github.com/eBay/sisl" @@ -56,7 +56,7 @@ def configure(self): del self.options.fPIC def requirements(self): - # Custom packages + # Custom packages self.requires("prometheus-cpp/1.0.0") # Generic packages (conan-center) diff --git a/src/fds/malloc_helper.hpp b/src/fds/malloc_helper.hpp index 914ee890..e0081353 100644 --- a/src/fds/malloc_helper.hpp +++ b/src/fds/malloc_helper.hpp @@ -44,7 +44,7 @@ #if defined(USING_TCMALLOC) #include -#elif defined(JEMALLOC_EXPORT) || defined(USING_JEMALLOC) || defined(USE_JEMALLOC) +#elif defined(USING_JEMALLOC) || defined(USE_JEMALLOC) #include #endif @@ -53,7 +53,7 @@ namespace sisl { class MallocMetrics; static void get_parse_tcmalloc_stats(nlohmann::json* const j, MallocMetrics* const metrics); static uint64_t tcmalloc_page_size{8192}; -#elif defined(JEMALLOC_EXPORT) || defined(USING_JEMALLOC) || defined(USE_JEMALLOC) +#elif defined(USING_JEMALLOC) || defined(USE_JEMALLOC) class MallocMetrics; static void get_parse_jemalloc_stats(nlohmann::json* const j, MallocMetrics* const metrics, const bool refresh); #endif @@ -79,7 +79,7 @@ class MallocMetrics : public MetricsGroupWrapper { HistogramBucketsType(LinearUpto128Buckets)); REGISTER_HISTOGRAM(inuse_page_span_distribution, "Continuous pages which are being used by app", HistogramBucketsType(LinearUpto128Buckets)); -#elif defined(JEMALLOC_EXPORT) || defined(USING_JEMALLOC) || defined(USE_JEMALLOC) +#elif defined(USING_JEMALLOC) || defined(USE_JEMALLOC) REGISTER_GAUGE(active_memory, "Bytes in active pages allocated by the application"); REGISTER_GAUGE(allocated_memory, "Bytes allocated by the application"); REGISTER_GAUGE(metadata_memory, "Bytes dedicated to metadata"); @@ -105,7 +105,7 @@ class MallocMetrics : public MetricsGroupWrapper { void on_gather() { #ifdef USING_TCMALLOC get_parse_tcmalloc_stats(nullptr, this); -#elif defined(JEMALLOC_EXPORT) || defined(USING_JEMALLOC) || defined(USE_JEMALLOC) +#elif defined(USING_JEMALLOC) || defined(USE_JEMALLOC) get_parse_jemalloc_stats(nullptr, this, true /* refresh */); #endif } @@ -118,7 +118,7 @@ class MallocMetrics : public MetricsGroupWrapper { }; #ifndef USING_TCMALLOC -#if defined(JEMALLOC_EXPORT) || defined(USING_JEMALLOC) || defined(USE_JEMALLOC) +#if defined(USING_JEMALLOC) || defined(USE_JEMALLOC) class JEMallocStatics { public: JEMallocStatics(const JEMallocStatics&) = delete; @@ -301,7 +301,7 @@ static size_t get_jemalloc_muzzy_page_count() { size_t allocated{0}; #ifndef USING_TCMALLOC -#if defined(JEMALLOC_EXPORT) || defined(USING_JEMALLOC) || defined(USE_JEMALLOC) +#if defined(USING_JEMALLOC) || defined(USE_JEMALLOC) static const auto& jemalloc_statics{JEMallocStatics::get()}; size_t sz_allocated{sizeof(allocated)}; static const auto& stats_allocated_mib{jemalloc_statics.get_stats_allocated_mib()}; @@ -399,7 +399,7 @@ static void get_parse_tcmalloc_stats(nlohmann::json* const j, MallocMetrics* con if (metrics) { MallocExtension::instance()->Ranges(nullptr, update_tcmalloc_range_stats); } delete[] stats_buf; } -#elif defined(JEMALLOC_EXPORT) || defined(USING_JEMALLOC) || defined(USE_JEMALLOC) +#elif defined(USING_JEMALLOC) || defined(USE_JEMALLOC) static void get_parse_jemalloc_stats(nlohmann::json* const j, MallocMetrics* const metrics, const bool refresh) { static const auto& jemalloc_statics{JEMallocStatics::get()}; @@ -497,7 +497,7 @@ static void print_my_jemalloc_data(void* const opaque, const char* const buf) { j["Implementation"] = "TCMalloc (possibly)"; get_parse_tcmalloc_stats(&j, nullptr); j["Stats"]["Malloc"]["MemoryReleaseRate"] = MallocExtension::instance()->GetMemoryReleaseRate(); -#elif defined(JEMALLOC_EXPORT) || defined(USING_JEMALLOC) || defined(USE_JEMALLOC) +#elif defined(USING_JEMALLOC) || defined(USE_JEMALLOC) static std::mutex stats_mutex; // get malloc data in JSON format std::string detailed; @@ -526,7 +526,7 @@ static void print_my_jemalloc_data(void* const opaque, const char* const buf) { } #ifndef USING_TCMALLOC -#if defined(JEMALLOC_EXPORT) || defined(USING_JEMALLOC) || defined(USE_JEMALLOC) +#if defined(USING_JEMALLOC) || defined(USE_JEMALLOC) [[maybe_unused]] static bool set_jemalloc_decay_times(const ssize_t dirty_decay_ms_in = 0, const ssize_t muzzy_decay_ms_in = 0) { static const auto& jemalloc_statics{JEMallocStatics::get()}; @@ -588,7 +588,7 @@ static std::atomic< bool > s_is_aggressive_decommit{false}; MallocExtension::instance()->SetNumericProperty("tcmalloc.aggressive_memory_decommit", 1); MallocExtension::instance()->ReleaseFreeMemory(); tcmalloc_helper::s_is_aggressive_decommit.store(true, std::memory_order_release); -#elif defined(JEMALLOC_EXPORT) || defined(USING_JEMALLOC) || defined(USE_JEMALLOC) +#elif defined(USING_JEMALLOC) || defined(USE_JEMALLOC) static thread_local auto arena_purge_mib{JEMallocStatics::get().get_arena_purge_mib()}; arena_purge_mib.first[1] = static_cast< size_t >(MALLCTL_ARENAS_ALL); if (::mallctlbymib(arena_purge_mib.first.data(), arena_purge_mib.second, nullptr, nullptr, nullptr, 0) != 0) { @@ -617,7 +617,7 @@ static std::atomic< bool > s_is_aggressive_decommit{false}; [[maybe_unused]] static bool soft_decommit_mem() { #if defined(USING_TCMALLOC) MallocExtension::instance()->ReleaseFreeMemory(); -#elif defined(JEMALLOC_EXPORT) || defined(USING_JEMALLOC) || defined(USE_JEMALLOC) +#elif defined(USING_JEMALLOC) || defined(USE_JEMALLOC) static thread_local auto arena_decay_mib{JEMallocStatics::get().get_arena_decay_mib()}; arena_decay_mib.first[1] = static_cast< size_t >(MALLCTL_ARENAS_ALL); if (::mallctlbymib(arena_decay_mib.first.data(), arena_decay_mib.second, nullptr, nullptr, nullptr, 0) != 0) { @@ -630,7 +630,7 @@ static std::atomic< bool > s_is_aggressive_decommit{false}; [[maybe_unused]] static bool release_mem_if_needed(const size_t soft_threshold, const size_t aggressive_threshold_in) { bool ret{false}; -#if defined(USING_TCMALLOC) || defined(JEMALLOC_EXPORT) || defined(USING_JEMALLOC) || defined(USE_JEMALLOC) +#if defined(USING_TCMALLOC) || defined(USING_JEMALLOC) || defined(USE_JEMALLOC) size_t mem_usage{0}; const size_t aggressive_threshold{std::max(aggressive_threshold_in, soft_threshold)}; diff --git a/src/fds/tests/test_jemalloc_helper.cpp b/src/fds/tests/test_jemalloc_helper.cpp index 94328633..18d1e2ca 100644 --- a/src/fds/tests/test_jemalloc_helper.cpp +++ b/src/fds/tests/test_jemalloc_helper.cpp @@ -15,7 +15,7 @@ * *********************************************************************************/ #ifndef USING_TCMALLOC -#if defined(JEMALLOC_EXPORT) || defined(USING_JEMALLOC) || defined(USE_JEMALLOC) +#if defined(USING_JEMALLOC) || defined(USE_JEMALLOC) #include #include From c4f299e61a516a6707d1899b6ef4abea281d0d3f Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 15 Aug 2022 16:18:28 -0700 Subject: [PATCH 101/385] Use dummy package for PRERELEASE flag. --- CMakeLists.txt | 6 +++++- conanfile.py | 5 +++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 96af1229..4ec34f2b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -57,13 +57,14 @@ find_package(folly REQUIRED) find_package(GTest REQUIRED) find_package(jwt-cpp REQUIRED) find_package(nlohmann_json REQUIRED) +find_package(prerelease_dummy QUIET) find_package(prometheus-cpp REQUIRED) find_package(semver REQUIRED) find_package(spdlog REQUIRED) find_package (Threads REQUIRED) find_package(userspace-rcu REQUIRED) -set(COMMON_DEPS +list (APPEND COMMON_DEPS Boost::headers cxxopts::cxxopts nlohmann_json::nlohmann_json @@ -71,6 +72,9 @@ set(COMMON_DEPS spdlog::spdlog userspace-rcu::userspace-rcu ) +if (${prerelease_dummy_FOUND}) + list (APPEND COMMON_DEPS prerelease_dummy::prerelease_dummy) +endif () find_program(CCACHE_FOUND ccache) if (CCACHE_FOUND) diff --git a/conanfile.py b/conanfile.py index 438b4cf3..d674cc38 100644 --- a/conanfile.py +++ b/conanfile.py @@ -56,6 +56,9 @@ def configure(self): del self.options.fPIC def requirements(self): + if self.options.prerelease: + self.requires("prerelease_dummy/1.0.1") + # Custom packages self.requires("prometheus-cpp/1.0.0") @@ -123,8 +126,6 @@ def package_info(self): self.cpp_info.cppflags.append("-Wno-unused-local-typedefs") self.cpp_info.cppflags.append("-fconcepts") self.cpp_info.includedirs = ["include", "include/sisl/"] - if self.options.prerelease: - self.cpp_info.cxxflags.append("-D_PRERELEASE=1") if self.settings.os == "Linux": self.cpp_info.cppflags.append("-D_POSIX_C_SOURCE=200809L") self.cpp_info.cppflags.append("-D_FILE_OFFSET_BITS=64") From 7e58fd6aa613ede821d31f42ed6e3888912bb958 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 16 Aug 2022 09:16:57 -0700 Subject: [PATCH 102/385] Fix objlife ut. --- src/utility/CMakeLists.txt | 14 ++++++++------ src/utility/tests/test_objlife_counter.cpp | 13 +++++-------- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/src/utility/CMakeLists.txt b/src/utility/CMakeLists.txt index 2453b1d5..ea33be93 100644 --- a/src/utility/CMakeLists.txt +++ b/src/utility/CMakeLists.txt @@ -33,9 +33,11 @@ add_executable(test_enum ${TEST_ENUM}) target_link_libraries(test_enum ${COMMON_DEPS} GTest::gtest) add_test(NAME EnumTest COMMAND test_enum) -set(TEST_OBJLIFE - tests/test_objlife_counter.cpp - ) -add_executable(test_objlife ${TEST_OBJLIFE}) -target_link_libraries(test_objlife sisl ${COMMON_DEPS} GTest::gtest) -add_test(NAME ObjLifeTest COMMAND test_objlife) +if (${prerelease_dummy_FOUND}) + set(TEST_OBJLIFE + tests/test_objlife_counter.cpp + ) + add_executable(test_objlife ${TEST_OBJLIFE}) + target_link_libraries(test_objlife sisl ${COMMON_DEPS} GTest::gtest) + add_test(NAME ObjLifeTest COMMAND test_objlife) +endif () diff --git a/src/utility/tests/test_objlife_counter.cpp b/src/utility/tests/test_objlife_counter.cpp index bc8f997f..1b96a503 100644 --- a/src/utility/tests/test_objlife_counter.cpp +++ b/src/utility/tests/test_objlife_counter.cpp @@ -11,6 +11,7 @@ #include "options/options.h" #include "fds/buffer.hpp" + #include "obj_life_counter.hpp" SISL_LOGGING_INIT(test_objlife) @@ -70,13 +71,13 @@ TEST_F(ObjLifeTest, BasicCount) { const auto prom_format{sisl::MetricsFarm::getInstance().report(sisl::ReportFormat::kTextFormat)}; std::cout << "Prometheus Output = " << prom_format; - ASSERT_TRUE(prom_format.find(R"(TestClass_double__sisl::blob_{entity="Singleton",type="alive"} 1.0)") != + EXPECT_TRUE(prom_format.find(R"(TestClass_double__sisl::blob_{entity="Singleton",type="alive"} 1)") != std::string::npos); - ASSERT_TRUE(prom_format.find(R"(TestClass_double__sisl::blob_{entity="Singleton",type="created"} 1.0)") != + EXPECT_TRUE(prom_format.find(R"(TestClass_double__sisl::blob_{entity="Singleton",type="created"} 1)") != std::string::npos); - ASSERT_TRUE(prom_format.find(R"(TestClass_charP__unsigned_int_{entity="Singleton",type="alive"} 1.0)") != + EXPECT_TRUE(prom_format.find(R"(TestClass_charP__unsigned_int_{entity="Singleton",type="alive"} 1)") != std::string::npos); - ASSERT_TRUE(prom_format.find(R"(TestClass_charP__unsigned_int_{entity="Singleton",type="created"} 2.0)") != + EXPECT_TRUE(prom_format.find(R"(TestClass_charP__unsigned_int_{entity="Singleton",type="created"} 2)") != std::string::npos); } @@ -94,10 +95,6 @@ int main(int argc, char* argv[]) { g_num_threads = SISL_OPTIONS["num_threads"].as< uint32_t >(); -#ifdef _PRERELEASE const auto ret{RUN_ALL_TESTS()}; return ret; -#else - return 0; -#endif } From 1e774f0c7769af88f3356a1e48821919e3c0cfad Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 24 Aug 2022 15:39:47 -0700 Subject: [PATCH 103/385] Cleanup recipe --- CMakeLists.txt | 2 +- conanfile.py | 107 ++++++++++++-------------------- src/sisl_version/CMakeLists.txt | 4 +- 3 files changed, 44 insertions(+), 69 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 4ec34f2b..e4c0e65f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -59,7 +59,7 @@ find_package(jwt-cpp REQUIRED) find_package(nlohmann_json REQUIRED) find_package(prerelease_dummy QUIET) find_package(prometheus-cpp REQUIRED) -find_package(semver REQUIRED) +find_package(semver200 REQUIRED) find_package(spdlog REQUIRED) find_package (Threads REQUIRED) find_package(userspace-rcu REQUIRED) diff --git a/conanfile.py b/conanfile.py index 72104172..198e797a 100644 --- a/conanfile.py +++ b/conanfile.py @@ -1,5 +1,10 @@ -from conans import ConanFile, CMake, tools -import os +from os.path import join +from conan import ConanFile +from conan.tools.files import copy +from conans.tools import check_min_cppstd +from conans import CMake + +required_conan_version = ">=1.50.0" class SISLConan(ConanFile): name = "sisl" @@ -11,47 +16,27 @@ class SISLConan(ConanFile): license = "Apache-2.0" settings = "arch", "os", "compiler", "build_type" + options = { "shared": ['True', 'False'], "fPIC": ['True', 'False'], - "coverage": ['True', 'False'], - "sanitize": ['True', 'False'], - 'malloc_impl' : ['libc', 'tcmalloc', 'jemalloc'], + 'malloc_impl' : ['libc', 'jemalloc'], 'with_evhtp' : ['True', 'False'], } default_options = { 'shared': False, 'fPIC': True, - 'coverage': False, - 'sanitize': False, - 'malloc_impl': 'tcmalloc', - 'with_evhtp': True, + 'malloc_impl': 'libc', + 'with_evhtp': False, } - build_requires = ( - # Generic packages (conan-center) - "benchmark/1.6.1", - "gtest/1.11.0", - ) - generators = "cmake", "cmake_find_package" exports_sources = ("CMakeLists.txt", "cmake/*", "src/*", "LICENSE") - def config_options(self): - if self.settings.build_type != "Debug": - del self.options.sanitize - del self.options.coverage - elif os.getenv("OVERRIDE_SANITIZE") != None: - self.options.sanitize = True + def build_requirements(self): + self.build_requires("benchmark/1.6.1") + self.build_requires("gtest/1.11.0") - def configure(self): - if self.settings.build_type == "Debug": - if self.options.coverage and self.options.sanitize: - raise ConanInvalidConfiguration("Sanitizer does not work with Code Coverage!") - if self.options.coverage or self.options.sanitize: - self.options.malloc_impl = 'libc' - if self.options.shared: - del self.options.fPIC def requirements(self): # Custom packages @@ -75,69 +60,59 @@ def requirements(self): self.requires("zlib/1.2.12", override=True) if self.options.malloc_impl == "jemalloc": self.requires("jemalloc/5.2.1") - elif self.options.malloc_impl == "tcmalloc": - self.requires("gperftools/2.7.0") if self.options.with_evhtp: self.requires("evhtp/1.2.18.2") + def validate(self): + if self.info.settings.compiler.cppstd: + check_min_cppstd(self, 20) + + def configure(self): + if self.options.shared: + del self.options.fPIC + def build(self): cmake = CMake(self) - definitions = {'CONAN_BUILD_COVERAGE': 'OFF', - 'CMAKE_EXPORT_COMPILE_COMMANDS': 'ON', + definitions = {'CMAKE_EXPORT_COMPILE_COMMANDS': 'ON', 'MEMORY_SANITIZER_ON': 'OFF', - 'EVHTP_ON': 'OFF'} + 'EVHTP_ON': 'OFF', + 'MALLOC_IMPL': self.options.malloc_impl} test_target = None if self.options.with_evhtp: definitions['EVHTP_ON'] = 'ON' - if self.settings.build_type == "Debug": - if self.options.sanitize: - definitions['MEMORY_SANITIZER_ON'] = 'ON' - elif self.options.coverage: - definitions['CONAN_BUILD_COVERAGE'] = 'ON' - test_target = 'coverage' - - definitions['MALLOC_IMPL'] = self.options.malloc_impl - cmake.configure(defs=definitions) cmake.build() cmake.test(target=test_target) def package(self): - self.copy(pattern="LICENSE*", dst="licenses") - self.copy("*.hpp", src="src/", dst="include/sisl", keep_path=True) - self.copy("*.h", src="src/", dst="include/sisl", keep_path=True) - self.copy("*.a", dst="lib/", keep_path=False) - self.copy("*.lib", dst="lib/", keep_path=False) - self.copy("*.so", dst="lib/", keep_path=False) - self.copy("*.dll", dst="lib/", keep_path=False) - self.copy("*.dylib", dst="lib/", keep_path=False) - self.copy("*.cmake", dst="cmake/", keep_path=False) + lib_dir = join(self.package_folder, "lib") + copy(self, "LICENSE", self.source_folder, join(self.package_folder, "licenses/"), keep_path=False) + copy(self, "*.lib", self.build_folder, lib_dir, keep_path=False) + copy(self, "*.a", self.build_folder, lib_dir, keep_path=False) + copy(self, "*.so*", self.build_folder, lib_dir, keep_path=False) + copy(self, "*.dylib*", self.build_folder, lib_dir, keep_path=False) + copy(self, "*.dll*", self.build_folder, join(self.package_folder, "bin"), keep_path=False) + copy(self, "*.so*", self.build_folder, lib_dir, keep_path=False) + + hdr_dir = join(self.package_folder, join("include", "sisl")) + + copy(self, "*.hpp", join(self.source_folder, "src"), hdr_dir, keep_path=True) + copy(self, "*.h", join(self.source_folder, "src"), hdr_dir, keep_path=True) + copy(self, "settings_gen.cmake", join(self.source_folder, "cmake"), join(self.package_folder, "cmake"), keep_path=False) def package_info(self): self.cpp_info.libs = ["sisl"] - self.cpp_info.cppflags.append("-Wno-unused-local-typedefs") - self.cpp_info.cppflags.append("-fconcepts") - self.cpp_info.includedirs = ["include", "include/sisl/"] + self.cpp_info.cppflags.extend(["-Wno-unused-local-typedefs", "-fconcepts"]) + if self.settings.os == "Linux": self.cpp_info.cppflags.append("-D_POSIX_C_SOURCE=200809L") self.cpp_info.cppflags.append("-D_FILE_OFFSET_BITS=64") self.cpp_info.cppflags.append("-D_LARGEFILE64") - if self.settings.build_type == "Debug": - if self.options.sanitize: - self.cpp_info.sharedlinkflags.append("-fsanitize=address") - self.cpp_info.exelinkflags.append("-fsanitize=address") - self.cpp_info.sharedlinkflags.append("-fsanitize=undefined") - self.cpp_info.exelinkflags.append("-fsanitize=undefined") - elif self.options.coverage == 'True': - self.cpp_info.system_libs.append('gcov') - if self.settings.os == "Linux": self.cpp_info.system_libs.append("dl") self.cpp_info.exelinkflags.extend(["-export-dynamic"]) if self.options.malloc_impl == 'jemalloc': self.cpp_info.cppflags.append("-DUSE_JEMALLOC=1") - elif self.options.malloc_impl == 'tcmalloc': - self.cpp_info.cppflags.append("-DUSING_TCMALLOC=1") diff --git a/src/sisl_version/CMakeLists.txt b/src/sisl_version/CMakeLists.txt index 0d1ee8d1..f338167a 100644 --- a/src/sisl_version/CMakeLists.txt +++ b/src/sisl_version/CMakeLists.txt @@ -11,12 +11,12 @@ set(VERSION_SOURCE_FILES version.cpp ) add_library(sisl_version OBJECT ${VERSION_SOURCE_FILES}) -target_link_libraries(sisl_version ${COMMON_DEPS} semver::semver) +target_link_libraries(sisl_version ${COMMON_DEPS} semver200::semver200) target_include_directories(sisl_version PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) set(TEST_VERSION_SOURCE_FILES tests/test_version.cpp ) add_executable(test_version ${TEST_VERSION_SOURCE_FILES}) -target_link_libraries(test_version sisl ${COMMON_DEPS} semver::semver GTest::gtest) +target_link_libraries(test_version sisl ${COMMON_DEPS} semver200::semver200 GTest::gtest) add_test(NAME VersionTest COMMAND test_version) From d8e2838eabf26764cacc7699610dd6de61399800 Mon Sep 17 00:00:00 2001 From: Ravi Nagarjun Date: Tue, 26 Jul 2022 11:24:31 -0700 Subject: [PATCH 104/385] use pistache http server for unit tests --- tests/CMakeLists.txt | 1 + tests/unit/CMakeLists.txt | 2 +- tests/unit/auth_test.cpp | 78 ++++++++++++++------------------ tests/unit/basic_http_server.hpp | 61 +++++++++++++++++++++++++ 4 files changed, 96 insertions(+), 46 deletions(-) create mode 100644 tests/unit/basic_http_server.hpp diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 451ab4e0..3bae7590 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,6 +1,7 @@ cmake_minimum_required (VERSION 3.11) find_package(GTest REQUIRED) +find_package(pistache REQUIRED) add_subdirectory(proto) diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index b7f8d7a9..2780676c 100644 --- a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -6,7 +6,7 @@ add_executable(auth_test ) target_link_libraries(auth_test grpc_helper - sisl::sisl + pistache::pistache gRPC::grpc++ GTest::gmock ) diff --git a/tests/unit/auth_test.cpp b/tests/unit/auth_test.cpp index c2ebfe60..72236ac0 100644 --- a/tests/unit/auth_test.cpp +++ b/tests/unit/auth_test.cpp @@ -7,21 +7,13 @@ #include #include -#if defined __clang__ or defined __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-function-type" -#endif -#include -#if defined __clang__ or defined __GNUC__ -#pragma GCC diagnostic pop -#endif - +#include "basic_http_server.hpp" #include "grpc_helper/rpc_client.hpp" #include "grpc_helper/rpc_server.hpp" #include "grpc_helper_test.grpc.pb.h" #include "test_token.hpp" -SISL_LOGGING_INIT(logging, grpc_server, httpserver_lmod) +SISL_LOGGING_INIT(logging, grpc_server) SISL_OPTIONS_ENABLE(logging) namespace grpc_helper::testing { @@ -32,6 +24,17 @@ using namespace ::testing; static const std::string grpc_server_addr{"0.0.0.0:12345"}; static const std::string trf_token_server_ip{"127.0.0.1"}; static const uint32_t trf_token_server_port{12346}; +static std::string token_response; +static void set_token_response(const std::string& raw_token) { + token_response = "{\n" + " \"access_token\": \"" + + raw_token + + "\",\n" + " \"token_type\": \"Bearer\",\n" + " \"expires_in\": \"2000\",\n" + " \"refresh_token\": \"dummy_refresh_token\"\n" + "}"; +} class EchoServiceImpl final { public: @@ -200,6 +203,19 @@ TEST_F(AuthServerOnlyTest, fail_on_no_client_auth) { EXPECT_EQ(status.error_message(), "missing header authorization"); } +class TokenApiImpl : public TokenApi { +public: + void get_token_impl(Pistache::Http::ResponseWriter& response) { + LOGINFO("Sending token to client"); + response.send(Pistache::Http::Code::Ok, token_response); + } + + void get_key_impl(Pistache::Http::ResponseWriter& response) { + LOGINFO("Download rsa key"); + response.send(Pistache::Http::Code::Ok, rsa_pub_key); + } +}; + class AuthEnableTest : public AuthBaseTest { public: void SetUp() override { @@ -209,17 +225,10 @@ class AuthEnableTest : public AuthBaseTest { grpc_server_start(grpc_server_addr, m_auth_mgr); // start token server - HttpServerConfig http_cfg; - http_cfg.is_tls_enabled = false; - http_cfg.bind_address = trf_token_server_ip; - http_cfg.server_port = trf_token_server_port; - http_cfg.read_write_timeout_secs = 10; - http_cfg.is_auth_enabled = false; - m_token_server = std::unique_ptr< HttpServer >( - new HttpServer(http_cfg, - {handler_info("/token", AuthEnableTest::get_token, (void*)this), - handler_info("/download_key", AuthEnableTest::download_key, (void*)this)})); - m_token_server->start(); + APIBase::init(Pistache::Address(fmt::format("{}:{}", trf_token_server_ip, trf_token_server_port)), 1); + m_token_server = std::unique_ptr< TokenApiImpl >(new TokenApiImpl()); + m_token_server->setupRoutes(); + APIBase::start(); // Client with auth m_trf_client = std::make_shared< TrfClient >(); @@ -231,39 +240,18 @@ class AuthEnableTest : public AuthBaseTest { void TearDown() override { AuthBaseTest::TearDown(); - m_token_server->stop(); + APIBase::stop(); remove_auth_settings(); } - static void get_token(HttpCallData cd) { - LOGINFO("Sending token to client"); - pThis(cd)->m_token_server->respond_OK(cd, EVHTP_RES_OK, m_token_response); - } - - static void download_key(HttpCallData cd) { pThis(cd)->m_token_server->respond_OK(cd, EVHTP_RES_OK, rsa_pub_key); } - - static void set_token_response(const std::string& raw_token) { - m_token_response = "{\n" - " \"access_token\": \"" + - raw_token + - "\",\n" - " \"token_type\": \"Bearer\",\n" - " \"expires_in\": \"2000\",\n" - " \"refresh_token\": \"dummy_refresh_token\"\n" - "}"; - } - protected: - std::unique_ptr< HttpServer > m_token_server; + std::unique_ptr< TokenApiImpl > m_token_server; std::shared_ptr< TrfClient > m_trf_client; - static AuthEnableTest* pThis(HttpCallData cd) { return (AuthEnableTest*)cd->cookie(); } - static std::string m_token_response; }; -std::string AuthEnableTest::m_token_response; TEST_F(AuthEnableTest, allow_with_auth) { auto raw_token = TestToken().sign_rs256(); - AuthEnableTest::set_token_response(raw_token); + set_token_response(raw_token); EchoRequest req; req.set_message("dummy_msg"); EchoReply reply; diff --git a/tests/unit/basic_http_server.hpp b/tests/unit/basic_http_server.hpp new file mode 100644 index 00000000..1c15d85e --- /dev/null +++ b/tests/unit/basic_http_server.hpp @@ -0,0 +1,61 @@ +#include +#include +#include +#include +#include +#include + +#pragma once + +class APIBase { +public: + static void init(Pistache::Address addr, size_t thr) { + m_http_endpoint = std::make_shared< Pistache::Http::Endpoint >(addr); + auto flags = Pistache::Tcp::Options::ReuseAddr; + auto opts = Pistache::Http::Endpoint::options().threadsName("http_server").threads(thr).flags(flags); + m_http_endpoint->init(opts); + } + + static void start() { + m_http_endpoint->setHandler(m_router.handler()); + m_http_endpoint->serveThreaded(); + } + + static void stop() { m_http_endpoint->shutdown(); } + + virtual ~APIBase() {} + +protected: + static std::shared_ptr< Pistache::Http::Endpoint > m_http_endpoint; + static Pistache::Rest::Router m_router; +}; + +std::shared_ptr< Pistache::Http::Endpoint > APIBase::m_http_endpoint; +Pistache::Rest::Router APIBase::m_router; + +class TokenApi : public APIBase { +public: + void setupRoutes() { + Pistache::Rest::Routes::Post(m_router, "/token", + Pistache::Rest::Routes::bind(&TokenApi::get_token_handler, this)); + Pistache::Rest::Routes::Get(m_router, "/download_key", + Pistache::Rest::Routes::bind(&TokenApi::get_key_handler, this)); + } + + void get_token_handler(const Pistache::Rest::Request& request, Pistache::Http::ResponseWriter response) { + this->get_token_impl(response); + } + + void get_key_handler(const Pistache::Rest::Request& request, Pistache::Http::ResponseWriter response) { + + this->get_key_impl(response); + } + + virtual void get_token_impl(Pistache::Http::ResponseWriter& response) = 0; + virtual void get_key_impl(Pistache::Http::ResponseWriter& response) = 0; + + virtual ~TokenApi() { + Pistache::Rest::Routes::Remove(m_router, Pistache::Http::Method::Post, "/token"); + Pistache::Rest::Routes::Remove(m_router, Pistache::Http::Method::Get, "/download_key"); + } +}; \ No newline at end of file From a20d98216008fccba6b78e4f455afbe73019dae8 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 1 Sep 2022 14:53:35 -0700 Subject: [PATCH 105/385] SemVer200 renamed. --- CMakeLists.txt | 2 +- conanfile.py | 2 +- src/sisl_version/CMakeLists.txt | 4 ++-- src/version.hpp | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index e4c0e65f..ef1e1add 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -59,7 +59,7 @@ find_package(jwt-cpp REQUIRED) find_package(nlohmann_json REQUIRED) find_package(prerelease_dummy QUIET) find_package(prometheus-cpp REQUIRED) -find_package(semver200 REQUIRED) +find_package(zmarok-semver REQUIRED) find_package(spdlog REQUIRED) find_package (Threads REQUIRED) find_package(userspace-rcu REQUIRED) diff --git a/conanfile.py b/conanfile.py index 198e797a..9efd68dd 100644 --- a/conanfile.py +++ b/conanfile.py @@ -50,7 +50,7 @@ def requirements(self): self.requires("folly/2022.01.31.00") self.requires("jwt-cpp/0.4.0") self.requires("nlohmann_json/3.10.5") - self.requires("semver200/1.1.0") + self.requires("zmarok-semver/1.1.0") self.requires("spdlog/1.10.0") self.requires("userspace-rcu/0.11.4") self.requires("fmt/8.1.1", override=True) diff --git a/src/sisl_version/CMakeLists.txt b/src/sisl_version/CMakeLists.txt index f338167a..f6aab51a 100644 --- a/src/sisl_version/CMakeLists.txt +++ b/src/sisl_version/CMakeLists.txt @@ -11,12 +11,12 @@ set(VERSION_SOURCE_FILES version.cpp ) add_library(sisl_version OBJECT ${VERSION_SOURCE_FILES}) -target_link_libraries(sisl_version ${COMMON_DEPS} semver200::semver200) +target_link_libraries(sisl_version ${COMMON_DEPS} zmarok-semver::zmarok-semver) target_include_directories(sisl_version PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) set(TEST_VERSION_SOURCE_FILES tests/test_version.cpp ) add_executable(test_version ${TEST_VERSION_SOURCE_FILES}) -target_link_libraries(test_version sisl ${COMMON_DEPS} semver200::semver200 GTest::gtest) +target_link_libraries(test_version sisl ${COMMON_DEPS} zmarok-semver::zmarok-semver GTest::gtest) add_test(NAME VersionTest COMMAND test_version) diff --git a/src/version.hpp b/src/version.hpp index 97d1629a..3956d21c 100644 --- a/src/version.hpp +++ b/src/version.hpp @@ -15,7 +15,7 @@ * *********************************************************************************/ #pragma once -#include +#include #include #include #include From 853caf78aebda811c1f95730868c07b76f97d201 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 2 Sep 2022 10:59:10 -0600 Subject: [PATCH 106/385] Update imports. --- conanfile.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/conanfile.py b/conanfile.py index 9efd68dd..e5eff56f 100644 --- a/conanfile.py +++ b/conanfile.py @@ -1,7 +1,7 @@ from os.path import join from conan import ConanFile from conan.tools.files import copy -from conans.tools import check_min_cppstd +from conan.tools.build import check_min_cppstd from conans import CMake required_conan_version = ">=1.50.0" @@ -111,7 +111,7 @@ def package_info(self): self.cpp_info.cppflags.append("-D_POSIX_C_SOURCE=200809L") self.cpp_info.cppflags.append("-D_FILE_OFFSET_BITS=64") self.cpp_info.cppflags.append("-D_LARGEFILE64") - self.cpp_info.system_libs.append("dl") + self.cpp_info.system_libs.extend(["dl", "pthread"]) self.cpp_info.exelinkflags.extend(["-export-dynamic"]) if self.options.malloc_impl == 'jemalloc': From 385ec02caff2473b7e22aa325a2e47a368dac6fd Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Fri, 2 Sep 2022 14:43:33 -0700 Subject: [PATCH 107/385] Worked around the lack of reset histogram in prometheus source repo --- CMakeLists.txt | 2 +- conanfile.py | 8 ++++---- src/logging/logging.h | 3 ++- src/metrics/metrics_group_impl.cpp | 3 ++- src/metrics/prometheus_reporter.hpp | 15 ++++++++++----- src/metrics/reporter.hpp | 2 +- 6 files changed, 20 insertions(+), 13 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 43621acd..00550625 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -59,7 +59,7 @@ find_package(nlohmann_json REQUIRED) find_package(prometheus-cpp REQUIRED) find_package(semver.c REQUIRED) find_package(spdlog REQUIRED) -find_package (Threads REQUIRED) +find_package(Threads REQUIRED) find_package(userspace-rcu REQUIRED) set(COMMON_DEPS diff --git a/conanfile.py b/conanfile.py index 0b4e7994..43474f91 100644 --- a/conanfile.py +++ b/conanfile.py @@ -1,9 +1,9 @@ from conans import ConanFile, CMake, tools import os -class MetricsConan(ConanFile): +class SislConan(ConanFile): name = "sisl" - version = "8.0.1" + version = "8.0.2" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") @@ -56,8 +56,7 @@ def configure(self): del self.options.fPIC def requirements(self): - # Custom packages - self.requires("prometheus-cpp/1.0.0") + # Custom packages # Generic packages (conan-center) self.requires("boost/1.79.0") @@ -70,6 +69,7 @@ def requirements(self): self.requires("semver.c/1.0.0") self.requires("spdlog/1.10.0") self.requires("userspace-rcu/0.11.4") + self.requires("prometheus-cpp/1.0.1") self.requires("fmt/8.1.1", override=True) self.requires("libevent/2.1.12", override=True) self.requires("openssl/1.1.1q", override=True) diff --git a/src/logging/logging.h b/src/logging/logging.h index 0769bc14..7593c368 100644 --- a/src/logging/logging.h +++ b/src/logging/logging.h @@ -265,7 +265,8 @@ constexpr const char* file_name(const char* const str) { return str_slant(str) ? * LOGMSG_ASSERT: If condition is not met: Logs the message with stack trace, aborts in debug build only. * DEBUG_ASSERT: No-op in release build, for debug build, if condition is not met, logs the message and aborts */ -#if __cplusplus > 201703L +//#if __cplusplus > 201703L +#if 0 #define _GENERIC_ASSERT(is_log_assert, cond, formatter, msg, ...) \ [[unlikely]] if (!(cond)) { _LOG_AND_ASSERT_FMT(is_log_assert, formatter, msg, ##__VA_ARGS__); } #else diff --git a/src/metrics/metrics_group_impl.cpp b/src/metrics/metrics_group_impl.cpp index 92cdcfe1..e21d0af2 100644 --- a/src/metrics/metrics_group_impl.cpp +++ b/src/metrics/metrics_group_impl.cpp @@ -366,7 +366,8 @@ HistogramDynamicInfo::HistogramDynamicInfo(const HistogramStaticInfo& static_inf void HistogramDynamicInfo::publish(const HistogramValue& hvalue) { if (is_histogram_reporter()) { const auto arr = hvalue.get_freqs(); - as_histogram()->set_value(std::vector< double >(arr.cbegin(), arr.cend()), hvalue.get_sum()); + auto v = std::vector< double >(arr.cbegin(), arr.cend()); + as_histogram()->set_value(v, hvalue.get_sum()); } else { as_gauge()->set_value(average(hvalue)); } diff --git a/src/metrics/prometheus_reporter.hpp b/src/metrics/prometheus_reporter.hpp index 6751d1b0..051302b6 100644 --- a/src/metrics/prometheus_reporter.hpp +++ b/src/metrics/prometheus_reporter.hpp @@ -71,14 +71,19 @@ class PrometheusReportHistogram : public ReportHistogram { PrometheusReportHistogram(prometheus::Family< prometheus::Histogram >& family, const std::map< std::string, std::string >& label_pairs, const hist_bucket_boundaries_t& bkt_boundaries) : - m_histogram(family.Add(label_pairs, bkt_boundaries)) {} - - virtual void set_value(const std::vector< double >& bucket_values, double sum) { - // Use modified prometheus method (not part of original repo) - m_histogram.TransferBucketCounters(bucket_values, sum); + m_histogram(family.Add(label_pairs, bkt_boundaries)), m_bkt_boundaries{bkt_boundaries} {} + + virtual void set_value(std::vector< double >& bucket_values, double sum) { + // Since histogram doesn't have reset facility (PR is yet to be accepted in the main repo), + // we are doing a placement new to reconstruct the entire object to force to call its constructor. This + // way we don't need to register histogram again to family. + bucket_values.resize(m_bkt_boundaries.size() + 1); + prometheus::Histogram* inplace_hist = new ((void*)&m_histogram) prometheus::Histogram(m_bkt_boundaries); + inplace_hist->ObserveMultiple(bucket_values, sum); } prometheus::Histogram& m_histogram; + const hist_bucket_boundaries_t& m_bkt_boundaries; }; class PrometheusReporter : public Reporter { diff --git a/src/metrics/reporter.hpp b/src/metrics/reporter.hpp index fa3e0c3f..93d8ad5e 100644 --- a/src/metrics/reporter.hpp +++ b/src/metrics/reporter.hpp @@ -38,7 +38,7 @@ class ReportGauge { class ReportHistogram { public: - virtual void set_value(const std::vector< double >& bucket_values, double sum) = 0; + virtual void set_value(std::vector< double >& bucket_values, double sum) = 0; }; class Reporter { From cc7b58fc92f027a07d13a40738a319967214e2f4 Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Fri, 2 Sep 2022 14:48:58 -0700 Subject: [PATCH 108/385] Fixed merge conflicts --- conanfile.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/conanfile.py b/conanfile.py index a6e73170..e9a424af 100644 --- a/conanfile.py +++ b/conanfile.py @@ -40,10 +40,6 @@ def build_requirements(self): def requirements(self): # Custom packages -<<<<<<< HEAD -======= - self.requires("prometheus-cpp/1.0.0") ->>>>>>> 4dfc1da9e22a96c046783e85f1db48bcd2a7259d # Generic packages (conan-center) self.requires("boost/1.79.0") From f1e855facf9c599c073c19d9fa6f7946397a58d7 Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Fri, 2 Sep 2022 15:10:59 -0700 Subject: [PATCH 109/385] Resolve an existing build error --- src/file_watcher/file_watcher_test.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/file_watcher/file_watcher_test.cpp b/src/file_watcher/file_watcher_test.cpp index eac9d031..c5059958 100644 --- a/src/file_watcher/file_watcher_test.cpp +++ b/src/file_watcher/file_watcher_test.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include From 2ef4ac44b41cd4031eb7a032d0db9c5fa5bbeec0 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 13 Sep 2022 10:28:18 -0700 Subject: [PATCH 110/385] Fix regex. --- .jenkins/Jenkinsfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index f795db11..ab9d0bbb 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -17,7 +17,7 @@ pipeline { } } } steps { script { - sh(script: "sed -Ei 's,version = .*\"([[:digit:]]+\\.[[:digit:]]+\\.[[:digit:]]+).*,version = \"\\1-${env.BUILD_NUMBER}\",' conanfile.py") + sh(script: "sed -Ei 's, version = .*\"([[:digit:]]+\\.[[:digit:]]+\\.[[:digit:]]+).*, version = \"\\1-${env.BUILD_NUMBER}\",' conanfile.py") BUILD_MISSING = "--build missing" } } @@ -38,7 +38,7 @@ pipeline { steps { script { PROJECT = sh(script: "grep -m 1 'name =' conanfile.py | awk '{print \$3}' | tr -d '\n' | tr -d '\"'", returnStdout: true) - VER = sh(script: "grep -m 1 'version =' conanfile.py | awk '{print \$3}' | tr -d '\n' | tr -d '\"'", returnStdout: true) + VER = sh(script: "grep -m 1 ' version =' conanfile.py | awk '{print \$3}' | tr -d '\n' | tr -d '\"'", returnStdout: true) CONAN_CHANNEL = sh(script: "echo ${BRANCH_NAME} | sed -E 's,(\\w+-?\\d*)/.*,\\1,' | sed -E 's,-,_,' | tr -d '\n'", returnStdout: true) TAG = "${VER}@${CONAN_USER}/${CONAN_CHANNEL}" slackSend color: '#0063D1', channel: '#sds-ci', message: "*${PROJECT}/${TAG}* is building." @@ -124,7 +124,7 @@ pipeline { branch "${STABLE_BRANCH}" } } } steps { - sh(script: "sed -Ei 's,version = .*\"([[:digit:]]+\\.[[:digit:]]+\\.[[:digit:]]+).*,version = \"\\1-${env.BUILD_NUMBER}\",' conanfile.py") + sh(script: "sed -Ei 's, version = .*\"([[:digit:]]+\\.[[:digit:]]+\\.[[:digit:]]+).*, version = \"\\1-${env.BUILD_NUMBER}\",' conanfile.py") } } From bb3b7524b60c45b9cabbe2f107b36c1ca849984f Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 13 Sep 2022 10:28:18 -0700 Subject: [PATCH 111/385] Fix regex. --- .jenkins/Jenkinsfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index f795db11..ab9d0bbb 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -17,7 +17,7 @@ pipeline { } } } steps { script { - sh(script: "sed -Ei 's,version = .*\"([[:digit:]]+\\.[[:digit:]]+\\.[[:digit:]]+).*,version = \"\\1-${env.BUILD_NUMBER}\",' conanfile.py") + sh(script: "sed -Ei 's, version = .*\"([[:digit:]]+\\.[[:digit:]]+\\.[[:digit:]]+).*, version = \"\\1-${env.BUILD_NUMBER}\",' conanfile.py") BUILD_MISSING = "--build missing" } } @@ -38,7 +38,7 @@ pipeline { steps { script { PROJECT = sh(script: "grep -m 1 'name =' conanfile.py | awk '{print \$3}' | tr -d '\n' | tr -d '\"'", returnStdout: true) - VER = sh(script: "grep -m 1 'version =' conanfile.py | awk '{print \$3}' | tr -d '\n' | tr -d '\"'", returnStdout: true) + VER = sh(script: "grep -m 1 ' version =' conanfile.py | awk '{print \$3}' | tr -d '\n' | tr -d '\"'", returnStdout: true) CONAN_CHANNEL = sh(script: "echo ${BRANCH_NAME} | sed -E 's,(\\w+-?\\d*)/.*,\\1,' | sed -E 's,-,_,' | tr -d '\n'", returnStdout: true) TAG = "${VER}@${CONAN_USER}/${CONAN_CHANNEL}" slackSend color: '#0063D1', channel: '#sds-ci', message: "*${PROJECT}/${TAG}* is building." @@ -124,7 +124,7 @@ pipeline { branch "${STABLE_BRANCH}" } } } steps { - sh(script: "sed -Ei 's,version = .*\"([[:digit:]]+\\.[[:digit:]]+\\.[[:digit:]]+).*,version = \"\\1-${env.BUILD_NUMBER}\",' conanfile.py") + sh(script: "sed -Ei 's, version = .*\"([[:digit:]]+\\.[[:digit:]]+\\.[[:digit:]]+).*, version = \"\\1-${env.BUILD_NUMBER}\",' conanfile.py") } } From 7b026bf141e79ef648f2a213e4700fcde9dd80c5 Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Wed, 14 Sep 2022 16:12:03 -0700 Subject: [PATCH 112/385] Revised btree with split of multiple files and seperated out --- src/btree/CMakeLists.txt | 18 + src/btree/btree.hpp | 215 +++ src/btree/btree.ipp | 415 ++++++ src/btree/btree_common.ipp | 367 ++++++ src/btree/btree_get_impl.hpp | 50 + src/btree/btree_internal.hpp | 280 ++++ src/btree/btree_kv.hpp | 314 +++++ src/btree/btree_mutate_impl.ipp | 523 ++++++++ src/btree/btree_node.hpp | 607 +++++++++ src/btree/btree_node_mgr.ipp | 480 +++++++ src/btree/btree_query_impl.ipp | 360 +++++ src/btree/btree_remove_impl.ipp | 391 ++++++ src/btree/btree_req.hpp | 242 ++++ src/btree/hs_btree.hpp | 396 ++++++ src/btree/mem_btree.hpp | 100 ++ src/btree/rough/btree_node.cpp | 364 +++++ src/btree/rough/physical_node.hpp | 525 ++++++++ src/btree/rough/sisl_btree.hpp | 1894 +++++++++++++++++++++++++++ src/btree/rough/sisl_btree_impl.hpp | 1653 +++++++++++++++++++++++ src/btree/simple_node.hpp | 301 +++++ src/btree/tests/btree_test_kvs.hpp | 294 +++++ src/btree/tests/test_btree_node.cpp | 347 +++++ src/btree/tests/test_mem_btree.cpp | 151 +++ src/btree/varlen_node.hpp | 695 ++++++++++ 24 files changed, 10982 insertions(+) create mode 100644 src/btree/CMakeLists.txt create mode 100644 src/btree/btree.hpp create mode 100644 src/btree/btree.ipp create mode 100644 src/btree/btree_common.ipp create mode 100644 src/btree/btree_get_impl.hpp create mode 100644 src/btree/btree_internal.hpp create mode 100644 src/btree/btree_kv.hpp create mode 100644 src/btree/btree_mutate_impl.ipp create mode 100644 src/btree/btree_node.hpp create mode 100644 src/btree/btree_node_mgr.ipp create mode 100644 src/btree/btree_query_impl.ipp create mode 100644 src/btree/btree_remove_impl.ipp create mode 100644 src/btree/btree_req.hpp create mode 100644 src/btree/hs_btree.hpp create mode 100644 src/btree/mem_btree.hpp create mode 100644 src/btree/rough/btree_node.cpp create mode 100644 src/btree/rough/physical_node.hpp create mode 100644 src/btree/rough/sisl_btree.hpp create mode 100644 src/btree/rough/sisl_btree_impl.hpp create mode 100644 src/btree/simple_node.hpp create mode 100644 src/btree/tests/btree_test_kvs.hpp create mode 100644 src/btree/tests/test_btree_node.cpp create mode 100644 src/btree/tests/test_mem_btree.cpp create mode 100644 src/btree/varlen_node.hpp diff --git a/src/btree/CMakeLists.txt b/src/btree/CMakeLists.txt new file mode 100644 index 00000000..eb133b9f --- /dev/null +++ b/src/btree/CMakeLists.txt @@ -0,0 +1,18 @@ +if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")) + add_flags("-Wno-unused-parameter -Wno-cast-function-type") +endif() + +include_directories(BEFORE ..) +include_directories(BEFORE .) + +set(TEST_BTREENODE_SOURCE_FILES + tests/test_btree_node.cpp + ) +add_executable(test_btree_node ${TEST_BTREENODE_SOURCE_FILES}) +target_link_libraries(test_btree_node sisl ${COMMON_DEPS} GTest::gtest) + +set(TEST_MEMBTREE_SOURCE_FILES + tests/test_mem_btree.cpp + ) +add_executable(test_mem_btree ${TEST_MEMBTREE_SOURCE_FILES}) +target_link_libraries(test_mem_btree sisl ${COMMON_DEPS} GTest::gtest) \ No newline at end of file diff --git a/src/btree/btree.hpp b/src/btree/btree.hpp new file mode 100644 index 00000000..d68a22d4 --- /dev/null +++ b/src/btree/btree.hpp @@ -0,0 +1,215 @@ +/* + * Created on: 14-May-2016 + * Author: Hari Kadayam + * + * Copyright � 2016 Kadayam, Hari. All rights reserved. + */ +#pragma once + +#include +#include + +#include +#include "btree_internal.hpp" +#include "btree_req.hpp" +#include "btree_kv.hpp" +#include "btree_node.hpp" + +namespace sisl { +namespace btree { + +#ifdef INCASE_WE_NEED_COMMON +template < typename K, typename V > +class BtreeCommon { +public: + void deref_node(BtreeNode< K >* node) = 0; +}; +#endif + +template < typename K > +using BtreeNodePtr = boost::intrusive_ptr< sisl::btree::BtreeNode< K > >; + +template < typename K, typename V > +struct BtreeThreadVariables { + std::vector< btree_locked_node_info< K, V > > wr_locked_nodes; + std::vector< btree_locked_node_info< K, V > > rd_locked_nodes; + BtreeNodePtr< K > force_split_node{nullptr}; +}; + +template < typename K, typename V > +class Btree { +private: + mutable folly::SharedMutexWritePriority m_btree_lock; + bnodeid_t m_root_node_id{empty_bnodeid}; + uint32_t m_max_nodes; + + BtreeMetrics m_metrics; + std::atomic< bool > m_destroyed{false}; + std::atomic< uint64_t > m_total_nodes{0}; + uint32_t m_node_size{4096}; +#ifndef NDEBUG + std::atomic< uint64_t > m_req_id{0}; +#endif + + // This workaround of BtreeThreadVariables is needed instead of directly declaring statics + // to overcome the gcc bug, pointer here: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66944 + static BtreeThreadVariables< K, V >* bt_thread_vars() { + static thread_local BtreeThreadVariables< K, V >* s_ptr{nullptr}; + if (s_ptr == nullptr) { + static thread_local BtreeThreadVariables< K, V > inst; + s_ptr = &inst; + } + return s_ptr; + } + +protected: + BtreeConfig m_bt_cfg; + +public: + /////////////////////////////////////// All External APIs ///////////////////////////// + Btree(const BtreeConfig& cfg); + virtual ~Btree(); + virtual btree_status_t init(void* op_context); + btree_status_t put(BtreeMutateRequest& put_req); + btree_status_t get(BtreeGetRequest& greq) const; + btree_status_t remove(BtreeRemoveRequest& rreq); + btree_status_t query(BtreeQueryRequest& query_req, std::vector< std::pair< K, V > >& out_values) const; + // bool verify_tree(bool update_debug_bm) const; + virtual std::pair< btree_status_t, uint64_t > destroy_btree(void* context); + nlohmann::json get_status(int log_level) const; + void print_tree() const; + nlohmann::json get_metrics_in_json(bool updated = true); + + // static void set_io_flip(); + // static void set_error_flip(); + + // static std::array< std::shared_ptr< BtreeCommon< K, V > >, sizeof(btree_stores_t) > s_btree_stores; + // static std::mutex s_store_reg_mtx; + +protected: + /////////////////////////// Methods the underlying store is expected to handle /////////////////////////// + virtual BtreeNodePtr< K > alloc_node(bool is_leaf, bool& is_new_allocation, + const BtreeNodePtr< K >& copy_from = nullptr) = 0; + virtual BtreeNode< K >* init_node(uint8_t* node_buf, bnodeid_t id, bool init_buf, bool is_leaf); + virtual btree_status_t read_node(bnodeid_t id, BtreeNodePtr< K >& bnode) const = 0; + virtual btree_status_t write_node(const BtreeNodePtr< K >& bn, const BtreeNodePtr< K >& dependent_bn, + void* context); + virtual btree_status_t write_node_sync(const BtreeNodePtr< K >& node, void* context); + virtual void swap_node(const BtreeNodePtr< K >& node1, const BtreeNodePtr< K >& node2, void* context) = 0; + virtual btree_status_t refresh_node(const BtreeNodePtr< K >& bn, bool is_write_modifiable, void* context) const = 0; + virtual void free_node(const BtreeNodePtr< K >& node, void* context) = 0; + + virtual void create_tree_precommit(const BtreeNodePtr< K >& root_node, void* op_context) = 0; + virtual void split_node_precommit(const BtreeNodePtr< K >& parent_node, const BtreeNodePtr< K >& child_node1, + const BtreeNodePtr< K >& child_node2, bool root_split, bool edge_split, + void* op_context) = 0; + virtual void merge_node_precommit(bool is_root_merge, const BtreeNodePtr< K >& parent_node, + uint32_t parent_merge_start_idx, const BtreeNodePtr< K >& child_node1, + const std::vector< BtreeNodePtr< K > >* old_child_nodes, + const std::vector< BtreeNodePtr< K > >* replace_child_nodes, + void* op_context) = 0; + virtual std::string btree_store_type() const = 0; + + /////////////////////////// Methods the application use case is expected to handle /////////////////////////// + virtual int64_t compute_single_put_needed_size(const V& current_val, const V& new_val) const; + virtual int64_t compute_range_put_needed_size(const std::vector< std::pair< K, V > >& existing_kvs, + const V& new_val) const; + virtual btree_status_t custom_kv_select_for_write(uint8_t node_version, + const std::vector< std::pair< K, V > >& match_kv, + std::vector< std::pair< K, V > >& replace_kv, + const BtreeKeyRange& range, + const BtreeRangeUpdateRequest& rureq) const; + virtual btree_status_t custom_kv_select_for_read(uint8_t node_version, + const std::vector< std::pair< K, V > >& match_kv, + std::vector< std::pair< K, V > >& replace_kv, + const BtreeKeyRange& range, const BtreeRangeRequest& qreq) const; + +protected: + /////////////////////////////// Internal Node Management Methods //////////////////////////////////// + std::pair< btree_status_t, bnodeid_t > create_root_node(void* op_context); + btree_status_t read_and_lock_root(bnodeid_t id, BtreeNodePtr< K >& node_ptr, locktype_t int_lock_type, + locktype_t leaf_lock_type, void* context) const; + btree_status_t read_and_lock_child(bnodeid_t child_id, BtreeNodePtr< K >& child_node, + const BtreeNodePtr< K >& parent_node, uint32_t parent_ind, + locktype_t int_lock_type, locktype_t leaf_lock_type, void* context) const; + btree_status_t read_and_lock_sibling(bnodeid_t id, BtreeNodePtr< K >& node_ptr, locktype_t int_lock_type, + locktype_t leaf_lock_type, void* context) const; + btree_status_t read_and_lock_node(bnodeid_t id, BtreeNodePtr< K >& node_ptr, locktype_t int_lock_type, + locktype_t leaf_lock_type, void* context) const; + btree_status_t get_child_and_lock_node(const BtreeNodePtr< K >& node, uint32_t index, BtreeNodeInfo& child_info, + BtreeNodePtr< K >& child_node, locktype_t int_lock_type, + locktype_t leaf_lock_type, void* context) const; + virtual btree_status_t write_node(const BtreeNodePtr< K >& node, void* context); + void read_node_or_fail(bnodeid_t id, BtreeNodePtr< K >& node) const; + btree_status_t upgrade_node(const BtreeNodePtr< K >& my_node, BtreeNodePtr< K > child_node, void* context, + locktype_t& cur_lock, locktype_t& child_cur_lock); + btree_status_t _lock_and_refresh_node(const BtreeNodePtr< K >& node, locktype_t type, void* context, + const char* fname, int line) const; + btree_status_t _lock_node_upgrade(const BtreeNodePtr< K >& node, void* context, const char* fname, int line); + void unlock_node(const BtreeNodePtr< K >& node, locktype_t type) const; + BtreeNodePtr< K > alloc_leaf_node(); + BtreeNodePtr< K > alloc_interior_node(); + void do_free_node(const BtreeNodePtr< K >& node); + std::pair< btree_status_t, uint64_t > do_destroy(); + void observe_lock_time(const BtreeNodePtr< K >& node, locktype_t type, uint64_t time_spent) const; + + static void _start_of_lock(const BtreeNodePtr< K >& node, locktype_t ltype, const char* fname, int line); + static bool remove_locked_node(const BtreeNodePtr< K >& node, locktype_t ltype, + btree_locked_node_info< K, V >* out_info); + static uint64_t end_of_lock(const BtreeNodePtr< K >& node, locktype_t ltype); +#ifndef NDEBUG + static void check_lock_debug(); +#endif + + /////////////////////////////////// Helper Methods /////////////////////////////////////// + btree_status_t post_order_traversal(locktype_t acq_lock, const auto& cb); + btree_status_t post_order_traversal(const BtreeNodePtr< K >& node, locktype_t acq_lock, const auto& cb); + void get_all_kvs(std::vector< pair< K, V > >& kvs) const; + btree_status_t do_destroy(uint64_t& n_freed_nodes, void* context); + uint64_t get_btree_node_cnt() const; + uint64_t get_child_node_cnt(bnodeid_t bnodeid) const; + void to_string(bnodeid_t bnodeid, std::string& buf) const; + void validate_sanity_child(const BtreeNodePtr< K >& parent_node, uint32_t ind) const; + void validate_sanity_next_child(const BtreeNodePtr< K >& parent_node, uint32_t ind) const; + void print_node(const bnodeid_t& bnodeid) const; + + //////////////////////////////// Impl Methods ////////////////////////////////////////// + + ///////// Mutate Impl Methods + btree_status_t do_put(const BtreeNodePtr< K >& my_node, locktype_t curlock, BtreeMutateRequest& put_req, + int ind_hint); + btree_status_t mutate_write_leaf_node(const BtreeNodePtr< K >& my_node, BtreeMutateRequest& req); + btree_status_t check_and_split_node(const BtreeNodePtr< K >& my_node, BtreeMutateRequest& req, int ind_hint, + const BtreeNodePtr< K >& child_node, locktype_t& curlock, + locktype_t& child_curlock, int child_ind, bool& split_occured); + btree_status_t check_split_root(BtreeMutateRequest& req); + btree_status_t split_node(const BtreeNodePtr< K >& parent_node, const BtreeNodePtr< K >& child_node, + uint32_t parent_ind, BtreeKey* out_split_key, bool root_split, void* context); + bool is_split_needed(const BtreeNodePtr< K >& node, const BtreeConfig& cfg, BtreeMutateRequest& req) const; + btree_status_t get_start_and_end_ind(const BtreeNodePtr< K >& node, BtreeMutateRequest& req, int& start_ind, + int& end_ind); + + ///////// Remove Impl Methods + btree_status_t do_remove(const BtreeNodePtr< K >& my_node, locktype_t curlock, BtreeRemoveRequest& rreq); + btree_status_t check_collapse_root(void* context); + btree_status_t merge_nodes(const BtreeNodePtr< K >& parent_node, uint32_t start_indx, uint32_t end_indx, + void* context); + + ///////// Query Impl Methods + btree_status_t do_sweep_query(BtreeNodePtr< K >& my_node, BtreeQueryRequest& qreq, + std::vector< std::pair< K, V > >& out_values) const; + btree_status_t do_traversal_query(const BtreeNodePtr< K >& my_node, BtreeQueryRequest& qreq, + std::vector< std::pair< K, V > >& out_values) const; +#ifdef SERIALIZABLE_QUERY_IMPLEMENTATION + btree_status_t do_serialzable_query(const BtreeNodePtr< K >& my_node, BtreeSerializableQueryRequest& qreq, + std::vector< std::pair< K, V > >& out_values); + btree_status_t sweep_query(BtreeQueryRequest& qreq, std::vector< std::pair< K, V > >& out_values); + btree_status_t serializable_query(BtreeSerializableQueryRequest& qreq, + std::vector< std::pair< K, V > >& out_values); +#endif + + ///////// Get Impl Methods + btree_status_t do_get(const BtreeNodePtr< K >& my_node, BtreeGetRequest& greq) const; +}; +} // namespace btree +} // namespace sisl diff --git a/src/btree/btree.ipp b/src/btree/btree.ipp new file mode 100644 index 00000000..813e3a17 --- /dev/null +++ b/src/btree/btree.ipp @@ -0,0 +1,415 @@ +/* + * Created on: 14-May-2016 + * Author: Hari Kadayam + * + * Copyright � 2016 Kadayam, Hari. All rights reserved. + */ +#pragma once + +#include +#include +#include +#include + +#include +//#include +#include "logging/logging.h" + +#include "fds/buffer.hpp" +#include "btree.hpp" +#include "btree_common.ipp" +#include "btree_node_mgr.ipp" +#include "btree_mutate_impl.ipp" +#include "btree_query_impl.ipp" +#include "btree/btree_node.hpp" + +SISL_LOGGING_DECL(btree) +namespace sisl { +namespace btree { +#if 0 +#define container_of(ptr, type, member) ({ (type*)((char*)ptr - offsetof(type, member)); }) +#endif + +template < typename K, typename V > +Btree< K, V >::Btree(const BtreeConfig& cfg) : + m_metrics{cfg.name().c_str()}, m_node_size{cfg.node_size()}, m_bt_cfg{cfg} { + // calculate number of nodes + const uint32_t node_area_size = BtreeNode< K >::node_area_size(cfg); + uint32_t max_leaf_nodes = + (m_bt_cfg.max_objs() * (m_bt_cfg.max_key_size() + m_bt_cfg.max_value_size())) / node_area_size + 1; + max_leaf_nodes += (100 * max_leaf_nodes) / 60; // Assume 60% btree full + m_max_nodes = max_leaf_nodes + ((double)max_leaf_nodes * 0.05) + 1; // Assume 5% for interior nodes +} + +template < typename K, typename V > +Btree< K, V >::~Btree() = default; + +template < typename K, typename V > +btree_status_t Btree< K, V >::init(void* op_context) { + const auto ret = create_root_node(op_context); + return ret.first; +} + +template < typename K, typename V > +std::pair< btree_status_t, uint64_t > Btree< K, V >::destroy_btree(void* context) { + btree_status_t ret{btree_status_t::success}; + uint64_t n_freed_nodes{0}; + + bool expected = false; + if (!m_destroyed.compare_exchange_strong(expected, true)) { + BT_LOG(DEBUG, "Btree is already being destroyed, ignorining this request"); + return std::make_pair(btree_status_t::not_found, 0); + } + ret = do_destroy(n_freed_nodes, context); + if (ret == btree_status_t::success) { + BT_LOG(DEBUG, "btree(root: {}) {} nodes destroyed successfully", m_root_node_id, n_freed_nodes); + } else { + m_destroyed = false; + BT_LOG(ERROR, "btree(root: {}) nodes destroyed failed, ret: {}", m_root_node_id, ret); + } + + return std::make_pair(ret, n_freed_nodes); +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::put(BtreeMutateRequest& put_req) { + COUNTER_INCREMENT(m_metrics, btree_write_ops_count, 1); + auto acq_lock = locktype_t::READ; + int ind = -1; + bool is_leaf = false; + + m_btree_lock.lock_shared(); + btree_status_t ret = btree_status_t::success; + +retry: +#ifndef NDEBUG + check_lock_debug(); +#endif + BT_LOG_ASSERT_EQ(bt_thread_vars()->rd_locked_nodes.size(), 0); + BT_LOG_ASSERT_EQ(bt_thread_vars()->wr_locked_nodes.size(), 0); + + BtreeNodePtr< K > root; + ret = read_and_lock_root(m_root_node_id, root, acq_lock, acq_lock, put_req_op_ctx(put_req)); + if (ret != btree_status_t::success) { goto out; } + is_leaf = root->is_leaf(); + + if (is_split_needed(root, m_bt_cfg, put_req)) { + // Time to do the split of root. + unlock_node(root, acq_lock); + m_btree_lock.unlock_shared(); + ret = check_split_root(put_req); + BT_LOG_ASSERT_EQ(bt_thread_vars()->rd_locked_nodes.size(), 0); + BT_LOG_ASSERT_EQ(bt_thread_vars()->wr_locked_nodes.size(), 0); + + // We must have gotten a new root, need to start from scratch. + m_btree_lock.lock_shared(); + if (ret != btree_status_t::success) { + LOGERROR("root split failed btree name {}", m_bt_cfg.name()); + goto out; + } + + goto retry; + } else if ((is_leaf) && (acq_lock != locktype_t::WRITE)) { + // Root is a leaf, need to take write lock, instead of read, retry + unlock_node(root, acq_lock); + acq_lock = locktype_t::WRITE; + goto retry; + } else { + ret = do_put(root, acq_lock, put_req, ind); + if (ret == btree_status_t::retry) { + // Need to start from top down again, since there is a race between 2 inserts or deletes. + acq_lock = locktype_t::READ; + BT_LOG(TRACE, "retrying put operation"); + BT_LOG_ASSERT_EQ(bt_thread_vars()->rd_locked_nodes.size(), 0); + BT_LOG_ASSERT_EQ(bt_thread_vars()->wr_locked_nodes.size(), 0); + goto retry; + } + } + +out: + m_btree_lock.unlock_shared(); +#ifndef NDEBUG + check_lock_debug(); +#endif + if (ret != btree_status_t::success && ret != btree_status_t::fast_path_not_possible && + ret != btree_status_t::cp_mismatch) { + BT_LOG(ERROR, "btree put failed {}", ret); + COUNTER_INCREMENT(m_metrics, write_err_cnt, 1); + } + + return ret; +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::get(BtreeGetRequest& greq) const { + btree_status_t ret = btree_status_t::success; + bool is_found; + + m_btree_lock.lock_shared(); + BtreeNodePtr< K > root; + + ret = read_and_lock_root(m_root_node_id, root, locktype_t::READ, locktype_t::READ, get_req_op_ctx(greq)); + if (ret != btree_status_t::success) { goto out; } + + ret = do_get(root, greq); +out: + m_btree_lock.unlock_shared(); + +#ifndef NDEBUG + check_lock_debug(); +#endif + return ret; +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::remove(BtreeRemoveRequest& rreq) { + locktype_t acq_lock = locktype_t::READ; + bool is_found = false; + bool is_leaf = false; + + m_btree_lock.lock_shared(); + +retry: + btree_status_t status = btree_status_t::success; + + BtreeNodePtr< K > root; + status = read_and_lock_root(m_root_node_id, root, acq_lock, acq_lock, remove_req_op_ctx(rreq)); + if (status != btree_status_t::success) { goto out; } + is_leaf = root->is_leaf(); + + if (root->get_total_entries() == 0) { + if (is_leaf) { + // There are no entries in btree. + unlock_node(root, acq_lock); + status = btree_status_t::not_found; + BT_LOG(DEBUG, root, "entry not found in btree"); + goto out; + } + BT_LOG_ASSERT(root->has_valid_edge(), root, "Invalid edge id"); + unlock_node(root, acq_lock); + m_btree_lock.unlock_shared(); + + status = check_collapse_root(remove_req_op_ctx(rreq)); + if (status != btree_status_t::success) { + LOGERROR("check collapse read failed btree name {}", m_bt_cfg.name()); + goto out; + } + + // We must have gotten a new root, need to + // start from scratch. + m_btree_lock.lock_shared(); + goto retry; + } else if ((is_leaf) && (acq_lock != locktype_t::WRITE)) { + // Root is a leaf, need to take write lock, instead + // of read, retry + unlock_node(root, acq_lock); + acq_lock = locktype_t::WRITE; + goto retry; + } else { + status = do_remove(root, acq_lock, rreq); + if (status == btree_status_t::retry) { + // Need to start from top down again, since + // there is a race between 2 inserts or deletes. + acq_lock = locktype_t::READ; + goto retry; + } + } + +out: + m_btree_lock.unlock_shared(); +#ifndef NDEBUG + check_lock_debug(); +#endif + return status; +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::query(BtreeQueryRequest& qreq, std::vector< std::pair< K, V > >& out_values) const { + COUNTER_INCREMENT(m_metrics, btree_query_ops_count, 1); + + btree_status_t ret = btree_status_t::success; + if (qreq.batch_size() == 0) { return ret; } + + m_btree_lock.lock_shared(); + BtreeNodePtr< K > root = nullptr; + ret = read_and_lock_root(m_root_node_id, root, locktype_t::READ, locktype_t::READ, qreq.m_op_context); + if (ret != btree_status_t::success) { goto out; } + + switch (qreq.query_type()) { + case BtreeQueryType::SWEEP_NON_INTRUSIVE_PAGINATION_QUERY: + ret = do_sweep_query(root, qreq, out_values); + break; + + case BtreeQueryType::TREE_TRAVERSAL_QUERY: + ret = do_traversal_query(root, qreq, out_values); + break; + + default: + unlock_node(root, locktype_t::READ); + LOGERROR("Query type {} is not supported yet", qreq.query_type()); + break; + } + + if ((qreq.query_type() == BtreeQueryType::SWEEP_NON_INTRUSIVE_PAGINATION_QUERY || + qreq.query_type() == BtreeQueryType::TREE_TRAVERSAL_QUERY) && + out_values.size() > 0) { + + /* if return is not success then set the cursor to last read. No need to set cursor if user is not + * interested in it. + */ + qreq.search_state().set_cursor_key< K >(out_values.back().first); + + /* check if we finished just at the last key */ + if (out_values.back().first.compare(qreq.input_range().end_key()) == 0) { ret = btree_status_t::success; } + } + +out: + m_btree_lock.unlock_shared(); +#ifndef NDEBUG + check_lock_debug(); +#endif + if (ret != btree_status_t::success && ret != btree_status_t::has_more && + ret != btree_status_t::fast_path_not_possible) { + BT_LOG(ERROR, "btree query failed {}", ret); + COUNTER_INCREMENT(m_metrics, query_err_cnt, 1); + } + return ret; +} + +#if 0 +/** + * @brief : verify btree is consistent and no corruption; + * + * @param update_debug_bm : true or false; + * + * @return : true if btree is not corrupted. + * false if btree is corrupted; + */ +template < typename K, typename V > +bool Btree< K, V >::verify_tree(bool update_debug_bm) const { + m_btree_lock.lock_shared(); + bool ret = verify_node(m_root_node_id, nullptr, -1, update_debug_bm); + m_btree_lock.unlock_shared(); + + return ret; +} +#endif + +/** + * @brief : get the status of this btree; + * + * @param log_level : verbosity level; + * + * @return : status in json form; + */ +template < typename K, typename V > +nlohmann::json Btree< K, V >::get_status(int log_level) const { + nlohmann::json j; + return j; +} + +template < typename K, typename V > +void Btree< K, V >::print_tree() const { + std::string buf; + m_btree_lock.lock_shared(); + to_string(m_root_node_id, buf); + m_btree_lock.unlock_shared(); + + BT_LOG(INFO, "Pre order traversal of tree:\n<{}>", buf); +} + +template < typename K, typename V > +nlohmann::json Btree< K, V >::get_metrics_in_json(bool updated) { + return m_metrics.get_result_in_json(updated); +} + +// TODO: Commenting out flip till we figure out how to move flip dependency inside sisl package. +#if 0 +#ifdef _PRERELEASE +template < typename K, typename V > +static void Btree< K, V >::set_io_flip() { + /* IO flips */ + FlipClient* fc = homestore::HomeStoreFlip::client_instance(); + FlipFrequency freq; + FlipCondition cond1; + FlipCondition cond2; + freq.set_count(2000000000); + freq.set_percent(2); + + FlipCondition null_cond; + fc->create_condition("", flip::Operator::DONT_CARE, (int)1, &null_cond); + + fc->create_condition("nuber of entries in a node", flip::Operator::EQUAL, 0, &cond1); + fc->create_condition("nuber of entries in a node", flip::Operator::EQUAL, 1, &cond2); + fc->inject_noreturn_flip("btree_upgrade_node_fail", {cond1, cond2}, freq); + + fc->create_condition("nuber of entries in a node", flip::Operator::EQUAL, 4, &cond1); + fc->create_condition("nuber of entries in a node", flip::Operator::EQUAL, 2, &cond2); + + fc->inject_retval_flip("btree_delay_and_split", {cond1, cond2}, freq, 20); + fc->inject_retval_flip("btree_delay_and_split_leaf", {cond1, cond2}, freq, 20); + fc->inject_noreturn_flip("btree_parent_node_full", {null_cond}, freq); + fc->inject_noreturn_flip("btree_leaf_node_split", {null_cond}, freq); + fc->inject_retval_flip("btree_upgrade_delay", {null_cond}, freq, 20); + fc->inject_retval_flip("writeBack_completion_req_delay_us", {null_cond}, freq, 20); + fc->inject_noreturn_flip("btree_read_fast_path_not_possible", {null_cond}, freq); +} + +template < typename K, typename V > +static void Btree< K, V >::set_error_flip() { + /* error flips */ + FlipClient* fc = homestore::HomeStoreFlip::client_instance(); + FlipFrequency freq; + freq.set_count(20); + freq.set_percent(10); + + FlipCondition null_cond; + fc->create_condition("", flip::Operator::DONT_CARE, (int)1, &null_cond); + + fc->inject_noreturn_flip("btree_read_fail", {null_cond}, freq); + fc->inject_noreturn_flip("fixed_blkalloc_no_blks", {null_cond}, freq); +} +#endif +#endif + +template < typename K > +void intrusive_ptr_add_ref(BtreeNode< K >* node) { + node->m_refcount.increment(1); +} + +template < typename K > +void intrusive_ptr_release(BtreeNode< K >* node) { + if (node->m_refcount.decrement_testz(1)) { delete node; } +} + +#ifdef INCASE_WE_NEED_COMMON +template < typename K, typename V > +bool Btree< K, V >::create_store_common(btree_store_t store_type, + std::function< std::shared_ptr< BtreeCommon< K, V > >() >&& create_cb) { + std::unique_lock lg(s_store_reg_mtx); + if (s_btree_stores[int_cast(store_type)] != nullptr) { return false; } + s_btree_stores[int_cast(store_type)] = create_cb(); + return true; +} + +// Get doesn't need to take any lock, since the create/register is once and started once. Please don't add the lock +// here as this is called in critical path and completely unneccessary. +template < typename K, typename V > +BtreeCommon< K, V >* Btree< K, V >::get_store_common(uint8_t store_type) { + return s_btree_stores[store_type].get(); +} + +friend void intrusive_ptr_add_ref(BtreeNode< K >* node) { node->m_refcount.increment(1); } +friend void intrusive_ptr_release(BtreeNode< K >* node) { Btree< K, V >::get_store_common()->deref_node(node); } + +// static inline const char* _type_desc(const BtreeNodePtr< K >& n) { return n->is_leaf() ? "L" : "I"; } + +template < typename K, typename V > +std::array< std::shared_ptr< BtreeCommon< K, V > >, sizeof(btree_stores_t) > Btree< K, V >::s_btree_stores; + +template < typename K, typename V > +std::mutex Btree< K, V >::s_store_reg_mtx; +#endif + +} // namespace btree +} // namespace sisl diff --git a/src/btree/btree_common.ipp b/src/btree/btree_common.ipp new file mode 100644 index 00000000..22cc337b --- /dev/null +++ b/src/btree/btree_common.ipp @@ -0,0 +1,367 @@ +#pragma once +#include "btree.hpp" + +namespace sisl { +namespace btree { + +template < typename K, typename V > +btree_status_t Btree< K, V >::post_order_traversal(locktype_t ltype, const auto& cb) { + BtreeNodePtr< K > root; + + if (ltype == locktype_t::READ) { + m_btree_lock.lock_shared(); + } else if (ltype == locktype_t::WRITE) { + m_btree_lock.lock(); + } + + btree_status_t ret{btree_status_t::success}; + if (m_root_node_id != empty_bnodeid) { + read_and_lock_root(m_root_node_id, root, ltype, ltype, nullptr); + if (ret != btree_status_t::success) { goto done; } + + ret = post_order_traversal(root, ltype, cb); + } +done: + if (ltype == locktype_t::READ) { + m_btree_lock.unlock_shared(); + } else if (ltype == locktype_t::WRITE) { + m_btree_lock.unlock(); + } + return ret; +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::post_order_traversal(const BtreeNodePtr< K >& node, locktype_t ltype, const auto& cb) { + uint32_t i{0}; + btree_status_t ret = btree_status_t::success; + + if (!node->is_leaf()) { + BtreeNodeInfo child_info; + while (i <= node->get_total_entries()) { + if (i == node->get_total_entries()) { + if (!node->has_valid_edge()) { break; } + child_info.set_bnode_id(node->get_edge_id()); + } else { + node->get_nth_value(i, &child_info, false /* copy */); + } + + BtreeNodePtr< K > child; + ret = read_and_lock_child(child_info.bnode_id(), child, node, i, ltype, ltype, nullptr); + if (ret != btree_status_t::success) { return ret; } + ret = post_order_traversal(child, ltype, cb); + unlock_node(child, ltype); + ++i; + } + cb(node, false /* is_leaf */); + } + + if (ret == btree_status_t::success) { cb(node, true /* is_leaf */); } + return ret; +} + +template < typename K, typename V > +void Btree< K, V >::get_all_kvs(std::vector< pair< K, V > >& kvs) const { + post_order_traversal(locktype_t::READ, [this, &kvs](const auto& node, bool is_leaf) { + if (!is_leaf) { node->get_all_kvs(kvs); } + }); +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::do_destroy(uint64_t& n_freed_nodes, void* context) { + return post_order_traversal(locktype_t::WRITE, [this, &n_freed_nodes, context](const auto& node, bool is_leaf) { + free_node(node, context); + ++n_freed_nodes; + }); +} + +template < typename K, typename V > +uint64_t Btree< K, V >::get_btree_node_cnt() const { + uint64_t cnt = 1; /* increment it for root */ + m_btree_lock.lock_shared(); + cnt += get_child_node_cnt(m_root_node_id); + m_btree_lock.unlock_shared(); + return cnt; +} + +template < typename K, typename V > +uint64_t Btree< K, V >::get_child_node_cnt(bnodeid_t bnodeid) const { + uint64_t cnt{0}; + BtreeNodePtr< K > node; + locktype_t acq_lock = locktype_t::READ; + + if (read_and_lock_node(bnodeid, node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { return cnt; } + if (!node->is_leaf()) { + uint32_t i = 0; + while (i < node->get_total_entries()) { + BtreeNodeInfo p = node->get(i, false); + cnt += get_child_node_cnt(p.bnode_id()) + 1; + ++i; + } + if (node->has_valid_edge()) { cnt += get_child_node_cnt(node->get_edge_id()) + 1; } + } + unlock_node(node, acq_lock); + return cnt; +} + +template < typename K, typename V > +void Btree< K, V >::to_string(bnodeid_t bnodeid, std::string& buf) const { + BtreeNodePtr< K > node; + + locktype_t acq_lock = locktype_t::READ; + + if (read_and_lock_node(bnodeid, node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { return; } + fmt::format_to(std::back_inserter(buf), "{}\n", node->to_string(true /* print_friendly */)); + + if (!node->is_leaf()) { + uint32_t i = 0; + while (i < node->get_total_entries()) { + BtreeNodeInfo p; + node->get_nth_value(i, &p, false); + to_string(p.bnode_id(), buf); + ++i; + } + if (node->has_valid_edge()) { to_string(node->get_edge_id(), buf); } + } + unlock_node(node, acq_lock); +} + +#if 0 + btree_status_t merge_node_replay(btree_journal_entry* jentry, const btree_cp_ptr& bcp) { + BtreeNodePtr< K > parent_node = (jentry->is_root) ? read_node(m_root_node_id) : read_node(jentry->parent_node.node_id); + + // Parent already went ahead of the journal entry, return done + if (parent_node->get_gen() >= jentry->parent_node.node_gen) { return btree_status_t::replay_not_needed; } + } +#endif + +template < typename K, typename V > +void Btree< K, V >::validate_sanity_child(const BtreeNodePtr< K >& parent_node, uint32_t ind) const { + BtreeNodeInfo child_info; + K child_first_key; + K child_last_key; + K parent_key; + + parent_node->get(ind, &child_info, false /* copy */); + BtreeNodePtr< K > child_node = nullptr; + auto ret = read_node(child_info.bnode_id(), child_node); + BT_REL_ASSERT_EQ(ret, btree_status_t::success, "read failed, reason: {}", ret); + if (child_node->get_total_entries() == 0) { + auto parent_entries = parent_node->get_total_entries(); + if (!child_node->is_leaf()) { // leaf node or edge node can have 0 entries + BT_REL_ASSERT_EQ(((parent_node->has_valid_edge() && ind == parent_entries)), true); + } + return; + } + child_node->get_first_key(&child_first_key); + child_node->get_last_key(&child_last_key); + BT_REL_ASSERT_LE(child_first_key.compare(&child_last_key), 0); + if (ind == parent_node->get_total_entries()) { + BT_REL_ASSERT_EQ(parent_node->has_valid_edge(), true); + if (ind > 0) { + parent_node->get_nth_key(ind - 1, &parent_key, false); + BT_REL_ASSERT_GT(child_first_key.compare(&parent_key), 0); + BT_REL_ASSERT_LT(parent_key.compare_start(&child_first_key), 0); + } + } else { + parent_node->get_nth_key(ind, &parent_key, false); + BT_REL_ASSERT_LE(child_first_key.compare(&parent_key), 0) + BT_REL_ASSERT_LE(child_last_key.compare(&parent_key), 0) + BT_REL_ASSERT_GE(parent_key.compare_start(&child_first_key), 0) + BT_REL_ASSERT_GE(parent_key.compare_start(&child_first_key), 0) + if (ind != 0) { + parent_node->get_nth_key(ind - 1, &parent_key, false); + BT_REL_ASSERT_GT(child_first_key.compare(&parent_key), 0) + BT_REL_ASSERT_LT(parent_key.compare_start(&child_first_key), 0) + } + } +} + +template < typename K, typename V > +void Btree< K, V >::validate_sanity_next_child(const BtreeNodePtr< K >& parent_node, uint32_t ind) const { + BtreeNodeInfo child_info; + K child_key; + K parent_key; + + if (parent_node->has_valid_edge()) { + if (ind == parent_node->get_total_entries()) { return; } + } else { + if (ind == parent_node->get_total_entries() - 1) { return; } + } + parent_node->get(ind + 1, &child_info, false /* copy */); + + BtreeNodePtr< K > child_node = nullptr; + auto ret = read_node(child_info.bnode_id(), child_node); + BT_REL_ASSERT_EQ(ret, btree_status_t::success, "read failed, reason: {}", ret); + + if (child_node->get_total_entries() == 0) { + auto parent_entries = parent_node->get_total_entries(); + if (!child_node->is_leaf()) { // leaf node can have 0 entries + BT_REL_ASSERT_EQ(((parent_node->has_valid_edge() && ind == parent_entries) || (ind = parent_entries - 1)), + true); + } + return; + } + /* in case of merge next child will never have zero entries otherwise it would have been merged */ + BT_NODE_REL_ASSERT_NE(child_node->get_total_entries(), 0, child_node); + child_node->get_first_key(&child_key); + parent_node->get_nth_key(ind, &parent_key, false); + BT_REL_ASSERT_GT(child_key.compare(&parent_key), 0) + BT_REL_ASSERT_LT(parent_key.compare_start(&child_key), 0) +} + +template < typename K, typename V > +void Btree< K, V >::print_node(const bnodeid_t& bnodeid) const { + std::string buf; + BtreeNodePtr< K > node; + + m_btree_lock.lock_shared(); + locktype_t acq_lock = locktype_t::READ; + if (read_and_lock_node(bnodeid, node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { goto done; } + buf = node->to_string(true /* print_friendly */); + unlock_node(node, acq_lock); + +done: + m_btree_lock.unlock_shared(); + + BT_LOG(INFO, "Node: <{}>", buf); +} + +#if 0 +template < typename K, typename V > +void Btree< K, V >::diff(Btree* other, uint32_t param, vector< pair< K, V > >* diff_kv) { + std::vector< pair< K, V > > my_kvs, other_kvs; + + get_all_kvs(&my_kvs); + other->get_all_kvs(&other_kvs); + auto it1 = my_kvs.begin(); + auto it2 = other_kvs.begin(); + + K k1, k2; + V v1, v2; + + if (it1 != my_kvs.end()) { + k1 = it1->first; + v1 = it1->second; + } + if (it2 != other_kvs.end()) { + k2 = it2->first; + v2 = it2->second; + } + + while ((it1 != my_kvs.end()) && (it2 != other_kvs.end())) { + if (k1.preceeds(&k2)) { + /* k1 preceeds k2 - push k1 and continue */ + diff_kv->emplace_back(make_pair(k1, v1)); + it1++; + if (it1 == my_kvs.end()) { break; } + k1 = it1->first; + v1 = it1->second; + } else if (k1.succeeds(&k2)) { + /* k2 preceeds k1 - push k2 and continue */ + diff_kv->emplace_back(make_pair(k2, v2)); + it2++; + if (it2 == other_kvs.end()) { break; } + k2 = it2->first; + v2 = it2->second; + } else { + /* k1 and k2 overlaps */ + std::vector< pair< K, V > > overlap_kvs; + diff_read_next_t to_read = READ_BOTH; + + v1.get_overlap_diff_kvs(&k1, &v1, &k2, &v2, param, to_read, overlap_kvs); + for (auto ovr_it = overlap_kvs.begin(); ovr_it != overlap_kvs.end(); ovr_it++) { + diff_kv->emplace_back(make_pair(ovr_it->first, ovr_it->second)); + } + + switch (to_read) { + case READ_FIRST: + it1++; + if (it1 == my_kvs.end()) { + // Add k2,v2 + diff_kv->emplace_back(make_pair(k2, v2)); + it2++; + break; + } + k1 = it1->first; + v1 = it1->second; + break; + + case READ_SECOND: + it2++; + if (it2 == other_kvs.end()) { + diff_kv->emplace_back(make_pair(k1, v1)); + it1++; + break; + } + k2 = it2->first; + v2 = it2->second; + break; + + case READ_BOTH: + /* No tail part */ + it1++; + if (it1 == my_kvs.end()) { break; } + k1 = it1->first; + v1 = it1->second; + it2++; + if (it2 == my_kvs.end()) { break; } + k2 = it2->first; + v2 = it2->second; + break; + + default: + LOGERROR("ERROR: Getting Overlapping Diff KVS for {}:{}, {}:{}, to_read {}", k1, v1, k2, v2, to_read); + /* skip both */ + it1++; + if (it1 == my_kvs.end()) { break; } + k1 = it1->first; + v1 = it1->second; + it2++; + if (it2 == my_kvs.end()) { break; } + k2 = it2->first; + v2 = it2->second; + break; + } + } + } + + while (it1 != my_kvs.end()) { + diff_kv->emplace_back(make_pair(it1->first, it1->second)); + it1++; + } + + while (it2 != other_kvs.end()) { + diff_kv->emplace_back(make_pair(it2->first, it2->second)); + it2++; + } +} + +void merge(Btree* other, match_item_cb_t< K, V > merge_cb) { + std::vector< pair< K, V > > other_kvs; + + other->get_all_kvs(&other_kvs); + for (auto it = other_kvs.begin(); it != other_kvs.end(); it++) { + K k = it->first; + V v = it->second; + BRangeCBParam local_param(k, v); + K start(k.start(), 1), end(k.end(), 1); + + auto search_range = BtreeSearchRange(start, true, end, true); + BtreeUpdateRequest< K, V > ureq(search_range, merge_cb, nullptr, (BRangeCBParam*)&local_param); + range_put(k, v, btree_put_type::APPEND_IF_EXISTS_ELSE_INSERT, nullptr, nullptr, ureq); + } +} +#endif + +#ifdef USE_STORE_TYPE +template < btree_store_type BtreeStoreType, typename K, typename V, btree_node_type InteriorNodeType, + btree_node_type LeafNodeType > +thread_local homeds::reserve_vector< btree_locked_node_info, 5 > btree_t::wr_locked_nodes; + +template < btree_store_type BtreeStoreType, typename K, typename V, btree_node_type InteriorNodeType, + btree_node_type LeafNodeType > +thread_local homeds::reserve_vector< btree_locked_node_info, 5 > btree_t::rd_locked_nodes; +#endif + +} // namespace btree +} // namespace sisl diff --git a/src/btree/btree_get_impl.hpp b/src/btree/btree_get_impl.hpp new file mode 100644 index 00000000..eea09c68 --- /dev/null +++ b/src/btree/btree_get_impl.hpp @@ -0,0 +1,50 @@ +#pragma once +#include "btree.hpp" + +namespace sisl { +namespace btree { +template < typename K, typename V > +btree_status_t Btree< K, V >::do_get(const BtreeNodePtr< K >& my_node, BtreeGetRequest& greq) const { + btree_status_t ret = btree_status_t::success; + bool is_child_lock = false; + locktype_t child_locktype; + + if (my_node->is_leaf()) { + if (is_get_any_request(greq)) { + auto& gareq = to_get_any_req(greq); + const auto [found, idx] = + my_node->get_any(gareq.m_range, gareq.m_outkey.get(), gareq.m_outval.get(), true, true); + ret = found ? btree_status_t::success : btree_status_t::not_found; + } else { + auto& sgreq = to_single_get_req(greq); + const auto [found, idx] = my_node->find(sgreq.key(), sgreq.m_outval.get(), true); + ret = found ? btree_status_t::success : btree_status_t::not_found; + } + unlock_node(my_node, locktype_t::READ); + return ret; + } + + BtreeNodeInfo child_info; + bool found; + uint32_t idx; + if (is_get_any_request(greq)) { + std::tie(found, idx) = my_node->find(to_get_any_req(greq).m_range.start_key(), &child_info, true); + } else { + std::tie(found, idx) = my_node->find(to_single_get_req(greq).key(), &child_info, true); + } + + ASSERT_IS_VALID_INTERIOR_CHILD_INDX(found, idx, my_node); + BtreeNodePtr< K > child_node; + child_locktype = locktype_t::READ; + ret = read_and_lock_child(child_info.bnode_id(), child_node, my_node, idx, child_locktype, child_locktype, nullptr); + if (ret != btree_status_t::success) { goto out; } + + unlock_node(my_node, locktype_t::READ); + return (do_get(child_node, greq)); + +out: + unlock_node(my_node, locktype_t::READ); + return ret; +} +} // namespace btree +} // namespace sisl diff --git a/src/btree/btree_internal.hpp b/src/btree/btree_internal.hpp new file mode 100644 index 00000000..00bffc1a --- /dev/null +++ b/src/btree/btree_internal.hpp @@ -0,0 +1,280 @@ +#pragma once + +#include +#include +#include +#include +#include "fds/utils.hpp" + +namespace sisl { +namespace btree { + +#define _BT_LOG_METHOD_IMPL(req, btcfg, node) \ + ([&](fmt::memory_buffer& buf, const char* msgcb, auto&&... args) -> bool { \ + fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[{}:{}] "}, \ + fmt::make_format_args(file_name(__FILE__), __LINE__)); \ + BOOST_PP_IF(BOOST_VMD_IS_EMPTY(req), BOOST_PP_EMPTY, \ + BOOST_PP_IDENTITY(fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[req={}] "}, \ + fmt::make_format_args(req->to_string())))) \ + (); \ + BOOST_PP_IF(BOOST_VMD_IS_EMPTY(btcfg), BOOST_PP_EMPTY, \ + BOOST_PP_IDENTITY(fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[btree={}] "}, \ + fmt::make_format_args(btcfg.name())))) \ + (); \ + BOOST_PP_IF(BOOST_VMD_IS_EMPTY(node), BOOST_PP_EMPTY, \ + BOOST_PP_IDENTITY(fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[node={}] "}, \ + fmt::make_format_args(node->to_string())))) \ + (); \ + fmt::vformat_to(fmt::appender{buf}, fmt::string_view{msgcb}, \ + fmt::make_format_args(std::forward< decltype(args) >(args)...)); \ + return true; \ + }) + +#define BT_LOG(level, msg, ...) \ + { LOG##level##MOD_FMT(btree, (_BT_LOG_METHOD_IMPL(, this->m_bt_cfg, )), msg, ##__VA_ARGS__); } + +#define BT_NODE_LOG(level, node, msg, ...) \ + { LOG##level##MOD_FMT(btree, (_BT_LOG_METHOD_IMPL(, this->m_bt_cfg, node)), msg, ##__VA_ARGS__); } + +#if 0 +#define THIS_BT_LOG(level, req, msg, ...) \ + { \ + LOG##level##MOD_FMT( \ + btree, ([&](fmt::memory_buffer& buf, const char* msgcb, auto&&... args) -> bool { \ + fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[{}:{}] "}, \ + fmt::make_format_args(file_name(__FILE__), __LINE__)); \ + BOOST_PP_IF(BOOST_VMD_IS_EMPTY(req), BOOST_PP_EMPTY, \ + BOOST_PP_IDENTITY(fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[req={}] "}, \ + fmt::make_format_args(req->to_string())))) \ + (); \ + fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[btree={}] "}, \ + fmt::make_format_args(m_cfg.name())); \ + fmt::vformat_to(fmt::appender{buf}, fmt::string_view{msgcb}, \ + fmt::make_format_args(std::forward< decltype(args) >(args)...)); \ + return true; \ + }), \ + msg, ##__VA_ARGS__); \ + } + +#define THIS_NODE_LOG(level, btcfg, msg, ...) \ + { \ + LOG##level##MOD_FMT( \ + btree, ([&](fmt::memory_buffer& buf, const char* msgcb, auto&&... args) -> bool { \ + fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[{}:{}] "}, \ + fmt::make_format_args(file_name(__FILE__), __LINE__)); \ + fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[btree={}] "}, \ + fmt::make_format_args(btcfg.name())); \ + BOOST_PP_IF(BOOST_VMD_IS_EMPTY(req), BOOST_PP_EMPTY, \ + BOOST_PP_IDENTITY(fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[node={}] "}, \ + fmt::make_format_args(to_string())))) \ + (); \ + fmt::vformat_to(fmt::appender{buf}, fmt::string_view{msgcb}, \ + fmt::make_format_args(std::forward< decltype(args) >(args)...)); \ + return true; \ + }), \ + msg, ##__VA_ARGS__); \ + } + +#define BT_ASSERT(assert_type, cond, req, ...) \ + { \ + assert_type##_ASSERT_FMT( \ + cond, \ + [&](fmt::memory_buffer& buf, const char* msgcb, auto&&... args) -> bool { \ + BOOST_PP_IF(BOOST_VMD_IS_EMPTY(req), BOOST_PP_EMPTY, \ + BOOST_PP_IDENTITY(fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"\n[req={}] "}, \ + fmt::make_format_args(req->to_string())))) \ + (); \ + fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[btree={}] "}, \ + fmt::make_format_args(m_cfg.name())); \ + fmt::vformat_to(fmt::appender{buf}, fmt::string_view{msgcb}, \ + fmt::make_format_args(std::forward< decltype(args) >(args)...)); \ + return true; \ + }, \ + msg, ##__VA_ARGS__); \ + } + +#define BT_ASSERT_CMP(assert_type, val1, cmp, val2, req, ...) \ + { \ + assert_type##_ASSERT_CMP( \ + val1, cmp, val2, \ + [&](fmt::memory_buffer& buf, const char* msgcb, auto&&... args) -> bool { \ + BOOST_PP_IF(BOOST_VMD_IS_EMPTY(req), BOOST_PP_EMPTY, \ + BOOST_PP_IDENTITY(fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"\n[req={}] "}, \ + fmt::make_format_args(req->to_string())))) \ + (); \ + fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[btree={}] "}, \ + fmt::make_format_args(m_cfg.name())); \ + fmt::vformat_to(fmt::appender{buf}, fmt::string_view{msgcb}, \ + fmt::make_format_args(std::forward< decltype(args) >(args)...)); \ + return true; \ + }, \ + msg, ##__VA_ARGS__); \ + } +#endif + +#define BT_ASSERT(assert_type, cond, ...) \ + { assert_type##_ASSERT_FMT(cond, _BT_LOG_METHOD_IMPL(, this->m_bt_cfg, ), ##__VA_ARGS__); } + +#define BT_ASSERT_CMP(assert_type, val1, cmp, val2, ...) \ + { assert_type##_ASSERT_CMP(val1, cmp, val2, _BT_LOG_METHOD_IMPL(, this->m_bt_cfg, ), ##__VA_ARGS__); } + +#define BT_DBG_ASSERT(cond, ...) BT_ASSERT(DEBUG, cond, ##__VA_ARGS__) +#define BT_DBG_ASSERT_EQ(val1, val2, ...) BT_ASSERT_CMP(DEBUG, val1, ==, val2, ##__VA_ARGS__) +#define BT_DBG_ASSERT_NE(val1, val2, ...) BT_ASSERT_CMP(DEBUG, val1, !=, val2, ##__VA_ARGS__) +#define BT_DBG_ASSERT_LT(val1, val2, ...) BT_ASSERT_CMP(DEBUG, val1, <, val2, ##__VA_ARGS__) +#define BT_DBG_ASSERT_LE(val1, val2, ...) BT_ASSERT_CMP(DEBUG, val1, <=, val2, ##__VA_ARGS__) +#define BT_DBG_ASSERT_GT(val1, val2, ...) BT_ASSERT_CMP(DEBUG, val1, >, val2, ##__VA_ARGS__) +#define BT_DBG_ASSERT_GE(val1, val2, ...) BT_ASSERT_CMP(DEBUG, val1, >=, val2, ##__VA_ARGS__) + +#define BT_LOG_ASSERT(cond, ...) BT_ASSERT(LOGMSG, cond, ##__VA_ARGS__) +#define BT_LOG_ASSERT_EQ(val1, val2, ...) BT_ASSERT_CMP(LOGMSG, val1, ==, val2, ##__VA_ARGS__) +#define BT_LOG_ASSERT_NE(val1, val2, ...) BT_ASSERT_CMP(LOGMSG, val1, !=, val2, ##__VA_ARGS__) +#define BT_LOG_ASSERT_LT(val1, val2, ...) BT_ASSERT_CMP(LOGMSG, val1, <, val2, ##__VA_ARGS__) +#define BT_LOG_ASSERT_LE(val1, val2, ...) BT_ASSERT_CMP(LOGMSG, val1, <=, val2, ##__VA_ARGS__) +#define BT_LOG_ASSERT_GT(val1, val2, ...) BT_ASSERT_CMP(LOGMSG, val1, >, val2, ##__VA_ARGS__) +#define BT_LOG_ASSERT_GE(val1, val2, ...) BT_ASSERT_CMP(LOGMSG, val1, >=, val2, ##__VA_ARGS__) + +#define BT_REL_ASSERT(cond, ...) BT_ASSERT(RELEASE, cond, ##__VA_ARGS__) +#define BT_REL_ASSERT_EQ(val1, val2, ...) BT_ASSERT_CMP(RELEASE, val1, ==, val2, ##__VA_ARGS__) +#define BT_REL_ASSERT_NE(val1, val2, ...) BT_ASSERT_CMP(RELEASE, val1, !=, val2, ##__VA_ARGS__) +#define BT_REL_ASSERT_LT(val1, val2, ...) BT_ASSERT_CMP(RELEASE, val1, <, val2, ##__VA_ARGS__) +#define BT_REL_ASSERT_LE(val1, val2, ...) BT_ASSERT_CMP(RELEASE, val1, <=, val2, ##__VA_ARGS__) +#define BT_REL_ASSERT_GT(val1, val2, ...) BT_ASSERT_CMP(RELEASE, val1, >, val2, ##__VA_ARGS__) +#define BT_REL_ASSERT_GE(val1, val2, ...) BT_ASSERT_CMP(RELEASE, val1, >=, val2, ##__VA_ARGS__) + +#define BT_NODE_ASSERT(assert_type, cond, node, ...) \ + { assert_type##_ASSERT_FMT(cond, _BT_LOG_METHOD_IMPL(, m_bt_cfg, node), ##__VA_ARGS__); } + +#define BT_NODE_ASSERT_CMP(assert_type, val1, cmp, val2, node, ...) \ + { assert_type##_ASSERT_CMP(val1, cmp, val2, _BT_LOG_METHOD_IMPL(, m_bt_cfg, node), ##__VA_ARGS__); } + +#define BT_NODE_DBG_ASSERT(cond, ...) BT_NODE_ASSERT(DEBUG, cond, ##__VA_ARGS__) +#define BT_NODE_DBG_ASSERT_EQ(val1, val2, ...) BT_NODE_ASSERT_CMP(DEBUG, val1, ==, val2, ##__VA_ARGS__) +#define BT_NODE_DBG_ASSERT_NE(val1, val2, ...) BT_NODE_ASSERT_CMP(DEBUG, val1, !=, val2, ##__VA_ARGS__) +#define BT_NODE_DBG_ASSERT_LT(val1, val2, ...) BT_NODE_ASSERT_CMP(DEBUG, val1, <, val2, ##__VA_ARGS__) +#define BT_NODE_DBG_ASSERT_LE(val1, val2, ...) BT_NODE_ASSERT_CMP(DEBUG, val1, <=, val2, ##__VA_ARGS__) +#define BT_NODE_DBG_ASSERT_GT(val1, val2, ...) BT_NODE_ASSERT_CMP(DEBUG, val1, >, val2, ##__VA_ARGS__) +#define BT_NODE_DBG_ASSERT_GE(val1, val2, ...) BT_NODE_ASSERT_CMP(DEBUG, val1, >=, val2, ##__VA_ARGS__) + +#define BT_NODE_LOG_ASSERT(cond, ...) BT_NODE_ASSERT(LOGMSG, cond, ##__VA_ARGS__) +#define BT_NODE_LOG_ASSERT_EQ(val1, val2, ...) BT_NODE_ASSERT_CMP(LOGMSG, val1, ==, val2, ##__VA_ARGS__) +#define BT_NODE_LOG_ASSERT_NE(val1, val2, ...) BT_NODE_ASSERT_CMP(LOGMSG, val1, !=, val2, ##__VA_ARGS__) +#define BT_NODE_LOG_ASSERT_LT(val1, val2, ...) BT_NODE_ASSERT_CMP(LOGMSG, val1, <, val2, ##__VA_ARGS__) +#define BT_NODE_LOG_ASSERT_LE(val1, val2, ...) BT_NODE_ASSERT_CMP(LOGMSG, val1, <=, val2, ##__VA_ARGS__) +#define BT_NODE_LOG_ASSERT_GT(val1, val2, ...) BT_NODE_ASSERT_CMP(LOGMSG, val1, >, val2, ##__VA_ARGS__) +#define BT_NODE_LOG_ASSERT_GE(val1, val2, ...) BT_NODE_ASSERT_CMP(LOGMSG, val1, >=, val2, ##__VA_ARGS__) + +#define BT_NODE_REL_ASSERT(cond, ...) BT_NODE_ASSERT(RELEASE, cond, ##__VA_ARGS__) +#define BT_NODE_REL_ASSERT_EQ(val1, val2, ...) BT_NODE_ASSERT_CMP(RELEASE, val1, ==, val2, ##__VA_ARGS__) +#define BT_NODE_REL_ASSERT_NE(val1, val2, ...) BT_NODE_ASSERT_CMP(RELEASE, val1, !=, val2, ##__VA_ARGS__) +#define BT_NODE_REL_ASSERT_LT(val1, val2, ...) BT_NODE_ASSERT_CMP(RELEASE, val1, <, val2, ##__VA_ARGS__) +#define BT_NODE_REL_ASSERT_LE(val1, val2, ...) BT_NODE_ASSERT_CMP(RELEASE, val1, <=, val2, ##__VA_ARGS__) +#define BT_NODE_REL_ASSERT_GT(val1, val2, ...) BT_NODE_ASSERT_CMP(RELEASE, val1, >, val2, ##__VA_ARGS__) +#define BT_NODE_REL_ASSERT_GE(val1, val2, ...) BT_NODE_ASSERT_CMP(RELEASE, val1, >=, val2, ##__VA_ARGS__) + +#define ASSERT_IS_VALID_INTERIOR_CHILD_INDX(is_found, found_idx, node) \ + BT_NODE_DBG_ASSERT((!is_found || ((int)found_idx < (int)node->get_total_entries()) || node->has_valid_edge()), \ + node, "Is_valid_interior_child_check_failed: found_idx={}", found_idx) + +using bnodeid_t = uint64_t; +static constexpr bnodeid_t empty_bnodeid = std::numeric_limits< bnodeid_t >::max(); +static constexpr uint16_t init_crc_16 = 0x8005; + +VENUM(btree_node_type, uint32_t, FIXED = 0, VAR_VALUE = 1, VAR_KEY = 2, VAR_OBJECT = 3, PREFIX = 4, COMPACT = 5) + +#ifdef USE_STORE_TYPE +VENUM(btree_store_type, uint8_t, MEM = 0, SSD = 1) +#endif + +ENUM(btree_status_t, uint32_t, success, not_found, item_found, closest_found, closest_removed, retry, has_more, + read_failed, write_failed, stale_buf, refresh_failed, put_failed, space_not_avail, split_failed, insert_failed, + cp_mismatch, merge_not_required, merge_failed, replay_not_needed, fast_path_not_possible, resource_full, + update_debug_bm_failed, crc_mismatch) + +struct BtreeConfig { + uint64_t m_max_objs{0}; + uint32_t m_max_key_size{0}; + uint32_t m_max_value_size{0}; + uint32_t m_node_size; + + uint8_t m_ideal_fill_pct{90}; + uint8_t m_split_pct{50}; + + bool m_custom_kv{false}; // If Key/Value needs some transformation before read or write + btree_node_type m_leaf_node_type{btree_node_type::VAR_OBJECT}; + btree_node_type m_int_node_type{btree_node_type::VAR_KEY}; + std::string m_btree_name; // Unique name for the btree + + BtreeConfig(uint32_t node_size, const std::string& btree_name = "") : + m_node_size{node_size}, m_btree_name{btree_name.empty() ? std::string("btree") : btree_name} {} + + virtual ~BtreeConfig() = default; + uint32_t node_size() const { return m_node_size; }; + uint32_t max_key_size() const { return m_max_key_size; } + void set_max_key_size(uint32_t max_key_size) { m_max_key_size = max_key_size; } + + uint64_t max_objs() const { return m_max_objs; } + void set_max_objs(uint64_t max_objs) { m_max_objs = max_objs; } + + uint32_t max_value_size() const { return m_max_value_size; } + + void set_max_value_size(uint32_t max_value_size) { m_max_value_size = max_value_size; } + + uint32_t split_size(uint32_t filled_size) const { return uint32_cast(filled_size * m_split_pct) / 100; } + const std::string& name() const { return m_btree_name; } + + bool is_custom_kv() const { return m_custom_kv; } + btree_node_type leaf_node_type() const { return m_leaf_node_type; } + btree_node_type interior_node_type() const { return m_int_node_type; } +}; + +class BtreeMetrics : public MetricsGroup { +public: + explicit BtreeMetrics(const char* inst_name) : MetricsGroup("Btree", inst_name) { + REGISTER_COUNTER(btree_obj_count, "Btree object count", _publish_as::publish_as_gauge); + REGISTER_COUNTER(btree_leaf_node_count, "Btree Leaf node count", "btree_node_count", {{"node_type", "leaf"}}, + _publish_as::publish_as_gauge); + REGISTER_COUNTER(btree_int_node_count, "Btree Interior node count", "btree_node_count", + {{"node_type", "interior"}}, _publish_as::publish_as_gauge); + REGISTER_COUNTER(btree_split_count, "Total number of btree node splits"); + REGISTER_COUNTER(insert_failed_count, "Total number of inserts failed"); + REGISTER_COUNTER(btree_merge_count, "Total number of btree node merges"); + REGISTER_COUNTER(btree_depth, "Depth of btree", _publish_as::publish_as_gauge); + + REGISTER_COUNTER(btree_int_node_writes, "Total number of btree interior node writes", "btree_node_writes", + {{"node_type", "interior"}}); + REGISTER_COUNTER(btree_leaf_node_writes, "Total number of btree leaf node writes", "btree_node_writes", + {{"node_type", "leaf"}}); + REGISTER_COUNTER(btree_num_pc_gen_mismatch, "Number of gen mismatches to recover"); + + REGISTER_HISTOGRAM(btree_int_node_occupancy, "Interior node occupancy", "btree_node_occupancy", + {{"node_type", "interior"}}, HistogramBucketsType(LinearUpto128Buckets)); + REGISTER_HISTOGRAM(btree_leaf_node_occupancy, "Leaf node occupancy", "btree_node_occupancy", + {{"node_type", "leaf"}}, HistogramBucketsType(LinearUpto128Buckets)); + REGISTER_COUNTER(btree_retry_count, "number of retries"); + REGISTER_COUNTER(write_err_cnt, "number of errors in write"); + REGISTER_COUNTER(split_failed, "split failed"); + REGISTER_COUNTER(query_err_cnt, "number of errors in query"); + REGISTER_COUNTER(read_node_count_in_write_ops, "number of nodes read in write_op"); + REGISTER_COUNTER(read_node_count_in_query_ops, "number of nodes read in query_op"); + REGISTER_COUNTER(btree_write_ops_count, "number of btree operations"); + REGISTER_COUNTER(btree_query_ops_count, "number of btree operations"); + REGISTER_COUNTER(btree_remove_ops_count, "number of btree operations"); + REGISTER_HISTOGRAM(btree_exclusive_time_in_int_node, + "Exclusive time spent (Write locked) on interior node (ns)", "btree_exclusive_time_in_node", + {{"node_type", "interior"}}); + REGISTER_HISTOGRAM(btree_exclusive_time_in_leaf_node, "Exclusive time spent (Write locked) on leaf node (ns)", + "btree_exclusive_time_in_node", {{"node_type", "leaf"}}); + REGISTER_HISTOGRAM(btree_inclusive_time_in_int_node, "Inclusive time spent (Read locked) on interior node (ns)", + "btree_inclusive_time_in_node", {{"node_type", "interior"}}); + REGISTER_HISTOGRAM(btree_inclusive_time_in_leaf_node, "Inclusive time spent (Read locked) on leaf node (ns)", + "btree_inclusive_time_in_node", {{"node_type", "leaf"}}); + + register_me_to_farm(); + } + + ~BtreeMetrics() { deregister_me_from_farm(); } +}; + +} // namespace btree +} // namespace sisl diff --git a/src/btree/btree_kv.hpp b/src/btree/btree_kv.hpp new file mode 100644 index 00000000..f0fb18c3 --- /dev/null +++ b/src/btree/btree_kv.hpp @@ -0,0 +1,314 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam, Rishabh Mittal + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ +#pragma once + +#include +#include +#include +#include "fds/buffer.hpp" + +namespace sisl { +namespace btree { + +ENUM(MultiMatchOption, uint16_t, + DO_NOT_CARE, // Select anything that matches + LEFT_MOST, // Select the left most one + RIGHT_MOST, // Select the right most one + MID // Select the middle one +) + +ENUM(btree_put_type, uint16_t, + INSERT_ONLY_IF_NOT_EXISTS, // Insert + REPLACE_ONLY_IF_EXISTS, // Upsert + REPLACE_IF_EXISTS_ELSE_INSERT, + APPEND_ONLY_IF_EXISTS, // Update + APPEND_IF_EXISTS_ELSE_INSERT) + +// The base class, btree library expects its key to be derived from +class BtreeKeyRange; +class BtreeKey { +public: + BtreeKey() = default; + + // Deleting copy constructor forces the derived class to define its own copy constructor + // BtreeKey(const BtreeKey& other) = delete; + // BtreeKey(const sisl::blob& b) = delete; + BtreeKey(const BtreeKey& other) = default; + virtual ~BtreeKey() = default; + + virtual BtreeKey& operator=(const BtreeKey& other) { + clone(other); + return *this; + }; + + virtual void clone(const BtreeKey& other) = 0; + virtual int compare(const BtreeKey& other) const = 0; + + /* Applicable only for extent keys, so do default compare */ + virtual int compare_head(const BtreeKey& other) const { return compare(other); }; + + virtual int compare_range(const BtreeKeyRange& range) const = 0; + + virtual sisl::blob serialize() const = 0; + virtual uint32_t serialized_size() const = 0; + // virtual void deserialize(const sisl::blob& b) = 0; + + // Applicable only to extent keys, where keys have head and tail + virtual sisl::blob serialize_tail() const { return serialize(); } + + virtual std::string to_string() const = 0; + virtual bool is_extent_key() const { return false; } +}; + +class BtreeKeyRange { +public: + const BtreeKey* m_input_start_key{nullptr}; + const BtreeKey* m_input_end_key{nullptr}; + bool m_start_incl; + bool m_end_incl; + MultiMatchOption m_multi_selector; + + friend class BtreeSearchState; + + template < typename K > + friend class BtreeKeyRangeSafe; + + void set_multi_option(MultiMatchOption o) { m_multi_selector = o; } + virtual const BtreeKey& start_key() const { return *m_input_start_key; } + virtual const BtreeKey& end_key() const { return *m_input_end_key; } + + virtual bool is_start_inclusive() const { return m_start_incl; } + virtual bool is_end_inclusive() const { return m_end_incl; } + virtual bool is_simple_search() const { + return ((m_input_start_key == m_input_end_key) && (m_start_incl == m_end_incl)); + } + MultiMatchOption multi_option() const { return m_multi_selector; } + +private: + BtreeKeyRange(const BtreeKey* start_key, bool start_incl, const BtreeKey* end_key, bool end_incl, + MultiMatchOption option) : + m_input_start_key{start_key}, + m_input_end_key{end_key}, + m_start_incl{start_incl}, + m_end_incl{end_incl}, + m_multi_selector{option} {} + BtreeKeyRange(const BtreeKey* start_key, bool start_incl, MultiMatchOption option) : + m_input_start_key{start_key}, + m_input_end_key{start_key}, + m_start_incl{start_incl}, + m_end_incl{start_incl}, + m_multi_selector{option} {} +}; + +/* This type is for keys which is range in itself i.e each key is having its own + * start() and end(). + */ +class ExtentBtreeKey : public BtreeKey { +public: + ExtentBtreeKey() = default; + virtual ~ExtentBtreeKey() = default; + virtual bool is_extent_key() const { return true; } + virtual int compare_end(const BtreeKey& other) const = 0; + virtual int compare_start(const BtreeKey& other) const = 0; + + virtual bool preceeds(const BtreeKey& other) const = 0; + virtual bool succeeds(const BtreeKey& other) const = 0; + + virtual sisl::blob serialize_tail() const override = 0; + + /* we always compare the end key in case of extent */ + virtual int compare(const BtreeKey& other) const override { return (compare_end(other)); } + + /* we always compare the end key in case of extent */ + virtual int compare_range(const BtreeKeyRange& range) const override { return (compare_end(range.end_key())); } +}; + +class BtreeValue { +public: + BtreeValue() = default; + virtual ~BtreeValue() = default; + + // Deleting copy constructor forces the derived class to define its own copy constructor + BtreeValue(const BtreeValue& other) = delete; + + virtual blob serialize() const = 0; + virtual uint32_t serialized_size() const = 0; + virtual void deserialize(const blob& b, bool copy) = 0; + // virtual void append_blob(const BtreeValue& new_val, BtreeValue& existing_val) = 0; + + // virtual void set_blob_size(uint32_t size) = 0; + // virtual uint32_t estimate_size_after_append(const BtreeValue& new_val) = 0; + +#if 0 + virtual void get_overlap_diff_kvs(BtreeKey* k1, BtreeValue* v1, BtreeKey* k2, BtreeValue* v2, uint32_t param, + diff_read_next_t& to_read, + std::vector< std::pair< BtreeKey, BtreeValue > >& overlap_kvs) { + LOGINFO("Not Implemented"); + } +#endif + + virtual std::string to_string() const { return ""; } +}; + +template < typename K > +class BtreeKeyRangeSafe : public BtreeKeyRange { +private: + const K m_actual_start_key; + const K m_actual_end_key; + +public: + BtreeKeyRangeSafe(const BtreeKey& start_key) : + BtreeKeyRange(nullptr, true, nullptr, true, MultiMatchOption::DO_NOT_CARE), m_actual_start_key{start_key} { + this->m_input_start_key = &m_actual_start_key; + this->m_input_end_key = &m_actual_start_key; + } + + virtual ~BtreeKeyRangeSafe() = default; + + BtreeKeyRangeSafe(const BtreeKey& start_key, const BtreeKey& end_key) : + BtreeKeyRangeSafe(start_key, true, end_key, true) {} + + BtreeKeyRangeSafe(const BtreeKey& start_key, bool start_incl, const BtreeKey& end_key, bool end_incl, + MultiMatchOption option = MultiMatchOption::DO_NOT_CARE) : + BtreeKeyRange(nullptr, start_incl, nullptr, end_incl, option), + m_actual_start_key{start_key}, + m_actual_end_key{end_key} { + this->m_input_start_key = &m_actual_start_key; + this->m_input_end_key = &m_actual_end_key; + } + + /******************* all functions are constant *************/ + BtreeKeyRangeSafe< K > start_of_range() const { + return BtreeKeyRangeSafe< K >(start_key(), is_start_inclusive(), multi_option()); + } + BtreeKeyRangeSafe< K > end_of_range() const { + return BtreeKeyRangeSafe< K >(end_key(), is_end_inclusive(), multi_option()); + } +}; + +struct BtreeLockTracker; +struct BtreeQueryCursor { + std::unique_ptr< BtreeKey > m_last_key; + std::unique_ptr< BtreeLockTracker > m_locked_nodes; + BtreeQueryCursor() = default; + + const sisl::blob serialize() const { return m_last_key ? m_last_key->serialize() : sisl::blob{}; }; + virtual std::string to_string() const { return (m_last_key) ? m_last_key->to_string() : "null"; } +}; + +// This class holds the current state of the search. This is where intermediate search state are stored +// and it is mutated by the do_put and do_query methods. Expect the current_sub_range and cursor to keep +// getting updated on calls. +class BtreeSearchState { +protected: + const BtreeKeyRange m_input_range; + BtreeKeyRange m_current_sub_range; + BtreeQueryCursor* m_cursor{nullptr}; + +public: + BtreeSearchState(BtreeKeyRange&& inp_range, BtreeQueryCursor* cur = nullptr) : + m_input_range(std::move(inp_range)), m_current_sub_range{m_input_range}, m_cursor{cur} {} + + const BtreeQueryCursor* const_cursor() const { return m_cursor; } + BtreeQueryCursor* cursor() { return m_cursor; } + void set_cursor(BtreeQueryCursor* cur) { m_cursor = cur; } + void reset_cursor() { set_cursor(nullptr); } + bool is_cursor_valid() const { return (m_cursor != nullptr); } + + template < typename K > + void set_cursor_key(const BtreeKey& end_key) { + if (!m_cursor) { + /* no need to set cursor as user doesn't want to keep track of it */ + return; + } + m_cursor->m_last_key = std::make_unique< K >(end_key); + } + + const BtreeKeyRange& input_range() const { return m_input_range; } + const BtreeKeyRange& current_sub_range() const { return m_current_sub_range; } + void set_current_sub_range(const BtreeKeyRange& new_sub_range) { m_current_sub_range = new_sub_range; } + const BtreeKey& next_key() const { + return (m_cursor && m_cursor->m_last_key) ? *m_cursor->m_last_key : m_input_range.start_key(); + } + +#if 0 + template < typename K > + BtreeKeyRangeSafe< K > next_start_range() const { + return BtreeKeyRangeSafe< K >(next_key(), is_start_inclusive(), m_input_range.multi_option()); + } + + template < typename K > + BtreeKeyRangeSafe< K > end_of_range() const { + return BtreeKeyRangeSafe< K >(m_input_range.end_key(), is_end_inclusive(), m_input_range.multi_option()); + } +#endif + + BtreeKeyRange next_range() const { + return BtreeKeyRange(&next_key(), is_start_inclusive(), &m_input_range.end_key(), is_end_inclusive(), + m_input_range.multi_option()); + } + +private: + bool is_start_inclusive() const { + if (m_cursor && m_cursor->m_last_key) { + // cursor always have the last key not included + return false; + } else { + return m_input_range.is_start_inclusive(); + } + } + + bool is_end_inclusive() const { return m_input_range.is_end_inclusive(); } +}; + +class BtreeNodeInfo : public BtreeValue { +private: + bnodeid_t m_bnodeid{empty_bnodeid}; + +public: + BtreeNodeInfo() = default; + explicit BtreeNodeInfo(const bnodeid_t& id) : m_bnodeid(id) {} + BtreeNodeInfo& operator=(const BtreeNodeInfo& other) = default; + + bnodeid_t bnode_id() const { return m_bnodeid; } + void set_bnode_id(bnodeid_t bid) { m_bnodeid = bid; } + bool has_valid_bnode_id() const { return (m_bnodeid != empty_bnodeid); } + + sisl::blob serialize() const override { + sisl::blob b; + b.size = sizeof(bnodeid_t); + b.bytes = uintptr_cast(const_cast< bnodeid_t* >(&m_bnodeid)); + return b; + } + uint32_t serialized_size() const override { return sizeof(bnodeid_t); } + static uint32_t get_fixed_size() { return sizeof(bnodeid_t); } + std::string to_string() const override { return fmt::format("{}", m_bnodeid); } + bool operator==(const BtreeNodeInfo& other) const { return (m_bnodeid == other.m_bnodeid); } + + void deserialize(const blob& b, bool copy) override { + DEBUG_ASSERT_EQ(b.size, sizeof(bnodeid_t), "BtreeNodeInfo deserialize received invalid blob"); + m_bnodeid = *(r_cast< bnodeid_t* >(b.bytes)); + } + + friend std::ostream& operator<<(std::ostream& os, const BtreeNodeInfo& b) { + os << b.m_bnodeid; + return os; + } +}; + +} // namespace btree +} // namespace sisl diff --git a/src/btree/btree_mutate_impl.ipp b/src/btree/btree_mutate_impl.ipp new file mode 100644 index 00000000..8ce6c8fc --- /dev/null +++ b/src/btree/btree_mutate_impl.ipp @@ -0,0 +1,523 @@ +#pragma once +#include "btree.hpp" + +namespace sisl { +namespace btree { + +/* This function does the heavy lifiting of co-ordinating inserts. It is a recursive function which walks + * down the tree. + * + * NOTE: It expects the node it operates to be locked (either read or write) and also the node should not be + * full. + * + * Input: + * myNode = Node it operates on + * curLock = Type of lock held for this node + * put_req = Key to insert + * v = Value to insert + * ind_hint = If we already know which slot to insert to, if not -1 + * put_type = Type of the put (refer to structure btree_put_type) + * is_end_path = set to true only for last path from root to tree, for range put + * op = tracks multi node io. + */ +template < typename K, typename V > +btree_status_t Btree< K, V >::do_put(const BtreeNodePtr< K >& my_node, locktype_t curlock, BtreeMutateRequest& put_req, + int ind_hint) { + btree_status_t ret = btree_status_t::success; + int curr_ind = -1; + + if (my_node->is_leaf()) { + /* update the leaf node */ + BT_NODE_LOG_ASSERT_EQ(curlock, locktype_t::WRITE, my_node); + ret = mutate_write_leaf_node(my_node, put_req); + unlock_node(my_node, curlock); + return ret; + } + +retry: + int start_ind = 0, end_ind = -1; + + /* Get the start and end ind in a parent node for the range updates. For + * non range updates, start ind and end ind are same. + */ + ret = get_start_and_end_ind(my_node, put_req, start_ind, end_ind); + if (ret != btree_status_t::success) { goto out; } + + BT_NODE_DBG_ASSERT((curlock == locktype_t::READ || curlock == locktype_t::WRITE), my_node, "unexpected locktype {}", + curlock); + curr_ind = start_ind; + + while (curr_ind <= end_ind) { // iterate all matched childrens +#ifdef _PRERELEASE + if (curr_ind - start_ind > 1 && homestore_flip->test_flip("btree_leaf_node_split")) { + ret = btree_status_t::retry; + goto out; + } +#endif + locktype_t child_cur_lock = locktype_t::NONE; + + // Get the childPtr for given key. + BtreeNodeInfo child_info; + BtreeNodePtr< K > child_node; + ret = get_child_and_lock_node(my_node, curr_ind, child_info, child_node, locktype_t::READ, locktype_t::WRITE, + put_req_op_ctx(put_req)); + if (ret != btree_status_t::success) { + if (ret == btree_status_t::not_found) { + // Either the node was updated or mynode is freed. Just proceed again from top. + /* XXX: Is this case really possible as we always take the parent lock and never + * release it. + */ + ret = btree_status_t::retry; + } + goto out; + } + + // Directly get write lock for leaf, since its an insert. + child_cur_lock = (child_node->is_leaf()) ? locktype_t::WRITE : locktype_t::READ; + + /* Get subrange if it is a range update */ + if (is_range_update_req(put_req) && child_node->is_leaf()) { + /* We get the subrange only for leaf because this is where we will be inserting keys. In interior + * nodes, keys are always propogated from the lower nodes. + */ + BtreeSearchState& search_state = to_range_update_req(put_req).search_state(); + search_state.set_current_sub_range(my_node->get_subrange(search_state.next_range(), curr_ind)); + + BT_NODE_LOG(DEBUG, my_node, "Subrange:s:{},e:{},c:{},nid:{},edgeid:{},sk:{},ek:{}", start_ind, end_ind, + curr_ind, my_node->get_node_id(), my_node->get_edge_id(), + search_state.current_sub_range().start_key().to_string(), + search_state.current_sub_range().end_key().to_string()); + } + + /* check if child node is needed to split */ + bool split_occured = false; + ret = check_and_split_node(my_node, put_req, ind_hint, child_node, curlock, child_cur_lock, curr_ind, + split_occured); + if (ret != btree_status_t::success) { goto out; } + if (split_occured) { + ind_hint = -1; // Since split is needed, hint is no longer valid + goto retry; + } + +#ifndef NDEBUG + K ckey, pkey; + if (curr_ind != int_cast(my_node->get_total_entries())) { // not edge + pkey = my_node->get_nth_key(curr_ind, true); + if (child_node->get_total_entries() != 0) { + ckey = child_node->get_last_key(); + if (!child_node->is_leaf()) { + BT_NODE_DBG_ASSERT_EQ(ckey.compare(pkey), 0, my_node); + } else { + BT_NODE_DBG_ASSERT_LE(ckey.compare(pkey), 0, my_node); + } + } + // BT_NODE_DBG_ASSERT_EQ((is_range_update_req(put_req) || k.compare(pkey) <= 0), true, child_node); + } + if (curr_ind > 0) { // not first child + pkey = my_node->get_nth_key(curr_ind - 1, true); + if (child_node->get_total_entries() != 0) { + ckey = child_node->get_first_key(); + BT_NODE_DBG_ASSERT_LE(pkey.compare(ckey), 0, child_node); + } + // BT_NODE_DBG_ASSERT_EQ((is_range_update_req(put_req) || k.compare(pkey) >= 0), true, my_node); + } +#endif + if (curr_ind == end_ind) { + // If we have reached the last index, unlock before traversing down, because we no longer need + // this lock. Holding this lock will impact performance unncessarily. + unlock_node(my_node, curlock); + curlock = locktype_t::NONE; + } + +#ifndef NDEBUG + if (child_cur_lock == locktype_t::WRITE) { + BT_NODE_DBG_ASSERT_EQ(child_node->m_trans_hdr.is_lock, true, child_node); + } +#endif + + ret = do_put(child_node, child_cur_lock, put_req, ind_hint); + if (ret != btree_status_t::success) { goto out; } + + ++curr_ind; + } +out: + if (curlock != locktype_t::NONE) { unlock_node(my_node, curlock); } + return ret; + // Warning: Do not access childNode or myNode beyond this point, since it would + // have been unlocked by the recursive function and it could also been deleted. +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::mutate_write_leaf_node(const BtreeNodePtr< K >& my_node, BtreeMutateRequest& req) { + btree_status_t ret = btree_status_t::success; + if (is_range_update_req(req)) { + BtreeRangeUpdateRequest& rureq = to_range_update_req(req); + BtreeSearchState& search_state = rureq.search_state(); + const BtreeKeyRange& subrange = search_state.current_sub_range(); + + static thread_local std::vector< std::pair< K, V > > s_match; + s_match.clear(); + uint32_t start_ind = 0u, end_ind = 0u; + my_node->get_all(subrange, UINT32_MAX, start_ind, end_ind, &s_match); + + static thread_local std::vector< pair< K, V > > s_replace_kv; + std::vector< pair< K, V > >* p_replace_kvs = &s_match; + if (m_bt_cfg.is_custom_kv()) { + s_replace_kv.clear(); + // rreq.get_cb_param()->node_version = my_node->get_version(); + // ret = rreq.callback()(s_match, s_replace_kv, rreq.get_cb_param(), subrange); + ret = custom_kv_select_for_write(my_node->get_version(), s_match, s_replace_kv, subrange, rureq); + if (ret != btree_status_t::success) { return ret; } + p_replace_kvs = &s_replace_kv; + } + + BT_NODE_DBG_ASSERT_LE(start_ind, end_ind, my_node); + if (s_match.size() > 0) { my_node->remove(start_ind, end_ind); } + COUNTER_DECREMENT(m_metrics, btree_obj_count, s_match.size()); + + for (const auto& [key, value] : *p_replace_kvs) { // insert is based on compare() of BtreeKey + auto status = my_node->insert(key, value); + BT_NODE_REL_ASSERT_EQ(status, btree_status_t::success, my_node, "unexpected insert failure"); + COUNTER_INCREMENT(m_metrics, btree_obj_count, 1); + } + + /* update cursor in intermediate search state */ + rureq.search_state().set_cursor_key< K >(subrange.end_key()); + } else { + const BtreeSinglePutRequest& sreq = to_single_put_req(req); + if (!my_node->put(sreq.key(), sreq.value(), sreq.m_put_type, sreq.m_existing_val.get())) { + ret = btree_status_t::put_failed; + } + COUNTER_INCREMENT(m_metrics, btree_obj_count, 1); + } + + if (ret == btree_status_t::success) { write_node(my_node, put_req_op_ctx(req)); } + return ret; +} + +/* It split the child if a split is required. It releases lock on parent and child_node in case of failure */ +template < typename K, typename V > +btree_status_t Btree< K, V >::check_and_split_node(const BtreeNodePtr< K >& my_node, BtreeMutateRequest& req, + int ind_hint, const BtreeNodePtr< K >& child_node, + locktype_t& curlock, locktype_t& child_curlock, int child_ind, + bool& split_occured) { + split_occured = false; + K split_key; + btree_status_t ret = btree_status_t::success; + auto child_lock_type = child_curlock; + auto none_lock_type = locktype_t::NONE; + +#ifdef _PRERELEASE + boost::optional< int > time; + if (child_node->is_leaf()) { + time = homestore_flip->get_test_flip< int >("btree_delay_and_split_leaf", child_node->get_total_entries()); + } else { + time = homestore_flip->get_test_flip< int >("btree_delay_and_split", child_node->get_total_entries()); + } + if (time && child_node->get_total_entries() > 2) { + std::this_thread::sleep_for(std::chrono::microseconds{time.get()}); + } else +#endif + { + if (!is_split_needed(child_node, m_bt_cfg, req)) { return ret; } + } + + /* Split needed */ + if (is_range_update_req(req)) { + /* In case of range update we might split multiple childs of a parent in a single + * iteration which result into less space in the parent node. + */ +#ifdef _PRERELEASE + if (homestore_flip->test_flip("btree_parent_node_full")) { + ret = btree_status_t::retry; + goto out; + } +#endif + if (is_split_needed(my_node, m_bt_cfg, req)) { + // restart from root + ret = btree_status_t::retry; + bt_thread_vars()->force_split_node = my_node; // On retry force split the my_node + goto out; + } + } + + // Time to split the child, but we need to convert parent to write lock + ret = upgrade_node(my_node, child_node, put_req_op_ctx(req), curlock, child_curlock); + if (ret != btree_status_t::success) { + BT_NODE_LOG(DEBUG, my_node, "Upgrade of node lock failed, retrying from root"); + BT_NODE_LOG_ASSERT_EQ(curlock, locktype_t::NONE, my_node); + goto out; + } + BT_NODE_LOG_ASSERT_EQ(child_curlock, child_lock_type, my_node); + BT_NODE_LOG_ASSERT_EQ(curlock, locktype_t::WRITE, my_node); + + // We need to upgrade the child to WriteLock + ret = upgrade_node(child_node, nullptr, put_req_op_ctx(req), child_curlock, none_lock_type); + if (ret != btree_status_t::success) { + BT_NODE_LOG(DEBUG, child_node, "Upgrade of child node lock failed, retrying from root"); + BT_NODE_LOG_ASSERT_EQ(child_curlock, locktype_t::NONE, child_node); + goto out; + } + BT_NODE_LOG_ASSERT_EQ(none_lock_type, locktype_t::NONE, my_node); + BT_NODE_LOG_ASSERT_EQ(child_curlock, locktype_t::WRITE, child_node); + + // Real time to split the node and get point at which it was split + ret = split_node(my_node, child_node, child_ind, &split_key, false /* root_split */, put_req_op_ctx(req)); + if (ret != btree_status_t::success) { goto out; } + + // After split, retry search and walk down. + unlock_node(child_node, locktype_t::WRITE); + child_curlock = locktype_t::NONE; + COUNTER_INCREMENT(m_metrics, btree_split_count, 1); + split_occured = true; + +out: + if (ret != btree_status_t::success) { + if (curlock != locktype_t::NONE) { + unlock_node(my_node, curlock); + curlock = locktype_t::NONE; + } + + if (child_curlock != locktype_t::NONE) { + unlock_node(child_node, child_curlock); + child_curlock = locktype_t::NONE; + } + } + return ret; +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::check_split_root(BtreeMutateRequest& req) { + K split_key; + BtreeNodePtr< K > child_node = nullptr; + btree_status_t ret = btree_status_t::success; + + m_btree_lock.lock(); + BtreeNodePtr< K > root; + + ret = read_and_lock_root(m_root_node_id, root, locktype_t::WRITE, locktype_t::WRITE, put_req_op_ctx(req)); + if (ret != btree_status_t::success) { goto done; } + + if (!is_split_needed(root, m_bt_cfg, req)) { + unlock_node(root, locktype_t::WRITE); + goto done; + } + + // Create a new child node and split them + child_node = alloc_interior_node(); + if (child_node == nullptr) { + ret = btree_status_t::space_not_avail; + unlock_node(root, locktype_t::WRITE); + goto done; + } + + /* it swap the data while keeping the nodeid same */ + swap_node(root, child_node, put_req_op_ctx(req)); + write_node(child_node, put_req_op_ctx(req)); + + BT_NODE_LOG(DEBUG, root, "Root node is full, swapping contents with child_node {} and split that", + child_node->get_node_id()); + + BT_NODE_DBG_ASSERT_EQ(root->get_total_entries(), 0, root); + ret = split_node(root, child_node, root->get_total_entries(), &split_key, true, put_req_op_ctx(req)); + BT_NODE_DBG_ASSERT_EQ(m_root_node_id, root->get_node_id(), root); + + if (ret != btree_status_t::success) { + swap_node(child_node, root, put_req_op_ctx(req)); + write_node(child_node, put_req_op_ctx(req)); + } + + /* unlock child node */ + unlock_node(root, locktype_t::WRITE); + + if (ret == btree_status_t::success) { COUNTER_INCREMENT(m_metrics, btree_depth, 1); } +done: + m_btree_lock.unlock(); + return ret; +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::split_node(const BtreeNodePtr< K >& parent_node, const BtreeNodePtr< K >& child_node, + uint32_t parent_ind, BtreeKey* out_split_key, bool root_split, void* context) { + BtreeNodeInfo ninfo; + BtreeNodePtr< K > child_node1 = child_node; + BtreeNodePtr< K > child_node2 = child_node1->is_leaf() ? alloc_leaf_node() : alloc_interior_node(); + + if (child_node2 == nullptr) { return (btree_status_t::space_not_avail); } + + btree_status_t ret = btree_status_t::success; + + child_node2->set_next_bnode(child_node1->next_bnode()); + child_node1->set_next_bnode(child_node2->get_node_id()); + uint32_t child1_filled_size = BtreeNode< K >::node_area_size(m_bt_cfg) - child_node1->get_available_size(m_bt_cfg); + + auto split_size = m_bt_cfg.split_size(child1_filled_size); + uint32_t res = child_node1->move_out_to_right_by_size(m_bt_cfg, *child_node2, split_size); + + BT_NODE_REL_ASSERT_GT(res, 0, child_node1, + "Unable to split entries in the child node"); // means cannot split entries + BT_NODE_DBG_ASSERT_GT(child_node1->get_total_entries(), 0, child_node1); + + // In an unlikely case where parent node has no room to accomodate the child key, we need to un-split and then + // free up the new node. This situation could happen on variable key, where the key max size is purely + // an estimation. This logic allows the max size to be declared more optimistically than say 1/4 of node + // which will have substatinally large number of splits and performance constraints. + if (out_split_key->serialized_size() > parent_node->get_available_size(m_bt_cfg)) { + uint32_t move_in_res = child_node1->move_in_from_right_by_size(m_bt_cfg, *child_node2, split_size); + BT_NODE_REL_ASSERT_EQ(move_in_res, res, child_node1, + "The split key size is more than estimated parent available space, but when revert is " + "attempted it fails. Continuing can cause data loss, so crashing"); + free_node(child_node2, context); + + // Mark the parent_node itself to be split upon next retry. + bt_thread_vars()->force_split_node = parent_node; + return btree_status_t::retry; + } + + // Update the existing parent node entry to point to second child ptr. + bool edge_split = (parent_ind == parent_node->get_total_entries()); + ninfo.set_bnode_id(child_node2->get_node_id()); + parent_node->update(parent_ind, ninfo); + + // Insert the last entry in first child to parent node + *out_split_key = child_node1->get_last_key(); + ninfo.set_bnode_id(child_node1->get_node_id()); + + // If key is extent then we always insert the tail portion of the extent key in the parent node + if (out_split_key->is_extent_key()) { + K split_tail_key{out_split_key->serialize_tail(), true}; + parent_node->insert(split_tail_key, ninfo); + } else { + parent_node->insert(*out_split_key, ninfo); + } + + BT_NODE_DBG_ASSERT_GT(child_node2->get_first_key().compare(*out_split_key), 0, child_node2); + BT_NODE_LOG(DEBUG, parent_node, "Split child_node={} with new_child_node={}, split_key={}", + child_node1->get_node_id(), child_node2->get_node_id(), out_split_key->to_string()); + + split_node_precommit(parent_node, child_node1, child_node2, root_split, edge_split, context); + +#if 0 + if (BtreeStoreType == btree_store_type::SSD_BTREE) { + auto j_iob = btree_store_t::make_journal_entry(journal_op::BTREE_SPLIT, root_split, bcp, + {parent_node->get_node_id(), parent_node->get_gen()}); + btree_store_t::append_node_to_journal( + j_iob, (root_split ? bt_journal_node_op::creation : bt_journal_node_op::inplace_write), child_node1, bcp, + out_split_end_key.get_blob()); + + // For root split or split around the edge, we don't write the key, which will cause replay to insert + // edge + if (edge_split) { + btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::creation, child_node2, bcp); + } else { + K child2_pkey; + parent_node->get_nth_key(parent_ind, &child2_pkey, true); + btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::creation, child_node2, bcp, + child2_pkey.get_blob()); + } + btree_store_t::write_journal_entry(m_btree_store.get(), bcp, j_iob); + } +#endif + + // we write right child node, than left and than parent child + write_node(child_node2, nullptr, context); + write_node(child_node1, child_node2, context); + write_node(parent_node, child_node1, context); + + // NOTE: Do not access parentInd after insert, since insert would have + // shifted parentNode to the right. + return ret; +} + +template < typename K, typename V > +bool Btree< K, V >::is_split_needed(const BtreeNodePtr< K >& node, const BtreeConfig& cfg, + BtreeMutateRequest& req) const { + if (bt_thread_vars()->force_split_node && (bt_thread_vars()->force_split_node == node)) { + bt_thread_vars()->force_split_node = nullptr; + return true; + } + + int64_t size_needed = 0; + if (!node->is_leaf()) { // if internal node, size is atmost one additional entry, size of K/V + size_needed = K::get_estimate_max_size() + BtreeNodeInfo::get_fixed_size() + node->get_record_size(); + } else if (is_range_update_req(req)) { + /* + * If there is an overlap then we can add (n + 1) more keys :- one in the front, one in the tail and + * other in between match entries (n - 1). + */ + static thread_local std::vector< std::pair< K, V > > s_match; + s_match.clear(); + uint32_t start_ind = 0, end_ind = 0; + auto& rureq = to_range_update_req(req); + node->get_all(rureq.input_range(), UINT32_MAX, start_ind, end_ind, &s_match); + + size_needed = compute_range_put_needed_size(s_match, (const V&)rureq.m_newval) + + ((s_match.size() + 1) * (K::get_estimate_max_size() + node->get_record_size())); + } else { + auto& sreq = to_single_put_req(req); + + // leaf node, + // NOTE : size_needed is just an guess here. Actual implementation of Mapping key/value can have + // specific logic which determines of size changes on insert or update. + auto [found, idx] = node->find(sreq.key(), nullptr, false); + if (!found) { // We need to insert, this newly. Find out if we have space for value. + size_needed = sreq.key().serialized_size() + sreq.value().serialized_size() + node->get_record_size(); + } else { + // Its an update, see how much additional space needed + V existing_val; + node->get_nth_value(idx, &existing_val, false); + size_needed = compute_single_put_needed_size(existing_val, (const V&)sreq.value()) + + sreq.key().serialized_size() + node->get_record_size(); + } + } + int64_t alreadyFilledSize = BtreeNode< K >::node_area_size(cfg) - node->get_available_size(cfg); + return (alreadyFilledSize + size_needed >= BtreeNode< K >::ideal_fill_size(cfg)); +} + +template < typename K, typename V > +int64_t Btree< K, V >::compute_single_put_needed_size(const V& current_val, const V& new_val) const { + return new_val.serialized_size() - current_val.serialized_size(); +} + +template < typename K, typename V > +int64_t Btree< K, V >::compute_range_put_needed_size(const std::vector< std::pair< K, V > >& existing_kvs, + const V& new_val) const { + return new_val.serialized_size() * existing_kvs.size(); +} + +template < typename K, typename V > +btree_status_t +Btree< K, V >::custom_kv_select_for_write(uint8_t node_version, const std::vector< std::pair< K, V > >& match_kv, + std::vector< std::pair< K, V > >& replace_kv, const BtreeKeyRange& range, + const BtreeRangeUpdateRequest& rureq) const { + for (const auto& [k, v] : match_kv) { + replace_kv.push_back(std::make_pair(k, (V&)rureq.m_newval)); + } + return btree_status_t::success; +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::get_start_and_end_ind(const BtreeNodePtr< K >& node, BtreeMutateRequest& req, + int& start_ind, int& end_ind) { + btree_status_t ret = btree_status_t::success; + if (is_range_update_req(req)) { + /* just get start/end index from get_all. We don't release the parent lock until this + * key range is not inserted from start_ind to end_ind. + */ + node->template get_all< V >(to_range_update_req(req).input_range(), UINT32_MAX, (uint32_t&)start_ind, + (uint32_t&)end_ind); + } else { + auto [found, idx] = node->find(to_single_put_req(req).key(), nullptr, true); + ASSERT_IS_VALID_INTERIOR_CHILD_INDX(found, idx, node); + end_ind = start_ind = (int)idx; + } + + if (start_ind > end_ind) { + BT_NODE_LOG_ASSERT(false, node, "start ind {} greater than end ind {}", start_ind, end_ind); + ret = btree_status_t::retry; + } + return ret; +} + +} // namespace btree +} // namespace sisl diff --git a/src/btree/btree_node.hpp b/src/btree/btree_node.hpp new file mode 100644 index 00000000..1dec7259 --- /dev/null +++ b/src/btree/btree_node.hpp @@ -0,0 +1,607 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam, Rishabh Mittal + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ + +#pragma once +#include + +#if defined __clang__ or defined __GNUC__ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpedantic" +#pragma GCC diagnostic ignored "-Wattributes" +#endif +#include +#if defined __clang__ or defined __GNUC__ +#pragma GCC diagnostic pop +#endif + +#include "utility/atomic_counter.hpp" +#include "utility/enum.hpp" +#include "utility/obj_life_counter.hpp" +#include "btree_internal.hpp" +#include "btree_kv.hpp" + +namespace sisl { +namespace btree { +ENUM(locktype_t, uint8_t, NONE, READ, WRITE) + +#pragma pack(1) +struct transient_hdr_t { + mutable folly::SharedMutexReadPriority lock; + sisl::atomic_counter< uint16_t > upgraders{0}; + + /* these variables are accessed without taking lock and are not expected to change after init */ + uint8_t is_leaf_node{0}; + // btree_store_type store_type{btree_store_type::MEM}; + +#ifndef NDEBUG + int is_lock{-1}; +#endif + + bool is_leaf() const { return (is_leaf_node != 0); } +}; +#pragma pack() + +static constexpr uint8_t BTREE_NODE_VERSION = 1; +static constexpr uint8_t BTREE_NODE_MAGIC = 0xab; + +#pragma pack(1) +struct persistent_hdr_t { + uint8_t magic{BTREE_NODE_MAGIC}; + uint8_t version{BTREE_NODE_VERSION}; + uint16_t checksum; + + bnodeid_t node_id; + bnodeid_t next_node; + + uint32_t nentries : 27; + uint32_t node_type : 3; + uint32_t leaf : 1; + uint32_t valid_node : 1; + + uint64_t node_gen; + bnodeid_t edge_entry; + + std::string to_string() const { + return fmt::format("magic={} version={} csum={} node_id={} next_node={} nentries={} node_type={} is_leaf={} " + "valid_node={} node_gen={} edge_entry={}", + magic, version, checksum, node_id, next_node, nentries, node_type, leaf, valid_node, + node_gen, edge_entry); + } +}; +#pragma pack() + +template < typename K > +class BtreeNode : public sisl::ObjLifeCounter< BtreeNode< K > > { + typedef std::pair< bool, uint32_t > node_find_result_t; + +public: + atomic_counter< int32_t > m_refcount{0}; + transient_hdr_t m_trans_hdr; + uint8_t* m_phys_node_buf; + +public: + BtreeNode(uint8_t* node_buf, bnodeid_t id, bool init_buf, bool is_leaf) : m_phys_node_buf{node_buf} { + if (init_buf) { + set_magic(); + init_checksum(); + set_leaf(is_leaf); + set_total_entries(0); + set_next_bnode(empty_bnodeid); + set_gen(0); + set_valid_node(true); + set_edge_id(empty_bnodeid); + set_node_id(id); + } else { + DEBUG_ASSERT_EQ(get_node_id(), id); + DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); + DEBUG_ASSERT_EQ(get_version(), BTREE_NODE_VERSION); + } + m_trans_hdr.is_leaf_node = is_leaf; + } + virtual ~BtreeNode() = default; + + node_find_result_t find(const BtreeKey& key, BtreeValue* outval, bool copy_val) const { + LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "Magic mismatch on btree_node {}", + get_persistent_header_const()->to_string()); + + auto [found, idx] = bsearch_node(key); + if (idx == get_total_entries() && !has_valid_edge()) { + DEBUG_ASSERT_EQ(found, false); + return std::make_pair(found, idx); + } + + if (get_total_entries() == 0) { + DEBUG_ASSERT((has_valid_edge() || is_leaf()), "Invalid node"); + if (is_leaf()) { return std::make_pair(found, idx); /* Leaf doesn't have any elements */ } + } + + if (outval) { get_nth_value(idx, outval, copy_val); } + return std::make_pair(found, idx); + } + + template < typename V > + uint32_t get_all(const BtreeKeyRange& range, uint32_t max_count, uint32_t& start_ind, uint32_t& end_ind, + std::vector< std::pair< K, V > >* out_values = nullptr) const { + LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "Magic mismatch on btree_node {}", + get_persistent_header_const()->to_string()); + auto count = 0U; + + // Get the start index of the search range. + const auto [sfound, sind] = bsearch_node(range.start_key()); + + start_ind = sind; + if (!range.is_start_inclusive()) { + if (start_ind < get_total_entries()) { + /* start is not inclusive so increment the start_ind if it is same as this key */ + const int x = compare_nth_key(range.start_key(), start_ind); + if (x == 0) { ++start_ind; } + } else { + DEBUG_ASSERT(is_leaf() || has_valid_edge(), "Invalid node"); + } + } + + if (start_ind == get_total_entries() && is_leaf()) { + end_ind = start_ind; + return 0; // no result found + } + DEBUG_ASSERT((start_ind < get_total_entries()) || has_valid_edge(), "Invalid node"); + + // search by the end index + const auto [efound, eind] = bsearch_node(range.end_key()); + end_ind = eind; + + if (end_ind == get_total_entries() && !has_valid_edge()) { --end_ind; } + if (is_leaf()) { + /* Decrement the end indx if range doesn't overlap with the start of key at end indx */ + K key = get_nth_key(end_ind, false); + if ((range.start_key().compare(key) < 0) && ((range.end_key().compare(key)) < 0)) { + if (start_ind == end_ind) { return 0; /* no match */ } + --end_ind; + } + } + + /* We should always find the entries in interior node */ + DEBUG_ASSERT_LE(start_ind, end_ind); + // DEBUG_ASSERT_EQ(range.is_end_inclusive(), true); /* we don't support end exclusive */ + DEBUG_ASSERT(start_ind < get_total_entries() || has_valid_edge(), "Invalid node"); + + count = std::min(end_ind - start_ind + 1, max_count); + if (out_values == nullptr) { return count; } + + /* get the keys and values */ + for (auto i{start_ind}; i < (start_ind + count); ++i) { + add_nth_obj_to_list< V >(i, out_values, true); +#if 0 + if (i == get_total_entries() && !is_leaf()) { + // invalid key in case of edge entry for internal node + out_values->emplace_back(std::make_pair(K{}, get_edge_value())); + } else { + out_values->emplace_back(std::make_pair(K{}, get_nth_value(i, true))); + } +#endif + } + return count; + } + + std::pair< bool, uint32_t > get_any(const BtreeKeyRange& range, BtreeKey* out_key, BtreeValue* out_val, + bool copy_key, bool copy_val) const { + LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "Magic mismatch on btree_node {}", + get_persistent_header_const()->to_string()); + uint32_t result_idx; + const auto mm_opt = range.multi_option(); + bool efound; + uint32_t end_idx; + + // Get the start index of the search range. + auto [sfound, start_idx] = bsearch_node(range.start_key()); + if (sfound && !range.is_start_inclusive()) { + ++start_idx; + sfound = false; + } + + if (sfound && ((mm_opt == MultiMatchOption::DO_NOT_CARE) || (mm_opt == MultiMatchOption::LEFT_MOST))) { + result_idx = start_idx; + goto found_result; + } else if (start_idx == get_total_entries()) { + DEBUG_ASSERT(is_leaf() || has_valid_edge(), "Invalid node"); + return std::make_pair(false, 0); // out_of_range + } + + std::tie(efound, end_idx) = bsearch_node(range.end_key()); + if (efound && !range.is_end_inclusive()) { + if (end_idx == 0) { return std::make_pair(false, 0); } + --end_idx; + efound = false; + } + + if (end_idx > start_idx) { + if (mm_opt == MultiMatchOption::RIGHT_MOST) { + result_idx = end_idx; + } else if (mm_opt == MultiMatchOption::MID) { + result_idx = (end_idx - start_idx) / 2; + } else { + result_idx = start_idx; + } + } else if ((start_idx == end_idx) && ((sfound || efound))) { + result_idx = start_idx; + } else { + return std::make_pair(false, 0); + } + + found_result: + if (out_key) { *out_key = get_nth_key(result_idx, copy_key); } + if (out_val) { get_nth_value(result_idx, out_val, copy_val); } + return std::make_pair(true, result_idx); + } + + bool put(const BtreeKey& key, const BtreeValue& val, btree_put_type put_type, BtreeValue* existing_val) { + LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "Magic mismatch on btree_node {}", + get_persistent_header_const()->to_string()); + bool ret = true; + + const auto [found, idx] = find(key, nullptr, false); + if (found && existing_val) { get_nth_value(idx, existing_val, true); } + + if (put_type == btree_put_type::INSERT_ONLY_IF_NOT_EXISTS) { + if (found) { + LOGDEBUG("Attempt to insert duplicate entry {}", key.to_string()); + return false; + } + ret = (insert(idx, key, val) == btree_status_t::success); + } else if (put_type == btree_put_type::REPLACE_ONLY_IF_EXISTS) { + if (!found) return false; + update(idx, key, val); + } else if (put_type == btree_put_type::REPLACE_IF_EXISTS_ELSE_INSERT) { + (found) ? update(idx, key, val) : (void)insert(idx, key, val); + } else if (put_type == btree_put_type::APPEND_ONLY_IF_EXISTS) { + if (!found) return false; + append(idx, key, val); + } else if (put_type == btree_put_type::APPEND_IF_EXISTS_ELSE_INSERT) { + (found) ? append(idx, key, val) : (void)insert(idx, key, val); + } else { + DEBUG_ASSERT(false, "Wrong put_type {}", put_type); + } + return ret; + } + + virtual btree_status_t insert(const BtreeKey& key, const BtreeValue& val) { + const auto [found, idx] = find(key, nullptr, false); + DEBUG_ASSERT(!is_leaf() || (!found), "Invalid node"); // We do not support duplicate keys yet + insert(idx, key, val); + DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "{}", get_persistent_header_const()->to_string()); + return btree_status_t::success; + } + + virtual bool remove_one(const BtreeKey& key, BtreeKey* outkey, BtreeValue* outval) { + const auto [found, idx] = find(key, outval, true); + if (found) { + if (outkey) { *outkey = get_nth_key(idx, true); } + remove(idx); + LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "{}", get_persistent_header_const()->to_string()); + } + return found; + } + + virtual bool remove_any(const BtreeKeyRange& range, BtreeKey* outkey, BtreeValue* outval) { + const auto [found, idx] = get_any(range, outkey, outval, true, true); + if (found) { + remove(idx); + LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "{}", get_persistent_header_const()->to_string()); + } + return found; + } + + /* Update the key and value pair and after update if outkey and outval are non-nullptr, it fills them with + * the key and value it just updated respectively */ + virtual bool update_one(const BtreeKey& key, const BtreeValue& val, BtreeValue* outval) { + const auto [found, idx] = find(key, outval, true); + if (found) { + update(idx, val); + LOGMSG_ASSERT((get_magic() == BTREE_NODE_MAGIC), "{}", get_persistent_header_const()->to_string()); + } + return found; + } + + void get_adjacent_indicies(uint32_t cur_ind, std::vector< uint32_t >& indices_list, uint32_t max_indices) const { + uint32_t i = 0; + uint32_t start_ind; + uint32_t end_ind; + uint32_t nentries = get_total_entries(); + + auto max_ind = ((max_indices / 2) - 1 + (max_indices % 2)); + end_ind = cur_ind + (max_indices / 2); + if (cur_ind < max_ind) { + end_ind += max_ind - cur_ind; + start_ind = 0; + } else { + start_ind = cur_ind - max_ind; + } + + for (i = start_ind; (i <= end_ind) && (indices_list.size() < max_indices); ++i) { + if (i == nentries) { + if (has_valid_edge()) { indices_list.push_back(i); } + break; + } else { + indices_list.push_back(i); + } + } + } + + BtreeKeyRange get_subrange(const BtreeKeyRange& inp_range, int upto_ind) const { +#ifndef NDEBUG + if (upto_ind > 0) { + /* start of input range should always be more then the key in curr_ind - 1 */ + DEBUG_ASSERT_LE(get_nth_key(upto_ind - 1, false).compare(inp_range.start_key()), 0, "[node={}]", + to_string()); + } +#endif + + // find end of subrange + bool end_inc = true; + K end_key; + + if (upto_ind < int_cast(get_total_entries())) { + end_key = get_nth_key(upto_ind, false); + if (end_key.compare(inp_range.end_key()) >= 0) { + /* this is last index to process as end of range is smaller then key in this node */ + end_key = inp_range.end_key(); + end_inc = inp_range.is_end_inclusive(); + } else { + end_inc = true; + } + } else { + /* it is the edge node. end key is the end of input range */ + LOGMSG_ASSERT_EQ(has_valid_edge(), true, "node={}", to_string()); + end_key = inp_range.end_key(); + end_inc = inp_range.is_end_inclusive(); + } + + BtreeKeyRangeSafe< K > subrange{inp_range.start_key(), inp_range.is_start_inclusive(), end_key, end_inc}; + RELEASE_ASSERT_LE(subrange.start_key().compare(subrange.end_key()), 0, "[node={}]", to_string()); + RELEASE_ASSERT_LE(subrange.start_key().compare(inp_range.end_key()), 0, "[node={}]", to_string()); + return subrange; + } + + K get_last_key() const { + if (get_total_entries() == 0) { return K{}; } + return get_nth_key(get_total_entries() - 1, true); + } + + K get_first_key() const { return get_nth_key(0, true); } + + bool validate_key_order() const { + for (auto i = 1u; i < get_total_entries(); ++i) { + auto prevKey = get_nth_key(i - 1, false); + auto curKey = get_nth_key(i, false); + if (prevKey.compare(curKey) >= 0) { + DEBUG_ASSERT(false, "Order check failed at entry={}", i); + return false; + } + } + return true; + } + + virtual BtreeNodeInfo get_edge_value() const { return BtreeNodeInfo{get_edge_id()}; } + + virtual void set_edge_value(const BtreeValue& v) { + const auto b = v.serialize(); + ASSERT_EQ(b.size, sizeof(bnodeid_t)); + set_edge_id(*r_cast< bnodeid_t* >(b.bytes)); + } + + void invalidate_edge() { set_edge_id(empty_bnodeid); } + + uint32_t get_total_entries() const { return get_persistent_header_const()->nentries; } + + void lock(locktype_t l) const { + if (l == locktype_t::READ) { + m_trans_hdr.lock.lock_shared(); + } else if (l == locktype_t::WRITE) { + m_trans_hdr.lock.lock(); + } + } + + void unlock(locktype_t l) const { + if (l == locktype_t::READ) { + m_trans_hdr.lock.unlock_shared(); + } else if (l == locktype_t::WRITE) { + m_trans_hdr.lock.unlock(); + } + } + + void lock_upgrade() { + m_trans_hdr.upgraders.increment(1); + this->unlock(locktype_t::READ); + this->lock(locktype_t::WRITE); + } + + void lock_acknowledge() { m_trans_hdr.upgraders.decrement(1); } + bool any_upgrade_waiters() const { return (!m_trans_hdr.upgraders.testz()); } + +public: + // Public method which needs to be implemented by variants + virtual uint32_t move_out_to_right_by_entries(const BtreeConfig& cfg, BtreeNode& other_node, uint32_t nentries) = 0; + virtual uint32_t move_out_to_right_by_size(const BtreeConfig& cfg, BtreeNode& other_node, uint32_t size) = 0; + virtual uint32_t move_in_from_right_by_entries(const BtreeConfig& cfg, BtreeNode& other_node, + uint32_t nentries) = 0; + virtual uint32_t move_in_from_right_by_size(const BtreeConfig& cfg, BtreeNode& other_node, uint32_t size) = 0; + virtual uint32_t get_available_size(const BtreeConfig& cfg) const = 0; + virtual std::string to_string(bool print_friendly = false) const = 0; + virtual void get_nth_value(uint32_t ind, BtreeValue* out_val, bool copy) const = 0; + virtual K get_nth_key(uint32_t ind, bool copykey) const = 0; + + virtual btree_status_t insert(uint32_t ind, const BtreeKey& key, const BtreeValue& val) = 0; + virtual void remove(uint32_t ind) { remove(ind, ind); } + virtual void remove(uint32_t ind_s, uint32_t ind_e) = 0; + virtual void update(uint32_t ind, const BtreeValue& val) = 0; + virtual void update(uint32_t ind, const BtreeKey& key, const BtreeValue& val) = 0; + virtual void append(uint32_t ind, const BtreeKey& key, const BtreeValue& val) = 0; + + virtual uint32_t get_nth_obj_size(uint32_t ind) const = 0; + virtual uint16_t get_record_size() const = 0; + virtual int compare_nth_key(const BtreeKey& cmp_key, uint32_t ind) const = 0; + + // Method just to please compiler + template < typename V > + V edge_value_internal() const { + return V{get_edge_id()}; + } + +private: + node_find_result_t bsearch_node(const BtreeKey& key) const { + DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); + auto [found, idx] = bsearch(-1, get_total_entries(), key); + if (found) { DEBUG_ASSERT_LT(idx, get_total_entries()); } + + return std::make_pair(found, idx); + } + + node_find_result_t bsearch(int start, int end, const BtreeKey& key) const { + int mid = 0; + bool found{false}; + uint32_t end_of_search_index{0}; + + if ((end - start) <= 1) { return std::make_pair(found, end_of_search_index); } + while ((end - start) > 1) { + mid = start + (end - start) / 2; + DEBUG_ASSERT(mid >= 0 && mid < int_cast(get_total_entries()), "Invalid mid={}", mid); + int x = compare_nth_key(key, mid); + if (x == 0) { + found = true; + end = mid; + break; + } else if (x > 0) { + end = mid; + } else { + start = mid; + } + } + + return std::make_pair(found, end); + } + + template < typename V > + void add_nth_obj_to_list(uint32_t ind, std::vector< std::pair< K, V > >* vec, bool copy) const { + std::pair< K, V > kv; + vec->emplace_back(kv); + + auto* pkv = &vec->back(); + if (ind == get_total_entries() && !is_leaf()) { + pkv->second = edge_value_internal< V >(); + } else { + pkv->first = get_nth_key(ind, copy); + get_nth_value(ind, &pkv->second, copy); + } + } + +public: + persistent_hdr_t* get_persistent_header() { return r_cast< persistent_hdr_t* >(m_phys_node_buf); } + const persistent_hdr_t* get_persistent_header_const() const { + return r_cast< const persistent_hdr_t* >(m_phys_node_buf); + } + uint8_t* node_data_area() { return (m_phys_node_buf + sizeof(persistent_hdr_t)); } + const uint8_t* node_data_area_const() const { return (m_phys_node_buf + sizeof(persistent_hdr_t)); } + + uint8_t get_magic() const { return get_persistent_header_const()->magic; } + void set_magic() { get_persistent_header()->magic = BTREE_NODE_MAGIC; } + + uint8_t get_version() const { return get_persistent_header_const()->version; } + uint16_t get_checksum() const { return get_persistent_header_const()->checksum; } + void init_checksum() { get_persistent_header()->checksum = 0; } + + void set_node_id(bnodeid_t id) { get_persistent_header()->node_id = id; } + bnodeid_t get_node_id() const { return get_persistent_header_const()->node_id; } + +#ifndef NO_CHECKSUM + void set_checksum(const BtreeConfig& cfg) { + get_persistent_header()->checksum = crc16_t10dif(init_crc_16, node_data_area_const(), node_area_size(cfg)); + } + + bool verify_node(const BtreeConfig& cfg) const { + HS_DEBUG_ASSERT_EQ(is_valid_node(), true, "verifying invalide node {}!", + get_persistent_header_const()->to_string()); + auto exp_checksum = crc16_t10dif(init_crc_16, node_data_area_const(), node_area_size(cfg)); + return ((get_magic() == BTREE_NODE_MAGIC) && (get_checksum() == exp_checksum)); + } +#endif + + bool is_leaf() const { return get_persistent_header_const()->leaf; } + btree_node_type get_node_type() const { + return s_cast< btree_node_type >(get_persistent_header_const()->node_type); + } + + void set_total_entries(uint32_t n) { get_persistent_header()->nentries = n; } + void inc_entries() { ++get_persistent_header()->nentries; } + void dec_entries() { --get_persistent_header()->nentries; } + + void add_entries(uint32_t addn) { get_persistent_header()->nentries += addn; } + void sub_entries(uint32_t subn) { get_persistent_header()->nentries -= subn; } + + void set_leaf(bool leaf) { get_persistent_header()->leaf = leaf; } + void set_node_type(btree_node_type t) { get_persistent_header()->node_type = uint32_cast(t); } + uint64_t get_gen() const { return get_persistent_header_const()->node_gen; } + void inc_gen() { get_persistent_header()->node_gen++; } + void set_gen(uint64_t g) { get_persistent_header()->node_gen = g; } + + void set_valid_node(bool valid) { get_persistent_header()->valid_node = (valid ? 1 : 0); } + bool is_valid_node() const { return get_persistent_header_const()->valid_node; } + + uint32_t get_occupied_size(const BtreeConfig& cfg) const { return (node_area_size(cfg) - get_available_size(cfg)); } + uint32_t get_suggested_min_size(const BtreeConfig& cfg) const { return cfg.max_key_size(); } + + static uint32_t node_area_size(const BtreeConfig& cfg) { return cfg.node_size() - sizeof(persistent_hdr_t); } + static uint32_t ideal_fill_size(const BtreeConfig& cfg) { + return uint32_cast(node_area_size(cfg) * cfg.m_ideal_fill_pct) / 100; + } + static uint32_t merge_suggested_size(const BtreeConfig& cfg) { return node_area_size(cfg) - ideal_fill_size(cfg); } + + bool is_merge_needed(const BtreeConfig& cfg) const { +#ifdef _PRERELEASE + if (homestore_flip->test_flip("btree_merge_node") && get_occupied_size(cfg) < node_area_size(cfg)) { + return true; + } + + auto ret = homestore_flip->get_test_flip< uint64_t >("btree_merge_node_pct"); + if (ret && get_occupied_size(cfg) < (ret.get() * node_area_size(cfg) / 100)) { return true; } +#endif + return (get_occupied_size(cfg) < get_suggested_min_size(cfg)); + } + + bnodeid_t next_bnode() const { return get_persistent_header_const()->next_node; } + void set_next_bnode(bnodeid_t b) { get_persistent_header()->next_node = b; } + + bnodeid_t get_edge_id() const { return get_persistent_header_const()->edge_entry; } + void set_edge_id(bnodeid_t edge) { get_persistent_header()->edge_entry = edge; } + + bool has_valid_edge() const { + if (is_leaf()) { return false; } + return (get_edge_id() != empty_bnodeid); + } +}; + +template < typename K, typename V > +struct btree_locked_node_info { + BtreeNode< K >* node; + Clock::time_point start_time; + const char* fname; + int line; + + void dump() const { LOGINFO("node locked by file: {}, line: {}", fname, line); } +}; + +} // namespace btree +} // namespace sisl diff --git a/src/btree/btree_node_mgr.ipp b/src/btree/btree_node_mgr.ipp new file mode 100644 index 00000000..add991b4 --- /dev/null +++ b/src/btree/btree_node_mgr.ipp @@ -0,0 +1,480 @@ +#pragma once + +#include "btree.hpp" +#include "fds/utils.hpp" +#include + +namespace sisl { +namespace btree { + +#define lock_and_refresh_node(a, b, c) _lock_and_refresh_node(a, b, c, __FILE__, __LINE__) +#define lock_node_upgrade(a, b) _lock_node_upgrade(a, b, __FILE__, __LINE__) +#define start_of_lock(a, b) _start_of_lock(a, b, __FILE__, __LINE__) + +template < typename K, typename V > +std::pair< btree_status_t, bnodeid_t > Btree< K, V >::create_root_node(void* op_context) { + // Assign one node as root node and initially root is leaf + BtreeNodePtr< K > root = alloc_leaf_node(); + if (root == nullptr) { return std::make_pair(btree_status_t::space_not_avail, empty_bnodeid); } + m_root_node_id = root->get_node_id(); + + create_tree_precommit(root, op_context); + + auto ret = write_node(root, nullptr, op_context); + BT_DBG_ASSERT_EQ(ret, btree_status_t::success, "Writing root node failed"); + + /* write an entry to the journal also */ + return std::make_pair(ret, m_root_node_id); +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::read_and_lock_root(bnodeid_t id, BtreeNodePtr< K >& node_ptr, locktype_t int_lock_type, + locktype_t leaf_lock_type, void* context) const { + return (read_and_lock_node(id, node_ptr, int_lock_type, int_lock_type, context)); +} + +/* It read the node, take the lock and recover it if required */ +template < typename K, typename V > +btree_status_t Btree< K, V >::read_and_lock_child(bnodeid_t child_id, BtreeNodePtr< K >& child_node, + const BtreeNodePtr< K >& parent_node, uint32_t parent_ind, + locktype_t int_lock_type, locktype_t leaf_lock_type, + void* context) const { + btree_status_t ret = read_node(child_id, child_node); + if (child_node == nullptr) { + if (ret != btree_status_t::fast_path_not_possible) { BT_LOG(ERROR, "read failed, reason: {}", ret); } + return ret; + } + + auto is_leaf = child_node->is_leaf(); + auto acq_lock = is_leaf ? leaf_lock_type : int_lock_type; + ret = lock_and_refresh_node(child_node, acq_lock, context); + + BT_NODE_DBG_ASSERT_EQ(is_leaf, child_node->is_leaf(), child_node); + + return ret; +} + +/* It read the node, take the lock and recover it if required */ +template < typename K, typename V > +btree_status_t Btree< K, V >::read_and_lock_sibling(bnodeid_t id, BtreeNodePtr< K >& node_ptr, locktype_t int_lock_type, + locktype_t leaf_lock_type, void* context) const { + /* TODO: Currently we do not have any recovery while sibling is read. It is not a problem today + * as we always scan the whole btree traversally during boot. However, we should support + * it later. + */ + return (read_and_lock_node(id, node_ptr, int_lock_type, int_lock_type, context)); +} + +/* It read the node and take a lock of the node. It doesn't recover the node. + * @int_lock_type :- lock type if a node is interior node. + * @leaf_lock_type :- lock type if a node is leaf node. + */ +template < typename K, typename V > +btree_status_t Btree< K, V >::read_and_lock_node(bnodeid_t id, BtreeNodePtr< K >& node_ptr, locktype_t int_lock_type, + locktype_t leaf_lock_type, void* context) const { + auto ret = read_node(id, node_ptr); + if (node_ptr == nullptr) { + if (ret != btree_status_t::fast_path_not_possible) { BT_LOG(ERROR, "read failed, reason: {}", ret); } + return ret; + } + + auto acq_lock = (node_ptr->is_leaf()) ? leaf_lock_type : int_lock_type; + ret = lock_and_refresh_node(node_ptr, acq_lock, context); + if (ret != btree_status_t::success) { BT_LOG(ERROR, "Node refresh failed"); } + + return ret; +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::get_child_and_lock_node(const BtreeNodePtr< K >& node, uint32_t index, + BtreeNodeInfo& child_info, BtreeNodePtr< K >& child_node, + locktype_t int_lock_type, locktype_t leaf_lock_type, + void* context) const { + if (index == node->get_total_entries()) { + const auto& edge_id{node->get_edge_id()}; + child_info.set_bnode_id(edge_id); + // If bsearch points to last index, it means the search has not found entry unless it is an edge value. + if (!child_info.has_valid_bnode_id()) { + BT_NODE_LOG_ASSERT(false, node, "Child index {} does not have valid bnode_id", index); + return btree_status_t::not_found; + } + } else { + BT_NODE_LOG_ASSERT_LT(index, node->get_total_entries(), node); + node->get_nth_value(index, &child_info, false /* copy */); + } + + return ( + read_and_lock_child(child_info.bnode_id(), child_node, node, index, int_lock_type, leaf_lock_type, context)); +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::write_node_sync(const BtreeNodePtr< K >& node, void* context) { + return (write_node(node, nullptr, context)); +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::write_node(const BtreeNodePtr< K >& node, void* context) { + return (write_node(node, nullptr, context)); +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::write_node(const BtreeNodePtr< K >& node, const BtreeNodePtr< K >& dependent_node, + void* context) { + BT_NODE_LOG(DEBUG, node, "Writing node"); + + COUNTER_INCREMENT_IF_ELSE(m_metrics, node->is_leaf(), btree_leaf_node_writes, btree_int_node_writes, 1); + HISTOGRAM_OBSERVE_IF_ELSE(m_metrics, node->is_leaf(), btree_leaf_node_occupancy, btree_int_node_occupancy, + ((m_node_size - node->get_available_size(m_bt_cfg)) * 100) / m_node_size); + + return btree_status_t::success; +} + +/* Caller of this api doesn't expect read to fail in any circumstance */ +template < typename K, typename V > +void Btree< K, V >::read_node_or_fail(bnodeid_t id, BtreeNodePtr< K >& node) const { + BT_NODE_REL_ASSERT_EQ(read_node(id, node), btree_status_t::success, node); +} + +/* This function upgrades the node lock and take required steps if things have + * changed during the upgrade. + * + * Inputs: + * myNode - Node to upgrade + * childNode - In case childNode needs to be unlocked. Could be nullptr + * curLock - Input/Output: current lock type + * + * Returns - If successfully able to upgrade, return true, else false. + * + * About Locks: This function expects the myNode to be locked and if childNode is not nullptr, expects + * it to be locked too. If it is able to successfully upgrade it continue to retain its + * old lock. If failed to upgrade, will release all locks. + */ +template < typename K, typename V > +btree_status_t Btree< K, V >::upgrade_node(const BtreeNodePtr< K >& my_node, BtreeNodePtr< K > child_node, + void* context, locktype_t& cur_lock, locktype_t& child_cur_lock) { + uint64_t prev_gen; + btree_status_t ret = btree_status_t::success; + locktype_t child_lock_type = child_cur_lock; + + if (cur_lock == locktype_t::WRITE) { goto done; } + + prev_gen = my_node->get_gen(); + if (child_node) { + unlock_node(child_node, child_cur_lock); + child_cur_lock = locktype_t::NONE; + } + +#ifdef _PRERELEASE + { + auto time = homestore_flip->get_test_flip< uint64_t >("btree_upgrade_delay"); + if (time) { std::this_thread::sleep_for(std::chrono::microseconds{time.get()}); } + } +#endif + ret = lock_node_upgrade(my_node, context); + if (ret != btree_status_t::success) { + cur_lock = locktype_t::NONE; + return ret; + } + + // The node was not changed by anyone else during upgrade. + cur_lock = locktype_t::WRITE; + + // If the node has been made invalid (probably by mergeNodes) ask caller to start over again, but before + // that cleanup or free this node if there is no one waiting. + if (!my_node->is_valid_node()) { + unlock_node(my_node, locktype_t::WRITE); + cur_lock = locktype_t::NONE; + ret = btree_status_t::retry; + goto done; + } + + // If node has been updated, while we have upgraded, ask caller to start all over again. + if (prev_gen != my_node->get_gen()) { + unlock_node(my_node, cur_lock); + cur_lock = locktype_t::NONE; + ret = btree_status_t::retry; + goto done; + } + + if (child_node) { + ret = lock_and_refresh_node(child_node, child_lock_type, context); + if (ret != btree_status_t::success) { + unlock_node(my_node, cur_lock); + cur_lock = locktype_t::NONE; + child_cur_lock = locktype_t::NONE; + goto done; + } + child_cur_lock = child_lock_type; + } + +#ifdef _PRERELEASE + { + int is_leaf = 0; + + if (child_node && child_node->is_leaf()) { is_leaf = 1; } + if (homestore_flip->test_flip("btree_upgrade_node_fail", is_leaf)) { + unlock_node(my_node, cur_lock); + cur_lock = locktype_t::NONE; + if (child_node) { + unlock_node(child_node, child_cur_lock); + child_cur_lock = locktype_t::NONE; + } + ret = btree_status_t::retry; + goto done; + } + } +#endif + + BT_NODE_DBG_ASSERT_EQ(my_node->m_trans_hdr.is_lock, 1, my_node); +done: + return ret; +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::_lock_and_refresh_node(const BtreeNodePtr< K >& node, locktype_t type, void* context, + const char* fname, int line) const { + bool is_write_modifiable; + node->lock(type); + if (type == locktype_t::WRITE) { + is_write_modifiable = true; +#ifndef NDEBUG + node->m_trans_hdr.is_lock = 1; +#endif + } else { + is_write_modifiable = false; + } + + auto ret = refresh_node(node, is_write_modifiable, context); + if (ret != btree_status_t::success) { + node->unlock(type); + return ret; + } + + _start_of_lock(node, type, fname, line); + return btree_status_t::success; +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::_lock_node_upgrade(const BtreeNodePtr< K >& node, void* context, const char* fname, + int line) { + // Explicitly dec and incr, for upgrade, since it does not call top level functions to lock/unlock node + auto time_spent = end_of_lock(node, locktype_t::READ); + + node->lock_upgrade(); +#ifndef NDEBUG + node->m_trans_hdr.is_lock = 1; +#endif + node->lock_acknowledge(); + auto ret = refresh_node(node, true, context); + if (ret != btree_status_t::success) { + node->unlock(locktype_t::WRITE); + return ret; + } + + observe_lock_time(node, locktype_t::READ, time_spent); + _start_of_lock(node, locktype_t::WRITE, fname, line); + return btree_status_t::success; +} + +template < typename K, typename V > +void Btree< K, V >::unlock_node(const BtreeNodePtr< K >& node, locktype_t type) const { +#ifndef NDEBUG + if (type == locktype_t::WRITE) { node->m_trans_hdr.is_lock = 0; } +#endif + node->unlock(type); + auto time_spent = end_of_lock(node, type); + observe_lock_time(node, type, time_spent); +} + +template < typename K, typename V > +BtreeNodePtr< K > Btree< K, V >::alloc_leaf_node() { + bool is_new_allocation; + BtreeNodePtr< K > n = alloc_node(true /* is_leaf */, is_new_allocation); + if (n) { + COUNTER_INCREMENT(m_metrics, btree_leaf_node_count, 1); + ++m_total_nodes; + } + return n; +} + +template < typename K, typename V > +BtreeNodePtr< K > Btree< K, V >::alloc_interior_node() { + bool is_new_allocation; + BtreeNodePtr< K > n = alloc_node(false /* is_leaf */, is_new_allocation); + if (n) { + COUNTER_INCREMENT(m_metrics, btree_int_node_count, 1); + ++m_total_nodes; + } + return n; +} + +template < typename K, typename V > +BtreeNode< K >* Btree< K, V >::init_node(uint8_t* node_buf, bnodeid_t id, bool init_buf, bool is_leaf) { + BtreeNode< K >* ret_node{nullptr}; + btree_node_type node_type = is_leaf ? m_bt_cfg.leaf_node_type() : m_bt_cfg.interior_node_type(); + + switch (node_type) { + case btree_node_type::VAR_OBJECT: + if (is_leaf) { + ret_node = new VarObjSizeNode< K, V >(node_buf, id, init_buf, is_leaf, this->m_bt_cfg); + } else { + ret_node = new VarObjSizeNode< K, BtreeNodeInfo >(node_buf, id, init_buf, is_leaf, this->m_bt_cfg); + } + break; + + case btree_node_type::FIXED: + if (is_leaf) { + ret_node = new SimpleNode< K, V >(node_buf, id, init_buf, is_leaf, this->m_bt_cfg); + } else { + ret_node = new SimpleNode< K, BtreeNodeInfo >(node_buf, id, init_buf, is_leaf, this->m_bt_cfg); + } + break; + + case btree_node_type::VAR_VALUE: + if (is_leaf) { + ret_node = new VarValueSizeNode< K, V >(node_buf, id, init_buf, is_leaf, this->m_bt_cfg); + } else { + ret_node = new VarValueSizeNode< K, BtreeNodeInfo >(node_buf, id, init_buf, is_leaf, this->m_bt_cfg); + } + break; + + case btree_node_type::VAR_KEY: + if (is_leaf) { + ret_node = new VarKeySizeNode< K, V >(node_buf, id, init_buf, is_leaf, this->m_bt_cfg); + } else { + ret_node = new VarKeySizeNode< K, BtreeNodeInfo >(node_buf, id, init_buf, is_leaf, this->m_bt_cfg); + } + break; + + default: + BT_REL_ASSERT(false, "Unsupported node type {}", node_type); + break; + } + return ret_node; +} + +/* Note:- This function assumes that access of this node is thread safe. */ +template < typename K, typename V > +void Btree< K, V >::do_free_node(const BtreeNodePtr< K >& node) { + BT_NODE_LOG(DEBUG, node, "Freeing node"); + + COUNTER_DECREMENT_IF_ELSE(m_metrics, node->is_leaf(), btree_leaf_node_count, btree_int_node_count, 1); + if (node->is_valid_node() == false) { + // a node could be marked as invalid during previous destroy and hit crash before destroy completes; + // and upon boot volume continues to destroy this btree; + BT_NODE_LOG(INFO, node, "Freeing a node already freed because of crash during destroy btree."); + } + node->set_valid_node(false); + --m_total_nodes; + + intrusive_ptr_release(node.get()); +} + +template < typename K, typename V > +void Btree< K, V >::observe_lock_time(const BtreeNodePtr< K >& node, locktype_t type, uint64_t time_spent) const { + if (time_spent == 0) { return; } + + if (type == locktype_t::READ) { + HISTOGRAM_OBSERVE_IF_ELSE(m_metrics, node->is_leaf(), btree_inclusive_time_in_leaf_node, + btree_inclusive_time_in_int_node, time_spent); + } else { + HISTOGRAM_OBSERVE_IF_ELSE(m_metrics, node->is_leaf(), btree_exclusive_time_in_leaf_node, + btree_exclusive_time_in_int_node, time_spent); + } +} + +template < typename K, typename V > +void Btree< K, V >::_start_of_lock(const BtreeNodePtr< K >& node, locktype_t ltype, const char* fname, int line) { + btree_locked_node_info< K, V > info; + +#ifndef NDEBUG + info.fname = fname; + info.line = line; +#endif + + info.start_time = Clock::now(); + info.node = node.get(); + if (ltype == locktype_t::WRITE) { + bt_thread_vars()->wr_locked_nodes.push_back(info); + LOGTRACEMOD(btree, "ADDING node {} to write locked nodes list, its size={}", (void*)info.node, + bt_thread_vars()->wr_locked_nodes.size()); + } else if (ltype == locktype_t::READ) { + bt_thread_vars()->rd_locked_nodes.push_back(info); + LOGTRACEMOD(btree, "ADDING node {} to read locked nodes list, its size={}", (void*)info.node, + bt_thread_vars()->rd_locked_nodes.size()); + } else { + DEBUG_ASSERT(false, "Invalid locktype_t {}", ltype); + } +} + +template < typename K, typename V > +bool Btree< K, V >::remove_locked_node(const BtreeNodePtr< K >& node, locktype_t ltype, + btree_locked_node_info< K, V >* out_info) { + auto pnode_infos = + (ltype == locktype_t::WRITE) ? &bt_thread_vars()->wr_locked_nodes : &bt_thread_vars()->rd_locked_nodes; + + if (!pnode_infos->empty()) { + auto info = pnode_infos->back(); + if (info.node == node.get()) { + *out_info = info; + pnode_infos->pop_back(); + LOGTRACEMOD(btree, "REMOVING node {} from {} locked nodes list, its size = {}", (void*)info.node, + (ltype == locktype_t::WRITE) ? "write" : "read", pnode_infos->size()); + return true; + } else if (pnode_infos->size() > 1) { + info = pnode_infos->at(pnode_infos->size() - 2); + if (info.node == node.get()) { + *out_info = info; + pnode_infos->at(pnode_infos->size() - 2) = pnode_infos->back(); + pnode_infos->pop_back(); + LOGTRACEMOD(btree, "REMOVING node {} from {} locked nodes list, its size = {}", (void*)info.node, + (ltype == locktype_t::WRITE) ? "write" : "read", pnode_infos->size()); + return true; + } + } + } + +#ifndef NDEBUG + if (pnode_infos->empty()) { + LOGERRORMOD(btree, "locked_node_list: node = {} not found, locked node list empty", (void*)node.get()); + } else if (pnode_infos->size() == 1) { + LOGERRORMOD(btree, "locked_node_list: node = {} not found, total list count = 1, Expecting node = {}", + (void*)node.get(), (void*)pnode_infos->back().node); + } else { + LOGERRORMOD(btree, "locked_node_list: node = {} not found, total list count = {}, Expecting nodes = {} or {}", + (void*)node.get(), pnode_infos->size(), (void*)pnode_infos->back().node, + (void*)pnode_infos->at(pnode_infos->size() - 2).node); + } +#endif + return false; +} + +template < typename K, typename V > +uint64_t Btree< K, V >::end_of_lock(const BtreeNodePtr< K >& node, locktype_t ltype) { + btree_locked_node_info< K, V > info; + if (!remove_locked_node(node, ltype, &info)) { + DEBUG_ASSERT(false, "Expected node = {} is not there in locked_node_list", (void*)node.get()); + return 0; + } + // DEBUG_ASSERT_EQ(node.get(), info.node); + return get_elapsed_time_ns(info.start_time); +} + +#ifndef NDEBUG +template < typename K, typename V > +void Btree< K, V >::check_lock_debug() { + // both wr_locked_nodes and rd_locked_nodes are thread_local; + // nothing will be dumpped if there is no assert failure; + for (const auto& x : bt_thread_vars()->wr_locked_nodes) { + x.dump(); + } + for (const auto& x : bt_thread_vars()->rd_locked_nodes) { + x.dump(); + } + DEBUG_ASSERT_EQ(bt_thread_vars()->wr_locked_nodes.size(), 0); + DEBUG_ASSERT_EQ(bt_thread_vars()->rd_locked_nodes.size(), 0); +} +#endif + +} // namespace btree +} // namespace sisl diff --git a/src/btree/btree_query_impl.ipp b/src/btree/btree_query_impl.ipp new file mode 100644 index 00000000..0cc65619 --- /dev/null +++ b/src/btree/btree_query_impl.ipp @@ -0,0 +1,360 @@ +#pragma once +#include "btree.hpp" + +namespace sisl { +namespace btree { + +template < typename K, typename V > +btree_status_t Btree< K, V >::do_sweep_query(BtreeNodePtr< K >& my_node, BtreeQueryRequest& qreq, + std::vector< std::pair< K, V > >& out_values) const { + btree_status_t ret = btree_status_t::success; + if (my_node->is_leaf()) { + BT_NODE_DBG_ASSERT_GT(qreq.batch_size(), 0, my_node); + + auto count = 0U; + BtreeNodePtr< K > next_node = nullptr; + + do { + if (next_node) { + unlock_node(my_node, locktype_t::READ); + my_node = next_node; + } + + BT_NODE_LOG(TRACE, my_node, "Query leaf node"); + + uint32_t start_ind = 0u, end_ind = 0u; + static thread_local std::vector< std::pair< K, V > > s_match_kvs; + + s_match_kvs.clear(); + auto cur_count = + my_node->get_all(qreq.next_range(), qreq.batch_size() - count, start_ind, end_ind, &s_match_kvs); + if (cur_count == 0) { + if (my_node->get_last_key().compare(qreq.input_range().end_key()) >= 0) { + // we've covered all lba range, we are done now; + break; + } + } else { + // fall through to visit siblings if we haven't covered lba range yet; + if (m_bt_cfg.is_custom_kv()) { + static thread_local std::vector< std::pair< K, V > > s_result_kvs; + s_result_kvs.clear(); + custom_kv_select_for_read(my_node->get_version(), s_match_kvs, s_result_kvs, qreq.next_range(), + qreq); + + auto ele_to_add = std::min((uint32_t)s_result_kvs.size(), qreq.batch_size()); + if (ele_to_add > 0) { + out_values.insert(out_values.end(), s_result_kvs.begin(), s_result_kvs.begin() + ele_to_add); + } + count += ele_to_add; + BT_NODE_DBG_ASSERT_LE(count, qreq.batch_size(), my_node); + } else { + out_values.insert(std::end(out_values), std::begin(s_match_kvs), std::end(s_match_kvs)); + count += cur_count; + } + } + + // if cur_count is 0, keep querying sibling nodes; + if (ret == btree_status_t::success && (count < qreq.batch_size())) { + if (my_node->next_bnode() == empty_bnodeid) { break; } + ret = read_and_lock_sibling(my_node->next_bnode(), next_node, locktype_t::READ, locktype_t::READ, + nullptr); + if (ret == btree_status_t::fast_path_not_possible) { break; } + + if (ret != btree_status_t::success) { + LOGERROR("read failed btree name {}", m_bt_cfg.name()); + break; + } + } else { + if (count >= qreq.batch_size()) { ret = btree_status_t::has_more; } + break; + } + } while (true); + + unlock_node(my_node, locktype_t::READ); + return ret; + } + + BtreeNodeInfo start_child_info; + const auto [isfound, idx] = my_node->find(qreq.next_key(), &start_child_info, false); + ASSERT_IS_VALID_INTERIOR_CHILD_INDX(isfound, idx, my_node); + + BtreeNodePtr< K > child_node; + ret = read_and_lock_child(start_child_info.bnode_id(), child_node, my_node, idx, locktype_t::READ, locktype_t::READ, + nullptr); + unlock_node(my_node, locktype_t::READ); + if (ret != btree_status_t::success) { return ret; } + return (do_sweep_query(child_node, qreq, out_values)); +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::do_traversal_query(const BtreeNodePtr< K >& my_node, BtreeQueryRequest& qreq, + std::vector< std::pair< K, V > >& out_values) const { + btree_status_t ret = btree_status_t::success; + uint32_t idx; + + if (my_node->is_leaf()) { + BT_NODE_LOG_ASSERT_GT(qreq.batch_size(), 0, my_node); + + uint32_t start_ind = 0, end_ind = 0; + + static thread_local std::vector< std::pair< K, V > > s_match_kvs; + s_match_kvs.clear(); + auto cur_count = my_node->get_all(qreq.next_range(), qreq.batch_size() - (uint32_t)out_values.size(), start_ind, + end_ind, &s_match_kvs); + + if (cur_count && m_bt_cfg.is_custom_kv()) { + static thread_local std::vector< std::pair< K, V > > s_result_kvs; + s_result_kvs.clear(); + custom_kv_select_for_read(my_node->get_version(), s_match_kvs, s_result_kvs, qreq.next_range(), qreq); + + auto ele_to_add = s_result_kvs.size(); + if (ele_to_add > 0) { + out_values.insert(out_values.end(), s_result_kvs.begin(), s_result_kvs.begin() + ele_to_add); + } + } + out_values.insert(std::end(out_values), std::begin(s_match_kvs), std::end(s_match_kvs)); + + unlock_node(my_node, locktype_t::READ); + if (ret != btree_status_t::success || out_values.size() >= qreq.batch_size()) { + if (out_values.size() >= qreq.batch_size()) { ret = btree_status_t::has_more; } + } + + return ret; + } + + const auto [start_isfound, start_idx] = my_node->find(qreq.next_key(), nullptr, false); + auto [end_is_found, end_idx] = my_node->find(qreq.input_range().end_key(), nullptr, false); + bool unlocked_already = false; + + if (start_idx == my_node->get_total_entries() && !(my_node->has_valid_edge())) { + goto done; // no results found + } else if (end_idx == my_node->get_total_entries() && !(my_node->has_valid_edge())) { + --end_idx; // end is not valid + } + + BT_NODE_LOG_ASSERT_LE(start_idx, end_idx, my_node); + idx = start_idx; + + while (idx <= end_idx) { + BtreeNodeInfo child_info; + my_node->get_nth_value(idx, &child_info, false); + BtreeNodePtr< K > child_node = nullptr; + locktype_t child_cur_lock = locktype_t::READ; + ret = read_and_lock_child(child_info.bnode_id(), child_node, my_node, idx, child_cur_lock, child_cur_lock, + nullptr); + if (ret != btree_status_t::success) { break; } + + if (idx == end_idx) { + // If we have reached the last index, unlock before traversing down, because we no longer need + // this lock. Holding this lock will impact performance unncessarily. + unlock_node(my_node, locktype_t::READ); + unlocked_already = true; + } + // TODO - pass sub range if child is leaf + ret = do_traversal_query(child_node, qreq, out_values); + if (ret == btree_status_t::has_more) { break; } + ++idx; + } +done: + if (!unlocked_already) { unlock_node(my_node, locktype_t::READ); } + + return ret; +} + +template < typename K, typename V > +btree_status_t +Btree< K, V >::custom_kv_select_for_read(uint8_t node_version, const std::vector< std::pair< K, V > >& match_kv, + std::vector< std::pair< K, V > >& replace_kv, const BtreeKeyRange& range, + const BtreeRangeRequest& qreq) const { + + replace_kv = match_kv; + return btree_status_t::success; +} + +#ifdef SERIALIZABLE_QUERY_IMPLEMENTATION +btree_status_t do_serialzable_query(const BtreeNodePtr< K >& my_node, BtreeSerializableQueryRequest& qreq, + std::vector< std::pair< K, V > >& out_values) { + + btree_status_t ret = btree_status_t::success; + if (my_node->is_leaf) { + auto count = 0; + auto start_result = my_node->find(qreq.get_start_of_range(), nullptr, nullptr); + auto start_ind = start_result.end_of_search_index; + + auto end_result = my_node->find(qreq.get_end_of_range(), nullptr, nullptr); + auto end_ind = end_result.end_of_search_index; + if (!end_result.found) { end_ind--; } // not found entries will point to 1 ind after last in range. + + ind = start_ind; + while ((ind <= end_ind) && (count < qreq.batch_size())) { + K key; + V value; + my_node->get_nth_element(ind, &key, &value, false); + + if (!qreq.m_match_item_cb || qreq.m_match_item_cb(key, value)) { + out_values.emplace_back(std::make_pair< K, V >(key, value)); + count++; + } + ind++; + } + + bool has_more = ((ind >= start_ind) && (ind < end_ind)); + if (!has_more) { + unlock_node(my_node, locktype_t::READ); + get_tracker(qreq)->pop(); + return success; + } + + return has_more; + } + + BtreeNodeId start_child_ptr, end_child_ptr; + auto start_ret = my_node->find(qreq.get_start_of_range(), nullptr, &start_child_ptr); + ASSERT_IS_VALID_INTERIOR_CHILD_INDX(start_ret, my_node); + auto end_ret = my_node->find(qreq.get_end_of_range(), nullptr, &end_child_ptr); + ASSERT_IS_VALID_INTERIOR_CHILD_INDX(end_ret, my_node); + + BtreeNodePtr< K > child_node; + if (start_ret.end_of_search_index == end_ret.end_of_search_index) { + BT_LOG_ASSERT_CMP(start_child_ptr, ==, end_child_ptr, my_node); + + ret = + read_and_lock_node(start_child_ptr.get_node_id(), child_node, locktype_t::READ, locktype_t::READ, nullptr); + if (ret != btree_status_t::success) { + unlock_node(my_node, locktype_t::READ); + return ret; + } + unlock_node(my_node, locktype_t::READ); + + // Pop the last node and push this child node + get_tracker(qreq)->pop(); + get_tracker(qreq)->push(child_node); + return do_serialzable_query(child_node, qreq, search_range, out_values); + } else { + // This is where the deviation of tree happens. Do not pop the node out of lock tracker + bool has_more = false; + + for (auto i = start_ret.end_of_search_index; i <= end_ret.end_of_search_index; i++) { + BtreeNodeId child_ptr; + my_node->get_nth_value(i, &child_ptr, false); + ret = read_and_lock_node(child_ptr.get_node_id(), child_node, locktype_t::READ, locktype_t::READ, nullptr); + if (ret != btree_status_t::success) { + unlock_node(my_node, locktype_t::READ); + return ret; + } + + get_tracker(qreq)->push(child_node); + + ret = do_serialzable_query(child_node, qreq, out_values); + if (ret == BTREE_AGAIN) { + BT_LOG_ASSERT_CMP(out_values.size(), ==, qreq.batch_size(), ); + break; + } + } + + if (ret == BTREE_SUCCESS) { + unlock_node(my_node, locktype_t::READ); + HS_DEBUG_ASSERT_EQ(get_tracker(qreq)->top(), my_node); + get_tracker(qreq)->pop(); + } + return ret; + } +} +#endif + +#ifdef SERIALIZABLE_QUERY_IMPLEMENTATION +btree_status_t sweep_query(BtreeQueryRequest& qreq, std::vector< std::pair< K, V > >& out_values) { + COUNTER_INCREMENT(m_metrics, btree_read_ops_count, 1); + qreq.init_batch_range(); + + m_btree_lock.lock_shared(); + + BtreeNodePtr< K > root; + btree_status_t ret = btree_status_t::success; + + ret = read_and_lock_root(m_root_node_id, root, locktype_t::READ, locktype_t::READ, nullptr); + if (ret != btree_status_t::success) { goto out; } + + ret = do_sweep_query(root, qreq, out_values); +out: + m_btree_lock.unlock_shared(); + +#ifndef NDEBUG + check_lock_debug(); +#endif + return ret; +} + +btree_status_t serializable_query(BtreeSerializableQueryRequest& qreq, std::vector< std::pair< K, V > >& out_values) { + qreq.init_batch_range(); + + m_btree_lock.lock_shared(); + BtreeNodePtr< K > node; + btree_status_t ret; + + if (qreq.is_empty_cursor()) { + // Initialize a new lock tracker and put inside the cursor. + qreq.cursor().m_locked_nodes = std::make_unique< BtreeLockTrackerImpl >(this); + + BtreeNodePtr< K > root; + ret = read_and_lock_root(m_root_node_id, root, locktype_t::READ, locktype_t::READ, nullptr); + if (ret != btree_status_t::success) { goto out; } + get_tracker(qreq)->push(root); // Start tracking the locked nodes. + } else { + node = get_tracker(qreq)->top(); + } + + ret = do_serialzable_query(node, qreq, out_values); +out: + m_btree_lock.unlock_shared(); + + // TODO: Assert if key returned from do_get is same as key requested, incase of perfect match + +#ifndef NDEBUG + check_lock_debug(); +#endif + + return ret; +} + +BtreeLockTrackerImpl* get_tracker(BtreeSerializableQueryRequest& qreq) { + return (BtreeLockTrackerImpl*)qreq->get_cursor.m_locked_nodes.get(); +} + +template < typename K, typename V > +class BtreeLockTrackerImpl : public BtreeLockTracker { +public: + BtreeLockTrackerImpl(btree_t* bt) : m_bt(bt) {} + + virtual ~BtreeLockTrackerImpl() { + while (m_nodes.size()) { + auto& p = m_nodes.top(); + m_bt->unlock_node(p.first, p.second); + m_nodes.pop(); + } + } + + void push(const BtreeNodePtr< K >& node, locktype_t locktype) { m_nodes.emplace(std::make_pair<>(node, locktype)); } + + std::pair< BtreeNodePtr< K >, locktype_t > pop() { + HS_ASSERT_CMP(DEBUG, m_nodes.size(), !=, 0); + std::pair< BtreeNodePtr< K >, locktype_t > p; + if (m_nodes.size()) { + p = m_nodes.top(); + m_nodes.pop(); + } else { + p = std::make_pair<>(nullptr, locktype_t::LOCKTYPE_NONE); + } + + return p; + } + + BtreeNodePtr< K > top() { return (m_nodes.size == 0) ? nullptr : m_nodes.top().first; } + +private: + btree_t m_bt; + std::stack< std::pair< BtreeNodePtr< K >, locktype_t > > m_nodes; +}; +#endif +} // namespace btree +} // namespace sisl diff --git a/src/btree/btree_remove_impl.ipp b/src/btree/btree_remove_impl.ipp new file mode 100644 index 00000000..b74d0d82 --- /dev/null +++ b/src/btree/btree_remove_impl.ipp @@ -0,0 +1,391 @@ +#pragma once +#include "btree.hpp" + +namespace sisl { +namespace btree { +template < typename K, typename V > +btree_status_t Btree< K, V >::do_remove(const BtreeNodePtr< K >& my_node, locktype_t curlock, + BtreeRemoveRequest& rreq) { + btree_status_t ret = btree_status_t::success; + if (my_node->is_leaf()) { + BT_NODE_DBG_ASSERT_EQ(curlock, locktype_t::WRITE, my_node); + +#ifndef NDEBUG + my_node->validate_key_order(); +#endif + bool is_found; + + if (is_remove_any_request(rreq)) { + is_found = my_node->remove_any(rreq.m_range, rreq.m_outkey.get(), rreq.m_outval.get()); + } else { + is_found = my_node->remove_one(rreq.key(), rreq.m_outkey.get(), rreq.m_outval.get()); + } +#ifndef NDEBUG + my_node->validate_key_order(); +#endif + if (is_found) { + write_node(my_node, nullptr, remove_req_op_ctx(rreq)); + COUNTER_DECREMENT(m_metrics, btree_obj_count, 1); + } + + unlock_node(my_node, curlock); + return is_found ? btree_status_t::success : btree_status_t::not_found; + } + +retry: + locktype_t child_cur_lock = locktype_t::NONE; + bool found; + uint32_t ind; + + // TODO: Range Delete support needs to be added here + // Get the childPtr for given key. + if (is_remove_any_request(rreq)) { + std::tie(found, ind) = my_node->find(to_remove_any_req(rreq).m_range.start_key(), &child_info, true); + } else { + std::tie(found, ind) = my_node->find(to_single_remove_req(rreq).key(), &child_info, true); + } + + ASSERT_IS_VALID_INTERIOR_CHILD_INDX(found, ind, my_node); + + BtreeNodeInfo child_info; + BtreeNodePtr< K > child_node; + ret = get_child_and_lock_node(my_node, ind, child_info, child_node, locktype_t::READ, locktype_t::WRITE); + if (ret != btree_status_t::success) { + unlock_node(my_node, curlock); + return ret; + } + + // Check if child node is minimal. + child_cur_lock = child_node->is_leaf() ? locktype_t::WRITE : locktype_t::READ; + if (child_node->is_merge_needed(m_bt_cfg)) { + // If we are unable to upgrade the node, ask the caller to retry. + ret = upgrade_node(my_node, child_node, curlock, child_cur_lock); + if (ret != btree_status_t::success) { + BT_NODE_DBG_ASSERT_EQ(curlock, locktype_t::NONE, my_node) + return ret; + } + BT_NODE_DBG_ASSERT_EQ(curlock, locktype_t::WRITE, my_node); + + uint32_t node_end_indx = + my_node->has_valid_edge() ? my_node->get_total_entries() : my_node->get_total_entries() - 1; + uint32_t end_ind = (ind + HS_DYNAMIC_CONFIG(btree->max_nodes_to_rebalance)) < node_end_indx + ? (ind + HS_DYNAMIC_CONFIG(btree->max_nodes_to_rebalance)) + : node_end_indx; + if (end_ind > ind) { + // It is safe to unlock child without upgrade, because child node would not be deleted, since its + // parent (myNode) is being write locked by this thread. In fact upgrading would be a problem, since + // this child might be a middle child in the list of indices, which means we might have to lock one + // in left against the direction of intended locking (which could cause deadlock). + unlock_node(child_node, child_cur_lock); + auto result = merge_nodes(my_node, ind, end_ind); + if (result != btree_status_t::success && result != btree_status_t::merge_not_required) { + // write or read failed + unlock_node(my_node, curlock); + return ret; + } + if (result == btree_status_t::success) { COUNTER_INCREMENT(m_metrics, btree_merge_count, 1); } + goto retry; + } + } + +#ifndef NDEBUG + if (ind != my_node->get_total_entries() && child_node->get_total_entries()) { // not edge + BT_NODE_DBG_ASSERT_LE(child_node->get_last_key().compare(my_node->get_nth_key(ind, false)), 0, my_node); + } + + if (ind > 0 && child_node->get_total_entries()) { // not first child + BT_NODE_DBG_ASSERT_LT(child_node->get_first_key().compare(my_node->get_nth_key(ind - 1, false)), 0, my_node); + } +#endif + + unlock_node(my_node, curlock); + return (do_remove(child_node, child_cur_lock, rreq)); + + // Warning: Do not access childNode or myNode beyond this point, since it would + // have been unlocked by the recursive function and it could also been deleted. +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::check_collapse_root(void* context) { + BtreeNodePtr< K > child_node = nullptr; + btree_status_t ret = btree_status_t::success; + std::vector< BtreeNodePtr< K > > old_nodes; + std::vector< BtreeNodePtr< K > > new_nodes; + + m_btree_lock.lock(); + BtreeNodePtr< K > root; + + ret = read_and_lock_root(m_root_node_id, root, locktype_t::WRITE, locktype_t::WRITE, context); + if (ret != btree_status_t::success) { goto done; } + + if (root->get_total_entries() != 0 || root->is_leaf() /*some other thread collapsed root already*/) { + unlock_node(root, locktype_t::WRITE); + goto done; + } + + BT_NODE_DBG_ASSERT_EQ(root->has_valid_edge(), true, root); + ret = read_node(root->get_edge_id(), child_node); + if (child_node == nullptr) { + unlock_node(root, locktype_t::WRITE); + goto done; + } + + // Elevate the edge child as root. + swap_node(root, child_node, context); + write_node(root, context); + BT_NODE_DBG_ASSERT_EQ(m_root_node_id, root->get_node_id(), root); + old_nodes.push_back(child_node); + + static thread_local std::vector< BtreeNodePtr< K > > s_nodes; + s_nodes.clear(); + s_nodes.push_back(child_node); + merge_node_precommit(true, nullptr, 0, root, &s_nodes, nullptr, context); + + unlock_node(root, locktype_t::WRITE); + free_node(child_node, context); + + if (ret == btree_status_t::success) { COUNTER_DECREMENT(m_metrics, btree_depth, 1); } +done: + m_btree_lock.unlock(); + return ret; +} + +template < typename K, typename V > +btree_status_t Btree< K, V >::merge_nodes(const BtreeNodePtr< K >& parent_node, uint32_t start_indx, uint32_t end_indx, + void* context) { + btree_status_t ret = btree_status_t::merge_failed; + std::vector< BtreeNodePtr< K > > child_nodes; + std::vector< BtreeNodePtr< K > > old_nodes; + std::vector< BtreeNodePtr< K > > replace_nodes; + std::vector< BtreeNodePtr< K > > new_nodes; + std::vector< BtreeNodePtr< K > > deleted_nodes; + BtreeNodePtr< K > left_most_node; + K last_pkey; // last key of parent node + bool last_pkey_valid = false; + uint32_t balanced_size; + BtreeNodePtr< K > merge_node; + K last_ckey; // last key in child + uint32_t parent_insert_indx = start_indx; +#ifndef NDEBUG + uint32_t total_child_entries = 0; + uint32_t new_entries = 0; + K last_debug_ckey; + K new_last_debug_ckey; + BtreeNodePtr< K > last_node; +#endif + /* Try to take a lock on all nodes participating in merge*/ + for (auto indx = start_indx; indx <= end_indx; ++indx) { + if (indx == parent_node->get_total_entries()) { + BT_NODE_LOG_ASSERT(parent_node->has_valid_edge(), parent_node, + "Assertion failure, expected valid edge for parent_node: {}"); + } + + BtreeNodeInfo child_info; + parent_node->get(indx, &child_info, false /* copy */); + + BtreeNodePtr< K > child; + ret = read_and_lock_node(child_info.bnode_id(), child, locktype_t::WRITE, locktype_t::WRITE, bcp); + if (ret != btree_status_t::success) { goto out; } + BT_NODE_LOG_ASSERT_EQ(child->is_valid_node(), true, child); + + /* check if left most node has space */ + if (indx == start_indx) { + balanced_size = m_bt_cfg.ideal_fill_size(); + left_most_node = child; + if (left_most_node->get_occupied_size(m_bt_cfg) > balanced_size) { + /* first node doesn't have any free space. we can exit now */ + ret = btree_status_t::merge_not_required; + goto out; + } + } else { + bool is_allocated = true; + /* pre allocate the new nodes. We will free the nodes which are not in use later */ + auto new_node = alloc_node(child->is_leaf(), is_allocated, child); + if (is_allocated) { + /* we are going to allocate new blkid of all the nodes except the first node. + * Note :- These blkids will leak if we fail or crash before writing entry into + * journal. + */ + old_nodes.push_back(child); + COUNTER_INCREMENT_IF_ELSE(m_metrics, child->is_leaf(), btree_leaf_node_count, btree_int_node_count, 1); + } + /* Blk IDs can leak if it crash before writing it to a journal */ + if (new_node == nullptr) { + ret = btree_status_t::space_not_avail; + goto out; + } + new_nodes.push_back(new_node); + } +#ifndef NDEBUG + total_child_entries += child->get_total_entries(); + child->get_last_key(&last_debug_ckey); +#endif + child_nodes.push_back(child); + } + + if (end_indx != parent_node->get_total_entries()) { + /* If it is not edge we always preserve the last key in a given merge group of nodes.*/ + parent_node->get_nth_key(end_indx, &last_pkey, true); + last_pkey_valid = true; + } + + merge_node = left_most_node; + /* We can not fail from this point. Nodes will be modified in memory. */ + for (uint32_t i = 0; i < new_nodes.size(); ++i) { + auto occupied_size = merge_node->get_occupied_size(m_bt_cfg); + if (occupied_size < balanced_size) { + uint32_t pull_size = balanced_size - occupied_size; + merge_node->move_in_from_right_by_size(m_bt_cfg, new_nodes[i], pull_size); + if (new_nodes[i]->get_total_entries() == 0) { + /* this node is freed */ + deleted_nodes.push_back(new_nodes[i]); + continue; + } + } + + /* update the last key of merge node in parent node */ + K last_ckey; // last key in child + merge_node->get_last_key(&last_ckey); + BtreeNodeInfo ninfo(merge_node->get_node_id()); + parent_node->update(parent_insert_indx, last_ckey, ninfo); + ++parent_insert_indx; + + merge_node->set_next_bnode(new_nodes[i]->get_node_id()); // link them + merge_node = new_nodes[i]; + if (merge_node != left_most_node) { + /* left most node is not replaced */ + replace_nodes.push_back(merge_node); + } + } + + /* update the latest merge node */ + merge_node->get_last_key(&last_ckey); + if (last_pkey_valid) { + BT_DBG_ASSERT_CMP(last_ckey.compare(&last_pkey), <=, 0, parent_node); + last_ckey = last_pkey; + } + + /* update the last key */ + { + BtreeNodeInfo ninfo(merge_node->get_node_id()); + parent_node->update(parent_insert_indx, last_ckey, ninfo); + ++parent_insert_indx; + } + + /* remove the keys which are no longer used */ + if ((parent_insert_indx) <= end_indx) { parent_node->remove(parent_insert_indx, end_indx); } + + // TODO: Validate if empty child_pkey on last_key or edge has any impact on journal/precommit + K child_pkey; + if (start_indx < parent_node->get_total_entries()) { + child_pkey = parent_node->get_nth_key(start_indx, true); + BT_NODE_REL_ASSERT_EQ(start_indx, (parent_insert_indx - 1), parent_node, "it should be last index"); + } + + merge_node_precommit(false, parent_node, parent_merge_start_idx, left_most_node, &old_nodes, &replace_nodes, + context); + +#if 0 + /* write the journal entry */ + if (BtreeStoreType == btree_store_type::SSD_BTREE) { + auto j_iob = btree_store_t::make_journal_entry(journal_op::BTREE_MERGE, false /* is_root */, bcp, + {parent_node->get_node_id(), parent_node->get_gen()}); + K child_pkey; + if (start_indx < parent_node->get_total_entries()) { + parent_node->get_nth_key(start_indx, &child_pkey, true); + BT_REL_ASSERT_CMP(start_indx, ==, (parent_insert_indx - 1), parent_node, "it should be last index"); + } + btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::inplace_write, left_most_node, bcp, + child_pkey.get_blob()); + for (auto& node : old_nodes) { + btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::removal, node, bcp); + } + uint32_t insert_indx = 0; + for (auto& node : replace_nodes) { + K child_pkey; + if ((start_indx + insert_indx) < parent_node->get_total_entries()) { + parent_node->get_nth_key(start_indx + insert_indx, &child_pkey, true); + BT_REL_ASSERT_CMP((start_indx + insert_indx), ==, (parent_insert_indx - 1), parent_node, + "it should be last index"); + } + btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::creation, node, bcp, + child_pkey.get_blob()); + ++insert_indx; + } + BT_REL_ASSERT_CMP((start_indx + insert_indx), ==, parent_insert_indx, parent_node, "it should be same"); + btree_store_t::write_journal_entry(m_btree_store.get(), bcp, j_iob); + } +#endif + + if (replace_nodes.size() > 0) { + /* write the right most node */ + write_node(replace_nodes[replace_nodes.size() - 1], nullptr, bcp); + if (replace_nodes.size() > 1) { + /* write the middle nodes */ + for (int i = replace_nodes.size() - 2; i >= 0; --i) { + write_node(replace_nodes[i], replace_nodes[i + 1], bcp); + } + } + /* write the left most node */ + write_node(left_most_node, replace_nodes[0], bcp); + } else { + /* write the left most node */ + write_node(left_most_node, nullptr, bcp); + } + + /* write the parent node */ + write_node(parent_node, left_most_node, bcp); + +#ifndef NDEBUG + for (const auto& n : replace_nodes) { + new_entries += n->get_total_entries(); + } + + new_entries += left_most_node->get_total_entries(); + BT_DBG_ASSERT_EQ(total_child_entries, new_entries); + + if (replace_nodes.size()) { + replace_nodes[replace_nodes.size() - 1]->get_last_key(&new_last_debug_ckey); + last_node = replace_nodes[replace_nodes.size() - 1]; + } else { + left_most_node->get_last_key(&new_last_debug_ckey); + last_node = left_most_node; + } + if (last_debug_ckey.compare(&new_last_debug_ckey) != 0) { + LOGINFO("{}", last_node->to_string()); + if (deleted_nodes.size() > 0) { LOGINFO("{}", (deleted_nodes[deleted_nodes.size() - 1]->to_string())); } + HS_DEBUG_ASSERT(false, "compared failed"); + } +#endif + /* free nodes. It actually gets freed after cp is completed */ + for (const auto& n : old_nodes) { + free_node(n, (bcp ? bcp->free_blkid_list : nullptr)); + } + for (const auto& n : deleted_nodes) { + free_node(n); + } + ret = btree_status_t::success; +out: +#ifndef NDEBUG + uint32_t freed_entries = deleted_nodes.size(); + uint32_t scan_entries = end_indx - start_indx - freed_entries + 1; + for (uint32_t i = 0; i < scan_entries; ++i) { + if (i < (scan_entries - 1)) { validate_sanity_next_child(parent_node, (uint32_t)start_indx + i); } + validate_sanity_child(parent_node, (uint32_t)start_indx + i); + } +#endif + // Loop again in reverse order to unlock the nodes. freeable nodes need to be unlocked and freed + for (uint32_t i = child_nodes.size() - 1; i != 0; i--) { + unlock_node(child_nodes[i], locktype_t::WRITE); + } + unlock_node(child_nodes[0], locktype_t::WRITE); + if (ret != btree_status_t::success) { + /* free the allocated nodes */ + for (const auto& n : new_nodes) { + free_node(n); + } + } + return ret; +} +} // namespace btree +} // namespace sisl diff --git a/src/btree/btree_req.hpp b/src/btree/btree_req.hpp new file mode 100644 index 00000000..d7b45e9a --- /dev/null +++ b/src/btree/btree_req.hpp @@ -0,0 +1,242 @@ +#pragma once +#include "btree_kv.hpp" +#include "fds/buffer.hpp" + +namespace sisl { +namespace btree { +// Base class for any btree operations +struct BtreeRequest { + BtreeRequest() = default; + BtreeRequest(void* app_ctx, void* op_ctx) : m_app_context{app_ctx}, m_op_context{op_ctx} {} + void* m_app_context{nullptr}; + void* m_op_context{nullptr}; +}; + +// Base class for all range related operations +struct BtreeRangeRequest : public BtreeRequest { +public: + const BtreeKeyRange& input_range() const { return m_search_state.input_range(); } + uint32_t batch_size() const { return m_batch_size; } + void set_batch_size(uint32_t count) { m_batch_size = count; } + + bool is_empty_cursor() const { + return ((m_search_state.const_cursor()->m_last_key == nullptr) && + (m_search_state.const_cursor()->m_locked_nodes == nullptr)); + } + + BtreeSearchState& search_state() { return m_search_state; } + BtreeQueryCursor* cursor() { return m_search_state.cursor(); } + const BtreeQueryCursor* const_cursor() const { return m_search_state.const_cursor(); } + BtreeKeyRange next_range() const { return m_search_state.next_range(); } + + const BtreeKeyRange& current_sub_range() const { return m_search_state.current_sub_range(); } + void set_current_sub_range(const BtreeKeyRange& new_sub_range) { + m_search_state.set_current_sub_range(new_sub_range); + } + const BtreeKey& next_key() const { return m_search_state.next_key(); } + +protected: + BtreeRangeRequest(BtreeSearchState&& search_state, void* app_context = nullptr, uint32_t batch_size = UINT32_MAX) : + BtreeRequest{app_context, nullptr}, m_search_state(std::move(search_state)), m_batch_size(UINT32_MAX) {} + +private: + BtreeSearchState m_search_state; + uint32_t m_batch_size{1}; +}; + +/////////////////////////// 1: Put Operations ///////////////////////////////////// +struct BtreeSinglePutRequest : public BtreeRequest { +public: + BtreeSinglePutRequest(std::unique_ptr< const BtreeKey > k, std::unique_ptr< const BtreeValue > v, + btree_put_type put_type, std::unique_ptr< BtreeValue > existing_val = nullptr) : + m_k{std::move(k)}, m_v{std::move(v)}, m_put_type{put_type}, m_existing_val{std::move(existing_val)} {} + + const BtreeKey& key() const { return *m_k; } + const BtreeValue& value() const { return *m_v; } + + std::unique_ptr< const BtreeKey > m_k; + std::unique_ptr< const BtreeValue > m_v; + btree_put_type m_put_type; + std::unique_ptr< BtreeValue > m_existing_val; +}; + +struct BtreeRangeUpdateRequest : public BtreeRangeRequest { +public: + BtreeRangeUpdateRequest(BtreeSearchState&& search_state, btree_put_type put_type, const BtreeValue& value, + void* app_context = nullptr, uint32_t batch_size = std::numeric_limits< uint32_t >::max()) : + BtreeRangeRequest(std::move(search_state), app_context, batch_size), + m_put_type{put_type}, + m_newval{value} {} + + const btree_put_type m_put_type{btree_put_type::INSERT_ONLY_IF_NOT_EXISTS}; + const BtreeValue& m_newval; +}; + +using BtreeMutateRequest = std::variant< BtreeSinglePutRequest, BtreeRangeUpdateRequest >; + +static bool is_range_update_req(BtreeMutateRequest& req) { + return (std::holds_alternative< BtreeRangeUpdateRequest >(req)); +} + +static BtreeRangeUpdateRequest& to_range_update_req(BtreeMutateRequest& req) { + return std::get< BtreeRangeUpdateRequest >(req); +} + +static BtreeSinglePutRequest& to_single_put_req(BtreeMutateRequest& req) { + return std::get< BtreeSinglePutRequest >(req); +} + +static void* put_req_op_ctx(BtreeMutateRequest& req) { + return (is_range_update_req(req)) ? to_range_update_req(req).m_op_context : to_single_put_req(req).m_op_context; +} + +/////////////////////////// 2: Remove Operations ///////////////////////////////////// +struct BtreeSingleRemoveRequest : public BtreeRequest { +public: + BtreeSingleRemoveRequest(std::unique_ptr< const BtreeKey > k, std::unique_ptr< BtreeValue > out_val) : + m_k{std::move(k)}, m_outval{std::move(out_val)} {} + + const BtreeKey& key() const { return *m_k; } + const BtreeValue& value() const { return *m_outval; } + + std::unique_ptr< const BtreeKey > m_k; + std::unique_ptr< BtreeValue > m_outval; +}; + +struct BtreeRemoveAnyRequest : public BtreeRequest { +public: + BtreeRemoveAnyRequest(BtreeKeyRange&& range, std::unique_ptr< BtreeKey > out_key, + std::unique_ptr< BtreeValue > out_val) : + m_range{std::move(range)}, m_outkey{std::move(out_key)}, m_outval{std::move(out_val)} {} + + BtreeKeyRange m_range; + std::unique_ptr< BtreeKey > m_outkey; + std::unique_ptr< BtreeValue > m_outval; +}; + +using BtreeRemoveRequest = std::variant< BtreeSingleRemoveRequest, BtreeRemoveAnyRequest >; + +static bool is_remove_any_request(BtreeRemoveRequest& req) { + return (std::holds_alternative< BtreeRemoveAnyRequest >(req)); +} + +static BtreeSingleRemoveRequest& to_single_remove_req(BtreeRemoveRequest& req) { + return std::get< BtreeSingleRemoveRequest >(req); +} + +static BtreeRemoveAnyRequest& to_remove_any_req(BtreeRemoveRequest& req) { + return std::get< BtreeRemoveAnyRequest >(req); +} + +static void* remove_req_op_ctx(BtreeRemoveRequest& req) { + return (is_remove_any_request(req)) ? to_remove_any_req(req).m_op_context : to_single_remove_req(req).m_op_context; +} + +/////////////////////////// 3: Get Operations ///////////////////////////////////// +struct BtreeSingleGetRequest : public BtreeRequest { +public: + BtreeSingleGetRequest(std::unique_ptr< const BtreeKey > k, std::unique_ptr< BtreeValue > out_val) : + m_k{std::move(k)}, m_outval{std::move(out_val)} {} + + const BtreeKey& key() const { return *m_k; } + const BtreeValue& value() const { return *m_outval; } + + std::unique_ptr< const BtreeKey > m_k; + std::unique_ptr< BtreeValue > m_outval; +}; + +struct BtreeGetAnyRequest : public BtreeRequest { +public: + BtreeGetAnyRequest(BtreeKeyRange&& range, std::unique_ptr< BtreeKey > out_key, + std::unique_ptr< BtreeValue > out_val) : + m_range{std::move(range)}, m_outkey{std::move(out_key)}, m_outval{std::move(out_val)} {} + + BtreeKeyRange m_range; + std::unique_ptr< BtreeKey > m_outkey; + std::unique_ptr< BtreeValue > m_outval; +}; + +using BtreeGetRequest = std::variant< BtreeSingleGetRequest, BtreeGetAnyRequest >; + +static bool is_get_any_request(BtreeGetRequest& req) { return (std::holds_alternative< BtreeGetAnyRequest >(req)); } + +static BtreeSingleGetRequest& to_single_get_req(BtreeGetRequest& req) { return std::get< BtreeSingleGetRequest >(req); } + +static BtreeGetAnyRequest& to_get_any_req(BtreeGetRequest& req) { return std::get< BtreeGetAnyRequest >(req); } + +static void* get_req_op_ctx(BtreeGetRequest& req) { + return (is_get_any_request(req)) ? to_get_any_req(req).m_op_context : to_single_get_req(req).m_op_context; +} + +/////////////////////////// 4 Range Query Operations ///////////////////////////////////// +ENUM(BtreeQueryType, uint8_t, + // This is default query which walks to first element in range, and then sweeps/walks + // across the leaf nodes. However, if upon pagination, it again walks down the query from + // the key it left off. + SWEEP_NON_INTRUSIVE_PAGINATION_QUERY, + + // Similar to sweep query, except that it retains the node and its lock during + // pagination. This is more of intrusive query and if the caller is not careful, the read + // lock will never be unlocked and could cause deadlocks. Use this option carefully. + SWEEP_INTRUSIVE_PAGINATION_QUERY, + + // This is relatively inefficient query where every leaf node goes from its parent node + // instead of walking the leaf node across. This is useful only if we want to check and + // recover if parent and leaf node are in different generations or crash recovery cases. + TREE_TRAVERSAL_QUERY, + + // This is both inefficient and quiet intrusive/unsafe query, where it locks the range + // that is being queried for and do not allow any insert or update within that range. It + // essentially create a serializable level of isolation. + SERIALIZABLE_QUERY) + +struct BtreeQueryRequest : public BtreeRangeRequest { +public: + /* TODO :- uint32_max to c++. pass reference */ + BtreeQueryRequest(BtreeSearchState&& search_state, + BtreeQueryType query_type = BtreeQueryType::SWEEP_NON_INTRUSIVE_PAGINATION_QUERY, + uint32_t batch_size = UINT32_MAX, void* app_context = nullptr) : + BtreeRangeRequest(std::move(search_state), app_context, batch_size), m_query_type(query_type) {} + ~BtreeQueryRequest() = default; + + // virtual bool is_serializable() const = 0; + BtreeQueryType query_type() const { return m_query_type; } + +protected: + const BtreeQueryType m_query_type; // Type of the query +}; + +/* This class is a top level class to keep track of the locks that are held currently. It is + * used for serializabke query to unlock all nodes in right order at the end of the lock */ +class BtreeLockTracker { +public: + virtual ~BtreeLockTracker() = default; +}; + +#if 0 +class BtreeSweepQueryRequest : public BtreeQueryRequest { +public: + BtreeSweepQueryRequest(const BtreeSearchRange& criteria, uint32_t iter_count = 1000, + const match_item_cb_t& match_item_cb = nullptr) : + BtreeQueryRequest(criteria, iter_count, match_item_cb) {} + + BtreeSweepQueryRequest(const BtreeSearchRange &criteria, const match_item_cb_t& match_item_cb) : + BtreeQueryRequest(criteria, 1000, match_item_cb) {} + + bool is_serializable() const { return false; } +}; + +class BtreeSerializableQueryRequest : public BtreeQueryRequest { +public: + BtreeSerializableQueryRequest(const BtreeSearchRange &range, uint32_t iter_count = 1000, + const match_item_cb_t& match_item_cb = nullptr) : + BtreeQueryRequest(range, iter_count, match_item_cb) {} + + BtreeSerializableQueryRequest(const BtreeSearchRange &criteria, const match_item_cb_t& match_item_cb) : + BtreeSerializableQueryRequest(criteria, 1000, match_item_cb) {} + + bool is_serializable() const { return true; } +}; +#endif +} // namespace btree +} // namespace sisl diff --git a/src/btree/hs_btree.hpp b/src/btree/hs_btree.hpp new file mode 100644 index 00000000..626e5b61 --- /dev/null +++ b/src/btree/hs_btree.hpp @@ -0,0 +1,396 @@ +#pragma once + +namespace sisl { +template < typename K, typename V > +class HSBtree : public Btree< K, V > { + static btree_t* create_btree(const btree_super_block& btree_sb, const BtreeConfig& cfg, btree_cp_sb* cp_sb, + const split_key_callback& split_key_cb) { + Btree* bt = new Btree(cfg); + auto impl_ptr = btree_store_t::init_btree(bt, cfg); + bt->m_btree_store = std::move(impl_ptr); + bt->init_recovery(btree_sb, cp_sb, split_key_cb); + LOGINFO("btree recovered and created {}, node size {}", cfg.get_name(), cfg.get_node_size()); + return bt; + } + + void init(bool recovery) { + m_total_nodes = m_last_cp_sb.btree_size; + m_bt_store->update_sb(m_sb, &m_last_cp_sb, is_recovery); + create_root_node(); + } + + void init_recovery(const btree_super_block& btree_sb, btree_cp_sb* cp_sb, const split_key_callback& split_key_cb) { + m_sb = btree_sb; + m_split_key_cb = split_key_cb; + if (cp_sb) { memcpy(&m_last_cp_sb, cp_sb, sizeof(m_last_cp_sb)); } + do_common_init(true); + m_root_node_id = m_sb.root_node; + } + + /* It is called when its btree consumer has successfully stored the btree superblock */ + void create_done() { btree_store_t::create_done(m_btree_store.get(), m_root_node_id); } + void destroy_done() { btree_store_t::destroy_done(m_btree_store.get()); } + + void replay_done(const btree_cp_ptr& bcp) { + m_total_nodes = m_last_cp_sb.btree_size + bcp->btree_size.load(); + BT_LOG(INFO, base, , "total btree nodes {}", m_total_nodes); + } + + btree_status_t free_btree(const BtreeNodePtr< K >& start_node, blkid_list_ptr free_blkid_list, bool in_mem, + uint64_t& free_node_cnt) { + // TODO - this calls free node on mem_tree and ssd_tree. + // In ssd_tree we free actual block id, which is not correct behavior + // we shouldnt really free any blocks on free node, just reclaim any memory + // occupied by ssd_tree structure in memory. Ideally we should have sepearte + // api like deleteNode which should be called instead of freeNode + const auto ret = post_order_traversal( + start_node, [this, free_blkid_list, in_mem, &free_node_cnt](const BtreeNodePtr< K >& node) { + free_node(node, free_blkid_list, in_mem); + ++free_node_cnt; + }); + return ret; + } + + /* It attaches the new CP and prepare for cur cp flush */ + btree_cp_ptr attach_prepare_cp(const btree_cp_ptr& cur_bcp, bool is_last_cp, bool blkalloc_checkpoint) { + return (btree_store_t::attach_prepare_cp(m_btree_store.get(), cur_bcp, is_last_cp, blkalloc_checkpoint)); + } + + void cp_start(const btree_cp_ptr& bcp, cp_comp_callback cb) { + btree_store_t::cp_start(m_btree_store.get(), bcp, cb); + } + + std::string get_cp_flush_status(const btree_cp_ptr& bcp) const { + return (btree_store_t::get_cp_flush_status(m_btree_store.get(), bcp)); + } + + void truncate(const btree_cp_ptr& bcp) { btree_store_t::truncate(m_btree_store.get(), bcp); } + + /* It is called before superblock is persisted for each CP */ + void update_btree_cp_sb(const btree_cp_ptr& bcp, btree_cp_sb& btree_sb, bool is_blkalloc_cp) { + btree_sb.active_seqid = bcp->end_seqid; + btree_sb.blkalloc_cp_id = is_blkalloc_cp ? bcp->cp_id : m_last_cp_sb.blkalloc_cp_id; + btree_sb.btree_size = bcp->btree_size.load() + m_last_cp_sb.btree_size; + btree_sb.cp_id = bcp->cp_id; + HS_DEBUG_ASSERT_EQ((int64_t)m_last_cp_sb.cp_id, (int64_t)bcp->cp_id - 1); + memcpy(&m_last_cp_sb, &btree_sb, sizeof(m_last_cp_sb)); + } + + void flush_free_blks(const btree_cp_ptr& bcp, std::shared_ptr< homestore::blkalloc_cp >& ba_cp) { + btree_store_t::flush_free_blks(m_btree_store.get(), bcp, ba_cp); + } + + /** + * @brief : verify the btree node is corrupted or not; + * + * Note: this function should never assert, but only return success or failure since it is in verification mode; + * + * @param bnodeid : node id + * @param parent_node : parent node ptr + * @param indx : index within thie node; + * @param update_debug_bm : true or false; + * + * @return : true if this node including all its children are not corrupted; + * false if not; + */ + template < typename K, typename V > + bool Btree< K, V >::verify_node(bnodeid_t bnodeid, BtreeNodePtr< K > parent_node, uint32_t indx, + bool update_debug_bm) { + locktype_t acq_lock = locktype_t::READ; + BtreeNodePtr< K > my_node; + if (read_and_lock_node(bnodeid, my_node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { + LOGINFO("read node failed"); + return false; + } + if (update_debug_bm && + (btree_store_t::update_debug_bm(m_btree_store.get(), my_node) != btree_status_t::success)) { + LOGERROR("bitmap update failed for node {}", my_node->to_string()); + return false; + } + + K prev_key; + bool success = true; + for (uint32_t i = 0; i < my_node->get_total_entries(); ++i) { + K key; + my_node->get_nth_key(i, &key, false); + if (!my_node->is_leaf()) { + BtreeNodeInfo child; + my_node->get(i, &child, false); + success = verify_node(child.bnode_id(), my_node, i, update_debug_bm); + if (!success) { goto exit_on_error; } + + if (i > 0) { + BT_LOG_ASSERT_CMP(prev_key.compare(&key), <, 0, my_node); + if (prev_key.compare(&key) >= 0) { + success = false; + goto exit_on_error; + } + } + } + if (my_node->is_leaf() && i > 0) { + BT_LOG_ASSERT_CMP(prev_key.compare_start(&key), <, 0, my_node); + if (prev_key.compare_start(&key) >= 0) { + success = false; + goto exit_on_error; + } + } + prev_key = key; + } + + if (my_node->is_leaf() && my_node->get_total_entries() == 0) { + /* this node has zero entries */ + goto exit_on_error; + } + if (parent_node && parent_node->get_total_entries() != indx) { + K parent_key; + parent_node->get_nth_key(indx, &parent_key, false); + + K last_key; + my_node->get_nth_key(my_node->get_total_entries() - 1, &last_key, false); + if (!my_node->is_leaf()) { + BT_LOG_ASSERT_CMP(last_key.compare(&parent_key), ==, 0, parent_node, + "last key {} parent_key {} child {}", last_key.to_string(), parent_key.to_string(), + my_node->to_string()); + if (last_key.compare(&parent_key) != 0) { + success = false; + goto exit_on_error; + } + } else { + BT_LOG_ASSERT_CMP(last_key.compare(&parent_key), <=, 0, parent_node, + "last key {} parent_key {} child {}", last_key.to_string(), parent_key.to_string(), + my_node->to_string()); + if (last_key.compare(&parent_key) > 0) { + success = false; + goto exit_on_error; + } + BT_LOG_ASSERT_CMP(parent_key.compare_start(&last_key), >=, 0, parent_node, + "last key {} parent_key {} child {}", last_key.to_string(), parent_key.to_string(), + my_node->to_string()); + if (parent_key.compare_start(&last_key) < 0) { + success = false; + goto exit_on_error; + } + } + } + + if (parent_node && indx != 0) { + K parent_key; + parent_node->get_nth_key(indx - 1, &parent_key, false); + + K first_key; + my_node->get_nth_key(0, &first_key, false); + BT_LOG_ASSERT_CMP(first_key.compare(&parent_key), >, 0, parent_node, "my node {}", my_node->to_string()); + if (first_key.compare(&parent_key) <= 0) { + success = false; + goto exit_on_error; + } + + BT_LOG_ASSERT_CMP(parent_key.compare_start(&first_key), <, 0, parent_node, "my node {}", + my_node->to_string()); + if (parent_key.compare_start(&first_key) > 0) { + success = false; + goto exit_on_error; + } + } + + if (my_node->has_valid_edge()) { + success = verify_node(my_node->get_edge_id(), my_node, my_node->get_total_entries(), update_debug_bm); + if (!success) { goto exit_on_error; } + } + + exit_on_error: + unlock_node(my_node, acq_lock); + return success; + } + + btree_status_t create_btree_replay(btree_journal_entry* jentry, const btree_cp_ptr& bcp) { + if (jentry) { + BT_DBG_ASSERT_CMP(jentry->is_root, ==, true, , + "Expected create_btree_replay entry to be root journal entry"); + BT_DBG_ASSERT_CMP(jentry->parent_node.get_id(), ==, m_root_node_id, , "Root node journal entry mismatch"); + } + + // Create a root node by reserving the leaf node + BtreeNodePtr< K > root = reserve_leaf_node(BlkId(m_root_node_id)); + auto ret = write_node(root, nullptr, bcp); + BT_DBG_ASSERT_CMP(ret, ==, btree_status_t::success, , "expecting success in writing root node"); + return btree_status_t::success; + } + + btree_status_t split_node_replay(btree_journal_entry* jentry, const btree_cp_ptr& bcp) { + bnodeid_t id = jentry->is_root ? m_root_node_id : jentry->parent_node.node_id; + BtreeNodePtr< K > parent_node; + + // read parent node + read_node_or_fail(id, parent_node); + + // Parent already went ahead of the journal entry, return done + if (parent_node->get_gen() >= jentry->parent_node.node_gen) { + BT_LOG(INFO, base, , "Journal replay: parent_node gen {} ahead of jentry gen {} is root {} , skipping ", + parent_node->get_gen(), jentry->parent_node.get_gen(), jentry->is_root); + return btree_status_t::replay_not_needed; + } + + // Read the first inplace write node which is the leftmost child and also form child split key from journal + auto j_child_nodes = jentry->get_nodes(); + + BtreeNodePtr< K > child_node1; + if (jentry->is_root) { + // If root is not written yet, parent_node will be pointing child_node1, so create a new parent_node to + // be treated as root here on. + child_node1 = reserve_interior_node(BlkId(j_child_nodes[0]->node_id())); + btree_store_t::swap_node(m_btree_store.get(), parent_node, child_node1); + + BT_LOG(INFO, btree_generics, , + "Journal replay: root split, so creating child_node id={} and swapping the node with " + "parent_node id={} names {}", + child_node1->get_node_id(), parent_node->get_node_id(), m_cfg.name()); + + } else { + read_node_or_fail(j_child_nodes[0]->node_id(), child_node1); + } + + THIS_BT_LOG(INFO, btree_generics, , + "Journal replay: child_node1 => jentry: [id={} gen={}], ondisk: [id={} gen={}] names {}", + j_child_nodes[0]->node_id(), j_child_nodes[0]->node_gen(), child_node1->get_node_id(), + child_node1->get_gen(), m_cfg.name()); + if (jentry->is_root) { + BT_RELEASE_ASSERT_CMP(j_child_nodes[0]->type, ==, bt_journal_node_op::creation, , + "Expected first node in journal entry to be new creation for root split"); + } else { + BT_RELEASE_ASSERT_CMP(j_child_nodes[0]->type, ==, bt_journal_node_op::inplace_write, , + "Expected first node in journal entry to be in-place write"); + } + BT_RELEASE_ASSERT_CMP(j_child_nodes[1]->type, ==, bt_journal_node_op::creation, , + "Expected second node in journal entry to be new node creation"); + + // recover child node + bool child_split = recover_child_nodes_in_split(child_node1, j_child_nodes, bcp); + + // recover parent node + recover_parent_node_in_split(parent_node, child_split ? child_node1 : nullptr, j_child_nodes, bcp); + return btree_status_t::success; + } + + bool recover_child_nodes_in_split(const BtreeNodePtr< K >& child_node1, + const std::vector< bt_journal_node_info* >& j_child_nodes, + const btree_cp_ptr& bcp) { + + BtreeNodePtr< K > child_node2; + // Check if child1 is ahead of the generation + if (child_node1->get_gen() >= j_child_nodes[0]->node_gen()) { + // leftmost_node is written, so right node must have been written as well. + read_node_or_fail(child_node1->next_bnode(), child_node2); + + // sanity check for right node + BT_RELEASE_ASSERT_CMP(child_node2->get_gen(), >=, j_child_nodes[1]->node_gen(), child_node2, + "gen cnt should be more than the journal entry"); + // no need to recover child nodes + return false; + } + + K split_key; + split_key.set_blob({j_child_nodes[0]->key_area(), j_child_nodes[0]->key_size}); + child_node2 = child_node1->is_leaf() ? reserve_leaf_node(BlkId(j_child_nodes[1]->node_id())) + : reserve_interior_node(BlkId(j_child_nodes[1]->node_id())); + + // We need to do split based on entries since the left children is also not written yet. + // Find the split key within the child_node1. It is not always found, so we split upto that. + auto ret = child_node1->find(split_key, nullptr, false); + + // sanity check for left mode node before recovery + { + if (!ret.found) { + if (!child_node1->is_leaf()) { + BT_RELEASE_ASSERT(0, , "interior nodes should always have this key if it is written yet"); + } + } + } + + THIS_BT_LOG(INFO, btree_generics, , "Journal replay: split key {}, split indx {} child_node1 {}", + split_key.to_string(), ret.end_of_search_index, child_node1->to_string()); + /* if it is not found than end_of_search_index points to first ind which is greater than split key */ + auto split_ind = ret.end_of_search_index; + if (ret.found) { ++split_ind; } // we don't want to move split key */ + if (child_node1->is_leaf() && split_ind < (int)child_node1->get_total_entries()) { + K key; + child_node1->get_nth_key(split_ind, &key, false); + + if (split_key.compare_start(&key) >= 0) { /* we need to split the key range */ + THIS_BT_LOG(INFO, btree_generics, , "splitting a leaf node key {}", key.to_string()); + V v; + child_node1->get_nth_value(split_ind, &v, false); + vector< pair< K, V > > replace_kv; + child_node1->remove(split_ind, split_ind); + m_split_key_cb(key, v, split_key, replace_kv); + for (auto& pair : replace_kv) { + auto status = child_node1->insert(pair.first, pair.second); + BT_RELEASE_ASSERT((status == btree_status_t::success), child_node1, "unexpected insert failure"); + } + auto ret = child_node1->find(split_key, nullptr, false); + BT_RELEASE_ASSERT((ret.found && (ret.end_of_search_index == split_ind)), child_node1, + "found new indx {}, old split indx{}", ret.end_of_search_index, split_ind); + ++split_ind; + } + } + child_node1->move_out_to_right_by_entries(m_cfg, child_node2, child_node1->get_total_entries() - split_ind); + + child_node2->set_next_bnode(child_node1->next_bnode()); + child_node2->set_gen(j_child_nodes[1]->node_gen()); + + child_node1->set_next_bnode(child_node2->get_node_id()); + child_node1->set_gen(j_child_nodes[0]->node_gen()); + + THIS_BT_LOG(INFO, btree_generics, , "Journal replay: child_node2 {}", child_node2->to_string()); + write_node(child_node2, nullptr, bcp); + write_node(child_node1, child_node2, bcp); + return true; + } + + void recover_parent_node_in_split(const BtreeNodePtr< K >& parent_node, const BtreeNodePtr< K >& child_node1, + std::vector< bt_journal_node_info* >& j_child_nodes, const btree_cp_ptr& bcp) { + + // find child_1 key + K child1_key; // we need to insert child1_key + BT_RELEASE_ASSERT_CMP(j_child_nodes[0]->key_size, !=, 0, , "key size of left mode node is zero"); + child1_key.set_blob({j_child_nodes[0]->key_area(), j_child_nodes[0]->key_size}); + auto child1_node_id = j_child_nodes[0]->node_id(); + + // find split indx + auto ret = parent_node->find(child1_key, nullptr, false); + BT_RELEASE_ASSERT_CMP(ret.found, ==, false, , "child_1 key should not be in this parent"); + auto split_indx = ret.end_of_search_index; + + // find child2_key + K child2_key; // we only need to update child2_key to new node + if (j_child_nodes[1]->key_size != 0) { + child2_key.set_blob({j_child_nodes[1]->key_area(), j_child_nodes[1]->key_size}); + ret = parent_node->find(child2_key, nullptr, false); + BT_RELEASE_ASSERT_CMP(split_indx, ==, ret.end_of_search_index, , "it should be same as split index"); + } else { + // parent should be valid edge it is not a root split + } + auto child2_node_id = j_child_nodes[1]->node_id(); + + // update child2_key value + BtreeNodeInfo ninfo; + ninfo.set_bnode_id(child2_node_id); + parent_node->update(split_indx, ninfo); + + // insert child 1 + ninfo.set_bnode_id(child1_node_id); + K out_split_end_key; + out_split_end_key.copy_end_key_blob(child1_key.get_blob()); + parent_node->insert(out_split_end_key, ninfo); + + // Write the parent node + write_node(parent_node, child_node1, bcp); + + /* do sanity check after recovery split */ + { + validate_sanity_child(parent_node, split_indx); + validate_sanity_next_child(parent_node, split_indx); + } + } +}; +} // namespace sisl diff --git a/src/btree/mem_btree.hpp b/src/btree/mem_btree.hpp new file mode 100644 index 00000000..5bd7eb70 --- /dev/null +++ b/src/btree/mem_btree.hpp @@ -0,0 +1,100 @@ +#pragma once +#include "btree.ipp" + +namespace sisl { +namespace btree { +#ifdef INCASE_WE_NEED_COMMON +// Common class for all membtree's +template < typename K, typename V > +class MemBtreeCommon : public BtreeCommon< K, V > { +public: + void deref_node(BtreeNode< K >* node) override { + if (node->m_refcount.decrement_testz()) { + delete node->m_node_buf; + delete node; + } + } +}; + +MemBtree(BtreeConfig& cfg) : Btree(update_node_area_size(cfg)) { + Btree< K, V >::create_store_common(btree_store_type::MEM, []() { return std::make_shared< MemBtreeCommon >(); }); +} +#endif + +template < typename K, typename V > +class MemBtree : public Btree< K, V > { +public: + MemBtree(const BtreeConfig& cfg) : Btree< K, V >(cfg) { + BT_LOG(INFO, "New {} being created: Node size {}", btree_store_type(), cfg.node_size()); + } + + virtual ~MemBtree() { + const auto [ret, free_node_cnt] = this->destroy_btree(nullptr); + BT_LOG_ASSERT_EQ(ret, btree_status_t::success, "btree destroy failed"); + } + + std::string btree_store_type() const override { return "MEM_BTREE"; } + +private: + BtreeNodePtr< K > alloc_node(bool is_leaf, bool& is_new_allocation, /* is alloced same as copy_from */ + const BtreeNodePtr< K >& copy_from = nullptr) override { + if (copy_from != nullptr) { + is_new_allocation = false; + return copy_from; + } + + is_new_allocation = true; + uint8_t* node_buf = new uint8_t[this->m_bt_cfg.node_size()]; + auto new_node = this->init_node(node_buf, bnodeid_t{0}, true, is_leaf); + new_node->set_node_id(bnodeid_t{r_cast< std::uintptr_t >(new_node)}); + new_node->m_refcount.increment(); + return BtreeNodePtr< K >{new_node}; + } + + btree_status_t read_node(bnodeid_t id, BtreeNodePtr< K >& bnode) const override { + bnode = BtreeNodePtr< K >{r_cast< BtreeNode< K >* >(id)}; + return btree_status_t::success; + } + + void swap_node(const BtreeNodePtr< K >& node1, const BtreeNodePtr< K >& node2, void* context) override { + std::swap(node1->m_phys_node_buf, node2->m_phys_node_buf); + } + + btree_status_t refresh_node(const BtreeNodePtr< K >& bn, bool is_write_modifiable, void* context) const override { + return btree_status_t::success; + } + + void free_node(const BtreeNodePtr< K >& node, void* context) override { this->do_free_node(node); } + + void create_tree_precommit(const BtreeNodePtr< K >& root_node, void* op_context) override {} + void split_node_precommit(const BtreeNodePtr< K >& parent_node, const BtreeNodePtr< K >& child_node1, + const BtreeNodePtr< K >& child_node2, bool root_split, bool edge_split, + void* context) override {} + + void merge_node_precommit(bool is_root_merge, const BtreeNodePtr< K >& parent_node, uint32_t parent_merge_start_idx, + const BtreeNodePtr< K >& child_node1, + const std::vector< BtreeNodePtr< K > >* old_child_nodes, + const std::vector< BtreeNodePtr< K > >* replace_child_nodes, void* op_context) override {} +#if 0 + static void ref_node(MemBtreeNode* bn) { + auto mbh = (mem_btree_node_header*)bn; + LOGMSG_ASSERT_EQ(mbh->magic, 0xDEADBEEF, "Invalid Magic for Membtree node {}, Metrics {}", bn->to_string(), + sisl::MetricsFarm::getInstance().get_result_in_json_string()); + mbh->refcount.increment(); + } + + static void deref_node(MemBtreeNode* bn) { + auto mbh = (mem_btree_node_header*)bn; + LOGMSG_ASSERT_EQ(mbh->magic, 0xDEADBEEF, "Invalid Magic for Membtree node {}, Metrics {}", bn->to_string(), + sisl::MetricsFarm::getInstance().get_result_in_json_string()); + if (mbh->refcount.decrement_testz()) { + mbh->magic = 0; + bn->~MemBtreeNode(); + deallocate_mem((uint8_t*)bn); + } + } +#endif +}; + +} // namespace btree +} // namespace sisl diff --git a/src/btree/rough/btree_node.cpp b/src/btree/rough/btree_node.cpp new file mode 100644 index 00000000..d697adfc --- /dev/null +++ b/src/btree/rough/btree_node.cpp @@ -0,0 +1,364 @@ +/* + * physical_node.cpp + * + * Created on: 16-May-2016 + * Author: Hari Kadayam + * + * Copyright © 2016 Kadayam, Hari. All rights reserved. + */ +#include +#include "btree_node.hpp" + +namespace sisl { +BtreeNode::BtreeNode(uint8_t* node_buf, bnodeid_t id, bool init) : m_phys_node_buf{node_buf} { + if (init) { + set_magic(); + init_checksum(); + set_leaf(true); + set_total_entries(0); + set_next_bnode(empty_bnodeid); + set_gen(0); + set_valid_node(true); + set_edge_id(empty_bnodeid); + set_node_id(id); + } else { + DEBUG_ASSERT_EQ(get_node_id(), id); + DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); + DEBUG_ASSERT_EQ(get_version(), BTREE_NODE_VERSION); + } +} + +node_find_result_t BtreeNode::find(const BtreeSearchRange& range, BtreeKey* outkey, BtreeValue* outval, bool copy_key, + bool copy_val) const { + LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "Magic mismatch on btree_node {}", m_pers_header.to_string()); + + auto result = bsearch_node(range); + if (result.end_of_search_index == (int)get_total_entries() && !has_valid_edge()) { + assert(!result.found); + return result; + } + + if (get_total_entries() == 0) { + assert(has_valid_edge() || is_leaf()); + if (is_leaf()) { + /* Leaf doesn't have any elements */ + return result; + } + } + + if (outval) { get(result.end_of_search_index, outval, copy_val /* copy */); } + if (!range.is_simple_search() && outkey) { get_nth_key(result.end_of_search_index, outkey, copy_key /* copy */); } + return result; +} + +node_find_result_t BtreeNode::find(const BtreeKey& find_key, BtreeValue* outval, bool copy_val) const { + return find(BtreeSearchRange(find_key), nullptr, outval, false, copy_val); +} + +uint32_t BtreeNode::get_all(const BtreeSearchRange& range, uint32_t max_count, int& start_ind, int& end_ind, + std::vector< std::pair< K, V > >* out_values = nullptr) const { + LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "Magic mismatch on btree_node {}", m_pers_header.to_string()); + auto count = 0U; + + // Get the start index of the search range. + BtreeSearchRange sr = range.get_start_of_range(); + sr.set_multi_option(MultiMatchOption::DO_NOT_CARE); + + auto result = bsearch_node(sr); // doing bsearch only based on start key + // at this point start index will point to exact found or element after that + start_ind = result.end_of_search_index; + + if (!range.is_start_inclusive()) { + if (start_ind < (int)get_total_entries()) { + /* start is not inclusive so increment the start_ind if it is same as this key */ + int x = to_variant_node_const()->compare_nth_key(*range.get_start_key(), start_ind); + if (x == 0) { start_ind++; } + } else { + assert(is_leaf() || has_valid_edge()); + } + } + + if (start_ind == (int)get_total_entries() && is_leaf()) { + end_ind = start_ind; + return 0; // no result found + } + + assert((start_ind < (int)get_total_entries()) || has_valid_edge()); + + // search by the end index + BtreeSearchRange er = range.get_end_of_range(); + er.set_multi_option(MultiMatchOption::DO_NOT_CARE); + result = bsearch_node(er); // doing bsearch only based on end key + end_ind = result.end_of_search_index; + + assert(start_ind <= end_ind); + + /* we don't support end exclusive */ + assert(range.is_end_inclusive()); + + if (end_ind == (int)get_total_entries() && !has_valid_edge()) { --end_ind; } + + if (is_leaf()) { + /* Decrement the end indx if range doesn't overlap with the start of key at end indx */ + sisl::blob blob; + K key; + get_nth_key(end_ind, &key, false); + + if ((range.get_start_key())->compare_start(&key) < 0 && ((range.get_end_key())->compare_start(&key)) < 0) { + if (start_ind == end_ind) { + /* no match */ + return 0; + } + --end_ind; + } + } + + assert(start_ind <= end_ind); + count = end_ind - start_ind + 1; + if (count > max_count) { count = max_count; } + + /* We should always find the entries in interior node */ + assert(start_ind < (int)get_total_entries() || has_valid_edge()); + assert(end_ind < (int)get_total_entries() || has_valid_edge()); + + if (out_values == nullptr) { return count; } + + /* get the keys and values */ + for (auto i = start_ind; i < (int)(start_ind + count); ++i) { + K key; + V value; + if (i == (int)get_total_entries() && !is_leaf()) + get_edge_value(&value); // invalid key in case of edge entry for internal node + else { + get_nth_key(i, &key, true); + get_nth_value(i, &value, true); + } + out_values->emplace_back(std::make_pair<>(key, value)); + } + return count; +} + +bool BtreeNode::put(const BtreeKey& key, const BtreeValue& val, btree_put_type put_type, BtreeValue& existing_val) { + LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "Magic mismatch on btree_node {}", m_pers_header.to_string()); + auto result = find(key, nullptr, nullptr); + bool ret = true; + + if (put_type == btree_put_type::INSERT_ONLY_IF_NOT_EXISTS) { + if (result.found) { + LOGINFO("entry already exist"); + return false; + } + insert(result.end_of_search_index, key, val); + } else if (put_type == btree_put_type::REPLACE_ONLY_IF_EXISTS) { + if (!result.found) return false; + update(result.end_of_search_index, key, val); + } else if (put_type == btree_put_type::REPLACE_IF_EXISTS_ELSE_INSERT) { + !(result.found) ? insert(result.end_of_search_index, key, val) : update(result.end_of_search_index, key, val); + } else if (put_type == btree_put_type::APPEND_ONLY_IF_EXISTS) { + if (!result.found) return false; + append(result.end_of_search_index, key, val, existing_val); + } else if (put_type == btree_put_type::APPEND_IF_EXISTS_ELSE_INSERT) { + (!result.found) ? insert(result.end_of_search_index, key, val) + : append(result.end_of_search_index, key, val, existing_val); + } else { + DEBUG_ASSERT(false, "Wrong put_type {}", put_type); + } + return ret; +} + +#ifndef NO_CHECKSUM +void BtreeNode::set_checksum(size_t size) { + get_persistent_header()->checksum = crc16_t10dif(init_crc_16, m_node_area, size); +} + +bool BtreeNode::verify_node(size_t size, verify_result& vr) const { + HS_DEBUG_ASSERT_EQ(is_valid_node(), true, "verifying invalide node {}!", m_pers_header.to_string()); + vr.act_magic = get_magic(); + vr.exp_magic = BTREE_NODE_MAGIC; + vr.act_checksum = get_checksum(); + vr.exp_checksum = crc16_t10dif(init_crc_16, m_node_area, size); + return (vr.act_magic == vr.exp_magic && vr.act_checksum == vr.exp_checksum) ? true : false; +} +#endif + +bool BtreeNode::is_merge_needed(const BtreeConfig& cfg) const { +#ifdef _PRERELEASE + if (homestore_flip->test_flip("btree_merge_node") && get_occupied_size(cfg) < cfg.get_node_area_size()) { + return true; + } + + auto ret = homestore_flip->get_test_flip< uint64_t >("btree_merge_node_pct"); + if (ret && get_occupied_size(cfg) < (ret.get() * cfg.get_node_area_size() / 100)) { return true; } +#endif + return (get_occupied_size(cfg) < get_suggested_min_size(cfg)); +} + +void BtreeNode::get_last_key(BtreeKey* out_lastkey) const { + if (get_total_entries() == 0) { return; } + return get_nth_key(get_total_entries() - 1, out_lastkey, true); +} + +void BtreeNode::get_var_nth_key(int i, BtreeKey* out_firstkey) const { return get_nth_key(i, out_firstkey, true); } + +btree_status_t BtreeNode::insert(const BtreeKey& key, const BtreeValue& val) { + auto result = find(key, nullptr, nullptr); + assert(!is_leaf() || (!result.found)); // We do not support duplicate keys yet + auto ret = insert(result.end_of_search_index, key, val); + DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); + return ret; +} + +bool BtreeNode::remove_one(const BtreeSearchRange& range, BtreeKey* outkey, BtreeValue* outval) { + auto result = find(range, outkey, outval); + if (!result.found) { return false; } + + remove(result.end_of_search_index); + LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "{}", m_pers_header.to_string()); + return true; +} + +void BtreeNode::append(uint32_t index, const BtreeKey& key, const BtreeValue& val, BtreeValue& existing_val) { + // Get the nth value and do a callback to update its blob with the new value, being passed + V nth_val; + get_nth_value(index, &nth_val, false); + nth_val.append_blob(val, existing_val); + to_variant_node()->update(index, key, nth_val); + LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "{}", m_pers_header.to_string()); +} + +void BtreeNode::update(const BtreeKey& key, const BtreeValue& val, BtreeKey* outkey, BtreeValue* outval) { + auto result = find(key, outkey, outval); + assert(result.found); + update(result.end_of_search_index, val); + LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "{}", m_pers_header.to_string()); +} + +void BtreeNode::set_edge_value(const BtreeValue& v) { + BtreeNodeInfo* bni = (BtreeNodeInfo*)&v; + set_edge_id(bni->bnode_id()); + DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); +} + +void BtreeNode::get_edge_value(BtreeValue* v) const { + if (is_leaf()) { return; } + v->set_blob(BtreeNodeInfo(get_edge_id()).get_blob()); +} + +void BtreeNode::get_adjacent_indicies(uint32_t cur_ind, vector< int >& indices_list, uint32_t max_indices) const { + uint32_t i = 0; + uint32_t start_ind; + uint32_t end_ind; + uint32_t nentries = this->get_total_entries(); + + auto max_ind = ((max_indices / 2) - 1 + (max_indices % 2)); + end_ind = cur_ind + (max_indices / 2); + if (cur_ind < max_ind) { + end_ind += max_ind - cur_ind; + start_ind = 0; + } else { + start_ind = cur_ind - max_ind; + } + + for (i = start_ind; (i <= end_ind) && (indices_list.size() < max_indices); i++) { + if (i == nentries) { + if (this->has_valid_edge()) { indices_list.push_back(i); } + break; + } else { + indices_list.push_back(i); + } + } +} + +node_find_result_t BtreeNode::bsearch_node(const BtreeSearchRange& range) const { + DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); + const auto ret = bsearch(-1, get_total_entries(), range); + const auto selection = range.multi_option(); + + if (ret.found) { assert(ret.end_of_search_index < (int)get_total_entries() && ret.end_of_search_index > -1); } + + /* BEST_FIT_TO_CLOSEST is used by remove only. Remove doesn't support range_remove. Until + * then we have the special logic : + */ + if (selection == MultiMatchOption::BEST_FIT_TO_CLOSEST_FOR_REMOVE) { + if (!ret.found && is_leaf()) { + if (get_total_entries() != 0) { + ret.end_of_search_index = get_total_entries() - 1; + ret.found = true; + } + } + } + + return ret; +} + +node_find_result_t BtreeNode::is_bsearch_left_or_right_most(const BtreeSearchRange& range) const { + auto selection = range.multi_option(); + if (range.is_simple_search()) { return (MultiMatchOption::DO_NOT_CARE); } + if (selection == MultiMatchOption::LEFT_MOST) { + return (MultiMatchOption::LEFT_MOST); + } else if (selection == MultiMatchOption::RIGHT_MOST) { + return (MultiMatchOption::RIGHT_MOST); + } else if (selection == MultiMatchOption::BEST_FIT_TO_CLOSEST_FOR_REMOVE) { + return (MultiMatchOption::LEFT_MOST); + } + return (MultiMatchOption::DO_NOT_CARE); +} + +node_find_result_t BtreeNode::bsearch(int start, int end, const BtreeSearchRange& range) const { + int mid = 0; + int initial_end = end; + int min_ind_found = INT32_MAX; + int second_min = INT32_MAX; + int max_ind_found = 0; + + struct { + bool found; + int end_of_search_index; + } ret{false, 0}; + + if ((end - start) <= 1) { return ret; } + + auto selection = is_bsearch_left_or_right_most(range); + + while ((end - start) > 1) { + mid = start + (end - start) / 2; + assert(mid >= 0 && mid < (int)get_total_entries()); + int x = range.is_simple_search() ? to_variant_node_const()->compare_nth_key(*range.get_start_key(), mid) + : to_variant_node_const()->compare_nth_key_range(range, mid); + if (x == 0) { + ret.found = true; + if (selection == MultiMatchOption::DO_NOT_CARE) { + end = mid; + break; + } else if (selection == MultiMatchOption::LEFT_MOST) { + if (mid < min_ind_found) { min_ind_found = mid; } + end = mid; + } else if (selection == MultiMatchOption::RIGHT_MOST) { + if (mid > max_ind_found) { max_ind_found = mid; } + start = mid; + } else { + assert(false); + } + } else if (x > 0) { + end = mid; + } else { + start = mid; + } + } + + if (ret.found) { + if (selection == MultiMatchOption::LEFT_MOST) { + assert(min_ind_found != INT32_MAX); + ret.end_of_search_index = min_ind_found; + } else if (selection == MultiMatchOption::RIGHT_MOST) { + assert(max_ind_found != INT32_MAX); + ret.end_of_search_index = max_ind_found; + } else { + ret.end_of_search_index = end; + } + } else { + ret.end_of_search_index = end; + } + return ret; +} +} // namespace sisl diff --git a/src/btree/rough/physical_node.hpp b/src/btree/rough/physical_node.hpp new file mode 100644 index 00000000..c9a7c48d --- /dev/null +++ b/src/btree/rough/physical_node.hpp @@ -0,0 +1,525 @@ +/* + * physical_node.hpp + * + * Created on: 16-May-2016 + * Author: Hari Kadayam + * + * Copyright © 2016 Kadayam, Hari. All rights reserved. + */ +#pragma once + +#include +#include "logging/logging.h" +#include "btree_internal.h" + +namespace sisl { +static constexpr uint8_t BTREE_NODE_VERSION = 1; +static constexpr uint8_t BTREE_NODE_MAGIC = 0xab; + +#pragma pack(1) +struct persistent_hdr_t { + uint8_t magic{BTREE_NODE_MAGIC}; + uint8_t version{BTREE_NODE_VERSION}; + uint16_t checksum; + + bnodeid_t node_id; + bnodeid_t next_node; + + uint32_t nentries : 27; + uint32_t node_type : 3; + uint32_t leaf : 1; + uint32_t valid_node : 1; + + uint64_t node_gen; + bnodeid_t edge_entry; + + std::string to_string() const { + return fmt::format("magic={} version={} csum={} node_id={} next_node={} nentries={} node_type={} is_leaf={} " + "valid_node={} node_gen={} edge_entry={}", + magic, version, checksum, node_id, next_node, nentries, node_type, leaf, valid_node, + node_gen, edge_entry); + } +}; +#pragma pack() + +#ifndef NO_CHECKSUM +struct verify_result { + uint8_t act_magic; + uint16_t act_checksum; + uint8_t exp_magic; + uint16_t exp_checksum; + + std::string to_string() const { + return fmt::format(" Actual magic={} Expected magic={} Actual checksum={} Expected checksum={}", act_magic, + exp_magic, act_checksum, exp_checksum); + } + + friend ostream& operator<<(ostream& os, const verify_result& vr) { + os << vr.to_string(); + return os; + } +}; +#endif + +class BtreeSearchRange; + +#pragma pack(1) +template < typename VariantNodeT > +class PhysicalNode { +protected: + persistent_hdr_t m_pers_header; + uint8_t m_node_area[0]; + +public: + PhysicalNode(bnodeid_t* id, bool init) { + if (init) { + set_magic(); + init_checksum(); + set_leaf(true); + set_total_entries(0); + set_next_bnode(empty_bnodeid); + set_gen(0); + set_valid_node(true); + set_edge_id(empty_bnodeid); + set_node_id(*id); + } else { + DEBUG_ASSERT_EQ(get_node_id(), *id); + DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); + DEBUG_ASSERT_EQ(get_version(), BTREE_NODE_VERSION); + } + } + PhysicalNode(bnodeid_t id, bool init) { + if (init) { + set_magic(); + init_checksum(); + set_leaf(true); + set_total_entries(0); + set_next_bnode(empty_bnodeid); + set_gen(0); + set_valid_node(true); + set_edge_id(empty_bnodeid); + set_node_id(id); + } else { + DEBUG_ASSERT_EQ(get_node_id(), id); + DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); + DEBUG_ASSERT_EQ(get_version(), BTREE_NODE_VERSION); + } + } + ~PhysicalNode() = default; + + persistent_hdr_t* get_persistent_header() { return &m_pers_header; } + + uint8_t get_magic() const { return m_pers_header.magic; } + void set_magic() { m_pers_header.magic = BTREE_NODE_MAGIC; } + + uint8_t get_version() const { return m_pers_header.version; } + uint16_t get_checksum() const { return m_pers_header.checksum; } + void init_checksum() { m_pers_header.checksum = 0; } + + void set_node_id(bnodeid_t id) { m_pers_header.node_id = id; } + bnodeid_t get_node_id() const { return m_pers_header.node_id; } + +#ifndef NO_CHECKSUM + void set_checksum(size_t size) { m_pers_header.checksum = crc16_t10dif(init_crc_16, m_node_area, size); } + bool verify_node(size_t size, verify_result& vr) const { + HS_DEBUG_ASSERT_EQ(is_valid_node(), true, "verifying invalide node {}!", m_pers_header.to_string()); + vr.act_magic = get_magic(); + vr.exp_magic = BTREE_NODE_MAGIC; + vr.act_checksum = get_checksum(); + vr.exp_checksum = crc16_t10dif(init_crc_16, m_node_area, size); + return (vr.act_magic == vr.exp_magic && vr.act_checksum == vr.exp_checksum) ? true : false; + } +#endif + + uint32_t get_total_entries() const { return m_pers_header.nentries; } + bool is_leaf() const { return m_pers_header.leaf; } + btree_node_type get_node_type() const { return s_cast< btree_node_type >(m_pers_header.node_type); } + +protected: + void set_total_entries(uint32_t n) { get_persistent_header()->nentries = n; } + void inc_entries() { ++get_persistent_header()->nentries; } + void dec_entries() { --get_persistent_header()->nentries; } + + void add_entries(uint32_t addn) { get_persistent_header()->nentries += addn; } + void sub_entries(uint32_t subn) { get_persistent_header()->nentries -= subn; } + + void set_leaf(bool leaf) { get_persistent_header()->leaf = leaf; } + void set_node_type(btree_node_type t) { get_persistent_header()->node_type = uint32_cast(t); } + uint64_t get_gen() const { return m_pers_header.node_gen; } + void inc_gen() { get_persistent_header()->node_gen++; } + void set_gen(uint64_t g) { get_persistent_header()->node_gen = g; } + + void set_valid_node(bool valid) { get_persistent_header()->valid_node = (valid ? 1 : 0); } + bool is_valid_node() const { return m_pers_header.valid_node; } + + uint8_t* get_node_area_mutable() { return m_node_area; } + const uint8_t* get_node_area() const { return m_node_area; } + + uint32_t get_occupied_size(const BtreeConfig& cfg) const { + return (cfg.get_node_area_size() - to_variant_node_const().get_available_size(cfg)); + } + uint32_t get_suggested_min_size(const BtreeConfig& cfg) const { return cfg.get_max_key_size(); } + + bool is_merge_needed(const BtreeConfig& cfg) const { +#ifdef _PRERELEASE + if (homestore_flip->test_flip("btree_merge_node") && get_occupied_size(cfg) < cfg.get_node_area_size()) { + return true; + } + + auto ret = homestore_flip->get_test_flip< uint64_t >("btree_merge_node_pct"); + if (ret && get_occupied_size(cfg) < (ret.get() * cfg.get_node_area_size() / 100)) { return true; } +#endif + return (get_occupied_size(cfg) < get_suggested_min_size(cfg)); + } + + bnodeid_t next_bnode() const { return m_pers_header.next_node; } + void set_next_bnode(bnodeid_t b) { get_persistent_header()->next_node = b; } + + bnodeid_t get_edge_id() const { return m_pers_header.edge_entry; } + void set_edge_id(bnodeid_t edge) { get_persistent_header()->edge_entry = edge; } + + typedef std::pair< bool, int > node_find_result_t; + + ////////// Top level functions (CRUD on a node) ////////////////// + // Find the slot where the key is present. If not present, return the closest location for the key. + // Assumption: Node lock is already taken + node_find_result_t find(const BtreeSearchRange& range, BtreeKey* outkey, BtreeValue* outval, bool copy_key = true, + bool copy_val = true) const { + LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "Magic mismatch on btree_node {}", m_pers_header.to_string()); + + auto result = bsearch_node(range); + if (result.end_of_search_index == int_cast(get_total_entries()) && !has_valid_edge()) { + assert(!result.found); + return result; + } + + if (get_total_entries() == 0) { + assert(has_valid_edge() || is_leaf()); + if (is_leaf()) { + /* Leaf doesn't have any elements */ + return result; + } + } + + if (outval) { to_variant_node_const().get(result.end_of_search_index, outval, copy_val /* copy */); } + + if (!range.is_simple_search() && outkey) { + to_variant_node_const().get_nth_key(result.end_of_search_index, outkey, copy_key /* copy */); + } + + return result; + } + + node_find_result_t find(const BtreeKey& find_key, BtreeValue* outval, bool copy_val = true) const { + return find(BtreeSearchRange(find_key), nullptr, outval, false, copy_val); + } + + void get_last_key(BtreeKey* out_lastkey) const { + if (get_total_entries() == 0) { return; } + to_variant_node().get_nth_key(get_total_entries() - 1, out_lastkey, true); + } + + void get_first_key(BtreeKey* out_firstkey) const { return to_variant_node().get_nth_key(0, out_firstkey, true); } + void get_var_nth_key(int i, BtreeKey* out_firstkey) const { + return to_variant_node().get_nth_key(i, out_firstkey, true); + } + + uint32_t get_all(const BtreeSearchRange& range, uint32_t max_count, int& start_ind, int& end_ind, + std::vector< std::pair< K, V > >* out_values = nullptr) const { + LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "Magic mismatch on btree_node {}", m_pers_header.to_string()); + auto count = 0U; + + // Get the start index of the search range. + BtreeSearchRange sr = range.get_start_of_range(); + sr.set_multi_option(MultiMatchOption::DO_NOT_CARE); + + auto result = bsearch_node(sr); // doing bsearch only based on start key + // at this point start index will point to exact found or element after that + start_ind = result.end_of_search_index; + + if (!range.is_start_inclusive()) { + if (start_ind < (int)get_total_entries()) { + /* start is not inclusive so increment the start_ind if it is same as this key */ + int x = to_variant_node_const().compare_nth_key(*range.get_start_key(), start_ind); + if (x == 0) { start_ind++; } + } else { + assert(is_leaf() || has_valid_edge()); + } + } + + if (start_ind == (int)get_total_entries() && is_leaf()) { + end_ind = start_ind; + return 0; // no result found + } + + assert((start_ind < (int)get_total_entries()) || has_valid_edge()); + + // search by the end index + BtreeSearchRange er = range.get_end_of_range(); + er.set_multi_option(MultiMatchOption::DO_NOT_CARE); + result = bsearch_node(er); // doing bsearch only based on end key + end_ind = result.end_of_search_index; + + assert(start_ind <= end_ind); + + /* we don't support end exclusive */ + assert(range.is_end_inclusive()); + + if (end_ind == (int)get_total_entries() && !has_valid_edge()) { --end_ind; } + + if (is_leaf()) { + /* Decrement the end indx if range doesn't overlap with the start of key at end indx */ + sisl::blob blob; + K key; + to_variant_node().get_nth_key(end_ind, &key, false); + + if ((range.get_start_key())->compare_start(&key) < 0 && ((range.get_end_key())->compare_start(&key)) < 0) { + if (start_ind == end_ind) { + /* no match */ + return 0; + } + --end_ind; + } + } + + assert(start_ind <= end_ind); + count = end_ind - start_ind + 1; + if (count > max_count) { count = max_count; } + + /* We should always find the entries in interior node */ + assert(start_ind < (int)get_total_entries() || has_valid_edge()); + assert(end_ind < (int)get_total_entries() || has_valid_edge()); + + if (out_values == nullptr) { return count; } + + /* get the keys and values */ + for (auto i = start_ind; i < (int)(start_ind + count); ++i) { + K key; + V value; + if (i == (int)get_total_entries() && !is_leaf()) + get_edge_value(&value); // invalid key in case of edge entry for internal node + else { + to_variant_node().get_nth_key(i, &key, true); + to_variant_node().get_nth_value(i, &value, true); + } + out_values->emplace_back(std::make_pair<>(key, value)); + } + return count; + } + + bool put(const BtreeKey& key, const BtreeValue& val, btree_put_type put_type, BtreeValue& existing_val) { + DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); + auto result = find(key, nullptr, nullptr); + bool ret = true; + + LOGMSG_ASSERT((get_magic() == BTREE_NODE_MAGIC), "{}", m_pers_header.to_string()); + if (put_type == btree_put_type::INSERT_ONLY_IF_NOT_EXISTS) { + if (result.found) { + LOGINFO("entry already exist"); + return false; + } + (void)to_variant_node().insert(result.end_of_search_index, key, val); + } else if (put_type == btree_put_type::REPLACE_ONLY_IF_EXISTS) { + if (!result.found) return false; + to_variant_node().update(result.end_of_search_index, key, val); + } else if (put_type == btree_put_type::REPLACE_IF_EXISTS_ELSE_INSERT) { + !(result.found) ? (void)to_variant_node().insert(result.end_of_search_index, key, val) + : to_variant_node().update(result.end_of_search_index, key, val); + } else if (put_type == btree_put_type::APPEND_ONLY_IF_EXISTS) { + if (!result.found) return false; + append(result.end_of_search_index, key, val, existing_val); + } else if (put_type == btree_put_type::APPEND_IF_EXISTS_ELSE_INSERT) { + (!result.found) ? (void)to_variant_node().insert(result.end_of_search_index, key, val) + : append(result.end_of_search_index, key, val, existing_val); + } else { + assert(false); + } + DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); + + LOGMSG_ASSERT((get_magic() == BTREE_NODE_MAGIC), "{}", m_pers_header.to_string()); + return ret; + } + + btree_status_t insert(const BtreeKey& key, const BtreeValue& val) { + auto result = find(key, nullptr, nullptr); + assert(!is_leaf() || (!result.found)); // We do not support duplicate keys yet + auto ret = to_variant_node().insert(result.end_of_search_index, key, val); + DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); + return ret; + } + + bool remove_one(const BtreeSearchRange& range, BtreeKey* outkey, BtreeValue* outval) { + auto result = find(range, outkey, outval); + if (!result.found) { return false; } + + to_variant_node().remove(result.end_of_search_index); + LOGMSG_ASSERT((get_magic() == BTREE_NODE_MAGIC), "{}", m_pers_header.to_string()); + return true; + } + + void append(uint32_t index, const BtreeKey& key, const BtreeValue& val, BtreeValue& existing_val) { + // Get the nth value and do a callback to update its blob with the new value, being passed + V nth_val; + to_variant_node().get_nth_value(index, &nth_val, false); + nth_val.append_blob(val, existing_val); + to_variant_node().update(index, key, nth_val); + DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); + } + + /* Update the key and value pair and after update if outkey and outval are non-nullptr, it fills them with + * the key and value it just updated respectively */ + void update(const BtreeKey& key, const BtreeValue& val, BtreeKey* outkey, BtreeValue* outval) { + auto result = find(key, outkey, outval); + assert(result.found); + to_variant_node().update(result.end_of_search_index, val); + LOGMSG_ASSERT((get_magic() == BTREE_NODE_MAGIC), "{}", m_pers_header.to_string()); + } + + //////////// Edge Related Methods /////////////// + void invalidate_edge() { set_edge_id(empty_bnodeid); } + + void set_edge_value(const BtreeValue& v) { + BtreeNodeInfo* bni = (BtreeNodeInfo*)&v; + set_edge_id(bni->bnode_id()); + DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); + } + + void get_edge_value(BtreeValue* v) const { + if (is_leaf()) { return; } + v->set_blob(BtreeNodeInfo(get_edge_id()).get_blob()); + } + + bool has_valid_edge() const { + if (is_leaf()) { return false; } + return (get_edge_id() != empty_bnodeid); + } + + void get_adjacent_indicies(uint32_t cur_ind, vector< int >& indices_list, uint32_t max_indices) const { + uint32_t i = 0; + uint32_t start_ind; + uint32_t end_ind; + uint32_t nentries = this->get_total_entries(); + + auto max_ind = ((max_indices / 2) - 1 + (max_indices % 2)); + end_ind = cur_ind + (max_indices / 2); + if (cur_ind < max_ind) { + end_ind += max_ind - cur_ind; + start_ind = 0; + } else { + start_ind = cur_ind - max_ind; + } + + for (i = start_ind; (i <= end_ind) && (indices_list.size() < max_indices); i++) { + if (i == nentries) { + if (this->has_valid_edge()) { indices_list.push_back(i); } + break; + } else { + indices_list.push_back(i); + } + } + } + +protected: + node_find_result_t bsearch_node(const BtreeSearchRange& range) const { + DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); + const auto ret = bsearch(-1, get_total_entries(), range); + const auto selection = range.multi_option(); + + if (ret.found) { assert(ret.end_of_search_index < (int)get_total_entries() && ret.end_of_search_index > -1); } + + /* BEST_FIT_TO_CLOSEST is used by remove only. Remove doesn't support range_remove. Until + * then we have the special logic : + */ + if (selection == MultiMatchOption::BEST_FIT_TO_CLOSEST_FOR_REMOVE) { + if (!ret.found && is_leaf()) { + if (get_total_entries() != 0) { + ret.end_of_search_index = get_total_entries() - 1; + ret.found = true; + } + } + } + + return ret; + } + + node_find_result_t is_bsearch_left_or_right_most(const BtreeSearchRange& range) const { + auto selection = range.multi_option(); + if (range.is_simple_search()) { return (MultiMatchOption::DO_NOT_CARE); } + if (selection == MultiMatchOption::LEFT_MOST) { + return (MultiMatchOption::LEFT_MOST); + } else if (selection == MultiMatchOption::RIGHT_MOST) { + return (MultiMatchOption::RIGHT_MOST); + } else if (selection == MultiMatchOption::BEST_FIT_TO_CLOSEST_FOR_REMOVE) { + return (MultiMatchOption::LEFT_MOST); + } + return (MultiMatchOption::DO_NOT_CARE); + } + + /* This function does bseach between start and end where start and end are not included. + * It either gives left most, right most or the first found entry based on the range selection policy. + * If entry doesn't found then it gives the closest found entry. + */ + node_find_result_t bsearch(int start, int end, const BtreeSearchRange& range) const { + int mid = 0; + int initial_end = end; + int min_ind_found = INT32_MAX; + int second_min = INT32_MAX; + int max_ind_found = 0; + + struct { + bool found; + int end_of_search_index; + } ret{false, 0}; + + if ((end - start) <= 1) { return ret; } + + auto selection = is_bsearch_left_or_right_most(range); + + while ((end - start) > 1) { + mid = start + (end - start) / 2; + assert(mid >= 0 && mid < (int)get_total_entries()); + int x = range.is_simple_search() ? to_variant_node_const().compare_nth_key(*range.get_start_key(), mid) + : to_variant_node_const().compare_nth_key_range(range, mid); + if (x == 0) { + ret.found = true; + if (selection == MultiMatchOption::DO_NOT_CARE) { + end = mid; + break; + } else if (selection == MultiMatchOption::LEFT_MOST) { + if (mid < min_ind_found) { min_ind_found = mid; } + end = mid; + } else if (selection == MultiMatchOption::RIGHT_MOST) { + if (mid > max_ind_found) { max_ind_found = mid; } + start = mid; + } else { + assert(false); + } + } else if (x > 0) { + end = mid; + } else { + start = mid; + } + } + + if (ret.found) { + if (selection == MultiMatchOption::LEFT_MOST) { + assert(min_ind_found != INT32_MAX); + ret.end_of_search_index = min_ind_found; + } else if (selection == MultiMatchOption::RIGHT_MOST) { + assert(max_ind_found != INT32_MAX); + ret.end_of_search_index = max_ind_found; + } else { + ret.end_of_search_index = end; + } + } else { + ret.end_of_search_index = end; + } + return ret; + } + + VariantNodeT& to_variant_node() { return s_cast< VariantNodeT& >(*this); } + const VariantNodeT& to_variant_node_const() const { return s_cast< const VariantNodeT& >(*this); } +}; +#pragma pack() + +} // namespace sisl diff --git a/src/btree/rough/sisl_btree.hpp b/src/btree/rough/sisl_btree.hpp new file mode 100644 index 00000000..6f11ac27 --- /dev/null +++ b/src/btree/rough/sisl_btree.hpp @@ -0,0 +1,1894 @@ +/* + * Created on: 14-May-2016 + * Author: Hari Kadayam + * + * Copyright © 2016 Kadayam, Hari. All rights reserved. + */ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include "logging/logging.h" + +#include "fds/buffer.hpp" +#include "btree_internal.h" +#include "btree_node.hpp" + +SISL_LOGGING_DECL(btree) +namespace sisl { + +#if 0 +#define container_of(ptr, type, member) ({ (type*)((char*)ptr - offsetof(type, member)); }) +#endif + +#define btree_t Btree< BtreeStoreType, K, V, InteriorNodeType, LeafNodeType > + +template < btree_store_type BtreeStoreType, typename K, typename V, btree_node_type InteriorNodeType, + btree_node_type LeafNodeType > +struct _btree_locked_node_info { + btree_node_t* node; + Clock::time_point start_time; + const char* fname; + int line; + void dump() { LOGINFO("node locked by file: {}, line: {}", fname, line); } +}; + +#define btree_locked_node_info _btree_locked_node_info< BtreeStoreType, K, V, InteriorNodeType, LeafNodeType > + +template < typename K, typename V > +class Btree { + typedef std::function< void(V& mv) > free_blk_callback; + typedef std::function< void() > destroy_btree_comp_callback; + typedef std::function< void(const K& k, const V& v, const K& split_key, + std::vector< std::pair< K, V > >& replace_kv) > + split_key_callback; + +private: + bnodeid_t m_root_node; + homeds::thread::RWLock m_btree_lock; + + uint32_t m_max_nodes; + BtreeConfig m_bt_cfg; + btree_super_block m_sb; + + BtreeMetrics m_metrics; + std::unique_ptr< btree_store_t > m_btree_store; + bool m_destroy = false; + std::atomic< uint64_t > m_total_nodes = 0; + uint32_t m_node_size = 4096; + btree_cp_sb m_last_cp_sb; + split_key_callback m_split_key_cb; +#ifndef NDEBUG + std::atomic< uint64_t > m_req_id = 0; +#endif + + static thread_local homeds::reserve_vector< btree_locked_node_info, 5 > wr_locked_nodes; + static thread_local homeds::reserve_vector< btree_locked_node_info, 5 > rd_locked_nodes; + + ////////////////// Implementation ///////////////////////// +public: + btree_super_block get_btree_sb() { return m_sb; } + const btree_cp_sb& get_last_cp_cb() const { return m_last_cp_sb; } + + /** + * @brief : return the btree cfg + * + * @return : the btree cfg; + */ + BtreeConfig get_btree_cfg() const { return m_bt_cfg; } + uint64_t get_used_size() const { return m_node_size * m_total_nodes.load(); } +#ifdef _PRERELEASE + static void set_io_flip() { + /* IO flips */ + FlipClient* fc = homestore::HomeStoreFlip::client_instance(); + FlipFrequency freq; + FlipCondition cond1; + FlipCondition cond2; + freq.set_count(2000000000); + freq.set_percent(2); + + FlipCondition null_cond; + fc->create_condition("", flip::Operator::DONT_CARE, (int)1, &null_cond); + + fc->create_condition("nuber of entries in a node", flip::Operator::EQUAL, 0, &cond1); + fc->create_condition("nuber of entries in a node", flip::Operator::EQUAL, 1, &cond2); + fc->inject_noreturn_flip("btree_upgrade_node_fail", {cond1, cond2}, freq); + + fc->create_condition("nuber of entries in a node", flip::Operator::EQUAL, 4, &cond1); + fc->create_condition("nuber of entries in a node", flip::Operator::EQUAL, 2, &cond2); + + fc->inject_retval_flip("btree_delay_and_split", {cond1, cond2}, freq, 20); + fc->inject_retval_flip("btree_delay_and_split_leaf", {cond1, cond2}, freq, 20); + fc->inject_noreturn_flip("btree_parent_node_full", {null_cond}, freq); + fc->inject_noreturn_flip("btree_leaf_node_split", {null_cond}, freq); + fc->inject_retval_flip("btree_upgrade_delay", {null_cond}, freq, 20); + fc->inject_retval_flip("writeBack_completion_req_delay_us", {null_cond}, freq, 20); + fc->inject_noreturn_flip("btree_read_fast_path_not_possible", {null_cond}, freq); + } + + static void set_error_flip() { + /* error flips */ + FlipClient* fc = homestore::HomeStoreFlip::client_instance(); + FlipFrequency freq; + freq.set_count(20); + freq.set_percent(10); + + FlipCondition null_cond; + fc->create_condition("", flip::Operator::DONT_CARE, (int)1, &null_cond); + + fc->inject_noreturn_flip("btree_read_fail", {null_cond}, freq); + fc->inject_noreturn_flip("fixed_blkalloc_no_blks", {null_cond}, freq); + } +#endif + + static btree_t* create_btree(BtreeConfig& cfg) { + Btree* bt = new Btree(cfg); + auto impl_ptr = btree_store_t::init_btree(bt, cfg); + bt->m_btree_store = std::move(impl_ptr); + btree_status_t ret = bt->init(); + if (ret != btree_status_t::success) { + LOGERROR("btree create failed. error {} name {}", ret, cfg.get_name()); + delete (bt); + return nullptr; + } + + HS_SUBMOD_LOG(INFO, base, , "btree", cfg.get_name(), "New {} created: Node size {}", BtreeStoreType, + cfg.get_node_size()); + return bt; + } + + void do_common_init(bool is_recovery = false) { + // TODO: Check if node_area_size need to include persistent header + uint32_t node_area_size = btree_store_t::get_node_area_size(m_btree_store.get()); + m_bt_cfg.set_node_area_size(node_area_size); + + // calculate number of nodes + uint32_t max_leaf_nodes = + (m_bt_cfg.get_max_objs() * (m_bt_cfg.get_max_key_size() + m_bt_cfg.get_max_value_size())) / node_area_size + + 1; + max_leaf_nodes += (100 * max_leaf_nodes) / 60; // Assume 60% btree full + + m_max_nodes = max_leaf_nodes + ((double)max_leaf_nodes * 0.05) + 1; // Assume 5% for interior nodes + m_total_nodes = m_last_cp_sb.btree_size; + btree_store_t::update_sb(m_btree_store.get(), m_sb, &m_last_cp_sb, is_recovery); + } + + void replay_done(const btree_cp_ptr& bcp) { + m_total_nodes = m_last_cp_sb.btree_size + bcp->btree_size.load(); + THIS_BT_LOG(INFO, base, , "total btree nodes {}", m_total_nodes); + } + + btree_status_t init() { + do_common_init(); + return (create_root_node()); + } + + void init_recovery(const btree_super_block& btree_sb, btree_cp_sb* cp_sb, const split_key_callback& split_key_cb) { + m_sb = btree_sb; + m_split_key_cb = split_key_cb; + if (cp_sb) { memcpy(&m_last_cp_sb, cp_sb, sizeof(m_last_cp_sb)); } + do_common_init(true); + m_root_node = m_sb.root_node; + } + + Btree(BtreeConfig& cfg) : + m_bt_cfg(cfg), m_metrics(BtreeStoreType, cfg.get_name().c_str()), m_node_size(cfg.get_node_size()) {} + + ~Btree() { + if (BtreeStoreType != btree_store_type::MEM_BTREE) { + LOGINFO("Skipping destroy in-memory btree nodes for non mem btree types."); + return; + } + + uint64_t free_node_cnt; + auto ret = destroy_btree(nullptr, free_node_cnt, true); + + HS_DEBUG_ASSERT_EQ(ret, btree_status_t::success, "btree destroy failed"); + LOGWARN("Destroy in-memory btree nodes failed."); + } + + btree_status_t destroy_btree(blkid_list_ptr free_blkid_list, uint64_t& free_node_cnt, bool in_mem = false) { + btree_status_t ret{btree_status_t::success}; + m_btree_lock.write_lock(); + if (!m_destroy) { // if previous destroy is successful, do not destroy again; + BtreeNodePtr< K > root; + homeds::thread::locktype acq_lock = LOCKTYPE_WRITE; + + ret = read_and_lock_root(m_root_node, root, acq_lock, acq_lock, nullptr); + if (ret != btree_status_t::success) { + m_btree_lock.unlock(); + return ret; + } + + free_node_cnt = 0; + ret = free(root, free_blkid_list, in_mem, free_node_cnt); + + unlock_node(root, acq_lock); + + if (ret == btree_status_t::success) { + THIS_BT_LOG(DEBUG, base, , "btree(root: {}) nodes destroyed successfully", m_root_node); + m_destroy = true; + } else { + THIS_BT_LOG(ERROR, base, , "btree(root: {}) nodes destroyed failed, ret: {}", m_root_node, ret); + } + } + m_btree_lock.unlock(); + return ret; + } + + // + // 1. free nodes in post order traversal of tree to free non-leaf node + // + btree_status_t post_order_traversal(const BtreeNodePtr< K >& node, const auto& cb) { + homeds::thread::locktype acq_lock = homeds::thread::LOCKTYPE_WRITE; + uint32_t i = 0; + btree_status_t ret = btree_status_t::success; + + if (!node->is_leaf()) { + BtreeNodeInfo child_info; + while (i <= node->get_total_entries()) { + if (i == node->get_total_entries()) { + if (!node->has_valid_edge()) { break; } + child_info.set_bnode_id(node->get_edge_id()); + } else { + child_info = node->get(i, false /* copy */); + } + + BtreeNodePtr< K > child; + ret = read_and_lock_child(child_info.bnode_id(), child, node, i, acq_lock, acq_lock, nullptr); + if (ret != btree_status_t::success) { return ret; } + ret = post_order_traversal(child, cb); + unlock_node(child, acq_lock); + ++i; + } + } + + if (ret != btree_status_t::success) { return ret; } + cb(node); + return ret; + } + + void destroy_done() { btree_store_t::destroy_done(m_btree_store.get()); } + + uint64_t get_used_size() const { return m_node_size * m_total_nodes.load(); } + + btree_status_t range_put(const BtreeRangeUpdateRequest< K, V >& bur) { + BtreeQueryCursor cur; + bool reset_cur = false; + if (!bur.get_input_range().is_cursor_valid()) { + bur.get_input_range().set_cursor(&cur); + reset_cur = true; + } + auto ret = put_internal(bur); + if (reset_cur) { bur.get_input_range().reset_cursor(); } + return ret; + } + + btree_status_t put(const BtreeKey& k, const BtreeValue& v, btree_put_type put_type, + BtreeValue* existing_val = nullptr) { + return put_internal(BtreeSinglePutRequest{k, v, put_type, existing_val}); + } + + btree_status_t get(const BtreeKey& key, BtreeValue* outval) { return get(key, nullptr, outval); } + + btree_status_t get(const BtreeKey& key, BtreeKey* outkey, BtreeValue* outval) { + return get_any(BtreeSearchRange(key), outkey, outval); + } + + btree_status_t get_any(const BtreeSearchRange& range, BtreeKey* outkey, BtreeValue* outval) { + btree_status_t ret = btree_status_t::success; + bool is_found; + + m_btree_lock.read_lock(); + BtreeNodePtr< K > root; + + ret = read_and_lock_root(m_root_node, root, locktype_t::READ, locktype_t::READ, nullptr); + if (ret != btree_status_t::success) { goto out; } + + ret = do_get(root, range, outkey, outval); + out: + m_btree_lock.unlock(); + + // TODO: Assert if key returned from do_get is same as key requested, incase of perfect match + +#ifndef NDEBUG + check_lock_debug(); +#endif + return ret; + } + + btree_status_t query(BtreeQueryRequest& query_req, std::vector< std::pair< K, V > >& out_values) { + COUNTER_INCREMENT(m_metrics, btree_query_ops_count, 1); + + btree_status_t ret = btree_status_t::success; + if (query_req.batch_size() == 0) { return ret; } + + /* set cursor if it is invalid. User is not interested in the cursor but we need it for internal logic */ + BtreeQueryCursor cur; + bool reset_cur = false; + if (!query_req.get_input_range().is_cursor_valid()) { + query_req.get_input_range().set_cursor(&cur); + reset_cur = true; + } + + m_btree_lock.read_lock(); + BtreeNodePtr< K > root = nullptr; + ret = read_and_lock_root(m_root_node, root, locktype_t::READ, locktype_t::READ, nullptr); + if (ret != btree_status_t::success) { goto out; } + + switch (query_req.query_type()) { + case BtreeQueryType::SWEEP_NON_INTRUSIVE_PAGINATION_QUERY: + ret = do_sweep_query(root, query_req, out_values); + break; + + case BtreeQueryType::TREE_TRAVERSAL_QUERY: + ret = do_traversal_query(root, query_req, out_values); + break; + + default: + unlock_node(root, homeds::thread::locktype::locktype_t::READ); + LOGERROR("Query type {} is not supported yet", query_req.query_type()); + break; + } + + if ((query_req.query_type() == BtreeQueryType::SWEEP_NON_INTRUSIVE_PAGINATION_QUERY || + query_req.query_type() == BtreeQueryType::TREE_TRAVERSAL_QUERY) && + out_values.size() > 0) { + + /* if return is not success then set the cursor to last read. No need to set cursor if user is not + * interested in it. + */ + if (!reset_cur) { + query_req.get_input_range().set_cursor_key(&out_values.back().first, ([](BtreeKey* key) { + K end_key; + end_key.copy_end_key_blob(key->get_blob()); + return std::move(std::make_unique< K >(end_key)); + })); + } + + /* check if we finished just at the last key */ + if (out_values.back().first.compare(query_req.get_input_range().get_end_key()) == 0) { + ret = btree_status_t::success; + } + } + + out: + m_btree_lock.unlock(); +#ifndef NDEBUG + check_lock_debug(); +#endif + if (ret != btree_status_t::success && ret != btree_status_t::has_more && + ret != btree_status_t::fast_path_not_possible) { + THIS_BT_LOG(ERROR, base, , "btree get failed {}", ret); + COUNTER_INCREMENT(m_metrics, query_err_cnt, 1); + } + if (reset_cur) { query_req.get_input_range().reset_cursor(); } + return ret; + } + +#ifdef SERIALIZABLE_QUERY_IMPLEMENTATION + btree_status_t sweep_query(BtreeQueryRequest& query_req, std::vector< std::pair< K, V > >& out_values) { + COUNTER_INCREMENT(m_metrics, btree_read_ops_count, 1); + query_req.init_batch_range(); + + m_btree_lock.read_lock(); + + BtreeNodePtr< K > root; + btree_status_t ret = btree_status_t::success; + + ret = read_and_lock_root(m_root_node, root, locktype_t::READ, locktype_t::READ, nullptr); + if (ret != btree_status_t::success) { goto out; } + + ret = do_sweep_query(root, query_req, out_values); + out: + m_btree_lock.unlock(); + +#ifndef NDEBUG + check_lock_debug(); +#endif + return ret; + } + + btree_status_t serializable_query(BtreeSerializableQueryRequest& query_req, + std::vector< std::pair< K, V > >& out_values) { + query_req.init_batch_range(); + + m_btree_lock.read_lock(); + BtreeNodePtr< K > node; + btree_status_t ret; + + if (query_req.is_empty_cursor()) { + // Initialize a new lock tracker and put inside the cursor. + query_req.cursor().m_locked_nodes = std::make_unique< BtreeLockTrackerImpl >(this); + + BtreeNodePtr< K > root; + ret = read_and_lock_root(m_root_node, root, locktype_t::READ, locktype_t::READ, nullptr); + if (ret != btree_status_t::success) { goto out; } + get_tracker(query_req)->push(root); // Start tracking the locked nodes. + } else { + node = get_tracker(query_req)->top(); + } + + ret = do_serialzable_query(node, query_req, out_values); + out: + m_btree_lock.unlock(); + + // TODO: Assert if key returned from do_get is same as key requested, incase of perfect match + +#ifndef NDEBUG + check_lock_debug(); +#endif + + return ret; + } + + BtreeLockTrackerImpl* get_tracker(BtreeSerializableQueryRequest& query_req) { + return (BtreeLockTrackerImpl*)query_req->get_cursor.m_locked_nodes.get(); + } +#endif + + /* It doesn't support async */ + btree_status_t remove_any(BtreeSearchRange& range, BtreeKey* outkey, BtreeValue* outval) { + return (remove_any(range, outkey, outval, nullptr)); + } + + btree_status_t remove_any(BtreeSearchRange& range, BtreeKey* outkey, BtreeValue* outval, const btree_cp_ptr& bcp) { + homeds::thread::locktype acq_lock = homeds::thread::locktype::locktype_t::READ; + bool is_found = false; + bool is_leaf = false; + /* set cursor if it is invalid. User is not interested in the cursor but we need it for internal logic */ + BtreeQueryCursor cur; + bool reset_cur = false; + if (!range.is_cursor_valid()) { + range.set_cursor(&cur); + reset_cur = true; + } + + m_btree_lock.read_lock(); + + retry: + + btree_status_t status = btree_status_t::success; + + BtreeNodePtr< K > root; + status = read_and_lock_root(m_root_node, root, acq_lock, acq_lock); + if (status != btree_status_t::success) { goto out; } + is_leaf = root->is_leaf(); + + if (root->get_total_entries() == 0) { + if (is_leaf) { + // There are no entries in btree. + unlock_node(root, acq_lock); + status = btree_status_t::not_found; + THIS_BT_LOG(DEBUG, base, root, "entry not found in btree"); + goto out; + } + BT_LOG_ASSERT(root->has_valid_edge(), root, "Invalid edge id"); + unlock_node(root, acq_lock); + m_btree_lock.unlock(); + + status = check_collapse_root(); + if (status != btree_status_t::success) { + LOGERROR("check collapse read failed btree name {}", m_bt_cfg.get_name()); + goto out; + } + + // We must have gotten a new root, need to + // start from scratch. + m_btree_lock.read_lock(); + goto retry; + } else if ((is_leaf) && (acq_lock != homeds::thread::LOCKTYPE_WRITE)) { + // Root is a leaf, need to take write lock, instead + // of read, retry + unlock_node(root, acq_lock); + acq_lock = homeds::thread::LOCKTYPE_WRITE; + goto retry; + } else { + status = do_remove(root, acq_lock, range, outkey, outval, bcp); + if (status == btree_status_t::retry) { + // Need to start from top down again, since + // there is a race between 2 inserts or deletes. + acq_lock = homeds::thread::locktype_t::READ; + goto retry; + } + } + + out: + m_btree_lock.unlock(); +#ifndef NDEBUG + check_lock_debug(); +#endif + if (reset_cur) { range.reset_cursor(); } + return status; + } + + btree_status_t remove(const BtreeKey& key, BtreeValue* outval) { return (remove(key, outval, nullptr)); } + + btree_status_t remove(const BtreeKey& key, BtreeValue* outval, const btree_cp_ptr& bcp) { + auto range = BtreeSearchRange(key); + return remove_any(range, nullptr, outval, bcp); + } + + /** + * @brief : verify btree is consistent and no corruption; + * + * @param update_debug_bm : true or false; + * + * @return : true if btree is not corrupted. + * false if btree is corrupted; + */ + bool verify_tree(bool update_debug_bm) { + m_btree_lock.read_lock(); + bool ret = verify_node(m_root_node, nullptr, -1, update_debug_bm); + m_btree_lock.unlock(); + + return ret; + } + + /** + * @brief : get the status of this btree; + * + * @param log_level : verbosity level; + * + * @return : status in json form; + */ + nlohmann::json get_status(const int log_level) { + nlohmann::json j; + return j; + } + + void diff(Btree* other, uint32_t param, vector< pair< K, V > >* diff_kv) { + std::vector< pair< K, V > > my_kvs, other_kvs; + + get_all_kvs(&my_kvs); + other->get_all_kvs(&other_kvs); + auto it1 = my_kvs.begin(); + auto it2 = other_kvs.begin(); + + K k1, k2; + V v1, v2; + + if (it1 != my_kvs.end()) { + k1 = it1->first; + v1 = it1->second; + } + if (it2 != other_kvs.end()) { + k2 = it2->first; + v2 = it2->second; + } + + while ((it1 != my_kvs.end()) && (it2 != other_kvs.end())) { + if (k1.preceeds(&k2)) { + /* k1 preceeds k2 - push k1 and continue */ + diff_kv->emplace_back(make_pair(k1, v1)); + it1++; + if (it1 == my_kvs.end()) { break; } + k1 = it1->first; + v1 = it1->second; + } else if (k1.succeeds(&k2)) { + /* k2 preceeds k1 - push k2 and continue */ + diff_kv->emplace_back(make_pair(k2, v2)); + it2++; + if (it2 == other_kvs.end()) { break; } + k2 = it2->first; + v2 = it2->second; + } else { + /* k1 and k2 overlaps */ + std::vector< pair< K, V > > overlap_kvs; + diff_read_next_t to_read = READ_BOTH; + + v1.get_overlap_diff_kvs(&k1, &v1, &k2, &v2, param, to_read, overlap_kvs); + for (auto ovr_it = overlap_kvs.begin(); ovr_it != overlap_kvs.end(); ovr_it++) { + diff_kv->emplace_back(make_pair(ovr_it->first, ovr_it->second)); + } + + switch (to_read) { + case READ_FIRST: + it1++; + if (it1 == my_kvs.end()) { + // Add k2,v2 + diff_kv->emplace_back(make_pair(k2, v2)); + it2++; + break; + } + k1 = it1->first; + v1 = it1->second; + break; + + case READ_SECOND: + it2++; + if (it2 == other_kvs.end()) { + diff_kv->emplace_back(make_pair(k1, v1)); + it1++; + break; + } + k2 = it2->first; + v2 = it2->second; + break; + + case READ_BOTH: + /* No tail part */ + it1++; + if (it1 == my_kvs.end()) { break; } + k1 = it1->first; + v1 = it1->second; + it2++; + if (it2 == my_kvs.end()) { break; } + k2 = it2->first; + v2 = it2->second; + break; + + default: + LOGERROR("ERROR: Getting Overlapping Diff KVS for {}:{}, {}:{}, to_read {}", k1, v1, k2, v2, + to_read); + /* skip both */ + it1++; + if (it1 == my_kvs.end()) { break; } + k1 = it1->first; + v1 = it1->second; + it2++; + if (it2 == my_kvs.end()) { break; } + k2 = it2->first; + v2 = it2->second; + break; + } + } + } + + while (it1 != my_kvs.end()) { + diff_kv->emplace_back(make_pair(it1->first, it1->second)); + it1++; + } + + while (it2 != other_kvs.end()) { + diff_kv->emplace_back(make_pair(it2->first, it2->second)); + it2++; + } + } + + void merge(Btree* other, match_item_cb_t< K, V > merge_cb) { + std::vector< pair< K, V > > other_kvs; + + other->get_all_kvs(&other_kvs); + for (auto it = other_kvs.begin(); it != other_kvs.end(); it++) { + K k = it->first; + V v = it->second; + BRangeCBParam local_param(k, v); + K start(k.start(), 1), end(k.end(), 1); + + auto search_range = BtreeSearchRange(start, true, end, true); + BtreeUpdateRequest< K, V > ureq(search_range, merge_cb, nullptr, (BRangeCBParam*)&local_param); + range_put(k, v, btree_put_type::APPEND_IF_EXISTS_ELSE_INSERT, nullptr, nullptr, ureq); + } + } + + void print_tree() { + std::string buf; + m_btree_lock.read_lock(); + to_string(m_root_node, buf); + m_btree_lock.unlock(); + + THIS_BT_LOG(INFO, base, , "Pre order traversal of tree:\n<{}>", buf); + } + + void print_node(const bnodeid_t& bnodeid) { + std::string buf; + BtreeNodePtr< K > node; + + m_btree_lock.read_lock(); + homeds::thread::locktype acq_lock = homeds::thread::locktype::locktype_t::READ; + if (read_and_lock_node(bnodeid, node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { goto done; } + buf = node->to_string(true /* print_friendly */); + unlock_node(node, acq_lock); + + done: + m_btree_lock.unlock(); + + THIS_BT_LOG(INFO, base, , "Node: <{}>", buf); + } + + nlohmann::json get_metrics_in_json(bool updated = true) { return m_metrics.get_result_in_json(updated); } + +private: + /** + * @brief : verify the btree node is corrupted or not; + * + * Note: this function should never assert, but only return success or failure since it is in verification mode; + * + * @param bnodeid : node id + * @param parent_node : parent node ptr + * @param indx : index within thie node; + * @param update_debug_bm : true or false; + * + * @return : true if this node including all its children are not corrupted; + * false if not; + */ + bool verify_node(bnodeid_t bnodeid, BtreeNodePtr< K > parent_node, uint32_t indx, bool update_debug_bm) { + homeds::thread::locktype acq_lock = homeds::thread::locktype::locktype_t::READ; + BtreeNodePtr< K > my_node; + if (read_and_lock_node(bnodeid, my_node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { + LOGINFO("read node failed"); + return false; + } + if (update_debug_bm && + (btree_store_t::update_debug_bm(m_btree_store.get(), my_node) != btree_status_t::success)) { + LOGERROR("bitmap update failed for node {}", my_node->to_string()); + return false; + } + + K prev_key; + bool success = true; + for (uint32_t i = 0; i < my_node->get_total_entries(); ++i) { + K key; + my_node->get_nth_key(i, &key, false); + if (!my_node->is_leaf()) { + BtreeNodeInfo child; + my_node->get(i, &child, false); + success = verify_node(child.bnode_id(), my_node, i, update_debug_bm); + if (!success) { goto exit_on_error; } + + if (i > 0) { + BT_LOG_ASSERT_CMP(prev_key.compare(&key), <, 0, my_node); + if (prev_key.compare(&key) >= 0) { + success = false; + goto exit_on_error; + } + } + } + if (my_node->is_leaf() && i > 0) { + BT_LOG_ASSERT_CMP(prev_key.compare_start(&key), <, 0, my_node); + if (prev_key.compare_start(&key) >= 0) { + success = false; + goto exit_on_error; + } + } + prev_key = key; + } + + if (my_node->is_leaf() && my_node->get_total_entries() == 0) { + /* this node has zero entries */ + goto exit_on_error; + } + if (parent_node && parent_node->get_total_entries() != indx) { + K parent_key; + parent_node->get_nth_key(indx, &parent_key, false); + + K last_key; + my_node->get_nth_key(my_node->get_total_entries() - 1, &last_key, false); + if (!my_node->is_leaf()) { + BT_LOG_ASSERT_CMP(last_key.compare(&parent_key), ==, 0, parent_node, + "last key {} parent_key {} child {}", last_key.to_string(), parent_key.to_string(), + my_node->to_string()); + if (last_key.compare(&parent_key) != 0) { + success = false; + goto exit_on_error; + } + } else { + BT_LOG_ASSERT_CMP(last_key.compare(&parent_key), <=, 0, parent_node, + "last key {} parent_key {} child {}", last_key.to_string(), parent_key.to_string(), + my_node->to_string()); + if (last_key.compare(&parent_key) > 0) { + success = false; + goto exit_on_error; + } + BT_LOG_ASSERT_CMP(parent_key.compare_start(&last_key), >=, 0, parent_node, + "last key {} parent_key {} child {}", last_key.to_string(), parent_key.to_string(), + my_node->to_string()); + if (parent_key.compare_start(&last_key) < 0) { + success = false; + goto exit_on_error; + } + } + } + + if (parent_node && indx != 0) { + K parent_key; + parent_node->get_nth_key(indx - 1, &parent_key, false); + + K first_key; + my_node->get_nth_key(0, &first_key, false); + BT_LOG_ASSERT_CMP(first_key.compare(&parent_key), >, 0, parent_node, "my node {}", my_node->to_string()); + if (first_key.compare(&parent_key) <= 0) { + success = false; + goto exit_on_error; + } + + BT_LOG_ASSERT_CMP(parent_key.compare_start(&first_key), <, 0, parent_node, "my node {}", + my_node->to_string()); + if (parent_key.compare_start(&first_key) > 0) { + success = false; + goto exit_on_error; + } + } + + if (my_node->has_valid_edge()) { + success = verify_node(my_node->get_edge_id(), my_node, my_node->get_total_entries(), update_debug_bm); + if (!success) { goto exit_on_error; } + } + + exit_on_error: + unlock_node(my_node, acq_lock); + return success; + } + + void to_string(bnodeid_t bnodeid, std::string& buf) const { + BtreeNodePtr< K > node; + + homeds::thread::locktype acq_lock = homeds::thread::locktype::locktype_t::READ; + + if (read_and_lock_node(bnodeid, node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { return; } + fmt::format_to(std::back_inserter(buf), "{}\n", node->to_string(true /* print_friendly */)); + + if (!node->is_leaf()) { + uint32_t i = 0; + while (i < node->get_total_entries()) { + BtreeNodeInfo p; + node->get(i, &p, false); + to_string(p.bnode_id(), buf); + i++; + } + if (node->has_valid_edge()) { to_string(node->get_edge_id(), buf); } + } + unlock_node(node, acq_lock); + } + + /* This function upgrades the node lock and take required steps if things have + * changed during the upgrade. + * + * Inputs: + * myNode - Node to upgrade + * childNode - In case childNode needs to be unlocked. Could be nullptr + * curLock - Input/Output: current lock type + * + * Returns - If successfully able to upgrade, return true, else false. + * + * About Locks: This function expects the myNode to be locked and if childNode is not nullptr, expects + * it to be locked too. If it is able to successfully upgrade it continue to retain its + * old lock. If failed to upgrade, will release all locks. + */ + btree_status_t upgrade_node(const BtreeNodePtr< K >& my_node, BtreeNodePtr< K > child_node, + homeds::thread::locktype& cur_lock, homeds::thread::locktype& child_cur_lock, + const btree_cp_ptr& bcp) { + uint64_t prev_gen; + btree_status_t ret = btree_status_t::success; + homeds::thread::locktype child_lock_type = child_cur_lock; + + if (cur_lock == homeds::thread::LOCKTYPE_WRITE) { goto done; } + + prev_gen = my_node->get_gen(); + if (child_node) { + unlock_node(child_node, child_cur_lock); + child_cur_lock = locktype::LOCKTYPE_NONE; + } + +#ifdef _PRERELEASE + { + auto time = homestore_flip->get_test_flip< uint64_t >("btree_upgrade_delay"); + if (time) { std::this_thread::sleep_for(std::chrono::microseconds{time.get()}); } + } +#endif + ret = lock_node_upgrade(my_node, bcp); + if (ret != btree_status_t::success) { + cur_lock = locktype::LOCKTYPE_NONE; + return ret; + } + + // The node was not changed by anyone else during upgrade. + cur_lock = homeds::thread::LOCKTYPE_WRITE; + + // If the node has been made invalid (probably by mergeNodes) ask caller to start over again, but before + // that cleanup or free this node if there is no one waiting. + if (!my_node->is_valid_node()) { + unlock_node(my_node, homeds::thread::LOCKTYPE_WRITE); + cur_lock = locktype::LOCKTYPE_NONE; + ret = btree_status_t::retry; + goto done; + } + + // If node has been updated, while we have upgraded, ask caller to start all over again. + if (prev_gen != my_node->get_gen()) { + unlock_node(my_node, cur_lock); + cur_lock = locktype::LOCKTYPE_NONE; + ret = btree_status_t::retry; + goto done; + } + + if (child_node) { + ret = lock_and_refresh_node(child_node, child_lock_type, bcp); + if (ret != btree_status_t::success) { + unlock_node(my_node, cur_lock); + cur_lock = locktype::LOCKTYPE_NONE; + child_cur_lock = locktype::LOCKTYPE_NONE; + goto done; + } + child_cur_lock = child_lock_type; + } + +#ifdef _PRERELEASE + { + int is_leaf = 0; + + if (child_node && child_node->is_leaf()) { is_leaf = 1; } + if (homestore_flip->test_flip("btree_upgrade_node_fail", is_leaf)) { + unlock_node(my_node, cur_lock); + cur_lock = locktype::LOCKTYPE_NONE; + if (child_node) { + unlock_node(child_node, child_cur_lock); + child_cur_lock = locktype::LOCKTYPE_NONE; + } + ret = btree_status_t::retry; + goto done; + } + } +#endif + + BT_DEBUG_ASSERT_CMP(my_node->m_common_header.is_lock, ==, 1, my_node); + done: + return ret; + } + + btree_status_t update_leaf_node(const BtreeNodePtr< K >& my_node, const BtreeKey& k, const BtreeValue& v, + btree_put_type put_type, BtreeValue& existing_val, BtreeUpdateRequest< K, V >* bur, + const btree_cp_ptr& bcp, BtreeSearchRange& subrange) { + btree_status_t ret = btree_status_t::success; + if (bur != nullptr) { + // BT_DEBUG_ASSERT_CMP(bur->callback(), !=, nullptr, my_node); // TODO - range req without + // callback implementation + static thread_local std::vector< std::pair< K, V > > s_match; + s_match.clear(); + int start_ind = 0, end_ind = 0; + my_node->get_all(bur->get_input_range(), UINT32_MAX, start_ind, end_ind, &s_match); + + static thread_local std::vector< pair< K, V > > s_replace_kv; + s_replace_kv.clear(); + bur->get_cb_param()->node_version = my_node->get_version(); + ret = bur->callback()(s_match, s_replace_kv, bur->get_cb_param(), subrange); + if (ret != btree_status_t::success) { return ret; } + + HS_ASSERT_CMP(DEBUG, start_ind, <=, end_ind); + if (s_match.size() > 0) { my_node->remove(start_ind, end_ind); } + COUNTER_DECREMENT(m_metrics, btree_obj_count, s_match.size()); + + for (const auto& pair : s_replace_kv) { // insert is based on compare() of BtreeKey + auto status = my_node->insert(pair.first, pair.second); + BT_RELEASE_ASSERT((status == btree_status_t::success), my_node, "unexpected insert failure"); + COUNTER_INCREMENT(m_metrics, btree_obj_count, 1); + } + + /* update cursor in input range */ + auto end_key_ptr = const_cast< BtreeKey* >(subrange.get_end_key()); + bur->get_input_range().set_cursor_key( + end_key_ptr, ([](BtreeKey* end_key) { return std::move(std::make_unique< K >(*((K*)end_key))); })); + if (homestore::vol_test_run) { + // sorted check + for (auto i = 1u; i < my_node->get_total_entries(); i++) { + K curKey, prevKey; + my_node->get_nth_key(i - 1, &prevKey, false); + my_node->get_nth_key(i, &curKey, false); + if (prevKey.compare(&curKey) >= 0) { + LOGINFO("my_node {}", my_node->to_string()); + for (const auto& [k, v] : s_match) { + LOGINFO("match key {} value {}", k.to_string(), v.to_string()); + } + for (const auto& [k, v] : s_replace_kv) { + LOGINFO("replace key {} value {}", k.to_string(), v.to_string()); + } + } + BT_RELEASE_ASSERT_CMP(prevKey.compare(&curKey), <, 0, my_node); + } + } + } else { + if (!my_node->put(k, v, put_type, existing_val)) { ret = btree_status_t::put_failed; } + COUNTER_INCREMENT(m_metrics, btree_obj_count, 1); + } + + write_node(my_node, bcp); + return ret; + } + + btree_status_t get_start_and_end_ind(const BtreeNodePtr< K >& my_node, BtreeUpdateRequest< K, V >* bur, + const BtreeKey& k, int& start_ind, int& end_ind) { + + btree_status_t ret = btree_status_t::success; + if (bur != nullptr) { + /* just get start/end index from get_all. We don't release the parent lock until this + * key range is not inserted from start_ind to end_ind. + */ + my_node->get_all(bur->get_input_range(), UINT32_MAX, start_ind, end_ind); + } else { + auto result = my_node->find(k, nullptr, nullptr, true, true); + end_ind = start_ind = result.end_of_search_index; + ASSERT_IS_VALID_INTERIOR_CHILD_INDX(result, my_node); + } + + if (start_ind > end_ind) { + BT_LOG_ASSERT(false, my_node, "start ind {} greater than end ind {}", start_ind, end_ind); + ret = btree_status_t::retry; + } + return ret; + } + + /* It split the child if a split is required. It releases lock on parent and child_node in case of failure */ + btree_status_t check_and_split_node(const BtreeNodePtr< K >& my_node, BtreeUpdateRequest< K, V >* bur, + const BtreeKey& k, const BtreeValue& v, int ind_hint, btree_put_type put_type, + BtreeNodePtr< K > child_node, homeds::thread::locktype& curlock, + homeds::thread::locktype& child_curlock, int child_ind, bool& split_occured, + const btree_cp_ptr& bcp) { + + split_occured = false; + K split_key; + btree_status_t ret = btree_status_t::success; + auto child_lock_type = child_curlock; + auto none_lock_type = LOCKTYPE_NONE; + +#ifdef _PRERELEASE + boost::optional< int > time; + if (child_node->is_leaf()) { + time = homestore_flip->get_test_flip< int >("btree_delay_and_split_leaf", child_node->get_total_entries()); + } else { + time = homestore_flip->get_test_flip< int >("btree_delay_and_split", child_node->get_total_entries()); + } + if (time && child_node->get_total_entries() > 2) { + std::this_thread::sleep_for(std::chrono::microseconds{time.get()}); + } else +#endif + { + if (!child_node->is_split_needed(m_bt_cfg, k, v, &ind_hint, put_type, bur)) { return ret; } + } + + /* Split needed */ + if (bur) { + + /* In case of range update we might split multiple childs of a parent in a single + * iteration which result into less space in the parent node. + */ +#ifdef _PRERELEASE + if (homestore_flip->test_flip("btree_parent_node_full")) { + ret = btree_status_t::retry; + goto out; + } +#endif + if (my_node->is_split_needed(m_bt_cfg, k, v, &ind_hint, put_type, bur)) { + // restart from root + ret = btree_status_t::retry; + goto out; + } + } + + // Time to split the child, but we need to convert parent to write lock + ret = upgrade_node(my_node, child_node, curlock, child_curlock, bcp); + if (ret != btree_status_t::success) { + THIS_BT_LOG(DEBUG, btree_structures, my_node, "Upgrade of node lock failed, retrying from root"); + BT_LOG_ASSERT_CMP(curlock, ==, homeds::thread::LOCKTYPE_NONE, my_node); + goto out; + } + BT_LOG_ASSERT_CMP(child_curlock, ==, child_lock_type, my_node); + BT_LOG_ASSERT_CMP(curlock, ==, homeds::thread::LOCKTYPE_WRITE, my_node); + + // We need to upgrade the child to WriteLock + ret = upgrade_node(child_node, nullptr, child_curlock, none_lock_type, bcp); + if (ret != btree_status_t::success) { + THIS_BT_LOG(DEBUG, btree_structures, child_node, "Upgrade of child node lock failed, retrying from root"); + BT_LOG_ASSERT_CMP(child_curlock, ==, homeds::thread::LOCKTYPE_NONE, child_node); + goto out; + } + BT_LOG_ASSERT_CMP(none_lock_type, ==, homeds::thread::LOCKTYPE_NONE, my_node); + BT_LOG_ASSERT_CMP(child_curlock, ==, homeds::thread::LOCKTYPE_WRITE, child_node); + + // Real time to split the node and get point at which it was split + ret = split_node(my_node, child_node, child_ind, &split_key, bcp); + if (ret != btree_status_t::success) { goto out; } + + // After split, retry search and walk down. + unlock_node(child_node, homeds::thread::LOCKTYPE_WRITE); + child_curlock = LOCKTYPE_NONE; + COUNTER_INCREMENT(m_metrics, btree_split_count, 1); + split_occured = true; + out: + if (ret != btree_status_t::success) { + if (curlock != LOCKTYPE_NONE) { + unlock_node(my_node, curlock); + curlock = LOCKTYPE_NONE; + } + + if (child_curlock != LOCKTYPE_NONE) { + unlock_node(child_node, child_curlock); + child_curlock = LOCKTYPE_NONE; + } + } + return ret; + } + + /* This function is called for the interior nodes whose childs are leaf nodes to calculate the sub range */ + void get_subrange(const BtreeNodePtr< K >& my_node, BtreeUpdateRequest< K, V >* bur, int curr_ind, + K& subrange_start_key, K& subrange_end_key, bool& subrange_start_inc, bool& subrange_end_inc) { + +#ifndef NDEBUG + if (curr_ind > 0) { + /* start of subrange will always be more then the key in curr_ind - 1 */ + K start_key; + BtreeKey* start_key_ptr = &start_key; + + my_node->get_nth_key(curr_ind - 1, start_key_ptr, false); + HS_ASSERT_CMP(DEBUG, start_key_ptr->compare(bur->get_input_range().get_start_key()), <=, 0); + } +#endif + + // find end of subrange + bool end_inc = true; + K end_key; + BtreeKey* end_key_ptr = &end_key; + + if (curr_ind < (int)my_node->get_total_entries()) { + my_node->get_nth_key(curr_ind, end_key_ptr, false); + if (end_key_ptr->compare(bur->get_input_range().get_end_key()) >= 0) { + /* this is last index to process as end of range is smaller then key in this node */ + end_key_ptr = const_cast< BtreeKey* >(bur->get_input_range().get_end_key()); + end_inc = bur->get_input_range().is_end_inclusive(); + } else { + end_inc = true; + } + } else { + /* it is the edge node. end key is the end of input range */ + BT_LOG_ASSERT_CMP(my_node->has_valid_edge(), ==, true, my_node); + end_key_ptr = const_cast< BtreeKey* >(bur->get_input_range().get_end_key()); + end_inc = bur->get_input_range().is_end_inclusive(); + } + + BtreeSearchRange& input_range = bur->get_input_range(); + auto start_key_ptr = input_range.get_start_key(); + subrange_start_key.copy_blob(start_key_ptr->get_blob()); + subrange_end_key.copy_blob(end_key_ptr->get_blob()); + subrange_start_inc = input_range.is_start_inclusive(); + subrange_end_inc = end_inc; + + auto ret = subrange_start_key.compare(&subrange_end_key); + BT_RELEASE_ASSERT_CMP(ret, <=, 0, my_node); + ret = subrange_start_key.compare(bur->get_input_range().get_end_key()); + BT_RELEASE_ASSERT_CMP(ret, <=, 0, my_node); + /* We don't neeed to update the start at it is updated when entries are inserted in leaf nodes */ + } + + btree_status_t check_split_root(const BtreeMutateRequest& put_req) { + int ind; + K split_key; + BtreeNodePtr< K > child_node = nullptr; + btree_status_t ret = btree_status_t::success; + + m_btree_lock.write_lock(); + BtreeNodePtr< K > root; + + ret = read_and_lock_root(m_root_node, root, locktype::LOCKTYPE_WRITE, locktype::LOCKTYPE_WRITE); + if (ret != btree_status_t::success) { goto done; } + + if (!root->is_split_needed(m_bt_cfg, put_req)) { + unlock_node(root, homeds::thread::LOCKTYPE_WRITE); + goto done; + } + + // Create a new child node and split them + child_node = alloc_interior_node(); + if (child_node == nullptr) { + ret = btree_status_t::space_not_avail; + unlock_node(root, homeds::thread::LOCKTYPE_WRITE); + goto done; + } + + /* it swap the data while keeping the nodeid same */ + btree_store_t::swap_node(m_btree_store.get(), root, child_node); + write_node(child_node); + + THIS_BT_LOG(DEBUG, btree_structures, root, + "Root node is full, swapping contents with child_node {} and split that", + child_node->get_node_id()); + + BT_DEBUG_ASSERT_CMP(root->get_total_entries(), ==, 0, root); + ret = split_node(root, child_node, root->get_total_entries(), &split_key, true); + BT_DEBUG_ASSERT_CMP(m_root_node, ==, root->get_node_id(), root); + + if (ret != btree_status_t::success) { + btree_store_t::swap_node(m_btree_store.get(), child_node, root); + write_node(child_node); + } + + /* unlock child node */ + unlock_node(root, homeds::thread::LOCKTYPE_WRITE); + + if (ret == btree_status_t::success) { COUNTER_INCREMENT(m_metrics, btree_depth, 1); } + done: + m_btree_lock.unlock(); + return ret; + } + + btree_status_t check_collapse_root(const btree_cp_ptr& bcp) { + BtreeNodePtr< K > child_node = nullptr; + btree_status_t ret = btree_status_t::success; + std::vector< BtreeNodePtr< K > > old_nodes; + std::vector< BtreeNodePtr< K > > new_nodes; + + m_btree_lock.write_lock(); + BtreeNodePtr< K > root; + + ret = read_and_lock_root(m_root_node, root, locktype::LOCKTYPE_WRITE, locktype::LOCKTYPE_WRITE, bcp); + if (ret != btree_status_t::success) { goto done; } + + if (root->get_total_entries() != 0 || root->is_leaf() /*some other thread collapsed root already*/) { + unlock_node(root, locktype::LOCKTYPE_WRITE); + goto done; + } + + BT_DEBUG_ASSERT_CMP(root->has_valid_edge(), ==, true, root); + ret = read_node(root->get_edge_id(), child_node); + if (child_node == nullptr) { + unlock_node(root, locktype::LOCKTYPE_WRITE); + goto done; + } + + // Elevate the edge child as root. + btree_store_t::swap_node(m_btree_store.get(), root, child_node); + write_node(root, bcp); + BT_DEBUG_ASSERT_CMP(m_root_node, ==, root->get_node_id(), root); + + old_nodes.push_back(child_node); + + if (BtreeStoreType == btree_store_type::SSD_BTREE) { + auto j_iob = btree_store_t::make_journal_entry(journal_op::BTREE_MERGE, true /* is_root */, bcp); + btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::inplace_write, root, bcp); + btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::removal, child_node, bcp); + btree_store_t::write_journal_entry(m_btree_store.get(), bcp, j_iob); + } + unlock_node(root, locktype::LOCKTYPE_WRITE); + free_node(child_node, (bcp ? bcp->free_blkid_list : nullptr)); + + if (ret == btree_status_t::success) { COUNTER_DECREMENT(m_metrics, btree_depth, 1); } + done: + m_btree_lock.unlock(); + return ret; + } + + btree_status_t split_node(const BtreeNodePtr< K >& parent_node, BtreeNodePtr< K > child_node, uint32_t parent_ind, + BtreeKey* out_split_key, const btree_cp_ptr& bcp, bool root_split = false) { + BtreeNodeInfo ninfo; + BtreeNodePtr< K > child_node1 = child_node; + BtreeNodePtr< K > child_node2 = child_node1->is_leaf() ? alloc_leaf_node() : alloc_interior_node(); + + if (child_node2 == nullptr) { return (btree_status_t::space_not_avail); } + + btree_status_t ret = btree_status_t::success; + + child_node2->set_next_bnode(child_node1->next_bnode()); + child_node1->set_next_bnode(child_node2->get_node_id()); + uint32_t child1_filled_size = m_bt_cfg.get_node_area_size() - child_node1->get_available_size(m_bt_cfg); + + auto split_size = m_bt_cfg.get_split_size(child1_filled_size); + uint32_t res = child_node1->move_out_to_right_by_size(m_bt_cfg, child_node2, split_size); + + BT_RELEASE_ASSERT_CMP(res, >, 0, child_node1, + "Unable to split entries in the child node"); // means cannot split entries + BT_DEBUG_ASSERT_CMP(child_node1->get_total_entries(), >, 0, child_node1); + + // Update the existing parent node entry to point to second child ptr. + bool edge_split = (parent_ind == parent_node->get_total_entries()); + ninfo.set_bnode_id(child_node2->get_node_id()); + parent_node->update(parent_ind, ninfo); + + // Insert the last entry in first child to parent node + child_node1->get_last_key(out_split_key); + ninfo.set_bnode_id(child_node1->get_node_id()); + + /* If key is extent then we always insert the end key in the parent node */ + K out_split_end_key; + out_split_end_key.copy_end_key_blob(out_split_key->get_blob()); + parent_node->insert(out_split_end_key, ninfo); + +#ifndef NDEBUG + K split_key; + child_node2->get_first_key(&split_key); + BT_DEBUG_ASSERT_CMP(split_key.compare(out_split_key), >, 0, child_node2); +#endif + THIS_BT_LOG(DEBUG, btree_structures, parent_node, "Split child_node={} with new_child_node={}, split_key={}", + child_node1->get_node_id(), child_node2->get_node_id(), out_split_key->to_string()); + + if (BtreeStoreType == btree_store_type::SSD_BTREE) { + auto j_iob = btree_store_t::make_journal_entry(journal_op::BTREE_SPLIT, root_split, bcp, + {parent_node->get_node_id(), parent_node->get_gen()}); + btree_store_t::append_node_to_journal( + j_iob, (root_split ? bt_journal_node_op::creation : bt_journal_node_op::inplace_write), child_node1, + bcp, out_split_end_key.get_blob()); + + // For root split or split around the edge, we don't write the key, which will cause replay to insert + // edge + if (edge_split) { + btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::creation, child_node2, bcp); + } else { + K child2_pkey; + parent_node->get_nth_key(parent_ind, &child2_pkey, true); + btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::creation, child_node2, bcp, + child2_pkey.get_blob()); + } + btree_store_t::write_journal_entry(m_btree_store.get(), bcp, j_iob); + } + + // we write right child node, than left and than parent child + write_node(child_node2, nullptr, bcp); + write_node(child_node1, child_node2, bcp); + write_node(parent_node, child_node1, bcp); + + // NOTE: Do not access parentInd after insert, since insert would have + // shifted parentNode to the right. + return ret; + } + +public: + btree_status_t create_btree_replay(btree_journal_entry* jentry, const btree_cp_ptr& bcp) { + if (jentry) { + BT_DEBUG_ASSERT_CMP(jentry->is_root, ==, true, , + "Expected create_btree_replay entry to be root journal entry"); + BT_DEBUG_ASSERT_CMP(jentry->parent_node.get_id(), ==, m_root_node, , "Root node journal entry mismatch"); + } + + // Create a root node by reserving the leaf node + BtreeNodePtr< K > root = reserve_leaf_node(BlkId(m_root_node)); + auto ret = write_node(root, nullptr, bcp); + BT_DEBUG_ASSERT_CMP(ret, ==, btree_status_t::success, , "expecting success in writing root node"); + return btree_status_t::success; + } + + btree_status_t split_node_replay(btree_journal_entry* jentry, const btree_cp_ptr& bcp) { + bnodeid_t id = jentry->is_root ? m_root_node : jentry->parent_node.node_id; + BtreeNodePtr< K > parent_node; + + // read parent node + read_node_or_fail(id, parent_node); + + // Parent already went ahead of the journal entry, return done + if (parent_node->get_gen() >= jentry->parent_node.node_gen) { + THIS_BT_LOG(INFO, base, , + "Journal replay: parent_node gen {} ahead of jentry gen {} is root {} , skipping ", + parent_node->get_gen(), jentry->parent_node.get_gen(), jentry->is_root); + return btree_status_t::replay_not_needed; + } + + // Read the first inplace write node which is the leftmost child and also form child split key from journal + auto j_child_nodes = jentry->get_nodes(); + + BtreeNodePtr< K > child_node1; + if (jentry->is_root) { + // If root is not written yet, parent_node will be pointing child_node1, so create a new parent_node to + // be treated as root here on. + child_node1 = reserve_interior_node(BlkId(j_child_nodes[0]->node_id())); + btree_store_t::swap_node(m_btree_store.get(), parent_node, child_node1); + + THIS_BT_LOG(INFO, btree_generics, , + "Journal replay: root split, so creating child_node id={} and swapping the node with " + "parent_node id={} names {}", + child_node1->get_node_id(), parent_node->get_node_id(), m_bt_cfg.get_name()); + + } else { + read_node_or_fail(j_child_nodes[0]->node_id(), child_node1); + } + + THIS_BT_LOG(INFO, btree_generics, , + "Journal replay: child_node1 => jentry: [id={} gen={}], ondisk: [id={} gen={}] names {}", + j_child_nodes[0]->node_id(), j_child_nodes[0]->node_gen(), child_node1->get_node_id(), + child_node1->get_gen(), m_bt_cfg.get_name()); + if (jentry->is_root) { + BT_RELEASE_ASSERT_CMP(j_child_nodes[0]->type, ==, bt_journal_node_op::creation, , + "Expected first node in journal entry to be new creation for root split"); + } else { + BT_RELEASE_ASSERT_CMP(j_child_nodes[0]->type, ==, bt_journal_node_op::inplace_write, , + "Expected first node in journal entry to be in-place write"); + } + BT_RELEASE_ASSERT_CMP(j_child_nodes[1]->type, ==, bt_journal_node_op::creation, , + "Expected second node in journal entry to be new node creation"); + + // recover child node + bool child_split = recover_child_nodes_in_split(child_node1, j_child_nodes, bcp); + + // recover parent node + recover_parent_node_in_split(parent_node, child_split ? child_node1 : nullptr, j_child_nodes, bcp); + return btree_status_t::success; + } + +private: + bool recover_child_nodes_in_split(const BtreeNodePtr< K >& child_node1, + const std::vector< bt_journal_node_info* >& j_child_nodes, + const btree_cp_ptr& bcp) { + + BtreeNodePtr< K > child_node2; + // Check if child1 is ahead of the generation + if (child_node1->get_gen() >= j_child_nodes[0]->node_gen()) { + // leftmost_node is written, so right node must have been written as well. + read_node_or_fail(child_node1->next_bnode(), child_node2); + + // sanity check for right node + BT_RELEASE_ASSERT_CMP(child_node2->get_gen(), >=, j_child_nodes[1]->node_gen(), child_node2, + "gen cnt should be more than the journal entry"); + // no need to recover child nodes + return false; + } + + K split_key; + split_key.set_blob({j_child_nodes[0]->key_area(), j_child_nodes[0]->key_size}); + child_node2 = child_node1->is_leaf() ? reserve_leaf_node(BlkId(j_child_nodes[1]->node_id())) + : reserve_interior_node(BlkId(j_child_nodes[1]->node_id())); + + // We need to do split based on entries since the left children is also not written yet. + // Find the split key within the child_node1. It is not always found, so we split upto that. + auto ret = child_node1->find(split_key, nullptr, false); + + // sanity check for left mode node before recovery + { + if (!ret.found) { + if (!child_node1->is_leaf()) { + BT_RELEASE_ASSERT(0, , "interior nodes should always have this key if it is written yet"); + } + } + } + + THIS_BT_LOG(INFO, btree_generics, , "Journal replay: split key {}, split indx {} child_node1 {}", + split_key.to_string(), ret.end_of_search_index, child_node1->to_string()); + /* if it is not found than end_of_search_index points to first ind which is greater than split key */ + auto split_ind = ret.end_of_search_index; + if (ret.found) { ++split_ind; } // we don't want to move split key */ + if (child_node1->is_leaf() && split_ind < (int)child_node1->get_total_entries()) { + K key; + child_node1->get_nth_key(split_ind, &key, false); + + if (split_key.compare_start(&key) >= 0) { /* we need to split the key range */ + THIS_BT_LOG(INFO, btree_generics, , "splitting a leaf node key {}", key.to_string()); + V v; + child_node1->get_nth_value(split_ind, &v, false); + vector< pair< K, V > > replace_kv; + child_node1->remove(split_ind, split_ind); + m_split_key_cb(key, v, split_key, replace_kv); + for (auto& pair : replace_kv) { + auto status = child_node1->insert(pair.first, pair.second); + BT_RELEASE_ASSERT((status == btree_status_t::success), child_node1, "unexpected insert failure"); + } + auto ret = child_node1->find(split_key, nullptr, false); + BT_RELEASE_ASSERT((ret.found && (ret.end_of_search_index == split_ind)), child_node1, + "found new indx {}, old split indx{}", ret.end_of_search_index, split_ind); + ++split_ind; + } + } + child_node1->move_out_to_right_by_entries(m_bt_cfg, child_node2, child_node1->get_total_entries() - split_ind); + + child_node2->set_next_bnode(child_node1->next_bnode()); + child_node2->set_gen(j_child_nodes[1]->node_gen()); + + child_node1->set_next_bnode(child_node2->get_node_id()); + child_node1->set_gen(j_child_nodes[0]->node_gen()); + + THIS_BT_LOG(INFO, btree_generics, , "Journal replay: child_node2 {}", child_node2->to_string()); + write_node(child_node2, nullptr, bcp); + write_node(child_node1, child_node2, bcp); + return true; + } + + void recover_parent_node_in_split(const BtreeNodePtr< K >& parent_node, const BtreeNodePtr< K >& child_node1, + std::vector< bt_journal_node_info* >& j_child_nodes, const btree_cp_ptr& bcp) { + + // find child_1 key + K child1_key; // we need to insert child1_key + BT_RELEASE_ASSERT_CMP(j_child_nodes[0]->key_size, !=, 0, , "key size of left mode node is zero"); + child1_key.set_blob({j_child_nodes[0]->key_area(), j_child_nodes[0]->key_size}); + auto child1_node_id = j_child_nodes[0]->node_id(); + + // find split indx + auto ret = parent_node->find(child1_key, nullptr, false); + BT_RELEASE_ASSERT_CMP(ret.found, ==, false, , "child_1 key should not be in this parent"); + auto split_indx = ret.end_of_search_index; + + // find child2_key + K child2_key; // we only need to update child2_key to new node + if (j_child_nodes[1]->key_size != 0) { + child2_key.set_blob({j_child_nodes[1]->key_area(), j_child_nodes[1]->key_size}); + ret = parent_node->find(child2_key, nullptr, false); + BT_RELEASE_ASSERT_CMP(split_indx, ==, ret.end_of_search_index, , "it should be same as split index"); + } else { + // parent should be valid edge it is not a root split + } + auto child2_node_id = j_child_nodes[1]->node_id(); + + // update child2_key value + BtreeNodeInfo ninfo; + ninfo.set_bnode_id(child2_node_id); + parent_node->update(split_indx, ninfo); + + // insert child 1 + ninfo.set_bnode_id(child1_node_id); + K out_split_end_key; + out_split_end_key.copy_end_key_blob(child1_key.get_blob()); + parent_node->insert(out_split_end_key, ninfo); + + // Write the parent node + write_node(parent_node, child_node1, bcp); + + /* do sanity check after recovery split */ + { + validate_sanity_child(parent_node, split_indx); + validate_sanity_next_child(parent_node, split_indx); + } + } + + btree_status_t merge_nodes(const BtreeNodePtr< K >& parent_node, uint32_t start_indx, uint32_t end_indx, + const btree_cp_ptr& bcp) { + btree_status_t ret = btree_status_t::merge_failed; + std::vector< BtreeNodePtr< K > > child_nodes; + std::vector< BtreeNodePtr< K > > old_nodes; + std::vector< BtreeNodePtr< K > > replace_nodes; + std::vector< BtreeNodePtr< K > > new_nodes; + std::vector< BtreeNodePtr< K > > deleted_nodes; + BtreeNodePtr< K > left_most_node; + K last_pkey; // last key of parent node + bool last_pkey_valid = false; + uint32_t balanced_size; + BtreeNodePtr< K > merge_node; + K last_ckey; // last key in child + uint32_t parent_insert_indx = start_indx; +#ifndef NDEBUG + uint32_t total_child_entries = 0; + uint32_t new_entries = 0; + K last_debug_ckey; + K new_last_debug_ckey; + BtreeNodePtr< K > last_node; +#endif + /* Try to take a lock on all nodes participating in merge*/ + for (auto indx = start_indx; indx <= end_indx; ++indx) { + if (indx == parent_node->get_total_entries()) { + BT_LOG_ASSERT(parent_node->has_valid_edge(), parent_node, + "Assertion failure, expected valid edge for parent_node: {}"); + } + + BtreeNodeInfo child_info; + parent_node->get(indx, &child_info, false /* copy */); + + BtreeNodePtr< K > child; + ret = read_and_lock_node(child_info.bnode_id(), child, locktype::LOCKTYPE_WRITE, locktype::LOCKTYPE_WRITE, + bcp); + if (ret != btree_status_t::success) { goto out; } + BT_LOG_ASSERT_CMP(child->is_valid_node(), ==, true, child); + + /* check if left most node has space */ + if (indx == start_indx) { + balanced_size = m_bt_cfg.get_ideal_fill_size(); + left_most_node = child; + if (left_most_node->get_occupied_size(m_bt_cfg) > balanced_size) { + /* first node doesn't have any free space. we can exit now */ + ret = btree_status_t::merge_not_required; + goto out; + } + } else { + bool is_allocated = true; + /* pre allocate the new nodes. We will free the nodes which are not in use later */ + auto new_node = btree_store_t::alloc_node(m_btree_store.get(), child->is_leaf(), is_allocated, child); + if (is_allocated) { + /* we are going to allocate new blkid of all the nodes except the first node. + * Note :- These blkids will leak if we fail or crash before writing entry into + * journal. + */ + old_nodes.push_back(child); + COUNTER_INCREMENT_IF_ELSE(m_metrics, child->is_leaf(), btree_leaf_node_count, btree_int_node_count, + 1); + } + /* Blk IDs can leak if it crash before writing it to a journal */ + if (new_node == nullptr) { + ret = btree_status_t::space_not_avail; + goto out; + } + new_nodes.push_back(new_node); + } +#ifndef NDEBUG + total_child_entries += child->get_total_entries(); + child->get_last_key(&last_debug_ckey); +#endif + child_nodes.push_back(child); + } + + if (end_indx != parent_node->get_total_entries()) { + /* If it is not edge we always preserve the last key in a given merge group of nodes.*/ + parent_node->get_nth_key(end_indx, &last_pkey, true); + last_pkey_valid = true; + } + + merge_node = left_most_node; + /* We can not fail from this point. Nodes will be modified in memory. */ + for (uint32_t i = 0; i < new_nodes.size(); ++i) { + auto occupied_size = merge_node->get_occupied_size(m_bt_cfg); + if (occupied_size < balanced_size) { + uint32_t pull_size = balanced_size - occupied_size; + merge_node->move_in_from_right_by_size(m_bt_cfg, new_nodes[i], pull_size); + if (new_nodes[i]->get_total_entries() == 0) { + /* this node is freed */ + deleted_nodes.push_back(new_nodes[i]); + continue; + } + } + + /* update the last key of merge node in parent node */ + K last_ckey; // last key in child + merge_node->get_last_key(&last_ckey); + BtreeNodeInfo ninfo(merge_node->get_node_id()); + parent_node->update(parent_insert_indx, last_ckey, ninfo); + ++parent_insert_indx; + + merge_node->set_next_bnode(new_nodes[i]->get_node_id()); // link them + merge_node = new_nodes[i]; + if (merge_node != left_most_node) { + /* left most node is not replaced */ + replace_nodes.push_back(merge_node); + } + } + + /* update the latest merge node */ + merge_node->get_last_key(&last_ckey); + if (last_pkey_valid) { + BT_DEBUG_ASSERT_CMP(last_ckey.compare(&last_pkey), <=, 0, parent_node); + last_ckey = last_pkey; + } + + /* update the last key */ + { + BtreeNodeInfo ninfo(merge_node->get_node_id()); + parent_node->update(parent_insert_indx, last_ckey, ninfo); + ++parent_insert_indx; + } + + /* remove the keys which are no longer used */ + if ((parent_insert_indx) <= end_indx) { parent_node->remove(parent_insert_indx, end_indx); } + + /* write the journal entry */ + if (BtreeStoreType == btree_store_type::SSD_BTREE) { + auto j_iob = btree_store_t::make_journal_entry(journal_op::BTREE_MERGE, false /* is_root */, bcp, + {parent_node->get_node_id(), parent_node->get_gen()}); + K child_pkey; + if (start_indx < parent_node->get_total_entries()) { + parent_node->get_nth_key(start_indx, &child_pkey, true); + BT_RELEASE_ASSERT_CMP(start_indx, ==, (parent_insert_indx - 1), parent_node, "it should be last index"); + } + btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::inplace_write, left_most_node, bcp, + child_pkey.get_blob()); + for (auto& node : old_nodes) { + btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::removal, node, bcp); + } + uint32_t insert_indx = 0; + for (auto& node : replace_nodes) { + K child_pkey; + if ((start_indx + insert_indx) < parent_node->get_total_entries()) { + parent_node->get_nth_key(start_indx + insert_indx, &child_pkey, true); + BT_RELEASE_ASSERT_CMP((start_indx + insert_indx), ==, (parent_insert_indx - 1), parent_node, + "it should be last index"); + } + btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::creation, node, bcp, + child_pkey.get_blob()); + ++insert_indx; + } + BT_RELEASE_ASSERT_CMP((start_indx + insert_indx), ==, parent_insert_indx, parent_node, "it should be same"); + btree_store_t::write_journal_entry(m_btree_store.get(), bcp, j_iob); + } + + if (replace_nodes.size() > 0) { + /* write the right most node */ + write_node(replace_nodes[replace_nodes.size() - 1], nullptr, bcp); + if (replace_nodes.size() > 1) { + /* write the middle nodes */ + for (int i = replace_nodes.size() - 2; i >= 0; --i) { + write_node(replace_nodes[i], replace_nodes[i + 1], bcp); + } + } + /* write the left most node */ + write_node(left_most_node, replace_nodes[0], bcp); + } else { + /* write the left most node */ + write_node(left_most_node, nullptr, bcp); + } + + /* write the parent node */ + write_node(parent_node, left_most_node, bcp); + +#ifndef NDEBUG + for (const auto& n : replace_nodes) { + new_entries += n->get_total_entries(); + } + + new_entries += left_most_node->get_total_entries(); + HS_DEBUG_ASSERT_EQ(total_child_entries, new_entries); + + if (replace_nodes.size()) { + replace_nodes[replace_nodes.size() - 1]->get_last_key(&new_last_debug_ckey); + last_node = replace_nodes[replace_nodes.size() - 1]; + } else { + left_most_node->get_last_key(&new_last_debug_ckey); + last_node = left_most_node; + } + if (last_debug_ckey.compare(&new_last_debug_ckey) != 0) { + LOGINFO("{}", last_node->to_string()); + if (deleted_nodes.size() > 0) { LOGINFO("{}", (deleted_nodes[deleted_nodes.size() - 1]->to_string())); } + HS_DEBUG_ASSERT(false, "compared failed"); + } +#endif + /* free nodes. It actually gets freed after cp is completed */ + for (const auto& n : old_nodes) { + free_node(n, (bcp ? bcp->free_blkid_list : nullptr)); + } + for (const auto& n : deleted_nodes) { + free_node(n); + } + ret = btree_status_t::success; + out: +#ifndef NDEBUG + uint32_t freed_entries = deleted_nodes.size(); + uint32_t scan_entries = end_indx - start_indx - freed_entries + 1; + for (uint32_t i = 0; i < scan_entries; ++i) { + if (i < (scan_entries - 1)) { validate_sanity_next_child(parent_node, (uint32_t)start_indx + i); } + validate_sanity_child(parent_node, (uint32_t)start_indx + i); + } +#endif + // Loop again in reverse order to unlock the nodes. freeable nodes need to be unlocked and freed + for (uint32_t i = child_nodes.size() - 1; i != 0; i--) { + unlock_node(child_nodes[i], locktype::LOCKTYPE_WRITE); + } + unlock_node(child_nodes[0], locktype::LOCKTYPE_WRITE); + if (ret != btree_status_t::success) { + /* free the allocated nodes */ + for (const auto& n : new_nodes) { + free_node(n); + } + } + return ret; + } + +#if 0 + btree_status_t merge_node_replay(btree_journal_entry* jentry, const btree_cp_ptr& bcp) { + BtreeNodePtr< K > parent_node = (jentry->is_root) ? read_node(m_root_node) : read_node(jentry->parent_node.node_id); + + // Parent already went ahead of the journal entry, return done + if (parent_node->get_gen() >= jentry->parent_node.node_gen) { return btree_status_t::replay_not_needed; } + } +#endif + + void validate_sanity_child(const BtreeNodePtr< K >& parent_node, uint32_t ind) { + BtreeNodeInfo child_info; + K child_first_key; + K child_last_key; + K parent_key; + + parent_node->get(ind, &child_info, false /* copy */); + BtreeNodePtr< K > child_node = nullptr; + auto ret = read_node(child_info.bnode_id(), child_node); + BT_REL_ASSERT_EQ(ret, btree_status_t::success, "read failed, reason: {}", ret); + if (child_node->get_total_entries() == 0) { + auto parent_entries = parent_node->get_total_entries(); + if (!child_node->is_leaf()) { // leaf node or edge node can have 0 entries + BT_REL_ASSERT_EQ(((parent_node->has_valid_edge() && ind == parent_entries)), true); + } + return; + } + child_node->get_first_key(&child_first_key); + child_node->get_last_key(&child_last_key); + BT_REL_ASSERT_LE(child_first_key.compare(&child_last_key), 0) + if (ind == parent_node->get_total_entries()) { + BT_REL_ASSERT_EQ(parent_node->has_valid_edge(), true); + if (ind > 0) { + parent_node->get_nth_key(ind - 1, &parent_key, false); + BT_REL_ASSERT_GT(child_first_key.compare(&parent_key), 0) + BT_REL_ASSERT_GT(parent_key.compare_start(&child_first_key), 0) + } + } else { + parent_node->get_nth_key(ind, &parent_key, false); + BT_REL_ASSERT_LE(child_first_key.compare(&parent_key), 0) + BT_REL_ASSERT_LE(child_last_key.compare(&parent_key), 0) + BT_REL_ASSERT_GE(parent_key.compare_start(&child_first_key), 0) + BT_REL_ASSERT_GE(parent_key.compare_start(&child_first_key), 0) + if (ind != 0) { + parent_node->get_nth_key(ind - 1, &parent_key, false); + BT_REL_ASSERT_GT(child_first_key.compare(&parent_key), 0) + BT_REL_ASSERT_GT(parent_key.compare_start(&child_first_key), 0) + } + } + } + + void validate_sanity_next_child(const BtreeNodePtr< K >& parent_node, uint32_t ind) { + BtreeNodeInfo child_info; + K child_key; + K parent_key; + + if (parent_node->has_valid_edge()) { + if (ind == parent_node->get_total_entries()) { return; } + } else { + if (ind == parent_node->get_total_entries() - 1) { return; } + } + parent_node->get(ind + 1, &child_info, false /* copy */); + BtreeNodePtr< K > child_node = nullptr; + auto ret = read_node(child_info.bnode_id(), child_node); + HS_RELEASE_ASSERT(ret == btree_status_t::success, "read failed, reason: {}", ret); + if (child_node->get_total_entries() == 0) { + auto parent_entries = parent_node->get_total_entries(); + if (!child_node->is_leaf()) { // leaf node can have 0 entries + HS_ASSERT_CMP(RELEASE, + ((parent_node->has_valid_edge() && ind == parent_entries) || (ind = parent_entries - 1)), + ==, true); + } + return; + } + /* in case of merge next child will never have zero entries otherwise it would have been merged */ + HS_ASSERT_CMP(RELEASE, child_node->get_total_entries(), !=, 0); + child_node->get_first_key(&child_key); + parent_node->get_nth_key(ind, &parent_key, false); + BT_REL_ASSERT_GT(child_key.compare(&parent_key), 0) + BT_REL_ASSERT_GT(parent_key.compare_start(&child_key), 0) + } + + /* Recovery process is different for root node, child node and sibling node depending on how the node + * is accessed. This is the reason to create below three apis separately. + */ + +protected: + BtreeConfig* get_config() { return &m_bt_cfg; } +}; // namespace btree + +// static inline const char* _type_desc(const BtreeNodePtr< K >& n) { return n->is_leaf() ? "L" : "I"; } + +template < btree_store_type BtreeStoreType, typename K, typename V, btree_node_type InteriorNodeType, + btree_node_type LeafNodeType > +thread_local homeds::reserve_vector< btree_locked_node_info, 5 > btree_t::wr_locked_nodes; + +template < btree_store_type BtreeStoreType, typename K, typename V, btree_node_type InteriorNodeType, + btree_node_type LeafNodeType > +thread_local homeds::reserve_vector< btree_locked_node_info, 5 > btree_t::rd_locked_nodes; + +#ifdef SERIALIZABLE_QUERY_IMPLEMENTATION +template < btree_store_type BtreeStoreType, typename K, typename V, btree_node_type InteriorNodeType, + btree_node_type LeafNodeType > +class BtreeLockTrackerImpl : public BtreeLockTracker { +public: + BtreeLockTrackerImpl(btree_t* bt) : m_bt(bt) {} + + virtual ~BtreeLockTrackerImpl() { + while (m_nodes.size()) { + auto& p = m_nodes.top(); + m_bt->unlock_node(p.first, p.second); + m_nodes.pop(); + } + } + + void push(const BtreeNodePtr< K >& node, homeds::thread::locktype locktype) { + m_nodes.emplace(std::make_pair<>(node, locktype)); + } + + std::pair< BtreeNodePtr< K >, homeds::thread::locktype > pop() { + HS_ASSERT_CMP(DEBUG, m_nodes.size(), !=, 0); + std::pair< BtreeNodePtr< K >, homeds::thread::locktype > p; + if (m_nodes.size()) { + p = m_nodes.top(); + m_nodes.pop(); + } else { + p = std::make_pair<>(nullptr, homeds::thread::locktype::LOCKTYPE_NONE); + } + + return p; + } + + BtreeNodePtr< K > top() { return (m_nodes.size == 0) ? nullptr : m_nodes.top().first; } + +private: + btree_t m_bt; + std::stack< std::pair< BtreeNodePtr< K >, homeds::thread::locktype > > m_nodes; +}; +#endif + +} // namespace btree +} // namespace sisl diff --git a/src/btree/rough/sisl_btree_impl.hpp b/src/btree/rough/sisl_btree_impl.hpp new file mode 100644 index 00000000..107df40e --- /dev/null +++ b/src/btree/rough/sisl_btree_impl.hpp @@ -0,0 +1,1653 @@ +#pragma once + +namespace sisl { +namespace btree { +template < typename K, typename V > +class BtreeImpl { +protected: + template < typename K, typename V > + btree_status_t Btree< K, V >::post_order_traversal(locktype_t ltype, + const std::function< void(const BtreeNodePtr< K >&) >& cb) { + BtreeNodePtr< K > root; + btree_status_t ret = read_and_lock_root(m_root_node, root, acq_lock, acq_lock, nullptr); + if (ret != btree_status_t::success) { + m_btree_lock.unlock(); + return ret; + } + + post_order_traversal(root, ltype, cb); + } + + template < typename K, typename V > + btree_status_t Btree< K, V >::post_order_traversal(const BtreeNodePtr< K >& node, locktype_t ltype, + const auto& cb) { + homeds::thread::locktype acq_lock = homeds::thread::LOCKTYPE_WRITE; + uint32_t i = 0; + btree_status_t ret = btree_status_t::success; + + if (!node->is_leaf()) { + BtreeNodeInfo child_info; + while (i <= node->get_total_entries()) { + if (i == node->get_total_entries()) { + if (!node->has_valid_edge()) { break; } + child_info.set_bnode_id(node->get_edge_id()); + } else { + child_info = node->get(i, false /* copy */); + } + + BtreeNodePtr< K > child; + ret = read_and_lock_child(child_info.bnode_id(), child, node, i, acq_lock, acq_lock, nullptr); + if (ret != btree_status_t::success) { return ret; } + ret = post_order_traversal(child, cb); + unlock_node(child, acq_lock); + ++i; + } + } + + if (ret != btree_status_t::success) { return ret; } + cb(node); + return ret; + } + + btree_status_t put_internal(const BtreeMutateRequest& put_req) { + COUNTER_INCREMENT(m_metrics, btree_write_ops_count, 1); + locktype acq_lock = locktype::READ; + int ind = -1; + bool is_leaf = false; + + // THIS_BT_LOG(INFO, base, , "Put called for key = {}, value = {}", k.to_string(), v.to_string()); + + m_btree_lock.read_lock(); + + btree_status_t ret = btree_status_t::success; + retry: + +#ifndef NDEBUG + check_lock_debug(); +#endif + BT_LOG_ASSERT_CMP(rd_locked_nodes.size(), ==, 0, ); + BT_LOG_ASSERT_CMP(wr_locked_nodes.size(), ==, 0, ); + + BtreeNodePtr< K > root; + ret = read_and_lock_root(m_root_node, root, acq_lock, acq_lock); + if (ret != btree_status_t::success) { goto out; } + is_leaf = root->is_leaf(); + + if (root->is_split_needed(m_bt_cfg, put_req)) { + // Time to do the split of root. + unlock_node(root, acq_lock); + m_btree_lock.unlock(); + ret = check_split_root(put_req); + BT_LOG_ASSERT_CMP(rd_locked_nodes.size(), ==, 0, ); + BT_LOG_ASSERT_CMP(wr_locked_nodes.size(), ==, 0, ); + + // We must have gotten a new root, need to start from scratch. + m_btree_lock.read_lock(); + + if (ret != btree_status_t::success) { + LOGERROR("root split failed btree name {}", m_bt_cfg.get_name()); + goto out; + } + + goto retry; + } else if ((is_leaf) && (acq_lock != homeds::thread::LOCKTYPE_WRITE)) { + // Root is a leaf, need to take write lock, instead of read, retry + unlock_node(root, acq_lock); + acq_lock = homeds::thread::LOCKTYPE_WRITE; + goto retry; + } else { + K subrange_start_key, subrange_end_key; + bool start_incl = false, end_incl = false; + if (is_range_update_req(put_req)) { + to_range_update_req(put_req)->get_input_range().copy_start_end_blob(subrange_start_key, start_incl, + subrange_end_key, end_incl); + } + BtreeSearchRange subrange(subrange_start_key, start_incl, subrange_end_key, end_incl); + ret = do_put(root, acq_lock, put_req, ind, subrange); + if (ret == btree_status_t::retry) { + // Need to start from top down again, since there is a race between 2 inserts or deletes. + acq_lock = homeds::thread::locktype_t::READ; + THIS_BT_LOG(TRACE, btree_generics, , "retrying put operation"); + BT_LOG_ASSERT_CMP(rd_locked_nodes.size(), ==, 0, ); + BT_LOG_ASSERT_CMP(wr_locked_nodes.size(), ==, 0, ); + goto retry; + } + } + + out: + m_btree_lock.unlock(); +#ifndef NDEBUG + check_lock_debug(); +#endif + if (ret != btree_status_t::success && ret != btree_status_t::fast_path_not_possible && + ret != btree_status_t::cp_mismatch) { + THIS_BT_LOG(ERROR, base, , "btree put failed {}", ret); + COUNTER_INCREMENT(m_metrics, write_err_cnt, 1); + } + + return ret; + } + + btree_status_t do_get(const BtreeNodePtr< K >& my_node, const BtreeSearchRange& range, BtreeKey* outkey, + BtreeValue* outval) const { + btree_status_t ret = btree_status_t::success; + bool is_child_lock = false; + homeds::thread::locktype child_locktype; + + if (my_node->is_leaf()) { + auto result = my_node->find(range, outkey, outval); + if (result.found) { + ret = btree_status_t::success; + } else { + ret = btree_status_t::not_found; + } + unlock_node(my_node, homeds::thread::locktype::locktype_t::READ); + return ret; + } + + BtreeNodeInfo child_info; + auto result = my_node->find(range, nullptr, &child_info); + ASSERT_IS_VALID_INTERIOR_CHILD_INDX(result, my_node); + + BtreeNodePtr< K > child_node; + child_locktype = homeds::thread::locktype_t::READ; + ret = read_and_lock_child(child_info.bnode_id(), child_node, my_node, result.end_of_search_index, + child_locktype, child_locktype, nullptr); + if (ret != btree_status_t::success) { goto out; } + + unlock_node(my_node, homeds::thread::locktype::locktype_t::READ); + + return (do_get(child_node, range, outkey, outval)); + out: + unlock_node(my_node, homeds::thread::locktype::locktype_t::READ); + return ret; + } + + btree_status_t do_remove(const BtreeNodePtr< K >& my_node, locktype curlock, const BtreeSearchRange& range, + BtreeKey* outkey, BtreeValue* outval) { + btree_status_t ret = btree_status_t::success; + if (my_node->is_leaf()) { + BT_DEBUG_ASSERT_CMP(curlock, ==, LOCKTYPE_WRITE, my_node); + +#ifndef NDEBUG + my_node->validate_key_order(); +#endif + bool is_found = my_node->remove_one(range, outkey, outval); +#ifndef NDEBUG + my_node->validate_key_order(); +#endif + if (is_found) { + write_node(my_node); + COUNTER_DECREMENT(m_metrics, btree_obj_count, 1); + } + + unlock_node(my_node, curlock); + return is_found ? btree_status_t::success : btree_status_t::not_found; + } + + retry: + locktype child_cur_lock = LOCKTYPE_NONE; + + /* range delete is not supported yet */ + // Get the childPtr for given key. + auto [found, ind] = my_node->find(range, nullptr, nullptr); + ASSERT_IS_VALID_INTERIOR_CHILD_INDX(result, my_node); + + BtreeNodeInfo child_info; + BtreeNodePtr< K > child_node; + ret = get_child_and_lock_node(my_node, ind, child_info, child_node, locktype_t::READ, LOCKTYPE_WRITE); + if (ret != btree_status_t::success) { + unlock_node(my_node, curlock); + return ret; + } + + // Check if child node is minimal. + child_cur_lock = child_node->is_leaf() ? LOCKTYPE_WRITE : locktype_t::READ; + if (child_node->is_merge_needed(m_bt_cfg)) { + // If we are unable to upgrade the node, ask the caller to retry. + ret = upgrade_node(my_node, child_node, curlock, child_cur_lock, bcp); + if (ret != btree_status_t::success) { + BT_DEBUG_ASSERT_CMP(curlock, ==, locktype::NONE, my_node) + return ret; + } + BT_DEBUG_ASSERT_CMP(curlock, ==, locktype::WRITE, my_node); + + uint32_t node_end_indx = + my_node->has_valid_edge() ? my_node->get_total_entries() : my_node->get_total_entries() - 1; + uint32_t end_ind = (ind + HS_DYNAMIC_CONFIG(btree->max_nodes_to_rebalance)) < node_end_indx + ? (ind + HS_DYNAMIC_CONFIG(btree->max_nodes_to_rebalance)) + : node_end_indx; + if (end_ind > ind) { + // It is safe to unlock child without upgrade, because child node would not be deleted, since its + // parent (myNode) is being write locked by this thread. In fact upgrading would be a problem, since + // this child might be a middle child in the list of indices, which means we might have to lock one + // in left against the direction of intended locking (which could cause deadlock). + unlock_node(child_node, child_cur_lock); + auto result = merge_nodes(my_node, ind, end_ind); + if (result != btree_status_t::success && result != btree_status_t::merge_not_required) { + // write or read failed + unlock_node(my_node, curlock); + return ret; + } + if (result == btree_status_t::success) { COUNTER_INCREMENT(m_metrics, btree_merge_count, 1); } + goto retry; + } + } + +#ifndef NDEBUG + if (ind != my_node->get_total_entries() && child_node->get_total_entries()) { // not edge + const auto ckey = child_node->get_last_key(); + const auto pkey = my_node->get_nth_key(ind, true); + BT_DEBUG_ASSERT_CMP(ckey.compare(&pkey), <=, 0, my_node); + } + + if (ind > 0 && child_node->get_total_entries()) { // not first child + const auto ckey = child_node->get_first_key(); + const auto pkey = my_node->get_nth_key(ind - 1, true); + BT_DEBUG_ASSERT_CMP(pkey.compare(&ckey), <, 0, my_node); + } +#endif + + unlock_node(my_node, curlock); + return (do_remove(child_node, child_cur_lock, range, outkey, outval)); + + // Warning: Do not access childNode or myNode beyond this point, since it would + // have been unlocked by the recursive function and it could also been deleted. + } + +private: + /* This function does the heavy lifiting of co-ordinating inserts. It is a recursive function which walks + * down the tree. + * + * NOTE: It expects the node it operates to be locked (either read or write) and also the node should not be + * full. + * + * Input: + * myNode = Node it operates on + * curLock = Type of lock held for this node + * put_req = Key to insert + * v = Value to insert + * ind_hint = If we already know which slot to insert to, if not -1 + * put_type = Type of the put (refer to structure btree_put_type) + * is_end_path = set to true only for last path from root to tree, for range put + * op = tracks multi node io. + */ + btree_status_t do_put(const BtreeNodePtr< K >& my_node, btree::locktype curlock, const BtreeMutateRequest& put_req, + int ind_hint, BtreeSearchRange& child_subrange) { + btree_status_t ret = btree_status_t::success; + bool unlocked_already = false; + int curr_ind = -1; + + if (my_node->is_leaf()) { + /* update the leaf node */ + BT_LOG_ASSERT_CMP(curlock, ==, LOCKTYPE_WRITE, my_node); + ret = update_leaf_node(my_node, put_req, child_subrange); + unlock_node(my_node, curlock); + return ret; + } + + bool is_any_child_splitted = false; + + retry: + int start_ind = 0, end_ind = -1; + + /* Get the start and end ind in a parent node for the range updates. For + * non range updates, start ind and end ind are same. + */ + ret = get_start_and_end_ind(my_node, put_req, start_ind, end_ind); + if (ret != btree_status_t::success) { goto out; } + + BT_DEBUG_ASSERT((curlock == locktype_t::READ || curlock == LOCKTYPE_WRITE), my_node, "unexpected locktype {}", + curlock); + curr_ind = start_ind; + + while (curr_ind <= end_ind) { // iterate all matched childrens + +#ifdef _PRERELEASE + if (curr_ind - start_ind > 1 && homestore_flip->test_flip("btree_leaf_node_split")) { + ret = btree_status_t::retry; + goto out; + } +#endif + + homeds::thread::locktype child_cur_lock = homeds::thread::LOCKTYPE_NONE; + + // Get the childPtr for given key. + BtreeNodeInfo child_info; + BtreeNodePtr< K > child_node; + + ret = get_child_and_lock_node(my_node, curr_ind, child_info, child_node, locktype_t::READ, LOCKTYPE_WRITE); + if (ret != btree_status_t::success) { + if (ret == btree_status_t::not_found) { + // Either the node was updated or mynode is freed. Just proceed again from top. + /* XXX: Is this case really possible as we always take the parent lock and never + * release it. + */ + ret = btree_status_t::retry; + } + goto out; + } + + // Directly get write lock for leaf, since its an insert. + child_cur_lock = (child_node->is_leaf()) ? LOCKTYPE_WRITE : locktype_t::READ; + + /* Get subrange if it is a range update */ + K start_key, end_key; + bool start_incl = false, end_incl = false; + if (is_range_update_req(put_req) && child_node->is_leaf()) { + /* We get the subrange only for leaf because this is where we will be inserting keys. In interior + * nodes, keys are always propogated from the lower nodes. + */ + get_subrange(my_node, put_req, curr_ind, start_key, end_key, start_incl, end_incl); + } + BtreeSearchRange subrange(start_key, start_incl, end_key, end_incl); + + /* check if child node is needed to split */ + bool split_occured = false; + ret = check_and_split_node(my_node, put_req, ind_hint, child_node, curlock, child_cur_lock, curr_ind, + split_occured); + if (ret != btree_status_t::success) { goto out; } + if (split_occured) { + ind_hint = -1; // Since split is needed, hint is no longer valid + goto retry; + } + + if (is_range_update_req(put_req) && child_node->is_leaf()) { + THIS_BT_LOG(DEBUG, btree_structures, my_node, "Subrange:s:{},e:{},c:{},nid:{},edgeid:{},sk:{},ek:{}", + start_ind, end_ind, curr_ind, my_node->get_node_id(), my_node->get_edge_id(), + subrange.get_start_key()->to_string(), subrange.get_end_key()->to_string()); + } + +#ifndef NDEBUG + K ckey, pkey; + if (curr_ind != int_cast(my_node->get_total_entries())) { // not edge + pkey = my_node->get_nth_key(curr_ind, true); + if (child_node->get_total_entries() != 0) { + ckey = child_node->get_last_key(); + if (!child_node->is_leaf()) { + HS_DEBUG_ASSERT_EQ(ckey.compare(pkey), 0); + } else { + HS_ASSERT_CMP(DEBUG, ckey.compare(pkey), <=, 0); + } + } + HS_DEBUG_ASSERT_EQ((is_range_update_req(put_req) || k.compare(pkey) <= 0), true); + } + if (curr_ind > 0) { // not first child + pkey = my_node->get_nth_key(curr_ind - 1, true); + if (child_node->get_total_entries() != 0) { + ckey = child_node->get_first_key(); + HS_ASSERT_CMP(DEBUG, pkey.compare(ckey), <=, 0); + } + HS_DEBUG_ASSERT_EQ((is_range_update_req(put_req) || k.compare(pkey) >= 0), true); + } +#endif + if (curr_ind == end_ind) { + // If we have reached the last index, unlock before traversing down, because we no longer need + // this lock. Holding this lock will impact performance unncessarily. + unlock_node(my_node, curlock); + curlock = LOCKTYPE_NONE; + } + +#ifndef NDEBUG + if (child_cur_lock == homeds::thread::LOCKTYPE_WRITE) { + HS_DEBUG_ASSERT_EQ(child_node->m_common_header.is_lock, true); + } +#endif + + ret = do_put(child_node, child_cur_lock, put_req, ind_hint, subrange); + if (ret != btree_status_t::success) { goto out; } + + curr_ind++; + } + out: + if (curlock != LOCKTYPE_NONE) { unlock_node(my_node, curlock); } + return ret; + // Warning: Do not access childNode or myNode beyond this point, since it would + // have been unlocked by the recursive function and it could also been deleted. + } + + void get_all_kvs(std::vector< pair< K, V > >& kvs) const { + // TODO: Isn't it better to do DFS traversal and get kvs instead of collecting all leafs. Its a non-scalable + // operation. + static thread_local std::vector< BtreeNodePtr< K > > leaves; + leaves.clear(); + get_all_leaf_nodes(leaves); + + for (auto& l : leaves) { + l->get_all_kvs(kvs); + } + leaves.clear(); + } + + uint64_t get_btree_node_cnt() const { + uint64_t cnt = 1; /* increment it for root */ + m_btree_lock.read_lock(); + cnt += get_child_node_cnt(m_root_node); + m_btree_lock.unlock(); + return cnt; + } + + uint64_t get_child_node_cnt(bnodeid_t bnodeid) const { + uint64_t cnt{0}; + BtreeNodePtr< K > node; + homeds::thread::locktype acq_lock = homeds::thread::locktype::locktype_t::READ; + + if (read_and_lock_node(bnodeid, node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { return cnt; } + if (!node->is_leaf()) { + uint32_t i = 0; + while (i < node->get_total_entries()) { + BtreeNodeInfo p = node->get(i, false); + cnt += get_child_node_cnt(p.bnode_id()) + 1; + ++i; + } + if (node->has_valid_edge()) { cnt += get_child_node_cnt(node->get_edge_id()) + 1; } + } + unlock_node(node, acq_lock); + return cnt; + } + + /* + * Get all leaf nodes from the read-only tree (CP tree, Snap Tree etc) + * NOTE: Doesn't take any lock + */ + void get_all_leaf_nodes(std::vector< BtreeNodePtr< K > >& leaves) const { + /* TODO: Add a flag to indicate RO tree + * TODO: Check the flag here + */ + get_leaf_nodes(m_root_node, leaves); + } + + // TODO: Remove the locks once we have RO flags + void get_leaf_nodes(bnodeid_t bnodeid, std::vector< BtreeNodePtr< K > >& leaves) const { + BtreeNodePtr< K > node; + if (read_and_lock_node(bnodeid, node, locktype_t::READ, locktype_t::READ, nullptr) != btree_status_t::success) { + return; + } + + if (node->is_leaf()) { + BtreeNodePtr< K > next_node = nullptr; + leaves.push_back(node); + while (node->next_bnode() != empty_bnodeid) { + auto ret = + read_and_lock_sibling(node->next_bnode(), next_node, locktype_t::READ, locktype_t::READ, nullptr); + unlock_node(node, locktype_t::READ); + HS_DEBUG_ASSERT_EQ(ret, btree_status_t::success); + if (ret != btree_status_t::success) { + LOGERROR("Cannot read sibling node for {}", node); + return; + } + HS_DEBUG_ASSERT_EQ(next_node->is_leaf(), true); + leaves.push_back(next_node); + node = next_node; + } + unlock_node(node, locktype_t::READ); + return; + } + + HS_ASSERT_CMP(DEBUG, node->get_total_entries(), >, 0); + if (node->get_total_entries() > 0) { + BtreeNodeInfo p = node->get(0, false); + // XXX If we cannot get rid of locks, lock child and release parent here + get_leaf_nodes(p.bnode_id(), leaves); + } + unlock_node(node, locktype_t::READ); + } + + void to_string(bnodeid_t bnodeid, std::string& buf) const { + BtreeNodePtr< K > node; + + homeds::thread::locktype acq_lock = homeds::thread::locktype::locktype_t::READ; + + if (read_and_lock_node(bnodeid, node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { return; } + fmt::format_to(std::back_inserter(buf), "{}\n", node->to_string(true /* print_friendly */)); + + if (!node->is_leaf()) { + uint32_t i = 0; + while (i < node->get_total_entries()) { + BtreeNodeInfo p; + node->get(i, &p, false); + to_string(p.bnode_id(), buf); + i++; + } + if (node->has_valid_edge()) { to_string(node->get_edge_id(), buf); } + } + unlock_node(node, acq_lock); + } + + /* This function upgrades the node lock and take required steps if things have + * changed during the upgrade. + * + * Inputs: + * myNode - Node to upgrade + * childNode - In case childNode needs to be unlocked. Could be nullptr + * curLock - Input/Output: current lock type + * + * Returns - If successfully able to upgrade, return true, else false. + * + * About Locks: This function expects the myNode to be locked and if childNode is not nullptr, expects + * it to be locked too. If it is able to successfully upgrade it continue to retain its + * old lock. If failed to upgrade, will release all locks. + */ + btree_status_t upgrade_node(const BtreeNodePtr< K >& my_node, BtreeNodePtr< K > child_node, + homeds::thread::locktype& cur_lock, homeds::thread::locktype& child_cur_lock, + const btree_cp_ptr& bcp) { + uint64_t prev_gen; + btree_status_t ret = btree_status_t::success; + homeds::thread::locktype child_lock_type = child_cur_lock; + + if (cur_lock == homeds::thread::LOCKTYPE_WRITE) { goto done; } + + prev_gen = my_node->get_gen(); + if (child_node) { + unlock_node(child_node, child_cur_lock); + child_cur_lock = locktype::LOCKTYPE_NONE; + } + +#ifdef _PRERELEASE + { + auto time = homestore_flip->get_test_flip< uint64_t >("btree_upgrade_delay"); + if (time) { std::this_thread::sleep_for(std::chrono::microseconds{time.get()}); } + } +#endif + ret = lock_node_upgrade(my_node, bcp); + if (ret != btree_status_t::success) { + cur_lock = locktype::LOCKTYPE_NONE; + return ret; + } + + // The node was not changed by anyone else during upgrade. + cur_lock = homeds::thread::LOCKTYPE_WRITE; + + // If the node has been made invalid (probably by mergeNodes) ask caller to start over again, but before + // that cleanup or free this node if there is no one waiting. + if (!my_node->is_valid_node()) { + unlock_node(my_node, homeds::thread::LOCKTYPE_WRITE); + cur_lock = locktype::LOCKTYPE_NONE; + ret = btree_status_t::retry; + goto done; + } + + // If node has been updated, while we have upgraded, ask caller to start all over again. + if (prev_gen != my_node->get_gen()) { + unlock_node(my_node, cur_lock); + cur_lock = locktype::LOCKTYPE_NONE; + ret = btree_status_t::retry; + goto done; + } + + if (child_node) { + ret = lock_and_refresh_node(child_node, child_lock_type, bcp); + if (ret != btree_status_t::success) { + unlock_node(my_node, cur_lock); + cur_lock = locktype::LOCKTYPE_NONE; + child_cur_lock = locktype::LOCKTYPE_NONE; + goto done; + } + child_cur_lock = child_lock_type; + } + +#ifdef _PRERELEASE + { + int is_leaf = 0; + + if (child_node && child_node->is_leaf()) { is_leaf = 1; } + if (homestore_flip->test_flip("btree_upgrade_node_fail", is_leaf)) { + unlock_node(my_node, cur_lock); + cur_lock = locktype::LOCKTYPE_NONE; + if (child_node) { + unlock_node(child_node, child_cur_lock); + child_cur_lock = locktype::LOCKTYPE_NONE; + } + ret = btree_status_t::retry; + goto done; + } + } +#endif + + BT_DEBUG_ASSERT_CMP(my_node->m_common_header.is_lock, ==, 1, my_node); + done: + return ret; + } + + btree_status_t update_leaf_node(const BtreeNodePtr< K >& my_node, const BtreeKey& k, const BtreeValue& v, + btree_put_type put_type, BtreeValue& existing_val, BtreeUpdateRequest< K, V >* bur, + const btree_cp_ptr& bcp, BtreeSearchRange& subrange) { + btree_status_t ret = btree_status_t::success; + if (bur != nullptr) { + // BT_DEBUG_ASSERT_CMP(bur->callback(), !=, nullptr, my_node); // TODO - range req without + // callback implementation + static thread_local std::vector< std::pair< K, V > > s_match; + s_match.clear(); + int start_ind = 0, end_ind = 0; + my_node->get_all(bur->get_input_range(), UINT32_MAX, start_ind, end_ind, &s_match); + + static thread_local std::vector< pair< K, V > > s_replace_kv; + s_replace_kv.clear(); + bur->get_cb_param()->node_version = my_node->get_version(); + ret = bur->callback()(s_match, s_replace_kv, bur->get_cb_param(), subrange); + if (ret != btree_status_t::success) { return ret; } + + HS_ASSERT_CMP(DEBUG, start_ind, <=, end_ind); + if (s_match.size() > 0) { my_node->remove(start_ind, end_ind); } + COUNTER_DECREMENT(m_metrics, btree_obj_count, s_match.size()); + + for (const auto& pair : s_replace_kv) { // insert is based on compare() of BtreeKey + auto status = my_node->insert(pair.first, pair.second); + BT_RELEASE_ASSERT((status == btree_status_t::success), my_node, "unexpected insert failure"); + COUNTER_INCREMENT(m_metrics, btree_obj_count, 1); + } + + /* update cursor in input range */ + auto end_key_ptr = const_cast< BtreeKey* >(subrange.get_end_key()); + bur->get_input_range().set_cursor_key( + end_key_ptr, ([](BtreeKey* end_key) { return std::move(std::make_unique< K >(*((K*)end_key))); })); + if (homestore::vol_test_run) { + // sorted check + for (auto i = 1u; i < my_node->get_total_entries(); i++) { + K curKey, prevKey; + my_node->get_nth_key(i - 1, &prevKey, false); + my_node->get_nth_key(i, &curKey, false); + if (prevKey.compare(&curKey) >= 0) { + LOGINFO("my_node {}", my_node->to_string()); + for (const auto& [k, v] : s_match) { + LOGINFO("match key {} value {}", k.to_string(), v.to_string()); + } + for (const auto& [k, v] : s_replace_kv) { + LOGINFO("replace key {} value {}", k.to_string(), v.to_string()); + } + } + BT_RELEASE_ASSERT_CMP(prevKey.compare(&curKey), <, 0, my_node); + } + } + } else { + if (!my_node->put(k, v, put_type, existing_val)) { ret = btree_status_t::put_failed; } + COUNTER_INCREMENT(m_metrics, btree_obj_count, 1); + } + + write_node(my_node, bcp); + return ret; + } + + btree_status_t get_start_and_end_ind(const BtreeNodePtr< K >& my_node, BtreeUpdateRequest< K, V >* bur, + const BtreeKey& k, int& start_ind, int& end_ind) { + + btree_status_t ret = btree_status_t::success; + if (bur != nullptr) { + /* just get start/end index from get_all. We don't release the parent lock until this + * key range is not inserted from start_ind to end_ind. + */ + my_node->get_all(bur->get_input_range(), UINT32_MAX, start_ind, end_ind); + } else { + auto result = my_node->find(k, nullptr, nullptr, true, true); + end_ind = start_ind = result.end_of_search_index; + ASSERT_IS_VALID_INTERIOR_CHILD_INDX(result, my_node); + } + + if (start_ind > end_ind) { + BT_LOG_ASSERT(false, my_node, "start ind {} greater than end ind {}", start_ind, end_ind); + ret = btree_status_t::retry; + } + return ret; + } + + /* It split the child if a split is required. It releases lock on parent and child_node in case of failure */ + btree_status_t check_and_split_node(const BtreeNodePtr< K >& my_node, BtreeUpdateRequest< K, V >* bur, + const BtreeKey& k, const BtreeValue& v, int ind_hint, btree_put_type put_type, + BtreeNodePtr< K > child_node, homeds::thread::locktype& curlock, + homeds::thread::locktype& child_curlock, int child_ind, bool& split_occured, + const btree_cp_ptr& bcp) { + + split_occured = false; + K split_key; + btree_status_t ret = btree_status_t::success; + auto child_lock_type = child_curlock; + auto none_lock_type = LOCKTYPE_NONE; + +#ifdef _PRERELEASE + boost::optional< int > time; + if (child_node->is_leaf()) { + time = homestore_flip->get_test_flip< int >("btree_delay_and_split_leaf", child_node->get_total_entries()); + } else { + time = homestore_flip->get_test_flip< int >("btree_delay_and_split", child_node->get_total_entries()); + } + if (time && child_node->get_total_entries() > 2) { + std::this_thread::sleep_for(std::chrono::microseconds{time.get()}); + } else +#endif + { + if (!child_node->is_split_needed(m_cfg, k, v, &ind_hint, put_type, bur)) { return ret; } + } + + /* Split needed */ + if (bur) { + + /* In case of range update we might split multiple childs of a parent in a single + * iteration which result into less space in the parent node. + */ +#ifdef _PRERELEASE + if (homestore_flip->test_flip("btree_parent_node_full")) { + ret = btree_status_t::retry; + goto out; + } +#endif + if (my_node->is_split_needed(m_cfg, k, v, &ind_hint, put_type, bur)) { + // restart from root + ret = btree_status_t::retry; + goto out; + } + } + + // Time to split the child, but we need to convert parent to write lock + ret = upgrade_node(my_node, child_node, curlock, child_curlock, bcp); + if (ret != btree_status_t::success) { + THIS_BT_LOG(DEBUG, btree_structures, my_node, "Upgrade of node lock failed, retrying from root"); + BT_LOG_ASSERT_CMP(curlock, ==, homeds::thread::LOCKTYPE_NONE, my_node); + goto out; + } + BT_LOG_ASSERT_CMP(child_curlock, ==, child_lock_type, my_node); + BT_LOG_ASSERT_CMP(curlock, ==, homeds::thread::LOCKTYPE_WRITE, my_node); + + // We need to upgrade the child to WriteLock + ret = upgrade_node(child_node, nullptr, child_curlock, none_lock_type, bcp); + if (ret != btree_status_t::success) { + THIS_BT_LOG(DEBUG, btree_structures, child_node, "Upgrade of child node lock failed, retrying from root"); + BT_LOG_ASSERT_CMP(child_curlock, ==, homeds::thread::LOCKTYPE_NONE, child_node); + goto out; + } + BT_LOG_ASSERT_CMP(none_lock_type, ==, homeds::thread::LOCKTYPE_NONE, my_node); + BT_LOG_ASSERT_CMP(child_curlock, ==, homeds::thread::LOCKTYPE_WRITE, child_node); + + // Real time to split the node and get point at which it was split + ret = split_node(my_node, child_node, child_ind, &split_key, bcp); + if (ret != btree_status_t::success) { goto out; } + + // After split, retry search and walk down. + unlock_node(child_node, homeds::thread::LOCKTYPE_WRITE); + child_curlock = LOCKTYPE_NONE; + COUNTER_INCREMENT(m_metrics, btree_split_count, 1); + split_occured = true; + out: + if (ret != btree_status_t::success) { + if (curlock != LOCKTYPE_NONE) { + unlock_node(my_node, curlock); + curlock = LOCKTYPE_NONE; + } + + if (child_curlock != LOCKTYPE_NONE) { + unlock_node(child_node, child_curlock); + child_curlock = LOCKTYPE_NONE; + } + } + return ret; + } + + /* This function is called for the interior nodes whose childs are leaf nodes to calculate the sub range */ + void get_subrange(const BtreeNodePtr< K >& my_node, BtreeUpdateRequest< K, V >* bur, int curr_ind, + K& subrange_start_key, K& subrange_end_key, bool& subrange_start_inc, bool& subrange_end_inc) { + +#ifndef NDEBUG + if (curr_ind > 0) { + /* start of subrange will always be more then the key in curr_ind - 1 */ + K start_key; + BtreeKey* start_key_ptr = &start_key; + + my_node->get_nth_key(curr_ind - 1, start_key_ptr, false); + HS_ASSERT_CMP(DEBUG, start_key_ptr->compare(bur->get_input_range().get_start_key()), <=, 0); + } +#endif + + // find end of subrange + bool end_inc = true; + K end_key; + BtreeKey* end_key_ptr = &end_key; + + if (curr_ind < (int)my_node->get_total_entries()) { + my_node->get_nth_key(curr_ind, end_key_ptr, false); + if (end_key_ptr->compare(bur->get_input_range().get_end_key()) >= 0) { + /* this is last index to process as end of range is smaller then key in this node */ + end_key_ptr = const_cast< BtreeKey* >(bur->get_input_range().get_end_key()); + end_inc = bur->get_input_range().is_end_inclusive(); + } else { + end_inc = true; + } + } else { + /* it is the edge node. end key is the end of input range */ + BT_LOG_ASSERT_CMP(my_node->has_valid_edge(), ==, true, my_node); + end_key_ptr = const_cast< BtreeKey* >(bur->get_input_range().get_end_key()); + end_inc = bur->get_input_range().is_end_inclusive(); + } + + BtreeSearchRange& input_range = bur->get_input_range(); + auto start_key_ptr = input_range.get_start_key(); + subrange_start_key.copy_blob(start_key_ptr->get_blob()); + subrange_end_key.copy_blob(end_key_ptr->get_blob()); + subrange_start_inc = input_range.is_start_inclusive(); + subrange_end_inc = end_inc; + + auto ret = subrange_start_key.compare(&subrange_end_key); + BT_RELEASE_ASSERT_CMP(ret, <=, 0, my_node); + ret = subrange_start_key.compare(bur->get_input_range().get_end_key()); + BT_RELEASE_ASSERT_CMP(ret, <=, 0, my_node); + /* We don't neeed to update the start at it is updated when entries are inserted in leaf nodes */ + } + + btree_status_t check_split_root(const BtreeMutateRequest& put_req) { + int ind; + K split_key; + BtreeNodePtr< K > child_node = nullptr; + btree_status_t ret = btree_status_t::success; + + m_btree_lock.write_lock(); + BtreeNodePtr< K > root; + + ret = read_and_lock_root(m_root_node, root, locktype::LOCKTYPE_WRITE, locktype::LOCKTYPE_WRITE); + if (ret != btree_status_t::success) { goto done; } + + if (!root->is_split_needed(m_cfg, put_req)) { + unlock_node(root, homeds::thread::LOCKTYPE_WRITE); + goto done; + } + + // Create a new child node and split them + child_node = alloc_interior_node(); + if (child_node == nullptr) { + ret = btree_status_t::space_not_avail; + unlock_node(root, homeds::thread::LOCKTYPE_WRITE); + goto done; + } + + /* it swap the data while keeping the nodeid same */ + btree_store_t::swap_node(m_btree_store.get(), root, child_node); + write_node(child_node); + + THIS_BT_LOG(DEBUG, btree_structures, root, + "Root node is full, swapping contents with child_node {} and split that", + child_node->get_node_id()); + + BT_DEBUG_ASSERT_CMP(root->get_total_entries(), ==, 0, root); + ret = split_node(root, child_node, root->get_total_entries(), &split_key, true); + BT_DEBUG_ASSERT_CMP(m_root_node, ==, root->get_node_id(), root); + + if (ret != btree_status_t::success) { + btree_store_t::swap_node(m_btree_store.get(), child_node, root); + write_node(child_node); + } + + /* unlock child node */ + unlock_node(root, homeds::thread::LOCKTYPE_WRITE); + + if (ret == btree_status_t::success) { COUNTER_INCREMENT(m_metrics, btree_depth, 1); } + done: + m_btree_lock.unlock(); + return ret; + } + + btree_status_t check_collapse_root(const btree_cp_ptr& bcp) { + BtreeNodePtr< K > child_node = nullptr; + btree_status_t ret = btree_status_t::success; + std::vector< BtreeNodePtr< K > > old_nodes; + std::vector< BtreeNodePtr< K > > new_nodes; + + m_btree_lock.write_lock(); + BtreeNodePtr< K > root; + + ret = read_and_lock_root(m_root_node, root, locktype::LOCKTYPE_WRITE, locktype::LOCKTYPE_WRITE, bcp); + if (ret != btree_status_t::success) { goto done; } + + if (root->get_total_entries() != 0 || root->is_leaf() /*some other thread collapsed root already*/) { + unlock_node(root, locktype::LOCKTYPE_WRITE); + goto done; + } + + BT_DEBUG_ASSERT_CMP(root->has_valid_edge(), ==, true, root); + ret = read_node(root->get_edge_id(), child_node); + if (child_node == nullptr) { + unlock_node(root, locktype::LOCKTYPE_WRITE); + goto done; + } + + // Elevate the edge child as root. + btree_store_t::swap_node(m_btree_store.get(), root, child_node); + write_node(root, bcp); + BT_DEBUG_ASSERT_CMP(m_root_node, ==, root->get_node_id(), root); + + old_nodes.push_back(child_node); + + if (BtreeStoreType == btree_store_type::SSD_BTREE) { + auto j_iob = btree_store_t::make_journal_entry(journal_op::BTREE_MERGE, true /* is_root */, bcp); + btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::inplace_write, root, bcp); + btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::removal, child_node, bcp); + btree_store_t::write_journal_entry(m_btree_store.get(), bcp, j_iob); + } + unlock_node(root, locktype::LOCKTYPE_WRITE); + free_node(child_node, (bcp ? bcp->free_blkid_list : nullptr)); + + if (ret == btree_status_t::success) { COUNTER_DECREMENT(m_metrics, btree_depth, 1); } + done: + m_btree_lock.unlock(); + return ret; + } + + btree_status_t split_node(const BtreeNodePtr< K >& parent_node, BtreeNodePtr< K > child_node, uint32_t parent_ind, + BtreeKey* out_split_key, const btree_cp_ptr& bcp, bool root_split = false) { + BtreeNodeInfo ninfo; + BtreeNodePtr< K > child_node1 = child_node; + BtreeNodePtr< K > child_node2 = child_node1->is_leaf() ? alloc_leaf_node() : alloc_interior_node(); + + if (child_node2 == nullptr) { return (btree_status_t::space_not_avail); } + + btree_status_t ret = btree_status_t::success; + + child_node2->set_next_bnode(child_node1->next_bnode()); + child_node1->set_next_bnode(child_node2->get_node_id()); + uint32_t child1_filled_size = m_cfg.get_node_area_size() - child_node1->get_available_size(m_cfg); + + auto split_size = m_cfg.get_split_size(child1_filled_size); + uint32_t res = child_node1->move_out_to_right_by_size(m_cfg, child_node2, split_size); + + BT_RELEASE_ASSERT_CMP(res, >, 0, child_node1, + "Unable to split entries in the child node"); // means cannot split entries + BT_DEBUG_ASSERT_CMP(child_node1->get_total_entries(), >, 0, child_node1); + + // Update the existing parent node entry to point to second child ptr. + bool edge_split = (parent_ind == parent_node->get_total_entries()); + ninfo.set_bnode_id(child_node2->get_node_id()); + parent_node->update(parent_ind, ninfo); + + // Insert the last entry in first child to parent node + child_node1->get_last_key(out_split_key); + ninfo.set_bnode_id(child_node1->get_node_id()); + + /* If key is extent then we always insert the end key in the parent node */ + K out_split_end_key; + out_split_end_key.copy_end_key_blob(out_split_key->get_blob()); + parent_node->insert(out_split_end_key, ninfo); + +#ifndef NDEBUG + K split_key; + child_node2->get_first_key(&split_key); + BT_DEBUG_ASSERT_CMP(split_key.compare(out_split_key), >, 0, child_node2); +#endif + THIS_BT_LOG(DEBUG, btree_structures, parent_node, "Split child_node={} with new_child_node={}, split_key={}", + child_node1->get_node_id(), child_node2->get_node_id(), out_split_key->to_string()); + + if (BtreeStoreType == btree_store_type::SSD_BTREE) { + auto j_iob = btree_store_t::make_journal_entry(journal_op::BTREE_SPLIT, root_split, bcp, + {parent_node->get_node_id(), parent_node->get_gen()}); + btree_store_t::append_node_to_journal( + j_iob, (root_split ? bt_journal_node_op::creation : bt_journal_node_op::inplace_write), child_node1, + bcp, out_split_end_key.get_blob()); + + // For root split or split around the edge, we don't write the key, which will cause replay to insert + // edge + if (edge_split) { + btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::creation, child_node2, bcp); + } else { + K child2_pkey; + parent_node->get_nth_key(parent_ind, &child2_pkey, true); + btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::creation, child_node2, bcp, + child2_pkey.get_blob()); + } + btree_store_t::write_journal_entry(m_btree_store.get(), bcp, j_iob); + } + + // we write right child node, than left and than parent child + write_node(child_node2, nullptr, bcp); + write_node(child_node1, child_node2, bcp); + write_node(parent_node, child_node1, bcp); + + // NOTE: Do not access parentInd after insert, since insert would have + // shifted parentNode to the right. + return ret; + } + + btree_status_t create_btree_replay(btree_journal_entry* jentry, const btree_cp_ptr& bcp) { + if (jentry) { + BT_DEBUG_ASSERT_CMP(jentry->is_root, ==, true, , + "Expected create_btree_replay entry to be root journal entry"); + BT_DEBUG_ASSERT_CMP(jentry->parent_node.get_id(), ==, m_root_node, , "Root node journal entry mismatch"); + } + + // Create a root node by reserving the leaf node + BtreeNodePtr< K > root = reserve_leaf_node(BlkId(m_root_node)); + auto ret = write_node(root, nullptr, bcp); + BT_DEBUG_ASSERT_CMP(ret, ==, btree_status_t::success, , "expecting success in writing root node"); + return btree_status_t::success; + } + + btree_status_t split_node_replay(btree_journal_entry* jentry, const btree_cp_ptr& bcp) { + bnodeid_t id = jentry->is_root ? m_root_node : jentry->parent_node.node_id; + BtreeNodePtr< K > parent_node; + + // read parent node + read_node_or_fail(id, parent_node); + + // Parent already went ahead of the journal entry, return done + if (parent_node->get_gen() >= jentry->parent_node.node_gen) { + THIS_BT_LOG(INFO, base, , + "Journal replay: parent_node gen {} ahead of jentry gen {} is root {} , skipping ", + parent_node->get_gen(), jentry->parent_node.get_gen(), jentry->is_root); + return btree_status_t::replay_not_needed; + } + + // Read the first inplace write node which is the leftmost child and also form child split key from journal + auto j_child_nodes = jentry->get_nodes(); + + BtreeNodePtr< K > child_node1; + if (jentry->is_root) { + // If root is not written yet, parent_node will be pointing child_node1, so create a new parent_node to + // be treated as root here on. + child_node1 = reserve_interior_node(BlkId(j_child_nodes[0]->node_id())); + btree_store_t::swap_node(m_btree_store.get(), parent_node, child_node1); + + THIS_BT_LOG(INFO, btree_generics, , + "Journal replay: root split, so creating child_node id={} and swapping the node with " + "parent_node id={} names {}", + child_node1->get_node_id(), parent_node->get_node_id(), m_cfg.get_name()); + + } else { + read_node_or_fail(j_child_nodes[0]->node_id(), child_node1); + } + + THIS_BT_LOG(INFO, btree_generics, , + "Journal replay: child_node1 => jentry: [id={} gen={}], ondisk: [id={} gen={}] names {}", + j_child_nodes[0]->node_id(), j_child_nodes[0]->node_gen(), child_node1->get_node_id(), + child_node1->get_gen(), m_cfg.get_name()); + if (jentry->is_root) { + BT_RELEASE_ASSERT_CMP(j_child_nodes[0]->type, ==, bt_journal_node_op::creation, , + "Expected first node in journal entry to be new creation for root split"); + } else { + BT_RELEASE_ASSERT_CMP(j_child_nodes[0]->type, ==, bt_journal_node_op::inplace_write, , + "Expected first node in journal entry to be in-place write"); + } + BT_RELEASE_ASSERT_CMP(j_child_nodes[1]->type, ==, bt_journal_node_op::creation, , + "Expected second node in journal entry to be new node creation"); + + // recover child node + bool child_split = recover_child_nodes_in_split(child_node1, j_child_nodes, bcp); + + // recover parent node + recover_parent_node_in_split(parent_node, child_split ? child_node1 : nullptr, j_child_nodes, bcp); + return btree_status_t::success; + } + + bool recover_child_nodes_in_split(const BtreeNodePtr< K >& child_node1, + const std::vector< bt_journal_node_info* >& j_child_nodes, + const btree_cp_ptr& bcp) { + + BtreeNodePtr< K > child_node2; + // Check if child1 is ahead of the generation + if (child_node1->get_gen() >= j_child_nodes[0]->node_gen()) { + // leftmost_node is written, so right node must have been written as well. + read_node_or_fail(child_node1->next_bnode(), child_node2); + + // sanity check for right node + BT_RELEASE_ASSERT_CMP(child_node2->get_gen(), >=, j_child_nodes[1]->node_gen(), child_node2, + "gen cnt should be more than the journal entry"); + // no need to recover child nodes + return false; + } + + K split_key; + split_key.set_blob({j_child_nodes[0]->key_area(), j_child_nodes[0]->key_size}); + child_node2 = child_node1->is_leaf() ? reserve_leaf_node(BlkId(j_child_nodes[1]->node_id())) + : reserve_interior_node(BlkId(j_child_nodes[1]->node_id())); + + // We need to do split based on entries since the left children is also not written yet. + // Find the split key within the child_node1. It is not always found, so we split upto that. + auto ret = child_node1->find(split_key, nullptr, false); + + // sanity check for left mode node before recovery + { + if (!ret.found) { + if (!child_node1->is_leaf()) { + BT_RELEASE_ASSERT(0, , "interior nodes should always have this key if it is written yet"); + } + } + } + + THIS_BT_LOG(INFO, btree_generics, , "Journal replay: split key {}, split indx {} child_node1 {}", + split_key.to_string(), ret.end_of_search_index, child_node1->to_string()); + /* if it is not found than end_of_search_index points to first ind which is greater than split key */ + auto split_ind = ret.end_of_search_index; + if (ret.found) { ++split_ind; } // we don't want to move split key */ + if (child_node1->is_leaf() && split_ind < (int)child_node1->get_total_entries()) { + K key; + child_node1->get_nth_key(split_ind, &key, false); + + if (split_key.compare_start(&key) >= 0) { /* we need to split the key range */ + THIS_BT_LOG(INFO, btree_generics, , "splitting a leaf node key {}", key.to_string()); + V v; + child_node1->get_nth_value(split_ind, &v, false); + vector< pair< K, V > > replace_kv; + child_node1->remove(split_ind, split_ind); + m_split_key_cb(key, v, split_key, replace_kv); + for (auto& pair : replace_kv) { + auto status = child_node1->insert(pair.first, pair.second); + BT_RELEASE_ASSERT((status == btree_status_t::success), child_node1, "unexpected insert failure"); + } + auto ret = child_node1->find(split_key, nullptr, false); + BT_RELEASE_ASSERT((ret.found && (ret.end_of_search_index == split_ind)), child_node1, + "found new indx {}, old split indx{}", ret.end_of_search_index, split_ind); + ++split_ind; + } + } + child_node1->move_out_to_right_by_entries(m_cfg, child_node2, child_node1->get_total_entries() - split_ind); + + child_node2->set_next_bnode(child_node1->next_bnode()); + child_node2->set_gen(j_child_nodes[1]->node_gen()); + + child_node1->set_next_bnode(child_node2->get_node_id()); + child_node1->set_gen(j_child_nodes[0]->node_gen()); + + THIS_BT_LOG(INFO, btree_generics, , "Journal replay: child_node2 {}", child_node2->to_string()); + write_node(child_node2, nullptr, bcp); + write_node(child_node1, child_node2, bcp); + return true; + } + + void recover_parent_node_in_split(const BtreeNodePtr< K >& parent_node, const BtreeNodePtr< K >& child_node1, + std::vector< bt_journal_node_info* >& j_child_nodes, const btree_cp_ptr& bcp) { + + // find child_1 key + K child1_key; // we need to insert child1_key + BT_RELEASE_ASSERT_CMP(j_child_nodes[0]->key_size, !=, 0, , "key size of left mode node is zero"); + child1_key.set_blob({j_child_nodes[0]->key_area(), j_child_nodes[0]->key_size}); + auto child1_node_id = j_child_nodes[0]->node_id(); + + // find split indx + auto ret = parent_node->find(child1_key, nullptr, false); + BT_RELEASE_ASSERT_CMP(ret.found, ==, false, , "child_1 key should not be in this parent"); + auto split_indx = ret.end_of_search_index; + + // find child2_key + K child2_key; // we only need to update child2_key to new node + if (j_child_nodes[1]->key_size != 0) { + child2_key.set_blob({j_child_nodes[1]->key_area(), j_child_nodes[1]->key_size}); + ret = parent_node->find(child2_key, nullptr, false); + BT_RELEASE_ASSERT_CMP(split_indx, ==, ret.end_of_search_index, , "it should be same as split index"); + } else { + // parent should be valid edge it is not a root split + } + auto child2_node_id = j_child_nodes[1]->node_id(); + + // update child2_key value + BtreeNodeInfo ninfo; + ninfo.set_bnode_id(child2_node_id); + parent_node->update(split_indx, ninfo); + + // insert child 1 + ninfo.set_bnode_id(child1_node_id); + K out_split_end_key; + out_split_end_key.copy_end_key_blob(child1_key.get_blob()); + parent_node->insert(out_split_end_key, ninfo); + + // Write the parent node + write_node(parent_node, child_node1, bcp); + + /* do sanity check after recovery split */ + { + validate_sanity_child(parent_node, split_indx); + validate_sanity_next_child(parent_node, split_indx); + } + } + + btree_status_t merge_nodes(const BtreeNodePtr< K >& parent_node, uint32_t start_indx, uint32_t end_indx, + const btree_cp_ptr& bcp) { + btree_status_t ret = btree_status_t::merge_failed; + std::vector< BtreeNodePtr< K > > child_nodes; + std::vector< BtreeNodePtr< K > > old_nodes; + std::vector< BtreeNodePtr< K > > replace_nodes; + std::vector< BtreeNodePtr< K > > new_nodes; + std::vector< BtreeNodePtr< K > > deleted_nodes; + BtreeNodePtr< K > left_most_node; + K last_pkey; // last key of parent node + bool last_pkey_valid = false; + uint32_t balanced_size; + BtreeNodePtr< K > merge_node; + K last_ckey; // last key in child + uint32_t parent_insert_indx = start_indx; +#ifndef NDEBUG + uint32_t total_child_entries = 0; + uint32_t new_entries = 0; + K last_debug_ckey; + K new_last_debug_ckey; + BtreeNodePtr< K > last_node; +#endif + /* Try to take a lock on all nodes participating in merge*/ + for (auto indx = start_indx; indx <= end_indx; ++indx) { + if (indx == parent_node->get_total_entries()) { + BT_LOG_ASSERT(parent_node->has_valid_edge(), parent_node, + "Assertion failure, expected valid edge for parent_node: {}"); + } + + BtreeNodeInfo child_info; + parent_node->get(indx, &child_info, false /* copy */); + + BtreeNodePtr< K > child; + ret = read_and_lock_node(child_info.bnode_id(), child, locktype::LOCKTYPE_WRITE, locktype::LOCKTYPE_WRITE, + bcp); + if (ret != btree_status_t::success) { goto out; } + BT_LOG_ASSERT_CMP(child->is_valid_node(), ==, true, child); + + /* check if left most node has space */ + if (indx == start_indx) { + balanced_size = m_cfg.get_ideal_fill_size(); + left_most_node = child; + if (left_most_node->get_occupied_size(m_cfg) > balanced_size) { + /* first node doesn't have any free space. we can exit now */ + ret = btree_status_t::merge_not_required; + goto out; + } + } else { + bool is_allocated = true; + /* pre allocate the new nodes. We will free the nodes which are not in use later */ + auto new_node = btree_store_t::alloc_node(m_btree_store.get(), child->is_leaf(), is_allocated, child); + if (is_allocated) { + /* we are going to allocate new blkid of all the nodes except the first node. + * Note :- These blkids will leak if we fail or crash before writing entry into + * journal. + */ + old_nodes.push_back(child); + COUNTER_INCREMENT_IF_ELSE(m_metrics, child->is_leaf(), btree_leaf_node_count, btree_int_node_count, + 1); + } + /* Blk IDs can leak if it crash before writing it to a journal */ + if (new_node == nullptr) { + ret = btree_status_t::space_not_avail; + goto out; + } + new_nodes.push_back(new_node); + } +#ifndef NDEBUG + total_child_entries += child->get_total_entries(); + child->get_last_key(&last_debug_ckey); +#endif + child_nodes.push_back(child); + } + + if (end_indx != parent_node->get_total_entries()) { + /* If it is not edge we always preserve the last key in a given merge group of nodes.*/ + parent_node->get_nth_key(end_indx, &last_pkey, true); + last_pkey_valid = true; + } + + merge_node = left_most_node; + /* We can not fail from this point. Nodes will be modified in memory. */ + for (uint32_t i = 0; i < new_nodes.size(); ++i) { + auto occupied_size = merge_node->get_occupied_size(m_cfg); + if (occupied_size < balanced_size) { + uint32_t pull_size = balanced_size - occupied_size; + merge_node->move_in_from_right_by_size(m_cfg, new_nodes[i], pull_size); + if (new_nodes[i]->get_total_entries() == 0) { + /* this node is freed */ + deleted_nodes.push_back(new_nodes[i]); + continue; + } + } + + /* update the last key of merge node in parent node */ + K last_ckey; // last key in child + merge_node->get_last_key(&last_ckey); + BtreeNodeInfo ninfo(merge_node->get_node_id()); + parent_node->update(parent_insert_indx, last_ckey, ninfo); + ++parent_insert_indx; + + merge_node->set_next_bnode(new_nodes[i]->get_node_id()); // link them + merge_node = new_nodes[i]; + if (merge_node != left_most_node) { + /* left most node is not replaced */ + replace_nodes.push_back(merge_node); + } + } + + /* update the latest merge node */ + merge_node->get_last_key(&last_ckey); + if (last_pkey_valid) { + BT_DEBUG_ASSERT_CMP(last_ckey.compare(&last_pkey), <=, 0, parent_node); + last_ckey = last_pkey; + } + + /* update the last key */ + { + BtreeNodeInfo ninfo(merge_node->get_node_id()); + parent_node->update(parent_insert_indx, last_ckey, ninfo); + ++parent_insert_indx; + } + + /* remove the keys which are no longer used */ + if ((parent_insert_indx) <= end_indx) { parent_node->remove(parent_insert_indx, end_indx); } + + /* write the journal entry */ + if (BtreeStoreType == btree_store_type::SSD_BTREE) { + auto j_iob = btree_store_t::make_journal_entry(journal_op::BTREE_MERGE, false /* is_root */, bcp, + {parent_node->get_node_id(), parent_node->get_gen()}); + K child_pkey; + if (start_indx < parent_node->get_total_entries()) { + parent_node->get_nth_key(start_indx, &child_pkey, true); + BT_RELEASE_ASSERT_CMP(start_indx, ==, (parent_insert_indx - 1), parent_node, "it should be last index"); + } + btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::inplace_write, left_most_node, bcp, + child_pkey.get_blob()); + for (auto& node : old_nodes) { + btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::removal, node, bcp); + } + uint32_t insert_indx = 0; + for (auto& node : replace_nodes) { + K child_pkey; + if ((start_indx + insert_indx) < parent_node->get_total_entries()) { + parent_node->get_nth_key(start_indx + insert_indx, &child_pkey, true); + BT_RELEASE_ASSERT_CMP((start_indx + insert_indx), ==, (parent_insert_indx - 1), parent_node, + "it should be last index"); + } + btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::creation, node, bcp, + child_pkey.get_blob()); + ++insert_indx; + } + BT_RELEASE_ASSERT_CMP((start_indx + insert_indx), ==, parent_insert_indx, parent_node, "it should be same"); + btree_store_t::write_journal_entry(m_btree_store.get(), bcp, j_iob); + } + + if (replace_nodes.size() > 0) { + /* write the right most node */ + write_node(replace_nodes[replace_nodes.size() - 1], nullptr, bcp); + if (replace_nodes.size() > 1) { + /* write the middle nodes */ + for (int i = replace_nodes.size() - 2; i >= 0; --i) { + write_node(replace_nodes[i], replace_nodes[i + 1], bcp); + } + } + /* write the left most node */ + write_node(left_most_node, replace_nodes[0], bcp); + } else { + /* write the left most node */ + write_node(left_most_node, nullptr, bcp); + } + + /* write the parent node */ + write_node(parent_node, left_most_node, bcp); + +#ifndef NDEBUG + for (const auto& n : replace_nodes) { + new_entries += n->get_total_entries(); + } + + new_entries += left_most_node->get_total_entries(); + HS_DEBUG_ASSERT_EQ(total_child_entries, new_entries); + + if (replace_nodes.size()) { + replace_nodes[replace_nodes.size() - 1]->get_last_key(&new_last_debug_ckey); + last_node = replace_nodes[replace_nodes.size() - 1]; + } else { + left_most_node->get_last_key(&new_last_debug_ckey); + last_node = left_most_node; + } + if (last_debug_ckey.compare(&new_last_debug_ckey) != 0) { + LOGINFO("{}", last_node->to_string()); + if (deleted_nodes.size() > 0) { LOGINFO("{}", (deleted_nodes[deleted_nodes.size() - 1]->to_string())); } + HS_DEBUG_ASSERT(false, "compared failed"); + } +#endif + /* free nodes. It actually gets freed after cp is completed */ + for (const auto& n : old_nodes) { + free_node(n, (bcp ? bcp->free_blkid_list : nullptr)); + } + for (const auto& n : deleted_nodes) { + free_node(n); + } + ret = btree_status_t::success; + out: +#ifndef NDEBUG + uint32_t freed_entries = deleted_nodes.size(); + uint32_t scan_entries = end_indx - start_indx - freed_entries + 1; + for (uint32_t i = 0; i < scan_entries; ++i) { + if (i < (scan_entries - 1)) { validate_sanity_next_child(parent_node, (uint32_t)start_indx + i); } + validate_sanity_child(parent_node, (uint32_t)start_indx + i); + } +#endif + // Loop again in reverse order to unlock the nodes. freeable nodes need to be unlocked and freed + for (uint32_t i = child_nodes.size() - 1; i != 0; i--) { + unlock_node(child_nodes[i], locktype::LOCKTYPE_WRITE); + } + unlock_node(child_nodes[0], locktype::LOCKTYPE_WRITE); + if (ret != btree_status_t::success) { + /* free the allocated nodes */ + for (const auto& n : new_nodes) { + free_node(n); + } + } + return ret; + } + +#if 0 + btree_status_t merge_node_replay(btree_journal_entry* jentry, const btree_cp_ptr& bcp) { + BtreeNodePtr< K > parent_node = (jentry->is_root) ? read_node(m_root_node) : read_node(jentry->parent_node.node_id); + + // Parent already went ahead of the journal entry, return done + if (parent_node->get_gen() >= jentry->parent_node.node_gen) { return btree_status_t::replay_not_needed; } + } +#endif + + void validate_sanity_child(const BtreeNodePtr< K >& parent_node, uint32_t ind) { + BtreeNodeInfo child_info; + K child_first_key; + K child_last_key; + K parent_key; + + parent_node->get(ind, &child_info, false /* copy */); + BtreeNodePtr< K > child_node = nullptr; + auto ret = read_node(child_info.bnode_id(), child_node); + BT_REL_ASSERT_EQ(ret, btree_status_t::success, "read failed, reason: {}", ret); + if (child_node->get_total_entries() == 0) { + auto parent_entries = parent_node->get_total_entries(); + if (!child_node->is_leaf()) { // leaf node or edge node can have 0 entries + BT_REL_ASSERT_EQ(((parent_node->has_valid_edge() && ind == parent_entries)), true); + } + return; + } + child_node->get_first_key(&child_first_key); + child_node->get_last_key(&child_last_key); + BT_REL_ASSERT_LE(child_first_key.compare(&child_last_key), 0) + if (ind == parent_node->get_total_entries()) { + BT_REL_ASSERT_EQ(parent_node->has_valid_edge(), true); + if (ind > 0) { + parent_node->get_nth_key(ind - 1, &parent_key, false); + BT_REL_ASSERT_GT(child_first_key.compare(&parent_key), 0) + BT_REL_ASSERT_LT(parent_key.compare_start(&child_first_key), 0) + } + } else { + parent_node->get_nth_key(ind, &parent_key, false); + BT_REL_ASSERT_LE(child_first_key.compare(&parent_key), 0) + BT_REL_ASSERT_LE(child_last_key.compare(&parent_key), 0) + BT_REL_ASSERT_GE(parent_key.compare_start(&child_first_key), 0) + BT_REL_ASSERT_GE(parent_key.compare_start(&child_first_key), 0) + if (ind != 0) { + parent_node->get_nth_key(ind - 1, &parent_key, false); + BT_REL_ASSERT_GT(child_first_key.compare(&parent_key), 0) + BT_REL_ASSERT_LT(parent_key.compare_start(&child_first_key), 0) + } + } + } + + void validate_sanity_next_child(const BtreeNodePtr< K >& parent_node, uint32_t ind) { + BtreeNodeInfo child_info; + K child_key; + K parent_key; + + if (parent_node->has_valid_edge()) { + if (ind == parent_node->get_total_entries()) { return; } + } else { + if (ind == parent_node->get_total_entries() - 1) { return; } + } + parent_node->get(ind + 1, &child_info, false /* copy */); + BtreeNodePtr< K > child_node = nullptr; + auto ret = read_node(child_info.bnode_id(), child_node); + HS_RELEASE_ASSERT(ret == btree_status_t::success, "read failed, reason: {}", ret); + if (child_node->get_total_entries() == 0) { + auto parent_entries = parent_node->get_total_entries(); + if (!child_node->is_leaf()) { // leaf node can have 0 entries + HS_ASSERT_CMP(RELEASE, + ((parent_node->has_valid_edge() && ind == parent_entries) || (ind = parent_entries - 1)), + ==, true); + } + return; + } + /* in case of merge next child will never have zero entries otherwise it would have been merged */ + HS_ASSERT_CMP(RELEASE, child_node->get_total_entries(), !=, 0); + child_node->get_first_key(&child_key); + parent_node->get_nth_key(ind, &parent_key, false); + BT_REL_ASSERT_GT(child_key.compare(&parent_key), 0) + BT_REL_ASSERT_GT(parent_key.compare_start(&child_key), 0) + } + + void print_node(const bnodeid_t& bnodeid) { + std::string buf; + BtreeNodePtr< K > node; + + m_btree_lock.read_lock(); + homeds::thread::locktype acq_lock = homeds::thread::locktype::locktype_t::READ; + if (read_and_lock_node(bnodeid, node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { goto done; } + buf = node->to_string(true /* print_friendly */); + unlock_node(node, acq_lock); + + done: + m_btree_lock.unlock(); + + THIS_BT_LOG(INFO, base, , "Node: <{}>", buf); + } + + void diff(Btree* other, uint32_t param, vector< pair< K, V > >* diff_kv) { + std::vector< pair< K, V > > my_kvs, other_kvs; + + get_all_kvs(&my_kvs); + other->get_all_kvs(&other_kvs); + auto it1 = my_kvs.begin(); + auto it2 = other_kvs.begin(); + + K k1, k2; + V v1, v2; + + if (it1 != my_kvs.end()) { + k1 = it1->first; + v1 = it1->second; + } + if (it2 != other_kvs.end()) { + k2 = it2->first; + v2 = it2->second; + } + + while ((it1 != my_kvs.end()) && (it2 != other_kvs.end())) { + if (k1.preceeds(&k2)) { + /* k1 preceeds k2 - push k1 and continue */ + diff_kv->emplace_back(make_pair(k1, v1)); + it1++; + if (it1 == my_kvs.end()) { break; } + k1 = it1->first; + v1 = it1->second; + } else if (k1.succeeds(&k2)) { + /* k2 preceeds k1 - push k2 and continue */ + diff_kv->emplace_back(make_pair(k2, v2)); + it2++; + if (it2 == other_kvs.end()) { break; } + k2 = it2->first; + v2 = it2->second; + } else { + /* k1 and k2 overlaps */ + std::vector< pair< K, V > > overlap_kvs; + diff_read_next_t to_read = READ_BOTH; + + v1.get_overlap_diff_kvs(&k1, &v1, &k2, &v2, param, to_read, overlap_kvs); + for (auto ovr_it = overlap_kvs.begin(); ovr_it != overlap_kvs.end(); ovr_it++) { + diff_kv->emplace_back(make_pair(ovr_it->first, ovr_it->second)); + } + + switch (to_read) { + case READ_FIRST: + it1++; + if (it1 == my_kvs.end()) { + // Add k2,v2 + diff_kv->emplace_back(make_pair(k2, v2)); + it2++; + break; + } + k1 = it1->first; + v1 = it1->second; + break; + + case READ_SECOND: + it2++; + if (it2 == other_kvs.end()) { + diff_kv->emplace_back(make_pair(k1, v1)); + it1++; + break; + } + k2 = it2->first; + v2 = it2->second; + break; + + case READ_BOTH: + /* No tail part */ + it1++; + if (it1 == my_kvs.end()) { break; } + k1 = it1->first; + v1 = it1->second; + it2++; + if (it2 == my_kvs.end()) { break; } + k2 = it2->first; + v2 = it2->second; + break; + + default: + LOGERROR("ERROR: Getting Overlapping Diff KVS for {}:{}, {}:{}, to_read {}", k1, v1, k2, v2, + to_read); + /* skip both */ + it1++; + if (it1 == my_kvs.end()) { break; } + k1 = it1->first; + v1 = it1->second; + it2++; + if (it2 == my_kvs.end()) { break; } + k2 = it2->first; + v2 = it2->second; + break; + } + } + } + + while (it1 != my_kvs.end()) { + diff_kv->emplace_back(make_pair(it1->first, it1->second)); + it1++; + } + + while (it2 != other_kvs.end()) { + diff_kv->emplace_back(make_pair(it2->first, it2->second)); + it2++; + } + } + + void merge(Btree* other, match_item_cb_t< K, V > merge_cb) { + std::vector< pair< K, V > > other_kvs; + + other->get_all_kvs(&other_kvs); + for (auto it = other_kvs.begin(); it != other_kvs.end(); it++) { + K k = it->first; + V v = it->second; + BRangeCBParam local_param(k, v); + K start(k.start(), 1), end(k.end(), 1); + + auto search_range = BtreeSearchRange(start, true, end, true); + BtreeUpdateRequest< K, V > ureq(search_range, merge_cb, nullptr, (BRangeCBParam*)&local_param); + range_put(k, v, btree_put_type::APPEND_IF_EXISTS_ELSE_INSERT, nullptr, nullptr, ureq); + } + } + + template < btree_store_type BtreeStoreType, typename K, typename V, btree_node_type InteriorNodeType, + btree_node_type LeafNodeType > + thread_local homeds::reserve_vector< btree_locked_node_info, 5 > btree_t::wr_locked_nodes; + + template < btree_store_type BtreeStoreType, typename K, typename V, btree_node_type InteriorNodeType, + btree_node_type LeafNodeType > + thread_local homeds::reserve_vector< btree_locked_node_info, 5 > btree_t::rd_locked_nodes; +}; +} // namespace btree +} // namespace sisl diff --git a/src/btree/simple_node.hpp b/src/btree/simple_node.hpp new file mode 100644 index 00000000..da6c5344 --- /dev/null +++ b/src/btree/simple_node.hpp @@ -0,0 +1,301 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ +#pragma once + +#include "btree_node.hpp" +#include "btree_kv.hpp" +#include "btree_internal.hpp" + +using namespace std; +using namespace boost; + +SISL_LOGGING_DECL(btree) + +namespace sisl { +namespace btree { + +template < typename K, typename V > +class SimpleNode : public BtreeNode< K > { +public: + SimpleNode(uint8_t* node_buf, bnodeid_t id, bool init, bool is_leaf, const BtreeConfig& cfg) : + BtreeNode< K >(node_buf, id, init, is_leaf) { + this->set_node_type(btree_node_type::FIXED); + } + + // Insert the key and value in provided index + // Assumption: Node lock is already taken + btree_status_t insert(uint32_t ind, const BtreeKey& key, const BtreeValue& val) override { + uint32_t sz = (this->get_total_entries() - (ind + 1) + 1) * get_nth_obj_size(0); + + if (sz != 0) { std::memmove(get_nth_obj(ind + 1), get_nth_obj(ind), sz); } + this->set_nth_obj(ind, key, val); + this->inc_entries(); + this->inc_gen(); + +#ifndef NDEBUG + validate_sanity(); +#endif + return btree_status_t::success; + } + + void update(uint32_t ind, const BtreeValue& val) override { + set_nth_value(ind, val); + + // TODO: Check if we need to upgrade the gen and impact of doing so with performance. It is especially + // needed for non similar key/value pairs + this->inc_gen(); +#ifndef NDEBUG + validate_sanity(); +#endif + } + + void update(uint32_t ind, const BtreeKey& key, const BtreeValue& val) override { + set_nth_obj(ind, key, val); + this->inc_gen(); + } + + // ind_s and ind_e are inclusive + void remove(uint32_t ind_s, uint32_t ind_e) override { + uint32_t total_entries = this->get_total_entries(); + DEBUG_ASSERT_GE(total_entries, ind_s, "node={}", to_string()); + DEBUG_ASSERT_GE(total_entries, ind_e, "node={}", to_string()); + + if (ind_e == total_entries) { // edge entry + DEBUG_ASSERT((!this->is_leaf() && this->has_valid_edge()), "node={}", to_string()); + // Set the last key/value as edge entry and by decrementing entry count automatically removed the last + // entry. + BtreeNodeInfo new_edge; + get_nth_value(ind_s - 1, &new_edge, false); + this->set_nth_value(total_entries, new_edge); + this->sub_entries(total_entries - ind_s + 1); + } else { + uint32_t sz = (total_entries - ind_e - 1) * get_nth_obj_size(0); + + if (sz != 0) { std::memmove(get_nth_obj(ind_s), get_nth_obj(ind_e + 1), sz); } + this->sub_entries(ind_e - ind_s + 1); + } + this->inc_gen(); +#ifndef NDEBUG + validate_sanity(); +#endif + } + + void append(uint32_t ind, const BtreeKey& key, const BtreeValue& val) override { + RELEASE_ASSERT(false, "Append operation is not supported on simple node"); + } + + uint32_t move_out_to_right_by_entries(const BtreeConfig& cfg, BtreeNode< K >& o, uint32_t nentries) override { + auto& other_node = s_cast< SimpleNode< K, V >& >(o); + + // Minimum of whats to be moved out and how many slots available in other node + nentries = std::min({nentries, this->get_total_entries(), other_node.get_available_entries(cfg)}); + uint32_t sz = nentries * get_nth_obj_size(0); + + if (sz != 0) { + uint32_t othersz = other_node.get_total_entries() * other_node.get_nth_obj_size(0); + std::memmove(other_node.get_nth_obj(nentries), other_node.get_nth_obj(0), othersz); + std::memmove(other_node.get_nth_obj(0), get_nth_obj(this->get_total_entries() - nentries), sz); + } + + other_node.add_entries(nentries); + this->sub_entries(nentries); + + // If there is an edgeEntry in this node, it needs to move to move out as well. + if (!this->is_leaf() && this->has_valid_edge()) { + other_node.set_edge_id(this->get_edge_id()); + this->invalidate_edge(); + } + + other_node.inc_gen(); + this->inc_gen(); + +#ifndef NDEBUG + validate_sanity(); +#endif + return nentries; + } + + uint32_t move_out_to_right_by_size(const BtreeConfig& cfg, BtreeNode< K >& o, uint32_t size) override { + return (get_nth_obj_size(0) * move_out_to_right_by_entries(cfg, o, size / get_nth_obj_size(0))); + } + + uint32_t move_in_from_right_by_entries(const BtreeConfig& cfg, BtreeNode< K >& o, uint32_t nentries) override { + auto& other_node = s_cast< SimpleNode< K, V >& >(o); + + // Minimum of whats to be moved and how many slots available + nentries = std::min({nentries, other_node.get_total_entries(), get_available_entries(cfg)}); + uint32_t sz = nentries * get_nth_obj_size(0); + if (sz != 0) { + uint32_t othersz = (other_node.get_total_entries() - nentries) * other_node.get_nth_obj_size(0); + std::memmove(get_nth_obj(this->get_total_entries()), other_node.get_nth_obj(0), sz); + std::memmove(other_node.get_nth_obj(0), other_node.get_nth_obj(nentries), othersz); + } + + other_node.sub_entries(nentries); + this->add_entries(nentries); + + // If next node does not have any more entries, but only a edge entry + // we need to move that to us, so that if need be next node could be freed. + if ((other_node.get_total_entries() == 0) && other_node.has_valid_edge()) { + DEBUG_ASSERT_EQ(this->has_valid_edge(), false, "node={}", to_string()); + this->set_edge_id(other_node.get_edge_id()); + other_node.invalidate_edge(); + } + + other_node.inc_gen(); + this->inc_gen(); + +#ifndef NDEBUG + validate_sanity(); +#endif + return nentries; + } + + uint32_t move_in_from_right_by_size(const BtreeConfig& cfg, BtreeNode< K >& o, uint32_t size) override { + return (get_nth_obj_size(0) * move_in_from_right_by_entries(cfg, o, size / get_nth_obj_size(0))); + } + + uint32_t get_available_size(const BtreeConfig& cfg) const override { + return (BtreeNode< K >::node_area_size(cfg) - (this->get_total_entries() * get_nth_obj_size(0))); + } + + K get_nth_key(uint32_t ind, bool copy) const override { + DEBUG_ASSERT_LT(ind, this->get_total_entries(), "node={}", to_string()); + sisl::blob b; + b.bytes = (uint8_t*)(this->node_data_area_const() + (get_nth_obj_size(ind) * ind)); + b.size = get_obj_key_size(ind); + return K{b, copy}; + } + + void get_nth_value(uint32_t ind, BtreeValue* out_val, bool copy) const override { + DEBUG_ASSERT_LT(ind, this->get_total_entries(), "node={}", to_string()); + sisl::blob b; + if (ind == this->get_total_entries()) { + RELEASE_ASSERT_EQ(this->is_leaf(), false, "setting value outside bounds on leaf node"); + DEBUG_ASSERT_EQ(this->has_valid_edge(), true, "node={}", to_string()); + b.bytes = const_cast< uint8_t* >(reinterpret_cast< const uint8_t* >(this->get_edge_id())); + b.size = sizeof(bnodeid_t); + } else { + b.bytes = const_cast< uint8_t* >(reinterpret_cast< const uint8_t* >( + this->node_data_area_const() + (get_nth_obj_size(ind) * ind) + get_obj_key_size(ind))); + b.size = V::get_fixed_size(); + } + return out_val->deserialize(b, copy); + } + + /*V get_nth_value(uint32_t ind, bool copy) const { + V val; + get_nth_value(ind, &val, copy); + return val; + }*/ + + std::string to_string(bool print_friendly = false) const override { + auto str = fmt::format("{}id={} nEntries={} {} ", + (print_friendly ? "------------------------------------------------------------\n" : ""), + this->get_node_id(), this->get_total_entries(), (this->is_leaf() ? "LEAF" : "INTERIOR")); + if (!this->is_leaf() && (this->has_valid_edge())) { + fmt::format_to(std::back_inserter(str), "edge_id={} ", this->get_edge_id()); + } + + for (uint32_t i{0}; i < this->get_total_entries(); ++i) { + V val; + get_nth_value(i, &val, false); + fmt::format_to(std::back_inserter(str), "{}Entry{} [Key={} Val={}]", (print_friendly ? "\n\t" : " "), i + 1, + get_nth_key(i, false).to_string(), val.to_string()); + } + return str; + } + +#ifndef NDEBUG + void validate_sanity() { + if (this->get_total_entries() == 0) { return; } + + // validate if keys are in ascending order + uint32_t i{1}; + K prevKey = get_nth_key(0, false); + + while (i < this->get_total_entries()) { + K key = get_nth_key(i, false); + if (i > 0 && prevKey.compare(key) > 0) { + LOGDEBUG("non sorted entry : {} -> {} ", prevKey.to_string(), key.to_string()); + DEBUG_ASSERT(false, "node={}", to_string()); + } + ++i; + prevKey = key; + } + } +#endif + + inline uint32_t get_nth_obj_size(uint32_t ind) const override { + return (get_obj_key_size(ind) + get_obj_value_size(ind)); + } + + int compare_nth_key(const BtreeKey& cmp_key, uint32_t ind) const override { + return get_nth_key(ind, false).compare(cmp_key); + } + + // Simple/Fixed node doesn't need a record to point key/value object + uint16_t get_record_size() const override { return 0; } + + /*int compare_nth_key_range(const BtreeKeyRange& range, uint32_t ind) const override { + return get_nth_key(ind, false).compare_range(range); + }*/ + + /////////////// Other Internal Methods ///////////// + void set_nth_obj(uint32_t ind, const BtreeKey& k, const BtreeValue& v) { + if (ind > this->get_total_entries()) { + set_nth_value(ind, v); + } else { + uint8_t* entry = this->node_data_area() + (get_nth_obj_size(ind) * ind); + sisl::blob key_blob = k.serialize(); + memcpy((void*)entry, key_blob.bytes, key_blob.size); + + sisl::blob val_blob = v.serialize(); + memcpy((void*)(entry + key_blob.size), val_blob.bytes, val_blob.size); + } + } + + uint32_t get_available_entries(const BtreeConfig& cfg) const { + return get_available_size(cfg) / get_nth_obj_size(0); + } + + inline uint32_t get_obj_key_size(uint32_t ind) const { return K::get_fixed_size(); } + + inline uint32_t get_obj_value_size(uint32_t ind) const { return V::get_fixed_size(); } + + uint8_t* get_nth_obj(uint32_t ind) { return (this->node_data_area() + (get_nth_obj_size(ind) * ind)); } + + void set_nth_key(uint32_t ind, BtreeKey* key) { + uint8_t* entry = this->node_data_area() + (get_nth_obj_size(ind) * ind); + sisl::blob b = key->serialize(); + memcpy(entry, b.bytes, b.size); + } + + void set_nth_value(uint32_t ind, const BtreeValue& v) { + sisl::blob b = v.serialize(); + if (ind > this->get_total_entries()) { + RELEASE_ASSERT_EQ(this->is_leaf(), false, "setting value outside bounds on leaf node"); + DEBUG_ASSERT_EQ(b.size, sizeof(bnodeid_t), "Invalid value size being set for non-leaf node"); + this->set_edge_id(*r_cast< bnodeid_t* >(b.bytes)); + } else { + uint8_t* entry = this->node_data_area() + (get_nth_obj_size(ind) * ind) + get_obj_key_size(ind); + std::memcpy(entry, b.bytes, b.size); + } + } +}; +} // namespace btree +} // namespace sisl diff --git a/src/btree/tests/btree_test_kvs.hpp b/src/btree/tests/btree_test_kvs.hpp new file mode 100644 index 00000000..862e2896 --- /dev/null +++ b/src/btree/tests/btree_test_kvs.hpp @@ -0,0 +1,294 @@ +#pragma once +#include +#include +#include +#include +#include +#include "../btree_kv.hpp" + +static constexpr uint32_t g_max_keys{6000}; +static constexpr uint32_t g_max_keysize{120}; +static constexpr uint32_t g_max_valsize{120}; +static std::random_device g_rd{}; +static std::default_random_engine g_re{g_rd()}; +static std::uniform_int_distribution< uint32_t > g_randkey_generator{0, g_max_keys}; +static std::uniform_int_distribution< uint32_t > g_randkeysize_generator{2, g_max_keysize}; +static std::uniform_int_distribution< uint32_t > g_randval_generator{1, 30000}; +static std::uniform_int_distribution< uint32_t > g_randvalsize_generator{2, g_max_valsize}; + +static std::map< uint32_t, std::shared_ptr< std::string > > g_key_pool; + +static constexpr std::array< const char, 62 > alphanum{ + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', + 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', + 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'}; + +static std::string gen_random_string(size_t len, uint32_t preamble = std::numeric_limits< uint32_t >::max()) { + std::string str; + if (preamble != std::numeric_limits< uint32_t >::max()) { + std::stringstream ss; + ss << std::setw(8) << std::setfill('0') << std::hex << preamble; + str += ss.str(); + } + + static thread_local std::random_device rd{}; + static thread_local std::default_random_engine re{rd()}; + std::uniform_int_distribution< size_t > rand_char{0, alphanum.size() - 1}; + for (size_t i{0}; i < len; ++i) { + str += alphanum[rand_char(re)]; + } + str += '\0'; + return str; +} + +using namespace sisl::btree; + +class TestFixedKey : public BtreeKey { +private: + uint32_t m_key{0}; + +public: + TestFixedKey() = default; + TestFixedKey(uint32_t k) : m_key{k} {} + TestFixedKey(const TestFixedKey& other) : TestFixedKey(other.serialize(), true) {} + TestFixedKey(const BtreeKey& other) : TestFixedKey(other.serialize(), true) {} + TestFixedKey(const sisl::blob& b, bool copy) : BtreeKey(), m_key{*(r_cast< const uint32_t* >(b.bytes))} {} + TestFixedKey& operator=(const TestFixedKey& other) { + clone(other); + return *this; + }; + virtual void clone(const BtreeKey& other) override { m_key = ((TestFixedKey&)other).m_key; } + + virtual ~TestFixedKey() = default; + + int compare(const BtreeKey& o) const override { + const TestFixedKey& other = s_cast< const TestFixedKey& >(o); + if (m_key < other.m_key) { + return -1; + } else if (m_key > other.m_key) { + return 1; + } else { + return 0; + } + } + + int compare_range(const BtreeKeyRange& range) const override { + if (m_key == start_key(range)) { + return range.is_start_inclusive() ? 0 : -1; + } else if (m_key < start_key(range)) { + return -1; + } else if (m_key == end_key(range)) { + return range.is_end_inclusive() ? 0 : 1; + } else if (m_key > end_key(range)) { + return 1; + } else { + return 0; + } + } + + sisl::blob serialize() const override { + return sisl::blob{uintptr_cast(const_cast< uint32_t* >(&m_key)), uint32_cast(sizeof(uint32_t))}; + } + uint32_t serialized_size() const override { return get_fixed_size(); } + static uint32_t get_fixed_size() { return (sizeof(uint32_t)); } + std::string to_string() const { return fmt::format("{}", m_key); } + + static uint32_t get_estimate_max_size() { return get_fixed_size(); } + friend std::ostream& operator<<(std::ostream& os, const TestFixedKey& k) { + os << k.to_string(); + return os; + } + + bool operator<(const TestFixedKey& o) const { return (compare(o) < 0); } + bool operator==(const TestFixedKey& other) const { return (compare(other) == 0); } + + uint32_t key() const { return m_key; } + uint32_t start_key(const BtreeKeyRange& range) const { + const TestFixedKey& k = (const TestFixedKey&)(range.start_key()); + return k.m_key; + } + uint32_t end_key(const BtreeKeyRange& range) const { + const TestFixedKey& k = (const TestFixedKey&)(range.end_key()); + return k.m_key; + } +}; + +class TestVarLenKey : public BtreeKey { +private: + uint32_t m_key{0}; + + static std::shared_ptr< std::string > idx_to_key(uint32_t idx) { + auto it = g_key_pool.find(idx); + if (it == g_key_pool.end()) { + const auto& [it, happened] = g_key_pool.emplace( + idx, std::make_shared< std::string >(gen_random_string(g_randkeysize_generator(g_re), idx))); + assert(happened); + return it->second; + } else { + return it->second; + } + } + +public: + TestVarLenKey() = default; + TestVarLenKey(uint32_t k) : BtreeKey(), m_key{k} {} + TestVarLenKey(const BtreeKey& other) : TestVarLenKey(other.serialize(), true) {} + TestVarLenKey(const TestVarLenKey& other) = default; + TestVarLenKey(const sisl::blob& b, bool copy) : BtreeKey() { + std::string data{r_cast< const char* >(b.bytes), b.size}; + std::stringstream ss; + ss << std::hex << data.substr(0, 8); + ss >> m_key; + assert(data == *idx_to_key(m_key)); + } + virtual ~TestVarLenKey() = default; + + virtual void clone(const BtreeKey& other) override { m_key = ((TestVarLenKey&)other).m_key; } + + sisl::blob serialize() const override { + const auto& data = idx_to_key(m_key); + return sisl::blob{(uint8_t*)(data->c_str()), (uint32_t)data->size()}; + } + + uint32_t serialized_size() const override { return idx_to_key(m_key)->size(); } + + static uint32_t get_fixed_size() { + assert(0); + return 0; + } + + static uint32_t get_estimate_max_size() { return g_max_keysize; } + + int compare(const BtreeKey& o) const override { + const TestVarLenKey& other = s_cast< const TestVarLenKey& >(o); + if (m_key < other.m_key) { + return -1; + } else if (m_key > other.m_key) { + return 1; + } else { + return 0; + } + } + + int compare_range(const BtreeKeyRange& range) const override { + if (m_key == start_key(range)) { + return range.is_start_inclusive() ? 0 : -1; + } else if (m_key < start_key(range)) { + return -1; + } else if (m_key == end_key(range)) { + return range.is_end_inclusive() ? 0 : 1; + } else if (m_key > end_key(range)) { + return 1; + } else { + return 0; + } + } + + std::string to_string() const { return fmt::format("{}-{}", m_key, idx_to_key(m_key)->substr(0, 8)); } + + friend std::ostream& operator<<(std::ostream& os, const TestVarLenKey& k) { + os << k.to_string(); + return os; + } + + bool operator<(const TestVarLenKey& o) const { return (compare(o) < 0); } + bool operator==(const TestVarLenKey& other) const { return (compare(other) == 0); } + + uint32_t key() const { return m_key; } + uint32_t start_key(const BtreeKeyRange& range) const { + const TestVarLenKey& k = (const TestVarLenKey&)(range.start_key()); + return k.m_key; + } + uint32_t end_key(const BtreeKeyRange& range) const { + const TestVarLenKey& k = (const TestVarLenKey&)(range.end_key()); + return k.m_key; + } +}; + +class TestFixedValue : public BtreeValue { +private: +public: + TestFixedValue(bnodeid_t val) { assert(0); } + TestFixedValue(uint32_t val) : BtreeValue() { m_val = val; } + TestFixedValue() : TestFixedValue((uint32_t)-1) {} + TestFixedValue(const TestFixedValue& other) : BtreeValue() { m_val = other.m_val; }; + TestFixedValue(const sisl::blob& b, bool copy) : BtreeValue() { m_val = *(r_cast< uint32_t* >(b.bytes)); } + virtual ~TestFixedValue() = default; + + static TestFixedValue generate_rand() { return TestFixedValue{g_randval_generator(g_re)}; } + + TestFixedValue& operator=(const TestFixedValue& other) { + m_val = other.m_val; + return *this; + } + + sisl::blob serialize() const override { + sisl::blob b; + b.bytes = uintptr_cast(const_cast< uint32_t* >(&m_val)); + b.size = sizeof(m_val); + return b; + } + + uint32_t serialized_size() const override { return sizeof(m_val); } + static uint32_t get_fixed_size() { return sizeof(m_val); } + void deserialize(const sisl::blob& b, bool copy) { m_val = *(r_cast< uint32_t* >(b.bytes)); } + + std::string to_string() const override { return fmt::format("{}", m_val); } + + friend ostream& operator<<(ostream& os, const TestFixedValue& v) { + os << v.to_string(); + return os; + } + + // This is not mandatory overridden method for BtreeValue, but for testing comparision + bool operator==(const TestFixedValue& other) const { return (m_val == other.m_val); } + + uint32_t value() const { return m_val; } + +private: + uint32_t m_val; +}; + +class TestVarLenValue : public BtreeValue { +public: + TestVarLenValue(bnodeid_t val) { assert(0); } + TestVarLenValue(const std::string& val) : BtreeValue(), m_val{val} {} + TestVarLenValue() = default; + TestVarLenValue(const TestVarLenValue& other) : BtreeValue() { m_val = other.m_val; }; + TestVarLenValue(const sisl::blob& b, bool copy) : BtreeValue(), m_val{std::string((const char*)b.bytes, b.size)} {} + virtual ~TestVarLenValue() = default; + + TestVarLenValue& operator=(const TestVarLenValue& other) { + m_val = other.m_val; + return *this; + } + + static TestVarLenValue generate_rand() { return TestVarLenValue{gen_random_string(g_randvalsize_generator(g_re))}; } + + sisl::blob serialize() const override { + sisl::blob b; + b.bytes = uintptr_cast(const_cast< char* >(m_val.c_str())); + b.size = m_val.size(); + return b; + } + + uint32_t serialized_size() const override { return (uint32_t)m_val.size(); } + static uint32_t get_fixed_size() { return 0; } + + void deserialize(const sisl::blob& b, bool copy) { m_val = std::string((const char*)b.bytes, b.size); } + + std::string to_string() const override { return fmt::format("{}", m_val); } + + friend ostream& operator<<(ostream& os, const TestVarLenValue& v) { + os << v.to_string(); + return os; + } + + // This is not mandatory overridden method for BtreeValue, but for testing comparision + bool operator==(const TestVarLenValue& other) const { return (m_val == other.m_val); } + + std::string value() const { return m_val; } + +private: + std::string m_val; +}; diff --git a/src/btree/tests/test_btree_node.cpp b/src/btree/tests/test_btree_node.cpp new file mode 100644 index 00000000..6d71d97f --- /dev/null +++ b/src/btree/tests/test_btree_node.cpp @@ -0,0 +1,347 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ + +#include +#include +#include +#include + +#include "options/options.h" +#include "logging/logging.h" +#include "simple_node.hpp" +#include "varlen_node.hpp" +#include "utility/enum.hpp" +#include "btree_test_kvs.hpp" + +static constexpr uint32_t g_node_size{4096}; +using namespace sisl::btree; +SISL_LOGGING_INIT(btree) + +struct FixedLenNodeTest { + using NodeType = SimpleNode< TestFixedKey, TestFixedValue >; + using KeyType = TestFixedKey; + using ValueType = TestFixedValue; +}; + +struct VarKeySizeNodeTest { + using NodeType = VarKeySizeNode< TestVarLenKey, TestFixedValue >; + using KeyType = TestVarLenKey; + using ValueType = TestFixedValue; +}; + +struct VarValueSizeNodeTest { + using NodeType = VarValueSizeNode< TestFixedKey, TestVarLenValue >; + using KeyType = TestFixedKey; + using ValueType = TestVarLenValue; +}; + +struct VarObjSizeNodeTest { + using NodeType = VarObjSizeNode< TestVarLenKey, TestVarLenValue >; + using KeyType = TestVarLenKey; + using ValueType = TestVarLenValue; +}; + +template < typename TestType > +struct NodeTest : public testing::Test { + using T = TestType; + using K = TestType::KeyType; + using V = TestType::ValueType; + + std::unique_ptr< typename T::NodeType > m_node1; + std::unique_ptr< typename T::NodeType > m_node2; + std::map< K, V > m_shadow_map; + BtreeConfig m_cfg{g_node_size}; + + void SetUp() override { + m_node1 = std::make_unique< typename T::NodeType >(new uint8_t[g_node_size], 1ul, true, true, m_cfg); + m_node2 = std::make_unique< typename T::NodeType >(new uint8_t[g_node_size], 2ul, true, true, m_cfg); + } + + void put(uint32_t k, btree_put_type put_type) { + K key{k}; + V value{V::generate_rand()}; + V existing_v; + bool done = m_node1->put(key, value, put_type, &existing_v); + + bool expected_done{true}; + if (m_shadow_map.find(key) != m_shadow_map.end()) { + expected_done = (put_type != btree_put_type::INSERT_ONLY_IF_NOT_EXISTS); + } + ASSERT_EQ(done, expected_done) << "Expected put of key " << k << " of put_type " << enum_name(put_type) + << " to be " << expected_done; + if (expected_done) { + m_shadow_map.insert(std::make_pair(key, value)); + } else { + const auto r = m_shadow_map.find(key); + ASSERT_NE(r, m_shadow_map.end()) << "Testcase issue, expected inserted slots to be in shadow map"; + ASSERT_EQ(existing_v, r->second) + << "Insert existing value doesn't return correct data for key " << r->first; + } + } + + void update(uint32_t k, bool validate_update = true) { + K key{k}; + V value{V::generate_rand()}; + V existing_v; + const bool done = m_node1->update_one(key, value, &existing_v); + const auto expected_done = (m_shadow_map.find(key) != m_shadow_map.end()); + ASSERT_EQ(done, expected_done) << "Not updated for key=" << k << " where it is expected to"; + + if (done) { + validate_data(key, existing_v); + m_shadow_map[key] = value; + } + + if (validate_update) { validate_specific(k); } + } + + void remove(uint32_t k, bool validate_remove = true) { + K key{k}; + K existing_key; + V existing_value; + const bool shadow_found = (m_shadow_map.find(key) != m_shadow_map.end()); + auto removed_1 = m_node1->remove_one(K{key}, &existing_key, &existing_value); + if (removed_1) { + ASSERT_EQ(key.key(), k) << "Whats removed is different than whats asked for"; + validate_data(key, existing_value); + m_shadow_map.erase(key); + } + + auto removed_2 = m_node2->remove_one(K{key}, &existing_key, &existing_value); + if (removed_2) { + ASSERT_EQ(key.key(), k) << "Whats removed is different than whats asked for"; + validate_data(key, existing_value); + m_shadow_map.erase(key); + } + + ASSERT_EQ(removed_1 || removed_2, shadow_found) << "To remove key=" << k << " is not present in the nodes"; + + if (validate_remove) { validate_specific(k); } + } + + void validate_get_all() const { + uint32_t start_ind{0}; + uint32_t end_ind{0}; + std::vector< std::pair< K, V > > out_vector; + auto ret = m_node1->get_all(BtreeKeyRangeSafe< K >{K{0u}, true, K{g_max_keys}, false}, g_max_keys, start_ind, + end_ind, &out_vector); + ret += m_node2->get_all(BtreeKeyRangeSafe< K >{K{0u}, true, K{g_max_keys}, false}, g_max_keys, start_ind, + end_ind, &out_vector); + + ASSERT_EQ(ret, m_shadow_map.size()) << "Expected number of entries to be same with shadow_map size"; + ASSERT_EQ(out_vector.size(), m_shadow_map.size()) + << "Expected number of entries to be same with shadow_map size"; + + uint64_t idx{0}; + for (auto& [key, value] : m_shadow_map) { + ASSERT_EQ(out_vector[idx].second, value) + << "Range get doesn't return correct data for key=" << key << " idx=" << idx; + ++idx; + } + } + + void validate_get_any(uint32_t start, uint32_t end) const { + K start_key{start}; + K end_key{end}; + K out_k; + V out_v; + auto result = + m_node1->get_any(BtreeKeyRangeSafe< K >{start_key, true, end_key, true}, &out_k, &out_v, true, true); + if (result.first) { + validate_data(out_k, out_v); + } else { + result = + m_node2->get_any(BtreeKeyRangeSafe< K >{start_key, true, end_key, true}, &out_k, &out_v, true, true); + if (result.first) { + validate_data(out_k, out_v); + } else { + const auto r = m_shadow_map.lower_bound(start_key); + const bool found = ((r != m_shadow_map.end()) && (r->first.key() <= end)); + ASSERT_EQ(found, false) << "Node key range=" << start << "-" << end + << " missing, Its present in shadow map at " << r->first; + } + } + } + + void validate_specific(uint32_t k) const { + K key{k}; + V val; + const auto ret1 = m_node1->find(key, &val, true); + if (ret1.first) { + ASSERT_NE(m_shadow_map.find(key), m_shadow_map.end()) + << "Node key " << k << " is present when its expected not to be"; + validate_data(key, val); + } + + const auto ret2 = m_node2->find(key, &val, true); + if (ret2.first) { + ASSERT_NE(m_shadow_map.find(key), m_shadow_map.end()) + << "Node key " << k << " is present when its expected not to be"; + validate_data(key, val); + } + + ASSERT_EQ(ret1.first || ret2.first, m_shadow_map.find(key) != m_shadow_map.end()) + << "Node key " << k << " is incorrect presence compared to shadow map"; + } + +protected: + void put_list(const std::vector< uint32_t >& keys) { + for (const auto& k : keys) { + if (!this->has_room()) { break; } + put(k, btree_put_type::INSERT_ONLY_IF_NOT_EXISTS); + } + } + + void print() const { + LOGDEBUG("Node1:\n {}", m_node1->to_string(true)); + LOGDEBUG("Node2:\n {}", m_node2->to_string(true)); + } + + uint32_t remaining_space() const { return m_node1->get_available_size(m_cfg); } + bool has_room() const { return remaining_space() > (g_max_keysize + g_max_valsize + 32); } + +private: + void validate_data(const K& key, const V& node_val) const { + const auto r = m_shadow_map.find(key); + ASSERT_NE(r, m_shadow_map.end()) << "Node key is not present in shadow map"; + ASSERT_EQ(node_val, r->second) << "Found value in node doesn't return correct data for key=" << r->first; + } +}; + +using NodeTypes = testing::Types< FixedLenNodeTest, VarKeySizeNodeTest, VarValueSizeNodeTest, VarObjSizeNodeTest >; +TYPED_TEST_SUITE(NodeTest, NodeTypes); + +TYPED_TEST(NodeTest, SequentialInsert) { + for (uint32_t i{0}; (i < 100 && this->has_room()); ++i) { + this->put(i, btree_put_type::INSERT_ONLY_IF_NOT_EXISTS); + } + this->print(); + this->validate_get_all(); + this->validate_get_any(0, 2); + this->validate_get_any(3, 3); + this->validate_get_any(98, 102); +} + +TYPED_TEST(NodeTest, ReverseInsert) { + for (uint32_t i{100}; (i > 0 && this->has_room()); --i) { + this->put(i - 1, btree_put_type::INSERT_ONLY_IF_NOT_EXISTS); + } + this->print(); + this->validate_get_all(); + this->validate_get_any(0, 2); + this->validate_get_any(3, 3); + this->validate_get_any(98, 102); +} + +TYPED_TEST(NodeTest, Remove) { + this->put_list({0, 1, 2, g_max_keys / 2, g_max_keys / 2 + 1, g_max_keys / 2 - 1}); + this->remove(0); + this->remove(0); // Remove non-existing + this->remove(1); + this->remove(2); + this->remove(g_max_keys / 2 - 1); + this->print(); + this->validate_get_all(); + this->validate_get_any(0, 2); + this->validate_get_any(3, 3); + this->validate_get_any(g_max_keys / 2, g_max_keys - 1); +} + +TYPED_TEST(NodeTest, Update) { + this->put_list({0, 1, 2, g_max_keys / 2, g_max_keys / 2 + 1, g_max_keys / 2 - 1}); + this->update(1); + this->update(g_max_keys / 2); + this->update(2); + this->remove(0); + this->update(0); // Update non-existing + this->print(); + this->validate_get_all(); +} + +TYPED_TEST(NodeTest, RandomInsertRemoveUpdate) { + uint32_t num_inserted{0}; + while (this->has_room()) { + this->put(g_randkey_generator(g_re), btree_put_type::INSERT_ONLY_IF_NOT_EXISTS); + ++num_inserted; + } + LOGDEBUG("After random insertion of {} objects", num_inserted); + this->print(); + this->validate_get_all(); + + for (uint32_t i{0}; i < num_inserted / 2; ++i) { + const auto k = g_randkey_generator(g_re) % this->m_shadow_map.rbegin()->first.key(); + const auto r = this->m_shadow_map.lower_bound(typename TestFixture::K{k}); + this->remove(r->first.key()); + } + LOGDEBUG("After random removal of {} objects", num_inserted / 2); + this->print(); + this->validate_get_all(); + + uint32_t num_updated{0}; + for (uint32_t i{0}; i < num_inserted / 2 && this->has_room(); ++i) { + const auto k = g_randkey_generator(g_re) % this->m_shadow_map.rbegin()->first.key(); + const auto r = this->m_shadow_map.lower_bound(typename TestFixture::K{k}); + this->update(r->first.key()); + ++num_updated; + } + LOGDEBUG("After update of {} entries", num_updated); + this->print(); + this->validate_get_all(); +} + +TYPED_TEST(NodeTest, Move) { + std::vector< uint32_t > list{0, 1, 2, g_max_keys / 2 - 1}; + this->put_list(list); + this->print(); + + this->m_node1->move_out_to_right_by_entries(this->m_cfg, *this->m_node2, list.size()); + this->m_node1->move_out_to_right_by_entries(this->m_cfg, *this->m_node2, list.size()); // Empty move + ASSERT_EQ(this->m_node1->get_total_entries(), 0u) << "Move out to right has failed"; + ASSERT_EQ(this->m_node2->get_total_entries(), list.size()) << "Move out to right has failed"; + this->validate_get_all(); + + this->m_node1->move_in_from_right_by_entries(this->m_cfg, *this->m_node2, list.size()); + this->m_node1->move_in_from_right_by_entries(this->m_cfg, *this->m_node2, list.size()); // Empty move + ASSERT_EQ(this->m_node2->get_total_entries(), 0u) << "Move in from right has failed"; + ASSERT_EQ(this->m_node1->get_total_entries(), list.size()) << "Move in from right has failed"; + this->validate_get_all(); + + this->m_node1->move_out_to_right_by_entries(this->m_cfg, *this->m_node2, list.size() / 2); + ASSERT_EQ(this->m_node1->get_total_entries(), list.size() / 2) << "Move out half entries to right has failed"; + ASSERT_EQ(this->m_node2->get_total_entries(), list.size() - list.size() / 2) + << "Move out half entries to right has failed"; + this->validate_get_all(); + this->print(); + + ASSERT_EQ(this->m_node1->validate_key_order(), true) << "Key order validation of node1 has failed"; + ASSERT_EQ(this->m_node2->validate_key_order(), true) << "Key order validation of node2 has failed"; +} + +SISL_OPTIONS_ENABLE(logging, test_btree_node) +SISL_OPTION_GROUP(test_btree_node, + (num_iters, "", "num_iters", "number of iterations for rand ops", + ::cxxopts::value< uint32_t >()->default_value("65536"), "number")) + +int main(int argc, char* argv[]) { + ::testing::InitGoogleTest(&argc, argv); + SISL_OPTIONS_LOAD(argc, argv, logging, test_btree_node) + sisl::logging::SetLogger("test_btree_node"); + spdlog::set_pattern("[%D %T%z] [%^%L%$] [%t] %v"); + + auto ret = RUN_ALL_TESTS(); + return ret; +} \ No newline at end of file diff --git a/src/btree/tests/test_mem_btree.cpp b/src/btree/tests/test_mem_btree.cpp new file mode 100644 index 00000000..27737422 --- /dev/null +++ b/src/btree/tests/test_mem_btree.cpp @@ -0,0 +1,151 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ + +#include +#include +#include +#include + +#include "options/options.h" +#include "logging/logging.h" +#include "simple_node.hpp" +#include "varlen_node.hpp" +#include "utility/enum.hpp" +#include "btree_test_kvs.hpp" +#include "../mem_btree.hpp" + +static constexpr uint32_t g_node_size{4096}; +using namespace sisl::btree; +SISL_LOGGING_INIT(btree) + +SISL_OPTIONS_ENABLE(logging, test_mem_btree) +SISL_OPTION_GROUP(test_mem_btree, + (num_iters, "", "num_iters", "number of iterations for rand ops", + ::cxxopts::value< uint32_t >()->default_value("65536"), "number")) + +struct FixedLenBtreeTest { + using BtreeType = MemBtree< TestFixedKey, TestFixedValue >; + using KeyType = TestFixedKey; + using ValueType = TestFixedValue; + static constexpr btree_node_type leaf_node_type = btree_node_type::FIXED; + static constexpr btree_node_type interior_node_type = btree_node_type::FIXED; +}; + +struct VarKeySizeBtreeTest { + using BtreeType = MemBtree< TestVarLenKey, TestFixedValue >; + using KeyType = TestVarLenKey; + using ValueType = TestFixedValue; + static constexpr btree_node_type leaf_node_type = btree_node_type::VAR_KEY; + static constexpr btree_node_type interior_node_type = btree_node_type::VAR_KEY; +}; + +struct VarValueSizeBtreeTest { + using BtreeType = MemBtree< TestFixedKey, TestVarLenValue >; + using KeyType = TestFixedKey; + using ValueType = TestVarLenValue; + static constexpr btree_node_type leaf_node_type = btree_node_type::VAR_VALUE; + static constexpr btree_node_type interior_node_type = btree_node_type::FIXED; +}; + +struct VarObjSizeBtreeTest { + using BtreeType = MemBtree< TestVarLenKey, TestVarLenValue >; + using KeyType = TestVarLenKey; + using ValueType = TestVarLenValue; + static constexpr btree_node_type leaf_node_type = btree_node_type::VAR_OBJECT; + static constexpr btree_node_type interior_node_type = btree_node_type::VAR_OBJECT; +}; + +template < typename TestType > +struct BtreeTest : public testing::Test { + using T = TestType; + using K = TestType::KeyType; + using V = TestType::ValueType; + + std::unique_ptr< typename T::BtreeType > m_bt; + std::map< K, V > m_shadow_map; + BtreeConfig m_cfg{g_node_size}; + + void SetUp() override { + m_cfg.m_leaf_node_type = T::leaf_node_type; + m_cfg.m_int_node_type = T::interior_node_type; + m_bt = std::make_unique< typename T::BtreeType >(m_cfg); + m_bt->init(nullptr); + } + + void put(uint32_t k, btree_put_type put_type) { + std::unique_ptr< V > existing_v; + + BtreeMutateRequest req = BtreeSinglePutRequest{ + std::make_unique< K >(k), std::make_unique< V >(V::generate_rand()), put_type, std::move(existing_v)}; + bool done = (m_bt->put(req) == btree_status_t::success); + + auto& sreq = to_single_put_req(req); + bool expected_done{true}; + if (m_shadow_map.find(*sreq.m_k) != m_shadow_map.end()) { + expected_done = (put_type != btree_put_type::INSERT_ONLY_IF_NOT_EXISTS); + } + ASSERT_EQ(done, expected_done) << "Expected put of key " << k << " of put_type " << enum_name(put_type) + << " to be " << expected_done; + if (expected_done) { + m_shadow_map.insert(std::make_pair((const K&)*sreq.m_k, (const V&)*sreq.m_v)); + } else { + const auto r = m_shadow_map.find(*sreq.m_k); + ASSERT_NE(r, m_shadow_map.end()) << "Testcase issue, expected inserted slots to be in shadow map"; + ASSERT_EQ((const V&)*sreq.m_existing_val, r->second) + << "Insert existing value doesn't return correct data for key " << r->first; + } + } + + void validate_get_all() const { + std::vector< std::pair< K, V > > out_vector; + BtreeQueryRequest qreq{BtreeSearchState{BtreeKeyRangeSafe< K >{K{0u}, true, K{g_max_keys}, false}}}; + auto ret = m_bt->query(qreq, out_vector); + + ASSERT_EQ(ret, btree_status_t::success) << "Expected success on query"; + ASSERT_EQ(out_vector.size(), m_shadow_map.size()) + << "Expected number of entries to be same with shadow_map size"; + + uint64_t idx{0}; + for (auto& [key, value] : m_shadow_map) { + ASSERT_EQ(out_vector[idx].second, value) + << "Range get doesn't return correct data for key=" << key << " idx=" << idx; + ++idx; + } + } + + void print() const { m_bt->print_tree(); } +}; + +using BtreeTypes = testing::Types< FixedLenBtreeTest, VarKeySizeBtreeTest, VarValueSizeBtreeTest, VarObjSizeBtreeTest >; +TYPED_TEST_SUITE(BtreeTest, BtreeTypes); + +TYPED_TEST(BtreeTest, SequentialInsert) { + for (uint32_t i{0}; i < 100; ++i) { + this->put(i, btree_put_type::INSERT_ONLY_IF_NOT_EXISTS); + } + this->print(); + this->validate_get_all(); +} + +int main(int argc, char* argv[]) { + SISL_OPTIONS_LOAD(argc, argv, logging, test_mem_btree) + sisl::logging::SetLogger("test_mem_btree"); + spdlog::set_pattern("[%D %T%z] [%^%L%$] [%t] %v"); + + auto ret = RUN_ALL_TESTS(); + return ret; +} \ No newline at end of file diff --git a/src/btree/varlen_node.hpp b/src/btree/varlen_node.hpp new file mode 100644 index 00000000..e69b370a --- /dev/null +++ b/src/btree/varlen_node.hpp @@ -0,0 +1,695 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam, Rishabh Mittal + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ + +#pragma once + +#include "logging/logging.h" +#include "btree_node.hpp" +#include "btree_kv.hpp" + +SISL_LOGGING_DECL(btree) + +namespace sisl { +namespace btree { +#pragma pack(1) +struct btree_obj_record { + uint16_t m_obj_offset : 14; + uint16_t reserved : 2; +}; + +struct var_node_header { + uint16_t m_tail_arena_offset; // Tail side of the arena where new keys are inserted + uint16_t m_available_space; + uint16_t m_init_available_space; // remember initial node area size to later use for compaction + // TODO: + // We really dont require storing m_init_available_space in each node. + // Instead add method in variant node to fetch config + + uint16_t tail_offset() const { return m_tail_arena_offset; } + uint16_t available_space() const { return m_available_space; } +}; +#pragma pack() + +/** + * Internal format of variable node: + * [var node header][Record][Record].. ... ... [key][value][key][value] + * key and value both can be variying. + */ +template < typename K, typename V > +class VariableNode : public BtreeNode< K > { +public: + VariableNode(uint8_t* node_buf, bnodeid_t id, bool init, bool is_leaf, const BtreeConfig& cfg) : + BtreeNode< K >(node_buf, id, init, is_leaf) { + if (init) { + // Tail arena points to the edge of the node as data arena grows backwards. Entire space is now available + // except for the header itself + get_var_node_header()->m_init_available_space = BtreeNode< K >::node_area_size(cfg); + get_var_node_header()->m_tail_arena_offset = BtreeNode< K >::node_area_size(cfg); + get_var_node_header()->m_available_space = + get_var_node_header()->m_tail_arena_offset - sizeof(var_node_header); + } + } + + virtual ~VariableNode() = default; + + /* Insert the key and value in provided index + * Assumption: Node lock is already taken */ + btree_status_t insert(uint32_t ind, const BtreeKey& key, const BtreeValue& val) override { + LOGTRACEMOD(btree, "{}:{}", key.to_string(), val.to_string()); + auto sz = insert(ind, key.serialize(), val.serialize()); +#ifndef NDEBUG + validate_sanity(); +#endif + if (sz == 0) { return btree_status_t::insert_failed; } + return btree_status_t::success; + } + +#ifndef NDEBUG + void validate_sanity() { + uint32_t i{0}; + // validate if keys are in ascending order + K prevKey; + while (i < this->get_total_entries()) { + K key = get_nth_key(i, false); + uint64_t kp = *(uint64_t*)key.serialize().bytes; + if (i > 0 && prevKey.compare(key) > 0) { + DEBUG_ASSERT(false, "Found non sorted entry: {} -> {}", kp, to_string()); + } + prevKey = key; + ++i; + } + } +#endif + + /* Update a value in a given index to the provided value. It will support change in size of the new value. + * Assumption: Node lock is already taken, size check for the node to support new value is already done */ + void update(uint32_t ind, const BtreeValue& val) override { + // If we are updating the edge value, none of the other logic matter. Just update edge value and move on + if (ind == this->get_total_entries()) { + DEBUG_ASSERT_EQ(this->is_leaf(), false); + this->set_edge_value(val); + this->inc_gen(); + } else { + K key = get_nth_key(ind, true); + update(ind, key, val); + } + } + + // TODO - currently we do not support variable size key + void update(uint32_t ind, const BtreeKey& key, const BtreeValue& val) override { + LOGTRACEMOD(btree, "Update called:{}", to_string()); + DEBUG_ASSERT_LE(ind, this->get_total_entries()); + + // If we are updating the edge value, none of the other logic matter. Just update edge value and move on + if (ind == this->get_total_entries()) { + DEBUG_ASSERT_EQ(this->is_leaf(), false); + this->set_edge_value(val); + this->inc_gen(); + return; + } + + // Determine if we are doing same size update or smaller size update, in that case, reuse the space. + uint16_t nth_key_len = get_nth_key_len(ind); + uint16_t new_obj_size = nth_key_len + val.serialized_size(); + uint16_t cur_obj_size = get_nth_obj_size(ind); + + if (cur_obj_size >= new_obj_size) { + uint8_t* val_ptr = (uint8_t*)get_nth_obj(ind) + nth_key_len; + sisl::blob vblob = val.serialize(); + DEBUG_ASSERT_EQ(vblob.size, val.serialized_size(), + "Serialized size returned different after serialization"); + + // we can avoid memcpy if addresses of val_ptr and vblob.bytes is same. In place update + if (val_ptr != vblob.bytes) { + // TODO - we can reclaim space if new obj size is lower than cur obj size + // Same or smaller size update, just copy the value blob + LOGTRACEMOD(btree, "Not an in-place update, have to copying data of size {}", vblob.size); + memcpy(val_ptr, vblob.bytes, vblob.size); + } else { + // do nothing + LOGTRACEMOD(btree, "In place update, not copying data."); + } + set_nth_value_len(get_nth_record_mutable(ind), vblob.size); + get_var_node_header()->m_available_space += cur_obj_size - new_obj_size; + this->inc_gen(); + return; + } + + remove(ind, ind); + insert(ind, key, val); + LOGTRACEMOD(btree, "Size changed for either key or value. Had to delete and insert :{}", to_string()); + } + + // ind_s and ind_e are inclusive + void remove(uint32_t ind_s, uint32_t ind_e) override { + uint32_t total_entries = this->get_total_entries(); + assert(total_entries >= ind_s); + assert(total_entries >= ind_e); + uint32_t recSize = this->get_record_size(); + uint32_t no_of_elem = ind_e - ind_s + 1; + if (ind_e == this->get_total_entries()) { + assert(!this->is_leaf() && this->has_valid_edge()); + + V last_1_val; + get_nth_value(ind_s - 1, &last_1_val, false); + this->set_edge_value(last_1_val); + + for (uint32_t i = ind_s; i < total_entries; i++) { + get_var_node_header()->m_available_space += get_nth_key_len(i) + get_nth_value_len(i) + recSize; + } + this->sub_entries(total_entries - ind_s + 1); + } else { + // claim available memory + for (uint32_t i = ind_s; i <= ind_e; i++) { + get_var_node_header()->m_available_space += get_nth_key_len(i) + get_nth_value_len(i) + recSize; + } + uint8_t* rec_ptr = get_nth_record_mutable(ind_s); + memmove(rec_ptr, rec_ptr + recSize * no_of_elem, (this->get_total_entries() - ind_e - 1) * recSize); + + this->sub_entries(no_of_elem); + } + this->inc_gen(); + } + + /*V get(uint32_t ind, bool copy) const { + // Need edge index + if (ind == this->get_total_entries()) { + assert(!this->is_leaf()); + assert(this->has_valid_edge()); + return this->get_edge_value(); + } else { + return get_nth_value(ind, copy); + } + }*/ + + uint32_t move_out_to_right_by_entries(const BtreeConfig& cfg, BtreeNode< K >& o, uint32_t nentries) override { + auto& other = static_cast< VariableNode& >(o); + const auto this_gen = this->get_gen(); + const auto other_gen = other.get_gen(); + + const auto this_nentries = this->get_total_entries(); + nentries = std::min(nentries, this_nentries); + if (nentries == 0) { return 0; /* Nothing to move */ } + + const uint32_t start_ind = this_nentries - 1; + const uint32_t end_ind = this_nentries - nentries; + uint32_t ind = start_ind; + bool full_move{false}; + while (ind >= end_ind) { + // Get the ith key and value blob and then remove the entry from here and insert to the other node + sisl::blob kb; + kb.bytes = (uint8_t*)get_nth_obj(ind); + kb.size = get_nth_key_len(ind); + + sisl::blob vb; + vb.bytes = kb.bytes + kb.size; + vb.size = get_nth_value_len(ind); + + auto sz = other.insert(0, kb, vb); + if (!sz) { break; } + if (ind == 0) { + full_move = true; + break; + } + --ind; + } + + if (!this->is_leaf() && (other.get_total_entries() != 0)) { + // Incase this node is an edge node, move the stick to the right hand side node + other.set_edge_id(this->get_edge_id()); + this->invalidate_edge(); + } + remove(full_move ? 0u : ind + 1, start_ind); // Remove all entries in bulk + + // Remove and insert would have set the gen multiple increments, just reset it to increment only by 1 + // TODO: This is bit ugly but needed in-order to avoid repeat the same code again, but see if we can produce + // interface around it. + this->set_gen(this_gen + 1); + other.set_gen(other_gen + 1); + + return (start_ind - ind); + } + + uint32_t move_out_to_right_by_size(const BtreeConfig& cfg, BtreeNode< K >& o, uint32_t size_to_move) override { + auto& other = static_cast< VariableNode& >(o); + uint32_t moved_size = 0U; + auto this_gen = this->get_gen(); + auto other_gen = other.get_gen(); + + uint32_t ind = this->get_total_entries() - 1; + while (ind > 0) { + sisl::blob kb; + kb.bytes = (uint8_t*)get_nth_obj(ind); + kb.size = get_nth_key_len(ind); + + sisl::blob vb; + vb.bytes = kb.bytes + kb.size; + vb.size = get_nth_value_len(ind); + + auto sz = other.insert(0, kb, vb); // Keep on inserting on the first index, thus moving everything to right + if (!sz) break; + moved_size += sz; + --ind; + if ((kb.size + vb.size + this->get_record_size()) > size_to_move) { + // We reached threshold of how much we could move + break; + } + size_to_move -= sz; + } + remove(ind + 1, this->get_total_entries() - 1); + + if (!this->is_leaf() && (other.get_total_entries() != 0)) { + // Incase this node is an edge node, move the stick to the right hand side node + other.set_edge_id(this->get_edge_id()); + this->invalidate_edge(); + } + + // Remove and insert would have set the gen multiple increments, just reset it to increment only by 1 + // TODO: This is bit ugly but needed in-order to avoid repeat the same code again, but see if we can produce + // interface around it. + this->set_gen(this_gen + 1); + other.set_gen(other_gen + 1); + + return moved_size; + } + + uint32_t move_in_from_right_by_entries(const BtreeConfig& cfg, BtreeNode< K >& o, uint32_t nentries) override { + auto& other = static_cast< VariableNode& >(o); + auto this_gen = this->get_gen(); + auto other_gen = other.get_gen(); + nentries = std::min(nentries, other.get_total_entries()); + + if (nentries == 0) { return 0; /* Nothing to move */ } + uint32_t other_ind = 0; + while (nentries) { + // Get the ith key and value blob and then remove the entry from here and insert to the other node + sisl::blob kb; + kb.bytes = (uint8_t*)other.get_nth_obj(other_ind); + kb.size = other.get_nth_key_len(other_ind); + + sisl::blob vb; + vb.bytes = kb.bytes + kb.size; + vb.size = other.get_nth_value_len(other_ind); + + auto sz = insert(this->get_total_entries(), kb, vb); + if (!sz) { break; } + --nentries; + ++other_ind; + } + + other.remove(0, other_ind - 1); // Remove all entries in bulk + assert(other.get_total_entries() == nentries); + + if (!other.is_leaf() && (other.get_total_entries() == 0)) { + // Incase other node is an edge node and we moved all the data into this node, move over the edge info as + // well. + this->set_edge_id(other.get_edge_id()); + other.invalidate_edge(); + } + + // Remove and insert would have set the gen multiple increments, just reset it to increment only by 1 + // TODO: This is bit ugly but needed in-order to avoid repeat the same code again, but see if we can produce + // interface around it. + this->set_gen(this_gen + 1); + other.set_gen(other_gen + 1); + + return (other_ind); + } + + uint32_t move_in_from_right_by_size(const BtreeConfig& cfg, BtreeNode< K >& o, uint32_t size_to_move) override { + auto& other = static_cast< VariableNode& >(o); + uint32_t moved_size = 0U; + auto this_gen = this->get_gen(); + auto other_gen = other.get_gen(); + + uint32_t ind = 0; + while (ind < this->get_total_entries()) { + sisl::blob kb; + kb.bytes = (uint8_t*)other.get_nth_obj(ind); + kb.size = other.get_nth_key_len(ind); + + sisl::blob vb; + vb.bytes = kb.bytes + kb.size; + vb.size = other.get_nth_value_len(ind); + + if ((kb.size + vb.size + other.get_record_size()) > size_to_move) { + // We reached threshold of how much we could move + break; + } + auto sz = insert(this->get_total_entries(), kb, vb); // Keep on inserting on the last index. + if (!sz) break; + moved_size += sz; + ind++; + size_to_move -= sz; + } + if (ind) other.remove(0, ind - 1); + + if (!other.is_leaf() && (other.get_total_entries() == 0)) { + // Incase other node is an edge node and we moved all the data into this node, move over the edge info as + // well. + this->set_edge_id(other.get_edge_id()); + other.invalidate_edge(); + } + + // Remove and insert would have set the gen multiple increments, just reset it to increment only by 1 + // TODO: This is bit ugly but needed in-order to avoid repeat the same code again, but see if we can produce + // interface around it. + this->set_gen(this_gen + 1); + other.set_gen(other_gen + 1); + + return moved_size; + } + void append(uint32_t ind, const BtreeKey& key, const BtreeValue& val) override { + RELEASE_ASSERT(false, "Append operation is not supported on var node"); + } + + uint32_t get_available_size(const BtreeConfig& cfg) const override { + return get_var_node_header_const()->m_available_space; + } + + uint32_t get_nth_obj_size(uint32_t ind) const override { return get_nth_key_len(ind) + get_nth_value_len(ind); } + + void set_nth_key(uint32_t ind, const BtreeKey& key) { + const auto kb = key.serialize(); + assert(ind < this->get_total_entries()); + assert(kb.size == get_nth_key_len(ind)); + memcpy(uintptr_cast(get_nth_obj(ind)), kb.bytes, kb.size); + } + + virtual uint16_t get_nth_key_len(uint32_t ind) const = 0; + virtual uint16_t get_nth_value_len(uint32_t ind) const = 0; + virtual void set_nth_key_len(uint8_t* rec_ptr, uint16_t key_len) = 0; + virtual void set_nth_value_len(uint8_t* rec_ptr, uint16_t value_len) = 0; + + K get_nth_key(uint32_t ind, bool copy) const { + assert(ind < this->get_total_entries()); + sisl::blob b{const_cast< uint8_t* >(get_nth_obj(ind)), get_nth_key_len(ind)}; + return K{b, copy}; + } + + void get_nth_value(uint32_t ind, BtreeValue* out_val, bool copy) const override { + sisl::blob b{const_cast< uint8_t* >(get_nth_obj(ind)) + get_nth_key_len(ind), get_nth_value_len(ind)}; + out_val->deserialize(b, copy); + } + + /*V get_nth_value(uint32_t ind, bool copy) const { + assert(ind < this->get_total_entries()); + sisl::blob b{const_cast< uint8_t* >(get_nth_obj(ind)) + get_nth_key_len(ind), get_nth_value_len(ind)}; + return V{b, copy}; + }*/ + + std::string to_string(bool print_friendly = false) const override { + auto str = fmt::format( + "{}id={} nEntries={} {} free_space={} ", + (print_friendly ? "---------------------------------------------------------------------\n" : ""), + this->get_node_id(), this->get_total_entries(), (this->is_leaf() ? "LEAF" : "INTERIOR"), + get_var_node_header_const()->m_available_space); + if (!this->is_leaf() && (this->has_valid_edge())) { + fmt::format_to(std::back_inserter(str), "edge_id={} ", this->get_edge_id()); + } + for (uint32_t i{0}; i < this->get_total_entries(); ++i) { + V val; + get_nth_value(i, &val, false); + fmt::format_to(std::back_inserter(str), "{}Entry{} [Key={} Val={}]", (print_friendly ? "\n\t" : " "), i + 1, + get_nth_key(i, false).to_string(), val.to_string()); + } + return str; + } + + int compare_nth_key(const BtreeKey& cmp_key, uint32_t ind) const { + return get_nth_key(ind, false).compare(cmp_key); + } + + /*int compare_nth_key_range(const BtreeKeyRange& range, uint32_t ind) const { + return get_nth_key(ind, false).compare_range(range); + }*/ + +protected: + uint32_t insert(uint32_t ind, const sisl::blob& key_blob, const sisl::blob& val_blob) { + assert(ind <= this->get_total_entries()); + LOGTRACEMOD(btree, "{}:{}:{}:{}", ind, get_var_node_header()->tail_offset(), get_arena_free_space(), + get_var_node_header()->available_space()); + uint16_t obj_size = key_blob.size + val_blob.size; + uint16_t to_insert_size = obj_size + this->get_record_size(); + if (to_insert_size > get_var_node_header()->available_space()) { + LOGDEBUGMOD(btree, "insert failed insert size {} available size {}", to_insert_size, + get_var_node_header()->available_space()); + return 0; + } + + // If we don't have enough space in the tail arena area, we need to compact and get the space. + if (to_insert_size > get_arena_free_space()) { + compact(); + assert(to_insert_size <= + get_arena_free_space()); // Expect after compaction to have available space to insert + } + + // Create a room for a new record + uint8_t* rec_ptr = uintptr_cast(get_nth_record_mutable(ind)); + memmove((void*)(rec_ptr + this->get_record_size()), rec_ptr, + (this->get_total_entries() - ind) * this->get_record_size()); + + // Move up the tail area + assert(get_var_node_header()->m_tail_arena_offset > obj_size); + get_var_node_header()->m_tail_arena_offset -= obj_size; + get_var_node_header()->m_available_space -= (obj_size + this->get_record_size()); + + // Create a new record + set_nth_key_len(rec_ptr, key_blob.size); + set_nth_value_len(rec_ptr, val_blob.size); + set_record_data_offset(rec_ptr, get_var_node_header()->m_tail_arena_offset); + + // Copy the contents of key and value in the offset + uint8_t* raw_data_ptr = offset_to_ptr_mutable(get_var_node_header()->m_tail_arena_offset); + memcpy(raw_data_ptr, key_blob.bytes, key_blob.size); + raw_data_ptr += key_blob.size; + memcpy(raw_data_ptr, val_blob.bytes, val_blob.size); + + // Increment the entries and generation number + this->inc_entries(); + this->inc_gen(); + +#ifndef NDEBUG + this->validate_sanity(); +#endif + +#ifdef DEBUG + // print(); +#endif + return to_insert_size; + } + + /* + * This method compacts and provides contiguous tail arena space + * so that available space == tail arena space + * */ + void compact() { +#ifndef NDEBUG + this->validate_sanity(); +#endif + // temp ds to sort records in stack space + struct Record { + uint16_t m_obj_offset; + uint16_t orig_record_index; + }; + + uint32_t no_of_entries = this->get_total_entries(); + if (no_of_entries == 0) { + // this happens when there is only entry and in update, we first remove and than insert + get_var_node_header()->m_tail_arena_offset = get_var_node_header()->m_init_available_space; + LOGTRACEMOD(btree, "Full available size reclaimed"); + return; + } + std::vector< Record > rec; + rec.reserve(no_of_entries); + + uint32_t ind = 0; + while (ind < no_of_entries) { + btree_obj_record* rec_ptr = (btree_obj_record*)(get_nth_record_mutable(ind)); + rec[ind].m_obj_offset = rec_ptr->m_obj_offset; + rec[ind].orig_record_index = ind; + ind++; + } + + // use comparator to sort based on m_obj_offset in desc order + std::sort(rec.begin(), rec.begin() + no_of_entries, + [](Record const& a, Record const& b) -> bool { return b.m_obj_offset < a.m_obj_offset; }); + + uint16_t last_offset = get_var_node_header()->m_init_available_space; + + ind = 0; + uint16_t sparce_space = 0; + // loop records + while (ind < no_of_entries) { + uint16_t total_key_value_len = + get_nth_key_len(rec[ind].orig_record_index) + get_nth_value_len(rec[ind].orig_record_index); + sparce_space = last_offset - (rec[ind].m_obj_offset + total_key_value_len); + if (sparce_space > 0) { + // do compaction + uint8_t* old_key_ptr = (uint8_t*)get_nth_obj(rec[ind].orig_record_index); + uint8_t* raw_data_ptr = old_key_ptr + sparce_space; + memmove(raw_data_ptr, old_key_ptr, total_key_value_len); + + // update original record + btree_obj_record* rec_ptr = (btree_obj_record*)(get_nth_record_mutable(rec[ind].orig_record_index)); + rec_ptr->m_obj_offset += sparce_space; + + last_offset = rec_ptr->m_obj_offset; + + } else { + assert(sparce_space == 0); + last_offset = rec[ind].m_obj_offset; + } + ind++; + } + get_var_node_header()->m_tail_arena_offset = last_offset; +#ifndef NDEBUG + this->validate_sanity(); +#endif + LOGTRACEMOD(btree, "Sparse space reclaimed:{}", sparce_space); + } + + const uint8_t* get_nth_record(uint32_t ind) const { + return this->node_data_area_const() + sizeof(var_node_header) + (ind * this->get_record_size()); + } + uint8_t* get_nth_record_mutable(uint32_t ind) { + return this->node_data_area() + sizeof(var_node_header) + (ind * this->get_record_size()); + } + + const uint8_t* get_nth_obj(uint32_t ind) const { + return offset_to_ptr(((btree_obj_record*)get_nth_record(ind))->m_obj_offset); + } + uint8_t* get_nth_obj_mutable(uint32_t ind) { + return offset_to_ptr_mutable(((btree_obj_record*)get_nth_record(ind))->m_obj_offset); + } + + void set_record_data_offset(uint8_t* rec_ptr, uint16_t offset) { + auto r = (btree_obj_record*)rec_ptr; + r->m_obj_offset = offset; + } + + uint8_t* offset_to_ptr_mutable(uint16_t offset) { return this->node_data_area() + offset; } + + const uint8_t* offset_to_ptr(uint16_t offset) const { return this->node_data_area_const() + offset; } + + ///////////// Other Private Methods ////////////////// + inline var_node_header* get_var_node_header() { return r_cast< var_node_header* >(this->node_data_area()); } + + inline const var_node_header* get_var_node_header_const() const { + return r_cast< const var_node_header* >(this->node_data_area_const()); + } + + uint16_t get_arena_free_space() const { + return get_var_node_header_const()->m_tail_arena_offset - sizeof(var_node_header) - + (this->get_total_entries() * this->get_record_size()); + } +}; + +template < typename K, typename V > +class VarKeySizeNode : public VariableNode< K, V > { +public: + VarKeySizeNode(uint8_t* node_buf, bnodeid_t id, bool init, bool is_leaf, const BtreeConfig& cfg) : + VariableNode< K, V >(node_buf, id, init, is_leaf, cfg) { + this->set_node_type(btree_node_type::VAR_KEY); + } + + uint16_t get_nth_key_len(uint32_t ind) const override { + return r_cast< const var_key_record* >(this->get_nth_record(ind))->m_key_len; + } + uint16_t get_nth_value_len(uint32_t ind) const override { return V::get_fixed_size(); } + uint16_t get_record_size() const override { return sizeof(var_key_record); } + + void set_nth_key_len(uint8_t* rec_ptr, uint16_t key_len) override { + r_cast< var_key_record* >(rec_ptr)->m_key_len = key_len; + } + void set_nth_value_len(uint8_t* rec_ptr, uint16_t value_len) override { assert(value_len == V::get_fixed_size()); } + +private: +#pragma pack(1) + struct var_key_record : public btree_obj_record { + uint16_t m_key_len : 14; + uint16_t reserved : 2; + }; +#pragma pack() +}; + +/***************** Template Specialization for variable value records ******************/ +template < typename K, typename V > +class VarValueSizeNode : public VariableNode< K, V > { +public: + VarValueSizeNode(uint8_t* node_buf, bnodeid_t id, bool init, bool is_leaf, const BtreeConfig& cfg) : + VariableNode< K, V >(node_buf, id, init, is_leaf, cfg) { + this->set_node_type(btree_node_type::VAR_VALUE); + } + + uint16_t get_nth_key_len(uint32_t ind) const override { return K::get_fixed_size(); } + uint16_t get_nth_value_len(uint32_t ind) const override { + return r_cast< const var_value_record* >(this->get_nth_record(ind))->m_value_len; + } + uint16_t get_record_size() const override { return sizeof(var_value_record); } + + void set_nth_key_len(uint8_t* rec_ptr, uint16_t key_len) override { assert(key_len == K::get_fixed_size()); } + void set_nth_value_len(uint8_t* rec_ptr, uint16_t value_len) override { + r_cast< var_value_record* >(rec_ptr)->m_value_len = value_len; + } + +private: +#pragma pack(1) + struct var_value_record : public btree_obj_record { + uint16_t m_value_len : 14; + uint16_t reserved : 2; + }; +#pragma pack() +}; + +/***************** Template Specialization for variable object records ******************/ +template < typename K, typename V > +class VarObjSizeNode : public VariableNode< K, V > { +public: + VarObjSizeNode(uint8_t* node_buf, bnodeid_t id, bool init, bool is_leaf, const BtreeConfig& cfg) : + VariableNode< K, V >(node_buf, id, init, is_leaf, cfg) { + this->set_node_type(btree_node_type::VAR_OBJECT); + } + + uint16_t get_nth_key_len(uint32_t ind) const override { + return r_cast< const var_obj_record* >(this->get_nth_record(ind))->m_key_len; + } + uint16_t get_nth_value_len(uint32_t ind) const override { + return r_cast< const var_obj_record* >(this->get_nth_record(ind))->m_value_len; + } + uint16_t get_record_size() const override { return sizeof(var_obj_record); } + + void set_nth_key_len(uint8_t* rec_ptr, uint16_t key_len) override { + r_cast< var_obj_record* >(rec_ptr)->m_key_len = key_len; + } + void set_nth_value_len(uint8_t* rec_ptr, uint16_t value_len) override { + r_cast< var_obj_record* >(rec_ptr)->m_value_len = value_len; + } + +private: +#pragma pack(1) + struct var_obj_record : public btree_obj_record { + uint16_t m_key_len : 14; + uint16_t reserved : 2; + + uint16_t m_value_len : 14; + uint16_t reserved2 : 2; + }; +#pragma pack() +}; +} // namespace btree +} // namespace sisl From cafb83b99868ee971cf96487630accad60310527 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 15 Sep 2022 07:27:48 -0700 Subject: [PATCH 113/385] Some fixes for oss build in Jenkins --- .jenkins/Jenkinsfile | 2 +- conanfile.py | 26 ++++++++++++++++++++++++-- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index ab9d0bbb..febfeff8 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -3,7 +3,7 @@ pipeline { environment { ARTIFACTORY_PASS = credentials('ARTIFACTORY_PASS') - CONAN_USER = 'sisl' + CONAN_USER = 'github' TARGET_BRANCH = 'master' TESTING_BRANCH = 'testing/v*' STABLE_BRANCH = 'stable/v*' diff --git a/conanfile.py b/conanfile.py index e9a424af..58e004f8 100644 --- a/conanfile.py +++ b/conanfile.py @@ -20,12 +20,18 @@ class SISLConan(ConanFile): options = { "shared": ['True', 'False'], "fPIC": ['True', 'False'], - 'malloc_impl' : ['libc', 'jemalloc'], + "coverage": ['True', 'False'], + "sanitize": ['True', 'False'], + 'prerelease' : ['True', 'False'], + 'malloc_impl' : ['libc', 'tcmalloc', 'jemalloc'], 'with_evhtp' : ['True', 'False'], } default_options = { 'shared': False, 'fPIC': True, + 'coverage': False, + 'sanitize': False, + 'prerelease': True, 'malloc_impl': 'libc', 'with_evhtp': False, } @@ -40,6 +46,8 @@ def build_requirements(self): def requirements(self): # Custom packages + if self.options.prerelease: + self.requires("prerelease_dummy/1.0.1") # Generic packages (conan-center) self.requires("boost/1.79.0") @@ -60,6 +68,8 @@ def requirements(self): self.requires("zlib/1.2.12", override=True) if self.options.malloc_impl == "jemalloc": self.requires("jemalloc/5.2.1") + elif self.options.malloc_impl == "tcmalloc": + self.requires("gperftools/2.7.0") if self.options.with_evhtp: self.requires("evhtp/1.2.18.2") @@ -74,7 +84,8 @@ def configure(self): def build(self): cmake = CMake(self) - definitions = {'CMAKE_EXPORT_COMPILE_COMMANDS': 'ON', + definitions = {'CONAN_BUILD_COVERAGE': 'OFF', + 'CMAKE_EXPORT_COMPILE_COMMANDS': 'ON', 'MEMORY_SANITIZER_ON': 'OFF', 'EVHTP_ON': 'OFF', 'MALLOC_IMPL': self.options.malloc_impl} @@ -83,6 +94,15 @@ def build(self): if self.options.with_evhtp: definitions['EVHTP_ON'] = 'ON' + if self.settings.build_type == "Debug": + if self.options.sanitize: + definitions['MEMORY_SANITIZER_ON'] = 'ON' + elif self.options.coverage: + definitions['CONAN_BUILD_COVERAGE'] = 'ON' + test_target = 'coverage' + + definitions['MALLOC_IMPL'] = self.options.malloc_impl + cmake.configure(defs=definitions) cmake.build() cmake.test(target=test_target) @@ -116,3 +136,5 @@ def package_info(self): if self.options.malloc_impl == 'jemalloc': self.cpp_info.cppflags.append("-DUSE_JEMALLOC=1") + elif self.options.malloc_impl == 'tcmalloc': + self.cpp_info.cppflags.append("-DUSING_TCMALLOC=1") From 386039e9026d5542bf50817526da41a049774f3f Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 15 Sep 2022 07:27:48 -0700 Subject: [PATCH 114/385] Some fixes for oss build in Jenkins --- .jenkins/Jenkinsfile | 2 +- conanfile.py | 26 ++++++++++++++++++++++++-- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index ab9d0bbb..febfeff8 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -3,7 +3,7 @@ pipeline { environment { ARTIFACTORY_PASS = credentials('ARTIFACTORY_PASS') - CONAN_USER = 'sisl' + CONAN_USER = 'github' TARGET_BRANCH = 'master' TESTING_BRANCH = 'testing/v*' STABLE_BRANCH = 'stable/v*' diff --git a/conanfile.py b/conanfile.py index e9a424af..58e004f8 100644 --- a/conanfile.py +++ b/conanfile.py @@ -20,12 +20,18 @@ class SISLConan(ConanFile): options = { "shared": ['True', 'False'], "fPIC": ['True', 'False'], - 'malloc_impl' : ['libc', 'jemalloc'], + "coverage": ['True', 'False'], + "sanitize": ['True', 'False'], + 'prerelease' : ['True', 'False'], + 'malloc_impl' : ['libc', 'tcmalloc', 'jemalloc'], 'with_evhtp' : ['True', 'False'], } default_options = { 'shared': False, 'fPIC': True, + 'coverage': False, + 'sanitize': False, + 'prerelease': True, 'malloc_impl': 'libc', 'with_evhtp': False, } @@ -40,6 +46,8 @@ def build_requirements(self): def requirements(self): # Custom packages + if self.options.prerelease: + self.requires("prerelease_dummy/1.0.1") # Generic packages (conan-center) self.requires("boost/1.79.0") @@ -60,6 +68,8 @@ def requirements(self): self.requires("zlib/1.2.12", override=True) if self.options.malloc_impl == "jemalloc": self.requires("jemalloc/5.2.1") + elif self.options.malloc_impl == "tcmalloc": + self.requires("gperftools/2.7.0") if self.options.with_evhtp: self.requires("evhtp/1.2.18.2") @@ -74,7 +84,8 @@ def configure(self): def build(self): cmake = CMake(self) - definitions = {'CMAKE_EXPORT_COMPILE_COMMANDS': 'ON', + definitions = {'CONAN_BUILD_COVERAGE': 'OFF', + 'CMAKE_EXPORT_COMPILE_COMMANDS': 'ON', 'MEMORY_SANITIZER_ON': 'OFF', 'EVHTP_ON': 'OFF', 'MALLOC_IMPL': self.options.malloc_impl} @@ -83,6 +94,15 @@ def build(self): if self.options.with_evhtp: definitions['EVHTP_ON'] = 'ON' + if self.settings.build_type == "Debug": + if self.options.sanitize: + definitions['MEMORY_SANITIZER_ON'] = 'ON' + elif self.options.coverage: + definitions['CONAN_BUILD_COVERAGE'] = 'ON' + test_target = 'coverage' + + definitions['MALLOC_IMPL'] = self.options.malloc_impl + cmake.configure(defs=definitions) cmake.build() cmake.test(target=test_target) @@ -116,3 +136,5 @@ def package_info(self): if self.options.malloc_impl == 'jemalloc': self.cpp_info.cppflags.append("-DUSE_JEMALLOC=1") + elif self.options.malloc_impl == 'tcmalloc': + self.cpp_info.cppflags.append("-DUSING_TCMALLOC=1") From d87743956253648270ccefdbef331e04cadf8ac7 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 15 Sep 2022 07:43:02 -0700 Subject: [PATCH 115/385] add output and disable deploy for now. --- .jenkins/Jenkinsfile | 1 + conanfile.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index febfeff8..455b77c9 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -149,6 +149,7 @@ pipeline { stage("Deploy") { when { allOf { + branch "${STABLE_BRANCH}" expression { "${COVERAGE}" == 'False' } expression { "${BUILD_TYPE}" != 'sanitize' } expression { not { branch "PR_*" } } diff --git a/conanfile.py b/conanfile.py index 58e004f8..eec23fd6 100644 --- a/conanfile.py +++ b/conanfile.py @@ -105,7 +105,7 @@ def build(self): cmake.configure(defs=definitions) cmake.build() - cmake.test(target=test_target) + cmake.test(target=test_target, output_on_failure=True) def package(self): lib_dir = join(self.package_folder, "lib") From 2ab1bcc8cf0c09bddd8de4d759129e785ec729b9 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 15 Sep 2022 07:43:02 -0700 Subject: [PATCH 116/385] add output and disable deploy for now. --- .jenkins/Jenkinsfile | 1 + conanfile.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index febfeff8..455b77c9 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -149,6 +149,7 @@ pipeline { stage("Deploy") { when { allOf { + branch "${STABLE_BRANCH}" expression { "${COVERAGE}" == 'False' } expression { "${BUILD_TYPE}" != 'sanitize' } expression { not { branch "PR_*" } } diff --git a/conanfile.py b/conanfile.py index 58e004f8..eec23fd6 100644 --- a/conanfile.py +++ b/conanfile.py @@ -105,7 +105,7 @@ def build(self): cmake.configure(defs=definitions) cmake.build() - cmake.test(target=test_target) + cmake.test(target=test_target, output_on_failure=True) def package(self): lib_dir = join(self.package_folder, "lib") From ee7e433768261e53e2d4c0fc5db4134ab2c594b2 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Wed, 21 Sep 2022 16:45:15 -0700 Subject: [PATCH 117/385] Set prerelease option false for workflow builds --- .github/workflows/build_with_conan.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_with_conan.yml b/.github/workflows/build_with_conan.yml index d3d5773b..b92509ba 100644 --- a/.github/workflows/build_with_conan.yml +++ b/.github/workflows/build_with_conan.yml @@ -41,7 +41,7 @@ jobs: - name: Install dependencies # Build your program with the given configuration run: | - conan install -s build_type=${{ matrix.build-type }} --build missing . + conan install -o prerelease=False -s build_type=${{ matrix.build-type }} --build missing . - name: Build # Build your program with the given configuration From f67fb4cc0598afc3d2acb45529c713498a9cae40 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Wed, 21 Sep 2022 16:45:15 -0700 Subject: [PATCH 118/385] Set prerelease option false for workflow builds --- .github/workflows/build_with_conan.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_with_conan.yml b/.github/workflows/build_with_conan.yml index d3d5773b..b92509ba 100644 --- a/.github/workflows/build_with_conan.yml +++ b/.github/workflows/build_with_conan.yml @@ -41,7 +41,7 @@ jobs: - name: Install dependencies # Build your program with the given configuration run: | - conan install -s build_type=${{ matrix.build-type }} --build missing . + conan install -o prerelease=False -s build_type=${{ matrix.build-type }} --build missing . - name: Build # Build your program with the given configuration From 1b349c1c2848cbd3f679b895d87af4c0e854c0d0 Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Wed, 21 Sep 2022 20:55:09 -0700 Subject: [PATCH 119/385] Fixed leak report on histogram reporting, downgraded folly to prevent build failures --- conanfile.py | 4 ++-- src/metrics/prometheus_reporter.hpp | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/conanfile.py b/conanfile.py index eec23fd6..000eb102 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.0.2" + version = "8.0.3" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") @@ -54,7 +54,7 @@ def requirements(self): self.requires("cpr/1.8.1") self.requires("cxxopts/2.2.1") self.requires("flatbuffers/1.12.0") - self.requires("folly/2022.01.31.00") + self.requires("folly/2020.08.10.00") self.requires("jwt-cpp/0.4.0") self.requires("nlohmann_json/3.10.5") self.requires("zmarok-semver/1.1.0") diff --git a/src/metrics/prometheus_reporter.hpp b/src/metrics/prometheus_reporter.hpp index 051302b6..efc497d8 100644 --- a/src/metrics/prometheus_reporter.hpp +++ b/src/metrics/prometheus_reporter.hpp @@ -77,7 +77,10 @@ class PrometheusReportHistogram : public ReportHistogram { // Since histogram doesn't have reset facility (PR is yet to be accepted in the main repo), // we are doing a placement new to reconstruct the entire object to force to call its constructor. This // way we don't need to register histogram again to family. + using namespace prometheus; + bucket_values.resize(m_bkt_boundaries.size() + 1); + m_histogram.~Histogram(); prometheus::Histogram* inplace_hist = new ((void*)&m_histogram) prometheus::Histogram(m_bkt_boundaries); inplace_hist->ObserveMultiple(bucket_values, sum); } From 99b279a41afcdaf54f19ccc068e3cac33de7eadf Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Wed, 21 Sep 2022 20:55:09 -0700 Subject: [PATCH 120/385] Fixed leak report on histogram reporting, downgraded folly to prevent build failures --- conanfile.py | 4 ++-- src/metrics/prometheus_reporter.hpp | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/conanfile.py b/conanfile.py index eec23fd6..000eb102 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.0.2" + version = "8.0.3" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") @@ -54,7 +54,7 @@ def requirements(self): self.requires("cpr/1.8.1") self.requires("cxxopts/2.2.1") self.requires("flatbuffers/1.12.0") - self.requires("folly/2022.01.31.00") + self.requires("folly/2020.08.10.00") self.requires("jwt-cpp/0.4.0") self.requires("nlohmann_json/3.10.5") self.requires("zmarok-semver/1.1.0") diff --git a/src/metrics/prometheus_reporter.hpp b/src/metrics/prometheus_reporter.hpp index 051302b6..efc497d8 100644 --- a/src/metrics/prometheus_reporter.hpp +++ b/src/metrics/prometheus_reporter.hpp @@ -77,7 +77,10 @@ class PrometheusReportHistogram : public ReportHistogram { // Since histogram doesn't have reset facility (PR is yet to be accepted in the main repo), // we are doing a placement new to reconstruct the entire object to force to call its constructor. This // way we don't need to register histogram again to family. + using namespace prometheus; + bucket_values.resize(m_bkt_boundaries.size() + 1); + m_histogram.~Histogram(); prometheus::Histogram* inplace_hist = new ((void*)&m_histogram) prometheus::Histogram(m_bkt_boundaries); inplace_hist->ObserveMultiple(bucket_values, sum); } From 2827d4669f801fc0fb54ba93f0fe565e18e295e8 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Wed, 21 Sep 2022 23:01:07 -0700 Subject: [PATCH 121/385] Temporarily pick dependencies from conancenter --- .jenkins/Jenkinsfile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 455b77c9..2a82c93b 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -134,7 +134,8 @@ pipeline { expression { "${BUILD_TYPE}" != 'sanitize' } } } steps { - sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o malloc_impl=${ALLOC_IMPL} -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" + sh "conan remote add conancenter https://center.conan.io" + sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o malloc_impl=${ALLOC_IMPL} -r conancenter -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" } } @@ -143,7 +144,8 @@ pipeline { expression { "${BUILD_TYPE}" == 'sanitize' } } } steps { - sh "conan create ${BUILD_MISSING} -o malloc_impl=${ALLOC_IMPL} -o sanitize=True -pr debug . ${PROJECT}/${TAG}" + sh "conan remote add conancenter https://center.conan.io" + sh "conan create ${BUILD_MISSING} -o malloc_impl=${ALLOC_IMPL} -o sanitize=True -r conancenter -pr debug . ${PROJECT}/${TAG}" } } From 5e3a3b48ea5a44f1c16692e5fe78c65f58b3aaa5 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Wed, 21 Sep 2022 23:01:07 -0700 Subject: [PATCH 122/385] Temporarily pick dependencies from conancenter --- .jenkins/Jenkinsfile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 455b77c9..2a82c93b 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -134,7 +134,8 @@ pipeline { expression { "${BUILD_TYPE}" != 'sanitize' } } } steps { - sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o malloc_impl=${ALLOC_IMPL} -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" + sh "conan remote add conancenter https://center.conan.io" + sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o malloc_impl=${ALLOC_IMPL} -r conancenter -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" } } @@ -143,7 +144,8 @@ pipeline { expression { "${BUILD_TYPE}" == 'sanitize' } } } steps { - sh "conan create ${BUILD_MISSING} -o malloc_impl=${ALLOC_IMPL} -o sanitize=True -pr debug . ${PROJECT}/${TAG}" + sh "conan remote add conancenter https://center.conan.io" + sh "conan create ${BUILD_MISSING} -o malloc_impl=${ALLOC_IMPL} -o sanitize=True -r conancenter -pr debug . ${PROJECT}/${TAG}" } } From ed28e935ec530d6c8479bfe4aa120565232e9d42 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Wed, 21 Sep 2022 23:11:21 -0700 Subject: [PATCH 123/385] Attempt to remove remotes to get it pull from correct repo --- .jenkins/Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 2a82c93b..a6105136 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -135,7 +135,7 @@ pipeline { } } steps { sh "conan remote add conancenter https://center.conan.io" - sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o malloc_impl=${ALLOC_IMPL} -r conancenter -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" + sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o malloc_impl=${ALLOC_IMPL} -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" } } @@ -145,7 +145,7 @@ pipeline { } } steps { sh "conan remote add conancenter https://center.conan.io" - sh "conan create ${BUILD_MISSING} -o malloc_impl=${ALLOC_IMPL} -o sanitize=True -r conancenter -pr debug . ${PROJECT}/${TAG}" + sh "conan create ${BUILD_MISSING} -o malloc_impl=${ALLOC_IMPL} -o sanitize=True -pr debug . ${PROJECT}/${TAG}" } } From 8623fdb99dfb8db2bc4045e6a9f0374f47328076 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Wed, 21 Sep 2022 23:11:21 -0700 Subject: [PATCH 124/385] Attempt to remove remotes to get it pull from correct repo --- .jenkins/Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 2a82c93b..a6105136 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -135,7 +135,7 @@ pipeline { } } steps { sh "conan remote add conancenter https://center.conan.io" - sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o malloc_impl=${ALLOC_IMPL} -r conancenter -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" + sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o malloc_impl=${ALLOC_IMPL} -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" } } @@ -145,7 +145,7 @@ pipeline { } } steps { sh "conan remote add conancenter https://center.conan.io" - sh "conan create ${BUILD_MISSING} -o malloc_impl=${ALLOC_IMPL} -o sanitize=True -r conancenter -pr debug . ${PROJECT}/${TAG}" + sh "conan create ${BUILD_MISSING} -o malloc_impl=${ALLOC_IMPL} -o sanitize=True -pr debug . ${PROJECT}/${TAG}" } } From a0ed9bc12d3a7c38d7dfeb8b86ffb9a51777cdb7 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Wed, 21 Sep 2022 23:27:33 -0700 Subject: [PATCH 125/385] Put the correct priority order to pull conan dependencies --- .jenkins/Jenkinsfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index a6105136..70edea83 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -134,7 +134,8 @@ pipeline { expression { "${BUILD_TYPE}" != 'sanitize' } } } steps { - sh "conan remote add conancenter https://center.conan.io" + sh "conan remote add conancenter https://center.conan.io --insert 0" + sh "conan remote add_ref prerelease_dummy/1.0.1 ebay-local" sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o malloc_impl=${ALLOC_IMPL} -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" } } @@ -144,7 +145,7 @@ pipeline { expression { "${BUILD_TYPE}" == 'sanitize' } } } steps { - sh "conan remote add conancenter https://center.conan.io" + sh "conan remote add conancenter https://center.conan.io --insert 0" sh "conan create ${BUILD_MISSING} -o malloc_impl=${ALLOC_IMPL} -o sanitize=True -pr debug . ${PROJECT}/${TAG}" } } From c108fe096219c37981296c628cffd6e8f81d4ce7 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Wed, 21 Sep 2022 23:27:33 -0700 Subject: [PATCH 126/385] Put the correct priority order to pull conan dependencies --- .jenkins/Jenkinsfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index a6105136..70edea83 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -134,7 +134,8 @@ pipeline { expression { "${BUILD_TYPE}" != 'sanitize' } } } steps { - sh "conan remote add conancenter https://center.conan.io" + sh "conan remote add conancenter https://center.conan.io --insert 0" + sh "conan remote add_ref prerelease_dummy/1.0.1 ebay-local" sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o malloc_impl=${ALLOC_IMPL} -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" } } @@ -144,7 +145,7 @@ pipeline { expression { "${BUILD_TYPE}" == 'sanitize' } } } steps { - sh "conan remote add conancenter https://center.conan.io" + sh "conan remote add conancenter https://center.conan.io --insert 0" sh "conan create ${BUILD_MISSING} -o malloc_impl=${ALLOC_IMPL} -o sanitize=True -pr debug . ${PROJECT}/${TAG}" } } From 49553578b32c708a8e111bb87c49e13a3be720eb Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Fri, 23 Sep 2022 15:01:39 -0700 Subject: [PATCH 127/385] Bring folly back to latest and attempting again to resolve build failures --- .jenkins/Jenkinsfile | 3 --- conanfile.py | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 70edea83..455b77c9 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -134,8 +134,6 @@ pipeline { expression { "${BUILD_TYPE}" != 'sanitize' } } } steps { - sh "conan remote add conancenter https://center.conan.io --insert 0" - sh "conan remote add_ref prerelease_dummy/1.0.1 ebay-local" sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o malloc_impl=${ALLOC_IMPL} -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" } } @@ -145,7 +143,6 @@ pipeline { expression { "${BUILD_TYPE}" == 'sanitize' } } } steps { - sh "conan remote add conancenter https://center.conan.io --insert 0" sh "conan create ${BUILD_MISSING} -o malloc_impl=${ALLOC_IMPL} -o sanitize=True -pr debug . ${PROJECT}/${TAG}" } } diff --git a/conanfile.py b/conanfile.py index 000eb102..54c43b42 100644 --- a/conanfile.py +++ b/conanfile.py @@ -54,7 +54,7 @@ def requirements(self): self.requires("cpr/1.8.1") self.requires("cxxopts/2.2.1") self.requires("flatbuffers/1.12.0") - self.requires("folly/2020.08.10.00") + self.requires("folly/2022.01.31.00") self.requires("jwt-cpp/0.4.0") self.requires("nlohmann_json/3.10.5") self.requires("zmarok-semver/1.1.0") From d6824e69420782729b59de333d38c7fad3c024ad Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Fri, 23 Sep 2022 15:01:39 -0700 Subject: [PATCH 128/385] Bring folly back to latest and attempting again to resolve build failures --- .jenkins/Jenkinsfile | 3 --- conanfile.py | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 70edea83..455b77c9 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -134,8 +134,6 @@ pipeline { expression { "${BUILD_TYPE}" != 'sanitize' } } } steps { - sh "conan remote add conancenter https://center.conan.io --insert 0" - sh "conan remote add_ref prerelease_dummy/1.0.1 ebay-local" sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o malloc_impl=${ALLOC_IMPL} -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" } } @@ -145,7 +143,6 @@ pipeline { expression { "${BUILD_TYPE}" == 'sanitize' } } } steps { - sh "conan remote add conancenter https://center.conan.io --insert 0" sh "conan create ${BUILD_MISSING} -o malloc_impl=${ALLOC_IMPL} -o sanitize=True -pr debug . ${PROJECT}/${TAG}" } } diff --git a/conanfile.py b/conanfile.py index 000eb102..54c43b42 100644 --- a/conanfile.py +++ b/conanfile.py @@ -54,7 +54,7 @@ def requirements(self): self.requires("cpr/1.8.1") self.requires("cxxopts/2.2.1") self.requires("flatbuffers/1.12.0") - self.requires("folly/2020.08.10.00") + self.requires("folly/2022.01.31.00") self.requires("jwt-cpp/0.4.0") self.requires("nlohmann_json/3.10.5") self.requires("zmarok-semver/1.1.0") From fd9e953916d5e78652d32cba11ec3096aaccaf69 Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Fri, 23 Sep 2022 15:47:39 -0700 Subject: [PATCH 129/385] Allow sisl to be deployed and changed channel to oss --- .jenkins/Jenkinsfile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 455b77c9..060262e5 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -3,7 +3,7 @@ pipeline { environment { ARTIFACTORY_PASS = credentials('ARTIFACTORY_PASS') - CONAN_USER = 'github' + CONAN_USER = 'oss' TARGET_BRANCH = 'master' TESTING_BRANCH = 'testing/v*' STABLE_BRANCH = 'stable/v*' @@ -149,7 +149,6 @@ pipeline { stage("Deploy") { when { allOf { - branch "${STABLE_BRANCH}" expression { "${COVERAGE}" == 'False' } expression { "${BUILD_TYPE}" != 'sanitize' } expression { not { branch "PR_*" } } From 1ad08d6005a2a0b9f031b2797936d15c4dbcefc7 Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Fri, 23 Sep 2022 15:47:39 -0700 Subject: [PATCH 130/385] Allow sisl to be deployed and changed channel to oss --- .jenkins/Jenkinsfile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 455b77c9..060262e5 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -3,7 +3,7 @@ pipeline { environment { ARTIFACTORY_PASS = credentials('ARTIFACTORY_PASS') - CONAN_USER = 'github' + CONAN_USER = 'oss' TARGET_BRANCH = 'master' TESTING_BRANCH = 'testing/v*' STABLE_BRANCH = 'stable/v*' @@ -149,7 +149,6 @@ pipeline { stage("Deploy") { when { allOf { - branch "${STABLE_BRANCH}" expression { "${COVERAGE}" == 'False' } expression { "${BUILD_TYPE}" != 'sanitize' } expression { not { branch "PR_*" } } From 035f8f22c590a5901e1a70ab58fea51fa264c491 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Mon, 26 Sep 2022 16:08:55 -0700 Subject: [PATCH 131/385] Merge Symbiosis sisl (#28) * wip * Remove http server from sisl * use pistache package from conancenter * remove internal reference Co-authored-by: Ravi Akella email = raakella@ebay.com --- CMakeLists.txt | 6 - conanfile.py | 12 +- src/async_http/CMakeLists.txt | 31 - src/async_http/http_server.hpp | 583 ------------------- src/async_http/tests/AuthTest.cpp | 517 ---------------- src/async_http/tests/id_rsa | 28 - src/async_http/tests/id_rsa.pub | 9 - src/async_http/tests/id_rsa1.pub | 9 - src/async_http/tests/test_http_server.cpp | 195 ------- src/auth_manager/CMakeLists.txt | 17 + src/auth_manager/tests/AuthTest.cpp | 310 ++++++++++ src/auth_manager/tests/basic_http_server.hpp | 50 ++ src/auth_manager/tests/test_token.hpp | 110 ++++ 13 files changed, 489 insertions(+), 1388 deletions(-) delete mode 100644 src/async_http/CMakeLists.txt delete mode 100644 src/async_http/http_server.hpp delete mode 100644 src/async_http/tests/AuthTest.cpp delete mode 100644 src/async_http/tests/id_rsa delete mode 100644 src/async_http/tests/id_rsa.pub delete mode 100644 src/async_http/tests/id_rsa1.pub delete mode 100644 src/async_http/tests/test_http_server.cpp create mode 100644 src/auth_manager/tests/AuthTest.cpp create mode 100644 src/auth_manager/tests/basic_http_server.hpp create mode 100644 src/auth_manager/tests/test_token.hpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 632d675c..81040e97 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -108,12 +108,6 @@ endif() include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/src/auth_manager) include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/src/settings) -if (DEFINED EVHTP_ON) - if (${EVHTP_ON}) - add_subdirectory (src/async_http) - endif() -endif() - #add_subdirectory (src/btree) add_subdirectory (src/cache) add_subdirectory (src/logging) diff --git a/conanfile.py b/conanfile.py index 54c43b42..f028467c 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.0.3" + version = "8.1.1" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") @@ -24,7 +24,6 @@ class SISLConan(ConanFile): "sanitize": ['True', 'False'], 'prerelease' : ['True', 'False'], 'malloc_impl' : ['libc', 'tcmalloc', 'jemalloc'], - 'with_evhtp' : ['True', 'False'], } default_options = { 'shared': False, @@ -33,7 +32,6 @@ class SISLConan(ConanFile): 'sanitize': False, 'prerelease': True, 'malloc_impl': 'libc', - 'with_evhtp': False, } generators = "cmake", "cmake_find_package" @@ -42,7 +40,7 @@ class SISLConan(ConanFile): def build_requirements(self): self.build_requires("benchmark/1.6.1") self.build_requires("gtest/1.11.0") - + self.build_requires("pistache/cci.20201127") def requirements(self): # Custom packages @@ -70,8 +68,6 @@ def requirements(self): self.requires("jemalloc/5.2.1") elif self.options.malloc_impl == "tcmalloc": self.requires("gperftools/2.7.0") - if self.options.with_evhtp: - self.requires("evhtp/1.2.18.2") def validate(self): if self.info.settings.compiler.cppstd: @@ -87,13 +83,9 @@ def build(self): definitions = {'CONAN_BUILD_COVERAGE': 'OFF', 'CMAKE_EXPORT_COMPILE_COMMANDS': 'ON', 'MEMORY_SANITIZER_ON': 'OFF', - 'EVHTP_ON': 'OFF', 'MALLOC_IMPL': self.options.malloc_impl} test_target = None - if self.options.with_evhtp: - definitions['EVHTP_ON'] = 'ON' - if self.settings.build_type == "Debug": if self.options.sanitize: definitions['MEMORY_SANITIZER_ON'] = 'ON' diff --git a/src/async_http/CMakeLists.txt b/src/async_http/CMakeLists.txt deleted file mode 100644 index fb18f2c2..00000000 --- a/src/async_http/CMakeLists.txt +++ /dev/null @@ -1,31 +0,0 @@ -cmake_minimum_required (VERSION 3.10) - -find_package(FlatBuffers REQUIRED) -find_package(evhtp REQUIRED) - -add_flags("-Wno-unused-parameter -Wno-cast-function-type") - -include_directories(BEFORE ..) -include_directories(BEFORE .) - -set(AUTH_DEPS - sisl - ${COMMON_DEPS} - evhtp::evhtp - cpr::cpr - flatbuffers::flatbuffers - jwt-cpp::jwt-cpp - GTest::gmock - ) - -set(TEST_HTTP_SERVER_SOURCES - tests/test_http_server.cpp - ) -add_executable(test_http_server ${TEST_HTTP_SERVER_SOURCES}) -target_link_libraries(test_http_server ${AUTH_DEPS}) - -add_executable(test_http_server_auth - tests/AuthTest.cpp - ) -target_link_libraries(test_http_server_auth ${AUTH_DEPS}) -add_test(NAME test_http_server_auth COMMAND test_http_server_auth) diff --git a/src/async_http/http_server.hpp b/src/async_http/http_server.hpp deleted file mode 100644 index c6fd4e4a..00000000 --- a/src/async_http/http_server.hpp +++ /dev/null @@ -1,583 +0,0 @@ -/********************************************************************************* - * Modifications Copyright 2017-2019 eBay Inc. - * - * Author/Developer(s): Harihara Kadayam - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed - * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - * - *********************************************************************************/ -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef __linux__ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#endif - -#include -#include -#include -#include -#include - -#include "auth_manager/auth_manager.hpp" -#include "logging/logging.h" -#include "options/options.h" -#include "utility/obj_life_counter.hpp" -#include "utility/thread_factory.hpp" - -SISL_LOGGING_DECL(httpserver_lmod) - -namespace sisl { -class AuthManager; - -////////////////////// Config Definitions ////////////////////// -struct HttpServerConfig { - bool is_tls_enabled; - std::string tls_cert_path; - std::string tls_key_path; - std::string bind_address; - uint32_t server_port; - uint32_t read_write_timeout_secs; - bool is_auth_enabled; -}; - -////////////////////// Internal Event Definitions ////////////////////// -enum event_type_t { - CALLBACK, -}; -struct HttpEvent { - event_type_t m_event_type; - std::function< void() > m_closure; -}; -typedef std::list< HttpEvent > EventList; - -////////////////////// API CallData Definitions ////////////////////// -struct _http_calldata : public boost::intrusive_ref_counter< _http_calldata >, sisl::ObjLifeCounter< _http_calldata > { -public: - friend class HttpServer; - - _http_calldata(evhtp_request_t* req, void* arg = nullptr) : - m_req{req}, m_completed{false}, m_arg{arg}, m_http_code{EVHTP_RES_OK}, m_content_type{"application/json"} { - m_req->cbarg = this; - } - - void set_response(evhtp_res code, const std::string& msg) { - m_http_code = code; - m_response_msg = msg; - } - - void complete() { m_completed = true; } - bool is_completed() const { return m_completed; } - evhtp_request_t* request() { return m_req; } - void* cookie() { return m_arg; } - -private: - evhtp_request_t* m_req; - bool m_completed; - void* m_arg; - std::string m_response_msg; - evhtp_res m_http_code; - const char* m_content_type; -}; - -typedef boost::intrusive_ptr< _http_calldata > HttpCallData; - -////////////////////// Handler Definitions ////////////////////// -typedef std::function< void(HttpCallData) > HttpRequestHandler; -struct _handler_info { - std::string m_uri; - evhtp_callback_cb m_callback; - void* m_arg; - - _handler_info(const std::string& uri, evhtp_callback_cb cb, void* arg = nullptr) : - m_uri{uri}, m_callback{cb}, m_arg{arg} {} - - bool operator<(const _handler_info& other) const { return m_uri < other.m_uri; } -}; - -template < void (*Handler)(HttpCallData) > -static void _request_handler(evhtp_request_t* req, void* arg) { - const HttpCallData cd{new _http_calldata(req, arg)}; - Handler(cd); -} - -#define handler_info(uri, cb, arg) sisl::_handler_info(uri, sisl::_request_handler< cb >, arg) - -////////////////////// Server Implementation ////////////////////// -class HttpServer { -public: - HttpServer(const HttpServerConfig& cfg, const std::vector< _handler_info >& handlers) : - m_cfg{cfg}, m_handlers{handlers}, m_ev_base{nullptr}, m_htp{nullptr}, m_internal_event{nullptr} {} - - HttpServer(const HttpServerConfig& cfg, const std::vector< _handler_info >& handlers, - const std::shared_ptr< AuthManager > auth_mgr) : - m_cfg{cfg}, - m_handlers{handlers}, - m_ev_base{nullptr}, - m_htp{nullptr}, - m_internal_event{nullptr}, - m_auth_mgr{auth_mgr} {} - - HttpServer(const HttpServerConfig& cfg) : HttpServer{cfg, {}} {} - - virtual ~HttpServer() { - std::lock_guard lock{m_event_mutex}; - while (!m_event_list.empty()) { - auto c{std::move(m_event_list.front())}; - m_event_list.pop_front(); - } - } - - int start() { - try { - if (::evthread_use_pthreads() != 0) { throw std::runtime_error{"evthread_use_pthreads error!"}; } - m_http_thread = sisl::make_unique_thread("httpserver", &HttpServer::_run, this); - } catch (const std::system_error& e) { - LOGERROR("Thread creation failed: {} ", e.what()); - return -1; - } - - { - std::unique_lock< std::mutex > lk{m_running_mutex}; - m_ready_cv.wait(lk, [this] { return m_is_running; }); - } - return 0; - } - - int stop() { - run_in_http_thread([this]() { - LOGINFO("Stopping http server event loop."); - if (::event_base_loopbreak(m_ev_base) != 0) { LOGERROR("Error breaking out of admin server loop: "); } - }); - - /* wait for not running indication */ - LOGINFO("Waiting for http server event loop to be stopped."); - { - std::unique_lock< std::mutex > lk{m_running_mutex}; - m_ready_cv.wait(lk, [this] { return !m_is_running; }); - } - LOGINFO("HTTP server event loop stopped."); - - LOGINFO("Waiting for http server thread to join.."); - if (m_http_thread && m_http_thread->joinable()) { - try { - m_http_thread->join(); - } catch (std::exception& e) { LOGERROR("Http thread join error: {}", e.what()); } - } - LOGINFO("HTTP Server thread joined."); - - return 0; - } - - void register_handler_info(const _handler_info& hinfo) { - ::evhtp_set_cb(m_htp, hinfo.m_uri.c_str(), hinfo.m_callback, hinfo.m_arg); - } - - // Commands for admin/diagnostic purposes - // Holding handles to these commands here - evbase_t* get_base() const { return m_ev_base; } - - void run_in_http_thread(std::function< void() > closure) { - HttpEvent event; - event.m_event_type = event_type_t::CALLBACK; - event.m_closure = std::move(closure); - - { - std::lock_guard< std::mutex > lock{m_event_mutex}; - m_event_list.emplace_back(std::move(event)); - } - - ::event_active(m_internal_event, EV_READ | EV_WRITE, 1); - } - - void respond_OK(HttpCallData cd, evhtp_res http_code, const std::string& msg, - const char* content_type = "application/json") { - cd->m_http_code = http_code; - cd->m_response_msg = msg; - cd->m_content_type = content_type; - respond_OK(cd); - } - - void respond_NOTOK(HttpCallData cd, evhtp_res http_code, const std::string& msg) { - cd->m_http_code = http_code; - cd->m_response_msg = msg; - respond_OK(cd); - } - - void respond_OK(HttpCallData cd) { - if (std::this_thread::get_id() == m_http_thread->get_id()) { - http_OK(cd); - } else { - run_in_http_thread([this, cd]() { http_OK(cd); }); - } - } - - void respond_NOTOK(HttpCallData cd) { - if (std::this_thread::get_id() == m_http_thread->get_id()) { - http_NOTOK(cd); - } else { - run_in_http_thread([this, cd]() { http_NOTOK(cd); }); - } - } - - static evhtp_res to_evhtp_res(const AuthVerifyStatus status) { - evhtp_res ret; - switch (status) { - case AuthVerifyStatus::OK: - ret = EVHTP_RES_OK; - break; - case AuthVerifyStatus::UNAUTH: - ret = EVHTP_RES_UNAUTH; - break; - case AuthVerifyStatus::FORBIDDEN: - ret = EVHTP_RES_FORBIDDEN; - break; - default: - ret = EVHTP_RES_BADREQ; - break; - } - return ret; - } - - /* - * The user of the http_server must add a line to call http_auth_verify at the beginning of all the apis defined. - * The ideal way would be for the server to intercept all incoming api calls and do verification before sending it - * down to the url callback function. No proper way could be found to do this. - * One potential way is to use the hooks (per connection hooks/ per request hooks or per cb hooks) which can be set - * at different points in the life cycle of a req. (like on_headers etc) These hooks from evhtp library do not seem - * to work properly when the hook cb functions return anything other than EVHTP_RES_OK For a perfect implementation - * that avoids users to add http_auth_verify before all the apis they define, we need to either explore evhtp lib - * more or switch to a different server like Pistache. - */ - - evhtp_res http_auth_verify(evhtp_request_t* req, std::string& msg) { - if (!m_cfg.is_auth_enabled) { return EVHTP_RES_OK; } - - const std::string bearer{"Bearer "}; - auto* token{::evhtp_header_find(req->headers_in, "Authorization")}; - if (!token) { - msg = "missing auth token in request header"; - LOGDEBUGMOD(httpserver_lmod, "Processing req={}; {}", static_cast< void* >(req), msg); - return EVHTP_RES_UNAUTH; - } - const std::string token_str{token}; - if (token_str.rfind(bearer, 0) != 0) { - msg = "require bearer token in request header"; - LOGDEBUGMOD(httpserver_lmod, "Processing req={}; {}", static_cast< void* >(req), msg); - return EVHTP_RES_UNAUTH; - } - const auto raw_token{token_str.substr(bearer.length())}; - // verify method is expected to not throw - return to_evhtp_res(m_auth_mgr->verify(raw_token, msg)); - } - -#define request_callback(cb) \ - (evhtp_callback_cb) std::bind(&HttpServer::cb, this, std::placeholders::_1, std::placeholders::_2) -#define error_callback(cb) \ - (evhtp_hook) std::bind(&HttpServer::cb, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3) - -protected: - /* ****************** All default connection handlers ****************** */ - static evhtp_res register_connection_handlers(evhtp_connection_t* conn, void* arg) { - evhtp_connection_set_hook(conn, evhtp_hook_on_path, (evhtp_hook)HttpServer::request_on_path_handler, arg); - evhtp_connection_set_hook(conn, evhtp_hook_on_request_fini, (evhtp_hook)HttpServer::request_fini_handler, arg); - evhtp_connection_set_hook(conn, evhtp_hook_on_conn_error, (evhtp_hook)HttpServer::connection_error_callback, - arg); - evhtp_connection_set_hook(conn, evhtp_hook_on_error, (evhtp_hook)HttpServer::request_error_handler, arg); - return EVHTP_RES_OK; - } - - static void default_request_handler(evhtp_request_t* req, void* arg) { - HttpServer* const server{static_cast< HttpServer* >(arg)}; - const HttpCallData cd{new _http_calldata(req, arg)}; - server->respond_NOTOK(cd, EVHTP_RES_BADREQ, "Request can't be matched with any handlers\n"); - } - - static evhtp_res request_on_path_handler(evhtp_request_t* req, void* arg) { - [[maybe_unused]] HttpServer* const server{static_cast< HttpServer* >(arg)}; - - const char* path{""}; - if (req->uri && req->uri->path && req->uri->path->full) { path = req->uri->path->full; } - - LOGDEBUGMOD(httpserver_lmod, "Processing req={} path={}", static_cast< void* >(req), path); - return EVHTP_RES_OK; - } - - static evhtp_res request_fini_handler(evhtp_request_t* req, void* arg) { - [[maybe_unused]] HttpServer* const server{static_cast< HttpServer* >(arg)}; - - const char* path{""}; - if (req->uri && req->uri->path && req->uri->path->full) { path = req->uri->path->full; } - LOGDEBUGMOD(httpserver_lmod, "Finishing req={}, path={}", static_cast< void* >(req), path); - - if (req->cbarg != nullptr) { - _http_calldata* const cd{static_cast< _http_calldata* >(req->cbarg)}; - cd->complete(); - intrusive_ptr_release(cd); - } - return EVHTP_RES_OK; - } - - static void connection_error_callback([[maybe_unused]] evhtp_connection_t* conn, evhtp_error_flags type, - void* arg) { - [[maybe_unused]] HttpServer* const server{static_cast< HttpServer* >(arg)}; - LOGERROR("unhandled connection error of type: {}", type); - } - - static void request_error_handler([[maybe_unused]] evhtp_request_t* req, evhtp_error_flags errtype, void* arg) { - [[maybe_unused]] HttpServer* const server{static_cast< HttpServer* >(arg)}; - LOGERROR("Unhandled request error of type: {}", errtype); - } - -private: - int _run() { - int error{0}; - - m_ev_base = ::event_base_new(); - if (m_ev_base == nullptr) { - LOGERROR("event_base_new() failed!"); - return -1; - } - - m_htp = ::evhtp_new(m_ev_base, nullptr); - if (m_htp == nullptr) { - LOGERROR("evhtp_new() failed!"); - ::event_base_free(m_ev_base); - return -1; - } - - if (m_cfg.is_tls_enabled) { - const auto ssl_config{get_ssl_opts_()}; - if (!ssl_config) { - LOGERROR("get_ssl_opts_ failed!"); - ::evhtp_free(m_htp); - ::event_base_free(m_ev_base); - return -1; - } - - if (::evhtp_ssl_init(m_htp, ssl_config.get()) != 0) { - LOGERROR("evhtp_ssl_init failed!"); - ::evhtp_free(m_htp); - ::event_base_free(m_ev_base); - return -1; - } - } - - struct timeval timeout { - m_cfg.read_write_timeout_secs, 0 - }; - - // For internal events - m_internal_event = ::event_new(m_ev_base, -1, EV_TIMEOUT | EV_READ, &HttpServer::internal_event_handler, this); - if (m_internal_event == nullptr) { - LOGERROR("Adding internal event failed!"); - ::evhtp_free(m_htp); - ::event_base_free(m_ev_base); - return error; - } - ::event_add(m_internal_event, &timeout); - - /* set a callback to set per-connection hooks (via a post_accept cb) */ - ::evhtp_set_post_accept_cb(m_htp, &HttpServer::register_connection_handlers, (void*)this); - - // set read and write timeouts - ::evhtp_set_timeouts(m_htp, &timeout, &timeout); - - // Register all handlers and a default handler - for (auto& handler : m_handlers) { - ::evhtp_set_cb(m_htp, handler.m_uri.c_str(), handler.m_callback, handler.m_arg); - } - ::evhtp_set_gencb(m_htp, (evhtp_callback_cb)default_request_handler, (void*)this); - - // bind a socket - error = ::evhtp_bind_socket(m_htp, m_cfg.bind_address.c_str(), uint16_t(m_cfg.server_port), 128); - if (error != 0) { - // handling socket binding failure - LOGERROR("HTTP listener failed to start at address:port = {}:{} ", m_cfg.bind_address, m_cfg.server_port); - // Free the http resources - ::evhtp_free(m_htp); - ::event_base_free(m_ev_base); - return error; - } - - LOGINFO("HTTP Server started at port: {}", m_cfg.server_port); - - // Notify the caller that we are ready. - { - std::lock_guard< std::mutex > lk{m_running_mutex}; - m_is_running = true; - } - m_ready_cv.notify_one(); - - // start event loop, this will block the thread. - error = ::event_base_loop(m_ev_base, 0); - if (error != 0) { LOGERROR("Error starting Http listener loop"); } - - { - std::lock_guard< std::mutex > lk{m_running_mutex}; - m_is_running = false; - } - m_ready_cv.notify_one(); - - // free the resources - ::evhtp_unbind_socket(m_htp); - - // free pipe event - ::event_free(m_internal_event); - - // free evhtp - ::evhtp_free(m_htp); - - // finally free event base - ::event_base_free(m_ev_base); - - LOGINFO("Exiting http server event loop."); - return error; - } - - void _internal_event_handler(evutil_socket_t, short events) { - std::vector< HttpEvent > events_queue; - { - std::lock_guard lock{m_event_mutex}; - while (!m_event_list.empty()) { - events_queue.emplace_back(std::move(m_event_list.front())); - m_event_list.pop_front(); - } - } - - for (auto& event : events_queue) { - switch (event.m_event_type) { - case event_type_t::CALLBACK: - event.m_closure(); - break; - - default: - LOGERROR("Unknown internal event type {} ", event.m_event_type); - break; - } - } - } - - void http_OK(HttpCallData cd) { - evhtp_request_t* const req{cd->request()}; - - const auto* const conn{::evhtp_request_get_connection(req)}; - if (m_cfg.is_tls_enabled) { ::htp_sslutil_add_xheaders(req->headers_out, conn->ssl, HTP_SSLUTILS_XHDR_ALL); } - if (cd->m_content_type) { - ::evhtp_headers_add_header(req->headers_out, evhtp_header_new("Content-Type", cd->m_content_type, 0, 0)); - } - - std::ostringstream ss; - ss << cd->m_response_msg.size(); - - /* valloc should be 1 because ss.str().c_str() is freed once control goes out of this function */ - ::evhtp_headers_add_header(req->headers_out, evhtp_header_new("Content-Length", ss.str().c_str(), 0, 1)); - ::evbuffer_add(req->buffer_out, cd->m_response_msg.c_str(), cd->m_response_msg.size()); - - // Need to increment the calldata reference since evhtp_send_reply will call finish asyncronously and calldata - // needs to stay relavant till that call. - intrusive_ptr_add_ref(cd.get()); - ::evhtp_send_reply(req, cd->m_http_code); - } - - void http_NOTOK(HttpCallData cd) { - evhtp_request_t* const req{cd->request()}; - - const nlohmann::json json = {{"errorCode", cd->m_http_code}, {"errorDetail", cd->m_response_msg}}; - const std::string json_str{json.dump()}; - ::evhtp_headers_add_header(req->headers_out, ::evhtp_header_new("Content-Type", "application/json", 0, 0)); - - std::ostringstream ss; - ss << json_str.size(); - /* valloc should be 1 because ss.str().c_str() is freed once control goes out of this function */ - ::evhtp_headers_add_header(req->headers_out, ::evhtp_header_new("Content-Length", ss.str().c_str(), 0, 1)); - ::evbuffer_add(req->buffer_out, json_str.c_str(), json_str.size()); - - // Need to increment the calldata reference since evhtp_send_reply will call finish asyncronously and calldata - // needs to stay relavant till that call. - intrusive_ptr_add_ref(cd.get()); - ::evhtp_send_reply(req, cd->m_http_code); - } - - static void internal_event_handler(evutil_socket_t socket, short events, void* user_data) { - HttpServer* server{static_cast< HttpServer* >(user_data)}; - server->_internal_event_handler(socket, events); - } - - std::unique_ptr< evhtp_ssl_cfg_t > get_ssl_opts_() { - struct stat f_stat; - auto ssl_config{std::make_unique< evhtp_ssl_cfg_t >()}; - - ssl_config->ssl_opts = 0; // SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 | SSL_OP_NO_TLSv1; - ssl_config->pemfile = (char*)m_cfg.tls_cert_path.c_str(); - ssl_config->privfile = (char*)m_cfg.tls_key_path.c_str(); - - if (ssl_config->pemfile) { - if (::stat(ssl_config->pemfile, &f_stat) != 0) { - LOGERROR("Cannot load SSL cert: {}", ssl_config->pemfile); - return nullptr; - } - } - - if (ssl_config->privfile) { - if (::stat(ssl_config->privfile, &f_stat) != 0) { - LOGERROR("Cannot load SSL key: {}", ssl_config->privfile); - return nullptr; - } - } - - return ssl_config; - } - -private: - HttpServerConfig m_cfg; - std::unique_ptr< std::thread > m_http_thread; - std::vector< _handler_info > m_handlers; - - // Maintaining a list of pipe events because multiple threads could add events at the same time. - // Additions and deletions from this list are protected by m_mutex defined . - std::mutex m_event_mutex; - EventList m_event_list; - - mutable evbase_t* m_ev_base; - evhtp_t* m_htp; - struct event* m_internal_event; - - std::mutex m_running_mutex; - bool m_is_running{false}; - std::condition_variable m_ready_cv; - - std::shared_ptr< AuthManager > m_auth_mgr; -}; - -} // namespace sisl diff --git a/src/async_http/tests/AuthTest.cpp b/src/async_http/tests/AuthTest.cpp deleted file mode 100644 index 449b11ca..00000000 --- a/src/async_http/tests/AuthTest.cpp +++ /dev/null @@ -1,517 +0,0 @@ -/** - * The following test cases are taken from OM. - * https://github.corp.ebay.com/SDS/om_cpp/blob/master/src/tests/unit/Middleware/AuthTest.cpp - **/ - -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "auth_manager/trf_client.hpp" -#include "http_server.hpp" - -SISL_LOGGING_INIT(httpserver_lmod) -SISL_OPTIONS_ENABLE(logging) - -namespace sisl::testing { -using namespace ::testing; - -/** - * Load public and private keys. - * Assume the keys(id_rsa.pub and id_rsa) are in the same directory as this file - */ - -static std::string get_cur_file_dir() { - const std::string cur_file_path{__FILE__}; - const auto last_slash_pos{cur_file_path.rfind('/')}; - if (last_slash_pos == std::string::npos) { return ""; } - return std::string{cur_file_path.substr(0, last_slash_pos + 1)}; -} - -static const std::string cur_file_dir{get_cur_file_dir()}; - -static const std::string grant_path = fmt::format("{}/dummy_grant.cg", cur_file_dir); - -static const std::string load_test_data(const std::string& file_name) { - std::ifstream f{fmt::format("{}/{}", cur_file_dir, file_name)}; - std::string buffer{std::istreambuf_iterator< char >{f}, std::istreambuf_iterator< char >{}}; - if (!buffer.empty() && std::isspace(buffer.back())) buffer.pop_back(); - return buffer; -} - -static const std::string rsa_pub_key{load_test_data("id_rsa.pub")}; -static const std::string rsa_priv_key{load_test_data("id_rsa")}; -static const std::string rsa_pub1_key{load_test_data("id_rsa1.pub")}; - -/** - * This will by default construct a valid jwt token, which contains exactly the - * same attributes in heeader and payload claims. In some test cases if we want - * to build a token with some invalid attributes, we must explicitly set those - * attributes. - * - * A trustfabric token: - * Header claims - * alg: RS256 - * kid: 779112af - * typ: JWT - * x5u: https://trustfabric.vip.ebay.com/v2/k/779112af - * - * Payload claims - * iss: trustfabric - * aud: [usersessionauthsvc, protegoreg, fountauth, monstor, ...] - * cluster: 92 - * ns: sds-tess92-19 - * iat: 1610081499 - * exp: 1610083393 - * nbf: 1610081499 - * instances: 10.175.165.15 - * sub: - * uid=sdsapp,networkaddress=10.175.165.15,ou=orchmanager+l=production,o=sdstess9219,dc=tess,dc=ebay,dc=com - * ver: 2 - * vpc: production - */ -struct TestToken { - using token_t = jwt::builder; - - TestToken() : - token{jwt::create() - .set_type("JWT") - .set_algorithm("RS256") - .set_key_id("abc123") - .set_issuer("trustfabric") - .set_header_claim("x5u", jwt::claim(std::string{"http://127.0.0.1:12347/dummy_tf_token"})) - .set_audience(std::set< std::string >{"test-sisl", "protegoreg"}) - .set_issued_at(std::chrono::system_clock::now() - std::chrono::seconds(180)) - .set_not_before(std::chrono::system_clock::now() - std::chrono::seconds(180)) - .set_expires_at(std::chrono::system_clock::now() + std::chrono::seconds(180)) - .set_subject("uid=sdsapp,networkaddress=10.175.165.15,ou=orchmanager+l=" - "production,o=testapp,dc=tess,dc=ebay,dc=com") - .set_payload_claim("ver", jwt::claim(std::string{"2"})) - .set_payload_claim("vpc", jwt::claim(std::string{"production"})) - .set_payload_claim("instances", jwt::claim(std::string{"10.175.65.15"}))} {} - - std::string sign_rs256() { return token.sign(jwt::algorithm::rs256(rsa_pub_key, rsa_priv_key, "", "")); } - std::string sign_rs512() { return token.sign(jwt::algorithm::rs512(rsa_pub_key, rsa_priv_key, "", "")); } - token_t& get_token() { return token; } - -private: - token_t token; -}; - -class MockAuthManager : public AuthManager { -public: - using AuthManager::AuthManager; - MOCK_METHOD(std::string, download_key, (const std::string&), (const)); -}; - -class AuthBaseTest : public ::testing::Test { -public: - AuthBaseTest() = default; - AuthBaseTest(const AuthBaseTest&) = delete; - AuthBaseTest& operator=(const AuthBaseTest&) = delete; - AuthBaseTest(AuthBaseTest&&) noexcept = delete; - AuthBaseTest& operator=(AuthBaseTest&&) noexcept = delete; - virtual ~AuthBaseTest() override = default; - - virtual void SetUp() override { - cfg.is_tls_enabled = false; - cfg.bind_address = "127.0.0.1"; - cfg.server_port = 12345; - cfg.read_write_timeout_secs = 10; - } - - virtual void TearDown() override { mock_server->stop(); } - - static void say_hello(HttpCallData cd) { - std::string msg; - if (auto r = pThis(cd)->mock_server->http_auth_verify(cd->request(), msg); r != EVHTP_RES_OK) { - pThis(cd)->mock_server->respond_NOTOK(cd, r, msg); - return; - } - std::cout << "Client is saying hello\n"; - pThis(cd)->mock_server->respond_OK(cd, EVHTP_RES_OK, "Hello client from async_http server\n"); - } - -protected: - HttpServerConfig cfg; - std::unique_ptr< HttpServer > mock_server; - static AuthBaseTest* pThis(HttpCallData cd) { return (AuthBaseTest*)cd->cookie(); } -}; - -class AuthEnableTest : public AuthBaseTest { -public: - AuthEnableTest() = default; - AuthEnableTest(const AuthEnableTest&) = delete; - AuthEnableTest& operator=(const AuthEnableTest&) = delete; - AuthEnableTest(AuthEnableTest&&) noexcept = delete; - AuthEnableTest& operator=(AuthEnableTest&&) noexcept = delete; - virtual ~AuthEnableTest() override = default; - - virtual void SetUp() override { - AuthBaseTest::SetUp(); - load_settings(); - cfg.is_auth_enabled = true; - mock_auth_mgr = std::shared_ptr< MockAuthManager >(new MockAuthManager()); - mock_server = std::unique_ptr< HttpServer >(new HttpServer( - cfg, {handler_info("/api/v1/sayHello", AuthBaseTest::say_hello, (void*)this)}, mock_auth_mgr)); - mock_server->start(); - } - - virtual void TearDown() override { AuthBaseTest::TearDown(); } - - void set_allowed_to_all() { - SECURITY_SETTINGS_FACTORY().modifiable_settings([](auto& s) { s.auth_manager->auth_allowed_apps = "all"; }); - SECURITY_SETTINGS_FACTORY().save(); - } - - static void load_settings() { - SECURITY_SETTINGS_FACTORY().modifiable_settings([](auto& s) { - s.auth_manager->auth_allowed_apps = "app1, testapp, app2"; - s.auth_manager->tf_token_url = "http://127.0.0.1"; - s.auth_manager->leeway = 0; - s.auth_manager->issuer = "trustfabric"; - }); - SECURITY_SETTINGS_FACTORY().save(); - } - -protected: - std::shared_ptr< MockAuthManager > mock_auth_mgr; -}; - -class AuthDisableTest : public AuthBaseTest { -public: - AuthDisableTest() = default; - AuthDisableTest(const AuthDisableTest&) = delete; - AuthDisableTest& operator=(const AuthDisableTest&) = delete; - AuthDisableTest(AuthDisableTest&&) noexcept = delete; - AuthDisableTest& operator=(AuthDisableTest&&) noexcept = delete; - virtual ~AuthDisableTest() override = default; - - virtual void SetUp() { - AuthBaseTest::SetUp(); - cfg.is_auth_enabled = false; - mock_server = std::unique_ptr< HttpServer >( - new HttpServer(cfg, {handler_info("/api/v1/sayHello", AuthBaseTest::say_hello, (void*)this)})); - mock_server->start(); - } - - virtual void TearDown() { AuthBaseTest::TearDown(); } -}; - -// test the TestToken utility, should not raise -TEST(TokenGenerte, sign_and_decode) { - const auto token{TestToken().sign_rs256()}; - const auto verify{jwt::verify().allow_algorithm(jwt::algorithm::rs256(rsa_pub_key)).with_issuer("trustfabric")}; - const auto decoded{jwt::decode(token)}; - verify.verify(decoded); -} - -TEST_F(AuthDisableTest, allow_all_on_disabled_mode) { - const cpr::Url url{"http://127.0.0.1:12345/api/v1/sayHello"}; - const auto resp{cpr::Post(url)}; - EXPECT_FALSE(resp.error); - EXPECT_EQ(cpr::status::HTTP_OK, resp.status_code); -} - -TEST_F(AuthEnableTest, reject_all_on_enabled_mode) { - const cpr::Url url{"http://127.0.0.1:12345/api/v1/sayHello"}; - const auto resp{cpr::Post(url)}; - EXPECT_FALSE(resp.error); - EXPECT_EQ(cpr::status::HTTP_UNAUTHORIZED, resp.status_code); - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(0); -} - -TEST_F(AuthEnableTest, allow_vaid_token) { - const cpr::Url url{"http://127.0.0.1:12345/api/v1/sayHello"}; - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); - const auto resp{cpr::Post(url, cpr::Header{{"Authorization", fmt::format("Bearer {}", TestToken().sign_rs256())}})}; - EXPECT_FALSE(resp.error); - EXPECT_EQ(cpr::status::HTTP_OK, resp.status_code); -} - -TEST_F(AuthEnableTest, reject_basic_auth) { - const cpr::Url url{"http://127.0.0.1:12345/api/v1/sayHello"}; - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(0); - // has basic auth in requester header, we require bearer token - const auto resp{cpr::Post(url, cpr::Header{{"Authorization", fmt::format("Basic {}", TestToken().sign_rs256())}})}; - EXPECT_FALSE(resp.error); - EXPECT_EQ(cpr::status::HTTP_UNAUTHORIZED, resp.status_code); -} - -TEST_F(AuthEnableTest, reject_garbage_auth) { - const cpr::Url url{"http://127.0.0.1:12345/api/v1/sayHello"}; - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(0); - const auto resp{cpr::Post(url, cpr::Header{{"Authorization", "Bearer abcdefgh"}})}; - EXPECT_FALSE(resp.error); - EXPECT_EQ(cpr::status::HTTP_UNAUTHORIZED, resp.status_code); -} - -TEST_F(AuthEnableTest, reject_wrong_algorithm) { - const cpr::Url url{"http://127.0.0.1:12345/api/v1/sayHello"}; - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); - // we currently only support rs256 - const auto resp{cpr::Post(url, cpr::Header{{"Authorization", fmt::format("Bearer {}", TestToken().sign_rs512())}})}; - EXPECT_FALSE(resp.error); - EXPECT_EQ(401, resp.status_code); -} - -TEST_F(AuthEnableTest, reject_untrusted_issuer) { - const cpr::Url url{"http://127.0.0.1:12345/api/v1/sayHello"}; - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); - // token is issued by an untrusted issuer, we only trust "trustfabric" - auto token{TestToken()}; - token.get_token().set_issuer("do_not_trust_me"); - const auto resp{cpr::Post(url, cpr::Header{{"Authorization", fmt::format("Bearer {}", token.sign_rs256())}})}; - EXPECT_FALSE(resp.error); - EXPECT_EQ(cpr::status::HTTP_UNAUTHORIZED, resp.status_code); -} - -TEST_F(AuthEnableTest, reject_untrusted_keyurl) { - const cpr::Url url{"http://127.0.0.1:12345/api/v1/sayHello"}; - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(0); - // the key url is an untrusted address, we only trust "http://127.0.0.1" - auto token{TestToken()}; - token.get_token().set_header_claim("x5u", jwt::claim(std::string{"http://untrusted.addr/keys/abc123"})); - const auto resp{cpr::Post(url, cpr::Header{{"Authorization", fmt::format("Bearer {}", token.sign_rs256())}})}; - EXPECT_FALSE(resp.error); - EXPECT_EQ(cpr::status::HTTP_UNAUTHORIZED, resp.status_code); -} - -TEST_F(AuthEnableTest, reject_expired_token) { - const cpr::Url url{"http://127.0.0.1:12345/api/v1/sayHello"}; - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); - // token expired 1 second ago - auto token{TestToken()}; - token.get_token().set_expires_at(std::chrono::system_clock::now() - std::chrono::seconds(1)); - const auto resp{cpr::Post(url, cpr::Header{{"Authorization", fmt::format("Bearer {}", token.sign_rs256())}})}; - EXPECT_FALSE(resp.error); - EXPECT_EQ(cpr::status::HTTP_UNAUTHORIZED, resp.status_code); -} - -TEST_F(AuthEnableTest, reject_download_key_fail) { - const cpr::Url url{"http://127.0.0.1:12345/api/v1/sayHello"}; - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Throw(std::runtime_error("download key failed"))); - const auto resp{cpr::Post(url, cpr::Header{{"Authorization", fmt::format("Bearer {}", TestToken().sign_rs256())}})}; - EXPECT_FALSE(resp.error); - EXPECT_EQ(cpr::status::HTTP_UNAUTHORIZED, resp.status_code); - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(0); -} - -TEST_F(AuthEnableTest, reject_wrong_key) { - const cpr::Url url{"http://127.0.0.1:12345/api/v1/sayHello"}; - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub1_key)); - const auto resp{cpr::Post(url, cpr::Header{{"Authorization", fmt::format("Bearer {}", TestToken().sign_rs256())}})}; - EXPECT_FALSE(resp.error); - EXPECT_EQ(cpr::status::HTTP_UNAUTHORIZED, resp.status_code); -} - -TEST_F(AuthEnableTest, allow_all_apps) { - set_allowed_to_all(); - const cpr::Url url{"http://127.0.0.1:12345/api/v1/sayHello"}; - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); - auto token{TestToken()}; - token.get_token().set_subject("any-prefix,o=dummy_app,dc=tess,dc=ebay,dc=com"); - const auto resp{cpr::Post(url, cpr::Header{{"Authorization", fmt::format("Bearer {}", token.sign_rs256())}})}; - EXPECT_FALSE(resp.error); - EXPECT_EQ(cpr::status::HTTP_OK, resp.status_code); -} - -TEST_F(AuthEnableTest, reject_unauthorized_app) { - const cpr::Url url{"http://127.0.0.1:12345/api/v1/sayHello"}; - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); - // the client application is "myapp", which is not in the allowed list - auto token{TestToken()}; - token.get_token().set_subject("any-prefix,o=myapp,dc=tess,dc=ebay,dc=com"); - const auto resp{cpr::Post(url, cpr::Header{{"Authorization", fmt::format("Bearer {}", token.sign_rs256())}})}; - EXPECT_FALSE(resp.error); - EXPECT_EQ(cpr::status::HTTP_FORBIDDEN, resp.status_code); -} - -// Testing trf client -class MockTrfClient : public TrfClient { -public: - using TrfClient::TrfClient; - MOCK_METHOD(void, request_with_grant_token, ()); - void set_token(const std::string& raw_token, const std::string token_type) { - m_access_token = raw_token; - m_token_type = token_type; - m_expiry = std::chrono::system_clock::now() + std::chrono::seconds(2000); - } - // deligate to parent class (run the real method) - - void __request_with_grant_token() { TrfClient::request_with_grant_token(); } - - void set_expiry(std::chrono::system_clock::time_point tp) { m_expiry = tp; } - std::string get_access_token() { return m_access_token; } - std::string get_token_type() { return m_token_type; } -}; - -static void load_trf_settings() { - std::ofstream outfile{grant_path}; - outfile << "dummy cg contents\n"; - outfile.close(); - SECURITY_SETTINGS_FACTORY().modifiable_settings([](auto& s) { - s.trf_client->grant_path = grant_path; - s.trf_client->server = "127.0.0.1:12345/token"; - s.auth_manager->verify = false; - s.auth_manager->leeway = 30; - }); - SECURITY_SETTINGS_FACTORY().save(); -} - -static void remove_grant_path() { std::remove(grant_path.c_str()); } - -// this test will take 10 seconds to run -TEST_F(AuthEnableTest, trf_grant_path_failure) { - load_trf_settings(); - remove_grant_path(); - EXPECT_THROW( - { - try { - TrfClient trf_client; - } catch (const std::runtime_error& e) { - const std::string cmp_string{ - fmt::format("trustfabric client grant path {} does not exist", grant_path)}; - EXPECT_STREQ(e.what(), cmp_string.c_str()); - throw e; - } - }, - std::runtime_error); -} - -TEST_F(AuthEnableTest, trf_allow_valid_token) { - load_trf_settings(); - MockTrfClient mock_trf_client; - const auto raw_token{TestToken().sign_rs256()}; - // mock_trf_client is expected to be called twice - // 1. First time when access_token is empty - // 2. When token is set to be expired - EXPECT_CALL(mock_trf_client, request_with_grant_token()).Times(2); - ON_CALL(mock_trf_client, request_with_grant_token()) - .WillByDefault( - testing::Invoke([&mock_trf_client, &raw_token]() { mock_trf_client.set_token(raw_token, "Bearer"); })); - - const cpr::Url url{"http://127.0.0.1:12345/api/v1/sayHello"}; - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); - auto resp{cpr::Post(url, cpr::Header{{"Authorization", fmt::format("Bearer {}", mock_trf_client.get_token())}})}; - EXPECT_FALSE(resp.error); - EXPECT_EQ(cpr::status::HTTP_OK, resp.status_code); - - // use the acces_token saved from the previous call - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); - resp = cpr::Post(url, cpr::Header{{"Authorization", mock_trf_client.get_typed_token()}}); - EXPECT_FALSE(resp.error); - EXPECT_EQ(cpr::status::HTTP_OK, resp.status_code); - - // set token to be expired invoking request_with_grant_token - mock_trf_client.set_expiry(std::chrono::system_clock::now() - std::chrono::seconds(100)); - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); - resp = cpr::Post(url, cpr::Header{{"Authorization", mock_trf_client.get_typed_token()}}); - EXPECT_FALSE(resp.error); - EXPECT_EQ(cpr::status::HTTP_OK, resp.status_code); -} - -// Test request_with_grant_token. Setup http server with path /token to return token json -class TrfClientTest : public ::testing::Test { -public: - TrfClientTest() = default; - TrfClientTest(const TrfClientTest&) = delete; - TrfClientTest& operator=(const TrfClientTest&) = delete; - TrfClientTest(TrfClientTest&&) noexcept = delete; - TrfClientTest& operator=(TrfClientTest&&) noexcept = delete; - virtual ~TrfClientTest() override = default; - - virtual void SetUp() override { - cfg.is_tls_enabled = false; - cfg.bind_address = "127.0.0.1"; - cfg.server_port = 12345; - cfg.read_write_timeout_secs = 10; - cfg.is_auth_enabled = false; - mock_server = std::unique_ptr< HttpServer >( - new HttpServer(cfg, {handler_info("/token", TrfClientTest::get_token, this)})); - mock_server->start(); - } - - virtual void TearDown() override { mock_server->stop(); } - - static void get_token(HttpCallData cd) { - std::string msg; - if (const auto r{pThis(cd)->mock_server->http_auth_verify(cd->request(), msg)}; r != EVHTP_RES_OK) { - pThis(cd)->mock_server->respond_NOTOK(cd, r, msg); - return; - } - std::cout << "sending token to client" << std::endl; - pThis(cd)->mock_server->respond_OK(cd, EVHTP_RES_OK, m_token_response); - } - - static void set_token_response(const std::string& raw_token) { - m_token_response = "{\n" - " \"access_token\": \"" + - raw_token + - "\",\n" - " \"token_type\": \"Bearer\",\n" - " \"expires_in\": \"2000\",\n" - " \"refresh_token\": \"dummy_refresh_token\"\n" - "}"; - } - -protected: - HttpServerConfig cfg; - std::unique_ptr< HttpServer > mock_server; - static TrfClientTest* pThis(HttpCallData cd) { return (TrfClientTest*)cd->cookie(); } - static std::string m_token_response; -}; -std::string TrfClientTest::m_token_response; - -TEST_F(TrfClientTest, trf_grant_path_load_failure) { - load_trf_settings(); - MockTrfClient mock_trf_client; - EXPECT_CALL(mock_trf_client, request_with_grant_token()).Times(1); - ON_CALL(mock_trf_client, request_with_grant_token()).WillByDefault(testing::Invoke([&mock_trf_client]() { - mock_trf_client.__request_with_grant_token(); - })); - remove_grant_path(); - EXPECT_THROW( - { - try { - mock_trf_client.get_token(); - } catch (const std::runtime_error& e) { - EXPECT_EQ( - e.what(), - fmt::format("could not load grant from path {}", SECURITY_DYNAMIC_CONFIG(trf_client->grant_path))); - throw e; - } - }, - std::runtime_error); -} - -TEST_F(TrfClientTest, request_with_grant_token) { - load_trf_settings(); - MockTrfClient mock_trf_client; - const auto raw_token{TestToken().sign_rs256()}; - TrfClientTest::set_token_response(raw_token); - EXPECT_CALL(mock_trf_client, request_with_grant_token()).Times(1); - ON_CALL(mock_trf_client, request_with_grant_token()).WillByDefault(testing::Invoke([&mock_trf_client]() { - mock_trf_client.__request_with_grant_token(); - })); - mock_trf_client.get_token(); - EXPECT_EQ(raw_token, mock_trf_client.get_access_token()); - EXPECT_EQ("Bearer", mock_trf_client.get_token_type()); -} - -} // namespace sisl::testing - -using namespace sisl; -using namespace sisl::testing; - -int main(int argc, char* argv[]) { - ::testing::InitGoogleMock(&argc, argv); - SISL_OPTIONS_LOAD(argc, argv, logging) - return RUN_ALL_TESTS(); -} diff --git a/src/async_http/tests/id_rsa b/src/async_http/tests/id_rsa deleted file mode 100644 index 1427e0d5..00000000 --- a/src/async_http/tests/id_rsa +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC4ZtdaIrd1BPIJ -tfnF0TjIK5inQAXZ3XlCrUlJdP+XHwIRxdv1FsN12XyMYO/6ymLmo9ryoQeIrsXB -XYqlET3zfAY+diwCb0HEsVvhisthwMU4gZQu6TYW2s9LnXZB5rVtcBK69hcSlA2k -ZudMZWxZcj0L7KMfO2rIvaHw/qaVOE9j0T257Z8Kp2CLF9MUgX0ObhIsdumFRLaL -DvDUmBPr2zuh/34j2XmWwn1yjN/WvGtdfhXW79Ki1S40HcWnygHgLV8sESFKUxxQ -mKvPUTwDOIwLFL5WtE8Mz7N++kgmDcmWMCHc8kcOIu73Ta/3D4imW7VbKgHZo9+K -3ESFE3RjAgMBAAECggEBAJTEIyjMqUT24G2FKiS1TiHvShBkTlQdoR5xvpZMlYbN -tVWxUmrAGqCQ/TIjYnfpnzCDMLhdwT48Ab6mQJw69MfiXwc1PvwX1e9hRscGul36 -ryGPKIVQEBsQG/zc4/L2tZe8ut+qeaK7XuYrPp8bk/X1e9qK5m7j+JpKosNSLgJj -NIbYsBkG2Mlq671irKYj2hVZeaBQmWmZxK4fw0Istz2WfN5nUKUeJhTwpR+JLUg4 -ELYYoB7EO0Cej9UBG30hbgu4RyXA+VbptJ+H042K5QJROUbtnLWuuWosZ5ATldwO -u03dIXL0SH0ao5NcWBzxU4F2sBXZRGP2x/jiSLHcqoECgYEA4qD7mXQpu1b8XO8U -6abpKloJCatSAHzjgdR2eRDRx5PMvloipfwqA77pnbjTUFajqWQgOXsDTCjcdQui -wf5XAaWu+TeAVTytLQbSiTsBhrnoqVrr3RoyDQmdnwHT8aCMouOgcC5thP9vQ8Us -rVdjvRRbnJpg3BeSNimH+u9AHgsCgYEA0EzcbOltCWPHRAY7B3Ge/AKBjBQr86Kv -TdpTlxePBDVIlH+BM6oct2gaSZZoHbqPjbq5v7yf0fKVcXE4bSVgqfDJ/sZQu9Lp -PTeV7wkk0OsAMKk7QukEpPno5q6tOTNnFecpUhVLLlqbfqkB2baYYwLJR3IRzboJ -FQbLY93E8gkCgYB+zlC5VlQbbNqcLXJoImqItgQkkuW5PCgYdwcrSov2ve5r/Acz -FNt1aRdSlx4176R3nXyibQA1Vw+ztiUFowiP9WLoM3PtPZwwe4bGHmwGNHPIfwVG -m+exf9XgKKespYbLhc45tuC08DATnXoYK7O1EnUINSFJRS8cezSI5eHcbQKBgQDC -PgqHXZ2aVftqCc1eAaxaIRQhRmY+CgUjumaczRFGwVFveP9I6Gdi+Kca3DE3F9Pq -PKgejo0SwP5vDT+rOGHN14bmGJUMsX9i4MTmZUZ5s8s3lXh3ysfT+GAhTd6nKrIE -kM3Nh6HWFhROptfc6BNusRh1kX/cspDplK5x8EpJ0QKBgQDWFg6S2je0KtbV5PYe -RultUEe2C0jYMDQx+JYxbPmtcopvZQrFEur3WKVuLy5UAy7EBvwMnZwIG7OOohJb -vkSpADK6VPn9lbqq7O8cTedEHttm6otmLt8ZyEl3hZMaL3hbuRj6ysjmoFKx6CrX -rK0/Ikt5ybqUzKCMJZg2VKGTxg== ------END PRIVATE KEY----- diff --git a/src/async_http/tests/id_rsa.pub b/src/async_http/tests/id_rsa.pub deleted file mode 100644 index e8d62885..00000000 --- a/src/async_http/tests/id_rsa.pub +++ /dev/null @@ -1,9 +0,0 @@ ------BEGIN PUBLIC KEY----- -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuGbXWiK3dQTyCbX5xdE4 -yCuYp0AF2d15Qq1JSXT/lx8CEcXb9RbDddl8jGDv+spi5qPa8qEHiK7FwV2KpRE9 -83wGPnYsAm9BxLFb4YrLYcDFOIGULuk2FtrPS512Qea1bXASuvYXEpQNpGbnTGVs -WXI9C+yjHztqyL2h8P6mlThPY9E9ue2fCqdgixfTFIF9Dm4SLHbphUS2iw7w1JgT -69s7of9+I9l5lsJ9cozf1rxrXX4V1u/SotUuNB3Fp8oB4C1fLBEhSlMcUJirz1E8 -AziMCxS+VrRPDM+zfvpIJg3JljAh3PJHDiLu902v9w+Iplu1WyoB2aPfitxEhRN0 -YwIDAQAB ------END PUBLIC KEY----- diff --git a/src/async_http/tests/id_rsa1.pub b/src/async_http/tests/id_rsa1.pub deleted file mode 100644 index 5bc6dae3..00000000 --- a/src/async_http/tests/id_rsa1.pub +++ /dev/null @@ -1,9 +0,0 @@ ------BEGIN PUBLIC KEY----- -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuGbXWiK3dQTyCbX5xdE4 -yCuYp0AF2d15Qq1JSXT/lx8CEcXb9RbDddl8jGDv+spi5qPa8qEHiK7FwV2KpRE9 -83wGPnYsAm9BxLFb4YrLYcDFOIGULuk2FtrPS512Qea1bXASuvYXEpQNpGbnTGVs -WXI9C+yjHztqyL2h8P6mlThPY9E9ue2fCqdgixfTFIF9Dm4SLHbphUS2iw7w1JgT -69s7of9+I9l5lsJ9cozf1rxrXX4V1u/SotUuNB3Fp8oB4C1fLBEhSlMcUJirz1E8 -AziMCxS+VrRPDM+zfvpIJg3JlkAh3PJHDiLu902v9w+Iplu1WyoB2aPfitxEhRN0 -YwIDAQAB ------END PUBLIC KEY----- \ No newline at end of file diff --git a/src/async_http/tests/test_http_server.cpp b/src/async_http/tests/test_http_server.cpp deleted file mode 100644 index da83846b..00000000 --- a/src/async_http/tests/test_http_server.cpp +++ /dev/null @@ -1,195 +0,0 @@ -// -// Created by Kadayam, Hari on 12/14/18. -// -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -#include "http_server.hpp" - -SISL_LOGGING_INIT(httpserver_lmod) -SISL_OPTIONS_ENABLE(logging) - -namespace { -sisl::HttpServerConfig s_cfg; -std::unique_ptr< sisl::HttpServer > s_server; -std::mutex s_m; -std::condition_variable s_cv; -bool s_is_shutdown{false}; -std::unique_ptr< std::thread > s_timer_thread; -} // namespace - -static void sleep_and_return(sisl::HttpCallData cd, int64_t secs) { - std::this_thread::sleep_for(std::chrono::seconds{secs}); - std::ostringstream ss{}; - ss << "Took a good nap for " << secs << " seconds. Thank you!\n"; - s_server->respond_OK(cd, EVHTP_RES_OK, ss.str()); -} - -static void delayed_return(sisl::HttpCallData cd) { - const auto req{cd->request()}; - const auto t{::evhtp_kvs_find_kv(req->uri->query, "seconds")}; - if (!t) { - s_server->respond_NOTOK(cd, EVHTP_RES_BADREQ, "Invalid seconds param!"); - return; - } - - std::string sstr{t->val}; - if (sstr.empty() || !std::all_of(sstr.begin(), sstr.end(), ::isdigit)) { - s_server->respond_NOTOK(cd, EVHTP_RES_BADREQ, - "Invalid seconds param! Either empty or contains non-digit characters\n"); - return; - } - - const int64_t secs{std::stoll(sstr, nullptr, 10)}; - s_timer_thread = std::make_unique< std::thread >(sleep_and_return, cd, secs); - return; -} - -static void say_hello(sisl::HttpCallData cd) { - std::cout << "Client is saying hello\n"; - s_server->respond_OK(cd, EVHTP_RES_OK, "Hello client from async_http server\n"); -} - -static void say_name(sisl::HttpCallData cd) { - s_server->respond_OK(cd, EVHTP_RES_OK, "I am the sisl (sizzling) http server \n"); -} - -static void shutdown(sisl::HttpCallData cd) { - std::cout << "Client is asking us to shutdown server\n"; - s_server->respond_OK(cd, EVHTP_RES_OK, "Ok will do\n"); - - { - std::lock_guard< std::mutex > lk{s_m}; - s_is_shutdown = true; - } - s_cv.notify_one(); -} - -class HTTPServerTest : public ::testing::Test { -public: - HTTPServerTest() = default; - HTTPServerTest(const HTTPServerTest&) = delete; - HTTPServerTest& operator=(const HTTPServerTest&) = delete; - HTTPServerTest(HTTPServerTest&&) noexcept = delete; - HTTPServerTest& operator=(HTTPServerTest&&) noexcept = delete; - virtual ~HTTPServerTest() override = default; - - virtual void SetUp() override { - s_server = std::make_unique< sisl::HttpServer >( - s_cfg, - std::vector< sisl::_handler_info >{handler_info("/api/v1/sayHello", say_hello, nullptr), - handler_info("/api/v1/shutdown", shutdown, nullptr), - handler_info("/api/v1/sleepFor", delayed_return, nullptr)}); - s_is_shutdown = false; - s_server->start(); - } - - virtual void TearDown() override { - s_server->stop(); - - if (s_timer_thread && s_timer_thread->joinable()) { s_timer_thread->join(); } - s_timer_thread.reset(); - s_server.reset(); - } - -protected: - void wait_for_shutdown() { - std::unique_lock< std::mutex > lk{s_m}; - s_cv.wait(lk, [] { return (s_is_shutdown); }); - } -}; - -TEST_F(HTTPServerTest, BasicTest) { - s_server->register_handler_info(handler_info("/api/v1/yourNamePlease", say_name, nullptr)); - - const cpr::Url url{"http://127.0.0.1:5051/api/v1/shutdown"}; - const auto resp{cpr::Post(url)}; - - ASSERT_EQ(resp.status_code, cpr::status::HTTP_OK); - - wait_for_shutdown(); - -#ifdef _PRERELEASE - std::cout << "ObjectLife Counter:\n"; - sisl::ObjCounterRegistry::foreach ([](const std::string& name, int64_t created, int64_t alive) { - std::cout << name << ": " << alive << "/" << created << "\n"; - }); -#endif -} - -TEST_F(HTTPServerTest, ParallelTestWithWait) { - s_server->register_handler_info(handler_info("/api/v1/yourNamePlease", say_name, nullptr)); - - std::atomic< bool > failed{false}; - const auto thread_func{[&failed](const size_t iterations) { - const cpr::Url url{"http://127.0.0.1:5051/api/v1/yourNamePlease"}; - for (size_t iteration{0}; (iteration < iterations) && !failed; ++iteration) { - const auto resp{cpr::Post(url)}; - if (resp.status_code != cpr::status::HTTP_OK) failed = true; - } - }}; - - constexpr size_t num_iterations{100}; - const size_t num_threads{std::max< size_t >(std::thread::hardware_concurrency(), 2)}; - std::vector< std::thread > workers; - for (size_t thread_num{0}; thread_num < num_threads; ++thread_num) { - workers.emplace_back(thread_func, num_iterations); - } - - for (auto& worker : workers) { - if (worker.joinable()) worker.join(); - } - - ASSERT_FALSE(failed); -} - -TEST_F(HTTPServerTest, ParallelTestWithoutWait) { - s_server->register_handler_info(handler_info("/api/v1/yourNamePlease", say_name, nullptr)); - - const auto thread_func{[](const size_t iterations) { - const cpr::Url url{"http://127.0.0.1:5051/api/v1/yourNamePlease"}; - for (size_t iteration{0}; iteration < iterations; ++iteration) { - [[maybe_unused]] auto response{cpr::PostAsync(url)}; - } - }}; - - constexpr size_t num_iterations{100}; - const size_t num_threads{std::max< size_t >(std::thread::hardware_concurrency(), 2)}; - std::vector< std::thread > workers; - for (size_t thread_num{0}; thread_num < num_threads; ++thread_num) { - workers.emplace_back(thread_func, num_iterations); - } - - for (auto& worker : workers) { - if (worker.joinable()) worker.join(); - } - - // exit while server processing -} - -int main(int argc, char* argv[]) { - ::testing::InitGoogleTest(&argc, argv); - SISL_OPTIONS_LOAD(argc, argv, logging) - - s_cfg.is_tls_enabled = false; - s_cfg.bind_address = "127.0.0.1"; - s_cfg.server_port = 5051; - s_cfg.read_write_timeout_secs = 10; - - return RUN_ALL_TESTS(); -} diff --git a/src/auth_manager/CMakeLists.txt b/src/auth_manager/CMakeLists.txt index 41132d4a..bdf63298 100644 --- a/src/auth_manager/CMakeLists.txt +++ b/src/auth_manager/CMakeLists.txt @@ -6,6 +6,7 @@ include_directories(BEFORE ..) include_directories(BEFORE .) find_package(FlatBuffers REQUIRED) +find_package(Pistache REQUIRED) set(AUTH_MGR_SOURCE_FILES auth_manager.cpp @@ -30,3 +31,19 @@ target_link_libraries(sisl_trf_client flatbuffers::flatbuffers ) settings_gen_cpp(${FLATBUFFERS_FLATC_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/generated/ sisl_trf_client security_config.fbs) + +set(AUTH_DEPS + sisl + ${COMMON_DEPS} + cpr::cpr + pistache::pistache + flatbuffers::flatbuffers + jwt-cpp::jwt-cpp + GTest::gmock + ) + +add_executable(test_auth_mgr + tests/AuthTest.cpp + ) +target_link_libraries(test_auth_mgr ${AUTH_DEPS}) +add_test(NAME test_auth_mgr COMMAND test_auth_mgr) diff --git a/src/auth_manager/tests/AuthTest.cpp b/src/auth_manager/tests/AuthTest.cpp new file mode 100644 index 00000000..2c320709 --- /dev/null +++ b/src/auth_manager/tests/AuthTest.cpp @@ -0,0 +1,310 @@ +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "auth_manager/auth_manager.hpp" +#include "auth_manager/trf_client.hpp" +#include "test_token.hpp" +#include "basic_http_server.hpp" + +SISL_OPTIONS_ENABLE(logging) + +namespace sisl::testing { +using namespace ::testing; + +static std::string get_cur_file_dir() { + const std::string cur_file_path{__FILE__}; + const auto last_slash_pos{cur_file_path.rfind('/')}; + if (last_slash_pos == std::string::npos) { return ""; } + return std::string{cur_file_path.substr(0, last_slash_pos + 1)}; +} + +static const std::string cur_file_dir{get_cur_file_dir()}; + +static const std::string grant_path = fmt::format("{}/dummy_grant.cg", cur_file_dir); + +class MockAuthManager : public AuthManager { +public: + using AuthManager::AuthManager; + MOCK_METHOD(std::string, download_key, (const std::string&), (const)); + AuthVerifyStatus verify(const std::string& token) { + std::string msg; + return AuthManager::verify(token, msg); + } +}; + +class AuthTest : public ::testing::Test { +public: + virtual void SetUp() override { + load_settings(); + mock_auth_mgr = std::shared_ptr< MockAuthManager >(new MockAuthManager()); + } + + virtual void TearDown() override {} + + void set_allowed_to_all() { + SECURITY_SETTINGS_FACTORY().modifiable_settings([](auto& s) { s.auth_manager->auth_allowed_apps = "all"; }); + SECURITY_SETTINGS_FACTORY().save(); + } + + static void load_settings() { + SECURITY_SETTINGS_FACTORY().modifiable_settings([](auto& s) { + s.auth_manager->auth_allowed_apps = "app1, testapp, app2"; + s.auth_manager->tf_token_url = "http://127.0.0.1"; + s.auth_manager->leeway = 0; + s.auth_manager->issuer = "trustfabric"; + }); + SECURITY_SETTINGS_FACTORY().save(); + } + +protected: + std::shared_ptr< MockAuthManager > mock_auth_mgr; +}; + +// test the TestToken utility, should not raise +TEST(TokenGenerte, sign_and_decode) { + const auto token{TestToken().sign_rs256()}; + const auto verify{jwt::verify().allow_algorithm(jwt::algorithm::rs256(rsa_pub_key)).with_issuer("trustfabric")}; + const auto decoded{jwt::decode(token)}; + verify.verify(decoded); +} + +TEST_F(AuthTest, allow_vaid_token) { + EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); + EXPECT_EQ(mock_auth_mgr->verify(TestToken().sign_rs256()), AuthVerifyStatus::OK); +} + +TEST_F(AuthTest, reject_garbage_auth) { + EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(0); + EXPECT_EQ(mock_auth_mgr->verify("garbage_token"), AuthVerifyStatus::UNAUTH); +} + +TEST_F(AuthTest, reject_wrong_algorithm) { + EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); + EXPECT_EQ(mock_auth_mgr->verify(TestToken().sign_rs512()), AuthVerifyStatus::UNAUTH); +} + +TEST_F(AuthTest, reject_untrusted_issuer) { + EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); + // token is issued by an untrusted issuer, we only trust "trustfabric" + auto token{TestToken()}; + token.get_token().set_issuer("do_not_trust_me"); + EXPECT_EQ(mock_auth_mgr->verify(token.sign_rs256()), AuthVerifyStatus::UNAUTH); +} + +TEST_F(AuthTest, reject_untrusted_keyurl) { + EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(0); + // the key url is an untrusted address, we only trust "http://127.0.0.1" + auto token{TestToken()}; + token.get_token().set_header_claim("x5u", jwt::claim(std::string{"http://untrusted.addr/keys/abc123"})); + EXPECT_EQ(mock_auth_mgr->verify(token.sign_rs256()), AuthVerifyStatus::UNAUTH); +} + +TEST_F(AuthTest, reject_expired_token) { + EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); + // token expired 1 second ago + auto token{TestToken()}; + token.get_token().set_expires_at(std::chrono::system_clock::now() - std::chrono::seconds(1)); + EXPECT_EQ(mock_auth_mgr->verify(token.sign_rs256()), AuthVerifyStatus::UNAUTH); +} + +TEST_F(AuthTest, reject_download_key_fail) { + EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Throw(std::runtime_error("download key failed"))); + EXPECT_EQ(mock_auth_mgr->verify(TestToken().sign_rs512()), AuthVerifyStatus::UNAUTH); +} + +TEST_F(AuthTest, reject_wrong_key) { + EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub1_key)); + EXPECT_EQ(mock_auth_mgr->verify(TestToken().sign_rs512()), AuthVerifyStatus::UNAUTH); +} + +TEST_F(AuthTest, allow_all_apps) { + set_allowed_to_all(); + EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); + auto token{TestToken()}; + token.get_token().set_subject("any-prefix,o=dummy_app,dc=tess,dc=ebay,dc=com"); + EXPECT_EQ(mock_auth_mgr->verify(token.sign_rs256()), AuthVerifyStatus::OK); +} + +TEST_F(AuthTest, reject_unauthorized_app) { + EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); + // the client application is "myapp", which is not in the allowed list + auto token{TestToken()}; + token.get_token().set_subject("any-prefix,o=myapp,dc=tess,dc=ebay,dc=com"); + EXPECT_EQ(mock_auth_mgr->verify(token.sign_rs256()), AuthVerifyStatus::FORBIDDEN); +} + +// Testing trf client +class MockTrfClient : public TrfClient { +public: + using TrfClient::TrfClient; + MOCK_METHOD(void, request_with_grant_token, ()); + void set_token(const std::string& raw_token, const std::string token_type) { + m_access_token = raw_token; + m_token_type = token_type; + m_expiry = std::chrono::system_clock::now() + std::chrono::seconds(2000); + } + // deligate to parent class (run the real method) + + void __request_with_grant_token() { TrfClient::request_with_grant_token(); } + + void set_expiry(std::chrono::system_clock::time_point tp) { m_expiry = tp; } + std::string get_access_token() { return m_access_token; } + std::string get_token_type() { return m_token_type; } +}; + +static void load_trf_settings() { + std::ofstream outfile{grant_path}; + outfile << "dummy cg contents\n"; + outfile.close(); + SECURITY_SETTINGS_FACTORY().modifiable_settings([](auto& s) { + s.trf_client->grant_path = grant_path; + s.trf_client->server = "127.0.0.1:12346/token"; + s.auth_manager->verify = false; + s.auth_manager->leeway = 30; + }); + SECURITY_SETTINGS_FACTORY().save(); +} + +static void remove_grant_path() { std::remove(grant_path.c_str()); } + +// this test will take 10 seconds to run +TEST_F(AuthTest, trf_grant_path_failure) { + load_trf_settings(); + remove_grant_path(); + EXPECT_THROW( + { + try { + TrfClient trf_client; + } catch (const std::runtime_error& e) { + const std::string cmp_string{ + fmt::format("trustfabric client grant path {} does not exist", grant_path)}; + EXPECT_STREQ(e.what(), cmp_string.c_str()); + throw e; + } + }, + std::runtime_error); +} + +TEST_F(AuthTest, trf_allow_valid_token) { + load_trf_settings(); + MockTrfClient mock_trf_client; + const auto raw_token{TestToken().sign_rs256()}; + // mock_trf_client is expected to be called twice + // 1. First time when access_token is empty + // 2. When token is set to be expired + EXPECT_CALL(mock_trf_client, request_with_grant_token()).Times(2); + ON_CALL(mock_trf_client, request_with_grant_token()) + .WillByDefault( + testing::Invoke([&mock_trf_client, &raw_token]() { mock_trf_client.set_token(raw_token, "Bearer"); })); + + EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); + EXPECT_EQ(mock_auth_mgr->verify(mock_trf_client.get_token()), AuthVerifyStatus::OK); + + // use the acces_token saved from the previous call + EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); + EXPECT_EQ(mock_auth_mgr->verify(mock_trf_client.get_token()), AuthVerifyStatus::OK); + + // set token to be expired invoking request_with_grant_token + mock_trf_client.set_expiry(std::chrono::system_clock::now() - std::chrono::seconds(100)); + EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); + EXPECT_EQ(mock_auth_mgr->verify(mock_trf_client.get_token()), AuthVerifyStatus::OK); +} + +static const std::string trf_token_server_ip{"127.0.0.1"}; +static const uint32_t trf_token_server_port{12346}; +static std::string token_response; +static void set_token_response(const std::string& raw_token) { + token_response = "{\n" + " \"access_token\": \"" + + raw_token + + "\",\n" + " \"token_type\": \"Bearer\",\n" + " \"expires_in\": \"2000\",\n" + " \"refresh_token\": \"dummy_refresh_token\"\n" + "}"; +} + +class TokenApiImpl : public TokenApi { +public: + void get_token_impl(Pistache::Http::ResponseWriter& response) { + LOGINFO("Sending token to client"); + response.send(Pistache::Http::Code::Ok, token_response); + } +}; + +// Test request_with_grant_token. Setup http server with path /token to return token json +class TrfClientTest : public ::testing::Test { +public: + TrfClientTest() = default; + TrfClientTest(const TrfClientTest&) = delete; + TrfClientTest& operator=(const TrfClientTest&) = delete; + TrfClientTest(TrfClientTest&&) noexcept = delete; + TrfClientTest& operator=(TrfClientTest&&) noexcept = delete; + virtual ~TrfClientTest() override = default; + + virtual void SetUp() override { + // start token server + APIBase::init(Pistache::Address(fmt::format("{}:{}", trf_token_server_ip, trf_token_server_port)), 1); + m_token_server = std::unique_ptr< TokenApiImpl >(new TokenApiImpl()); + m_token_server->setupRoutes(); + APIBase::start(); + } + + virtual void TearDown() override { APIBase::stop(); } + +private: + std::unique_ptr< TokenApiImpl > m_token_server; +}; + +TEST_F(TrfClientTest, trf_grant_path_load_failure) { + load_trf_settings(); + MockTrfClient mock_trf_client; + EXPECT_CALL(mock_trf_client, request_with_grant_token()).Times(1); + ON_CALL(mock_trf_client, request_with_grant_token()).WillByDefault(testing::Invoke([&mock_trf_client]() { + mock_trf_client.__request_with_grant_token(); + })); + remove_grant_path(); + EXPECT_THROW( + { + try { + mock_trf_client.get_token(); + } catch (const std::runtime_error& e) { + EXPECT_EQ( + e.what(), + fmt::format("could not load grant from path {}", SECURITY_DYNAMIC_CONFIG(trf_client->grant_path))); + throw e; + } + }, + std::runtime_error); +} + +TEST_F(TrfClientTest, request_with_grant_token) { + load_trf_settings(); + MockTrfClient mock_trf_client; + const auto raw_token{TestToken().sign_rs256()}; + set_token_response(raw_token); + EXPECT_CALL(mock_trf_client, request_with_grant_token()).Times(1); + ON_CALL(mock_trf_client, request_with_grant_token()).WillByDefault(testing::Invoke([&mock_trf_client]() { + mock_trf_client.__request_with_grant_token(); + })); + mock_trf_client.get_token(); + EXPECT_EQ(raw_token, mock_trf_client.get_access_token()); + EXPECT_EQ("Bearer", mock_trf_client.get_token_type()); +} +} // namespace sisl::testing + +using namespace sisl; +using namespace sisl::testing; + +int main(int argc, char* argv[]) { + ::testing::InitGoogleMock(&argc, argv); + SISL_OPTIONS_LOAD(argc, argv, logging) + return RUN_ALL_TESTS(); +} diff --git a/src/auth_manager/tests/basic_http_server.hpp b/src/auth_manager/tests/basic_http_server.hpp new file mode 100644 index 00000000..f5453045 --- /dev/null +++ b/src/auth_manager/tests/basic_http_server.hpp @@ -0,0 +1,50 @@ +#include +#include +#include +#include +#include +#include + +#pragma once + +class APIBase { +public: + static void init(Pistache::Address addr, size_t thr) { + m_http_endpoint = std::make_shared< Pistache::Http::Endpoint >(addr); + auto flags = Pistache::Tcp::Options::ReuseAddr; + auto opts = Pistache::Http::Endpoint::options().threadsName("http_server").threads(thr).flags(flags); + m_http_endpoint->init(opts); + } + + static void start() { + m_http_endpoint->setHandler(m_router.handler()); + m_http_endpoint->serveThreaded(); + } + + static void stop() { m_http_endpoint->shutdown(); } + + virtual ~APIBase() {} + +protected: + static std::shared_ptr< Pistache::Http::Endpoint > m_http_endpoint; + static Pistache::Rest::Router m_router; +}; + +std::shared_ptr< Pistache::Http::Endpoint > APIBase::m_http_endpoint; +Pistache::Rest::Router APIBase::m_router; + +class TokenApi : public APIBase { +public: + void setupRoutes() { + Pistache::Rest::Routes::Post(m_router, "/token", + Pistache::Rest::Routes::bind(&TokenApi::get_token_handler, this)); + } + + void get_token_handler(const Pistache::Rest::Request& request, Pistache::Http::ResponseWriter response) { + this->get_token_impl(response); + } + + virtual void get_token_impl(Pistache::Http::ResponseWriter& response) = 0; + + virtual ~TokenApi() { Pistache::Rest::Routes::Remove(m_router, Pistache::Http::Method::Post, "/token"); } +}; \ No newline at end of file diff --git a/src/auth_manager/tests/test_token.hpp b/src/auth_manager/tests/test_token.hpp new file mode 100644 index 00000000..3d3d198a --- /dev/null +++ b/src/auth_manager/tests/test_token.hpp @@ -0,0 +1,110 @@ +#pragma once + +namespace sisl::testing { +// public and private keys for unit test + +static const std::string rsa_pub_key = "-----BEGIN PUBLIC KEY-----\n" + "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuGbXWiK3dQTyCbX5xdE4\n" + "yCuYp0AF2d15Qq1JSXT/lx8CEcXb9RbDddl8jGDv+spi5qPa8qEHiK7FwV2KpRE9\n" + "83wGPnYsAm9BxLFb4YrLYcDFOIGULuk2FtrPS512Qea1bXASuvYXEpQNpGbnTGVs\n" + "WXI9C+yjHztqyL2h8P6mlThPY9E9ue2fCqdgixfTFIF9Dm4SLHbphUS2iw7w1JgT\n" + "69s7of9+I9l5lsJ9cozf1rxrXX4V1u/SotUuNB3Fp8oB4C1fLBEhSlMcUJirz1E8\n" + "AziMCxS+VrRPDM+zfvpIJg3JljAh3PJHDiLu902v9w+Iplu1WyoB2aPfitxEhRN0\n" + "YwIDAQAB\n" + "-----END PUBLIC KEY-----"; + +static const std::string rsa_pub1_key = "-----BEGIN PUBLIC KEY-----\n" + "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuGbXWiK3dQTyCbX5xdE4\n" + "yCuYp0AF2d15Qq1JSXT/lx8CEcXb9RbDddl8jGDv+spi5qPa8qEHiK7FwV2KpRE9\n" + "83wGPnYsAm9BxLFb4YrLYcDFOIGULuk2FtrPS512Qea1bXASuvYXEpQNpGbnTGVs\n" + "WXI9C+yjHztqyL2h8P6mlThPY9E9ue2fCqdgixfTFIF9Dm4SLHbphUS2iw7w1JgT\n" + "69s7of9+I9l5lsJ9cozf1rxrXX4V1u/SptUuNB3Fp8oB4C1fLBEhSlMcUJirz1E8\n" + "AziMCxS+VrRPDM+zfvpIJg3JljAh3PJHDiLu902v9w+Iplu1WyoB2aPfitxEhRN0\n" + "YwIDAQAB\n" + "-----END PUBLIC KEY-----"; + +static const std::string rsa_priv_key = "-----BEGIN PRIVATE KEY-----\n" + "MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC4ZtdaIrd1BPIJ\n" + "tfnF0TjIK5inQAXZ3XlCrUlJdP+XHwIRxdv1FsN12XyMYO/6ymLmo9ryoQeIrsXB\n" + "XYqlET3zfAY+diwCb0HEsVvhisthwMU4gZQu6TYW2s9LnXZB5rVtcBK69hcSlA2k\n" + "ZudMZWxZcj0L7KMfO2rIvaHw/qaVOE9j0T257Z8Kp2CLF9MUgX0ObhIsdumFRLaL\n" + "DvDUmBPr2zuh/34j2XmWwn1yjN/WvGtdfhXW79Ki1S40HcWnygHgLV8sESFKUxxQ\n" + "mKvPUTwDOIwLFL5WtE8Mz7N++kgmDcmWMCHc8kcOIu73Ta/3D4imW7VbKgHZo9+K\n" + "3ESFE3RjAgMBAAECggEBAJTEIyjMqUT24G2FKiS1TiHvShBkTlQdoR5xvpZMlYbN\n" + "tVWxUmrAGqCQ/TIjYnfpnzCDMLhdwT48Ab6mQJw69MfiXwc1PvwX1e9hRscGul36\n" + "ryGPKIVQEBsQG/zc4/L2tZe8ut+qeaK7XuYrPp8bk/X1e9qK5m7j+JpKosNSLgJj\n" + "NIbYsBkG2Mlq671irKYj2hVZeaBQmWmZxK4fw0Istz2WfN5nUKUeJhTwpR+JLUg4\n" + "ELYYoB7EO0Cej9UBG30hbgu4RyXA+VbptJ+H042K5QJROUbtnLWuuWosZ5ATldwO\n" + "u03dIXL0SH0ao5NcWBzxU4F2sBXZRGP2x/jiSLHcqoECgYEA4qD7mXQpu1b8XO8U\n" + "6abpKloJCatSAHzjgdR2eRDRx5PMvloipfwqA77pnbjTUFajqWQgOXsDTCjcdQui\n" + "wf5XAaWu+TeAVTytLQbSiTsBhrnoqVrr3RoyDQmdnwHT8aCMouOgcC5thP9vQ8Us\n" + "rVdjvRRbnJpg3BeSNimH+u9AHgsCgYEA0EzcbOltCWPHRAY7B3Ge/AKBjBQr86Kv\n" + "TdpTlxePBDVIlH+BM6oct2gaSZZoHbqPjbq5v7yf0fKVcXE4bSVgqfDJ/sZQu9Lp\n" + "PTeV7wkk0OsAMKk7QukEpPno5q6tOTNnFecpUhVLLlqbfqkB2baYYwLJR3IRzboJ\n" + "FQbLY93E8gkCgYB+zlC5VlQbbNqcLXJoImqItgQkkuW5PCgYdwcrSov2ve5r/Acz\n" + "FNt1aRdSlx4176R3nXyibQA1Vw+ztiUFowiP9WLoM3PtPZwwe4bGHmwGNHPIfwVG\n" + "m+exf9XgKKespYbLhc45tuC08DATnXoYK7O1EnUINSFJRS8cezSI5eHcbQKBgQDC\n" + "PgqHXZ2aVftqCc1eAaxaIRQhRmY+CgUjumaczRFGwVFveP9I6Gdi+Kca3DE3F9Pq\n" + "PKgejo0SwP5vDT+rOGHN14bmGJUMsX9i4MTmZUZ5s8s3lXh3ysfT+GAhTd6nKrIE\n" + "kM3Nh6HWFhROptfc6BNusRh1kX/cspDplK5x8EpJ0QKBgQDWFg6S2je0KtbV5PYe\n" + "RultUEe2C0jYMDQx+JYxbPmtcopvZQrFEur3WKVuLy5UAy7EBvwMnZwIG7OOohJb\n" + "vkSpADK6VPn9lbqq7O8cTedEHttm6otmLt8ZyEl3hZMaL3hbuRj6ysjmoFKx6CrX\n" + "rK0/Ikt5ybqUzKCMJZg2VKGTxg==\n" + "-----END PRIVATE KEY-----"; + +/** + * This will by default construct a valid jwt token, which contains exactly the + * same attributes in heeader and payload claims. In some test cases if we want + * to build a token with some invalid attributes, we must explicitly set those + * attributes. + * + * A trustfabric token example: + * Header claims + * alg: RS256 + * kid: 779112af + * typ: JWT + * x5u: https://trustfabric.vip.ebay.com/v2/k/779112af + * + * Payload claims + * iss: trustfabric + * aud: [usersessionauthsvc, protegoreg, fountauth, monstor, ...] + * cluster: 92 + * ns: sds-tess92-19 + * iat: 1610081499 + * exp: 1610083393 + * nbf: 1610081499 + * instances: 10.175.165.15 + * sub: + * uid=sdsapp,networkaddress=10.175.165.15,ou=orchmanager+l=production,o=sdstess9219,dc=tess,dc=ebay,dc=com + * ver: 2 + * vpc: production + */ + +struct TestToken { + using token_t = jwt::builder; + + TestToken() : + token{jwt::create() + .set_type("JWT") + .set_algorithm("RS256") + .set_key_id("abc123") + .set_issuer("trustfabric") + .set_header_claim("x5u", jwt::claim(std::string{"http://127.0.0.1:12346/download_key"})) + .set_audience(std::set< std::string >{"test-sisl", "protegoreg"}) + .set_issued_at(std::chrono::system_clock::now() - std::chrono::seconds(180)) + .set_not_before(std::chrono::system_clock::now() - std::chrono::seconds(180)) + .set_expires_at(std::chrono::system_clock::now() + std::chrono::seconds(180)) + .set_subject("uid=sdsapp,networkaddress=dummy_ip,ou=orchmanager+l=" + "production,o=testapp,dc=tess,dc=ebay,dc=com") + .set_payload_claim("ver", jwt::claim(std::string{"2"})) + .set_payload_claim("vpc", jwt::claim(std::string{"production"})) + .set_payload_claim("instances", jwt::claim(std::string{"dummy_ip"}))} {} + + std::string sign_rs256() { return token.sign(jwt::algorithm::rs256(rsa_pub_key, rsa_priv_key, "", "")); } + std::string sign_rs512() { return token.sign(jwt::algorithm::rs512(rsa_pub_key, rsa_priv_key, "", "")); } + token_t& get_token() { return token; } + +private: + token_t token; +}; +} // namespace sisl::testing \ No newline at end of file From 50e9fd880c82a3be9d6654ded963573cfb03b20e Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Wed, 28 Sep 2022 10:46:35 -0700 Subject: [PATCH 132/385] First draft of moving flip to sisl --- src/flip/CMakeLists.txt | 25 +-- src/flip/client/local/flip_client.hpp | 77 +++++++++ .../client/local/test_flip_local_client.cpp | 4 +- src/flip/lib/.clang-format | 147 ------------------ src/flip/lib/flip.hpp | 81 +--------- src/flip/lib/flip_rpc_server.cpp | 54 +++++++ src/flip/lib/flip_rpc_server.hpp | 19 +++ src/flip/lib/test_flip.cpp | 2 +- .../test_flip_server.cpp | 2 +- 9 files changed, 162 insertions(+), 249 deletions(-) create mode 100644 src/flip/client/local/flip_client.hpp delete mode 100644 src/flip/lib/.clang-format create mode 100644 src/flip/lib/flip_rpc_server.cpp create mode 100644 src/flip/lib/flip_rpc_server.hpp rename src/flip/{server/flip_rpc_server.cpp => lib}/test_flip_server.cpp (92%) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index bec29e21..b33e07ee 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -1,16 +1,4 @@ cmake_minimum_required(VERSION 3.10) -project(flip) -set(CMAKE_CXX_STANDARD 17) - -if(EXISTS ${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) - include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) - conan_output_dirs_setup() - conan_set_rpath() - conan_set_std() - conan_set_fpic() -else() - message(WARNING "Conan Build file does not exist, trying to build without!") -endif() if (${CMAKE_BUILD_TYPE} STREQUAL Debug) include (cmake/debug_flags.cmake) @@ -19,25 +7,20 @@ if (${MEMORY_SANITIZER_ON}) include (cmake/mem_sanitizer.cmake) endif () -find_program(CCACHE_FOUND ccache) -if (CCACHE_FOUND) - set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) - set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache) -endif () - find_package(gRPC REQUIRED) -find_package(sisl REQUIRED) include_directories(BEFORE include) include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/proto) +include_directories(BEFORE ..) -add_library(flip +add_library(flip_server src/flip_rpc_server.cpp $ ) -target_link_libraries(flip +target_link_libraries(flip_server sisl::sisl gRPC::grpc++ + spdlog::spdlog ) add_executable(test_flip src/test_flip.cpp) diff --git a/src/flip/client/local/flip_client.hpp b/src/flip/client/local/flip_client.hpp new file mode 100644 index 00000000..aa35591c --- /dev/null +++ b/src/flip/client/local/flip_client.hpp @@ -0,0 +1,77 @@ +#pragma once +#include "flip.hpp" + +namespace flip { +class FlipClient { +public: + explicit FlipClient(Flip* f) : m_flip(f) {} + + template < typename T > + void create_condition(const std::string& param_name, flip::Operator oper, const T& value, + FlipCondition* out_condition) { + *(out_condition->mutable_name()) = param_name; + out_condition->set_oper(oper); + to_proto_converter< T >()(value, out_condition->mutable_value()); + } + + bool inject_noreturn_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, + const FlipFrequency& freq) { + FlipSpec fspec; + + _create_flip_spec(flip_name, conditions, freq, fspec); + fspec.mutable_flip_action()->set_no_action(true); + + m_flip->add(fspec); + return true; + } + + template < typename T > + bool inject_retval_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, + const FlipFrequency& freq, const T& retval) { + FlipSpec fspec; + + _create_flip_spec(flip_name, conditions, freq, fspec); + to_proto_converter< T >()(retval, fspec.mutable_flip_action()->mutable_returns()->mutable_retval()); + + m_flip->add(fspec); + return true; + } + + bool inject_delay_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, + const FlipFrequency& freq, uint64_t delay_usec) { + FlipSpec fspec; + + _create_flip_spec(flip_name, conditions, freq, fspec); + fspec.mutable_flip_action()->mutable_delays()->set_delay_in_usec(delay_usec); + + m_flip->add(fspec); + return true; + } + + template < typename T > + bool inject_delay_and_retval_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, + const FlipFrequency& freq, uint64_t delay_usec, const T& retval) { + FlipSpec fspec; + + _create_flip_spec(flip_name, conditions, freq, fspec); + fspec.mutable_flip_action()->mutable_delays()->set_delay_in_usec(delay_usec); + to_proto_converter< T >()(retval, fspec.mutable_flip_action()->mutable_delay_returns()->mutable_retval()); + + m_flip->add(fspec); + return true; + } + +private: + void _create_flip_spec(std::string flip_name, const std::vector< FlipCondition >& conditions, + const FlipFrequency& freq, FlipSpec& out_fspec) { + *(out_fspec.mutable_flip_name()) = flip_name; + for (auto& c : conditions) { + *(out_fspec.mutable_conditions()->Add()) = c; + } + *(out_fspec.mutable_flip_frequency()) = freq; + } + +private: + Flip* m_flip; +}; +} // namespace flip \ No newline at end of file diff --git a/src/flip/client/local/test_flip_local_client.cpp b/src/flip/client/local/test_flip_local_client.cpp index 8b1b5ca8..c9125baf 100644 --- a/src/flip/client/local/test_flip_local_client.cpp +++ b/src/flip/client/local/test_flip_local_client.cpp @@ -3,11 +3,11 @@ // #include "flip_spec.pb.h" -#include "flip.hpp" +#include "flip_client.hpp" #include #include -#include +#include "options/options.h" using namespace flip; diff --git a/src/flip/lib/.clang-format b/src/flip/lib/.clang-format deleted file mode 100644 index 2f771200..00000000 --- a/src/flip/lib/.clang-format +++ /dev/null @@ -1,147 +0,0 @@ ---- -# We'll use defaults from the LLVM style, but with 4 columns indentation. -BasedOnStyle: LLVM -IndentWidth: 4 ---- -Language: Cpp -# Force pointers to the type for C++. -DerivePointerAlignment: false -PointerAlignment: Left -ColumnLimit: 120 - -AccessModifierOffset: -4 -AlignAfterOpenBracket: Align -AlignConsecutiveAssignments: false -AlignConsecutiveDeclarations: false -AlignEscapedNewlines: Right -AlignOperands: false -AlignTrailingComments: true -AllowShortBlocksOnASingleLine: true -AllowShortIfStatementsOnASingleLine: true -AllowShortBlocksOnASingleLine: true -AllowShortCaseLabelsOnASingleLine: false -# AllowShortFunctionsOnASingleLine: InlineOnly -# AllowShortLoopsOnASingleLine: false -AlwaysBreakAfterReturnType: None -AlwaysBreakTemplateDeclarations: true - -BinPackArguments: true -BinPackParameters: true -BreakConstructorInitializersBeforeComma: true -BreakConstructorInitializers: AfterColon - -ConstructorInitializerAllOnOneLineOrOnePerLine: true -ConstructorInitializerIndentWidth: 8 - -IndentCaseLabels: false -SortIncludes: false -#IndentWrappedFunctionNames: true -#SpaceAfterTemplateKeyword: true -#SpaceBeforeAssignmentOperators: true -SpaceBeforeParens: ControlStatements -SpacesInAngles : true - ---- -Language: JavaScript -# Use 140 columns for JS. -ColumnLimit: 140 -... - - -##################### -# --- -# Language: Cpp -# AccessModifierOffset: -1 -# AlignAfterOpenBracket: Align -# AlignConsecutiveAssignments: false -# AlignConsecutiveDeclarations: false -# AlignEscapedNewlinesLeft: true -# AlignOperands: true -# AlignTrailingComments: true -# AllowAllParametersOfDeclarationOnNextLine: false -# AllowShortBlocksOnASingleLine: false -# AllowShortCaseLabelsOnASingleLine: false -# AllowShortFunctionsOnASingleLine: Inline -# AllowShortIfStatementsOnASingleLine: false -# AllowShortLoopsOnASingleLine: false -# AlwaysBreakAfterDefinitionReturnType: None -# AlwaysBreakAfterReturnType: None -# AlwaysBreakBeforeMultilineStrings: true -# AlwaysBreakTemplateDeclarations: true -# BinPackArguments: true -# BinPackParameters: false -# BraceWrapping: -# AfterClass: false -# AfterControlStatement: false -# AfterEnum: false -# AfterFunction: false -# AfterNamespace: false -# AfterObjCDeclaration: false -# AfterStruct: false -# AfterUnion: false -# BeforeCatch: false -# BeforeElse: false -# IndentBraces: false -# BreakBeforeBinaryOperators: None -# BreakBeforeBraces: Attach -# BreakBeforeInheritanceComma: false -# BreakBeforeTernaryOperators: true -# BreakAfterJavaFieldAnnotations: false -# BreakConstructorInitializersBeforeComma: false -# BreakStringLiterals: true -# ColumnLimit: 80 -# CommentPragmas: '^ IWYU pragma:' -# ConstructorInitializerAllOnOneLineOrOnePerLine: true -# ConstructorInitializerIndentWidth: 4 -# ContinuationIndentWidth: 4 -# Cpp11BracedListStyle: true -# DerivePointerAlignment: false -# DisableFormat: false -# ExperimentalAutoDetectBinPacking: false -# FixNamespaceComments: true -# ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] -# IncludeCategories: -# - Regex: '^<.*\.h>' -# Priority: 1 -# - Regex: '^<.*' -# Priority: 2 -# - Regex: '.*' -# Priority: 3 -# IncludeIsMainRegex: '([-_](test|unittest))?$' -# IndentCaseLabels: true -# IndentWidth: 2 -# IndentWrappedFunctionNames: false -# JavaScriptQuotes: Leave -# JavaScriptWrapImports: true -# KeepEmptyLinesAtTheStartOfBlocks: false -# MacroBlockBegin: '' -# MacroBlockEnd: '' -# MaxEmptyLinesToKeep: 1 -# NamespaceIndentation: None -# ObjCBlockIndentWidth: 2 -# ObjCSpaceAfterProperty: true -# ObjCSpaceBeforeProtocolList: false -# PenaltyBreakBeforeFirstCallParameter: 1 -# PenaltyBreakComment: 300 -# PenaltyBreakFirstLessLess: 120 -# PenaltyBreakString: 1000 -# PenaltyExcessCharacter: 1000000 -# PenaltyReturnTypeOnItsOwnLine: 200 -# PointerAlignment: Right -# ReflowComments: true -# SortIncludes: false -# SpaceAfterCStyleCast: false -# SpaceAfterTemplateKeyword: true -# SpaceBeforeAssignmentOperators: true -# SpaceBeforeParens: ControlStatements -# SpaceInEmptyParentheses: false -# SpacesBeforeTrailingComments: 2 -# SpacesInAngles: false -# SpacesInContainerLiterals: true -# SpacesInCStyleCastParentheses: false -# SpacesInParentheses: false -# SpacesInSquareBrackets: false -# Standard: Auto -# TabWidth: 8 -# UseTab: Never -# ... diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index af17c5bc..bc73a47c 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -349,10 +349,10 @@ class FlipTimerAsio : public FlipTimerBase { std::unique_ptr< std::thread > m_timer_thread; }; -#define TEST_ONLY 0 -#define RETURN_VAL 1 -#define SET_DELAY 2 -#define DELAYED_RETURN 3 +static constexpr int TEST_ONLY = 0; +static constexpr int RETURN_VAL = 1; +static constexpr int SET_DELAY = 2; +static constexpr int DELAYED_RETURN = 3; class Flip { public: @@ -672,78 +672,5 @@ class Flip { std::unique_ptr< std::thread > m_flip_server_thread; }; -class FlipClient { -public: - explicit FlipClient(Flip* f) : m_flip(f) {} - - template < typename T > - void create_condition(const std::string& param_name, flip::Operator oper, const T& value, - FlipCondition* out_condition) { - *(out_condition->mutable_name()) = param_name; - out_condition->set_oper(oper); - to_proto_converter< T >()(value, out_condition->mutable_value()); - } - - bool inject_noreturn_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, - const FlipFrequency& freq) { - FlipSpec fspec; - - _create_flip_spec(flip_name, conditions, freq, fspec); - fspec.mutable_flip_action()->set_no_action(true); - - m_flip->add(fspec); - return true; - } - - template < typename T > - bool inject_retval_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, - const FlipFrequency& freq, const T& retval) { - FlipSpec fspec; - - _create_flip_spec(flip_name, conditions, freq, fspec); - to_proto_converter< T >()(retval, fspec.mutable_flip_action()->mutable_returns()->mutable_retval()); - - m_flip->add(fspec); - return true; - } - - bool inject_delay_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, - const FlipFrequency& freq, uint64_t delay_usec) { - FlipSpec fspec; - - _create_flip_spec(flip_name, conditions, freq, fspec); - fspec.mutable_flip_action()->mutable_delays()->set_delay_in_usec(delay_usec); - - m_flip->add(fspec); - return true; - } - - template < typename T > - bool inject_delay_and_retval_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, - const FlipFrequency& freq, uint64_t delay_usec, const T& retval) { - FlipSpec fspec; - - _create_flip_spec(flip_name, conditions, freq, fspec); - fspec.mutable_flip_action()->mutable_delays()->set_delay_in_usec(delay_usec); - to_proto_converter< T >()(retval, fspec.mutable_flip_action()->mutable_delay_returns()->mutable_retval()); - - m_flip->add(fspec); - return true; - } - -private: - void _create_flip_spec(std::string flip_name, const std::vector< FlipCondition >& conditions, - const FlipFrequency& freq, FlipSpec& out_fspec) { - *(out_fspec.mutable_flip_name()) = flip_name; - for (auto& c : conditions) { - *(out_fspec.mutable_conditions()->Add()) = c; - } - *(out_fspec.mutable_flip_frequency()) = freq; - } - -private: - Flip* m_flip; -}; - } // namespace flip #endif // FLIP_FLIP_HPP diff --git a/src/flip/lib/flip_rpc_server.cpp b/src/flip/lib/flip_rpc_server.cpp new file mode 100644 index 00000000..d5351782 --- /dev/null +++ b/src/flip/lib/flip_rpc_server.cpp @@ -0,0 +1,54 @@ +#include + +#include +#include +#include +#include +#include + +#include "flip_rpc_server.hpp" +#include "flip.hpp" + +namespace flip { +grpc::Status FlipRPCServer::InjectFault(grpc::ServerContext* context, const FlipSpec* request, FlipResponse* response) { + // LOG(INFO) << "Flipspec request = " << request->DebugString() << "\n"; + flip::Flip::instance().add(*request); + response->set_success(true); + return grpc::Status::OK; +}; + +grpc::Status FlipRPCServer::GetFaults(grpc::ServerContext* context, const FlipNameRequest* request, + FlipListResponse* response) { + // LOG(INFO) << "GetFaults request = " << request->DebugString(); + auto resp = request->name().size() ? flip::Flip::instance().get(request->name()) : flip::Flip::instance().get_all(); + for (const auto& r : resp) { + response->add_infos()->set_info(r); + } + // LOG(INFO) << "GetFaults response = " << response->DebugString(); + return grpc::Status::OK; +}; + +class FlipRPCServiceWrapper : public FlipRPCServer::Service { +public: + void print_method_names() { + for (auto i = 0; i < 2; ++i) { + auto method = (::grpc::internal::RpcServiceMethod*)GetHandler(i); + if (method) { std::cout << "Method name = " << method->name() << "\n"; } + } + } +}; + +void FlipRPCServer::rpc_thread() { + std::string server_address("0.0.0.0:50051"); + FlipRPCServiceWrapper service; + + grpc::ServerBuilder builder; + builder.AddListeningPort(server_address, grpc::InsecureServerCredentials()); + builder.RegisterService((FlipRPCServer*)&service); + service.print_method_names(); + std::unique_ptr< grpc::Server > server(builder.BuildAndStart()); + std::cout << "Server listening on " << server_address << std::endl; + server->Wait(); +} + +} // namespace flip diff --git a/src/flip/lib/flip_rpc_server.hpp b/src/flip/lib/flip_rpc_server.hpp new file mode 100644 index 00000000..061304ec --- /dev/null +++ b/src/flip/lib/flip_rpc_server.hpp @@ -0,0 +1,19 @@ +// +// Created by Kadayam, Hari on Jun 12 2019. +// +#ifndef FLIP_FLIP_RCP_SERVER_HPP +#define FLIP_FLIP_RCP_SERVER_HPP + +#include "flip_spec.pb.h" +#include "flip_server.grpc.pb.h" + +namespace flip { +class FlipRPCServer final : public FlipServer::Service { +public: + grpc::Status InjectFault(grpc::ServerContext* context, const FlipSpec* request, FlipResponse* response) override; + grpc::Status GetFaults(grpc::ServerContext* context, const FlipNameRequest* request, + FlipListResponse* response) override; + static void rpc_thread(); +}; +} // namespace flip +#endif \ No newline at end of file diff --git a/src/flip/lib/test_flip.cpp b/src/flip/lib/test_flip.cpp index 9fd6965a..63ddc48a 100644 --- a/src/flip/lib/test_flip.cpp +++ b/src/flip/lib/test_flip.cpp @@ -7,7 +7,7 @@ #include #include -#include +#include "options/options.h" SISL_LOGGING_INIT(flip) SISL_OPTIONS_ENABLE(logging) diff --git a/src/flip/server/flip_rpc_server.cpp/test_flip_server.cpp b/src/flip/lib/test_flip_server.cpp similarity index 92% rename from src/flip/server/flip_rpc_server.cpp/test_flip_server.cpp rename to src/flip/lib/test_flip_server.cpp index e2b45075..52e0a27c 100644 --- a/src/flip/server/flip_rpc_server.cpp/test_flip_server.cpp +++ b/src/flip/lib/test_flip_server.cpp @@ -4,7 +4,7 @@ #include "flip.hpp" -#include +#include "options/options.h" SISL_LOGGING_INIT(flip) From 80cff58fad12990c966861f8df103b0c5c4c730d Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Wed, 28 Sep 2022 11:58:13 -0700 Subject: [PATCH 133/385] Second draft of moving flip to sisl --- CMakeLists.txt | 1 + src/flip/CMakeLists.txt | 31 ++++++++++++------- src/flip/client/local/flip_client.hpp | 2 +- .../client/local/test_flip_local_client.cpp | 2 +- src/flip/lib/flip.hpp | 8 +++-- src/flip/lib/flip_rpc_server.cpp | 4 +-- src/flip/proto/CMakeLists.txt | 10 +++--- 7 files changed, 35 insertions(+), 23 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 632d675c..9977e4bb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -126,6 +126,7 @@ add_subdirectory (src/utility) add_subdirectory (src/sisl_version) add_subdirectory (src/auth_manager) add_subdirectory (src/file_watcher) +add_subdirectory (src/flip) add_library(sisl $ diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index b33e07ee..78469f6b 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -1,5 +1,9 @@ cmake_minimum_required(VERSION 3.10) +if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")) + add_flags("-Wno-unused-parameter -Wno-cast-function-type") +endif() + if (${CMAKE_BUILD_TYPE} STREQUAL Debug) include (cmake/debug_flags.cmake) endif () @@ -12,24 +16,29 @@ find_package(gRPC REQUIRED) include_directories(BEFORE include) include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/proto) include_directories(BEFORE ..) +include_directories(BEFORE .) + +add_subdirectory (proto) -add_library(flip_server - src/flip_rpc_server.cpp +add_library(flip + lib/flip_rpc_server.cpp $ ) -target_link_libraries(flip_server - sisl::sisl +target_link_libraries(flip + sisl gRPC::grpc++ spdlog::spdlog ) -add_executable(test_flip src/test_flip.cpp) -target_link_libraries(test_flip flip) +add_executable(test_flip lib/test_flip.cpp) +target_link_libraries(test_flip flip cxxopts::cxxopts) +add_test(NAME TestFlip COMMAND test_flip) + +add_executable(test_flip_server lib/test_flip_server.cpp) +target_link_libraries(test_flip_server flip cxxopts::cxxopts) + +add_executable(test_flip_local_client client/local/test_flip_local_client.cpp) +target_link_libraries(test_flip_local_client flip cxxopts::cxxopts) -add_executable(test_flip_local_client src/test_flip_local_client.cpp) -target_link_libraries(test_flip_local_client flip) -add_executable(test_flip_server src/test_flip_server.cpp) -target_link_libraries(test_flip_server flip) -add_subdirectory (proto) diff --git a/src/flip/client/local/flip_client.hpp b/src/flip/client/local/flip_client.hpp index aa35591c..467bfafa 100644 --- a/src/flip/client/local/flip_client.hpp +++ b/src/flip/client/local/flip_client.hpp @@ -1,5 +1,5 @@ #pragma once -#include "flip.hpp" +#include "lib/flip.hpp" namespace flip { class FlipClient { diff --git a/src/flip/client/local/test_flip_local_client.cpp b/src/flip/client/local/test_flip_local_client.cpp index c9125baf..bcea5c93 100644 --- a/src/flip/client/local/test_flip_local_client.cpp +++ b/src/flip/client/local/test_flip_local_client.cpp @@ -3,7 +3,7 @@ // #include "flip_spec.pb.h" -#include "flip_client.hpp" +#include "Flip/client/local/flip_client.hpp" #include #include diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index bc73a47c..19c8d5c1 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -4,14 +4,12 @@ #ifndef FLIP_FLIP_HPP #define FLIP_FLIP_HPP -#include "flip_spec.pb.h" -#include "flip_rpc_server.hpp" #include #include #include #include #include -#include + #include #include #include @@ -19,6 +17,10 @@ #include #include +#include "flip_spec.pb.h" +#include "flip_rpc_server.hpp" +#include "logging/logging.h" + SISL_LOGGING_DECL(flip) namespace flip { diff --git a/src/flip/lib/flip_rpc_server.cpp b/src/flip/lib/flip_rpc_server.cpp index d5351782..7090d757 100644 --- a/src/flip/lib/flip_rpc_server.cpp +++ b/src/flip/lib/flip_rpc_server.cpp @@ -15,7 +15,7 @@ grpc::Status FlipRPCServer::InjectFault(grpc::ServerContext* context, const Flip flip::Flip::instance().add(*request); response->set_success(true); return grpc::Status::OK; -}; +} grpc::Status FlipRPCServer::GetFaults(grpc::ServerContext* context, const FlipNameRequest* request, FlipListResponse* response) { @@ -26,7 +26,7 @@ grpc::Status FlipRPCServer::GetFaults(grpc::ServerContext* context, const FlipNa } // LOG(INFO) << "GetFaults response = " << response->DebugString(); return grpc::Status::OK; -}; +} class FlipRPCServiceWrapper : public FlipRPCServer::Service { public: diff --git a/src/flip/proto/CMakeLists.txt b/src/flip/proto/CMakeLists.txt index 43e990e1..0e0310b2 100644 --- a/src/flip/proto/CMakeLists.txt +++ b/src/flip/proto/CMakeLists.txt @@ -1,18 +1,18 @@ cmake_minimum_required(VERSION 3.11) -add_library(${PROJECT_NAME}_proto OBJECT +add_library(flip_proto OBJECT flip_server.proto flip_spec.proto ) -target_link_libraries(${PROJECT_NAME}_proto +target_link_libraries(flip_proto protobuf::libprotobuf gRPC::grpc++ ) -protobuf_generate(LANGUAGE cpp TARGET ${PROJECT_NAME}_proto PROTOS flip_spec.proto) -protobuf_generate(LANGUAGE cpp TARGET ${PROJECT_NAME}_proto PROTOS flip_server.proto) +protobuf_generate(LANGUAGE cpp TARGET flip_proto PROTOS flip_spec.proto) +protobuf_generate(LANGUAGE cpp TARGET flip_proto PROTOS flip_server.proto) protobuf_generate( - TARGET ${PROJECT_NAME}_proto + TARGET flip_proto LANGUAGE grpc GENERATE_EXTENSIONS .grpc.pb.h .grpc.pb.cc PLUGIN protoc-gen-grpc=$ From e0e4a432e7afda6fcb695238b6a892811262c31f Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Wed, 28 Sep 2022 14:16:36 -0700 Subject: [PATCH 134/385] Bringing flip over to sisl --- conanfile.py | 10 +++++++--- src/flip/client/local/test_flip_local_client.cpp | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/conanfile.py b/conanfile.py index 54c43b42..e261f69b 100644 --- a/conanfile.py +++ b/conanfile.py @@ -55,6 +55,7 @@ def requirements(self): self.requires("cxxopts/2.2.1") self.requires("flatbuffers/1.12.0") self.requires("folly/2022.01.31.00") + self.requires("grpc/1.48.0") self.requires("jwt-cpp/0.4.0") self.requires("nlohmann_json/3.10.5") self.requires("zmarok-semver/1.1.0") @@ -118,11 +119,14 @@ def package(self): copy(self, "*.so*", self.build_folder, lib_dir, keep_path=False) hdr_dir = join(self.package_folder, join("include", "sisl")) - - copy(self, "*.hpp", join(self.source_folder, "src"), hdr_dir, keep_path=True) - copy(self, "*.h", join(self.source_folder, "src"), hdr_dir, keep_path=True) + copy(self, "*.hpp", join(self.source_folder, "src"), hdr_dir, keep_path=True, excludes="flip/*") + copy(self, "*.h", join(self.source_folder, "src"), hdr_dir, keep_path=True, excludes="flip/*") copy(self, "settings_gen.cmake", join(self.source_folder, "cmake"), join(self.package_folder, "cmake"), keep_path=False) + flip_hdr_dir = join(self.package_folder, join("include", "flip")) + copy(self, "*.hpp", join(self.source_folder, "src/flip"), flip_hdr_dir, keep_path=False) + copy(self, "*.h", join(self.source_folder, "src/flip"), flip_hdr_dir, keep_path=False) + def package_info(self): self.cpp_info.libs = ["sisl"] self.cpp_info.cppflags.extend(["-Wno-unused-local-typedefs", "-fconcepts"]) diff --git a/src/flip/client/local/test_flip_local_client.cpp b/src/flip/client/local/test_flip_local_client.cpp index bcea5c93..c9125baf 100644 --- a/src/flip/client/local/test_flip_local_client.cpp +++ b/src/flip/client/local/test_flip_local_client.cpp @@ -3,7 +3,7 @@ // #include "flip_spec.pb.h" -#include "Flip/client/local/flip_client.hpp" +#include "flip_client.hpp" #include #include From 8538b8bc38e8d6750be52b53edce3b4945336cb7 Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Wed, 28 Sep 2022 14:18:43 -0700 Subject: [PATCH 135/385] Merging latest code with flip bring over --- conanfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index 70cfcd96..a1403ba8 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.1.1" + version = "8.1.2" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") From fdd0644c2d161df263a01de37d988216c2c096a1 Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Wed, 28 Sep 2022 14:28:01 -0700 Subject: [PATCH 136/385] License header on flip code and readme updates --- README.md | 10 +++---- src/flip/client/local/flip_client.hpp | 16 +++++++++++ .../client/local/test_flip_local_client.cpp | 20 +++++++++++--- src/flip/lib/flip.hpp | 19 ++++++++++--- src/flip/lib/flip_rpc_server.cpp | 16 +++++++++++ src/flip/lib/flip_rpc_server.hpp | 27 +++++++++++++------ src/flip/lib/test_flip.cpp | 20 +++++++++++--- src/flip/lib/test_flip_server.cpp | 20 +++++++++++--- 8 files changed, 119 insertions(+), 29 deletions(-) diff --git a/README.md b/README.md index 425ddde1..7a31b6ba 100644 --- a/README.md +++ b/README.md @@ -12,11 +12,6 @@ to replace these libraries, but provide a layer on top of it. In general there a Following are the tools it provides so far ## Whats in this library -### Async HTTP Server - -Provides an HTTP REST Server for asynchronous programming model. It works on top of evhtp library, but wraps threading model -C++ methods for evhtp C library. - ### Metrics A very high performance metrics collection (counters, histograms and gauges) and report the results in form of json or @@ -36,7 +31,7 @@ More details in the Wisr README under [src/wisr/README.md] ### FDS This is a bunch of data structures meant for high performance or specific use cases. Each of these structures are detailed in their -corresponding source files. Some of the major data structures are +corresponding source files. Some of the major data structures are listed below: #### Bitset A high performance bitset to have various functionalities to scan the contiguous 1s, 0s, set/reset multiple bits without iterating over @@ -61,6 +56,9 @@ Capture the vector in a pool in thread local fashion, so that vectors are not bu ### Settings Framework Please refer to the README under [src/settings/README.md] +### Flip +Flip is fault injection framework, Please refer to the README under [src/flip/README.md] + ## Installation This is mostly header only library and can be just compiled into your code. There are some of the pieces which needs a library (libsisl) to be built. diff --git a/src/flip/client/local/flip_client.hpp b/src/flip/client/local/flip_client.hpp index 467bfafa..94c420a9 100644 --- a/src/flip/client/local/flip_client.hpp +++ b/src/flip/client/local/flip_client.hpp @@ -1,3 +1,19 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #pragma once #include "lib/flip.hpp" diff --git a/src/flip/client/local/test_flip_local_client.cpp b/src/flip/client/local/test_flip_local_client.cpp index c9125baf..0e37e9ec 100644 --- a/src/flip/client/local/test_flip_local_client.cpp +++ b/src/flip/client/local/test_flip_local_client.cpp @@ -1,7 +1,19 @@ -// -// Created by Kadayam, Hari on 28/03/18. -// - +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #include "flip_spec.pb.h" #include "flip_client.hpp" #include diff --git a/src/flip/lib/flip.hpp b/src/flip/lib/flip.hpp index 19c8d5c1..b777ba42 100644 --- a/src/flip/lib/flip.hpp +++ b/src/flip/lib/flip.hpp @@ -1,6 +1,19 @@ -// -// Created by Kadayam, Hari on 28/03/18. -// +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #ifndef FLIP_FLIP_HPP #define FLIP_FLIP_HPP diff --git a/src/flip/lib/flip_rpc_server.cpp b/src/flip/lib/flip_rpc_server.cpp index 7090d757..94fa2d9e 100644 --- a/src/flip/lib/flip_rpc_server.cpp +++ b/src/flip/lib/flip_rpc_server.cpp @@ -1,3 +1,19 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #include #include diff --git a/src/flip/lib/flip_rpc_server.hpp b/src/flip/lib/flip_rpc_server.hpp index 061304ec..41c4471e 100644 --- a/src/flip/lib/flip_rpc_server.hpp +++ b/src/flip/lib/flip_rpc_server.hpp @@ -1,8 +1,20 @@ -// -// Created by Kadayam, Hari on Jun 12 2019. -// -#ifndef FLIP_FLIP_RCP_SERVER_HPP -#define FLIP_FLIP_RCP_SERVER_HPP +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ +#pragma once #include "flip_spec.pb.h" #include "flip_server.grpc.pb.h" @@ -11,9 +23,8 @@ namespace flip { class FlipRPCServer final : public FlipServer::Service { public: grpc::Status InjectFault(grpc::ServerContext* context, const FlipSpec* request, FlipResponse* response) override; - grpc::Status GetFaults(grpc::ServerContext* context, const FlipNameRequest* request, - FlipListResponse* response) override; + grpc::Status GetFaults(grpc::ServerContext* context, const FlipNameRequest* request, + FlipListResponse* response) override; static void rpc_thread(); }; } // namespace flip -#endif \ No newline at end of file diff --git a/src/flip/lib/test_flip.cpp b/src/flip/lib/test_flip.cpp index 63ddc48a..ace5bc04 100644 --- a/src/flip/lib/test_flip.cpp +++ b/src/flip/lib/test_flip.cpp @@ -1,7 +1,19 @@ -// -// Created by Kadayam, Hari on 28/03/18. -// - +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #include "flip_spec.pb.h" #include "flip.hpp" #include diff --git a/src/flip/lib/test_flip_server.cpp b/src/flip/lib/test_flip_server.cpp index 52e0a27c..1cb6b25a 100644 --- a/src/flip/lib/test_flip_server.cpp +++ b/src/flip/lib/test_flip_server.cpp @@ -1,7 +1,19 @@ -// -// Created by Kadayam, Hari on 28/03/18. -// - +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #include "flip.hpp" #include "options/options.h" From b4fbb93e8110da9723442414d5c2c2955b3d24cf Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Wed, 28 Sep 2022 15:48:04 -0700 Subject: [PATCH 137/385] Removed intermediate btree changes --- src/btree/CMakeLists.txt | 18 - src/btree/btree.hpp | 215 --- src/btree/btree.ipp | 415 ------ src/btree/btree_common.ipp | 367 ------ src/btree/btree_get_impl.hpp | 50 - src/btree/btree_internal.hpp | 280 ---- src/btree/btree_kv.hpp | 314 ----- src/btree/btree_mutate_impl.ipp | 523 -------- src/btree/btree_node.hpp | 607 --------- src/btree/btree_node_mgr.ipp | 480 ------- src/btree/btree_query_impl.ipp | 360 ----- src/btree/btree_remove_impl.ipp | 391 ------ src/btree/btree_req.hpp | 242 ---- src/btree/hs_btree.hpp | 396 ------ src/btree/mem_btree.hpp | 100 -- src/btree/rough/btree_node.cpp | 364 ----- src/btree/rough/physical_node.hpp | 525 -------- src/btree/rough/sisl_btree.hpp | 1894 --------------------------- src/btree/rough/sisl_btree_impl.hpp | 1653 ----------------------- src/btree/simple_node.hpp | 301 ----- src/btree/tests/btree_test_kvs.hpp | 294 ----- src/btree/tests/test_btree_node.cpp | 347 ----- src/btree/tests/test_mem_btree.cpp | 151 --- src/btree/varlen_node.hpp | 695 ---------- src/flip/CMakeLists.txt | 1 + 25 files changed, 1 insertion(+), 10982 deletions(-) delete mode 100644 src/btree/CMakeLists.txt delete mode 100644 src/btree/btree.hpp delete mode 100644 src/btree/btree.ipp delete mode 100644 src/btree/btree_common.ipp delete mode 100644 src/btree/btree_get_impl.hpp delete mode 100644 src/btree/btree_internal.hpp delete mode 100644 src/btree/btree_kv.hpp delete mode 100644 src/btree/btree_mutate_impl.ipp delete mode 100644 src/btree/btree_node.hpp delete mode 100644 src/btree/btree_node_mgr.ipp delete mode 100644 src/btree/btree_query_impl.ipp delete mode 100644 src/btree/btree_remove_impl.ipp delete mode 100644 src/btree/btree_req.hpp delete mode 100644 src/btree/hs_btree.hpp delete mode 100644 src/btree/mem_btree.hpp delete mode 100644 src/btree/rough/btree_node.cpp delete mode 100644 src/btree/rough/physical_node.hpp delete mode 100644 src/btree/rough/sisl_btree.hpp delete mode 100644 src/btree/rough/sisl_btree_impl.hpp delete mode 100644 src/btree/simple_node.hpp delete mode 100644 src/btree/tests/btree_test_kvs.hpp delete mode 100644 src/btree/tests/test_btree_node.cpp delete mode 100644 src/btree/tests/test_mem_btree.cpp delete mode 100644 src/btree/varlen_node.hpp diff --git a/src/btree/CMakeLists.txt b/src/btree/CMakeLists.txt deleted file mode 100644 index eb133b9f..00000000 --- a/src/btree/CMakeLists.txt +++ /dev/null @@ -1,18 +0,0 @@ -if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")) - add_flags("-Wno-unused-parameter -Wno-cast-function-type") -endif() - -include_directories(BEFORE ..) -include_directories(BEFORE .) - -set(TEST_BTREENODE_SOURCE_FILES - tests/test_btree_node.cpp - ) -add_executable(test_btree_node ${TEST_BTREENODE_SOURCE_FILES}) -target_link_libraries(test_btree_node sisl ${COMMON_DEPS} GTest::gtest) - -set(TEST_MEMBTREE_SOURCE_FILES - tests/test_mem_btree.cpp - ) -add_executable(test_mem_btree ${TEST_MEMBTREE_SOURCE_FILES}) -target_link_libraries(test_mem_btree sisl ${COMMON_DEPS} GTest::gtest) \ No newline at end of file diff --git a/src/btree/btree.hpp b/src/btree/btree.hpp deleted file mode 100644 index d68a22d4..00000000 --- a/src/btree/btree.hpp +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Created on: 14-May-2016 - * Author: Hari Kadayam - * - * Copyright � 2016 Kadayam, Hari. All rights reserved. - */ -#pragma once - -#include -#include - -#include -#include "btree_internal.hpp" -#include "btree_req.hpp" -#include "btree_kv.hpp" -#include "btree_node.hpp" - -namespace sisl { -namespace btree { - -#ifdef INCASE_WE_NEED_COMMON -template < typename K, typename V > -class BtreeCommon { -public: - void deref_node(BtreeNode< K >* node) = 0; -}; -#endif - -template < typename K > -using BtreeNodePtr = boost::intrusive_ptr< sisl::btree::BtreeNode< K > >; - -template < typename K, typename V > -struct BtreeThreadVariables { - std::vector< btree_locked_node_info< K, V > > wr_locked_nodes; - std::vector< btree_locked_node_info< K, V > > rd_locked_nodes; - BtreeNodePtr< K > force_split_node{nullptr}; -}; - -template < typename K, typename V > -class Btree { -private: - mutable folly::SharedMutexWritePriority m_btree_lock; - bnodeid_t m_root_node_id{empty_bnodeid}; - uint32_t m_max_nodes; - - BtreeMetrics m_metrics; - std::atomic< bool > m_destroyed{false}; - std::atomic< uint64_t > m_total_nodes{0}; - uint32_t m_node_size{4096}; -#ifndef NDEBUG - std::atomic< uint64_t > m_req_id{0}; -#endif - - // This workaround of BtreeThreadVariables is needed instead of directly declaring statics - // to overcome the gcc bug, pointer here: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66944 - static BtreeThreadVariables< K, V >* bt_thread_vars() { - static thread_local BtreeThreadVariables< K, V >* s_ptr{nullptr}; - if (s_ptr == nullptr) { - static thread_local BtreeThreadVariables< K, V > inst; - s_ptr = &inst; - } - return s_ptr; - } - -protected: - BtreeConfig m_bt_cfg; - -public: - /////////////////////////////////////// All External APIs ///////////////////////////// - Btree(const BtreeConfig& cfg); - virtual ~Btree(); - virtual btree_status_t init(void* op_context); - btree_status_t put(BtreeMutateRequest& put_req); - btree_status_t get(BtreeGetRequest& greq) const; - btree_status_t remove(BtreeRemoveRequest& rreq); - btree_status_t query(BtreeQueryRequest& query_req, std::vector< std::pair< K, V > >& out_values) const; - // bool verify_tree(bool update_debug_bm) const; - virtual std::pair< btree_status_t, uint64_t > destroy_btree(void* context); - nlohmann::json get_status(int log_level) const; - void print_tree() const; - nlohmann::json get_metrics_in_json(bool updated = true); - - // static void set_io_flip(); - // static void set_error_flip(); - - // static std::array< std::shared_ptr< BtreeCommon< K, V > >, sizeof(btree_stores_t) > s_btree_stores; - // static std::mutex s_store_reg_mtx; - -protected: - /////////////////////////// Methods the underlying store is expected to handle /////////////////////////// - virtual BtreeNodePtr< K > alloc_node(bool is_leaf, bool& is_new_allocation, - const BtreeNodePtr< K >& copy_from = nullptr) = 0; - virtual BtreeNode< K >* init_node(uint8_t* node_buf, bnodeid_t id, bool init_buf, bool is_leaf); - virtual btree_status_t read_node(bnodeid_t id, BtreeNodePtr< K >& bnode) const = 0; - virtual btree_status_t write_node(const BtreeNodePtr< K >& bn, const BtreeNodePtr< K >& dependent_bn, - void* context); - virtual btree_status_t write_node_sync(const BtreeNodePtr< K >& node, void* context); - virtual void swap_node(const BtreeNodePtr< K >& node1, const BtreeNodePtr< K >& node2, void* context) = 0; - virtual btree_status_t refresh_node(const BtreeNodePtr< K >& bn, bool is_write_modifiable, void* context) const = 0; - virtual void free_node(const BtreeNodePtr< K >& node, void* context) = 0; - - virtual void create_tree_precommit(const BtreeNodePtr< K >& root_node, void* op_context) = 0; - virtual void split_node_precommit(const BtreeNodePtr< K >& parent_node, const BtreeNodePtr< K >& child_node1, - const BtreeNodePtr< K >& child_node2, bool root_split, bool edge_split, - void* op_context) = 0; - virtual void merge_node_precommit(bool is_root_merge, const BtreeNodePtr< K >& parent_node, - uint32_t parent_merge_start_idx, const BtreeNodePtr< K >& child_node1, - const std::vector< BtreeNodePtr< K > >* old_child_nodes, - const std::vector< BtreeNodePtr< K > >* replace_child_nodes, - void* op_context) = 0; - virtual std::string btree_store_type() const = 0; - - /////////////////////////// Methods the application use case is expected to handle /////////////////////////// - virtual int64_t compute_single_put_needed_size(const V& current_val, const V& new_val) const; - virtual int64_t compute_range_put_needed_size(const std::vector< std::pair< K, V > >& existing_kvs, - const V& new_val) const; - virtual btree_status_t custom_kv_select_for_write(uint8_t node_version, - const std::vector< std::pair< K, V > >& match_kv, - std::vector< std::pair< K, V > >& replace_kv, - const BtreeKeyRange& range, - const BtreeRangeUpdateRequest& rureq) const; - virtual btree_status_t custom_kv_select_for_read(uint8_t node_version, - const std::vector< std::pair< K, V > >& match_kv, - std::vector< std::pair< K, V > >& replace_kv, - const BtreeKeyRange& range, const BtreeRangeRequest& qreq) const; - -protected: - /////////////////////////////// Internal Node Management Methods //////////////////////////////////// - std::pair< btree_status_t, bnodeid_t > create_root_node(void* op_context); - btree_status_t read_and_lock_root(bnodeid_t id, BtreeNodePtr< K >& node_ptr, locktype_t int_lock_type, - locktype_t leaf_lock_type, void* context) const; - btree_status_t read_and_lock_child(bnodeid_t child_id, BtreeNodePtr< K >& child_node, - const BtreeNodePtr< K >& parent_node, uint32_t parent_ind, - locktype_t int_lock_type, locktype_t leaf_lock_type, void* context) const; - btree_status_t read_and_lock_sibling(bnodeid_t id, BtreeNodePtr< K >& node_ptr, locktype_t int_lock_type, - locktype_t leaf_lock_type, void* context) const; - btree_status_t read_and_lock_node(bnodeid_t id, BtreeNodePtr< K >& node_ptr, locktype_t int_lock_type, - locktype_t leaf_lock_type, void* context) const; - btree_status_t get_child_and_lock_node(const BtreeNodePtr< K >& node, uint32_t index, BtreeNodeInfo& child_info, - BtreeNodePtr< K >& child_node, locktype_t int_lock_type, - locktype_t leaf_lock_type, void* context) const; - virtual btree_status_t write_node(const BtreeNodePtr< K >& node, void* context); - void read_node_or_fail(bnodeid_t id, BtreeNodePtr< K >& node) const; - btree_status_t upgrade_node(const BtreeNodePtr< K >& my_node, BtreeNodePtr< K > child_node, void* context, - locktype_t& cur_lock, locktype_t& child_cur_lock); - btree_status_t _lock_and_refresh_node(const BtreeNodePtr< K >& node, locktype_t type, void* context, - const char* fname, int line) const; - btree_status_t _lock_node_upgrade(const BtreeNodePtr< K >& node, void* context, const char* fname, int line); - void unlock_node(const BtreeNodePtr< K >& node, locktype_t type) const; - BtreeNodePtr< K > alloc_leaf_node(); - BtreeNodePtr< K > alloc_interior_node(); - void do_free_node(const BtreeNodePtr< K >& node); - std::pair< btree_status_t, uint64_t > do_destroy(); - void observe_lock_time(const BtreeNodePtr< K >& node, locktype_t type, uint64_t time_spent) const; - - static void _start_of_lock(const BtreeNodePtr< K >& node, locktype_t ltype, const char* fname, int line); - static bool remove_locked_node(const BtreeNodePtr< K >& node, locktype_t ltype, - btree_locked_node_info< K, V >* out_info); - static uint64_t end_of_lock(const BtreeNodePtr< K >& node, locktype_t ltype); -#ifndef NDEBUG - static void check_lock_debug(); -#endif - - /////////////////////////////////// Helper Methods /////////////////////////////////////// - btree_status_t post_order_traversal(locktype_t acq_lock, const auto& cb); - btree_status_t post_order_traversal(const BtreeNodePtr< K >& node, locktype_t acq_lock, const auto& cb); - void get_all_kvs(std::vector< pair< K, V > >& kvs) const; - btree_status_t do_destroy(uint64_t& n_freed_nodes, void* context); - uint64_t get_btree_node_cnt() const; - uint64_t get_child_node_cnt(bnodeid_t bnodeid) const; - void to_string(bnodeid_t bnodeid, std::string& buf) const; - void validate_sanity_child(const BtreeNodePtr< K >& parent_node, uint32_t ind) const; - void validate_sanity_next_child(const BtreeNodePtr< K >& parent_node, uint32_t ind) const; - void print_node(const bnodeid_t& bnodeid) const; - - //////////////////////////////// Impl Methods ////////////////////////////////////////// - - ///////// Mutate Impl Methods - btree_status_t do_put(const BtreeNodePtr< K >& my_node, locktype_t curlock, BtreeMutateRequest& put_req, - int ind_hint); - btree_status_t mutate_write_leaf_node(const BtreeNodePtr< K >& my_node, BtreeMutateRequest& req); - btree_status_t check_and_split_node(const BtreeNodePtr< K >& my_node, BtreeMutateRequest& req, int ind_hint, - const BtreeNodePtr< K >& child_node, locktype_t& curlock, - locktype_t& child_curlock, int child_ind, bool& split_occured); - btree_status_t check_split_root(BtreeMutateRequest& req); - btree_status_t split_node(const BtreeNodePtr< K >& parent_node, const BtreeNodePtr< K >& child_node, - uint32_t parent_ind, BtreeKey* out_split_key, bool root_split, void* context); - bool is_split_needed(const BtreeNodePtr< K >& node, const BtreeConfig& cfg, BtreeMutateRequest& req) const; - btree_status_t get_start_and_end_ind(const BtreeNodePtr< K >& node, BtreeMutateRequest& req, int& start_ind, - int& end_ind); - - ///////// Remove Impl Methods - btree_status_t do_remove(const BtreeNodePtr< K >& my_node, locktype_t curlock, BtreeRemoveRequest& rreq); - btree_status_t check_collapse_root(void* context); - btree_status_t merge_nodes(const BtreeNodePtr< K >& parent_node, uint32_t start_indx, uint32_t end_indx, - void* context); - - ///////// Query Impl Methods - btree_status_t do_sweep_query(BtreeNodePtr< K >& my_node, BtreeQueryRequest& qreq, - std::vector< std::pair< K, V > >& out_values) const; - btree_status_t do_traversal_query(const BtreeNodePtr< K >& my_node, BtreeQueryRequest& qreq, - std::vector< std::pair< K, V > >& out_values) const; -#ifdef SERIALIZABLE_QUERY_IMPLEMENTATION - btree_status_t do_serialzable_query(const BtreeNodePtr< K >& my_node, BtreeSerializableQueryRequest& qreq, - std::vector< std::pair< K, V > >& out_values); - btree_status_t sweep_query(BtreeQueryRequest& qreq, std::vector< std::pair< K, V > >& out_values); - btree_status_t serializable_query(BtreeSerializableQueryRequest& qreq, - std::vector< std::pair< K, V > >& out_values); -#endif - - ///////// Get Impl Methods - btree_status_t do_get(const BtreeNodePtr< K >& my_node, BtreeGetRequest& greq) const; -}; -} // namespace btree -} // namespace sisl diff --git a/src/btree/btree.ipp b/src/btree/btree.ipp deleted file mode 100644 index 813e3a17..00000000 --- a/src/btree/btree.ipp +++ /dev/null @@ -1,415 +0,0 @@ -/* - * Created on: 14-May-2016 - * Author: Hari Kadayam - * - * Copyright � 2016 Kadayam, Hari. All rights reserved. - */ -#pragma once - -#include -#include -#include -#include - -#include -//#include -#include "logging/logging.h" - -#include "fds/buffer.hpp" -#include "btree.hpp" -#include "btree_common.ipp" -#include "btree_node_mgr.ipp" -#include "btree_mutate_impl.ipp" -#include "btree_query_impl.ipp" -#include "btree/btree_node.hpp" - -SISL_LOGGING_DECL(btree) -namespace sisl { -namespace btree { -#if 0 -#define container_of(ptr, type, member) ({ (type*)((char*)ptr - offsetof(type, member)); }) -#endif - -template < typename K, typename V > -Btree< K, V >::Btree(const BtreeConfig& cfg) : - m_metrics{cfg.name().c_str()}, m_node_size{cfg.node_size()}, m_bt_cfg{cfg} { - // calculate number of nodes - const uint32_t node_area_size = BtreeNode< K >::node_area_size(cfg); - uint32_t max_leaf_nodes = - (m_bt_cfg.max_objs() * (m_bt_cfg.max_key_size() + m_bt_cfg.max_value_size())) / node_area_size + 1; - max_leaf_nodes += (100 * max_leaf_nodes) / 60; // Assume 60% btree full - m_max_nodes = max_leaf_nodes + ((double)max_leaf_nodes * 0.05) + 1; // Assume 5% for interior nodes -} - -template < typename K, typename V > -Btree< K, V >::~Btree() = default; - -template < typename K, typename V > -btree_status_t Btree< K, V >::init(void* op_context) { - const auto ret = create_root_node(op_context); - return ret.first; -} - -template < typename K, typename V > -std::pair< btree_status_t, uint64_t > Btree< K, V >::destroy_btree(void* context) { - btree_status_t ret{btree_status_t::success}; - uint64_t n_freed_nodes{0}; - - bool expected = false; - if (!m_destroyed.compare_exchange_strong(expected, true)) { - BT_LOG(DEBUG, "Btree is already being destroyed, ignorining this request"); - return std::make_pair(btree_status_t::not_found, 0); - } - ret = do_destroy(n_freed_nodes, context); - if (ret == btree_status_t::success) { - BT_LOG(DEBUG, "btree(root: {}) {} nodes destroyed successfully", m_root_node_id, n_freed_nodes); - } else { - m_destroyed = false; - BT_LOG(ERROR, "btree(root: {}) nodes destroyed failed, ret: {}", m_root_node_id, ret); - } - - return std::make_pair(ret, n_freed_nodes); -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::put(BtreeMutateRequest& put_req) { - COUNTER_INCREMENT(m_metrics, btree_write_ops_count, 1); - auto acq_lock = locktype_t::READ; - int ind = -1; - bool is_leaf = false; - - m_btree_lock.lock_shared(); - btree_status_t ret = btree_status_t::success; - -retry: -#ifndef NDEBUG - check_lock_debug(); -#endif - BT_LOG_ASSERT_EQ(bt_thread_vars()->rd_locked_nodes.size(), 0); - BT_LOG_ASSERT_EQ(bt_thread_vars()->wr_locked_nodes.size(), 0); - - BtreeNodePtr< K > root; - ret = read_and_lock_root(m_root_node_id, root, acq_lock, acq_lock, put_req_op_ctx(put_req)); - if (ret != btree_status_t::success) { goto out; } - is_leaf = root->is_leaf(); - - if (is_split_needed(root, m_bt_cfg, put_req)) { - // Time to do the split of root. - unlock_node(root, acq_lock); - m_btree_lock.unlock_shared(); - ret = check_split_root(put_req); - BT_LOG_ASSERT_EQ(bt_thread_vars()->rd_locked_nodes.size(), 0); - BT_LOG_ASSERT_EQ(bt_thread_vars()->wr_locked_nodes.size(), 0); - - // We must have gotten a new root, need to start from scratch. - m_btree_lock.lock_shared(); - if (ret != btree_status_t::success) { - LOGERROR("root split failed btree name {}", m_bt_cfg.name()); - goto out; - } - - goto retry; - } else if ((is_leaf) && (acq_lock != locktype_t::WRITE)) { - // Root is a leaf, need to take write lock, instead of read, retry - unlock_node(root, acq_lock); - acq_lock = locktype_t::WRITE; - goto retry; - } else { - ret = do_put(root, acq_lock, put_req, ind); - if (ret == btree_status_t::retry) { - // Need to start from top down again, since there is a race between 2 inserts or deletes. - acq_lock = locktype_t::READ; - BT_LOG(TRACE, "retrying put operation"); - BT_LOG_ASSERT_EQ(bt_thread_vars()->rd_locked_nodes.size(), 0); - BT_LOG_ASSERT_EQ(bt_thread_vars()->wr_locked_nodes.size(), 0); - goto retry; - } - } - -out: - m_btree_lock.unlock_shared(); -#ifndef NDEBUG - check_lock_debug(); -#endif - if (ret != btree_status_t::success && ret != btree_status_t::fast_path_not_possible && - ret != btree_status_t::cp_mismatch) { - BT_LOG(ERROR, "btree put failed {}", ret); - COUNTER_INCREMENT(m_metrics, write_err_cnt, 1); - } - - return ret; -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::get(BtreeGetRequest& greq) const { - btree_status_t ret = btree_status_t::success; - bool is_found; - - m_btree_lock.lock_shared(); - BtreeNodePtr< K > root; - - ret = read_and_lock_root(m_root_node_id, root, locktype_t::READ, locktype_t::READ, get_req_op_ctx(greq)); - if (ret != btree_status_t::success) { goto out; } - - ret = do_get(root, greq); -out: - m_btree_lock.unlock_shared(); - -#ifndef NDEBUG - check_lock_debug(); -#endif - return ret; -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::remove(BtreeRemoveRequest& rreq) { - locktype_t acq_lock = locktype_t::READ; - bool is_found = false; - bool is_leaf = false; - - m_btree_lock.lock_shared(); - -retry: - btree_status_t status = btree_status_t::success; - - BtreeNodePtr< K > root; - status = read_and_lock_root(m_root_node_id, root, acq_lock, acq_lock, remove_req_op_ctx(rreq)); - if (status != btree_status_t::success) { goto out; } - is_leaf = root->is_leaf(); - - if (root->get_total_entries() == 0) { - if (is_leaf) { - // There are no entries in btree. - unlock_node(root, acq_lock); - status = btree_status_t::not_found; - BT_LOG(DEBUG, root, "entry not found in btree"); - goto out; - } - BT_LOG_ASSERT(root->has_valid_edge(), root, "Invalid edge id"); - unlock_node(root, acq_lock); - m_btree_lock.unlock_shared(); - - status = check_collapse_root(remove_req_op_ctx(rreq)); - if (status != btree_status_t::success) { - LOGERROR("check collapse read failed btree name {}", m_bt_cfg.name()); - goto out; - } - - // We must have gotten a new root, need to - // start from scratch. - m_btree_lock.lock_shared(); - goto retry; - } else if ((is_leaf) && (acq_lock != locktype_t::WRITE)) { - // Root is a leaf, need to take write lock, instead - // of read, retry - unlock_node(root, acq_lock); - acq_lock = locktype_t::WRITE; - goto retry; - } else { - status = do_remove(root, acq_lock, rreq); - if (status == btree_status_t::retry) { - // Need to start from top down again, since - // there is a race between 2 inserts or deletes. - acq_lock = locktype_t::READ; - goto retry; - } - } - -out: - m_btree_lock.unlock_shared(); -#ifndef NDEBUG - check_lock_debug(); -#endif - return status; -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::query(BtreeQueryRequest& qreq, std::vector< std::pair< K, V > >& out_values) const { - COUNTER_INCREMENT(m_metrics, btree_query_ops_count, 1); - - btree_status_t ret = btree_status_t::success; - if (qreq.batch_size() == 0) { return ret; } - - m_btree_lock.lock_shared(); - BtreeNodePtr< K > root = nullptr; - ret = read_and_lock_root(m_root_node_id, root, locktype_t::READ, locktype_t::READ, qreq.m_op_context); - if (ret != btree_status_t::success) { goto out; } - - switch (qreq.query_type()) { - case BtreeQueryType::SWEEP_NON_INTRUSIVE_PAGINATION_QUERY: - ret = do_sweep_query(root, qreq, out_values); - break; - - case BtreeQueryType::TREE_TRAVERSAL_QUERY: - ret = do_traversal_query(root, qreq, out_values); - break; - - default: - unlock_node(root, locktype_t::READ); - LOGERROR("Query type {} is not supported yet", qreq.query_type()); - break; - } - - if ((qreq.query_type() == BtreeQueryType::SWEEP_NON_INTRUSIVE_PAGINATION_QUERY || - qreq.query_type() == BtreeQueryType::TREE_TRAVERSAL_QUERY) && - out_values.size() > 0) { - - /* if return is not success then set the cursor to last read. No need to set cursor if user is not - * interested in it. - */ - qreq.search_state().set_cursor_key< K >(out_values.back().first); - - /* check if we finished just at the last key */ - if (out_values.back().first.compare(qreq.input_range().end_key()) == 0) { ret = btree_status_t::success; } - } - -out: - m_btree_lock.unlock_shared(); -#ifndef NDEBUG - check_lock_debug(); -#endif - if (ret != btree_status_t::success && ret != btree_status_t::has_more && - ret != btree_status_t::fast_path_not_possible) { - BT_LOG(ERROR, "btree query failed {}", ret); - COUNTER_INCREMENT(m_metrics, query_err_cnt, 1); - } - return ret; -} - -#if 0 -/** - * @brief : verify btree is consistent and no corruption; - * - * @param update_debug_bm : true or false; - * - * @return : true if btree is not corrupted. - * false if btree is corrupted; - */ -template < typename K, typename V > -bool Btree< K, V >::verify_tree(bool update_debug_bm) const { - m_btree_lock.lock_shared(); - bool ret = verify_node(m_root_node_id, nullptr, -1, update_debug_bm); - m_btree_lock.unlock_shared(); - - return ret; -} -#endif - -/** - * @brief : get the status of this btree; - * - * @param log_level : verbosity level; - * - * @return : status in json form; - */ -template < typename K, typename V > -nlohmann::json Btree< K, V >::get_status(int log_level) const { - nlohmann::json j; - return j; -} - -template < typename K, typename V > -void Btree< K, V >::print_tree() const { - std::string buf; - m_btree_lock.lock_shared(); - to_string(m_root_node_id, buf); - m_btree_lock.unlock_shared(); - - BT_LOG(INFO, "Pre order traversal of tree:\n<{}>", buf); -} - -template < typename K, typename V > -nlohmann::json Btree< K, V >::get_metrics_in_json(bool updated) { - return m_metrics.get_result_in_json(updated); -} - -// TODO: Commenting out flip till we figure out how to move flip dependency inside sisl package. -#if 0 -#ifdef _PRERELEASE -template < typename K, typename V > -static void Btree< K, V >::set_io_flip() { - /* IO flips */ - FlipClient* fc = homestore::HomeStoreFlip::client_instance(); - FlipFrequency freq; - FlipCondition cond1; - FlipCondition cond2; - freq.set_count(2000000000); - freq.set_percent(2); - - FlipCondition null_cond; - fc->create_condition("", flip::Operator::DONT_CARE, (int)1, &null_cond); - - fc->create_condition("nuber of entries in a node", flip::Operator::EQUAL, 0, &cond1); - fc->create_condition("nuber of entries in a node", flip::Operator::EQUAL, 1, &cond2); - fc->inject_noreturn_flip("btree_upgrade_node_fail", {cond1, cond2}, freq); - - fc->create_condition("nuber of entries in a node", flip::Operator::EQUAL, 4, &cond1); - fc->create_condition("nuber of entries in a node", flip::Operator::EQUAL, 2, &cond2); - - fc->inject_retval_flip("btree_delay_and_split", {cond1, cond2}, freq, 20); - fc->inject_retval_flip("btree_delay_and_split_leaf", {cond1, cond2}, freq, 20); - fc->inject_noreturn_flip("btree_parent_node_full", {null_cond}, freq); - fc->inject_noreturn_flip("btree_leaf_node_split", {null_cond}, freq); - fc->inject_retval_flip("btree_upgrade_delay", {null_cond}, freq, 20); - fc->inject_retval_flip("writeBack_completion_req_delay_us", {null_cond}, freq, 20); - fc->inject_noreturn_flip("btree_read_fast_path_not_possible", {null_cond}, freq); -} - -template < typename K, typename V > -static void Btree< K, V >::set_error_flip() { - /* error flips */ - FlipClient* fc = homestore::HomeStoreFlip::client_instance(); - FlipFrequency freq; - freq.set_count(20); - freq.set_percent(10); - - FlipCondition null_cond; - fc->create_condition("", flip::Operator::DONT_CARE, (int)1, &null_cond); - - fc->inject_noreturn_flip("btree_read_fail", {null_cond}, freq); - fc->inject_noreturn_flip("fixed_blkalloc_no_blks", {null_cond}, freq); -} -#endif -#endif - -template < typename K > -void intrusive_ptr_add_ref(BtreeNode< K >* node) { - node->m_refcount.increment(1); -} - -template < typename K > -void intrusive_ptr_release(BtreeNode< K >* node) { - if (node->m_refcount.decrement_testz(1)) { delete node; } -} - -#ifdef INCASE_WE_NEED_COMMON -template < typename K, typename V > -bool Btree< K, V >::create_store_common(btree_store_t store_type, - std::function< std::shared_ptr< BtreeCommon< K, V > >() >&& create_cb) { - std::unique_lock lg(s_store_reg_mtx); - if (s_btree_stores[int_cast(store_type)] != nullptr) { return false; } - s_btree_stores[int_cast(store_type)] = create_cb(); - return true; -} - -// Get doesn't need to take any lock, since the create/register is once and started once. Please don't add the lock -// here as this is called in critical path and completely unneccessary. -template < typename K, typename V > -BtreeCommon< K, V >* Btree< K, V >::get_store_common(uint8_t store_type) { - return s_btree_stores[store_type].get(); -} - -friend void intrusive_ptr_add_ref(BtreeNode< K >* node) { node->m_refcount.increment(1); } -friend void intrusive_ptr_release(BtreeNode< K >* node) { Btree< K, V >::get_store_common()->deref_node(node); } - -// static inline const char* _type_desc(const BtreeNodePtr< K >& n) { return n->is_leaf() ? "L" : "I"; } - -template < typename K, typename V > -std::array< std::shared_ptr< BtreeCommon< K, V > >, sizeof(btree_stores_t) > Btree< K, V >::s_btree_stores; - -template < typename K, typename V > -std::mutex Btree< K, V >::s_store_reg_mtx; -#endif - -} // namespace btree -} // namespace sisl diff --git a/src/btree/btree_common.ipp b/src/btree/btree_common.ipp deleted file mode 100644 index 22cc337b..00000000 --- a/src/btree/btree_common.ipp +++ /dev/null @@ -1,367 +0,0 @@ -#pragma once -#include "btree.hpp" - -namespace sisl { -namespace btree { - -template < typename K, typename V > -btree_status_t Btree< K, V >::post_order_traversal(locktype_t ltype, const auto& cb) { - BtreeNodePtr< K > root; - - if (ltype == locktype_t::READ) { - m_btree_lock.lock_shared(); - } else if (ltype == locktype_t::WRITE) { - m_btree_lock.lock(); - } - - btree_status_t ret{btree_status_t::success}; - if (m_root_node_id != empty_bnodeid) { - read_and_lock_root(m_root_node_id, root, ltype, ltype, nullptr); - if (ret != btree_status_t::success) { goto done; } - - ret = post_order_traversal(root, ltype, cb); - } -done: - if (ltype == locktype_t::READ) { - m_btree_lock.unlock_shared(); - } else if (ltype == locktype_t::WRITE) { - m_btree_lock.unlock(); - } - return ret; -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::post_order_traversal(const BtreeNodePtr< K >& node, locktype_t ltype, const auto& cb) { - uint32_t i{0}; - btree_status_t ret = btree_status_t::success; - - if (!node->is_leaf()) { - BtreeNodeInfo child_info; - while (i <= node->get_total_entries()) { - if (i == node->get_total_entries()) { - if (!node->has_valid_edge()) { break; } - child_info.set_bnode_id(node->get_edge_id()); - } else { - node->get_nth_value(i, &child_info, false /* copy */); - } - - BtreeNodePtr< K > child; - ret = read_and_lock_child(child_info.bnode_id(), child, node, i, ltype, ltype, nullptr); - if (ret != btree_status_t::success) { return ret; } - ret = post_order_traversal(child, ltype, cb); - unlock_node(child, ltype); - ++i; - } - cb(node, false /* is_leaf */); - } - - if (ret == btree_status_t::success) { cb(node, true /* is_leaf */); } - return ret; -} - -template < typename K, typename V > -void Btree< K, V >::get_all_kvs(std::vector< pair< K, V > >& kvs) const { - post_order_traversal(locktype_t::READ, [this, &kvs](const auto& node, bool is_leaf) { - if (!is_leaf) { node->get_all_kvs(kvs); } - }); -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::do_destroy(uint64_t& n_freed_nodes, void* context) { - return post_order_traversal(locktype_t::WRITE, [this, &n_freed_nodes, context](const auto& node, bool is_leaf) { - free_node(node, context); - ++n_freed_nodes; - }); -} - -template < typename K, typename V > -uint64_t Btree< K, V >::get_btree_node_cnt() const { - uint64_t cnt = 1; /* increment it for root */ - m_btree_lock.lock_shared(); - cnt += get_child_node_cnt(m_root_node_id); - m_btree_lock.unlock_shared(); - return cnt; -} - -template < typename K, typename V > -uint64_t Btree< K, V >::get_child_node_cnt(bnodeid_t bnodeid) const { - uint64_t cnt{0}; - BtreeNodePtr< K > node; - locktype_t acq_lock = locktype_t::READ; - - if (read_and_lock_node(bnodeid, node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { return cnt; } - if (!node->is_leaf()) { - uint32_t i = 0; - while (i < node->get_total_entries()) { - BtreeNodeInfo p = node->get(i, false); - cnt += get_child_node_cnt(p.bnode_id()) + 1; - ++i; - } - if (node->has_valid_edge()) { cnt += get_child_node_cnt(node->get_edge_id()) + 1; } - } - unlock_node(node, acq_lock); - return cnt; -} - -template < typename K, typename V > -void Btree< K, V >::to_string(bnodeid_t bnodeid, std::string& buf) const { - BtreeNodePtr< K > node; - - locktype_t acq_lock = locktype_t::READ; - - if (read_and_lock_node(bnodeid, node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { return; } - fmt::format_to(std::back_inserter(buf), "{}\n", node->to_string(true /* print_friendly */)); - - if (!node->is_leaf()) { - uint32_t i = 0; - while (i < node->get_total_entries()) { - BtreeNodeInfo p; - node->get_nth_value(i, &p, false); - to_string(p.bnode_id(), buf); - ++i; - } - if (node->has_valid_edge()) { to_string(node->get_edge_id(), buf); } - } - unlock_node(node, acq_lock); -} - -#if 0 - btree_status_t merge_node_replay(btree_journal_entry* jentry, const btree_cp_ptr& bcp) { - BtreeNodePtr< K > parent_node = (jentry->is_root) ? read_node(m_root_node_id) : read_node(jentry->parent_node.node_id); - - // Parent already went ahead of the journal entry, return done - if (parent_node->get_gen() >= jentry->parent_node.node_gen) { return btree_status_t::replay_not_needed; } - } -#endif - -template < typename K, typename V > -void Btree< K, V >::validate_sanity_child(const BtreeNodePtr< K >& parent_node, uint32_t ind) const { - BtreeNodeInfo child_info; - K child_first_key; - K child_last_key; - K parent_key; - - parent_node->get(ind, &child_info, false /* copy */); - BtreeNodePtr< K > child_node = nullptr; - auto ret = read_node(child_info.bnode_id(), child_node); - BT_REL_ASSERT_EQ(ret, btree_status_t::success, "read failed, reason: {}", ret); - if (child_node->get_total_entries() == 0) { - auto parent_entries = parent_node->get_total_entries(); - if (!child_node->is_leaf()) { // leaf node or edge node can have 0 entries - BT_REL_ASSERT_EQ(((parent_node->has_valid_edge() && ind == parent_entries)), true); - } - return; - } - child_node->get_first_key(&child_first_key); - child_node->get_last_key(&child_last_key); - BT_REL_ASSERT_LE(child_first_key.compare(&child_last_key), 0); - if (ind == parent_node->get_total_entries()) { - BT_REL_ASSERT_EQ(parent_node->has_valid_edge(), true); - if (ind > 0) { - parent_node->get_nth_key(ind - 1, &parent_key, false); - BT_REL_ASSERT_GT(child_first_key.compare(&parent_key), 0); - BT_REL_ASSERT_LT(parent_key.compare_start(&child_first_key), 0); - } - } else { - parent_node->get_nth_key(ind, &parent_key, false); - BT_REL_ASSERT_LE(child_first_key.compare(&parent_key), 0) - BT_REL_ASSERT_LE(child_last_key.compare(&parent_key), 0) - BT_REL_ASSERT_GE(parent_key.compare_start(&child_first_key), 0) - BT_REL_ASSERT_GE(parent_key.compare_start(&child_first_key), 0) - if (ind != 0) { - parent_node->get_nth_key(ind - 1, &parent_key, false); - BT_REL_ASSERT_GT(child_first_key.compare(&parent_key), 0) - BT_REL_ASSERT_LT(parent_key.compare_start(&child_first_key), 0) - } - } -} - -template < typename K, typename V > -void Btree< K, V >::validate_sanity_next_child(const BtreeNodePtr< K >& parent_node, uint32_t ind) const { - BtreeNodeInfo child_info; - K child_key; - K parent_key; - - if (parent_node->has_valid_edge()) { - if (ind == parent_node->get_total_entries()) { return; } - } else { - if (ind == parent_node->get_total_entries() - 1) { return; } - } - parent_node->get(ind + 1, &child_info, false /* copy */); - - BtreeNodePtr< K > child_node = nullptr; - auto ret = read_node(child_info.bnode_id(), child_node); - BT_REL_ASSERT_EQ(ret, btree_status_t::success, "read failed, reason: {}", ret); - - if (child_node->get_total_entries() == 0) { - auto parent_entries = parent_node->get_total_entries(); - if (!child_node->is_leaf()) { // leaf node can have 0 entries - BT_REL_ASSERT_EQ(((parent_node->has_valid_edge() && ind == parent_entries) || (ind = parent_entries - 1)), - true); - } - return; - } - /* in case of merge next child will never have zero entries otherwise it would have been merged */ - BT_NODE_REL_ASSERT_NE(child_node->get_total_entries(), 0, child_node); - child_node->get_first_key(&child_key); - parent_node->get_nth_key(ind, &parent_key, false); - BT_REL_ASSERT_GT(child_key.compare(&parent_key), 0) - BT_REL_ASSERT_LT(parent_key.compare_start(&child_key), 0) -} - -template < typename K, typename V > -void Btree< K, V >::print_node(const bnodeid_t& bnodeid) const { - std::string buf; - BtreeNodePtr< K > node; - - m_btree_lock.lock_shared(); - locktype_t acq_lock = locktype_t::READ; - if (read_and_lock_node(bnodeid, node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { goto done; } - buf = node->to_string(true /* print_friendly */); - unlock_node(node, acq_lock); - -done: - m_btree_lock.unlock_shared(); - - BT_LOG(INFO, "Node: <{}>", buf); -} - -#if 0 -template < typename K, typename V > -void Btree< K, V >::diff(Btree* other, uint32_t param, vector< pair< K, V > >* diff_kv) { - std::vector< pair< K, V > > my_kvs, other_kvs; - - get_all_kvs(&my_kvs); - other->get_all_kvs(&other_kvs); - auto it1 = my_kvs.begin(); - auto it2 = other_kvs.begin(); - - K k1, k2; - V v1, v2; - - if (it1 != my_kvs.end()) { - k1 = it1->first; - v1 = it1->second; - } - if (it2 != other_kvs.end()) { - k2 = it2->first; - v2 = it2->second; - } - - while ((it1 != my_kvs.end()) && (it2 != other_kvs.end())) { - if (k1.preceeds(&k2)) { - /* k1 preceeds k2 - push k1 and continue */ - diff_kv->emplace_back(make_pair(k1, v1)); - it1++; - if (it1 == my_kvs.end()) { break; } - k1 = it1->first; - v1 = it1->second; - } else if (k1.succeeds(&k2)) { - /* k2 preceeds k1 - push k2 and continue */ - diff_kv->emplace_back(make_pair(k2, v2)); - it2++; - if (it2 == other_kvs.end()) { break; } - k2 = it2->first; - v2 = it2->second; - } else { - /* k1 and k2 overlaps */ - std::vector< pair< K, V > > overlap_kvs; - diff_read_next_t to_read = READ_BOTH; - - v1.get_overlap_diff_kvs(&k1, &v1, &k2, &v2, param, to_read, overlap_kvs); - for (auto ovr_it = overlap_kvs.begin(); ovr_it != overlap_kvs.end(); ovr_it++) { - diff_kv->emplace_back(make_pair(ovr_it->first, ovr_it->second)); - } - - switch (to_read) { - case READ_FIRST: - it1++; - if (it1 == my_kvs.end()) { - // Add k2,v2 - diff_kv->emplace_back(make_pair(k2, v2)); - it2++; - break; - } - k1 = it1->first; - v1 = it1->second; - break; - - case READ_SECOND: - it2++; - if (it2 == other_kvs.end()) { - diff_kv->emplace_back(make_pair(k1, v1)); - it1++; - break; - } - k2 = it2->first; - v2 = it2->second; - break; - - case READ_BOTH: - /* No tail part */ - it1++; - if (it1 == my_kvs.end()) { break; } - k1 = it1->first; - v1 = it1->second; - it2++; - if (it2 == my_kvs.end()) { break; } - k2 = it2->first; - v2 = it2->second; - break; - - default: - LOGERROR("ERROR: Getting Overlapping Diff KVS for {}:{}, {}:{}, to_read {}", k1, v1, k2, v2, to_read); - /* skip both */ - it1++; - if (it1 == my_kvs.end()) { break; } - k1 = it1->first; - v1 = it1->second; - it2++; - if (it2 == my_kvs.end()) { break; } - k2 = it2->first; - v2 = it2->second; - break; - } - } - } - - while (it1 != my_kvs.end()) { - diff_kv->emplace_back(make_pair(it1->first, it1->second)); - it1++; - } - - while (it2 != other_kvs.end()) { - diff_kv->emplace_back(make_pair(it2->first, it2->second)); - it2++; - } -} - -void merge(Btree* other, match_item_cb_t< K, V > merge_cb) { - std::vector< pair< K, V > > other_kvs; - - other->get_all_kvs(&other_kvs); - for (auto it = other_kvs.begin(); it != other_kvs.end(); it++) { - K k = it->first; - V v = it->second; - BRangeCBParam local_param(k, v); - K start(k.start(), 1), end(k.end(), 1); - - auto search_range = BtreeSearchRange(start, true, end, true); - BtreeUpdateRequest< K, V > ureq(search_range, merge_cb, nullptr, (BRangeCBParam*)&local_param); - range_put(k, v, btree_put_type::APPEND_IF_EXISTS_ELSE_INSERT, nullptr, nullptr, ureq); - } -} -#endif - -#ifdef USE_STORE_TYPE -template < btree_store_type BtreeStoreType, typename K, typename V, btree_node_type InteriorNodeType, - btree_node_type LeafNodeType > -thread_local homeds::reserve_vector< btree_locked_node_info, 5 > btree_t::wr_locked_nodes; - -template < btree_store_type BtreeStoreType, typename K, typename V, btree_node_type InteriorNodeType, - btree_node_type LeafNodeType > -thread_local homeds::reserve_vector< btree_locked_node_info, 5 > btree_t::rd_locked_nodes; -#endif - -} // namespace btree -} // namespace sisl diff --git a/src/btree/btree_get_impl.hpp b/src/btree/btree_get_impl.hpp deleted file mode 100644 index eea09c68..00000000 --- a/src/btree/btree_get_impl.hpp +++ /dev/null @@ -1,50 +0,0 @@ -#pragma once -#include "btree.hpp" - -namespace sisl { -namespace btree { -template < typename K, typename V > -btree_status_t Btree< K, V >::do_get(const BtreeNodePtr< K >& my_node, BtreeGetRequest& greq) const { - btree_status_t ret = btree_status_t::success; - bool is_child_lock = false; - locktype_t child_locktype; - - if (my_node->is_leaf()) { - if (is_get_any_request(greq)) { - auto& gareq = to_get_any_req(greq); - const auto [found, idx] = - my_node->get_any(gareq.m_range, gareq.m_outkey.get(), gareq.m_outval.get(), true, true); - ret = found ? btree_status_t::success : btree_status_t::not_found; - } else { - auto& sgreq = to_single_get_req(greq); - const auto [found, idx] = my_node->find(sgreq.key(), sgreq.m_outval.get(), true); - ret = found ? btree_status_t::success : btree_status_t::not_found; - } - unlock_node(my_node, locktype_t::READ); - return ret; - } - - BtreeNodeInfo child_info; - bool found; - uint32_t idx; - if (is_get_any_request(greq)) { - std::tie(found, idx) = my_node->find(to_get_any_req(greq).m_range.start_key(), &child_info, true); - } else { - std::tie(found, idx) = my_node->find(to_single_get_req(greq).key(), &child_info, true); - } - - ASSERT_IS_VALID_INTERIOR_CHILD_INDX(found, idx, my_node); - BtreeNodePtr< K > child_node; - child_locktype = locktype_t::READ; - ret = read_and_lock_child(child_info.bnode_id(), child_node, my_node, idx, child_locktype, child_locktype, nullptr); - if (ret != btree_status_t::success) { goto out; } - - unlock_node(my_node, locktype_t::READ); - return (do_get(child_node, greq)); - -out: - unlock_node(my_node, locktype_t::READ); - return ret; -} -} // namespace btree -} // namespace sisl diff --git a/src/btree/btree_internal.hpp b/src/btree/btree_internal.hpp deleted file mode 100644 index 00bffc1a..00000000 --- a/src/btree/btree_internal.hpp +++ /dev/null @@ -1,280 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include "fds/utils.hpp" - -namespace sisl { -namespace btree { - -#define _BT_LOG_METHOD_IMPL(req, btcfg, node) \ - ([&](fmt::memory_buffer& buf, const char* msgcb, auto&&... args) -> bool { \ - fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[{}:{}] "}, \ - fmt::make_format_args(file_name(__FILE__), __LINE__)); \ - BOOST_PP_IF(BOOST_VMD_IS_EMPTY(req), BOOST_PP_EMPTY, \ - BOOST_PP_IDENTITY(fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[req={}] "}, \ - fmt::make_format_args(req->to_string())))) \ - (); \ - BOOST_PP_IF(BOOST_VMD_IS_EMPTY(btcfg), BOOST_PP_EMPTY, \ - BOOST_PP_IDENTITY(fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[btree={}] "}, \ - fmt::make_format_args(btcfg.name())))) \ - (); \ - BOOST_PP_IF(BOOST_VMD_IS_EMPTY(node), BOOST_PP_EMPTY, \ - BOOST_PP_IDENTITY(fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[node={}] "}, \ - fmt::make_format_args(node->to_string())))) \ - (); \ - fmt::vformat_to(fmt::appender{buf}, fmt::string_view{msgcb}, \ - fmt::make_format_args(std::forward< decltype(args) >(args)...)); \ - return true; \ - }) - -#define BT_LOG(level, msg, ...) \ - { LOG##level##MOD_FMT(btree, (_BT_LOG_METHOD_IMPL(, this->m_bt_cfg, )), msg, ##__VA_ARGS__); } - -#define BT_NODE_LOG(level, node, msg, ...) \ - { LOG##level##MOD_FMT(btree, (_BT_LOG_METHOD_IMPL(, this->m_bt_cfg, node)), msg, ##__VA_ARGS__); } - -#if 0 -#define THIS_BT_LOG(level, req, msg, ...) \ - { \ - LOG##level##MOD_FMT( \ - btree, ([&](fmt::memory_buffer& buf, const char* msgcb, auto&&... args) -> bool { \ - fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[{}:{}] "}, \ - fmt::make_format_args(file_name(__FILE__), __LINE__)); \ - BOOST_PP_IF(BOOST_VMD_IS_EMPTY(req), BOOST_PP_EMPTY, \ - BOOST_PP_IDENTITY(fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[req={}] "}, \ - fmt::make_format_args(req->to_string())))) \ - (); \ - fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[btree={}] "}, \ - fmt::make_format_args(m_cfg.name())); \ - fmt::vformat_to(fmt::appender{buf}, fmt::string_view{msgcb}, \ - fmt::make_format_args(std::forward< decltype(args) >(args)...)); \ - return true; \ - }), \ - msg, ##__VA_ARGS__); \ - } - -#define THIS_NODE_LOG(level, btcfg, msg, ...) \ - { \ - LOG##level##MOD_FMT( \ - btree, ([&](fmt::memory_buffer& buf, const char* msgcb, auto&&... args) -> bool { \ - fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[{}:{}] "}, \ - fmt::make_format_args(file_name(__FILE__), __LINE__)); \ - fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[btree={}] "}, \ - fmt::make_format_args(btcfg.name())); \ - BOOST_PP_IF(BOOST_VMD_IS_EMPTY(req), BOOST_PP_EMPTY, \ - BOOST_PP_IDENTITY(fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[node={}] "}, \ - fmt::make_format_args(to_string())))) \ - (); \ - fmt::vformat_to(fmt::appender{buf}, fmt::string_view{msgcb}, \ - fmt::make_format_args(std::forward< decltype(args) >(args)...)); \ - return true; \ - }), \ - msg, ##__VA_ARGS__); \ - } - -#define BT_ASSERT(assert_type, cond, req, ...) \ - { \ - assert_type##_ASSERT_FMT( \ - cond, \ - [&](fmt::memory_buffer& buf, const char* msgcb, auto&&... args) -> bool { \ - BOOST_PP_IF(BOOST_VMD_IS_EMPTY(req), BOOST_PP_EMPTY, \ - BOOST_PP_IDENTITY(fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"\n[req={}] "}, \ - fmt::make_format_args(req->to_string())))) \ - (); \ - fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[btree={}] "}, \ - fmt::make_format_args(m_cfg.name())); \ - fmt::vformat_to(fmt::appender{buf}, fmt::string_view{msgcb}, \ - fmt::make_format_args(std::forward< decltype(args) >(args)...)); \ - return true; \ - }, \ - msg, ##__VA_ARGS__); \ - } - -#define BT_ASSERT_CMP(assert_type, val1, cmp, val2, req, ...) \ - { \ - assert_type##_ASSERT_CMP( \ - val1, cmp, val2, \ - [&](fmt::memory_buffer& buf, const char* msgcb, auto&&... args) -> bool { \ - BOOST_PP_IF(BOOST_VMD_IS_EMPTY(req), BOOST_PP_EMPTY, \ - BOOST_PP_IDENTITY(fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"\n[req={}] "}, \ - fmt::make_format_args(req->to_string())))) \ - (); \ - fmt::vformat_to(fmt::appender{buf}, fmt::string_view{"[btree={}] "}, \ - fmt::make_format_args(m_cfg.name())); \ - fmt::vformat_to(fmt::appender{buf}, fmt::string_view{msgcb}, \ - fmt::make_format_args(std::forward< decltype(args) >(args)...)); \ - return true; \ - }, \ - msg, ##__VA_ARGS__); \ - } -#endif - -#define BT_ASSERT(assert_type, cond, ...) \ - { assert_type##_ASSERT_FMT(cond, _BT_LOG_METHOD_IMPL(, this->m_bt_cfg, ), ##__VA_ARGS__); } - -#define BT_ASSERT_CMP(assert_type, val1, cmp, val2, ...) \ - { assert_type##_ASSERT_CMP(val1, cmp, val2, _BT_LOG_METHOD_IMPL(, this->m_bt_cfg, ), ##__VA_ARGS__); } - -#define BT_DBG_ASSERT(cond, ...) BT_ASSERT(DEBUG, cond, ##__VA_ARGS__) -#define BT_DBG_ASSERT_EQ(val1, val2, ...) BT_ASSERT_CMP(DEBUG, val1, ==, val2, ##__VA_ARGS__) -#define BT_DBG_ASSERT_NE(val1, val2, ...) BT_ASSERT_CMP(DEBUG, val1, !=, val2, ##__VA_ARGS__) -#define BT_DBG_ASSERT_LT(val1, val2, ...) BT_ASSERT_CMP(DEBUG, val1, <, val2, ##__VA_ARGS__) -#define BT_DBG_ASSERT_LE(val1, val2, ...) BT_ASSERT_CMP(DEBUG, val1, <=, val2, ##__VA_ARGS__) -#define BT_DBG_ASSERT_GT(val1, val2, ...) BT_ASSERT_CMP(DEBUG, val1, >, val2, ##__VA_ARGS__) -#define BT_DBG_ASSERT_GE(val1, val2, ...) BT_ASSERT_CMP(DEBUG, val1, >=, val2, ##__VA_ARGS__) - -#define BT_LOG_ASSERT(cond, ...) BT_ASSERT(LOGMSG, cond, ##__VA_ARGS__) -#define BT_LOG_ASSERT_EQ(val1, val2, ...) BT_ASSERT_CMP(LOGMSG, val1, ==, val2, ##__VA_ARGS__) -#define BT_LOG_ASSERT_NE(val1, val2, ...) BT_ASSERT_CMP(LOGMSG, val1, !=, val2, ##__VA_ARGS__) -#define BT_LOG_ASSERT_LT(val1, val2, ...) BT_ASSERT_CMP(LOGMSG, val1, <, val2, ##__VA_ARGS__) -#define BT_LOG_ASSERT_LE(val1, val2, ...) BT_ASSERT_CMP(LOGMSG, val1, <=, val2, ##__VA_ARGS__) -#define BT_LOG_ASSERT_GT(val1, val2, ...) BT_ASSERT_CMP(LOGMSG, val1, >, val2, ##__VA_ARGS__) -#define BT_LOG_ASSERT_GE(val1, val2, ...) BT_ASSERT_CMP(LOGMSG, val1, >=, val2, ##__VA_ARGS__) - -#define BT_REL_ASSERT(cond, ...) BT_ASSERT(RELEASE, cond, ##__VA_ARGS__) -#define BT_REL_ASSERT_EQ(val1, val2, ...) BT_ASSERT_CMP(RELEASE, val1, ==, val2, ##__VA_ARGS__) -#define BT_REL_ASSERT_NE(val1, val2, ...) BT_ASSERT_CMP(RELEASE, val1, !=, val2, ##__VA_ARGS__) -#define BT_REL_ASSERT_LT(val1, val2, ...) BT_ASSERT_CMP(RELEASE, val1, <, val2, ##__VA_ARGS__) -#define BT_REL_ASSERT_LE(val1, val2, ...) BT_ASSERT_CMP(RELEASE, val1, <=, val2, ##__VA_ARGS__) -#define BT_REL_ASSERT_GT(val1, val2, ...) BT_ASSERT_CMP(RELEASE, val1, >, val2, ##__VA_ARGS__) -#define BT_REL_ASSERT_GE(val1, val2, ...) BT_ASSERT_CMP(RELEASE, val1, >=, val2, ##__VA_ARGS__) - -#define BT_NODE_ASSERT(assert_type, cond, node, ...) \ - { assert_type##_ASSERT_FMT(cond, _BT_LOG_METHOD_IMPL(, m_bt_cfg, node), ##__VA_ARGS__); } - -#define BT_NODE_ASSERT_CMP(assert_type, val1, cmp, val2, node, ...) \ - { assert_type##_ASSERT_CMP(val1, cmp, val2, _BT_LOG_METHOD_IMPL(, m_bt_cfg, node), ##__VA_ARGS__); } - -#define BT_NODE_DBG_ASSERT(cond, ...) BT_NODE_ASSERT(DEBUG, cond, ##__VA_ARGS__) -#define BT_NODE_DBG_ASSERT_EQ(val1, val2, ...) BT_NODE_ASSERT_CMP(DEBUG, val1, ==, val2, ##__VA_ARGS__) -#define BT_NODE_DBG_ASSERT_NE(val1, val2, ...) BT_NODE_ASSERT_CMP(DEBUG, val1, !=, val2, ##__VA_ARGS__) -#define BT_NODE_DBG_ASSERT_LT(val1, val2, ...) BT_NODE_ASSERT_CMP(DEBUG, val1, <, val2, ##__VA_ARGS__) -#define BT_NODE_DBG_ASSERT_LE(val1, val2, ...) BT_NODE_ASSERT_CMP(DEBUG, val1, <=, val2, ##__VA_ARGS__) -#define BT_NODE_DBG_ASSERT_GT(val1, val2, ...) BT_NODE_ASSERT_CMP(DEBUG, val1, >, val2, ##__VA_ARGS__) -#define BT_NODE_DBG_ASSERT_GE(val1, val2, ...) BT_NODE_ASSERT_CMP(DEBUG, val1, >=, val2, ##__VA_ARGS__) - -#define BT_NODE_LOG_ASSERT(cond, ...) BT_NODE_ASSERT(LOGMSG, cond, ##__VA_ARGS__) -#define BT_NODE_LOG_ASSERT_EQ(val1, val2, ...) BT_NODE_ASSERT_CMP(LOGMSG, val1, ==, val2, ##__VA_ARGS__) -#define BT_NODE_LOG_ASSERT_NE(val1, val2, ...) BT_NODE_ASSERT_CMP(LOGMSG, val1, !=, val2, ##__VA_ARGS__) -#define BT_NODE_LOG_ASSERT_LT(val1, val2, ...) BT_NODE_ASSERT_CMP(LOGMSG, val1, <, val2, ##__VA_ARGS__) -#define BT_NODE_LOG_ASSERT_LE(val1, val2, ...) BT_NODE_ASSERT_CMP(LOGMSG, val1, <=, val2, ##__VA_ARGS__) -#define BT_NODE_LOG_ASSERT_GT(val1, val2, ...) BT_NODE_ASSERT_CMP(LOGMSG, val1, >, val2, ##__VA_ARGS__) -#define BT_NODE_LOG_ASSERT_GE(val1, val2, ...) BT_NODE_ASSERT_CMP(LOGMSG, val1, >=, val2, ##__VA_ARGS__) - -#define BT_NODE_REL_ASSERT(cond, ...) BT_NODE_ASSERT(RELEASE, cond, ##__VA_ARGS__) -#define BT_NODE_REL_ASSERT_EQ(val1, val2, ...) BT_NODE_ASSERT_CMP(RELEASE, val1, ==, val2, ##__VA_ARGS__) -#define BT_NODE_REL_ASSERT_NE(val1, val2, ...) BT_NODE_ASSERT_CMP(RELEASE, val1, !=, val2, ##__VA_ARGS__) -#define BT_NODE_REL_ASSERT_LT(val1, val2, ...) BT_NODE_ASSERT_CMP(RELEASE, val1, <, val2, ##__VA_ARGS__) -#define BT_NODE_REL_ASSERT_LE(val1, val2, ...) BT_NODE_ASSERT_CMP(RELEASE, val1, <=, val2, ##__VA_ARGS__) -#define BT_NODE_REL_ASSERT_GT(val1, val2, ...) BT_NODE_ASSERT_CMP(RELEASE, val1, >, val2, ##__VA_ARGS__) -#define BT_NODE_REL_ASSERT_GE(val1, val2, ...) BT_NODE_ASSERT_CMP(RELEASE, val1, >=, val2, ##__VA_ARGS__) - -#define ASSERT_IS_VALID_INTERIOR_CHILD_INDX(is_found, found_idx, node) \ - BT_NODE_DBG_ASSERT((!is_found || ((int)found_idx < (int)node->get_total_entries()) || node->has_valid_edge()), \ - node, "Is_valid_interior_child_check_failed: found_idx={}", found_idx) - -using bnodeid_t = uint64_t; -static constexpr bnodeid_t empty_bnodeid = std::numeric_limits< bnodeid_t >::max(); -static constexpr uint16_t init_crc_16 = 0x8005; - -VENUM(btree_node_type, uint32_t, FIXED = 0, VAR_VALUE = 1, VAR_KEY = 2, VAR_OBJECT = 3, PREFIX = 4, COMPACT = 5) - -#ifdef USE_STORE_TYPE -VENUM(btree_store_type, uint8_t, MEM = 0, SSD = 1) -#endif - -ENUM(btree_status_t, uint32_t, success, not_found, item_found, closest_found, closest_removed, retry, has_more, - read_failed, write_failed, stale_buf, refresh_failed, put_failed, space_not_avail, split_failed, insert_failed, - cp_mismatch, merge_not_required, merge_failed, replay_not_needed, fast_path_not_possible, resource_full, - update_debug_bm_failed, crc_mismatch) - -struct BtreeConfig { - uint64_t m_max_objs{0}; - uint32_t m_max_key_size{0}; - uint32_t m_max_value_size{0}; - uint32_t m_node_size; - - uint8_t m_ideal_fill_pct{90}; - uint8_t m_split_pct{50}; - - bool m_custom_kv{false}; // If Key/Value needs some transformation before read or write - btree_node_type m_leaf_node_type{btree_node_type::VAR_OBJECT}; - btree_node_type m_int_node_type{btree_node_type::VAR_KEY}; - std::string m_btree_name; // Unique name for the btree - - BtreeConfig(uint32_t node_size, const std::string& btree_name = "") : - m_node_size{node_size}, m_btree_name{btree_name.empty() ? std::string("btree") : btree_name} {} - - virtual ~BtreeConfig() = default; - uint32_t node_size() const { return m_node_size; }; - uint32_t max_key_size() const { return m_max_key_size; } - void set_max_key_size(uint32_t max_key_size) { m_max_key_size = max_key_size; } - - uint64_t max_objs() const { return m_max_objs; } - void set_max_objs(uint64_t max_objs) { m_max_objs = max_objs; } - - uint32_t max_value_size() const { return m_max_value_size; } - - void set_max_value_size(uint32_t max_value_size) { m_max_value_size = max_value_size; } - - uint32_t split_size(uint32_t filled_size) const { return uint32_cast(filled_size * m_split_pct) / 100; } - const std::string& name() const { return m_btree_name; } - - bool is_custom_kv() const { return m_custom_kv; } - btree_node_type leaf_node_type() const { return m_leaf_node_type; } - btree_node_type interior_node_type() const { return m_int_node_type; } -}; - -class BtreeMetrics : public MetricsGroup { -public: - explicit BtreeMetrics(const char* inst_name) : MetricsGroup("Btree", inst_name) { - REGISTER_COUNTER(btree_obj_count, "Btree object count", _publish_as::publish_as_gauge); - REGISTER_COUNTER(btree_leaf_node_count, "Btree Leaf node count", "btree_node_count", {{"node_type", "leaf"}}, - _publish_as::publish_as_gauge); - REGISTER_COUNTER(btree_int_node_count, "Btree Interior node count", "btree_node_count", - {{"node_type", "interior"}}, _publish_as::publish_as_gauge); - REGISTER_COUNTER(btree_split_count, "Total number of btree node splits"); - REGISTER_COUNTER(insert_failed_count, "Total number of inserts failed"); - REGISTER_COUNTER(btree_merge_count, "Total number of btree node merges"); - REGISTER_COUNTER(btree_depth, "Depth of btree", _publish_as::publish_as_gauge); - - REGISTER_COUNTER(btree_int_node_writes, "Total number of btree interior node writes", "btree_node_writes", - {{"node_type", "interior"}}); - REGISTER_COUNTER(btree_leaf_node_writes, "Total number of btree leaf node writes", "btree_node_writes", - {{"node_type", "leaf"}}); - REGISTER_COUNTER(btree_num_pc_gen_mismatch, "Number of gen mismatches to recover"); - - REGISTER_HISTOGRAM(btree_int_node_occupancy, "Interior node occupancy", "btree_node_occupancy", - {{"node_type", "interior"}}, HistogramBucketsType(LinearUpto128Buckets)); - REGISTER_HISTOGRAM(btree_leaf_node_occupancy, "Leaf node occupancy", "btree_node_occupancy", - {{"node_type", "leaf"}}, HistogramBucketsType(LinearUpto128Buckets)); - REGISTER_COUNTER(btree_retry_count, "number of retries"); - REGISTER_COUNTER(write_err_cnt, "number of errors in write"); - REGISTER_COUNTER(split_failed, "split failed"); - REGISTER_COUNTER(query_err_cnt, "number of errors in query"); - REGISTER_COUNTER(read_node_count_in_write_ops, "number of nodes read in write_op"); - REGISTER_COUNTER(read_node_count_in_query_ops, "number of nodes read in query_op"); - REGISTER_COUNTER(btree_write_ops_count, "number of btree operations"); - REGISTER_COUNTER(btree_query_ops_count, "number of btree operations"); - REGISTER_COUNTER(btree_remove_ops_count, "number of btree operations"); - REGISTER_HISTOGRAM(btree_exclusive_time_in_int_node, - "Exclusive time spent (Write locked) on interior node (ns)", "btree_exclusive_time_in_node", - {{"node_type", "interior"}}); - REGISTER_HISTOGRAM(btree_exclusive_time_in_leaf_node, "Exclusive time spent (Write locked) on leaf node (ns)", - "btree_exclusive_time_in_node", {{"node_type", "leaf"}}); - REGISTER_HISTOGRAM(btree_inclusive_time_in_int_node, "Inclusive time spent (Read locked) on interior node (ns)", - "btree_inclusive_time_in_node", {{"node_type", "interior"}}); - REGISTER_HISTOGRAM(btree_inclusive_time_in_leaf_node, "Inclusive time spent (Read locked) on leaf node (ns)", - "btree_inclusive_time_in_node", {{"node_type", "leaf"}}); - - register_me_to_farm(); - } - - ~BtreeMetrics() { deregister_me_from_farm(); } -}; - -} // namespace btree -} // namespace sisl diff --git a/src/btree/btree_kv.hpp b/src/btree/btree_kv.hpp deleted file mode 100644 index f0fb18c3..00000000 --- a/src/btree/btree_kv.hpp +++ /dev/null @@ -1,314 +0,0 @@ -/********************************************************************************* - * Modifications Copyright 2017-2019 eBay Inc. - * - * Author/Developer(s): Harihara Kadayam, Rishabh Mittal - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed - * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - * - *********************************************************************************/ -#pragma once - -#include -#include -#include -#include "fds/buffer.hpp" - -namespace sisl { -namespace btree { - -ENUM(MultiMatchOption, uint16_t, - DO_NOT_CARE, // Select anything that matches - LEFT_MOST, // Select the left most one - RIGHT_MOST, // Select the right most one - MID // Select the middle one -) - -ENUM(btree_put_type, uint16_t, - INSERT_ONLY_IF_NOT_EXISTS, // Insert - REPLACE_ONLY_IF_EXISTS, // Upsert - REPLACE_IF_EXISTS_ELSE_INSERT, - APPEND_ONLY_IF_EXISTS, // Update - APPEND_IF_EXISTS_ELSE_INSERT) - -// The base class, btree library expects its key to be derived from -class BtreeKeyRange; -class BtreeKey { -public: - BtreeKey() = default; - - // Deleting copy constructor forces the derived class to define its own copy constructor - // BtreeKey(const BtreeKey& other) = delete; - // BtreeKey(const sisl::blob& b) = delete; - BtreeKey(const BtreeKey& other) = default; - virtual ~BtreeKey() = default; - - virtual BtreeKey& operator=(const BtreeKey& other) { - clone(other); - return *this; - }; - - virtual void clone(const BtreeKey& other) = 0; - virtual int compare(const BtreeKey& other) const = 0; - - /* Applicable only for extent keys, so do default compare */ - virtual int compare_head(const BtreeKey& other) const { return compare(other); }; - - virtual int compare_range(const BtreeKeyRange& range) const = 0; - - virtual sisl::blob serialize() const = 0; - virtual uint32_t serialized_size() const = 0; - // virtual void deserialize(const sisl::blob& b) = 0; - - // Applicable only to extent keys, where keys have head and tail - virtual sisl::blob serialize_tail() const { return serialize(); } - - virtual std::string to_string() const = 0; - virtual bool is_extent_key() const { return false; } -}; - -class BtreeKeyRange { -public: - const BtreeKey* m_input_start_key{nullptr}; - const BtreeKey* m_input_end_key{nullptr}; - bool m_start_incl; - bool m_end_incl; - MultiMatchOption m_multi_selector; - - friend class BtreeSearchState; - - template < typename K > - friend class BtreeKeyRangeSafe; - - void set_multi_option(MultiMatchOption o) { m_multi_selector = o; } - virtual const BtreeKey& start_key() const { return *m_input_start_key; } - virtual const BtreeKey& end_key() const { return *m_input_end_key; } - - virtual bool is_start_inclusive() const { return m_start_incl; } - virtual bool is_end_inclusive() const { return m_end_incl; } - virtual bool is_simple_search() const { - return ((m_input_start_key == m_input_end_key) && (m_start_incl == m_end_incl)); - } - MultiMatchOption multi_option() const { return m_multi_selector; } - -private: - BtreeKeyRange(const BtreeKey* start_key, bool start_incl, const BtreeKey* end_key, bool end_incl, - MultiMatchOption option) : - m_input_start_key{start_key}, - m_input_end_key{end_key}, - m_start_incl{start_incl}, - m_end_incl{end_incl}, - m_multi_selector{option} {} - BtreeKeyRange(const BtreeKey* start_key, bool start_incl, MultiMatchOption option) : - m_input_start_key{start_key}, - m_input_end_key{start_key}, - m_start_incl{start_incl}, - m_end_incl{start_incl}, - m_multi_selector{option} {} -}; - -/* This type is for keys which is range in itself i.e each key is having its own - * start() and end(). - */ -class ExtentBtreeKey : public BtreeKey { -public: - ExtentBtreeKey() = default; - virtual ~ExtentBtreeKey() = default; - virtual bool is_extent_key() const { return true; } - virtual int compare_end(const BtreeKey& other) const = 0; - virtual int compare_start(const BtreeKey& other) const = 0; - - virtual bool preceeds(const BtreeKey& other) const = 0; - virtual bool succeeds(const BtreeKey& other) const = 0; - - virtual sisl::blob serialize_tail() const override = 0; - - /* we always compare the end key in case of extent */ - virtual int compare(const BtreeKey& other) const override { return (compare_end(other)); } - - /* we always compare the end key in case of extent */ - virtual int compare_range(const BtreeKeyRange& range) const override { return (compare_end(range.end_key())); } -}; - -class BtreeValue { -public: - BtreeValue() = default; - virtual ~BtreeValue() = default; - - // Deleting copy constructor forces the derived class to define its own copy constructor - BtreeValue(const BtreeValue& other) = delete; - - virtual blob serialize() const = 0; - virtual uint32_t serialized_size() const = 0; - virtual void deserialize(const blob& b, bool copy) = 0; - // virtual void append_blob(const BtreeValue& new_val, BtreeValue& existing_val) = 0; - - // virtual void set_blob_size(uint32_t size) = 0; - // virtual uint32_t estimate_size_after_append(const BtreeValue& new_val) = 0; - -#if 0 - virtual void get_overlap_diff_kvs(BtreeKey* k1, BtreeValue* v1, BtreeKey* k2, BtreeValue* v2, uint32_t param, - diff_read_next_t& to_read, - std::vector< std::pair< BtreeKey, BtreeValue > >& overlap_kvs) { - LOGINFO("Not Implemented"); - } -#endif - - virtual std::string to_string() const { return ""; } -}; - -template < typename K > -class BtreeKeyRangeSafe : public BtreeKeyRange { -private: - const K m_actual_start_key; - const K m_actual_end_key; - -public: - BtreeKeyRangeSafe(const BtreeKey& start_key) : - BtreeKeyRange(nullptr, true, nullptr, true, MultiMatchOption::DO_NOT_CARE), m_actual_start_key{start_key} { - this->m_input_start_key = &m_actual_start_key; - this->m_input_end_key = &m_actual_start_key; - } - - virtual ~BtreeKeyRangeSafe() = default; - - BtreeKeyRangeSafe(const BtreeKey& start_key, const BtreeKey& end_key) : - BtreeKeyRangeSafe(start_key, true, end_key, true) {} - - BtreeKeyRangeSafe(const BtreeKey& start_key, bool start_incl, const BtreeKey& end_key, bool end_incl, - MultiMatchOption option = MultiMatchOption::DO_NOT_CARE) : - BtreeKeyRange(nullptr, start_incl, nullptr, end_incl, option), - m_actual_start_key{start_key}, - m_actual_end_key{end_key} { - this->m_input_start_key = &m_actual_start_key; - this->m_input_end_key = &m_actual_end_key; - } - - /******************* all functions are constant *************/ - BtreeKeyRangeSafe< K > start_of_range() const { - return BtreeKeyRangeSafe< K >(start_key(), is_start_inclusive(), multi_option()); - } - BtreeKeyRangeSafe< K > end_of_range() const { - return BtreeKeyRangeSafe< K >(end_key(), is_end_inclusive(), multi_option()); - } -}; - -struct BtreeLockTracker; -struct BtreeQueryCursor { - std::unique_ptr< BtreeKey > m_last_key; - std::unique_ptr< BtreeLockTracker > m_locked_nodes; - BtreeQueryCursor() = default; - - const sisl::blob serialize() const { return m_last_key ? m_last_key->serialize() : sisl::blob{}; }; - virtual std::string to_string() const { return (m_last_key) ? m_last_key->to_string() : "null"; } -}; - -// This class holds the current state of the search. This is where intermediate search state are stored -// and it is mutated by the do_put and do_query methods. Expect the current_sub_range and cursor to keep -// getting updated on calls. -class BtreeSearchState { -protected: - const BtreeKeyRange m_input_range; - BtreeKeyRange m_current_sub_range; - BtreeQueryCursor* m_cursor{nullptr}; - -public: - BtreeSearchState(BtreeKeyRange&& inp_range, BtreeQueryCursor* cur = nullptr) : - m_input_range(std::move(inp_range)), m_current_sub_range{m_input_range}, m_cursor{cur} {} - - const BtreeQueryCursor* const_cursor() const { return m_cursor; } - BtreeQueryCursor* cursor() { return m_cursor; } - void set_cursor(BtreeQueryCursor* cur) { m_cursor = cur; } - void reset_cursor() { set_cursor(nullptr); } - bool is_cursor_valid() const { return (m_cursor != nullptr); } - - template < typename K > - void set_cursor_key(const BtreeKey& end_key) { - if (!m_cursor) { - /* no need to set cursor as user doesn't want to keep track of it */ - return; - } - m_cursor->m_last_key = std::make_unique< K >(end_key); - } - - const BtreeKeyRange& input_range() const { return m_input_range; } - const BtreeKeyRange& current_sub_range() const { return m_current_sub_range; } - void set_current_sub_range(const BtreeKeyRange& new_sub_range) { m_current_sub_range = new_sub_range; } - const BtreeKey& next_key() const { - return (m_cursor && m_cursor->m_last_key) ? *m_cursor->m_last_key : m_input_range.start_key(); - } - -#if 0 - template < typename K > - BtreeKeyRangeSafe< K > next_start_range() const { - return BtreeKeyRangeSafe< K >(next_key(), is_start_inclusive(), m_input_range.multi_option()); - } - - template < typename K > - BtreeKeyRangeSafe< K > end_of_range() const { - return BtreeKeyRangeSafe< K >(m_input_range.end_key(), is_end_inclusive(), m_input_range.multi_option()); - } -#endif - - BtreeKeyRange next_range() const { - return BtreeKeyRange(&next_key(), is_start_inclusive(), &m_input_range.end_key(), is_end_inclusive(), - m_input_range.multi_option()); - } - -private: - bool is_start_inclusive() const { - if (m_cursor && m_cursor->m_last_key) { - // cursor always have the last key not included - return false; - } else { - return m_input_range.is_start_inclusive(); - } - } - - bool is_end_inclusive() const { return m_input_range.is_end_inclusive(); } -}; - -class BtreeNodeInfo : public BtreeValue { -private: - bnodeid_t m_bnodeid{empty_bnodeid}; - -public: - BtreeNodeInfo() = default; - explicit BtreeNodeInfo(const bnodeid_t& id) : m_bnodeid(id) {} - BtreeNodeInfo& operator=(const BtreeNodeInfo& other) = default; - - bnodeid_t bnode_id() const { return m_bnodeid; } - void set_bnode_id(bnodeid_t bid) { m_bnodeid = bid; } - bool has_valid_bnode_id() const { return (m_bnodeid != empty_bnodeid); } - - sisl::blob serialize() const override { - sisl::blob b; - b.size = sizeof(bnodeid_t); - b.bytes = uintptr_cast(const_cast< bnodeid_t* >(&m_bnodeid)); - return b; - } - uint32_t serialized_size() const override { return sizeof(bnodeid_t); } - static uint32_t get_fixed_size() { return sizeof(bnodeid_t); } - std::string to_string() const override { return fmt::format("{}", m_bnodeid); } - bool operator==(const BtreeNodeInfo& other) const { return (m_bnodeid == other.m_bnodeid); } - - void deserialize(const blob& b, bool copy) override { - DEBUG_ASSERT_EQ(b.size, sizeof(bnodeid_t), "BtreeNodeInfo deserialize received invalid blob"); - m_bnodeid = *(r_cast< bnodeid_t* >(b.bytes)); - } - - friend std::ostream& operator<<(std::ostream& os, const BtreeNodeInfo& b) { - os << b.m_bnodeid; - return os; - } -}; - -} // namespace btree -} // namespace sisl diff --git a/src/btree/btree_mutate_impl.ipp b/src/btree/btree_mutate_impl.ipp deleted file mode 100644 index 8ce6c8fc..00000000 --- a/src/btree/btree_mutate_impl.ipp +++ /dev/null @@ -1,523 +0,0 @@ -#pragma once -#include "btree.hpp" - -namespace sisl { -namespace btree { - -/* This function does the heavy lifiting of co-ordinating inserts. It is a recursive function which walks - * down the tree. - * - * NOTE: It expects the node it operates to be locked (either read or write) and also the node should not be - * full. - * - * Input: - * myNode = Node it operates on - * curLock = Type of lock held for this node - * put_req = Key to insert - * v = Value to insert - * ind_hint = If we already know which slot to insert to, if not -1 - * put_type = Type of the put (refer to structure btree_put_type) - * is_end_path = set to true only for last path from root to tree, for range put - * op = tracks multi node io. - */ -template < typename K, typename V > -btree_status_t Btree< K, V >::do_put(const BtreeNodePtr< K >& my_node, locktype_t curlock, BtreeMutateRequest& put_req, - int ind_hint) { - btree_status_t ret = btree_status_t::success; - int curr_ind = -1; - - if (my_node->is_leaf()) { - /* update the leaf node */ - BT_NODE_LOG_ASSERT_EQ(curlock, locktype_t::WRITE, my_node); - ret = mutate_write_leaf_node(my_node, put_req); - unlock_node(my_node, curlock); - return ret; - } - -retry: - int start_ind = 0, end_ind = -1; - - /* Get the start and end ind in a parent node for the range updates. For - * non range updates, start ind and end ind are same. - */ - ret = get_start_and_end_ind(my_node, put_req, start_ind, end_ind); - if (ret != btree_status_t::success) { goto out; } - - BT_NODE_DBG_ASSERT((curlock == locktype_t::READ || curlock == locktype_t::WRITE), my_node, "unexpected locktype {}", - curlock); - curr_ind = start_ind; - - while (curr_ind <= end_ind) { // iterate all matched childrens -#ifdef _PRERELEASE - if (curr_ind - start_ind > 1 && homestore_flip->test_flip("btree_leaf_node_split")) { - ret = btree_status_t::retry; - goto out; - } -#endif - locktype_t child_cur_lock = locktype_t::NONE; - - // Get the childPtr for given key. - BtreeNodeInfo child_info; - BtreeNodePtr< K > child_node; - ret = get_child_and_lock_node(my_node, curr_ind, child_info, child_node, locktype_t::READ, locktype_t::WRITE, - put_req_op_ctx(put_req)); - if (ret != btree_status_t::success) { - if (ret == btree_status_t::not_found) { - // Either the node was updated or mynode is freed. Just proceed again from top. - /* XXX: Is this case really possible as we always take the parent lock and never - * release it. - */ - ret = btree_status_t::retry; - } - goto out; - } - - // Directly get write lock for leaf, since its an insert. - child_cur_lock = (child_node->is_leaf()) ? locktype_t::WRITE : locktype_t::READ; - - /* Get subrange if it is a range update */ - if (is_range_update_req(put_req) && child_node->is_leaf()) { - /* We get the subrange only for leaf because this is where we will be inserting keys. In interior - * nodes, keys are always propogated from the lower nodes. - */ - BtreeSearchState& search_state = to_range_update_req(put_req).search_state(); - search_state.set_current_sub_range(my_node->get_subrange(search_state.next_range(), curr_ind)); - - BT_NODE_LOG(DEBUG, my_node, "Subrange:s:{},e:{},c:{},nid:{},edgeid:{},sk:{},ek:{}", start_ind, end_ind, - curr_ind, my_node->get_node_id(), my_node->get_edge_id(), - search_state.current_sub_range().start_key().to_string(), - search_state.current_sub_range().end_key().to_string()); - } - - /* check if child node is needed to split */ - bool split_occured = false; - ret = check_and_split_node(my_node, put_req, ind_hint, child_node, curlock, child_cur_lock, curr_ind, - split_occured); - if (ret != btree_status_t::success) { goto out; } - if (split_occured) { - ind_hint = -1; // Since split is needed, hint is no longer valid - goto retry; - } - -#ifndef NDEBUG - K ckey, pkey; - if (curr_ind != int_cast(my_node->get_total_entries())) { // not edge - pkey = my_node->get_nth_key(curr_ind, true); - if (child_node->get_total_entries() != 0) { - ckey = child_node->get_last_key(); - if (!child_node->is_leaf()) { - BT_NODE_DBG_ASSERT_EQ(ckey.compare(pkey), 0, my_node); - } else { - BT_NODE_DBG_ASSERT_LE(ckey.compare(pkey), 0, my_node); - } - } - // BT_NODE_DBG_ASSERT_EQ((is_range_update_req(put_req) || k.compare(pkey) <= 0), true, child_node); - } - if (curr_ind > 0) { // not first child - pkey = my_node->get_nth_key(curr_ind - 1, true); - if (child_node->get_total_entries() != 0) { - ckey = child_node->get_first_key(); - BT_NODE_DBG_ASSERT_LE(pkey.compare(ckey), 0, child_node); - } - // BT_NODE_DBG_ASSERT_EQ((is_range_update_req(put_req) || k.compare(pkey) >= 0), true, my_node); - } -#endif - if (curr_ind == end_ind) { - // If we have reached the last index, unlock before traversing down, because we no longer need - // this lock. Holding this lock will impact performance unncessarily. - unlock_node(my_node, curlock); - curlock = locktype_t::NONE; - } - -#ifndef NDEBUG - if (child_cur_lock == locktype_t::WRITE) { - BT_NODE_DBG_ASSERT_EQ(child_node->m_trans_hdr.is_lock, true, child_node); - } -#endif - - ret = do_put(child_node, child_cur_lock, put_req, ind_hint); - if (ret != btree_status_t::success) { goto out; } - - ++curr_ind; - } -out: - if (curlock != locktype_t::NONE) { unlock_node(my_node, curlock); } - return ret; - // Warning: Do not access childNode or myNode beyond this point, since it would - // have been unlocked by the recursive function and it could also been deleted. -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::mutate_write_leaf_node(const BtreeNodePtr< K >& my_node, BtreeMutateRequest& req) { - btree_status_t ret = btree_status_t::success; - if (is_range_update_req(req)) { - BtreeRangeUpdateRequest& rureq = to_range_update_req(req); - BtreeSearchState& search_state = rureq.search_state(); - const BtreeKeyRange& subrange = search_state.current_sub_range(); - - static thread_local std::vector< std::pair< K, V > > s_match; - s_match.clear(); - uint32_t start_ind = 0u, end_ind = 0u; - my_node->get_all(subrange, UINT32_MAX, start_ind, end_ind, &s_match); - - static thread_local std::vector< pair< K, V > > s_replace_kv; - std::vector< pair< K, V > >* p_replace_kvs = &s_match; - if (m_bt_cfg.is_custom_kv()) { - s_replace_kv.clear(); - // rreq.get_cb_param()->node_version = my_node->get_version(); - // ret = rreq.callback()(s_match, s_replace_kv, rreq.get_cb_param(), subrange); - ret = custom_kv_select_for_write(my_node->get_version(), s_match, s_replace_kv, subrange, rureq); - if (ret != btree_status_t::success) { return ret; } - p_replace_kvs = &s_replace_kv; - } - - BT_NODE_DBG_ASSERT_LE(start_ind, end_ind, my_node); - if (s_match.size() > 0) { my_node->remove(start_ind, end_ind); } - COUNTER_DECREMENT(m_metrics, btree_obj_count, s_match.size()); - - for (const auto& [key, value] : *p_replace_kvs) { // insert is based on compare() of BtreeKey - auto status = my_node->insert(key, value); - BT_NODE_REL_ASSERT_EQ(status, btree_status_t::success, my_node, "unexpected insert failure"); - COUNTER_INCREMENT(m_metrics, btree_obj_count, 1); - } - - /* update cursor in intermediate search state */ - rureq.search_state().set_cursor_key< K >(subrange.end_key()); - } else { - const BtreeSinglePutRequest& sreq = to_single_put_req(req); - if (!my_node->put(sreq.key(), sreq.value(), sreq.m_put_type, sreq.m_existing_val.get())) { - ret = btree_status_t::put_failed; - } - COUNTER_INCREMENT(m_metrics, btree_obj_count, 1); - } - - if (ret == btree_status_t::success) { write_node(my_node, put_req_op_ctx(req)); } - return ret; -} - -/* It split the child if a split is required. It releases lock on parent and child_node in case of failure */ -template < typename K, typename V > -btree_status_t Btree< K, V >::check_and_split_node(const BtreeNodePtr< K >& my_node, BtreeMutateRequest& req, - int ind_hint, const BtreeNodePtr< K >& child_node, - locktype_t& curlock, locktype_t& child_curlock, int child_ind, - bool& split_occured) { - split_occured = false; - K split_key; - btree_status_t ret = btree_status_t::success; - auto child_lock_type = child_curlock; - auto none_lock_type = locktype_t::NONE; - -#ifdef _PRERELEASE - boost::optional< int > time; - if (child_node->is_leaf()) { - time = homestore_flip->get_test_flip< int >("btree_delay_and_split_leaf", child_node->get_total_entries()); - } else { - time = homestore_flip->get_test_flip< int >("btree_delay_and_split", child_node->get_total_entries()); - } - if (time && child_node->get_total_entries() > 2) { - std::this_thread::sleep_for(std::chrono::microseconds{time.get()}); - } else -#endif - { - if (!is_split_needed(child_node, m_bt_cfg, req)) { return ret; } - } - - /* Split needed */ - if (is_range_update_req(req)) { - /* In case of range update we might split multiple childs of a parent in a single - * iteration which result into less space in the parent node. - */ -#ifdef _PRERELEASE - if (homestore_flip->test_flip("btree_parent_node_full")) { - ret = btree_status_t::retry; - goto out; - } -#endif - if (is_split_needed(my_node, m_bt_cfg, req)) { - // restart from root - ret = btree_status_t::retry; - bt_thread_vars()->force_split_node = my_node; // On retry force split the my_node - goto out; - } - } - - // Time to split the child, but we need to convert parent to write lock - ret = upgrade_node(my_node, child_node, put_req_op_ctx(req), curlock, child_curlock); - if (ret != btree_status_t::success) { - BT_NODE_LOG(DEBUG, my_node, "Upgrade of node lock failed, retrying from root"); - BT_NODE_LOG_ASSERT_EQ(curlock, locktype_t::NONE, my_node); - goto out; - } - BT_NODE_LOG_ASSERT_EQ(child_curlock, child_lock_type, my_node); - BT_NODE_LOG_ASSERT_EQ(curlock, locktype_t::WRITE, my_node); - - // We need to upgrade the child to WriteLock - ret = upgrade_node(child_node, nullptr, put_req_op_ctx(req), child_curlock, none_lock_type); - if (ret != btree_status_t::success) { - BT_NODE_LOG(DEBUG, child_node, "Upgrade of child node lock failed, retrying from root"); - BT_NODE_LOG_ASSERT_EQ(child_curlock, locktype_t::NONE, child_node); - goto out; - } - BT_NODE_LOG_ASSERT_EQ(none_lock_type, locktype_t::NONE, my_node); - BT_NODE_LOG_ASSERT_EQ(child_curlock, locktype_t::WRITE, child_node); - - // Real time to split the node and get point at which it was split - ret = split_node(my_node, child_node, child_ind, &split_key, false /* root_split */, put_req_op_ctx(req)); - if (ret != btree_status_t::success) { goto out; } - - // After split, retry search and walk down. - unlock_node(child_node, locktype_t::WRITE); - child_curlock = locktype_t::NONE; - COUNTER_INCREMENT(m_metrics, btree_split_count, 1); - split_occured = true; - -out: - if (ret != btree_status_t::success) { - if (curlock != locktype_t::NONE) { - unlock_node(my_node, curlock); - curlock = locktype_t::NONE; - } - - if (child_curlock != locktype_t::NONE) { - unlock_node(child_node, child_curlock); - child_curlock = locktype_t::NONE; - } - } - return ret; -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::check_split_root(BtreeMutateRequest& req) { - K split_key; - BtreeNodePtr< K > child_node = nullptr; - btree_status_t ret = btree_status_t::success; - - m_btree_lock.lock(); - BtreeNodePtr< K > root; - - ret = read_and_lock_root(m_root_node_id, root, locktype_t::WRITE, locktype_t::WRITE, put_req_op_ctx(req)); - if (ret != btree_status_t::success) { goto done; } - - if (!is_split_needed(root, m_bt_cfg, req)) { - unlock_node(root, locktype_t::WRITE); - goto done; - } - - // Create a new child node and split them - child_node = alloc_interior_node(); - if (child_node == nullptr) { - ret = btree_status_t::space_not_avail; - unlock_node(root, locktype_t::WRITE); - goto done; - } - - /* it swap the data while keeping the nodeid same */ - swap_node(root, child_node, put_req_op_ctx(req)); - write_node(child_node, put_req_op_ctx(req)); - - BT_NODE_LOG(DEBUG, root, "Root node is full, swapping contents with child_node {} and split that", - child_node->get_node_id()); - - BT_NODE_DBG_ASSERT_EQ(root->get_total_entries(), 0, root); - ret = split_node(root, child_node, root->get_total_entries(), &split_key, true, put_req_op_ctx(req)); - BT_NODE_DBG_ASSERT_EQ(m_root_node_id, root->get_node_id(), root); - - if (ret != btree_status_t::success) { - swap_node(child_node, root, put_req_op_ctx(req)); - write_node(child_node, put_req_op_ctx(req)); - } - - /* unlock child node */ - unlock_node(root, locktype_t::WRITE); - - if (ret == btree_status_t::success) { COUNTER_INCREMENT(m_metrics, btree_depth, 1); } -done: - m_btree_lock.unlock(); - return ret; -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::split_node(const BtreeNodePtr< K >& parent_node, const BtreeNodePtr< K >& child_node, - uint32_t parent_ind, BtreeKey* out_split_key, bool root_split, void* context) { - BtreeNodeInfo ninfo; - BtreeNodePtr< K > child_node1 = child_node; - BtreeNodePtr< K > child_node2 = child_node1->is_leaf() ? alloc_leaf_node() : alloc_interior_node(); - - if (child_node2 == nullptr) { return (btree_status_t::space_not_avail); } - - btree_status_t ret = btree_status_t::success; - - child_node2->set_next_bnode(child_node1->next_bnode()); - child_node1->set_next_bnode(child_node2->get_node_id()); - uint32_t child1_filled_size = BtreeNode< K >::node_area_size(m_bt_cfg) - child_node1->get_available_size(m_bt_cfg); - - auto split_size = m_bt_cfg.split_size(child1_filled_size); - uint32_t res = child_node1->move_out_to_right_by_size(m_bt_cfg, *child_node2, split_size); - - BT_NODE_REL_ASSERT_GT(res, 0, child_node1, - "Unable to split entries in the child node"); // means cannot split entries - BT_NODE_DBG_ASSERT_GT(child_node1->get_total_entries(), 0, child_node1); - - // In an unlikely case where parent node has no room to accomodate the child key, we need to un-split and then - // free up the new node. This situation could happen on variable key, where the key max size is purely - // an estimation. This logic allows the max size to be declared more optimistically than say 1/4 of node - // which will have substatinally large number of splits and performance constraints. - if (out_split_key->serialized_size() > parent_node->get_available_size(m_bt_cfg)) { - uint32_t move_in_res = child_node1->move_in_from_right_by_size(m_bt_cfg, *child_node2, split_size); - BT_NODE_REL_ASSERT_EQ(move_in_res, res, child_node1, - "The split key size is more than estimated parent available space, but when revert is " - "attempted it fails. Continuing can cause data loss, so crashing"); - free_node(child_node2, context); - - // Mark the parent_node itself to be split upon next retry. - bt_thread_vars()->force_split_node = parent_node; - return btree_status_t::retry; - } - - // Update the existing parent node entry to point to second child ptr. - bool edge_split = (parent_ind == parent_node->get_total_entries()); - ninfo.set_bnode_id(child_node2->get_node_id()); - parent_node->update(parent_ind, ninfo); - - // Insert the last entry in first child to parent node - *out_split_key = child_node1->get_last_key(); - ninfo.set_bnode_id(child_node1->get_node_id()); - - // If key is extent then we always insert the tail portion of the extent key in the parent node - if (out_split_key->is_extent_key()) { - K split_tail_key{out_split_key->serialize_tail(), true}; - parent_node->insert(split_tail_key, ninfo); - } else { - parent_node->insert(*out_split_key, ninfo); - } - - BT_NODE_DBG_ASSERT_GT(child_node2->get_first_key().compare(*out_split_key), 0, child_node2); - BT_NODE_LOG(DEBUG, parent_node, "Split child_node={} with new_child_node={}, split_key={}", - child_node1->get_node_id(), child_node2->get_node_id(), out_split_key->to_string()); - - split_node_precommit(parent_node, child_node1, child_node2, root_split, edge_split, context); - -#if 0 - if (BtreeStoreType == btree_store_type::SSD_BTREE) { - auto j_iob = btree_store_t::make_journal_entry(journal_op::BTREE_SPLIT, root_split, bcp, - {parent_node->get_node_id(), parent_node->get_gen()}); - btree_store_t::append_node_to_journal( - j_iob, (root_split ? bt_journal_node_op::creation : bt_journal_node_op::inplace_write), child_node1, bcp, - out_split_end_key.get_blob()); - - // For root split or split around the edge, we don't write the key, which will cause replay to insert - // edge - if (edge_split) { - btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::creation, child_node2, bcp); - } else { - K child2_pkey; - parent_node->get_nth_key(parent_ind, &child2_pkey, true); - btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::creation, child_node2, bcp, - child2_pkey.get_blob()); - } - btree_store_t::write_journal_entry(m_btree_store.get(), bcp, j_iob); - } -#endif - - // we write right child node, than left and than parent child - write_node(child_node2, nullptr, context); - write_node(child_node1, child_node2, context); - write_node(parent_node, child_node1, context); - - // NOTE: Do not access parentInd after insert, since insert would have - // shifted parentNode to the right. - return ret; -} - -template < typename K, typename V > -bool Btree< K, V >::is_split_needed(const BtreeNodePtr< K >& node, const BtreeConfig& cfg, - BtreeMutateRequest& req) const { - if (bt_thread_vars()->force_split_node && (bt_thread_vars()->force_split_node == node)) { - bt_thread_vars()->force_split_node = nullptr; - return true; - } - - int64_t size_needed = 0; - if (!node->is_leaf()) { // if internal node, size is atmost one additional entry, size of K/V - size_needed = K::get_estimate_max_size() + BtreeNodeInfo::get_fixed_size() + node->get_record_size(); - } else if (is_range_update_req(req)) { - /* - * If there is an overlap then we can add (n + 1) more keys :- one in the front, one in the tail and - * other in between match entries (n - 1). - */ - static thread_local std::vector< std::pair< K, V > > s_match; - s_match.clear(); - uint32_t start_ind = 0, end_ind = 0; - auto& rureq = to_range_update_req(req); - node->get_all(rureq.input_range(), UINT32_MAX, start_ind, end_ind, &s_match); - - size_needed = compute_range_put_needed_size(s_match, (const V&)rureq.m_newval) + - ((s_match.size() + 1) * (K::get_estimate_max_size() + node->get_record_size())); - } else { - auto& sreq = to_single_put_req(req); - - // leaf node, - // NOTE : size_needed is just an guess here. Actual implementation of Mapping key/value can have - // specific logic which determines of size changes on insert or update. - auto [found, idx] = node->find(sreq.key(), nullptr, false); - if (!found) { // We need to insert, this newly. Find out if we have space for value. - size_needed = sreq.key().serialized_size() + sreq.value().serialized_size() + node->get_record_size(); - } else { - // Its an update, see how much additional space needed - V existing_val; - node->get_nth_value(idx, &existing_val, false); - size_needed = compute_single_put_needed_size(existing_val, (const V&)sreq.value()) + - sreq.key().serialized_size() + node->get_record_size(); - } - } - int64_t alreadyFilledSize = BtreeNode< K >::node_area_size(cfg) - node->get_available_size(cfg); - return (alreadyFilledSize + size_needed >= BtreeNode< K >::ideal_fill_size(cfg)); -} - -template < typename K, typename V > -int64_t Btree< K, V >::compute_single_put_needed_size(const V& current_val, const V& new_val) const { - return new_val.serialized_size() - current_val.serialized_size(); -} - -template < typename K, typename V > -int64_t Btree< K, V >::compute_range_put_needed_size(const std::vector< std::pair< K, V > >& existing_kvs, - const V& new_val) const { - return new_val.serialized_size() * existing_kvs.size(); -} - -template < typename K, typename V > -btree_status_t -Btree< K, V >::custom_kv_select_for_write(uint8_t node_version, const std::vector< std::pair< K, V > >& match_kv, - std::vector< std::pair< K, V > >& replace_kv, const BtreeKeyRange& range, - const BtreeRangeUpdateRequest& rureq) const { - for (const auto& [k, v] : match_kv) { - replace_kv.push_back(std::make_pair(k, (V&)rureq.m_newval)); - } - return btree_status_t::success; -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::get_start_and_end_ind(const BtreeNodePtr< K >& node, BtreeMutateRequest& req, - int& start_ind, int& end_ind) { - btree_status_t ret = btree_status_t::success; - if (is_range_update_req(req)) { - /* just get start/end index from get_all. We don't release the parent lock until this - * key range is not inserted from start_ind to end_ind. - */ - node->template get_all< V >(to_range_update_req(req).input_range(), UINT32_MAX, (uint32_t&)start_ind, - (uint32_t&)end_ind); - } else { - auto [found, idx] = node->find(to_single_put_req(req).key(), nullptr, true); - ASSERT_IS_VALID_INTERIOR_CHILD_INDX(found, idx, node); - end_ind = start_ind = (int)idx; - } - - if (start_ind > end_ind) { - BT_NODE_LOG_ASSERT(false, node, "start ind {} greater than end ind {}", start_ind, end_ind); - ret = btree_status_t::retry; - } - return ret; -} - -} // namespace btree -} // namespace sisl diff --git a/src/btree/btree_node.hpp b/src/btree/btree_node.hpp deleted file mode 100644 index 1dec7259..00000000 --- a/src/btree/btree_node.hpp +++ /dev/null @@ -1,607 +0,0 @@ -/********************************************************************************* - * Modifications Copyright 2017-2019 eBay Inc. - * - * Author/Developer(s): Harihara Kadayam, Rishabh Mittal - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed - * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - * - *********************************************************************************/ - -#pragma once -#include - -#if defined __clang__ or defined __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpedantic" -#pragma GCC diagnostic ignored "-Wattributes" -#endif -#include -#if defined __clang__ or defined __GNUC__ -#pragma GCC diagnostic pop -#endif - -#include "utility/atomic_counter.hpp" -#include "utility/enum.hpp" -#include "utility/obj_life_counter.hpp" -#include "btree_internal.hpp" -#include "btree_kv.hpp" - -namespace sisl { -namespace btree { -ENUM(locktype_t, uint8_t, NONE, READ, WRITE) - -#pragma pack(1) -struct transient_hdr_t { - mutable folly::SharedMutexReadPriority lock; - sisl::atomic_counter< uint16_t > upgraders{0}; - - /* these variables are accessed without taking lock and are not expected to change after init */ - uint8_t is_leaf_node{0}; - // btree_store_type store_type{btree_store_type::MEM}; - -#ifndef NDEBUG - int is_lock{-1}; -#endif - - bool is_leaf() const { return (is_leaf_node != 0); } -}; -#pragma pack() - -static constexpr uint8_t BTREE_NODE_VERSION = 1; -static constexpr uint8_t BTREE_NODE_MAGIC = 0xab; - -#pragma pack(1) -struct persistent_hdr_t { - uint8_t magic{BTREE_NODE_MAGIC}; - uint8_t version{BTREE_NODE_VERSION}; - uint16_t checksum; - - bnodeid_t node_id; - bnodeid_t next_node; - - uint32_t nentries : 27; - uint32_t node_type : 3; - uint32_t leaf : 1; - uint32_t valid_node : 1; - - uint64_t node_gen; - bnodeid_t edge_entry; - - std::string to_string() const { - return fmt::format("magic={} version={} csum={} node_id={} next_node={} nentries={} node_type={} is_leaf={} " - "valid_node={} node_gen={} edge_entry={}", - magic, version, checksum, node_id, next_node, nentries, node_type, leaf, valid_node, - node_gen, edge_entry); - } -}; -#pragma pack() - -template < typename K > -class BtreeNode : public sisl::ObjLifeCounter< BtreeNode< K > > { - typedef std::pair< bool, uint32_t > node_find_result_t; - -public: - atomic_counter< int32_t > m_refcount{0}; - transient_hdr_t m_trans_hdr; - uint8_t* m_phys_node_buf; - -public: - BtreeNode(uint8_t* node_buf, bnodeid_t id, bool init_buf, bool is_leaf) : m_phys_node_buf{node_buf} { - if (init_buf) { - set_magic(); - init_checksum(); - set_leaf(is_leaf); - set_total_entries(0); - set_next_bnode(empty_bnodeid); - set_gen(0); - set_valid_node(true); - set_edge_id(empty_bnodeid); - set_node_id(id); - } else { - DEBUG_ASSERT_EQ(get_node_id(), id); - DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); - DEBUG_ASSERT_EQ(get_version(), BTREE_NODE_VERSION); - } - m_trans_hdr.is_leaf_node = is_leaf; - } - virtual ~BtreeNode() = default; - - node_find_result_t find(const BtreeKey& key, BtreeValue* outval, bool copy_val) const { - LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "Magic mismatch on btree_node {}", - get_persistent_header_const()->to_string()); - - auto [found, idx] = bsearch_node(key); - if (idx == get_total_entries() && !has_valid_edge()) { - DEBUG_ASSERT_EQ(found, false); - return std::make_pair(found, idx); - } - - if (get_total_entries() == 0) { - DEBUG_ASSERT((has_valid_edge() || is_leaf()), "Invalid node"); - if (is_leaf()) { return std::make_pair(found, idx); /* Leaf doesn't have any elements */ } - } - - if (outval) { get_nth_value(idx, outval, copy_val); } - return std::make_pair(found, idx); - } - - template < typename V > - uint32_t get_all(const BtreeKeyRange& range, uint32_t max_count, uint32_t& start_ind, uint32_t& end_ind, - std::vector< std::pair< K, V > >* out_values = nullptr) const { - LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "Magic mismatch on btree_node {}", - get_persistent_header_const()->to_string()); - auto count = 0U; - - // Get the start index of the search range. - const auto [sfound, sind] = bsearch_node(range.start_key()); - - start_ind = sind; - if (!range.is_start_inclusive()) { - if (start_ind < get_total_entries()) { - /* start is not inclusive so increment the start_ind if it is same as this key */ - const int x = compare_nth_key(range.start_key(), start_ind); - if (x == 0) { ++start_ind; } - } else { - DEBUG_ASSERT(is_leaf() || has_valid_edge(), "Invalid node"); - } - } - - if (start_ind == get_total_entries() && is_leaf()) { - end_ind = start_ind; - return 0; // no result found - } - DEBUG_ASSERT((start_ind < get_total_entries()) || has_valid_edge(), "Invalid node"); - - // search by the end index - const auto [efound, eind] = bsearch_node(range.end_key()); - end_ind = eind; - - if (end_ind == get_total_entries() && !has_valid_edge()) { --end_ind; } - if (is_leaf()) { - /* Decrement the end indx if range doesn't overlap with the start of key at end indx */ - K key = get_nth_key(end_ind, false); - if ((range.start_key().compare(key) < 0) && ((range.end_key().compare(key)) < 0)) { - if (start_ind == end_ind) { return 0; /* no match */ } - --end_ind; - } - } - - /* We should always find the entries in interior node */ - DEBUG_ASSERT_LE(start_ind, end_ind); - // DEBUG_ASSERT_EQ(range.is_end_inclusive(), true); /* we don't support end exclusive */ - DEBUG_ASSERT(start_ind < get_total_entries() || has_valid_edge(), "Invalid node"); - - count = std::min(end_ind - start_ind + 1, max_count); - if (out_values == nullptr) { return count; } - - /* get the keys and values */ - for (auto i{start_ind}; i < (start_ind + count); ++i) { - add_nth_obj_to_list< V >(i, out_values, true); -#if 0 - if (i == get_total_entries() && !is_leaf()) { - // invalid key in case of edge entry for internal node - out_values->emplace_back(std::make_pair(K{}, get_edge_value())); - } else { - out_values->emplace_back(std::make_pair(K{}, get_nth_value(i, true))); - } -#endif - } - return count; - } - - std::pair< bool, uint32_t > get_any(const BtreeKeyRange& range, BtreeKey* out_key, BtreeValue* out_val, - bool copy_key, bool copy_val) const { - LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "Magic mismatch on btree_node {}", - get_persistent_header_const()->to_string()); - uint32_t result_idx; - const auto mm_opt = range.multi_option(); - bool efound; - uint32_t end_idx; - - // Get the start index of the search range. - auto [sfound, start_idx] = bsearch_node(range.start_key()); - if (sfound && !range.is_start_inclusive()) { - ++start_idx; - sfound = false; - } - - if (sfound && ((mm_opt == MultiMatchOption::DO_NOT_CARE) || (mm_opt == MultiMatchOption::LEFT_MOST))) { - result_idx = start_idx; - goto found_result; - } else if (start_idx == get_total_entries()) { - DEBUG_ASSERT(is_leaf() || has_valid_edge(), "Invalid node"); - return std::make_pair(false, 0); // out_of_range - } - - std::tie(efound, end_idx) = bsearch_node(range.end_key()); - if (efound && !range.is_end_inclusive()) { - if (end_idx == 0) { return std::make_pair(false, 0); } - --end_idx; - efound = false; - } - - if (end_idx > start_idx) { - if (mm_opt == MultiMatchOption::RIGHT_MOST) { - result_idx = end_idx; - } else if (mm_opt == MultiMatchOption::MID) { - result_idx = (end_idx - start_idx) / 2; - } else { - result_idx = start_idx; - } - } else if ((start_idx == end_idx) && ((sfound || efound))) { - result_idx = start_idx; - } else { - return std::make_pair(false, 0); - } - - found_result: - if (out_key) { *out_key = get_nth_key(result_idx, copy_key); } - if (out_val) { get_nth_value(result_idx, out_val, copy_val); } - return std::make_pair(true, result_idx); - } - - bool put(const BtreeKey& key, const BtreeValue& val, btree_put_type put_type, BtreeValue* existing_val) { - LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "Magic mismatch on btree_node {}", - get_persistent_header_const()->to_string()); - bool ret = true; - - const auto [found, idx] = find(key, nullptr, false); - if (found && existing_val) { get_nth_value(idx, existing_val, true); } - - if (put_type == btree_put_type::INSERT_ONLY_IF_NOT_EXISTS) { - if (found) { - LOGDEBUG("Attempt to insert duplicate entry {}", key.to_string()); - return false; - } - ret = (insert(idx, key, val) == btree_status_t::success); - } else if (put_type == btree_put_type::REPLACE_ONLY_IF_EXISTS) { - if (!found) return false; - update(idx, key, val); - } else if (put_type == btree_put_type::REPLACE_IF_EXISTS_ELSE_INSERT) { - (found) ? update(idx, key, val) : (void)insert(idx, key, val); - } else if (put_type == btree_put_type::APPEND_ONLY_IF_EXISTS) { - if (!found) return false; - append(idx, key, val); - } else if (put_type == btree_put_type::APPEND_IF_EXISTS_ELSE_INSERT) { - (found) ? append(idx, key, val) : (void)insert(idx, key, val); - } else { - DEBUG_ASSERT(false, "Wrong put_type {}", put_type); - } - return ret; - } - - virtual btree_status_t insert(const BtreeKey& key, const BtreeValue& val) { - const auto [found, idx] = find(key, nullptr, false); - DEBUG_ASSERT(!is_leaf() || (!found), "Invalid node"); // We do not support duplicate keys yet - insert(idx, key, val); - DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "{}", get_persistent_header_const()->to_string()); - return btree_status_t::success; - } - - virtual bool remove_one(const BtreeKey& key, BtreeKey* outkey, BtreeValue* outval) { - const auto [found, idx] = find(key, outval, true); - if (found) { - if (outkey) { *outkey = get_nth_key(idx, true); } - remove(idx); - LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "{}", get_persistent_header_const()->to_string()); - } - return found; - } - - virtual bool remove_any(const BtreeKeyRange& range, BtreeKey* outkey, BtreeValue* outval) { - const auto [found, idx] = get_any(range, outkey, outval, true, true); - if (found) { - remove(idx); - LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "{}", get_persistent_header_const()->to_string()); - } - return found; - } - - /* Update the key and value pair and after update if outkey and outval are non-nullptr, it fills them with - * the key and value it just updated respectively */ - virtual bool update_one(const BtreeKey& key, const BtreeValue& val, BtreeValue* outval) { - const auto [found, idx] = find(key, outval, true); - if (found) { - update(idx, val); - LOGMSG_ASSERT((get_magic() == BTREE_NODE_MAGIC), "{}", get_persistent_header_const()->to_string()); - } - return found; - } - - void get_adjacent_indicies(uint32_t cur_ind, std::vector< uint32_t >& indices_list, uint32_t max_indices) const { - uint32_t i = 0; - uint32_t start_ind; - uint32_t end_ind; - uint32_t nentries = get_total_entries(); - - auto max_ind = ((max_indices / 2) - 1 + (max_indices % 2)); - end_ind = cur_ind + (max_indices / 2); - if (cur_ind < max_ind) { - end_ind += max_ind - cur_ind; - start_ind = 0; - } else { - start_ind = cur_ind - max_ind; - } - - for (i = start_ind; (i <= end_ind) && (indices_list.size() < max_indices); ++i) { - if (i == nentries) { - if (has_valid_edge()) { indices_list.push_back(i); } - break; - } else { - indices_list.push_back(i); - } - } - } - - BtreeKeyRange get_subrange(const BtreeKeyRange& inp_range, int upto_ind) const { -#ifndef NDEBUG - if (upto_ind > 0) { - /* start of input range should always be more then the key in curr_ind - 1 */ - DEBUG_ASSERT_LE(get_nth_key(upto_ind - 1, false).compare(inp_range.start_key()), 0, "[node={}]", - to_string()); - } -#endif - - // find end of subrange - bool end_inc = true; - K end_key; - - if (upto_ind < int_cast(get_total_entries())) { - end_key = get_nth_key(upto_ind, false); - if (end_key.compare(inp_range.end_key()) >= 0) { - /* this is last index to process as end of range is smaller then key in this node */ - end_key = inp_range.end_key(); - end_inc = inp_range.is_end_inclusive(); - } else { - end_inc = true; - } - } else { - /* it is the edge node. end key is the end of input range */ - LOGMSG_ASSERT_EQ(has_valid_edge(), true, "node={}", to_string()); - end_key = inp_range.end_key(); - end_inc = inp_range.is_end_inclusive(); - } - - BtreeKeyRangeSafe< K > subrange{inp_range.start_key(), inp_range.is_start_inclusive(), end_key, end_inc}; - RELEASE_ASSERT_LE(subrange.start_key().compare(subrange.end_key()), 0, "[node={}]", to_string()); - RELEASE_ASSERT_LE(subrange.start_key().compare(inp_range.end_key()), 0, "[node={}]", to_string()); - return subrange; - } - - K get_last_key() const { - if (get_total_entries() == 0) { return K{}; } - return get_nth_key(get_total_entries() - 1, true); - } - - K get_first_key() const { return get_nth_key(0, true); } - - bool validate_key_order() const { - for (auto i = 1u; i < get_total_entries(); ++i) { - auto prevKey = get_nth_key(i - 1, false); - auto curKey = get_nth_key(i, false); - if (prevKey.compare(curKey) >= 0) { - DEBUG_ASSERT(false, "Order check failed at entry={}", i); - return false; - } - } - return true; - } - - virtual BtreeNodeInfo get_edge_value() const { return BtreeNodeInfo{get_edge_id()}; } - - virtual void set_edge_value(const BtreeValue& v) { - const auto b = v.serialize(); - ASSERT_EQ(b.size, sizeof(bnodeid_t)); - set_edge_id(*r_cast< bnodeid_t* >(b.bytes)); - } - - void invalidate_edge() { set_edge_id(empty_bnodeid); } - - uint32_t get_total_entries() const { return get_persistent_header_const()->nentries; } - - void lock(locktype_t l) const { - if (l == locktype_t::READ) { - m_trans_hdr.lock.lock_shared(); - } else if (l == locktype_t::WRITE) { - m_trans_hdr.lock.lock(); - } - } - - void unlock(locktype_t l) const { - if (l == locktype_t::READ) { - m_trans_hdr.lock.unlock_shared(); - } else if (l == locktype_t::WRITE) { - m_trans_hdr.lock.unlock(); - } - } - - void lock_upgrade() { - m_trans_hdr.upgraders.increment(1); - this->unlock(locktype_t::READ); - this->lock(locktype_t::WRITE); - } - - void lock_acknowledge() { m_trans_hdr.upgraders.decrement(1); } - bool any_upgrade_waiters() const { return (!m_trans_hdr.upgraders.testz()); } - -public: - // Public method which needs to be implemented by variants - virtual uint32_t move_out_to_right_by_entries(const BtreeConfig& cfg, BtreeNode& other_node, uint32_t nentries) = 0; - virtual uint32_t move_out_to_right_by_size(const BtreeConfig& cfg, BtreeNode& other_node, uint32_t size) = 0; - virtual uint32_t move_in_from_right_by_entries(const BtreeConfig& cfg, BtreeNode& other_node, - uint32_t nentries) = 0; - virtual uint32_t move_in_from_right_by_size(const BtreeConfig& cfg, BtreeNode& other_node, uint32_t size) = 0; - virtual uint32_t get_available_size(const BtreeConfig& cfg) const = 0; - virtual std::string to_string(bool print_friendly = false) const = 0; - virtual void get_nth_value(uint32_t ind, BtreeValue* out_val, bool copy) const = 0; - virtual K get_nth_key(uint32_t ind, bool copykey) const = 0; - - virtual btree_status_t insert(uint32_t ind, const BtreeKey& key, const BtreeValue& val) = 0; - virtual void remove(uint32_t ind) { remove(ind, ind); } - virtual void remove(uint32_t ind_s, uint32_t ind_e) = 0; - virtual void update(uint32_t ind, const BtreeValue& val) = 0; - virtual void update(uint32_t ind, const BtreeKey& key, const BtreeValue& val) = 0; - virtual void append(uint32_t ind, const BtreeKey& key, const BtreeValue& val) = 0; - - virtual uint32_t get_nth_obj_size(uint32_t ind) const = 0; - virtual uint16_t get_record_size() const = 0; - virtual int compare_nth_key(const BtreeKey& cmp_key, uint32_t ind) const = 0; - - // Method just to please compiler - template < typename V > - V edge_value_internal() const { - return V{get_edge_id()}; - } - -private: - node_find_result_t bsearch_node(const BtreeKey& key) const { - DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); - auto [found, idx] = bsearch(-1, get_total_entries(), key); - if (found) { DEBUG_ASSERT_LT(idx, get_total_entries()); } - - return std::make_pair(found, idx); - } - - node_find_result_t bsearch(int start, int end, const BtreeKey& key) const { - int mid = 0; - bool found{false}; - uint32_t end_of_search_index{0}; - - if ((end - start) <= 1) { return std::make_pair(found, end_of_search_index); } - while ((end - start) > 1) { - mid = start + (end - start) / 2; - DEBUG_ASSERT(mid >= 0 && mid < int_cast(get_total_entries()), "Invalid mid={}", mid); - int x = compare_nth_key(key, mid); - if (x == 0) { - found = true; - end = mid; - break; - } else if (x > 0) { - end = mid; - } else { - start = mid; - } - } - - return std::make_pair(found, end); - } - - template < typename V > - void add_nth_obj_to_list(uint32_t ind, std::vector< std::pair< K, V > >* vec, bool copy) const { - std::pair< K, V > kv; - vec->emplace_back(kv); - - auto* pkv = &vec->back(); - if (ind == get_total_entries() && !is_leaf()) { - pkv->second = edge_value_internal< V >(); - } else { - pkv->first = get_nth_key(ind, copy); - get_nth_value(ind, &pkv->second, copy); - } - } - -public: - persistent_hdr_t* get_persistent_header() { return r_cast< persistent_hdr_t* >(m_phys_node_buf); } - const persistent_hdr_t* get_persistent_header_const() const { - return r_cast< const persistent_hdr_t* >(m_phys_node_buf); - } - uint8_t* node_data_area() { return (m_phys_node_buf + sizeof(persistent_hdr_t)); } - const uint8_t* node_data_area_const() const { return (m_phys_node_buf + sizeof(persistent_hdr_t)); } - - uint8_t get_magic() const { return get_persistent_header_const()->magic; } - void set_magic() { get_persistent_header()->magic = BTREE_NODE_MAGIC; } - - uint8_t get_version() const { return get_persistent_header_const()->version; } - uint16_t get_checksum() const { return get_persistent_header_const()->checksum; } - void init_checksum() { get_persistent_header()->checksum = 0; } - - void set_node_id(bnodeid_t id) { get_persistent_header()->node_id = id; } - bnodeid_t get_node_id() const { return get_persistent_header_const()->node_id; } - -#ifndef NO_CHECKSUM - void set_checksum(const BtreeConfig& cfg) { - get_persistent_header()->checksum = crc16_t10dif(init_crc_16, node_data_area_const(), node_area_size(cfg)); - } - - bool verify_node(const BtreeConfig& cfg) const { - HS_DEBUG_ASSERT_EQ(is_valid_node(), true, "verifying invalide node {}!", - get_persistent_header_const()->to_string()); - auto exp_checksum = crc16_t10dif(init_crc_16, node_data_area_const(), node_area_size(cfg)); - return ((get_magic() == BTREE_NODE_MAGIC) && (get_checksum() == exp_checksum)); - } -#endif - - bool is_leaf() const { return get_persistent_header_const()->leaf; } - btree_node_type get_node_type() const { - return s_cast< btree_node_type >(get_persistent_header_const()->node_type); - } - - void set_total_entries(uint32_t n) { get_persistent_header()->nentries = n; } - void inc_entries() { ++get_persistent_header()->nentries; } - void dec_entries() { --get_persistent_header()->nentries; } - - void add_entries(uint32_t addn) { get_persistent_header()->nentries += addn; } - void sub_entries(uint32_t subn) { get_persistent_header()->nentries -= subn; } - - void set_leaf(bool leaf) { get_persistent_header()->leaf = leaf; } - void set_node_type(btree_node_type t) { get_persistent_header()->node_type = uint32_cast(t); } - uint64_t get_gen() const { return get_persistent_header_const()->node_gen; } - void inc_gen() { get_persistent_header()->node_gen++; } - void set_gen(uint64_t g) { get_persistent_header()->node_gen = g; } - - void set_valid_node(bool valid) { get_persistent_header()->valid_node = (valid ? 1 : 0); } - bool is_valid_node() const { return get_persistent_header_const()->valid_node; } - - uint32_t get_occupied_size(const BtreeConfig& cfg) const { return (node_area_size(cfg) - get_available_size(cfg)); } - uint32_t get_suggested_min_size(const BtreeConfig& cfg) const { return cfg.max_key_size(); } - - static uint32_t node_area_size(const BtreeConfig& cfg) { return cfg.node_size() - sizeof(persistent_hdr_t); } - static uint32_t ideal_fill_size(const BtreeConfig& cfg) { - return uint32_cast(node_area_size(cfg) * cfg.m_ideal_fill_pct) / 100; - } - static uint32_t merge_suggested_size(const BtreeConfig& cfg) { return node_area_size(cfg) - ideal_fill_size(cfg); } - - bool is_merge_needed(const BtreeConfig& cfg) const { -#ifdef _PRERELEASE - if (homestore_flip->test_flip("btree_merge_node") && get_occupied_size(cfg) < node_area_size(cfg)) { - return true; - } - - auto ret = homestore_flip->get_test_flip< uint64_t >("btree_merge_node_pct"); - if (ret && get_occupied_size(cfg) < (ret.get() * node_area_size(cfg) / 100)) { return true; } -#endif - return (get_occupied_size(cfg) < get_suggested_min_size(cfg)); - } - - bnodeid_t next_bnode() const { return get_persistent_header_const()->next_node; } - void set_next_bnode(bnodeid_t b) { get_persistent_header()->next_node = b; } - - bnodeid_t get_edge_id() const { return get_persistent_header_const()->edge_entry; } - void set_edge_id(bnodeid_t edge) { get_persistent_header()->edge_entry = edge; } - - bool has_valid_edge() const { - if (is_leaf()) { return false; } - return (get_edge_id() != empty_bnodeid); - } -}; - -template < typename K, typename V > -struct btree_locked_node_info { - BtreeNode< K >* node; - Clock::time_point start_time; - const char* fname; - int line; - - void dump() const { LOGINFO("node locked by file: {}, line: {}", fname, line); } -}; - -} // namespace btree -} // namespace sisl diff --git a/src/btree/btree_node_mgr.ipp b/src/btree/btree_node_mgr.ipp deleted file mode 100644 index add991b4..00000000 --- a/src/btree/btree_node_mgr.ipp +++ /dev/null @@ -1,480 +0,0 @@ -#pragma once - -#include "btree.hpp" -#include "fds/utils.hpp" -#include - -namespace sisl { -namespace btree { - -#define lock_and_refresh_node(a, b, c) _lock_and_refresh_node(a, b, c, __FILE__, __LINE__) -#define lock_node_upgrade(a, b) _lock_node_upgrade(a, b, __FILE__, __LINE__) -#define start_of_lock(a, b) _start_of_lock(a, b, __FILE__, __LINE__) - -template < typename K, typename V > -std::pair< btree_status_t, bnodeid_t > Btree< K, V >::create_root_node(void* op_context) { - // Assign one node as root node and initially root is leaf - BtreeNodePtr< K > root = alloc_leaf_node(); - if (root == nullptr) { return std::make_pair(btree_status_t::space_not_avail, empty_bnodeid); } - m_root_node_id = root->get_node_id(); - - create_tree_precommit(root, op_context); - - auto ret = write_node(root, nullptr, op_context); - BT_DBG_ASSERT_EQ(ret, btree_status_t::success, "Writing root node failed"); - - /* write an entry to the journal also */ - return std::make_pair(ret, m_root_node_id); -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::read_and_lock_root(bnodeid_t id, BtreeNodePtr< K >& node_ptr, locktype_t int_lock_type, - locktype_t leaf_lock_type, void* context) const { - return (read_and_lock_node(id, node_ptr, int_lock_type, int_lock_type, context)); -} - -/* It read the node, take the lock and recover it if required */ -template < typename K, typename V > -btree_status_t Btree< K, V >::read_and_lock_child(bnodeid_t child_id, BtreeNodePtr< K >& child_node, - const BtreeNodePtr< K >& parent_node, uint32_t parent_ind, - locktype_t int_lock_type, locktype_t leaf_lock_type, - void* context) const { - btree_status_t ret = read_node(child_id, child_node); - if (child_node == nullptr) { - if (ret != btree_status_t::fast_path_not_possible) { BT_LOG(ERROR, "read failed, reason: {}", ret); } - return ret; - } - - auto is_leaf = child_node->is_leaf(); - auto acq_lock = is_leaf ? leaf_lock_type : int_lock_type; - ret = lock_and_refresh_node(child_node, acq_lock, context); - - BT_NODE_DBG_ASSERT_EQ(is_leaf, child_node->is_leaf(), child_node); - - return ret; -} - -/* It read the node, take the lock and recover it if required */ -template < typename K, typename V > -btree_status_t Btree< K, V >::read_and_lock_sibling(bnodeid_t id, BtreeNodePtr< K >& node_ptr, locktype_t int_lock_type, - locktype_t leaf_lock_type, void* context) const { - /* TODO: Currently we do not have any recovery while sibling is read. It is not a problem today - * as we always scan the whole btree traversally during boot. However, we should support - * it later. - */ - return (read_and_lock_node(id, node_ptr, int_lock_type, int_lock_type, context)); -} - -/* It read the node and take a lock of the node. It doesn't recover the node. - * @int_lock_type :- lock type if a node is interior node. - * @leaf_lock_type :- lock type if a node is leaf node. - */ -template < typename K, typename V > -btree_status_t Btree< K, V >::read_and_lock_node(bnodeid_t id, BtreeNodePtr< K >& node_ptr, locktype_t int_lock_type, - locktype_t leaf_lock_type, void* context) const { - auto ret = read_node(id, node_ptr); - if (node_ptr == nullptr) { - if (ret != btree_status_t::fast_path_not_possible) { BT_LOG(ERROR, "read failed, reason: {}", ret); } - return ret; - } - - auto acq_lock = (node_ptr->is_leaf()) ? leaf_lock_type : int_lock_type; - ret = lock_and_refresh_node(node_ptr, acq_lock, context); - if (ret != btree_status_t::success) { BT_LOG(ERROR, "Node refresh failed"); } - - return ret; -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::get_child_and_lock_node(const BtreeNodePtr< K >& node, uint32_t index, - BtreeNodeInfo& child_info, BtreeNodePtr< K >& child_node, - locktype_t int_lock_type, locktype_t leaf_lock_type, - void* context) const { - if (index == node->get_total_entries()) { - const auto& edge_id{node->get_edge_id()}; - child_info.set_bnode_id(edge_id); - // If bsearch points to last index, it means the search has not found entry unless it is an edge value. - if (!child_info.has_valid_bnode_id()) { - BT_NODE_LOG_ASSERT(false, node, "Child index {} does not have valid bnode_id", index); - return btree_status_t::not_found; - } - } else { - BT_NODE_LOG_ASSERT_LT(index, node->get_total_entries(), node); - node->get_nth_value(index, &child_info, false /* copy */); - } - - return ( - read_and_lock_child(child_info.bnode_id(), child_node, node, index, int_lock_type, leaf_lock_type, context)); -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::write_node_sync(const BtreeNodePtr< K >& node, void* context) { - return (write_node(node, nullptr, context)); -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::write_node(const BtreeNodePtr< K >& node, void* context) { - return (write_node(node, nullptr, context)); -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::write_node(const BtreeNodePtr< K >& node, const BtreeNodePtr< K >& dependent_node, - void* context) { - BT_NODE_LOG(DEBUG, node, "Writing node"); - - COUNTER_INCREMENT_IF_ELSE(m_metrics, node->is_leaf(), btree_leaf_node_writes, btree_int_node_writes, 1); - HISTOGRAM_OBSERVE_IF_ELSE(m_metrics, node->is_leaf(), btree_leaf_node_occupancy, btree_int_node_occupancy, - ((m_node_size - node->get_available_size(m_bt_cfg)) * 100) / m_node_size); - - return btree_status_t::success; -} - -/* Caller of this api doesn't expect read to fail in any circumstance */ -template < typename K, typename V > -void Btree< K, V >::read_node_or_fail(bnodeid_t id, BtreeNodePtr< K >& node) const { - BT_NODE_REL_ASSERT_EQ(read_node(id, node), btree_status_t::success, node); -} - -/* This function upgrades the node lock and take required steps if things have - * changed during the upgrade. - * - * Inputs: - * myNode - Node to upgrade - * childNode - In case childNode needs to be unlocked. Could be nullptr - * curLock - Input/Output: current lock type - * - * Returns - If successfully able to upgrade, return true, else false. - * - * About Locks: This function expects the myNode to be locked and if childNode is not nullptr, expects - * it to be locked too. If it is able to successfully upgrade it continue to retain its - * old lock. If failed to upgrade, will release all locks. - */ -template < typename K, typename V > -btree_status_t Btree< K, V >::upgrade_node(const BtreeNodePtr< K >& my_node, BtreeNodePtr< K > child_node, - void* context, locktype_t& cur_lock, locktype_t& child_cur_lock) { - uint64_t prev_gen; - btree_status_t ret = btree_status_t::success; - locktype_t child_lock_type = child_cur_lock; - - if (cur_lock == locktype_t::WRITE) { goto done; } - - prev_gen = my_node->get_gen(); - if (child_node) { - unlock_node(child_node, child_cur_lock); - child_cur_lock = locktype_t::NONE; - } - -#ifdef _PRERELEASE - { - auto time = homestore_flip->get_test_flip< uint64_t >("btree_upgrade_delay"); - if (time) { std::this_thread::sleep_for(std::chrono::microseconds{time.get()}); } - } -#endif - ret = lock_node_upgrade(my_node, context); - if (ret != btree_status_t::success) { - cur_lock = locktype_t::NONE; - return ret; - } - - // The node was not changed by anyone else during upgrade. - cur_lock = locktype_t::WRITE; - - // If the node has been made invalid (probably by mergeNodes) ask caller to start over again, but before - // that cleanup or free this node if there is no one waiting. - if (!my_node->is_valid_node()) { - unlock_node(my_node, locktype_t::WRITE); - cur_lock = locktype_t::NONE; - ret = btree_status_t::retry; - goto done; - } - - // If node has been updated, while we have upgraded, ask caller to start all over again. - if (prev_gen != my_node->get_gen()) { - unlock_node(my_node, cur_lock); - cur_lock = locktype_t::NONE; - ret = btree_status_t::retry; - goto done; - } - - if (child_node) { - ret = lock_and_refresh_node(child_node, child_lock_type, context); - if (ret != btree_status_t::success) { - unlock_node(my_node, cur_lock); - cur_lock = locktype_t::NONE; - child_cur_lock = locktype_t::NONE; - goto done; - } - child_cur_lock = child_lock_type; - } - -#ifdef _PRERELEASE - { - int is_leaf = 0; - - if (child_node && child_node->is_leaf()) { is_leaf = 1; } - if (homestore_flip->test_flip("btree_upgrade_node_fail", is_leaf)) { - unlock_node(my_node, cur_lock); - cur_lock = locktype_t::NONE; - if (child_node) { - unlock_node(child_node, child_cur_lock); - child_cur_lock = locktype_t::NONE; - } - ret = btree_status_t::retry; - goto done; - } - } -#endif - - BT_NODE_DBG_ASSERT_EQ(my_node->m_trans_hdr.is_lock, 1, my_node); -done: - return ret; -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::_lock_and_refresh_node(const BtreeNodePtr< K >& node, locktype_t type, void* context, - const char* fname, int line) const { - bool is_write_modifiable; - node->lock(type); - if (type == locktype_t::WRITE) { - is_write_modifiable = true; -#ifndef NDEBUG - node->m_trans_hdr.is_lock = 1; -#endif - } else { - is_write_modifiable = false; - } - - auto ret = refresh_node(node, is_write_modifiable, context); - if (ret != btree_status_t::success) { - node->unlock(type); - return ret; - } - - _start_of_lock(node, type, fname, line); - return btree_status_t::success; -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::_lock_node_upgrade(const BtreeNodePtr< K >& node, void* context, const char* fname, - int line) { - // Explicitly dec and incr, for upgrade, since it does not call top level functions to lock/unlock node - auto time_spent = end_of_lock(node, locktype_t::READ); - - node->lock_upgrade(); -#ifndef NDEBUG - node->m_trans_hdr.is_lock = 1; -#endif - node->lock_acknowledge(); - auto ret = refresh_node(node, true, context); - if (ret != btree_status_t::success) { - node->unlock(locktype_t::WRITE); - return ret; - } - - observe_lock_time(node, locktype_t::READ, time_spent); - _start_of_lock(node, locktype_t::WRITE, fname, line); - return btree_status_t::success; -} - -template < typename K, typename V > -void Btree< K, V >::unlock_node(const BtreeNodePtr< K >& node, locktype_t type) const { -#ifndef NDEBUG - if (type == locktype_t::WRITE) { node->m_trans_hdr.is_lock = 0; } -#endif - node->unlock(type); - auto time_spent = end_of_lock(node, type); - observe_lock_time(node, type, time_spent); -} - -template < typename K, typename V > -BtreeNodePtr< K > Btree< K, V >::alloc_leaf_node() { - bool is_new_allocation; - BtreeNodePtr< K > n = alloc_node(true /* is_leaf */, is_new_allocation); - if (n) { - COUNTER_INCREMENT(m_metrics, btree_leaf_node_count, 1); - ++m_total_nodes; - } - return n; -} - -template < typename K, typename V > -BtreeNodePtr< K > Btree< K, V >::alloc_interior_node() { - bool is_new_allocation; - BtreeNodePtr< K > n = alloc_node(false /* is_leaf */, is_new_allocation); - if (n) { - COUNTER_INCREMENT(m_metrics, btree_int_node_count, 1); - ++m_total_nodes; - } - return n; -} - -template < typename K, typename V > -BtreeNode< K >* Btree< K, V >::init_node(uint8_t* node_buf, bnodeid_t id, bool init_buf, bool is_leaf) { - BtreeNode< K >* ret_node{nullptr}; - btree_node_type node_type = is_leaf ? m_bt_cfg.leaf_node_type() : m_bt_cfg.interior_node_type(); - - switch (node_type) { - case btree_node_type::VAR_OBJECT: - if (is_leaf) { - ret_node = new VarObjSizeNode< K, V >(node_buf, id, init_buf, is_leaf, this->m_bt_cfg); - } else { - ret_node = new VarObjSizeNode< K, BtreeNodeInfo >(node_buf, id, init_buf, is_leaf, this->m_bt_cfg); - } - break; - - case btree_node_type::FIXED: - if (is_leaf) { - ret_node = new SimpleNode< K, V >(node_buf, id, init_buf, is_leaf, this->m_bt_cfg); - } else { - ret_node = new SimpleNode< K, BtreeNodeInfo >(node_buf, id, init_buf, is_leaf, this->m_bt_cfg); - } - break; - - case btree_node_type::VAR_VALUE: - if (is_leaf) { - ret_node = new VarValueSizeNode< K, V >(node_buf, id, init_buf, is_leaf, this->m_bt_cfg); - } else { - ret_node = new VarValueSizeNode< K, BtreeNodeInfo >(node_buf, id, init_buf, is_leaf, this->m_bt_cfg); - } - break; - - case btree_node_type::VAR_KEY: - if (is_leaf) { - ret_node = new VarKeySizeNode< K, V >(node_buf, id, init_buf, is_leaf, this->m_bt_cfg); - } else { - ret_node = new VarKeySizeNode< K, BtreeNodeInfo >(node_buf, id, init_buf, is_leaf, this->m_bt_cfg); - } - break; - - default: - BT_REL_ASSERT(false, "Unsupported node type {}", node_type); - break; - } - return ret_node; -} - -/* Note:- This function assumes that access of this node is thread safe. */ -template < typename K, typename V > -void Btree< K, V >::do_free_node(const BtreeNodePtr< K >& node) { - BT_NODE_LOG(DEBUG, node, "Freeing node"); - - COUNTER_DECREMENT_IF_ELSE(m_metrics, node->is_leaf(), btree_leaf_node_count, btree_int_node_count, 1); - if (node->is_valid_node() == false) { - // a node could be marked as invalid during previous destroy and hit crash before destroy completes; - // and upon boot volume continues to destroy this btree; - BT_NODE_LOG(INFO, node, "Freeing a node already freed because of crash during destroy btree."); - } - node->set_valid_node(false); - --m_total_nodes; - - intrusive_ptr_release(node.get()); -} - -template < typename K, typename V > -void Btree< K, V >::observe_lock_time(const BtreeNodePtr< K >& node, locktype_t type, uint64_t time_spent) const { - if (time_spent == 0) { return; } - - if (type == locktype_t::READ) { - HISTOGRAM_OBSERVE_IF_ELSE(m_metrics, node->is_leaf(), btree_inclusive_time_in_leaf_node, - btree_inclusive_time_in_int_node, time_spent); - } else { - HISTOGRAM_OBSERVE_IF_ELSE(m_metrics, node->is_leaf(), btree_exclusive_time_in_leaf_node, - btree_exclusive_time_in_int_node, time_spent); - } -} - -template < typename K, typename V > -void Btree< K, V >::_start_of_lock(const BtreeNodePtr< K >& node, locktype_t ltype, const char* fname, int line) { - btree_locked_node_info< K, V > info; - -#ifndef NDEBUG - info.fname = fname; - info.line = line; -#endif - - info.start_time = Clock::now(); - info.node = node.get(); - if (ltype == locktype_t::WRITE) { - bt_thread_vars()->wr_locked_nodes.push_back(info); - LOGTRACEMOD(btree, "ADDING node {} to write locked nodes list, its size={}", (void*)info.node, - bt_thread_vars()->wr_locked_nodes.size()); - } else if (ltype == locktype_t::READ) { - bt_thread_vars()->rd_locked_nodes.push_back(info); - LOGTRACEMOD(btree, "ADDING node {} to read locked nodes list, its size={}", (void*)info.node, - bt_thread_vars()->rd_locked_nodes.size()); - } else { - DEBUG_ASSERT(false, "Invalid locktype_t {}", ltype); - } -} - -template < typename K, typename V > -bool Btree< K, V >::remove_locked_node(const BtreeNodePtr< K >& node, locktype_t ltype, - btree_locked_node_info< K, V >* out_info) { - auto pnode_infos = - (ltype == locktype_t::WRITE) ? &bt_thread_vars()->wr_locked_nodes : &bt_thread_vars()->rd_locked_nodes; - - if (!pnode_infos->empty()) { - auto info = pnode_infos->back(); - if (info.node == node.get()) { - *out_info = info; - pnode_infos->pop_back(); - LOGTRACEMOD(btree, "REMOVING node {} from {} locked nodes list, its size = {}", (void*)info.node, - (ltype == locktype_t::WRITE) ? "write" : "read", pnode_infos->size()); - return true; - } else if (pnode_infos->size() > 1) { - info = pnode_infos->at(pnode_infos->size() - 2); - if (info.node == node.get()) { - *out_info = info; - pnode_infos->at(pnode_infos->size() - 2) = pnode_infos->back(); - pnode_infos->pop_back(); - LOGTRACEMOD(btree, "REMOVING node {} from {} locked nodes list, its size = {}", (void*)info.node, - (ltype == locktype_t::WRITE) ? "write" : "read", pnode_infos->size()); - return true; - } - } - } - -#ifndef NDEBUG - if (pnode_infos->empty()) { - LOGERRORMOD(btree, "locked_node_list: node = {} not found, locked node list empty", (void*)node.get()); - } else if (pnode_infos->size() == 1) { - LOGERRORMOD(btree, "locked_node_list: node = {} not found, total list count = 1, Expecting node = {}", - (void*)node.get(), (void*)pnode_infos->back().node); - } else { - LOGERRORMOD(btree, "locked_node_list: node = {} not found, total list count = {}, Expecting nodes = {} or {}", - (void*)node.get(), pnode_infos->size(), (void*)pnode_infos->back().node, - (void*)pnode_infos->at(pnode_infos->size() - 2).node); - } -#endif - return false; -} - -template < typename K, typename V > -uint64_t Btree< K, V >::end_of_lock(const BtreeNodePtr< K >& node, locktype_t ltype) { - btree_locked_node_info< K, V > info; - if (!remove_locked_node(node, ltype, &info)) { - DEBUG_ASSERT(false, "Expected node = {} is not there in locked_node_list", (void*)node.get()); - return 0; - } - // DEBUG_ASSERT_EQ(node.get(), info.node); - return get_elapsed_time_ns(info.start_time); -} - -#ifndef NDEBUG -template < typename K, typename V > -void Btree< K, V >::check_lock_debug() { - // both wr_locked_nodes and rd_locked_nodes are thread_local; - // nothing will be dumpped if there is no assert failure; - for (const auto& x : bt_thread_vars()->wr_locked_nodes) { - x.dump(); - } - for (const auto& x : bt_thread_vars()->rd_locked_nodes) { - x.dump(); - } - DEBUG_ASSERT_EQ(bt_thread_vars()->wr_locked_nodes.size(), 0); - DEBUG_ASSERT_EQ(bt_thread_vars()->rd_locked_nodes.size(), 0); -} -#endif - -} // namespace btree -} // namespace sisl diff --git a/src/btree/btree_query_impl.ipp b/src/btree/btree_query_impl.ipp deleted file mode 100644 index 0cc65619..00000000 --- a/src/btree/btree_query_impl.ipp +++ /dev/null @@ -1,360 +0,0 @@ -#pragma once -#include "btree.hpp" - -namespace sisl { -namespace btree { - -template < typename K, typename V > -btree_status_t Btree< K, V >::do_sweep_query(BtreeNodePtr< K >& my_node, BtreeQueryRequest& qreq, - std::vector< std::pair< K, V > >& out_values) const { - btree_status_t ret = btree_status_t::success; - if (my_node->is_leaf()) { - BT_NODE_DBG_ASSERT_GT(qreq.batch_size(), 0, my_node); - - auto count = 0U; - BtreeNodePtr< K > next_node = nullptr; - - do { - if (next_node) { - unlock_node(my_node, locktype_t::READ); - my_node = next_node; - } - - BT_NODE_LOG(TRACE, my_node, "Query leaf node"); - - uint32_t start_ind = 0u, end_ind = 0u; - static thread_local std::vector< std::pair< K, V > > s_match_kvs; - - s_match_kvs.clear(); - auto cur_count = - my_node->get_all(qreq.next_range(), qreq.batch_size() - count, start_ind, end_ind, &s_match_kvs); - if (cur_count == 0) { - if (my_node->get_last_key().compare(qreq.input_range().end_key()) >= 0) { - // we've covered all lba range, we are done now; - break; - } - } else { - // fall through to visit siblings if we haven't covered lba range yet; - if (m_bt_cfg.is_custom_kv()) { - static thread_local std::vector< std::pair< K, V > > s_result_kvs; - s_result_kvs.clear(); - custom_kv_select_for_read(my_node->get_version(), s_match_kvs, s_result_kvs, qreq.next_range(), - qreq); - - auto ele_to_add = std::min((uint32_t)s_result_kvs.size(), qreq.batch_size()); - if (ele_to_add > 0) { - out_values.insert(out_values.end(), s_result_kvs.begin(), s_result_kvs.begin() + ele_to_add); - } - count += ele_to_add; - BT_NODE_DBG_ASSERT_LE(count, qreq.batch_size(), my_node); - } else { - out_values.insert(std::end(out_values), std::begin(s_match_kvs), std::end(s_match_kvs)); - count += cur_count; - } - } - - // if cur_count is 0, keep querying sibling nodes; - if (ret == btree_status_t::success && (count < qreq.batch_size())) { - if (my_node->next_bnode() == empty_bnodeid) { break; } - ret = read_and_lock_sibling(my_node->next_bnode(), next_node, locktype_t::READ, locktype_t::READ, - nullptr); - if (ret == btree_status_t::fast_path_not_possible) { break; } - - if (ret != btree_status_t::success) { - LOGERROR("read failed btree name {}", m_bt_cfg.name()); - break; - } - } else { - if (count >= qreq.batch_size()) { ret = btree_status_t::has_more; } - break; - } - } while (true); - - unlock_node(my_node, locktype_t::READ); - return ret; - } - - BtreeNodeInfo start_child_info; - const auto [isfound, idx] = my_node->find(qreq.next_key(), &start_child_info, false); - ASSERT_IS_VALID_INTERIOR_CHILD_INDX(isfound, idx, my_node); - - BtreeNodePtr< K > child_node; - ret = read_and_lock_child(start_child_info.bnode_id(), child_node, my_node, idx, locktype_t::READ, locktype_t::READ, - nullptr); - unlock_node(my_node, locktype_t::READ); - if (ret != btree_status_t::success) { return ret; } - return (do_sweep_query(child_node, qreq, out_values)); -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::do_traversal_query(const BtreeNodePtr< K >& my_node, BtreeQueryRequest& qreq, - std::vector< std::pair< K, V > >& out_values) const { - btree_status_t ret = btree_status_t::success; - uint32_t idx; - - if (my_node->is_leaf()) { - BT_NODE_LOG_ASSERT_GT(qreq.batch_size(), 0, my_node); - - uint32_t start_ind = 0, end_ind = 0; - - static thread_local std::vector< std::pair< K, V > > s_match_kvs; - s_match_kvs.clear(); - auto cur_count = my_node->get_all(qreq.next_range(), qreq.batch_size() - (uint32_t)out_values.size(), start_ind, - end_ind, &s_match_kvs); - - if (cur_count && m_bt_cfg.is_custom_kv()) { - static thread_local std::vector< std::pair< K, V > > s_result_kvs; - s_result_kvs.clear(); - custom_kv_select_for_read(my_node->get_version(), s_match_kvs, s_result_kvs, qreq.next_range(), qreq); - - auto ele_to_add = s_result_kvs.size(); - if (ele_to_add > 0) { - out_values.insert(out_values.end(), s_result_kvs.begin(), s_result_kvs.begin() + ele_to_add); - } - } - out_values.insert(std::end(out_values), std::begin(s_match_kvs), std::end(s_match_kvs)); - - unlock_node(my_node, locktype_t::READ); - if (ret != btree_status_t::success || out_values.size() >= qreq.batch_size()) { - if (out_values.size() >= qreq.batch_size()) { ret = btree_status_t::has_more; } - } - - return ret; - } - - const auto [start_isfound, start_idx] = my_node->find(qreq.next_key(), nullptr, false); - auto [end_is_found, end_idx] = my_node->find(qreq.input_range().end_key(), nullptr, false); - bool unlocked_already = false; - - if (start_idx == my_node->get_total_entries() && !(my_node->has_valid_edge())) { - goto done; // no results found - } else if (end_idx == my_node->get_total_entries() && !(my_node->has_valid_edge())) { - --end_idx; // end is not valid - } - - BT_NODE_LOG_ASSERT_LE(start_idx, end_idx, my_node); - idx = start_idx; - - while (idx <= end_idx) { - BtreeNodeInfo child_info; - my_node->get_nth_value(idx, &child_info, false); - BtreeNodePtr< K > child_node = nullptr; - locktype_t child_cur_lock = locktype_t::READ; - ret = read_and_lock_child(child_info.bnode_id(), child_node, my_node, idx, child_cur_lock, child_cur_lock, - nullptr); - if (ret != btree_status_t::success) { break; } - - if (idx == end_idx) { - // If we have reached the last index, unlock before traversing down, because we no longer need - // this lock. Holding this lock will impact performance unncessarily. - unlock_node(my_node, locktype_t::READ); - unlocked_already = true; - } - // TODO - pass sub range if child is leaf - ret = do_traversal_query(child_node, qreq, out_values); - if (ret == btree_status_t::has_more) { break; } - ++idx; - } -done: - if (!unlocked_already) { unlock_node(my_node, locktype_t::READ); } - - return ret; -} - -template < typename K, typename V > -btree_status_t -Btree< K, V >::custom_kv_select_for_read(uint8_t node_version, const std::vector< std::pair< K, V > >& match_kv, - std::vector< std::pair< K, V > >& replace_kv, const BtreeKeyRange& range, - const BtreeRangeRequest& qreq) const { - - replace_kv = match_kv; - return btree_status_t::success; -} - -#ifdef SERIALIZABLE_QUERY_IMPLEMENTATION -btree_status_t do_serialzable_query(const BtreeNodePtr< K >& my_node, BtreeSerializableQueryRequest& qreq, - std::vector< std::pair< K, V > >& out_values) { - - btree_status_t ret = btree_status_t::success; - if (my_node->is_leaf) { - auto count = 0; - auto start_result = my_node->find(qreq.get_start_of_range(), nullptr, nullptr); - auto start_ind = start_result.end_of_search_index; - - auto end_result = my_node->find(qreq.get_end_of_range(), nullptr, nullptr); - auto end_ind = end_result.end_of_search_index; - if (!end_result.found) { end_ind--; } // not found entries will point to 1 ind after last in range. - - ind = start_ind; - while ((ind <= end_ind) && (count < qreq.batch_size())) { - K key; - V value; - my_node->get_nth_element(ind, &key, &value, false); - - if (!qreq.m_match_item_cb || qreq.m_match_item_cb(key, value)) { - out_values.emplace_back(std::make_pair< K, V >(key, value)); - count++; - } - ind++; - } - - bool has_more = ((ind >= start_ind) && (ind < end_ind)); - if (!has_more) { - unlock_node(my_node, locktype_t::READ); - get_tracker(qreq)->pop(); - return success; - } - - return has_more; - } - - BtreeNodeId start_child_ptr, end_child_ptr; - auto start_ret = my_node->find(qreq.get_start_of_range(), nullptr, &start_child_ptr); - ASSERT_IS_VALID_INTERIOR_CHILD_INDX(start_ret, my_node); - auto end_ret = my_node->find(qreq.get_end_of_range(), nullptr, &end_child_ptr); - ASSERT_IS_VALID_INTERIOR_CHILD_INDX(end_ret, my_node); - - BtreeNodePtr< K > child_node; - if (start_ret.end_of_search_index == end_ret.end_of_search_index) { - BT_LOG_ASSERT_CMP(start_child_ptr, ==, end_child_ptr, my_node); - - ret = - read_and_lock_node(start_child_ptr.get_node_id(), child_node, locktype_t::READ, locktype_t::READ, nullptr); - if (ret != btree_status_t::success) { - unlock_node(my_node, locktype_t::READ); - return ret; - } - unlock_node(my_node, locktype_t::READ); - - // Pop the last node and push this child node - get_tracker(qreq)->pop(); - get_tracker(qreq)->push(child_node); - return do_serialzable_query(child_node, qreq, search_range, out_values); - } else { - // This is where the deviation of tree happens. Do not pop the node out of lock tracker - bool has_more = false; - - for (auto i = start_ret.end_of_search_index; i <= end_ret.end_of_search_index; i++) { - BtreeNodeId child_ptr; - my_node->get_nth_value(i, &child_ptr, false); - ret = read_and_lock_node(child_ptr.get_node_id(), child_node, locktype_t::READ, locktype_t::READ, nullptr); - if (ret != btree_status_t::success) { - unlock_node(my_node, locktype_t::READ); - return ret; - } - - get_tracker(qreq)->push(child_node); - - ret = do_serialzable_query(child_node, qreq, out_values); - if (ret == BTREE_AGAIN) { - BT_LOG_ASSERT_CMP(out_values.size(), ==, qreq.batch_size(), ); - break; - } - } - - if (ret == BTREE_SUCCESS) { - unlock_node(my_node, locktype_t::READ); - HS_DEBUG_ASSERT_EQ(get_tracker(qreq)->top(), my_node); - get_tracker(qreq)->pop(); - } - return ret; - } -} -#endif - -#ifdef SERIALIZABLE_QUERY_IMPLEMENTATION -btree_status_t sweep_query(BtreeQueryRequest& qreq, std::vector< std::pair< K, V > >& out_values) { - COUNTER_INCREMENT(m_metrics, btree_read_ops_count, 1); - qreq.init_batch_range(); - - m_btree_lock.lock_shared(); - - BtreeNodePtr< K > root; - btree_status_t ret = btree_status_t::success; - - ret = read_and_lock_root(m_root_node_id, root, locktype_t::READ, locktype_t::READ, nullptr); - if (ret != btree_status_t::success) { goto out; } - - ret = do_sweep_query(root, qreq, out_values); -out: - m_btree_lock.unlock_shared(); - -#ifndef NDEBUG - check_lock_debug(); -#endif - return ret; -} - -btree_status_t serializable_query(BtreeSerializableQueryRequest& qreq, std::vector< std::pair< K, V > >& out_values) { - qreq.init_batch_range(); - - m_btree_lock.lock_shared(); - BtreeNodePtr< K > node; - btree_status_t ret; - - if (qreq.is_empty_cursor()) { - // Initialize a new lock tracker and put inside the cursor. - qreq.cursor().m_locked_nodes = std::make_unique< BtreeLockTrackerImpl >(this); - - BtreeNodePtr< K > root; - ret = read_and_lock_root(m_root_node_id, root, locktype_t::READ, locktype_t::READ, nullptr); - if (ret != btree_status_t::success) { goto out; } - get_tracker(qreq)->push(root); // Start tracking the locked nodes. - } else { - node = get_tracker(qreq)->top(); - } - - ret = do_serialzable_query(node, qreq, out_values); -out: - m_btree_lock.unlock_shared(); - - // TODO: Assert if key returned from do_get is same as key requested, incase of perfect match - -#ifndef NDEBUG - check_lock_debug(); -#endif - - return ret; -} - -BtreeLockTrackerImpl* get_tracker(BtreeSerializableQueryRequest& qreq) { - return (BtreeLockTrackerImpl*)qreq->get_cursor.m_locked_nodes.get(); -} - -template < typename K, typename V > -class BtreeLockTrackerImpl : public BtreeLockTracker { -public: - BtreeLockTrackerImpl(btree_t* bt) : m_bt(bt) {} - - virtual ~BtreeLockTrackerImpl() { - while (m_nodes.size()) { - auto& p = m_nodes.top(); - m_bt->unlock_node(p.first, p.second); - m_nodes.pop(); - } - } - - void push(const BtreeNodePtr< K >& node, locktype_t locktype) { m_nodes.emplace(std::make_pair<>(node, locktype)); } - - std::pair< BtreeNodePtr< K >, locktype_t > pop() { - HS_ASSERT_CMP(DEBUG, m_nodes.size(), !=, 0); - std::pair< BtreeNodePtr< K >, locktype_t > p; - if (m_nodes.size()) { - p = m_nodes.top(); - m_nodes.pop(); - } else { - p = std::make_pair<>(nullptr, locktype_t::LOCKTYPE_NONE); - } - - return p; - } - - BtreeNodePtr< K > top() { return (m_nodes.size == 0) ? nullptr : m_nodes.top().first; } - -private: - btree_t m_bt; - std::stack< std::pair< BtreeNodePtr< K >, locktype_t > > m_nodes; -}; -#endif -} // namespace btree -} // namespace sisl diff --git a/src/btree/btree_remove_impl.ipp b/src/btree/btree_remove_impl.ipp deleted file mode 100644 index b74d0d82..00000000 --- a/src/btree/btree_remove_impl.ipp +++ /dev/null @@ -1,391 +0,0 @@ -#pragma once -#include "btree.hpp" - -namespace sisl { -namespace btree { -template < typename K, typename V > -btree_status_t Btree< K, V >::do_remove(const BtreeNodePtr< K >& my_node, locktype_t curlock, - BtreeRemoveRequest& rreq) { - btree_status_t ret = btree_status_t::success; - if (my_node->is_leaf()) { - BT_NODE_DBG_ASSERT_EQ(curlock, locktype_t::WRITE, my_node); - -#ifndef NDEBUG - my_node->validate_key_order(); -#endif - bool is_found; - - if (is_remove_any_request(rreq)) { - is_found = my_node->remove_any(rreq.m_range, rreq.m_outkey.get(), rreq.m_outval.get()); - } else { - is_found = my_node->remove_one(rreq.key(), rreq.m_outkey.get(), rreq.m_outval.get()); - } -#ifndef NDEBUG - my_node->validate_key_order(); -#endif - if (is_found) { - write_node(my_node, nullptr, remove_req_op_ctx(rreq)); - COUNTER_DECREMENT(m_metrics, btree_obj_count, 1); - } - - unlock_node(my_node, curlock); - return is_found ? btree_status_t::success : btree_status_t::not_found; - } - -retry: - locktype_t child_cur_lock = locktype_t::NONE; - bool found; - uint32_t ind; - - // TODO: Range Delete support needs to be added here - // Get the childPtr for given key. - if (is_remove_any_request(rreq)) { - std::tie(found, ind) = my_node->find(to_remove_any_req(rreq).m_range.start_key(), &child_info, true); - } else { - std::tie(found, ind) = my_node->find(to_single_remove_req(rreq).key(), &child_info, true); - } - - ASSERT_IS_VALID_INTERIOR_CHILD_INDX(found, ind, my_node); - - BtreeNodeInfo child_info; - BtreeNodePtr< K > child_node; - ret = get_child_and_lock_node(my_node, ind, child_info, child_node, locktype_t::READ, locktype_t::WRITE); - if (ret != btree_status_t::success) { - unlock_node(my_node, curlock); - return ret; - } - - // Check if child node is minimal. - child_cur_lock = child_node->is_leaf() ? locktype_t::WRITE : locktype_t::READ; - if (child_node->is_merge_needed(m_bt_cfg)) { - // If we are unable to upgrade the node, ask the caller to retry. - ret = upgrade_node(my_node, child_node, curlock, child_cur_lock); - if (ret != btree_status_t::success) { - BT_NODE_DBG_ASSERT_EQ(curlock, locktype_t::NONE, my_node) - return ret; - } - BT_NODE_DBG_ASSERT_EQ(curlock, locktype_t::WRITE, my_node); - - uint32_t node_end_indx = - my_node->has_valid_edge() ? my_node->get_total_entries() : my_node->get_total_entries() - 1; - uint32_t end_ind = (ind + HS_DYNAMIC_CONFIG(btree->max_nodes_to_rebalance)) < node_end_indx - ? (ind + HS_DYNAMIC_CONFIG(btree->max_nodes_to_rebalance)) - : node_end_indx; - if (end_ind > ind) { - // It is safe to unlock child without upgrade, because child node would not be deleted, since its - // parent (myNode) is being write locked by this thread. In fact upgrading would be a problem, since - // this child might be a middle child in the list of indices, which means we might have to lock one - // in left against the direction of intended locking (which could cause deadlock). - unlock_node(child_node, child_cur_lock); - auto result = merge_nodes(my_node, ind, end_ind); - if (result != btree_status_t::success && result != btree_status_t::merge_not_required) { - // write or read failed - unlock_node(my_node, curlock); - return ret; - } - if (result == btree_status_t::success) { COUNTER_INCREMENT(m_metrics, btree_merge_count, 1); } - goto retry; - } - } - -#ifndef NDEBUG - if (ind != my_node->get_total_entries() && child_node->get_total_entries()) { // not edge - BT_NODE_DBG_ASSERT_LE(child_node->get_last_key().compare(my_node->get_nth_key(ind, false)), 0, my_node); - } - - if (ind > 0 && child_node->get_total_entries()) { // not first child - BT_NODE_DBG_ASSERT_LT(child_node->get_first_key().compare(my_node->get_nth_key(ind - 1, false)), 0, my_node); - } -#endif - - unlock_node(my_node, curlock); - return (do_remove(child_node, child_cur_lock, rreq)); - - // Warning: Do not access childNode or myNode beyond this point, since it would - // have been unlocked by the recursive function and it could also been deleted. -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::check_collapse_root(void* context) { - BtreeNodePtr< K > child_node = nullptr; - btree_status_t ret = btree_status_t::success; - std::vector< BtreeNodePtr< K > > old_nodes; - std::vector< BtreeNodePtr< K > > new_nodes; - - m_btree_lock.lock(); - BtreeNodePtr< K > root; - - ret = read_and_lock_root(m_root_node_id, root, locktype_t::WRITE, locktype_t::WRITE, context); - if (ret != btree_status_t::success) { goto done; } - - if (root->get_total_entries() != 0 || root->is_leaf() /*some other thread collapsed root already*/) { - unlock_node(root, locktype_t::WRITE); - goto done; - } - - BT_NODE_DBG_ASSERT_EQ(root->has_valid_edge(), true, root); - ret = read_node(root->get_edge_id(), child_node); - if (child_node == nullptr) { - unlock_node(root, locktype_t::WRITE); - goto done; - } - - // Elevate the edge child as root. - swap_node(root, child_node, context); - write_node(root, context); - BT_NODE_DBG_ASSERT_EQ(m_root_node_id, root->get_node_id(), root); - old_nodes.push_back(child_node); - - static thread_local std::vector< BtreeNodePtr< K > > s_nodes; - s_nodes.clear(); - s_nodes.push_back(child_node); - merge_node_precommit(true, nullptr, 0, root, &s_nodes, nullptr, context); - - unlock_node(root, locktype_t::WRITE); - free_node(child_node, context); - - if (ret == btree_status_t::success) { COUNTER_DECREMENT(m_metrics, btree_depth, 1); } -done: - m_btree_lock.unlock(); - return ret; -} - -template < typename K, typename V > -btree_status_t Btree< K, V >::merge_nodes(const BtreeNodePtr< K >& parent_node, uint32_t start_indx, uint32_t end_indx, - void* context) { - btree_status_t ret = btree_status_t::merge_failed; - std::vector< BtreeNodePtr< K > > child_nodes; - std::vector< BtreeNodePtr< K > > old_nodes; - std::vector< BtreeNodePtr< K > > replace_nodes; - std::vector< BtreeNodePtr< K > > new_nodes; - std::vector< BtreeNodePtr< K > > deleted_nodes; - BtreeNodePtr< K > left_most_node; - K last_pkey; // last key of parent node - bool last_pkey_valid = false; - uint32_t balanced_size; - BtreeNodePtr< K > merge_node; - K last_ckey; // last key in child - uint32_t parent_insert_indx = start_indx; -#ifndef NDEBUG - uint32_t total_child_entries = 0; - uint32_t new_entries = 0; - K last_debug_ckey; - K new_last_debug_ckey; - BtreeNodePtr< K > last_node; -#endif - /* Try to take a lock on all nodes participating in merge*/ - for (auto indx = start_indx; indx <= end_indx; ++indx) { - if (indx == parent_node->get_total_entries()) { - BT_NODE_LOG_ASSERT(parent_node->has_valid_edge(), parent_node, - "Assertion failure, expected valid edge for parent_node: {}"); - } - - BtreeNodeInfo child_info; - parent_node->get(indx, &child_info, false /* copy */); - - BtreeNodePtr< K > child; - ret = read_and_lock_node(child_info.bnode_id(), child, locktype_t::WRITE, locktype_t::WRITE, bcp); - if (ret != btree_status_t::success) { goto out; } - BT_NODE_LOG_ASSERT_EQ(child->is_valid_node(), true, child); - - /* check if left most node has space */ - if (indx == start_indx) { - balanced_size = m_bt_cfg.ideal_fill_size(); - left_most_node = child; - if (left_most_node->get_occupied_size(m_bt_cfg) > balanced_size) { - /* first node doesn't have any free space. we can exit now */ - ret = btree_status_t::merge_not_required; - goto out; - } - } else { - bool is_allocated = true; - /* pre allocate the new nodes. We will free the nodes which are not in use later */ - auto new_node = alloc_node(child->is_leaf(), is_allocated, child); - if (is_allocated) { - /* we are going to allocate new blkid of all the nodes except the first node. - * Note :- These blkids will leak if we fail or crash before writing entry into - * journal. - */ - old_nodes.push_back(child); - COUNTER_INCREMENT_IF_ELSE(m_metrics, child->is_leaf(), btree_leaf_node_count, btree_int_node_count, 1); - } - /* Blk IDs can leak if it crash before writing it to a journal */ - if (new_node == nullptr) { - ret = btree_status_t::space_not_avail; - goto out; - } - new_nodes.push_back(new_node); - } -#ifndef NDEBUG - total_child_entries += child->get_total_entries(); - child->get_last_key(&last_debug_ckey); -#endif - child_nodes.push_back(child); - } - - if (end_indx != parent_node->get_total_entries()) { - /* If it is not edge we always preserve the last key in a given merge group of nodes.*/ - parent_node->get_nth_key(end_indx, &last_pkey, true); - last_pkey_valid = true; - } - - merge_node = left_most_node; - /* We can not fail from this point. Nodes will be modified in memory. */ - for (uint32_t i = 0; i < new_nodes.size(); ++i) { - auto occupied_size = merge_node->get_occupied_size(m_bt_cfg); - if (occupied_size < balanced_size) { - uint32_t pull_size = balanced_size - occupied_size; - merge_node->move_in_from_right_by_size(m_bt_cfg, new_nodes[i], pull_size); - if (new_nodes[i]->get_total_entries() == 0) { - /* this node is freed */ - deleted_nodes.push_back(new_nodes[i]); - continue; - } - } - - /* update the last key of merge node in parent node */ - K last_ckey; // last key in child - merge_node->get_last_key(&last_ckey); - BtreeNodeInfo ninfo(merge_node->get_node_id()); - parent_node->update(parent_insert_indx, last_ckey, ninfo); - ++parent_insert_indx; - - merge_node->set_next_bnode(new_nodes[i]->get_node_id()); // link them - merge_node = new_nodes[i]; - if (merge_node != left_most_node) { - /* left most node is not replaced */ - replace_nodes.push_back(merge_node); - } - } - - /* update the latest merge node */ - merge_node->get_last_key(&last_ckey); - if (last_pkey_valid) { - BT_DBG_ASSERT_CMP(last_ckey.compare(&last_pkey), <=, 0, parent_node); - last_ckey = last_pkey; - } - - /* update the last key */ - { - BtreeNodeInfo ninfo(merge_node->get_node_id()); - parent_node->update(parent_insert_indx, last_ckey, ninfo); - ++parent_insert_indx; - } - - /* remove the keys which are no longer used */ - if ((parent_insert_indx) <= end_indx) { parent_node->remove(parent_insert_indx, end_indx); } - - // TODO: Validate if empty child_pkey on last_key or edge has any impact on journal/precommit - K child_pkey; - if (start_indx < parent_node->get_total_entries()) { - child_pkey = parent_node->get_nth_key(start_indx, true); - BT_NODE_REL_ASSERT_EQ(start_indx, (parent_insert_indx - 1), parent_node, "it should be last index"); - } - - merge_node_precommit(false, parent_node, parent_merge_start_idx, left_most_node, &old_nodes, &replace_nodes, - context); - -#if 0 - /* write the journal entry */ - if (BtreeStoreType == btree_store_type::SSD_BTREE) { - auto j_iob = btree_store_t::make_journal_entry(journal_op::BTREE_MERGE, false /* is_root */, bcp, - {parent_node->get_node_id(), parent_node->get_gen()}); - K child_pkey; - if (start_indx < parent_node->get_total_entries()) { - parent_node->get_nth_key(start_indx, &child_pkey, true); - BT_REL_ASSERT_CMP(start_indx, ==, (parent_insert_indx - 1), parent_node, "it should be last index"); - } - btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::inplace_write, left_most_node, bcp, - child_pkey.get_blob()); - for (auto& node : old_nodes) { - btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::removal, node, bcp); - } - uint32_t insert_indx = 0; - for (auto& node : replace_nodes) { - K child_pkey; - if ((start_indx + insert_indx) < parent_node->get_total_entries()) { - parent_node->get_nth_key(start_indx + insert_indx, &child_pkey, true); - BT_REL_ASSERT_CMP((start_indx + insert_indx), ==, (parent_insert_indx - 1), parent_node, - "it should be last index"); - } - btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::creation, node, bcp, - child_pkey.get_blob()); - ++insert_indx; - } - BT_REL_ASSERT_CMP((start_indx + insert_indx), ==, parent_insert_indx, parent_node, "it should be same"); - btree_store_t::write_journal_entry(m_btree_store.get(), bcp, j_iob); - } -#endif - - if (replace_nodes.size() > 0) { - /* write the right most node */ - write_node(replace_nodes[replace_nodes.size() - 1], nullptr, bcp); - if (replace_nodes.size() > 1) { - /* write the middle nodes */ - for (int i = replace_nodes.size() - 2; i >= 0; --i) { - write_node(replace_nodes[i], replace_nodes[i + 1], bcp); - } - } - /* write the left most node */ - write_node(left_most_node, replace_nodes[0], bcp); - } else { - /* write the left most node */ - write_node(left_most_node, nullptr, bcp); - } - - /* write the parent node */ - write_node(parent_node, left_most_node, bcp); - -#ifndef NDEBUG - for (const auto& n : replace_nodes) { - new_entries += n->get_total_entries(); - } - - new_entries += left_most_node->get_total_entries(); - BT_DBG_ASSERT_EQ(total_child_entries, new_entries); - - if (replace_nodes.size()) { - replace_nodes[replace_nodes.size() - 1]->get_last_key(&new_last_debug_ckey); - last_node = replace_nodes[replace_nodes.size() - 1]; - } else { - left_most_node->get_last_key(&new_last_debug_ckey); - last_node = left_most_node; - } - if (last_debug_ckey.compare(&new_last_debug_ckey) != 0) { - LOGINFO("{}", last_node->to_string()); - if (deleted_nodes.size() > 0) { LOGINFO("{}", (deleted_nodes[deleted_nodes.size() - 1]->to_string())); } - HS_DEBUG_ASSERT(false, "compared failed"); - } -#endif - /* free nodes. It actually gets freed after cp is completed */ - for (const auto& n : old_nodes) { - free_node(n, (bcp ? bcp->free_blkid_list : nullptr)); - } - for (const auto& n : deleted_nodes) { - free_node(n); - } - ret = btree_status_t::success; -out: -#ifndef NDEBUG - uint32_t freed_entries = deleted_nodes.size(); - uint32_t scan_entries = end_indx - start_indx - freed_entries + 1; - for (uint32_t i = 0; i < scan_entries; ++i) { - if (i < (scan_entries - 1)) { validate_sanity_next_child(parent_node, (uint32_t)start_indx + i); } - validate_sanity_child(parent_node, (uint32_t)start_indx + i); - } -#endif - // Loop again in reverse order to unlock the nodes. freeable nodes need to be unlocked and freed - for (uint32_t i = child_nodes.size() - 1; i != 0; i--) { - unlock_node(child_nodes[i], locktype_t::WRITE); - } - unlock_node(child_nodes[0], locktype_t::WRITE); - if (ret != btree_status_t::success) { - /* free the allocated nodes */ - for (const auto& n : new_nodes) { - free_node(n); - } - } - return ret; -} -} // namespace btree -} // namespace sisl diff --git a/src/btree/btree_req.hpp b/src/btree/btree_req.hpp deleted file mode 100644 index d7b45e9a..00000000 --- a/src/btree/btree_req.hpp +++ /dev/null @@ -1,242 +0,0 @@ -#pragma once -#include "btree_kv.hpp" -#include "fds/buffer.hpp" - -namespace sisl { -namespace btree { -// Base class for any btree operations -struct BtreeRequest { - BtreeRequest() = default; - BtreeRequest(void* app_ctx, void* op_ctx) : m_app_context{app_ctx}, m_op_context{op_ctx} {} - void* m_app_context{nullptr}; - void* m_op_context{nullptr}; -}; - -// Base class for all range related operations -struct BtreeRangeRequest : public BtreeRequest { -public: - const BtreeKeyRange& input_range() const { return m_search_state.input_range(); } - uint32_t batch_size() const { return m_batch_size; } - void set_batch_size(uint32_t count) { m_batch_size = count; } - - bool is_empty_cursor() const { - return ((m_search_state.const_cursor()->m_last_key == nullptr) && - (m_search_state.const_cursor()->m_locked_nodes == nullptr)); - } - - BtreeSearchState& search_state() { return m_search_state; } - BtreeQueryCursor* cursor() { return m_search_state.cursor(); } - const BtreeQueryCursor* const_cursor() const { return m_search_state.const_cursor(); } - BtreeKeyRange next_range() const { return m_search_state.next_range(); } - - const BtreeKeyRange& current_sub_range() const { return m_search_state.current_sub_range(); } - void set_current_sub_range(const BtreeKeyRange& new_sub_range) { - m_search_state.set_current_sub_range(new_sub_range); - } - const BtreeKey& next_key() const { return m_search_state.next_key(); } - -protected: - BtreeRangeRequest(BtreeSearchState&& search_state, void* app_context = nullptr, uint32_t batch_size = UINT32_MAX) : - BtreeRequest{app_context, nullptr}, m_search_state(std::move(search_state)), m_batch_size(UINT32_MAX) {} - -private: - BtreeSearchState m_search_state; - uint32_t m_batch_size{1}; -}; - -/////////////////////////// 1: Put Operations ///////////////////////////////////// -struct BtreeSinglePutRequest : public BtreeRequest { -public: - BtreeSinglePutRequest(std::unique_ptr< const BtreeKey > k, std::unique_ptr< const BtreeValue > v, - btree_put_type put_type, std::unique_ptr< BtreeValue > existing_val = nullptr) : - m_k{std::move(k)}, m_v{std::move(v)}, m_put_type{put_type}, m_existing_val{std::move(existing_val)} {} - - const BtreeKey& key() const { return *m_k; } - const BtreeValue& value() const { return *m_v; } - - std::unique_ptr< const BtreeKey > m_k; - std::unique_ptr< const BtreeValue > m_v; - btree_put_type m_put_type; - std::unique_ptr< BtreeValue > m_existing_val; -}; - -struct BtreeRangeUpdateRequest : public BtreeRangeRequest { -public: - BtreeRangeUpdateRequest(BtreeSearchState&& search_state, btree_put_type put_type, const BtreeValue& value, - void* app_context = nullptr, uint32_t batch_size = std::numeric_limits< uint32_t >::max()) : - BtreeRangeRequest(std::move(search_state), app_context, batch_size), - m_put_type{put_type}, - m_newval{value} {} - - const btree_put_type m_put_type{btree_put_type::INSERT_ONLY_IF_NOT_EXISTS}; - const BtreeValue& m_newval; -}; - -using BtreeMutateRequest = std::variant< BtreeSinglePutRequest, BtreeRangeUpdateRequest >; - -static bool is_range_update_req(BtreeMutateRequest& req) { - return (std::holds_alternative< BtreeRangeUpdateRequest >(req)); -} - -static BtreeRangeUpdateRequest& to_range_update_req(BtreeMutateRequest& req) { - return std::get< BtreeRangeUpdateRequest >(req); -} - -static BtreeSinglePutRequest& to_single_put_req(BtreeMutateRequest& req) { - return std::get< BtreeSinglePutRequest >(req); -} - -static void* put_req_op_ctx(BtreeMutateRequest& req) { - return (is_range_update_req(req)) ? to_range_update_req(req).m_op_context : to_single_put_req(req).m_op_context; -} - -/////////////////////////// 2: Remove Operations ///////////////////////////////////// -struct BtreeSingleRemoveRequest : public BtreeRequest { -public: - BtreeSingleRemoveRequest(std::unique_ptr< const BtreeKey > k, std::unique_ptr< BtreeValue > out_val) : - m_k{std::move(k)}, m_outval{std::move(out_val)} {} - - const BtreeKey& key() const { return *m_k; } - const BtreeValue& value() const { return *m_outval; } - - std::unique_ptr< const BtreeKey > m_k; - std::unique_ptr< BtreeValue > m_outval; -}; - -struct BtreeRemoveAnyRequest : public BtreeRequest { -public: - BtreeRemoveAnyRequest(BtreeKeyRange&& range, std::unique_ptr< BtreeKey > out_key, - std::unique_ptr< BtreeValue > out_val) : - m_range{std::move(range)}, m_outkey{std::move(out_key)}, m_outval{std::move(out_val)} {} - - BtreeKeyRange m_range; - std::unique_ptr< BtreeKey > m_outkey; - std::unique_ptr< BtreeValue > m_outval; -}; - -using BtreeRemoveRequest = std::variant< BtreeSingleRemoveRequest, BtreeRemoveAnyRequest >; - -static bool is_remove_any_request(BtreeRemoveRequest& req) { - return (std::holds_alternative< BtreeRemoveAnyRequest >(req)); -} - -static BtreeSingleRemoveRequest& to_single_remove_req(BtreeRemoveRequest& req) { - return std::get< BtreeSingleRemoveRequest >(req); -} - -static BtreeRemoveAnyRequest& to_remove_any_req(BtreeRemoveRequest& req) { - return std::get< BtreeRemoveAnyRequest >(req); -} - -static void* remove_req_op_ctx(BtreeRemoveRequest& req) { - return (is_remove_any_request(req)) ? to_remove_any_req(req).m_op_context : to_single_remove_req(req).m_op_context; -} - -/////////////////////////// 3: Get Operations ///////////////////////////////////// -struct BtreeSingleGetRequest : public BtreeRequest { -public: - BtreeSingleGetRequest(std::unique_ptr< const BtreeKey > k, std::unique_ptr< BtreeValue > out_val) : - m_k{std::move(k)}, m_outval{std::move(out_val)} {} - - const BtreeKey& key() const { return *m_k; } - const BtreeValue& value() const { return *m_outval; } - - std::unique_ptr< const BtreeKey > m_k; - std::unique_ptr< BtreeValue > m_outval; -}; - -struct BtreeGetAnyRequest : public BtreeRequest { -public: - BtreeGetAnyRequest(BtreeKeyRange&& range, std::unique_ptr< BtreeKey > out_key, - std::unique_ptr< BtreeValue > out_val) : - m_range{std::move(range)}, m_outkey{std::move(out_key)}, m_outval{std::move(out_val)} {} - - BtreeKeyRange m_range; - std::unique_ptr< BtreeKey > m_outkey; - std::unique_ptr< BtreeValue > m_outval; -}; - -using BtreeGetRequest = std::variant< BtreeSingleGetRequest, BtreeGetAnyRequest >; - -static bool is_get_any_request(BtreeGetRequest& req) { return (std::holds_alternative< BtreeGetAnyRequest >(req)); } - -static BtreeSingleGetRequest& to_single_get_req(BtreeGetRequest& req) { return std::get< BtreeSingleGetRequest >(req); } - -static BtreeGetAnyRequest& to_get_any_req(BtreeGetRequest& req) { return std::get< BtreeGetAnyRequest >(req); } - -static void* get_req_op_ctx(BtreeGetRequest& req) { - return (is_get_any_request(req)) ? to_get_any_req(req).m_op_context : to_single_get_req(req).m_op_context; -} - -/////////////////////////// 4 Range Query Operations ///////////////////////////////////// -ENUM(BtreeQueryType, uint8_t, - // This is default query which walks to first element in range, and then sweeps/walks - // across the leaf nodes. However, if upon pagination, it again walks down the query from - // the key it left off. - SWEEP_NON_INTRUSIVE_PAGINATION_QUERY, - - // Similar to sweep query, except that it retains the node and its lock during - // pagination. This is more of intrusive query and if the caller is not careful, the read - // lock will never be unlocked and could cause deadlocks. Use this option carefully. - SWEEP_INTRUSIVE_PAGINATION_QUERY, - - // This is relatively inefficient query where every leaf node goes from its parent node - // instead of walking the leaf node across. This is useful only if we want to check and - // recover if parent and leaf node are in different generations or crash recovery cases. - TREE_TRAVERSAL_QUERY, - - // This is both inefficient and quiet intrusive/unsafe query, where it locks the range - // that is being queried for and do not allow any insert or update within that range. It - // essentially create a serializable level of isolation. - SERIALIZABLE_QUERY) - -struct BtreeQueryRequest : public BtreeRangeRequest { -public: - /* TODO :- uint32_max to c++. pass reference */ - BtreeQueryRequest(BtreeSearchState&& search_state, - BtreeQueryType query_type = BtreeQueryType::SWEEP_NON_INTRUSIVE_PAGINATION_QUERY, - uint32_t batch_size = UINT32_MAX, void* app_context = nullptr) : - BtreeRangeRequest(std::move(search_state), app_context, batch_size), m_query_type(query_type) {} - ~BtreeQueryRequest() = default; - - // virtual bool is_serializable() const = 0; - BtreeQueryType query_type() const { return m_query_type; } - -protected: - const BtreeQueryType m_query_type; // Type of the query -}; - -/* This class is a top level class to keep track of the locks that are held currently. It is - * used for serializabke query to unlock all nodes in right order at the end of the lock */ -class BtreeLockTracker { -public: - virtual ~BtreeLockTracker() = default; -}; - -#if 0 -class BtreeSweepQueryRequest : public BtreeQueryRequest { -public: - BtreeSweepQueryRequest(const BtreeSearchRange& criteria, uint32_t iter_count = 1000, - const match_item_cb_t& match_item_cb = nullptr) : - BtreeQueryRequest(criteria, iter_count, match_item_cb) {} - - BtreeSweepQueryRequest(const BtreeSearchRange &criteria, const match_item_cb_t& match_item_cb) : - BtreeQueryRequest(criteria, 1000, match_item_cb) {} - - bool is_serializable() const { return false; } -}; - -class BtreeSerializableQueryRequest : public BtreeQueryRequest { -public: - BtreeSerializableQueryRequest(const BtreeSearchRange &range, uint32_t iter_count = 1000, - const match_item_cb_t& match_item_cb = nullptr) : - BtreeQueryRequest(range, iter_count, match_item_cb) {} - - BtreeSerializableQueryRequest(const BtreeSearchRange &criteria, const match_item_cb_t& match_item_cb) : - BtreeSerializableQueryRequest(criteria, 1000, match_item_cb) {} - - bool is_serializable() const { return true; } -}; -#endif -} // namespace btree -} // namespace sisl diff --git a/src/btree/hs_btree.hpp b/src/btree/hs_btree.hpp deleted file mode 100644 index 626e5b61..00000000 --- a/src/btree/hs_btree.hpp +++ /dev/null @@ -1,396 +0,0 @@ -#pragma once - -namespace sisl { -template < typename K, typename V > -class HSBtree : public Btree< K, V > { - static btree_t* create_btree(const btree_super_block& btree_sb, const BtreeConfig& cfg, btree_cp_sb* cp_sb, - const split_key_callback& split_key_cb) { - Btree* bt = new Btree(cfg); - auto impl_ptr = btree_store_t::init_btree(bt, cfg); - bt->m_btree_store = std::move(impl_ptr); - bt->init_recovery(btree_sb, cp_sb, split_key_cb); - LOGINFO("btree recovered and created {}, node size {}", cfg.get_name(), cfg.get_node_size()); - return bt; - } - - void init(bool recovery) { - m_total_nodes = m_last_cp_sb.btree_size; - m_bt_store->update_sb(m_sb, &m_last_cp_sb, is_recovery); - create_root_node(); - } - - void init_recovery(const btree_super_block& btree_sb, btree_cp_sb* cp_sb, const split_key_callback& split_key_cb) { - m_sb = btree_sb; - m_split_key_cb = split_key_cb; - if (cp_sb) { memcpy(&m_last_cp_sb, cp_sb, sizeof(m_last_cp_sb)); } - do_common_init(true); - m_root_node_id = m_sb.root_node; - } - - /* It is called when its btree consumer has successfully stored the btree superblock */ - void create_done() { btree_store_t::create_done(m_btree_store.get(), m_root_node_id); } - void destroy_done() { btree_store_t::destroy_done(m_btree_store.get()); } - - void replay_done(const btree_cp_ptr& bcp) { - m_total_nodes = m_last_cp_sb.btree_size + bcp->btree_size.load(); - BT_LOG(INFO, base, , "total btree nodes {}", m_total_nodes); - } - - btree_status_t free_btree(const BtreeNodePtr< K >& start_node, blkid_list_ptr free_blkid_list, bool in_mem, - uint64_t& free_node_cnt) { - // TODO - this calls free node on mem_tree and ssd_tree. - // In ssd_tree we free actual block id, which is not correct behavior - // we shouldnt really free any blocks on free node, just reclaim any memory - // occupied by ssd_tree structure in memory. Ideally we should have sepearte - // api like deleteNode which should be called instead of freeNode - const auto ret = post_order_traversal( - start_node, [this, free_blkid_list, in_mem, &free_node_cnt](const BtreeNodePtr< K >& node) { - free_node(node, free_blkid_list, in_mem); - ++free_node_cnt; - }); - return ret; - } - - /* It attaches the new CP and prepare for cur cp flush */ - btree_cp_ptr attach_prepare_cp(const btree_cp_ptr& cur_bcp, bool is_last_cp, bool blkalloc_checkpoint) { - return (btree_store_t::attach_prepare_cp(m_btree_store.get(), cur_bcp, is_last_cp, blkalloc_checkpoint)); - } - - void cp_start(const btree_cp_ptr& bcp, cp_comp_callback cb) { - btree_store_t::cp_start(m_btree_store.get(), bcp, cb); - } - - std::string get_cp_flush_status(const btree_cp_ptr& bcp) const { - return (btree_store_t::get_cp_flush_status(m_btree_store.get(), bcp)); - } - - void truncate(const btree_cp_ptr& bcp) { btree_store_t::truncate(m_btree_store.get(), bcp); } - - /* It is called before superblock is persisted for each CP */ - void update_btree_cp_sb(const btree_cp_ptr& bcp, btree_cp_sb& btree_sb, bool is_blkalloc_cp) { - btree_sb.active_seqid = bcp->end_seqid; - btree_sb.blkalloc_cp_id = is_blkalloc_cp ? bcp->cp_id : m_last_cp_sb.blkalloc_cp_id; - btree_sb.btree_size = bcp->btree_size.load() + m_last_cp_sb.btree_size; - btree_sb.cp_id = bcp->cp_id; - HS_DEBUG_ASSERT_EQ((int64_t)m_last_cp_sb.cp_id, (int64_t)bcp->cp_id - 1); - memcpy(&m_last_cp_sb, &btree_sb, sizeof(m_last_cp_sb)); - } - - void flush_free_blks(const btree_cp_ptr& bcp, std::shared_ptr< homestore::blkalloc_cp >& ba_cp) { - btree_store_t::flush_free_blks(m_btree_store.get(), bcp, ba_cp); - } - - /** - * @brief : verify the btree node is corrupted or not; - * - * Note: this function should never assert, but only return success or failure since it is in verification mode; - * - * @param bnodeid : node id - * @param parent_node : parent node ptr - * @param indx : index within thie node; - * @param update_debug_bm : true or false; - * - * @return : true if this node including all its children are not corrupted; - * false if not; - */ - template < typename K, typename V > - bool Btree< K, V >::verify_node(bnodeid_t bnodeid, BtreeNodePtr< K > parent_node, uint32_t indx, - bool update_debug_bm) { - locktype_t acq_lock = locktype_t::READ; - BtreeNodePtr< K > my_node; - if (read_and_lock_node(bnodeid, my_node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { - LOGINFO("read node failed"); - return false; - } - if (update_debug_bm && - (btree_store_t::update_debug_bm(m_btree_store.get(), my_node) != btree_status_t::success)) { - LOGERROR("bitmap update failed for node {}", my_node->to_string()); - return false; - } - - K prev_key; - bool success = true; - for (uint32_t i = 0; i < my_node->get_total_entries(); ++i) { - K key; - my_node->get_nth_key(i, &key, false); - if (!my_node->is_leaf()) { - BtreeNodeInfo child; - my_node->get(i, &child, false); - success = verify_node(child.bnode_id(), my_node, i, update_debug_bm); - if (!success) { goto exit_on_error; } - - if (i > 0) { - BT_LOG_ASSERT_CMP(prev_key.compare(&key), <, 0, my_node); - if (prev_key.compare(&key) >= 0) { - success = false; - goto exit_on_error; - } - } - } - if (my_node->is_leaf() && i > 0) { - BT_LOG_ASSERT_CMP(prev_key.compare_start(&key), <, 0, my_node); - if (prev_key.compare_start(&key) >= 0) { - success = false; - goto exit_on_error; - } - } - prev_key = key; - } - - if (my_node->is_leaf() && my_node->get_total_entries() == 0) { - /* this node has zero entries */ - goto exit_on_error; - } - if (parent_node && parent_node->get_total_entries() != indx) { - K parent_key; - parent_node->get_nth_key(indx, &parent_key, false); - - K last_key; - my_node->get_nth_key(my_node->get_total_entries() - 1, &last_key, false); - if (!my_node->is_leaf()) { - BT_LOG_ASSERT_CMP(last_key.compare(&parent_key), ==, 0, parent_node, - "last key {} parent_key {} child {}", last_key.to_string(), parent_key.to_string(), - my_node->to_string()); - if (last_key.compare(&parent_key) != 0) { - success = false; - goto exit_on_error; - } - } else { - BT_LOG_ASSERT_CMP(last_key.compare(&parent_key), <=, 0, parent_node, - "last key {} parent_key {} child {}", last_key.to_string(), parent_key.to_string(), - my_node->to_string()); - if (last_key.compare(&parent_key) > 0) { - success = false; - goto exit_on_error; - } - BT_LOG_ASSERT_CMP(parent_key.compare_start(&last_key), >=, 0, parent_node, - "last key {} parent_key {} child {}", last_key.to_string(), parent_key.to_string(), - my_node->to_string()); - if (parent_key.compare_start(&last_key) < 0) { - success = false; - goto exit_on_error; - } - } - } - - if (parent_node && indx != 0) { - K parent_key; - parent_node->get_nth_key(indx - 1, &parent_key, false); - - K first_key; - my_node->get_nth_key(0, &first_key, false); - BT_LOG_ASSERT_CMP(first_key.compare(&parent_key), >, 0, parent_node, "my node {}", my_node->to_string()); - if (first_key.compare(&parent_key) <= 0) { - success = false; - goto exit_on_error; - } - - BT_LOG_ASSERT_CMP(parent_key.compare_start(&first_key), <, 0, parent_node, "my node {}", - my_node->to_string()); - if (parent_key.compare_start(&first_key) > 0) { - success = false; - goto exit_on_error; - } - } - - if (my_node->has_valid_edge()) { - success = verify_node(my_node->get_edge_id(), my_node, my_node->get_total_entries(), update_debug_bm); - if (!success) { goto exit_on_error; } - } - - exit_on_error: - unlock_node(my_node, acq_lock); - return success; - } - - btree_status_t create_btree_replay(btree_journal_entry* jentry, const btree_cp_ptr& bcp) { - if (jentry) { - BT_DBG_ASSERT_CMP(jentry->is_root, ==, true, , - "Expected create_btree_replay entry to be root journal entry"); - BT_DBG_ASSERT_CMP(jentry->parent_node.get_id(), ==, m_root_node_id, , "Root node journal entry mismatch"); - } - - // Create a root node by reserving the leaf node - BtreeNodePtr< K > root = reserve_leaf_node(BlkId(m_root_node_id)); - auto ret = write_node(root, nullptr, bcp); - BT_DBG_ASSERT_CMP(ret, ==, btree_status_t::success, , "expecting success in writing root node"); - return btree_status_t::success; - } - - btree_status_t split_node_replay(btree_journal_entry* jentry, const btree_cp_ptr& bcp) { - bnodeid_t id = jentry->is_root ? m_root_node_id : jentry->parent_node.node_id; - BtreeNodePtr< K > parent_node; - - // read parent node - read_node_or_fail(id, parent_node); - - // Parent already went ahead of the journal entry, return done - if (parent_node->get_gen() >= jentry->parent_node.node_gen) { - BT_LOG(INFO, base, , "Journal replay: parent_node gen {} ahead of jentry gen {} is root {} , skipping ", - parent_node->get_gen(), jentry->parent_node.get_gen(), jentry->is_root); - return btree_status_t::replay_not_needed; - } - - // Read the first inplace write node which is the leftmost child and also form child split key from journal - auto j_child_nodes = jentry->get_nodes(); - - BtreeNodePtr< K > child_node1; - if (jentry->is_root) { - // If root is not written yet, parent_node will be pointing child_node1, so create a new parent_node to - // be treated as root here on. - child_node1 = reserve_interior_node(BlkId(j_child_nodes[0]->node_id())); - btree_store_t::swap_node(m_btree_store.get(), parent_node, child_node1); - - BT_LOG(INFO, btree_generics, , - "Journal replay: root split, so creating child_node id={} and swapping the node with " - "parent_node id={} names {}", - child_node1->get_node_id(), parent_node->get_node_id(), m_cfg.name()); - - } else { - read_node_or_fail(j_child_nodes[0]->node_id(), child_node1); - } - - THIS_BT_LOG(INFO, btree_generics, , - "Journal replay: child_node1 => jentry: [id={} gen={}], ondisk: [id={} gen={}] names {}", - j_child_nodes[0]->node_id(), j_child_nodes[0]->node_gen(), child_node1->get_node_id(), - child_node1->get_gen(), m_cfg.name()); - if (jentry->is_root) { - BT_RELEASE_ASSERT_CMP(j_child_nodes[0]->type, ==, bt_journal_node_op::creation, , - "Expected first node in journal entry to be new creation for root split"); - } else { - BT_RELEASE_ASSERT_CMP(j_child_nodes[0]->type, ==, bt_journal_node_op::inplace_write, , - "Expected first node in journal entry to be in-place write"); - } - BT_RELEASE_ASSERT_CMP(j_child_nodes[1]->type, ==, bt_journal_node_op::creation, , - "Expected second node in journal entry to be new node creation"); - - // recover child node - bool child_split = recover_child_nodes_in_split(child_node1, j_child_nodes, bcp); - - // recover parent node - recover_parent_node_in_split(parent_node, child_split ? child_node1 : nullptr, j_child_nodes, bcp); - return btree_status_t::success; - } - - bool recover_child_nodes_in_split(const BtreeNodePtr< K >& child_node1, - const std::vector< bt_journal_node_info* >& j_child_nodes, - const btree_cp_ptr& bcp) { - - BtreeNodePtr< K > child_node2; - // Check if child1 is ahead of the generation - if (child_node1->get_gen() >= j_child_nodes[0]->node_gen()) { - // leftmost_node is written, so right node must have been written as well. - read_node_or_fail(child_node1->next_bnode(), child_node2); - - // sanity check for right node - BT_RELEASE_ASSERT_CMP(child_node2->get_gen(), >=, j_child_nodes[1]->node_gen(), child_node2, - "gen cnt should be more than the journal entry"); - // no need to recover child nodes - return false; - } - - K split_key; - split_key.set_blob({j_child_nodes[0]->key_area(), j_child_nodes[0]->key_size}); - child_node2 = child_node1->is_leaf() ? reserve_leaf_node(BlkId(j_child_nodes[1]->node_id())) - : reserve_interior_node(BlkId(j_child_nodes[1]->node_id())); - - // We need to do split based on entries since the left children is also not written yet. - // Find the split key within the child_node1. It is not always found, so we split upto that. - auto ret = child_node1->find(split_key, nullptr, false); - - // sanity check for left mode node before recovery - { - if (!ret.found) { - if (!child_node1->is_leaf()) { - BT_RELEASE_ASSERT(0, , "interior nodes should always have this key if it is written yet"); - } - } - } - - THIS_BT_LOG(INFO, btree_generics, , "Journal replay: split key {}, split indx {} child_node1 {}", - split_key.to_string(), ret.end_of_search_index, child_node1->to_string()); - /* if it is not found than end_of_search_index points to first ind which is greater than split key */ - auto split_ind = ret.end_of_search_index; - if (ret.found) { ++split_ind; } // we don't want to move split key */ - if (child_node1->is_leaf() && split_ind < (int)child_node1->get_total_entries()) { - K key; - child_node1->get_nth_key(split_ind, &key, false); - - if (split_key.compare_start(&key) >= 0) { /* we need to split the key range */ - THIS_BT_LOG(INFO, btree_generics, , "splitting a leaf node key {}", key.to_string()); - V v; - child_node1->get_nth_value(split_ind, &v, false); - vector< pair< K, V > > replace_kv; - child_node1->remove(split_ind, split_ind); - m_split_key_cb(key, v, split_key, replace_kv); - for (auto& pair : replace_kv) { - auto status = child_node1->insert(pair.first, pair.second); - BT_RELEASE_ASSERT((status == btree_status_t::success), child_node1, "unexpected insert failure"); - } - auto ret = child_node1->find(split_key, nullptr, false); - BT_RELEASE_ASSERT((ret.found && (ret.end_of_search_index == split_ind)), child_node1, - "found new indx {}, old split indx{}", ret.end_of_search_index, split_ind); - ++split_ind; - } - } - child_node1->move_out_to_right_by_entries(m_cfg, child_node2, child_node1->get_total_entries() - split_ind); - - child_node2->set_next_bnode(child_node1->next_bnode()); - child_node2->set_gen(j_child_nodes[1]->node_gen()); - - child_node1->set_next_bnode(child_node2->get_node_id()); - child_node1->set_gen(j_child_nodes[0]->node_gen()); - - THIS_BT_LOG(INFO, btree_generics, , "Journal replay: child_node2 {}", child_node2->to_string()); - write_node(child_node2, nullptr, bcp); - write_node(child_node1, child_node2, bcp); - return true; - } - - void recover_parent_node_in_split(const BtreeNodePtr< K >& parent_node, const BtreeNodePtr< K >& child_node1, - std::vector< bt_journal_node_info* >& j_child_nodes, const btree_cp_ptr& bcp) { - - // find child_1 key - K child1_key; // we need to insert child1_key - BT_RELEASE_ASSERT_CMP(j_child_nodes[0]->key_size, !=, 0, , "key size of left mode node is zero"); - child1_key.set_blob({j_child_nodes[0]->key_area(), j_child_nodes[0]->key_size}); - auto child1_node_id = j_child_nodes[0]->node_id(); - - // find split indx - auto ret = parent_node->find(child1_key, nullptr, false); - BT_RELEASE_ASSERT_CMP(ret.found, ==, false, , "child_1 key should not be in this parent"); - auto split_indx = ret.end_of_search_index; - - // find child2_key - K child2_key; // we only need to update child2_key to new node - if (j_child_nodes[1]->key_size != 0) { - child2_key.set_blob({j_child_nodes[1]->key_area(), j_child_nodes[1]->key_size}); - ret = parent_node->find(child2_key, nullptr, false); - BT_RELEASE_ASSERT_CMP(split_indx, ==, ret.end_of_search_index, , "it should be same as split index"); - } else { - // parent should be valid edge it is not a root split - } - auto child2_node_id = j_child_nodes[1]->node_id(); - - // update child2_key value - BtreeNodeInfo ninfo; - ninfo.set_bnode_id(child2_node_id); - parent_node->update(split_indx, ninfo); - - // insert child 1 - ninfo.set_bnode_id(child1_node_id); - K out_split_end_key; - out_split_end_key.copy_end_key_blob(child1_key.get_blob()); - parent_node->insert(out_split_end_key, ninfo); - - // Write the parent node - write_node(parent_node, child_node1, bcp); - - /* do sanity check after recovery split */ - { - validate_sanity_child(parent_node, split_indx); - validate_sanity_next_child(parent_node, split_indx); - } - } -}; -} // namespace sisl diff --git a/src/btree/mem_btree.hpp b/src/btree/mem_btree.hpp deleted file mode 100644 index 5bd7eb70..00000000 --- a/src/btree/mem_btree.hpp +++ /dev/null @@ -1,100 +0,0 @@ -#pragma once -#include "btree.ipp" - -namespace sisl { -namespace btree { -#ifdef INCASE_WE_NEED_COMMON -// Common class for all membtree's -template < typename K, typename V > -class MemBtreeCommon : public BtreeCommon< K, V > { -public: - void deref_node(BtreeNode< K >* node) override { - if (node->m_refcount.decrement_testz()) { - delete node->m_node_buf; - delete node; - } - } -}; - -MemBtree(BtreeConfig& cfg) : Btree(update_node_area_size(cfg)) { - Btree< K, V >::create_store_common(btree_store_type::MEM, []() { return std::make_shared< MemBtreeCommon >(); }); -} -#endif - -template < typename K, typename V > -class MemBtree : public Btree< K, V > { -public: - MemBtree(const BtreeConfig& cfg) : Btree< K, V >(cfg) { - BT_LOG(INFO, "New {} being created: Node size {}", btree_store_type(), cfg.node_size()); - } - - virtual ~MemBtree() { - const auto [ret, free_node_cnt] = this->destroy_btree(nullptr); - BT_LOG_ASSERT_EQ(ret, btree_status_t::success, "btree destroy failed"); - } - - std::string btree_store_type() const override { return "MEM_BTREE"; } - -private: - BtreeNodePtr< K > alloc_node(bool is_leaf, bool& is_new_allocation, /* is alloced same as copy_from */ - const BtreeNodePtr< K >& copy_from = nullptr) override { - if (copy_from != nullptr) { - is_new_allocation = false; - return copy_from; - } - - is_new_allocation = true; - uint8_t* node_buf = new uint8_t[this->m_bt_cfg.node_size()]; - auto new_node = this->init_node(node_buf, bnodeid_t{0}, true, is_leaf); - new_node->set_node_id(bnodeid_t{r_cast< std::uintptr_t >(new_node)}); - new_node->m_refcount.increment(); - return BtreeNodePtr< K >{new_node}; - } - - btree_status_t read_node(bnodeid_t id, BtreeNodePtr< K >& bnode) const override { - bnode = BtreeNodePtr< K >{r_cast< BtreeNode< K >* >(id)}; - return btree_status_t::success; - } - - void swap_node(const BtreeNodePtr< K >& node1, const BtreeNodePtr< K >& node2, void* context) override { - std::swap(node1->m_phys_node_buf, node2->m_phys_node_buf); - } - - btree_status_t refresh_node(const BtreeNodePtr< K >& bn, bool is_write_modifiable, void* context) const override { - return btree_status_t::success; - } - - void free_node(const BtreeNodePtr< K >& node, void* context) override { this->do_free_node(node); } - - void create_tree_precommit(const BtreeNodePtr< K >& root_node, void* op_context) override {} - void split_node_precommit(const BtreeNodePtr< K >& parent_node, const BtreeNodePtr< K >& child_node1, - const BtreeNodePtr< K >& child_node2, bool root_split, bool edge_split, - void* context) override {} - - void merge_node_precommit(bool is_root_merge, const BtreeNodePtr< K >& parent_node, uint32_t parent_merge_start_idx, - const BtreeNodePtr< K >& child_node1, - const std::vector< BtreeNodePtr< K > >* old_child_nodes, - const std::vector< BtreeNodePtr< K > >* replace_child_nodes, void* op_context) override {} -#if 0 - static void ref_node(MemBtreeNode* bn) { - auto mbh = (mem_btree_node_header*)bn; - LOGMSG_ASSERT_EQ(mbh->magic, 0xDEADBEEF, "Invalid Magic for Membtree node {}, Metrics {}", bn->to_string(), - sisl::MetricsFarm::getInstance().get_result_in_json_string()); - mbh->refcount.increment(); - } - - static void deref_node(MemBtreeNode* bn) { - auto mbh = (mem_btree_node_header*)bn; - LOGMSG_ASSERT_EQ(mbh->magic, 0xDEADBEEF, "Invalid Magic for Membtree node {}, Metrics {}", bn->to_string(), - sisl::MetricsFarm::getInstance().get_result_in_json_string()); - if (mbh->refcount.decrement_testz()) { - mbh->magic = 0; - bn->~MemBtreeNode(); - deallocate_mem((uint8_t*)bn); - } - } -#endif -}; - -} // namespace btree -} // namespace sisl diff --git a/src/btree/rough/btree_node.cpp b/src/btree/rough/btree_node.cpp deleted file mode 100644 index d697adfc..00000000 --- a/src/btree/rough/btree_node.cpp +++ /dev/null @@ -1,364 +0,0 @@ -/* - * physical_node.cpp - * - * Created on: 16-May-2016 - * Author: Hari Kadayam - * - * Copyright © 2016 Kadayam, Hari. All rights reserved. - */ -#include -#include "btree_node.hpp" - -namespace sisl { -BtreeNode::BtreeNode(uint8_t* node_buf, bnodeid_t id, bool init) : m_phys_node_buf{node_buf} { - if (init) { - set_magic(); - init_checksum(); - set_leaf(true); - set_total_entries(0); - set_next_bnode(empty_bnodeid); - set_gen(0); - set_valid_node(true); - set_edge_id(empty_bnodeid); - set_node_id(id); - } else { - DEBUG_ASSERT_EQ(get_node_id(), id); - DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); - DEBUG_ASSERT_EQ(get_version(), BTREE_NODE_VERSION); - } -} - -node_find_result_t BtreeNode::find(const BtreeSearchRange& range, BtreeKey* outkey, BtreeValue* outval, bool copy_key, - bool copy_val) const { - LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "Magic mismatch on btree_node {}", m_pers_header.to_string()); - - auto result = bsearch_node(range); - if (result.end_of_search_index == (int)get_total_entries() && !has_valid_edge()) { - assert(!result.found); - return result; - } - - if (get_total_entries() == 0) { - assert(has_valid_edge() || is_leaf()); - if (is_leaf()) { - /* Leaf doesn't have any elements */ - return result; - } - } - - if (outval) { get(result.end_of_search_index, outval, copy_val /* copy */); } - if (!range.is_simple_search() && outkey) { get_nth_key(result.end_of_search_index, outkey, copy_key /* copy */); } - return result; -} - -node_find_result_t BtreeNode::find(const BtreeKey& find_key, BtreeValue* outval, bool copy_val) const { - return find(BtreeSearchRange(find_key), nullptr, outval, false, copy_val); -} - -uint32_t BtreeNode::get_all(const BtreeSearchRange& range, uint32_t max_count, int& start_ind, int& end_ind, - std::vector< std::pair< K, V > >* out_values = nullptr) const { - LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "Magic mismatch on btree_node {}", m_pers_header.to_string()); - auto count = 0U; - - // Get the start index of the search range. - BtreeSearchRange sr = range.get_start_of_range(); - sr.set_multi_option(MultiMatchOption::DO_NOT_CARE); - - auto result = bsearch_node(sr); // doing bsearch only based on start key - // at this point start index will point to exact found or element after that - start_ind = result.end_of_search_index; - - if (!range.is_start_inclusive()) { - if (start_ind < (int)get_total_entries()) { - /* start is not inclusive so increment the start_ind if it is same as this key */ - int x = to_variant_node_const()->compare_nth_key(*range.get_start_key(), start_ind); - if (x == 0) { start_ind++; } - } else { - assert(is_leaf() || has_valid_edge()); - } - } - - if (start_ind == (int)get_total_entries() && is_leaf()) { - end_ind = start_ind; - return 0; // no result found - } - - assert((start_ind < (int)get_total_entries()) || has_valid_edge()); - - // search by the end index - BtreeSearchRange er = range.get_end_of_range(); - er.set_multi_option(MultiMatchOption::DO_NOT_CARE); - result = bsearch_node(er); // doing bsearch only based on end key - end_ind = result.end_of_search_index; - - assert(start_ind <= end_ind); - - /* we don't support end exclusive */ - assert(range.is_end_inclusive()); - - if (end_ind == (int)get_total_entries() && !has_valid_edge()) { --end_ind; } - - if (is_leaf()) { - /* Decrement the end indx if range doesn't overlap with the start of key at end indx */ - sisl::blob blob; - K key; - get_nth_key(end_ind, &key, false); - - if ((range.get_start_key())->compare_start(&key) < 0 && ((range.get_end_key())->compare_start(&key)) < 0) { - if (start_ind == end_ind) { - /* no match */ - return 0; - } - --end_ind; - } - } - - assert(start_ind <= end_ind); - count = end_ind - start_ind + 1; - if (count > max_count) { count = max_count; } - - /* We should always find the entries in interior node */ - assert(start_ind < (int)get_total_entries() || has_valid_edge()); - assert(end_ind < (int)get_total_entries() || has_valid_edge()); - - if (out_values == nullptr) { return count; } - - /* get the keys and values */ - for (auto i = start_ind; i < (int)(start_ind + count); ++i) { - K key; - V value; - if (i == (int)get_total_entries() && !is_leaf()) - get_edge_value(&value); // invalid key in case of edge entry for internal node - else { - get_nth_key(i, &key, true); - get_nth_value(i, &value, true); - } - out_values->emplace_back(std::make_pair<>(key, value)); - } - return count; -} - -bool BtreeNode::put(const BtreeKey& key, const BtreeValue& val, btree_put_type put_type, BtreeValue& existing_val) { - LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "Magic mismatch on btree_node {}", m_pers_header.to_string()); - auto result = find(key, nullptr, nullptr); - bool ret = true; - - if (put_type == btree_put_type::INSERT_ONLY_IF_NOT_EXISTS) { - if (result.found) { - LOGINFO("entry already exist"); - return false; - } - insert(result.end_of_search_index, key, val); - } else if (put_type == btree_put_type::REPLACE_ONLY_IF_EXISTS) { - if (!result.found) return false; - update(result.end_of_search_index, key, val); - } else if (put_type == btree_put_type::REPLACE_IF_EXISTS_ELSE_INSERT) { - !(result.found) ? insert(result.end_of_search_index, key, val) : update(result.end_of_search_index, key, val); - } else if (put_type == btree_put_type::APPEND_ONLY_IF_EXISTS) { - if (!result.found) return false; - append(result.end_of_search_index, key, val, existing_val); - } else if (put_type == btree_put_type::APPEND_IF_EXISTS_ELSE_INSERT) { - (!result.found) ? insert(result.end_of_search_index, key, val) - : append(result.end_of_search_index, key, val, existing_val); - } else { - DEBUG_ASSERT(false, "Wrong put_type {}", put_type); - } - return ret; -} - -#ifndef NO_CHECKSUM -void BtreeNode::set_checksum(size_t size) { - get_persistent_header()->checksum = crc16_t10dif(init_crc_16, m_node_area, size); -} - -bool BtreeNode::verify_node(size_t size, verify_result& vr) const { - HS_DEBUG_ASSERT_EQ(is_valid_node(), true, "verifying invalide node {}!", m_pers_header.to_string()); - vr.act_magic = get_magic(); - vr.exp_magic = BTREE_NODE_MAGIC; - vr.act_checksum = get_checksum(); - vr.exp_checksum = crc16_t10dif(init_crc_16, m_node_area, size); - return (vr.act_magic == vr.exp_magic && vr.act_checksum == vr.exp_checksum) ? true : false; -} -#endif - -bool BtreeNode::is_merge_needed(const BtreeConfig& cfg) const { -#ifdef _PRERELEASE - if (homestore_flip->test_flip("btree_merge_node") && get_occupied_size(cfg) < cfg.get_node_area_size()) { - return true; - } - - auto ret = homestore_flip->get_test_flip< uint64_t >("btree_merge_node_pct"); - if (ret && get_occupied_size(cfg) < (ret.get() * cfg.get_node_area_size() / 100)) { return true; } -#endif - return (get_occupied_size(cfg) < get_suggested_min_size(cfg)); -} - -void BtreeNode::get_last_key(BtreeKey* out_lastkey) const { - if (get_total_entries() == 0) { return; } - return get_nth_key(get_total_entries() - 1, out_lastkey, true); -} - -void BtreeNode::get_var_nth_key(int i, BtreeKey* out_firstkey) const { return get_nth_key(i, out_firstkey, true); } - -btree_status_t BtreeNode::insert(const BtreeKey& key, const BtreeValue& val) { - auto result = find(key, nullptr, nullptr); - assert(!is_leaf() || (!result.found)); // We do not support duplicate keys yet - auto ret = insert(result.end_of_search_index, key, val); - DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); - return ret; -} - -bool BtreeNode::remove_one(const BtreeSearchRange& range, BtreeKey* outkey, BtreeValue* outval) { - auto result = find(range, outkey, outval); - if (!result.found) { return false; } - - remove(result.end_of_search_index); - LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "{}", m_pers_header.to_string()); - return true; -} - -void BtreeNode::append(uint32_t index, const BtreeKey& key, const BtreeValue& val, BtreeValue& existing_val) { - // Get the nth value and do a callback to update its blob with the new value, being passed - V nth_val; - get_nth_value(index, &nth_val, false); - nth_val.append_blob(val, existing_val); - to_variant_node()->update(index, key, nth_val); - LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "{}", m_pers_header.to_string()); -} - -void BtreeNode::update(const BtreeKey& key, const BtreeValue& val, BtreeKey* outkey, BtreeValue* outval) { - auto result = find(key, outkey, outval); - assert(result.found); - update(result.end_of_search_index, val); - LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "{}", m_pers_header.to_string()); -} - -void BtreeNode::set_edge_value(const BtreeValue& v) { - BtreeNodeInfo* bni = (BtreeNodeInfo*)&v; - set_edge_id(bni->bnode_id()); - DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); -} - -void BtreeNode::get_edge_value(BtreeValue* v) const { - if (is_leaf()) { return; } - v->set_blob(BtreeNodeInfo(get_edge_id()).get_blob()); -} - -void BtreeNode::get_adjacent_indicies(uint32_t cur_ind, vector< int >& indices_list, uint32_t max_indices) const { - uint32_t i = 0; - uint32_t start_ind; - uint32_t end_ind; - uint32_t nentries = this->get_total_entries(); - - auto max_ind = ((max_indices / 2) - 1 + (max_indices % 2)); - end_ind = cur_ind + (max_indices / 2); - if (cur_ind < max_ind) { - end_ind += max_ind - cur_ind; - start_ind = 0; - } else { - start_ind = cur_ind - max_ind; - } - - for (i = start_ind; (i <= end_ind) && (indices_list.size() < max_indices); i++) { - if (i == nentries) { - if (this->has_valid_edge()) { indices_list.push_back(i); } - break; - } else { - indices_list.push_back(i); - } - } -} - -node_find_result_t BtreeNode::bsearch_node(const BtreeSearchRange& range) const { - DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); - const auto ret = bsearch(-1, get_total_entries(), range); - const auto selection = range.multi_option(); - - if (ret.found) { assert(ret.end_of_search_index < (int)get_total_entries() && ret.end_of_search_index > -1); } - - /* BEST_FIT_TO_CLOSEST is used by remove only. Remove doesn't support range_remove. Until - * then we have the special logic : - */ - if (selection == MultiMatchOption::BEST_FIT_TO_CLOSEST_FOR_REMOVE) { - if (!ret.found && is_leaf()) { - if (get_total_entries() != 0) { - ret.end_of_search_index = get_total_entries() - 1; - ret.found = true; - } - } - } - - return ret; -} - -node_find_result_t BtreeNode::is_bsearch_left_or_right_most(const BtreeSearchRange& range) const { - auto selection = range.multi_option(); - if (range.is_simple_search()) { return (MultiMatchOption::DO_NOT_CARE); } - if (selection == MultiMatchOption::LEFT_MOST) { - return (MultiMatchOption::LEFT_MOST); - } else if (selection == MultiMatchOption::RIGHT_MOST) { - return (MultiMatchOption::RIGHT_MOST); - } else if (selection == MultiMatchOption::BEST_FIT_TO_CLOSEST_FOR_REMOVE) { - return (MultiMatchOption::LEFT_MOST); - } - return (MultiMatchOption::DO_NOT_CARE); -} - -node_find_result_t BtreeNode::bsearch(int start, int end, const BtreeSearchRange& range) const { - int mid = 0; - int initial_end = end; - int min_ind_found = INT32_MAX; - int second_min = INT32_MAX; - int max_ind_found = 0; - - struct { - bool found; - int end_of_search_index; - } ret{false, 0}; - - if ((end - start) <= 1) { return ret; } - - auto selection = is_bsearch_left_or_right_most(range); - - while ((end - start) > 1) { - mid = start + (end - start) / 2; - assert(mid >= 0 && mid < (int)get_total_entries()); - int x = range.is_simple_search() ? to_variant_node_const()->compare_nth_key(*range.get_start_key(), mid) - : to_variant_node_const()->compare_nth_key_range(range, mid); - if (x == 0) { - ret.found = true; - if (selection == MultiMatchOption::DO_NOT_CARE) { - end = mid; - break; - } else if (selection == MultiMatchOption::LEFT_MOST) { - if (mid < min_ind_found) { min_ind_found = mid; } - end = mid; - } else if (selection == MultiMatchOption::RIGHT_MOST) { - if (mid > max_ind_found) { max_ind_found = mid; } - start = mid; - } else { - assert(false); - } - } else if (x > 0) { - end = mid; - } else { - start = mid; - } - } - - if (ret.found) { - if (selection == MultiMatchOption::LEFT_MOST) { - assert(min_ind_found != INT32_MAX); - ret.end_of_search_index = min_ind_found; - } else if (selection == MultiMatchOption::RIGHT_MOST) { - assert(max_ind_found != INT32_MAX); - ret.end_of_search_index = max_ind_found; - } else { - ret.end_of_search_index = end; - } - } else { - ret.end_of_search_index = end; - } - return ret; -} -} // namespace sisl diff --git a/src/btree/rough/physical_node.hpp b/src/btree/rough/physical_node.hpp deleted file mode 100644 index c9a7c48d..00000000 --- a/src/btree/rough/physical_node.hpp +++ /dev/null @@ -1,525 +0,0 @@ -/* - * physical_node.hpp - * - * Created on: 16-May-2016 - * Author: Hari Kadayam - * - * Copyright © 2016 Kadayam, Hari. All rights reserved. - */ -#pragma once - -#include -#include "logging/logging.h" -#include "btree_internal.h" - -namespace sisl { -static constexpr uint8_t BTREE_NODE_VERSION = 1; -static constexpr uint8_t BTREE_NODE_MAGIC = 0xab; - -#pragma pack(1) -struct persistent_hdr_t { - uint8_t magic{BTREE_NODE_MAGIC}; - uint8_t version{BTREE_NODE_VERSION}; - uint16_t checksum; - - bnodeid_t node_id; - bnodeid_t next_node; - - uint32_t nentries : 27; - uint32_t node_type : 3; - uint32_t leaf : 1; - uint32_t valid_node : 1; - - uint64_t node_gen; - bnodeid_t edge_entry; - - std::string to_string() const { - return fmt::format("magic={} version={} csum={} node_id={} next_node={} nentries={} node_type={} is_leaf={} " - "valid_node={} node_gen={} edge_entry={}", - magic, version, checksum, node_id, next_node, nentries, node_type, leaf, valid_node, - node_gen, edge_entry); - } -}; -#pragma pack() - -#ifndef NO_CHECKSUM -struct verify_result { - uint8_t act_magic; - uint16_t act_checksum; - uint8_t exp_magic; - uint16_t exp_checksum; - - std::string to_string() const { - return fmt::format(" Actual magic={} Expected magic={} Actual checksum={} Expected checksum={}", act_magic, - exp_magic, act_checksum, exp_checksum); - } - - friend ostream& operator<<(ostream& os, const verify_result& vr) { - os << vr.to_string(); - return os; - } -}; -#endif - -class BtreeSearchRange; - -#pragma pack(1) -template < typename VariantNodeT > -class PhysicalNode { -protected: - persistent_hdr_t m_pers_header; - uint8_t m_node_area[0]; - -public: - PhysicalNode(bnodeid_t* id, bool init) { - if (init) { - set_magic(); - init_checksum(); - set_leaf(true); - set_total_entries(0); - set_next_bnode(empty_bnodeid); - set_gen(0); - set_valid_node(true); - set_edge_id(empty_bnodeid); - set_node_id(*id); - } else { - DEBUG_ASSERT_EQ(get_node_id(), *id); - DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); - DEBUG_ASSERT_EQ(get_version(), BTREE_NODE_VERSION); - } - } - PhysicalNode(bnodeid_t id, bool init) { - if (init) { - set_magic(); - init_checksum(); - set_leaf(true); - set_total_entries(0); - set_next_bnode(empty_bnodeid); - set_gen(0); - set_valid_node(true); - set_edge_id(empty_bnodeid); - set_node_id(id); - } else { - DEBUG_ASSERT_EQ(get_node_id(), id); - DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); - DEBUG_ASSERT_EQ(get_version(), BTREE_NODE_VERSION); - } - } - ~PhysicalNode() = default; - - persistent_hdr_t* get_persistent_header() { return &m_pers_header; } - - uint8_t get_magic() const { return m_pers_header.magic; } - void set_magic() { m_pers_header.magic = BTREE_NODE_MAGIC; } - - uint8_t get_version() const { return m_pers_header.version; } - uint16_t get_checksum() const { return m_pers_header.checksum; } - void init_checksum() { m_pers_header.checksum = 0; } - - void set_node_id(bnodeid_t id) { m_pers_header.node_id = id; } - bnodeid_t get_node_id() const { return m_pers_header.node_id; } - -#ifndef NO_CHECKSUM - void set_checksum(size_t size) { m_pers_header.checksum = crc16_t10dif(init_crc_16, m_node_area, size); } - bool verify_node(size_t size, verify_result& vr) const { - HS_DEBUG_ASSERT_EQ(is_valid_node(), true, "verifying invalide node {}!", m_pers_header.to_string()); - vr.act_magic = get_magic(); - vr.exp_magic = BTREE_NODE_MAGIC; - vr.act_checksum = get_checksum(); - vr.exp_checksum = crc16_t10dif(init_crc_16, m_node_area, size); - return (vr.act_magic == vr.exp_magic && vr.act_checksum == vr.exp_checksum) ? true : false; - } -#endif - - uint32_t get_total_entries() const { return m_pers_header.nentries; } - bool is_leaf() const { return m_pers_header.leaf; } - btree_node_type get_node_type() const { return s_cast< btree_node_type >(m_pers_header.node_type); } - -protected: - void set_total_entries(uint32_t n) { get_persistent_header()->nentries = n; } - void inc_entries() { ++get_persistent_header()->nentries; } - void dec_entries() { --get_persistent_header()->nentries; } - - void add_entries(uint32_t addn) { get_persistent_header()->nentries += addn; } - void sub_entries(uint32_t subn) { get_persistent_header()->nentries -= subn; } - - void set_leaf(bool leaf) { get_persistent_header()->leaf = leaf; } - void set_node_type(btree_node_type t) { get_persistent_header()->node_type = uint32_cast(t); } - uint64_t get_gen() const { return m_pers_header.node_gen; } - void inc_gen() { get_persistent_header()->node_gen++; } - void set_gen(uint64_t g) { get_persistent_header()->node_gen = g; } - - void set_valid_node(bool valid) { get_persistent_header()->valid_node = (valid ? 1 : 0); } - bool is_valid_node() const { return m_pers_header.valid_node; } - - uint8_t* get_node_area_mutable() { return m_node_area; } - const uint8_t* get_node_area() const { return m_node_area; } - - uint32_t get_occupied_size(const BtreeConfig& cfg) const { - return (cfg.get_node_area_size() - to_variant_node_const().get_available_size(cfg)); - } - uint32_t get_suggested_min_size(const BtreeConfig& cfg) const { return cfg.get_max_key_size(); } - - bool is_merge_needed(const BtreeConfig& cfg) const { -#ifdef _PRERELEASE - if (homestore_flip->test_flip("btree_merge_node") && get_occupied_size(cfg) < cfg.get_node_area_size()) { - return true; - } - - auto ret = homestore_flip->get_test_flip< uint64_t >("btree_merge_node_pct"); - if (ret && get_occupied_size(cfg) < (ret.get() * cfg.get_node_area_size() / 100)) { return true; } -#endif - return (get_occupied_size(cfg) < get_suggested_min_size(cfg)); - } - - bnodeid_t next_bnode() const { return m_pers_header.next_node; } - void set_next_bnode(bnodeid_t b) { get_persistent_header()->next_node = b; } - - bnodeid_t get_edge_id() const { return m_pers_header.edge_entry; } - void set_edge_id(bnodeid_t edge) { get_persistent_header()->edge_entry = edge; } - - typedef std::pair< bool, int > node_find_result_t; - - ////////// Top level functions (CRUD on a node) ////////////////// - // Find the slot where the key is present. If not present, return the closest location for the key. - // Assumption: Node lock is already taken - node_find_result_t find(const BtreeSearchRange& range, BtreeKey* outkey, BtreeValue* outval, bool copy_key = true, - bool copy_val = true) const { - LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "Magic mismatch on btree_node {}", m_pers_header.to_string()); - - auto result = bsearch_node(range); - if (result.end_of_search_index == int_cast(get_total_entries()) && !has_valid_edge()) { - assert(!result.found); - return result; - } - - if (get_total_entries() == 0) { - assert(has_valid_edge() || is_leaf()); - if (is_leaf()) { - /* Leaf doesn't have any elements */ - return result; - } - } - - if (outval) { to_variant_node_const().get(result.end_of_search_index, outval, copy_val /* copy */); } - - if (!range.is_simple_search() && outkey) { - to_variant_node_const().get_nth_key(result.end_of_search_index, outkey, copy_key /* copy */); - } - - return result; - } - - node_find_result_t find(const BtreeKey& find_key, BtreeValue* outval, bool copy_val = true) const { - return find(BtreeSearchRange(find_key), nullptr, outval, false, copy_val); - } - - void get_last_key(BtreeKey* out_lastkey) const { - if (get_total_entries() == 0) { return; } - to_variant_node().get_nth_key(get_total_entries() - 1, out_lastkey, true); - } - - void get_first_key(BtreeKey* out_firstkey) const { return to_variant_node().get_nth_key(0, out_firstkey, true); } - void get_var_nth_key(int i, BtreeKey* out_firstkey) const { - return to_variant_node().get_nth_key(i, out_firstkey, true); - } - - uint32_t get_all(const BtreeSearchRange& range, uint32_t max_count, int& start_ind, int& end_ind, - std::vector< std::pair< K, V > >* out_values = nullptr) const { - LOGMSG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC, "Magic mismatch on btree_node {}", m_pers_header.to_string()); - auto count = 0U; - - // Get the start index of the search range. - BtreeSearchRange sr = range.get_start_of_range(); - sr.set_multi_option(MultiMatchOption::DO_NOT_CARE); - - auto result = bsearch_node(sr); // doing bsearch only based on start key - // at this point start index will point to exact found or element after that - start_ind = result.end_of_search_index; - - if (!range.is_start_inclusive()) { - if (start_ind < (int)get_total_entries()) { - /* start is not inclusive so increment the start_ind if it is same as this key */ - int x = to_variant_node_const().compare_nth_key(*range.get_start_key(), start_ind); - if (x == 0) { start_ind++; } - } else { - assert(is_leaf() || has_valid_edge()); - } - } - - if (start_ind == (int)get_total_entries() && is_leaf()) { - end_ind = start_ind; - return 0; // no result found - } - - assert((start_ind < (int)get_total_entries()) || has_valid_edge()); - - // search by the end index - BtreeSearchRange er = range.get_end_of_range(); - er.set_multi_option(MultiMatchOption::DO_NOT_CARE); - result = bsearch_node(er); // doing bsearch only based on end key - end_ind = result.end_of_search_index; - - assert(start_ind <= end_ind); - - /* we don't support end exclusive */ - assert(range.is_end_inclusive()); - - if (end_ind == (int)get_total_entries() && !has_valid_edge()) { --end_ind; } - - if (is_leaf()) { - /* Decrement the end indx if range doesn't overlap with the start of key at end indx */ - sisl::blob blob; - K key; - to_variant_node().get_nth_key(end_ind, &key, false); - - if ((range.get_start_key())->compare_start(&key) < 0 && ((range.get_end_key())->compare_start(&key)) < 0) { - if (start_ind == end_ind) { - /* no match */ - return 0; - } - --end_ind; - } - } - - assert(start_ind <= end_ind); - count = end_ind - start_ind + 1; - if (count > max_count) { count = max_count; } - - /* We should always find the entries in interior node */ - assert(start_ind < (int)get_total_entries() || has_valid_edge()); - assert(end_ind < (int)get_total_entries() || has_valid_edge()); - - if (out_values == nullptr) { return count; } - - /* get the keys and values */ - for (auto i = start_ind; i < (int)(start_ind + count); ++i) { - K key; - V value; - if (i == (int)get_total_entries() && !is_leaf()) - get_edge_value(&value); // invalid key in case of edge entry for internal node - else { - to_variant_node().get_nth_key(i, &key, true); - to_variant_node().get_nth_value(i, &value, true); - } - out_values->emplace_back(std::make_pair<>(key, value)); - } - return count; - } - - bool put(const BtreeKey& key, const BtreeValue& val, btree_put_type put_type, BtreeValue& existing_val) { - DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); - auto result = find(key, nullptr, nullptr); - bool ret = true; - - LOGMSG_ASSERT((get_magic() == BTREE_NODE_MAGIC), "{}", m_pers_header.to_string()); - if (put_type == btree_put_type::INSERT_ONLY_IF_NOT_EXISTS) { - if (result.found) { - LOGINFO("entry already exist"); - return false; - } - (void)to_variant_node().insert(result.end_of_search_index, key, val); - } else if (put_type == btree_put_type::REPLACE_ONLY_IF_EXISTS) { - if (!result.found) return false; - to_variant_node().update(result.end_of_search_index, key, val); - } else if (put_type == btree_put_type::REPLACE_IF_EXISTS_ELSE_INSERT) { - !(result.found) ? (void)to_variant_node().insert(result.end_of_search_index, key, val) - : to_variant_node().update(result.end_of_search_index, key, val); - } else if (put_type == btree_put_type::APPEND_ONLY_IF_EXISTS) { - if (!result.found) return false; - append(result.end_of_search_index, key, val, existing_val); - } else if (put_type == btree_put_type::APPEND_IF_EXISTS_ELSE_INSERT) { - (!result.found) ? (void)to_variant_node().insert(result.end_of_search_index, key, val) - : append(result.end_of_search_index, key, val, existing_val); - } else { - assert(false); - } - DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); - - LOGMSG_ASSERT((get_magic() == BTREE_NODE_MAGIC), "{}", m_pers_header.to_string()); - return ret; - } - - btree_status_t insert(const BtreeKey& key, const BtreeValue& val) { - auto result = find(key, nullptr, nullptr); - assert(!is_leaf() || (!result.found)); // We do not support duplicate keys yet - auto ret = to_variant_node().insert(result.end_of_search_index, key, val); - DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); - return ret; - } - - bool remove_one(const BtreeSearchRange& range, BtreeKey* outkey, BtreeValue* outval) { - auto result = find(range, outkey, outval); - if (!result.found) { return false; } - - to_variant_node().remove(result.end_of_search_index); - LOGMSG_ASSERT((get_magic() == BTREE_NODE_MAGIC), "{}", m_pers_header.to_string()); - return true; - } - - void append(uint32_t index, const BtreeKey& key, const BtreeValue& val, BtreeValue& existing_val) { - // Get the nth value and do a callback to update its blob with the new value, being passed - V nth_val; - to_variant_node().get_nth_value(index, &nth_val, false); - nth_val.append_blob(val, existing_val); - to_variant_node().update(index, key, nth_val); - DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); - } - - /* Update the key and value pair and after update if outkey and outval are non-nullptr, it fills them with - * the key and value it just updated respectively */ - void update(const BtreeKey& key, const BtreeValue& val, BtreeKey* outkey, BtreeValue* outval) { - auto result = find(key, outkey, outval); - assert(result.found); - to_variant_node().update(result.end_of_search_index, val); - LOGMSG_ASSERT((get_magic() == BTREE_NODE_MAGIC), "{}", m_pers_header.to_string()); - } - - //////////// Edge Related Methods /////////////// - void invalidate_edge() { set_edge_id(empty_bnodeid); } - - void set_edge_value(const BtreeValue& v) { - BtreeNodeInfo* bni = (BtreeNodeInfo*)&v; - set_edge_id(bni->bnode_id()); - DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); - } - - void get_edge_value(BtreeValue* v) const { - if (is_leaf()) { return; } - v->set_blob(BtreeNodeInfo(get_edge_id()).get_blob()); - } - - bool has_valid_edge() const { - if (is_leaf()) { return false; } - return (get_edge_id() != empty_bnodeid); - } - - void get_adjacent_indicies(uint32_t cur_ind, vector< int >& indices_list, uint32_t max_indices) const { - uint32_t i = 0; - uint32_t start_ind; - uint32_t end_ind; - uint32_t nentries = this->get_total_entries(); - - auto max_ind = ((max_indices / 2) - 1 + (max_indices % 2)); - end_ind = cur_ind + (max_indices / 2); - if (cur_ind < max_ind) { - end_ind += max_ind - cur_ind; - start_ind = 0; - } else { - start_ind = cur_ind - max_ind; - } - - for (i = start_ind; (i <= end_ind) && (indices_list.size() < max_indices); i++) { - if (i == nentries) { - if (this->has_valid_edge()) { indices_list.push_back(i); } - break; - } else { - indices_list.push_back(i); - } - } - } - -protected: - node_find_result_t bsearch_node(const BtreeSearchRange& range) const { - DEBUG_ASSERT_EQ(get_magic(), BTREE_NODE_MAGIC); - const auto ret = bsearch(-1, get_total_entries(), range); - const auto selection = range.multi_option(); - - if (ret.found) { assert(ret.end_of_search_index < (int)get_total_entries() && ret.end_of_search_index > -1); } - - /* BEST_FIT_TO_CLOSEST is used by remove only. Remove doesn't support range_remove. Until - * then we have the special logic : - */ - if (selection == MultiMatchOption::BEST_FIT_TO_CLOSEST_FOR_REMOVE) { - if (!ret.found && is_leaf()) { - if (get_total_entries() != 0) { - ret.end_of_search_index = get_total_entries() - 1; - ret.found = true; - } - } - } - - return ret; - } - - node_find_result_t is_bsearch_left_or_right_most(const BtreeSearchRange& range) const { - auto selection = range.multi_option(); - if (range.is_simple_search()) { return (MultiMatchOption::DO_NOT_CARE); } - if (selection == MultiMatchOption::LEFT_MOST) { - return (MultiMatchOption::LEFT_MOST); - } else if (selection == MultiMatchOption::RIGHT_MOST) { - return (MultiMatchOption::RIGHT_MOST); - } else if (selection == MultiMatchOption::BEST_FIT_TO_CLOSEST_FOR_REMOVE) { - return (MultiMatchOption::LEFT_MOST); - } - return (MultiMatchOption::DO_NOT_CARE); - } - - /* This function does bseach between start and end where start and end are not included. - * It either gives left most, right most or the first found entry based on the range selection policy. - * If entry doesn't found then it gives the closest found entry. - */ - node_find_result_t bsearch(int start, int end, const BtreeSearchRange& range) const { - int mid = 0; - int initial_end = end; - int min_ind_found = INT32_MAX; - int second_min = INT32_MAX; - int max_ind_found = 0; - - struct { - bool found; - int end_of_search_index; - } ret{false, 0}; - - if ((end - start) <= 1) { return ret; } - - auto selection = is_bsearch_left_or_right_most(range); - - while ((end - start) > 1) { - mid = start + (end - start) / 2; - assert(mid >= 0 && mid < (int)get_total_entries()); - int x = range.is_simple_search() ? to_variant_node_const().compare_nth_key(*range.get_start_key(), mid) - : to_variant_node_const().compare_nth_key_range(range, mid); - if (x == 0) { - ret.found = true; - if (selection == MultiMatchOption::DO_NOT_CARE) { - end = mid; - break; - } else if (selection == MultiMatchOption::LEFT_MOST) { - if (mid < min_ind_found) { min_ind_found = mid; } - end = mid; - } else if (selection == MultiMatchOption::RIGHT_MOST) { - if (mid > max_ind_found) { max_ind_found = mid; } - start = mid; - } else { - assert(false); - } - } else if (x > 0) { - end = mid; - } else { - start = mid; - } - } - - if (ret.found) { - if (selection == MultiMatchOption::LEFT_MOST) { - assert(min_ind_found != INT32_MAX); - ret.end_of_search_index = min_ind_found; - } else if (selection == MultiMatchOption::RIGHT_MOST) { - assert(max_ind_found != INT32_MAX); - ret.end_of_search_index = max_ind_found; - } else { - ret.end_of_search_index = end; - } - } else { - ret.end_of_search_index = end; - } - return ret; - } - - VariantNodeT& to_variant_node() { return s_cast< VariantNodeT& >(*this); } - const VariantNodeT& to_variant_node_const() const { return s_cast< const VariantNodeT& >(*this); } -}; -#pragma pack() - -} // namespace sisl diff --git a/src/btree/rough/sisl_btree.hpp b/src/btree/rough/sisl_btree.hpp deleted file mode 100644 index 6f11ac27..00000000 --- a/src/btree/rough/sisl_btree.hpp +++ /dev/null @@ -1,1894 +0,0 @@ -/* - * Created on: 14-May-2016 - * Author: Hari Kadayam - * - * Copyright © 2016 Kadayam, Hari. All rights reserved. - */ -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include "logging/logging.h" - -#include "fds/buffer.hpp" -#include "btree_internal.h" -#include "btree_node.hpp" - -SISL_LOGGING_DECL(btree) -namespace sisl { - -#if 0 -#define container_of(ptr, type, member) ({ (type*)((char*)ptr - offsetof(type, member)); }) -#endif - -#define btree_t Btree< BtreeStoreType, K, V, InteriorNodeType, LeafNodeType > - -template < btree_store_type BtreeStoreType, typename K, typename V, btree_node_type InteriorNodeType, - btree_node_type LeafNodeType > -struct _btree_locked_node_info { - btree_node_t* node; - Clock::time_point start_time; - const char* fname; - int line; - void dump() { LOGINFO("node locked by file: {}, line: {}", fname, line); } -}; - -#define btree_locked_node_info _btree_locked_node_info< BtreeStoreType, K, V, InteriorNodeType, LeafNodeType > - -template < typename K, typename V > -class Btree { - typedef std::function< void(V& mv) > free_blk_callback; - typedef std::function< void() > destroy_btree_comp_callback; - typedef std::function< void(const K& k, const V& v, const K& split_key, - std::vector< std::pair< K, V > >& replace_kv) > - split_key_callback; - -private: - bnodeid_t m_root_node; - homeds::thread::RWLock m_btree_lock; - - uint32_t m_max_nodes; - BtreeConfig m_bt_cfg; - btree_super_block m_sb; - - BtreeMetrics m_metrics; - std::unique_ptr< btree_store_t > m_btree_store; - bool m_destroy = false; - std::atomic< uint64_t > m_total_nodes = 0; - uint32_t m_node_size = 4096; - btree_cp_sb m_last_cp_sb; - split_key_callback m_split_key_cb; -#ifndef NDEBUG - std::atomic< uint64_t > m_req_id = 0; -#endif - - static thread_local homeds::reserve_vector< btree_locked_node_info, 5 > wr_locked_nodes; - static thread_local homeds::reserve_vector< btree_locked_node_info, 5 > rd_locked_nodes; - - ////////////////// Implementation ///////////////////////// -public: - btree_super_block get_btree_sb() { return m_sb; } - const btree_cp_sb& get_last_cp_cb() const { return m_last_cp_sb; } - - /** - * @brief : return the btree cfg - * - * @return : the btree cfg; - */ - BtreeConfig get_btree_cfg() const { return m_bt_cfg; } - uint64_t get_used_size() const { return m_node_size * m_total_nodes.load(); } -#ifdef _PRERELEASE - static void set_io_flip() { - /* IO flips */ - FlipClient* fc = homestore::HomeStoreFlip::client_instance(); - FlipFrequency freq; - FlipCondition cond1; - FlipCondition cond2; - freq.set_count(2000000000); - freq.set_percent(2); - - FlipCondition null_cond; - fc->create_condition("", flip::Operator::DONT_CARE, (int)1, &null_cond); - - fc->create_condition("nuber of entries in a node", flip::Operator::EQUAL, 0, &cond1); - fc->create_condition("nuber of entries in a node", flip::Operator::EQUAL, 1, &cond2); - fc->inject_noreturn_flip("btree_upgrade_node_fail", {cond1, cond2}, freq); - - fc->create_condition("nuber of entries in a node", flip::Operator::EQUAL, 4, &cond1); - fc->create_condition("nuber of entries in a node", flip::Operator::EQUAL, 2, &cond2); - - fc->inject_retval_flip("btree_delay_and_split", {cond1, cond2}, freq, 20); - fc->inject_retval_flip("btree_delay_and_split_leaf", {cond1, cond2}, freq, 20); - fc->inject_noreturn_flip("btree_parent_node_full", {null_cond}, freq); - fc->inject_noreturn_flip("btree_leaf_node_split", {null_cond}, freq); - fc->inject_retval_flip("btree_upgrade_delay", {null_cond}, freq, 20); - fc->inject_retval_flip("writeBack_completion_req_delay_us", {null_cond}, freq, 20); - fc->inject_noreturn_flip("btree_read_fast_path_not_possible", {null_cond}, freq); - } - - static void set_error_flip() { - /* error flips */ - FlipClient* fc = homestore::HomeStoreFlip::client_instance(); - FlipFrequency freq; - freq.set_count(20); - freq.set_percent(10); - - FlipCondition null_cond; - fc->create_condition("", flip::Operator::DONT_CARE, (int)1, &null_cond); - - fc->inject_noreturn_flip("btree_read_fail", {null_cond}, freq); - fc->inject_noreturn_flip("fixed_blkalloc_no_blks", {null_cond}, freq); - } -#endif - - static btree_t* create_btree(BtreeConfig& cfg) { - Btree* bt = new Btree(cfg); - auto impl_ptr = btree_store_t::init_btree(bt, cfg); - bt->m_btree_store = std::move(impl_ptr); - btree_status_t ret = bt->init(); - if (ret != btree_status_t::success) { - LOGERROR("btree create failed. error {} name {}", ret, cfg.get_name()); - delete (bt); - return nullptr; - } - - HS_SUBMOD_LOG(INFO, base, , "btree", cfg.get_name(), "New {} created: Node size {}", BtreeStoreType, - cfg.get_node_size()); - return bt; - } - - void do_common_init(bool is_recovery = false) { - // TODO: Check if node_area_size need to include persistent header - uint32_t node_area_size = btree_store_t::get_node_area_size(m_btree_store.get()); - m_bt_cfg.set_node_area_size(node_area_size); - - // calculate number of nodes - uint32_t max_leaf_nodes = - (m_bt_cfg.get_max_objs() * (m_bt_cfg.get_max_key_size() + m_bt_cfg.get_max_value_size())) / node_area_size + - 1; - max_leaf_nodes += (100 * max_leaf_nodes) / 60; // Assume 60% btree full - - m_max_nodes = max_leaf_nodes + ((double)max_leaf_nodes * 0.05) + 1; // Assume 5% for interior nodes - m_total_nodes = m_last_cp_sb.btree_size; - btree_store_t::update_sb(m_btree_store.get(), m_sb, &m_last_cp_sb, is_recovery); - } - - void replay_done(const btree_cp_ptr& bcp) { - m_total_nodes = m_last_cp_sb.btree_size + bcp->btree_size.load(); - THIS_BT_LOG(INFO, base, , "total btree nodes {}", m_total_nodes); - } - - btree_status_t init() { - do_common_init(); - return (create_root_node()); - } - - void init_recovery(const btree_super_block& btree_sb, btree_cp_sb* cp_sb, const split_key_callback& split_key_cb) { - m_sb = btree_sb; - m_split_key_cb = split_key_cb; - if (cp_sb) { memcpy(&m_last_cp_sb, cp_sb, sizeof(m_last_cp_sb)); } - do_common_init(true); - m_root_node = m_sb.root_node; - } - - Btree(BtreeConfig& cfg) : - m_bt_cfg(cfg), m_metrics(BtreeStoreType, cfg.get_name().c_str()), m_node_size(cfg.get_node_size()) {} - - ~Btree() { - if (BtreeStoreType != btree_store_type::MEM_BTREE) { - LOGINFO("Skipping destroy in-memory btree nodes for non mem btree types."); - return; - } - - uint64_t free_node_cnt; - auto ret = destroy_btree(nullptr, free_node_cnt, true); - - HS_DEBUG_ASSERT_EQ(ret, btree_status_t::success, "btree destroy failed"); - LOGWARN("Destroy in-memory btree nodes failed."); - } - - btree_status_t destroy_btree(blkid_list_ptr free_blkid_list, uint64_t& free_node_cnt, bool in_mem = false) { - btree_status_t ret{btree_status_t::success}; - m_btree_lock.write_lock(); - if (!m_destroy) { // if previous destroy is successful, do not destroy again; - BtreeNodePtr< K > root; - homeds::thread::locktype acq_lock = LOCKTYPE_WRITE; - - ret = read_and_lock_root(m_root_node, root, acq_lock, acq_lock, nullptr); - if (ret != btree_status_t::success) { - m_btree_lock.unlock(); - return ret; - } - - free_node_cnt = 0; - ret = free(root, free_blkid_list, in_mem, free_node_cnt); - - unlock_node(root, acq_lock); - - if (ret == btree_status_t::success) { - THIS_BT_LOG(DEBUG, base, , "btree(root: {}) nodes destroyed successfully", m_root_node); - m_destroy = true; - } else { - THIS_BT_LOG(ERROR, base, , "btree(root: {}) nodes destroyed failed, ret: {}", m_root_node, ret); - } - } - m_btree_lock.unlock(); - return ret; - } - - // - // 1. free nodes in post order traversal of tree to free non-leaf node - // - btree_status_t post_order_traversal(const BtreeNodePtr< K >& node, const auto& cb) { - homeds::thread::locktype acq_lock = homeds::thread::LOCKTYPE_WRITE; - uint32_t i = 0; - btree_status_t ret = btree_status_t::success; - - if (!node->is_leaf()) { - BtreeNodeInfo child_info; - while (i <= node->get_total_entries()) { - if (i == node->get_total_entries()) { - if (!node->has_valid_edge()) { break; } - child_info.set_bnode_id(node->get_edge_id()); - } else { - child_info = node->get(i, false /* copy */); - } - - BtreeNodePtr< K > child; - ret = read_and_lock_child(child_info.bnode_id(), child, node, i, acq_lock, acq_lock, nullptr); - if (ret != btree_status_t::success) { return ret; } - ret = post_order_traversal(child, cb); - unlock_node(child, acq_lock); - ++i; - } - } - - if (ret != btree_status_t::success) { return ret; } - cb(node); - return ret; - } - - void destroy_done() { btree_store_t::destroy_done(m_btree_store.get()); } - - uint64_t get_used_size() const { return m_node_size * m_total_nodes.load(); } - - btree_status_t range_put(const BtreeRangeUpdateRequest< K, V >& bur) { - BtreeQueryCursor cur; - bool reset_cur = false; - if (!bur.get_input_range().is_cursor_valid()) { - bur.get_input_range().set_cursor(&cur); - reset_cur = true; - } - auto ret = put_internal(bur); - if (reset_cur) { bur.get_input_range().reset_cursor(); } - return ret; - } - - btree_status_t put(const BtreeKey& k, const BtreeValue& v, btree_put_type put_type, - BtreeValue* existing_val = nullptr) { - return put_internal(BtreeSinglePutRequest{k, v, put_type, existing_val}); - } - - btree_status_t get(const BtreeKey& key, BtreeValue* outval) { return get(key, nullptr, outval); } - - btree_status_t get(const BtreeKey& key, BtreeKey* outkey, BtreeValue* outval) { - return get_any(BtreeSearchRange(key), outkey, outval); - } - - btree_status_t get_any(const BtreeSearchRange& range, BtreeKey* outkey, BtreeValue* outval) { - btree_status_t ret = btree_status_t::success; - bool is_found; - - m_btree_lock.read_lock(); - BtreeNodePtr< K > root; - - ret = read_and_lock_root(m_root_node, root, locktype_t::READ, locktype_t::READ, nullptr); - if (ret != btree_status_t::success) { goto out; } - - ret = do_get(root, range, outkey, outval); - out: - m_btree_lock.unlock(); - - // TODO: Assert if key returned from do_get is same as key requested, incase of perfect match - -#ifndef NDEBUG - check_lock_debug(); -#endif - return ret; - } - - btree_status_t query(BtreeQueryRequest& query_req, std::vector< std::pair< K, V > >& out_values) { - COUNTER_INCREMENT(m_metrics, btree_query_ops_count, 1); - - btree_status_t ret = btree_status_t::success; - if (query_req.batch_size() == 0) { return ret; } - - /* set cursor if it is invalid. User is not interested in the cursor but we need it for internal logic */ - BtreeQueryCursor cur; - bool reset_cur = false; - if (!query_req.get_input_range().is_cursor_valid()) { - query_req.get_input_range().set_cursor(&cur); - reset_cur = true; - } - - m_btree_lock.read_lock(); - BtreeNodePtr< K > root = nullptr; - ret = read_and_lock_root(m_root_node, root, locktype_t::READ, locktype_t::READ, nullptr); - if (ret != btree_status_t::success) { goto out; } - - switch (query_req.query_type()) { - case BtreeQueryType::SWEEP_NON_INTRUSIVE_PAGINATION_QUERY: - ret = do_sweep_query(root, query_req, out_values); - break; - - case BtreeQueryType::TREE_TRAVERSAL_QUERY: - ret = do_traversal_query(root, query_req, out_values); - break; - - default: - unlock_node(root, homeds::thread::locktype::locktype_t::READ); - LOGERROR("Query type {} is not supported yet", query_req.query_type()); - break; - } - - if ((query_req.query_type() == BtreeQueryType::SWEEP_NON_INTRUSIVE_PAGINATION_QUERY || - query_req.query_type() == BtreeQueryType::TREE_TRAVERSAL_QUERY) && - out_values.size() > 0) { - - /* if return is not success then set the cursor to last read. No need to set cursor if user is not - * interested in it. - */ - if (!reset_cur) { - query_req.get_input_range().set_cursor_key(&out_values.back().first, ([](BtreeKey* key) { - K end_key; - end_key.copy_end_key_blob(key->get_blob()); - return std::move(std::make_unique< K >(end_key)); - })); - } - - /* check if we finished just at the last key */ - if (out_values.back().first.compare(query_req.get_input_range().get_end_key()) == 0) { - ret = btree_status_t::success; - } - } - - out: - m_btree_lock.unlock(); -#ifndef NDEBUG - check_lock_debug(); -#endif - if (ret != btree_status_t::success && ret != btree_status_t::has_more && - ret != btree_status_t::fast_path_not_possible) { - THIS_BT_LOG(ERROR, base, , "btree get failed {}", ret); - COUNTER_INCREMENT(m_metrics, query_err_cnt, 1); - } - if (reset_cur) { query_req.get_input_range().reset_cursor(); } - return ret; - } - -#ifdef SERIALIZABLE_QUERY_IMPLEMENTATION - btree_status_t sweep_query(BtreeQueryRequest& query_req, std::vector< std::pair< K, V > >& out_values) { - COUNTER_INCREMENT(m_metrics, btree_read_ops_count, 1); - query_req.init_batch_range(); - - m_btree_lock.read_lock(); - - BtreeNodePtr< K > root; - btree_status_t ret = btree_status_t::success; - - ret = read_and_lock_root(m_root_node, root, locktype_t::READ, locktype_t::READ, nullptr); - if (ret != btree_status_t::success) { goto out; } - - ret = do_sweep_query(root, query_req, out_values); - out: - m_btree_lock.unlock(); - -#ifndef NDEBUG - check_lock_debug(); -#endif - return ret; - } - - btree_status_t serializable_query(BtreeSerializableQueryRequest& query_req, - std::vector< std::pair< K, V > >& out_values) { - query_req.init_batch_range(); - - m_btree_lock.read_lock(); - BtreeNodePtr< K > node; - btree_status_t ret; - - if (query_req.is_empty_cursor()) { - // Initialize a new lock tracker and put inside the cursor. - query_req.cursor().m_locked_nodes = std::make_unique< BtreeLockTrackerImpl >(this); - - BtreeNodePtr< K > root; - ret = read_and_lock_root(m_root_node, root, locktype_t::READ, locktype_t::READ, nullptr); - if (ret != btree_status_t::success) { goto out; } - get_tracker(query_req)->push(root); // Start tracking the locked nodes. - } else { - node = get_tracker(query_req)->top(); - } - - ret = do_serialzable_query(node, query_req, out_values); - out: - m_btree_lock.unlock(); - - // TODO: Assert if key returned from do_get is same as key requested, incase of perfect match - -#ifndef NDEBUG - check_lock_debug(); -#endif - - return ret; - } - - BtreeLockTrackerImpl* get_tracker(BtreeSerializableQueryRequest& query_req) { - return (BtreeLockTrackerImpl*)query_req->get_cursor.m_locked_nodes.get(); - } -#endif - - /* It doesn't support async */ - btree_status_t remove_any(BtreeSearchRange& range, BtreeKey* outkey, BtreeValue* outval) { - return (remove_any(range, outkey, outval, nullptr)); - } - - btree_status_t remove_any(BtreeSearchRange& range, BtreeKey* outkey, BtreeValue* outval, const btree_cp_ptr& bcp) { - homeds::thread::locktype acq_lock = homeds::thread::locktype::locktype_t::READ; - bool is_found = false; - bool is_leaf = false; - /* set cursor if it is invalid. User is not interested in the cursor but we need it for internal logic */ - BtreeQueryCursor cur; - bool reset_cur = false; - if (!range.is_cursor_valid()) { - range.set_cursor(&cur); - reset_cur = true; - } - - m_btree_lock.read_lock(); - - retry: - - btree_status_t status = btree_status_t::success; - - BtreeNodePtr< K > root; - status = read_and_lock_root(m_root_node, root, acq_lock, acq_lock); - if (status != btree_status_t::success) { goto out; } - is_leaf = root->is_leaf(); - - if (root->get_total_entries() == 0) { - if (is_leaf) { - // There are no entries in btree. - unlock_node(root, acq_lock); - status = btree_status_t::not_found; - THIS_BT_LOG(DEBUG, base, root, "entry not found in btree"); - goto out; - } - BT_LOG_ASSERT(root->has_valid_edge(), root, "Invalid edge id"); - unlock_node(root, acq_lock); - m_btree_lock.unlock(); - - status = check_collapse_root(); - if (status != btree_status_t::success) { - LOGERROR("check collapse read failed btree name {}", m_bt_cfg.get_name()); - goto out; - } - - // We must have gotten a new root, need to - // start from scratch. - m_btree_lock.read_lock(); - goto retry; - } else if ((is_leaf) && (acq_lock != homeds::thread::LOCKTYPE_WRITE)) { - // Root is a leaf, need to take write lock, instead - // of read, retry - unlock_node(root, acq_lock); - acq_lock = homeds::thread::LOCKTYPE_WRITE; - goto retry; - } else { - status = do_remove(root, acq_lock, range, outkey, outval, bcp); - if (status == btree_status_t::retry) { - // Need to start from top down again, since - // there is a race between 2 inserts or deletes. - acq_lock = homeds::thread::locktype_t::READ; - goto retry; - } - } - - out: - m_btree_lock.unlock(); -#ifndef NDEBUG - check_lock_debug(); -#endif - if (reset_cur) { range.reset_cursor(); } - return status; - } - - btree_status_t remove(const BtreeKey& key, BtreeValue* outval) { return (remove(key, outval, nullptr)); } - - btree_status_t remove(const BtreeKey& key, BtreeValue* outval, const btree_cp_ptr& bcp) { - auto range = BtreeSearchRange(key); - return remove_any(range, nullptr, outval, bcp); - } - - /** - * @brief : verify btree is consistent and no corruption; - * - * @param update_debug_bm : true or false; - * - * @return : true if btree is not corrupted. - * false if btree is corrupted; - */ - bool verify_tree(bool update_debug_bm) { - m_btree_lock.read_lock(); - bool ret = verify_node(m_root_node, nullptr, -1, update_debug_bm); - m_btree_lock.unlock(); - - return ret; - } - - /** - * @brief : get the status of this btree; - * - * @param log_level : verbosity level; - * - * @return : status in json form; - */ - nlohmann::json get_status(const int log_level) { - nlohmann::json j; - return j; - } - - void diff(Btree* other, uint32_t param, vector< pair< K, V > >* diff_kv) { - std::vector< pair< K, V > > my_kvs, other_kvs; - - get_all_kvs(&my_kvs); - other->get_all_kvs(&other_kvs); - auto it1 = my_kvs.begin(); - auto it2 = other_kvs.begin(); - - K k1, k2; - V v1, v2; - - if (it1 != my_kvs.end()) { - k1 = it1->first; - v1 = it1->second; - } - if (it2 != other_kvs.end()) { - k2 = it2->first; - v2 = it2->second; - } - - while ((it1 != my_kvs.end()) && (it2 != other_kvs.end())) { - if (k1.preceeds(&k2)) { - /* k1 preceeds k2 - push k1 and continue */ - diff_kv->emplace_back(make_pair(k1, v1)); - it1++; - if (it1 == my_kvs.end()) { break; } - k1 = it1->first; - v1 = it1->second; - } else if (k1.succeeds(&k2)) { - /* k2 preceeds k1 - push k2 and continue */ - diff_kv->emplace_back(make_pair(k2, v2)); - it2++; - if (it2 == other_kvs.end()) { break; } - k2 = it2->first; - v2 = it2->second; - } else { - /* k1 and k2 overlaps */ - std::vector< pair< K, V > > overlap_kvs; - diff_read_next_t to_read = READ_BOTH; - - v1.get_overlap_diff_kvs(&k1, &v1, &k2, &v2, param, to_read, overlap_kvs); - for (auto ovr_it = overlap_kvs.begin(); ovr_it != overlap_kvs.end(); ovr_it++) { - diff_kv->emplace_back(make_pair(ovr_it->first, ovr_it->second)); - } - - switch (to_read) { - case READ_FIRST: - it1++; - if (it1 == my_kvs.end()) { - // Add k2,v2 - diff_kv->emplace_back(make_pair(k2, v2)); - it2++; - break; - } - k1 = it1->first; - v1 = it1->second; - break; - - case READ_SECOND: - it2++; - if (it2 == other_kvs.end()) { - diff_kv->emplace_back(make_pair(k1, v1)); - it1++; - break; - } - k2 = it2->first; - v2 = it2->second; - break; - - case READ_BOTH: - /* No tail part */ - it1++; - if (it1 == my_kvs.end()) { break; } - k1 = it1->first; - v1 = it1->second; - it2++; - if (it2 == my_kvs.end()) { break; } - k2 = it2->first; - v2 = it2->second; - break; - - default: - LOGERROR("ERROR: Getting Overlapping Diff KVS for {}:{}, {}:{}, to_read {}", k1, v1, k2, v2, - to_read); - /* skip both */ - it1++; - if (it1 == my_kvs.end()) { break; } - k1 = it1->first; - v1 = it1->second; - it2++; - if (it2 == my_kvs.end()) { break; } - k2 = it2->first; - v2 = it2->second; - break; - } - } - } - - while (it1 != my_kvs.end()) { - diff_kv->emplace_back(make_pair(it1->first, it1->second)); - it1++; - } - - while (it2 != other_kvs.end()) { - diff_kv->emplace_back(make_pair(it2->first, it2->second)); - it2++; - } - } - - void merge(Btree* other, match_item_cb_t< K, V > merge_cb) { - std::vector< pair< K, V > > other_kvs; - - other->get_all_kvs(&other_kvs); - for (auto it = other_kvs.begin(); it != other_kvs.end(); it++) { - K k = it->first; - V v = it->second; - BRangeCBParam local_param(k, v); - K start(k.start(), 1), end(k.end(), 1); - - auto search_range = BtreeSearchRange(start, true, end, true); - BtreeUpdateRequest< K, V > ureq(search_range, merge_cb, nullptr, (BRangeCBParam*)&local_param); - range_put(k, v, btree_put_type::APPEND_IF_EXISTS_ELSE_INSERT, nullptr, nullptr, ureq); - } - } - - void print_tree() { - std::string buf; - m_btree_lock.read_lock(); - to_string(m_root_node, buf); - m_btree_lock.unlock(); - - THIS_BT_LOG(INFO, base, , "Pre order traversal of tree:\n<{}>", buf); - } - - void print_node(const bnodeid_t& bnodeid) { - std::string buf; - BtreeNodePtr< K > node; - - m_btree_lock.read_lock(); - homeds::thread::locktype acq_lock = homeds::thread::locktype::locktype_t::READ; - if (read_and_lock_node(bnodeid, node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { goto done; } - buf = node->to_string(true /* print_friendly */); - unlock_node(node, acq_lock); - - done: - m_btree_lock.unlock(); - - THIS_BT_LOG(INFO, base, , "Node: <{}>", buf); - } - - nlohmann::json get_metrics_in_json(bool updated = true) { return m_metrics.get_result_in_json(updated); } - -private: - /** - * @brief : verify the btree node is corrupted or not; - * - * Note: this function should never assert, but only return success or failure since it is in verification mode; - * - * @param bnodeid : node id - * @param parent_node : parent node ptr - * @param indx : index within thie node; - * @param update_debug_bm : true or false; - * - * @return : true if this node including all its children are not corrupted; - * false if not; - */ - bool verify_node(bnodeid_t bnodeid, BtreeNodePtr< K > parent_node, uint32_t indx, bool update_debug_bm) { - homeds::thread::locktype acq_lock = homeds::thread::locktype::locktype_t::READ; - BtreeNodePtr< K > my_node; - if (read_and_lock_node(bnodeid, my_node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { - LOGINFO("read node failed"); - return false; - } - if (update_debug_bm && - (btree_store_t::update_debug_bm(m_btree_store.get(), my_node) != btree_status_t::success)) { - LOGERROR("bitmap update failed for node {}", my_node->to_string()); - return false; - } - - K prev_key; - bool success = true; - for (uint32_t i = 0; i < my_node->get_total_entries(); ++i) { - K key; - my_node->get_nth_key(i, &key, false); - if (!my_node->is_leaf()) { - BtreeNodeInfo child; - my_node->get(i, &child, false); - success = verify_node(child.bnode_id(), my_node, i, update_debug_bm); - if (!success) { goto exit_on_error; } - - if (i > 0) { - BT_LOG_ASSERT_CMP(prev_key.compare(&key), <, 0, my_node); - if (prev_key.compare(&key) >= 0) { - success = false; - goto exit_on_error; - } - } - } - if (my_node->is_leaf() && i > 0) { - BT_LOG_ASSERT_CMP(prev_key.compare_start(&key), <, 0, my_node); - if (prev_key.compare_start(&key) >= 0) { - success = false; - goto exit_on_error; - } - } - prev_key = key; - } - - if (my_node->is_leaf() && my_node->get_total_entries() == 0) { - /* this node has zero entries */ - goto exit_on_error; - } - if (parent_node && parent_node->get_total_entries() != indx) { - K parent_key; - parent_node->get_nth_key(indx, &parent_key, false); - - K last_key; - my_node->get_nth_key(my_node->get_total_entries() - 1, &last_key, false); - if (!my_node->is_leaf()) { - BT_LOG_ASSERT_CMP(last_key.compare(&parent_key), ==, 0, parent_node, - "last key {} parent_key {} child {}", last_key.to_string(), parent_key.to_string(), - my_node->to_string()); - if (last_key.compare(&parent_key) != 0) { - success = false; - goto exit_on_error; - } - } else { - BT_LOG_ASSERT_CMP(last_key.compare(&parent_key), <=, 0, parent_node, - "last key {} parent_key {} child {}", last_key.to_string(), parent_key.to_string(), - my_node->to_string()); - if (last_key.compare(&parent_key) > 0) { - success = false; - goto exit_on_error; - } - BT_LOG_ASSERT_CMP(parent_key.compare_start(&last_key), >=, 0, parent_node, - "last key {} parent_key {} child {}", last_key.to_string(), parent_key.to_string(), - my_node->to_string()); - if (parent_key.compare_start(&last_key) < 0) { - success = false; - goto exit_on_error; - } - } - } - - if (parent_node && indx != 0) { - K parent_key; - parent_node->get_nth_key(indx - 1, &parent_key, false); - - K first_key; - my_node->get_nth_key(0, &first_key, false); - BT_LOG_ASSERT_CMP(first_key.compare(&parent_key), >, 0, parent_node, "my node {}", my_node->to_string()); - if (first_key.compare(&parent_key) <= 0) { - success = false; - goto exit_on_error; - } - - BT_LOG_ASSERT_CMP(parent_key.compare_start(&first_key), <, 0, parent_node, "my node {}", - my_node->to_string()); - if (parent_key.compare_start(&first_key) > 0) { - success = false; - goto exit_on_error; - } - } - - if (my_node->has_valid_edge()) { - success = verify_node(my_node->get_edge_id(), my_node, my_node->get_total_entries(), update_debug_bm); - if (!success) { goto exit_on_error; } - } - - exit_on_error: - unlock_node(my_node, acq_lock); - return success; - } - - void to_string(bnodeid_t bnodeid, std::string& buf) const { - BtreeNodePtr< K > node; - - homeds::thread::locktype acq_lock = homeds::thread::locktype::locktype_t::READ; - - if (read_and_lock_node(bnodeid, node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { return; } - fmt::format_to(std::back_inserter(buf), "{}\n", node->to_string(true /* print_friendly */)); - - if (!node->is_leaf()) { - uint32_t i = 0; - while (i < node->get_total_entries()) { - BtreeNodeInfo p; - node->get(i, &p, false); - to_string(p.bnode_id(), buf); - i++; - } - if (node->has_valid_edge()) { to_string(node->get_edge_id(), buf); } - } - unlock_node(node, acq_lock); - } - - /* This function upgrades the node lock and take required steps if things have - * changed during the upgrade. - * - * Inputs: - * myNode - Node to upgrade - * childNode - In case childNode needs to be unlocked. Could be nullptr - * curLock - Input/Output: current lock type - * - * Returns - If successfully able to upgrade, return true, else false. - * - * About Locks: This function expects the myNode to be locked and if childNode is not nullptr, expects - * it to be locked too. If it is able to successfully upgrade it continue to retain its - * old lock. If failed to upgrade, will release all locks. - */ - btree_status_t upgrade_node(const BtreeNodePtr< K >& my_node, BtreeNodePtr< K > child_node, - homeds::thread::locktype& cur_lock, homeds::thread::locktype& child_cur_lock, - const btree_cp_ptr& bcp) { - uint64_t prev_gen; - btree_status_t ret = btree_status_t::success; - homeds::thread::locktype child_lock_type = child_cur_lock; - - if (cur_lock == homeds::thread::LOCKTYPE_WRITE) { goto done; } - - prev_gen = my_node->get_gen(); - if (child_node) { - unlock_node(child_node, child_cur_lock); - child_cur_lock = locktype::LOCKTYPE_NONE; - } - -#ifdef _PRERELEASE - { - auto time = homestore_flip->get_test_flip< uint64_t >("btree_upgrade_delay"); - if (time) { std::this_thread::sleep_for(std::chrono::microseconds{time.get()}); } - } -#endif - ret = lock_node_upgrade(my_node, bcp); - if (ret != btree_status_t::success) { - cur_lock = locktype::LOCKTYPE_NONE; - return ret; - } - - // The node was not changed by anyone else during upgrade. - cur_lock = homeds::thread::LOCKTYPE_WRITE; - - // If the node has been made invalid (probably by mergeNodes) ask caller to start over again, but before - // that cleanup or free this node if there is no one waiting. - if (!my_node->is_valid_node()) { - unlock_node(my_node, homeds::thread::LOCKTYPE_WRITE); - cur_lock = locktype::LOCKTYPE_NONE; - ret = btree_status_t::retry; - goto done; - } - - // If node has been updated, while we have upgraded, ask caller to start all over again. - if (prev_gen != my_node->get_gen()) { - unlock_node(my_node, cur_lock); - cur_lock = locktype::LOCKTYPE_NONE; - ret = btree_status_t::retry; - goto done; - } - - if (child_node) { - ret = lock_and_refresh_node(child_node, child_lock_type, bcp); - if (ret != btree_status_t::success) { - unlock_node(my_node, cur_lock); - cur_lock = locktype::LOCKTYPE_NONE; - child_cur_lock = locktype::LOCKTYPE_NONE; - goto done; - } - child_cur_lock = child_lock_type; - } - -#ifdef _PRERELEASE - { - int is_leaf = 0; - - if (child_node && child_node->is_leaf()) { is_leaf = 1; } - if (homestore_flip->test_flip("btree_upgrade_node_fail", is_leaf)) { - unlock_node(my_node, cur_lock); - cur_lock = locktype::LOCKTYPE_NONE; - if (child_node) { - unlock_node(child_node, child_cur_lock); - child_cur_lock = locktype::LOCKTYPE_NONE; - } - ret = btree_status_t::retry; - goto done; - } - } -#endif - - BT_DEBUG_ASSERT_CMP(my_node->m_common_header.is_lock, ==, 1, my_node); - done: - return ret; - } - - btree_status_t update_leaf_node(const BtreeNodePtr< K >& my_node, const BtreeKey& k, const BtreeValue& v, - btree_put_type put_type, BtreeValue& existing_val, BtreeUpdateRequest< K, V >* bur, - const btree_cp_ptr& bcp, BtreeSearchRange& subrange) { - btree_status_t ret = btree_status_t::success; - if (bur != nullptr) { - // BT_DEBUG_ASSERT_CMP(bur->callback(), !=, nullptr, my_node); // TODO - range req without - // callback implementation - static thread_local std::vector< std::pair< K, V > > s_match; - s_match.clear(); - int start_ind = 0, end_ind = 0; - my_node->get_all(bur->get_input_range(), UINT32_MAX, start_ind, end_ind, &s_match); - - static thread_local std::vector< pair< K, V > > s_replace_kv; - s_replace_kv.clear(); - bur->get_cb_param()->node_version = my_node->get_version(); - ret = bur->callback()(s_match, s_replace_kv, bur->get_cb_param(), subrange); - if (ret != btree_status_t::success) { return ret; } - - HS_ASSERT_CMP(DEBUG, start_ind, <=, end_ind); - if (s_match.size() > 0) { my_node->remove(start_ind, end_ind); } - COUNTER_DECREMENT(m_metrics, btree_obj_count, s_match.size()); - - for (const auto& pair : s_replace_kv) { // insert is based on compare() of BtreeKey - auto status = my_node->insert(pair.first, pair.second); - BT_RELEASE_ASSERT((status == btree_status_t::success), my_node, "unexpected insert failure"); - COUNTER_INCREMENT(m_metrics, btree_obj_count, 1); - } - - /* update cursor in input range */ - auto end_key_ptr = const_cast< BtreeKey* >(subrange.get_end_key()); - bur->get_input_range().set_cursor_key( - end_key_ptr, ([](BtreeKey* end_key) { return std::move(std::make_unique< K >(*((K*)end_key))); })); - if (homestore::vol_test_run) { - // sorted check - for (auto i = 1u; i < my_node->get_total_entries(); i++) { - K curKey, prevKey; - my_node->get_nth_key(i - 1, &prevKey, false); - my_node->get_nth_key(i, &curKey, false); - if (prevKey.compare(&curKey) >= 0) { - LOGINFO("my_node {}", my_node->to_string()); - for (const auto& [k, v] : s_match) { - LOGINFO("match key {} value {}", k.to_string(), v.to_string()); - } - for (const auto& [k, v] : s_replace_kv) { - LOGINFO("replace key {} value {}", k.to_string(), v.to_string()); - } - } - BT_RELEASE_ASSERT_CMP(prevKey.compare(&curKey), <, 0, my_node); - } - } - } else { - if (!my_node->put(k, v, put_type, existing_val)) { ret = btree_status_t::put_failed; } - COUNTER_INCREMENT(m_metrics, btree_obj_count, 1); - } - - write_node(my_node, bcp); - return ret; - } - - btree_status_t get_start_and_end_ind(const BtreeNodePtr< K >& my_node, BtreeUpdateRequest< K, V >* bur, - const BtreeKey& k, int& start_ind, int& end_ind) { - - btree_status_t ret = btree_status_t::success; - if (bur != nullptr) { - /* just get start/end index from get_all. We don't release the parent lock until this - * key range is not inserted from start_ind to end_ind. - */ - my_node->get_all(bur->get_input_range(), UINT32_MAX, start_ind, end_ind); - } else { - auto result = my_node->find(k, nullptr, nullptr, true, true); - end_ind = start_ind = result.end_of_search_index; - ASSERT_IS_VALID_INTERIOR_CHILD_INDX(result, my_node); - } - - if (start_ind > end_ind) { - BT_LOG_ASSERT(false, my_node, "start ind {} greater than end ind {}", start_ind, end_ind); - ret = btree_status_t::retry; - } - return ret; - } - - /* It split the child if a split is required. It releases lock on parent and child_node in case of failure */ - btree_status_t check_and_split_node(const BtreeNodePtr< K >& my_node, BtreeUpdateRequest< K, V >* bur, - const BtreeKey& k, const BtreeValue& v, int ind_hint, btree_put_type put_type, - BtreeNodePtr< K > child_node, homeds::thread::locktype& curlock, - homeds::thread::locktype& child_curlock, int child_ind, bool& split_occured, - const btree_cp_ptr& bcp) { - - split_occured = false; - K split_key; - btree_status_t ret = btree_status_t::success; - auto child_lock_type = child_curlock; - auto none_lock_type = LOCKTYPE_NONE; - -#ifdef _PRERELEASE - boost::optional< int > time; - if (child_node->is_leaf()) { - time = homestore_flip->get_test_flip< int >("btree_delay_and_split_leaf", child_node->get_total_entries()); - } else { - time = homestore_flip->get_test_flip< int >("btree_delay_and_split", child_node->get_total_entries()); - } - if (time && child_node->get_total_entries() > 2) { - std::this_thread::sleep_for(std::chrono::microseconds{time.get()}); - } else -#endif - { - if (!child_node->is_split_needed(m_bt_cfg, k, v, &ind_hint, put_type, bur)) { return ret; } - } - - /* Split needed */ - if (bur) { - - /* In case of range update we might split multiple childs of a parent in a single - * iteration which result into less space in the parent node. - */ -#ifdef _PRERELEASE - if (homestore_flip->test_flip("btree_parent_node_full")) { - ret = btree_status_t::retry; - goto out; - } -#endif - if (my_node->is_split_needed(m_bt_cfg, k, v, &ind_hint, put_type, bur)) { - // restart from root - ret = btree_status_t::retry; - goto out; - } - } - - // Time to split the child, but we need to convert parent to write lock - ret = upgrade_node(my_node, child_node, curlock, child_curlock, bcp); - if (ret != btree_status_t::success) { - THIS_BT_LOG(DEBUG, btree_structures, my_node, "Upgrade of node lock failed, retrying from root"); - BT_LOG_ASSERT_CMP(curlock, ==, homeds::thread::LOCKTYPE_NONE, my_node); - goto out; - } - BT_LOG_ASSERT_CMP(child_curlock, ==, child_lock_type, my_node); - BT_LOG_ASSERT_CMP(curlock, ==, homeds::thread::LOCKTYPE_WRITE, my_node); - - // We need to upgrade the child to WriteLock - ret = upgrade_node(child_node, nullptr, child_curlock, none_lock_type, bcp); - if (ret != btree_status_t::success) { - THIS_BT_LOG(DEBUG, btree_structures, child_node, "Upgrade of child node lock failed, retrying from root"); - BT_LOG_ASSERT_CMP(child_curlock, ==, homeds::thread::LOCKTYPE_NONE, child_node); - goto out; - } - BT_LOG_ASSERT_CMP(none_lock_type, ==, homeds::thread::LOCKTYPE_NONE, my_node); - BT_LOG_ASSERT_CMP(child_curlock, ==, homeds::thread::LOCKTYPE_WRITE, child_node); - - // Real time to split the node and get point at which it was split - ret = split_node(my_node, child_node, child_ind, &split_key, bcp); - if (ret != btree_status_t::success) { goto out; } - - // After split, retry search and walk down. - unlock_node(child_node, homeds::thread::LOCKTYPE_WRITE); - child_curlock = LOCKTYPE_NONE; - COUNTER_INCREMENT(m_metrics, btree_split_count, 1); - split_occured = true; - out: - if (ret != btree_status_t::success) { - if (curlock != LOCKTYPE_NONE) { - unlock_node(my_node, curlock); - curlock = LOCKTYPE_NONE; - } - - if (child_curlock != LOCKTYPE_NONE) { - unlock_node(child_node, child_curlock); - child_curlock = LOCKTYPE_NONE; - } - } - return ret; - } - - /* This function is called for the interior nodes whose childs are leaf nodes to calculate the sub range */ - void get_subrange(const BtreeNodePtr< K >& my_node, BtreeUpdateRequest< K, V >* bur, int curr_ind, - K& subrange_start_key, K& subrange_end_key, bool& subrange_start_inc, bool& subrange_end_inc) { - -#ifndef NDEBUG - if (curr_ind > 0) { - /* start of subrange will always be more then the key in curr_ind - 1 */ - K start_key; - BtreeKey* start_key_ptr = &start_key; - - my_node->get_nth_key(curr_ind - 1, start_key_ptr, false); - HS_ASSERT_CMP(DEBUG, start_key_ptr->compare(bur->get_input_range().get_start_key()), <=, 0); - } -#endif - - // find end of subrange - bool end_inc = true; - K end_key; - BtreeKey* end_key_ptr = &end_key; - - if (curr_ind < (int)my_node->get_total_entries()) { - my_node->get_nth_key(curr_ind, end_key_ptr, false); - if (end_key_ptr->compare(bur->get_input_range().get_end_key()) >= 0) { - /* this is last index to process as end of range is smaller then key in this node */ - end_key_ptr = const_cast< BtreeKey* >(bur->get_input_range().get_end_key()); - end_inc = bur->get_input_range().is_end_inclusive(); - } else { - end_inc = true; - } - } else { - /* it is the edge node. end key is the end of input range */ - BT_LOG_ASSERT_CMP(my_node->has_valid_edge(), ==, true, my_node); - end_key_ptr = const_cast< BtreeKey* >(bur->get_input_range().get_end_key()); - end_inc = bur->get_input_range().is_end_inclusive(); - } - - BtreeSearchRange& input_range = bur->get_input_range(); - auto start_key_ptr = input_range.get_start_key(); - subrange_start_key.copy_blob(start_key_ptr->get_blob()); - subrange_end_key.copy_blob(end_key_ptr->get_blob()); - subrange_start_inc = input_range.is_start_inclusive(); - subrange_end_inc = end_inc; - - auto ret = subrange_start_key.compare(&subrange_end_key); - BT_RELEASE_ASSERT_CMP(ret, <=, 0, my_node); - ret = subrange_start_key.compare(bur->get_input_range().get_end_key()); - BT_RELEASE_ASSERT_CMP(ret, <=, 0, my_node); - /* We don't neeed to update the start at it is updated when entries are inserted in leaf nodes */ - } - - btree_status_t check_split_root(const BtreeMutateRequest& put_req) { - int ind; - K split_key; - BtreeNodePtr< K > child_node = nullptr; - btree_status_t ret = btree_status_t::success; - - m_btree_lock.write_lock(); - BtreeNodePtr< K > root; - - ret = read_and_lock_root(m_root_node, root, locktype::LOCKTYPE_WRITE, locktype::LOCKTYPE_WRITE); - if (ret != btree_status_t::success) { goto done; } - - if (!root->is_split_needed(m_bt_cfg, put_req)) { - unlock_node(root, homeds::thread::LOCKTYPE_WRITE); - goto done; - } - - // Create a new child node and split them - child_node = alloc_interior_node(); - if (child_node == nullptr) { - ret = btree_status_t::space_not_avail; - unlock_node(root, homeds::thread::LOCKTYPE_WRITE); - goto done; - } - - /* it swap the data while keeping the nodeid same */ - btree_store_t::swap_node(m_btree_store.get(), root, child_node); - write_node(child_node); - - THIS_BT_LOG(DEBUG, btree_structures, root, - "Root node is full, swapping contents with child_node {} and split that", - child_node->get_node_id()); - - BT_DEBUG_ASSERT_CMP(root->get_total_entries(), ==, 0, root); - ret = split_node(root, child_node, root->get_total_entries(), &split_key, true); - BT_DEBUG_ASSERT_CMP(m_root_node, ==, root->get_node_id(), root); - - if (ret != btree_status_t::success) { - btree_store_t::swap_node(m_btree_store.get(), child_node, root); - write_node(child_node); - } - - /* unlock child node */ - unlock_node(root, homeds::thread::LOCKTYPE_WRITE); - - if (ret == btree_status_t::success) { COUNTER_INCREMENT(m_metrics, btree_depth, 1); } - done: - m_btree_lock.unlock(); - return ret; - } - - btree_status_t check_collapse_root(const btree_cp_ptr& bcp) { - BtreeNodePtr< K > child_node = nullptr; - btree_status_t ret = btree_status_t::success; - std::vector< BtreeNodePtr< K > > old_nodes; - std::vector< BtreeNodePtr< K > > new_nodes; - - m_btree_lock.write_lock(); - BtreeNodePtr< K > root; - - ret = read_and_lock_root(m_root_node, root, locktype::LOCKTYPE_WRITE, locktype::LOCKTYPE_WRITE, bcp); - if (ret != btree_status_t::success) { goto done; } - - if (root->get_total_entries() != 0 || root->is_leaf() /*some other thread collapsed root already*/) { - unlock_node(root, locktype::LOCKTYPE_WRITE); - goto done; - } - - BT_DEBUG_ASSERT_CMP(root->has_valid_edge(), ==, true, root); - ret = read_node(root->get_edge_id(), child_node); - if (child_node == nullptr) { - unlock_node(root, locktype::LOCKTYPE_WRITE); - goto done; - } - - // Elevate the edge child as root. - btree_store_t::swap_node(m_btree_store.get(), root, child_node); - write_node(root, bcp); - BT_DEBUG_ASSERT_CMP(m_root_node, ==, root->get_node_id(), root); - - old_nodes.push_back(child_node); - - if (BtreeStoreType == btree_store_type::SSD_BTREE) { - auto j_iob = btree_store_t::make_journal_entry(journal_op::BTREE_MERGE, true /* is_root */, bcp); - btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::inplace_write, root, bcp); - btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::removal, child_node, bcp); - btree_store_t::write_journal_entry(m_btree_store.get(), bcp, j_iob); - } - unlock_node(root, locktype::LOCKTYPE_WRITE); - free_node(child_node, (bcp ? bcp->free_blkid_list : nullptr)); - - if (ret == btree_status_t::success) { COUNTER_DECREMENT(m_metrics, btree_depth, 1); } - done: - m_btree_lock.unlock(); - return ret; - } - - btree_status_t split_node(const BtreeNodePtr< K >& parent_node, BtreeNodePtr< K > child_node, uint32_t parent_ind, - BtreeKey* out_split_key, const btree_cp_ptr& bcp, bool root_split = false) { - BtreeNodeInfo ninfo; - BtreeNodePtr< K > child_node1 = child_node; - BtreeNodePtr< K > child_node2 = child_node1->is_leaf() ? alloc_leaf_node() : alloc_interior_node(); - - if (child_node2 == nullptr) { return (btree_status_t::space_not_avail); } - - btree_status_t ret = btree_status_t::success; - - child_node2->set_next_bnode(child_node1->next_bnode()); - child_node1->set_next_bnode(child_node2->get_node_id()); - uint32_t child1_filled_size = m_bt_cfg.get_node_area_size() - child_node1->get_available_size(m_bt_cfg); - - auto split_size = m_bt_cfg.get_split_size(child1_filled_size); - uint32_t res = child_node1->move_out_to_right_by_size(m_bt_cfg, child_node2, split_size); - - BT_RELEASE_ASSERT_CMP(res, >, 0, child_node1, - "Unable to split entries in the child node"); // means cannot split entries - BT_DEBUG_ASSERT_CMP(child_node1->get_total_entries(), >, 0, child_node1); - - // Update the existing parent node entry to point to second child ptr. - bool edge_split = (parent_ind == parent_node->get_total_entries()); - ninfo.set_bnode_id(child_node2->get_node_id()); - parent_node->update(parent_ind, ninfo); - - // Insert the last entry in first child to parent node - child_node1->get_last_key(out_split_key); - ninfo.set_bnode_id(child_node1->get_node_id()); - - /* If key is extent then we always insert the end key in the parent node */ - K out_split_end_key; - out_split_end_key.copy_end_key_blob(out_split_key->get_blob()); - parent_node->insert(out_split_end_key, ninfo); - -#ifndef NDEBUG - K split_key; - child_node2->get_first_key(&split_key); - BT_DEBUG_ASSERT_CMP(split_key.compare(out_split_key), >, 0, child_node2); -#endif - THIS_BT_LOG(DEBUG, btree_structures, parent_node, "Split child_node={} with new_child_node={}, split_key={}", - child_node1->get_node_id(), child_node2->get_node_id(), out_split_key->to_string()); - - if (BtreeStoreType == btree_store_type::SSD_BTREE) { - auto j_iob = btree_store_t::make_journal_entry(journal_op::BTREE_SPLIT, root_split, bcp, - {parent_node->get_node_id(), parent_node->get_gen()}); - btree_store_t::append_node_to_journal( - j_iob, (root_split ? bt_journal_node_op::creation : bt_journal_node_op::inplace_write), child_node1, - bcp, out_split_end_key.get_blob()); - - // For root split or split around the edge, we don't write the key, which will cause replay to insert - // edge - if (edge_split) { - btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::creation, child_node2, bcp); - } else { - K child2_pkey; - parent_node->get_nth_key(parent_ind, &child2_pkey, true); - btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::creation, child_node2, bcp, - child2_pkey.get_blob()); - } - btree_store_t::write_journal_entry(m_btree_store.get(), bcp, j_iob); - } - - // we write right child node, than left and than parent child - write_node(child_node2, nullptr, bcp); - write_node(child_node1, child_node2, bcp); - write_node(parent_node, child_node1, bcp); - - // NOTE: Do not access parentInd after insert, since insert would have - // shifted parentNode to the right. - return ret; - } - -public: - btree_status_t create_btree_replay(btree_journal_entry* jentry, const btree_cp_ptr& bcp) { - if (jentry) { - BT_DEBUG_ASSERT_CMP(jentry->is_root, ==, true, , - "Expected create_btree_replay entry to be root journal entry"); - BT_DEBUG_ASSERT_CMP(jentry->parent_node.get_id(), ==, m_root_node, , "Root node journal entry mismatch"); - } - - // Create a root node by reserving the leaf node - BtreeNodePtr< K > root = reserve_leaf_node(BlkId(m_root_node)); - auto ret = write_node(root, nullptr, bcp); - BT_DEBUG_ASSERT_CMP(ret, ==, btree_status_t::success, , "expecting success in writing root node"); - return btree_status_t::success; - } - - btree_status_t split_node_replay(btree_journal_entry* jentry, const btree_cp_ptr& bcp) { - bnodeid_t id = jentry->is_root ? m_root_node : jentry->parent_node.node_id; - BtreeNodePtr< K > parent_node; - - // read parent node - read_node_or_fail(id, parent_node); - - // Parent already went ahead of the journal entry, return done - if (parent_node->get_gen() >= jentry->parent_node.node_gen) { - THIS_BT_LOG(INFO, base, , - "Journal replay: parent_node gen {} ahead of jentry gen {} is root {} , skipping ", - parent_node->get_gen(), jentry->parent_node.get_gen(), jentry->is_root); - return btree_status_t::replay_not_needed; - } - - // Read the first inplace write node which is the leftmost child and also form child split key from journal - auto j_child_nodes = jentry->get_nodes(); - - BtreeNodePtr< K > child_node1; - if (jentry->is_root) { - // If root is not written yet, parent_node will be pointing child_node1, so create a new parent_node to - // be treated as root here on. - child_node1 = reserve_interior_node(BlkId(j_child_nodes[0]->node_id())); - btree_store_t::swap_node(m_btree_store.get(), parent_node, child_node1); - - THIS_BT_LOG(INFO, btree_generics, , - "Journal replay: root split, so creating child_node id={} and swapping the node with " - "parent_node id={} names {}", - child_node1->get_node_id(), parent_node->get_node_id(), m_bt_cfg.get_name()); - - } else { - read_node_or_fail(j_child_nodes[0]->node_id(), child_node1); - } - - THIS_BT_LOG(INFO, btree_generics, , - "Journal replay: child_node1 => jentry: [id={} gen={}], ondisk: [id={} gen={}] names {}", - j_child_nodes[0]->node_id(), j_child_nodes[0]->node_gen(), child_node1->get_node_id(), - child_node1->get_gen(), m_bt_cfg.get_name()); - if (jentry->is_root) { - BT_RELEASE_ASSERT_CMP(j_child_nodes[0]->type, ==, bt_journal_node_op::creation, , - "Expected first node in journal entry to be new creation for root split"); - } else { - BT_RELEASE_ASSERT_CMP(j_child_nodes[0]->type, ==, bt_journal_node_op::inplace_write, , - "Expected first node in journal entry to be in-place write"); - } - BT_RELEASE_ASSERT_CMP(j_child_nodes[1]->type, ==, bt_journal_node_op::creation, , - "Expected second node in journal entry to be new node creation"); - - // recover child node - bool child_split = recover_child_nodes_in_split(child_node1, j_child_nodes, bcp); - - // recover parent node - recover_parent_node_in_split(parent_node, child_split ? child_node1 : nullptr, j_child_nodes, bcp); - return btree_status_t::success; - } - -private: - bool recover_child_nodes_in_split(const BtreeNodePtr< K >& child_node1, - const std::vector< bt_journal_node_info* >& j_child_nodes, - const btree_cp_ptr& bcp) { - - BtreeNodePtr< K > child_node2; - // Check if child1 is ahead of the generation - if (child_node1->get_gen() >= j_child_nodes[0]->node_gen()) { - // leftmost_node is written, so right node must have been written as well. - read_node_or_fail(child_node1->next_bnode(), child_node2); - - // sanity check for right node - BT_RELEASE_ASSERT_CMP(child_node2->get_gen(), >=, j_child_nodes[1]->node_gen(), child_node2, - "gen cnt should be more than the journal entry"); - // no need to recover child nodes - return false; - } - - K split_key; - split_key.set_blob({j_child_nodes[0]->key_area(), j_child_nodes[0]->key_size}); - child_node2 = child_node1->is_leaf() ? reserve_leaf_node(BlkId(j_child_nodes[1]->node_id())) - : reserve_interior_node(BlkId(j_child_nodes[1]->node_id())); - - // We need to do split based on entries since the left children is also not written yet. - // Find the split key within the child_node1. It is not always found, so we split upto that. - auto ret = child_node1->find(split_key, nullptr, false); - - // sanity check for left mode node before recovery - { - if (!ret.found) { - if (!child_node1->is_leaf()) { - BT_RELEASE_ASSERT(0, , "interior nodes should always have this key if it is written yet"); - } - } - } - - THIS_BT_LOG(INFO, btree_generics, , "Journal replay: split key {}, split indx {} child_node1 {}", - split_key.to_string(), ret.end_of_search_index, child_node1->to_string()); - /* if it is not found than end_of_search_index points to first ind which is greater than split key */ - auto split_ind = ret.end_of_search_index; - if (ret.found) { ++split_ind; } // we don't want to move split key */ - if (child_node1->is_leaf() && split_ind < (int)child_node1->get_total_entries()) { - K key; - child_node1->get_nth_key(split_ind, &key, false); - - if (split_key.compare_start(&key) >= 0) { /* we need to split the key range */ - THIS_BT_LOG(INFO, btree_generics, , "splitting a leaf node key {}", key.to_string()); - V v; - child_node1->get_nth_value(split_ind, &v, false); - vector< pair< K, V > > replace_kv; - child_node1->remove(split_ind, split_ind); - m_split_key_cb(key, v, split_key, replace_kv); - for (auto& pair : replace_kv) { - auto status = child_node1->insert(pair.first, pair.second); - BT_RELEASE_ASSERT((status == btree_status_t::success), child_node1, "unexpected insert failure"); - } - auto ret = child_node1->find(split_key, nullptr, false); - BT_RELEASE_ASSERT((ret.found && (ret.end_of_search_index == split_ind)), child_node1, - "found new indx {}, old split indx{}", ret.end_of_search_index, split_ind); - ++split_ind; - } - } - child_node1->move_out_to_right_by_entries(m_bt_cfg, child_node2, child_node1->get_total_entries() - split_ind); - - child_node2->set_next_bnode(child_node1->next_bnode()); - child_node2->set_gen(j_child_nodes[1]->node_gen()); - - child_node1->set_next_bnode(child_node2->get_node_id()); - child_node1->set_gen(j_child_nodes[0]->node_gen()); - - THIS_BT_LOG(INFO, btree_generics, , "Journal replay: child_node2 {}", child_node2->to_string()); - write_node(child_node2, nullptr, bcp); - write_node(child_node1, child_node2, bcp); - return true; - } - - void recover_parent_node_in_split(const BtreeNodePtr< K >& parent_node, const BtreeNodePtr< K >& child_node1, - std::vector< bt_journal_node_info* >& j_child_nodes, const btree_cp_ptr& bcp) { - - // find child_1 key - K child1_key; // we need to insert child1_key - BT_RELEASE_ASSERT_CMP(j_child_nodes[0]->key_size, !=, 0, , "key size of left mode node is zero"); - child1_key.set_blob({j_child_nodes[0]->key_area(), j_child_nodes[0]->key_size}); - auto child1_node_id = j_child_nodes[0]->node_id(); - - // find split indx - auto ret = parent_node->find(child1_key, nullptr, false); - BT_RELEASE_ASSERT_CMP(ret.found, ==, false, , "child_1 key should not be in this parent"); - auto split_indx = ret.end_of_search_index; - - // find child2_key - K child2_key; // we only need to update child2_key to new node - if (j_child_nodes[1]->key_size != 0) { - child2_key.set_blob({j_child_nodes[1]->key_area(), j_child_nodes[1]->key_size}); - ret = parent_node->find(child2_key, nullptr, false); - BT_RELEASE_ASSERT_CMP(split_indx, ==, ret.end_of_search_index, , "it should be same as split index"); - } else { - // parent should be valid edge it is not a root split - } - auto child2_node_id = j_child_nodes[1]->node_id(); - - // update child2_key value - BtreeNodeInfo ninfo; - ninfo.set_bnode_id(child2_node_id); - parent_node->update(split_indx, ninfo); - - // insert child 1 - ninfo.set_bnode_id(child1_node_id); - K out_split_end_key; - out_split_end_key.copy_end_key_blob(child1_key.get_blob()); - parent_node->insert(out_split_end_key, ninfo); - - // Write the parent node - write_node(parent_node, child_node1, bcp); - - /* do sanity check after recovery split */ - { - validate_sanity_child(parent_node, split_indx); - validate_sanity_next_child(parent_node, split_indx); - } - } - - btree_status_t merge_nodes(const BtreeNodePtr< K >& parent_node, uint32_t start_indx, uint32_t end_indx, - const btree_cp_ptr& bcp) { - btree_status_t ret = btree_status_t::merge_failed; - std::vector< BtreeNodePtr< K > > child_nodes; - std::vector< BtreeNodePtr< K > > old_nodes; - std::vector< BtreeNodePtr< K > > replace_nodes; - std::vector< BtreeNodePtr< K > > new_nodes; - std::vector< BtreeNodePtr< K > > deleted_nodes; - BtreeNodePtr< K > left_most_node; - K last_pkey; // last key of parent node - bool last_pkey_valid = false; - uint32_t balanced_size; - BtreeNodePtr< K > merge_node; - K last_ckey; // last key in child - uint32_t parent_insert_indx = start_indx; -#ifndef NDEBUG - uint32_t total_child_entries = 0; - uint32_t new_entries = 0; - K last_debug_ckey; - K new_last_debug_ckey; - BtreeNodePtr< K > last_node; -#endif - /* Try to take a lock on all nodes participating in merge*/ - for (auto indx = start_indx; indx <= end_indx; ++indx) { - if (indx == parent_node->get_total_entries()) { - BT_LOG_ASSERT(parent_node->has_valid_edge(), parent_node, - "Assertion failure, expected valid edge for parent_node: {}"); - } - - BtreeNodeInfo child_info; - parent_node->get(indx, &child_info, false /* copy */); - - BtreeNodePtr< K > child; - ret = read_and_lock_node(child_info.bnode_id(), child, locktype::LOCKTYPE_WRITE, locktype::LOCKTYPE_WRITE, - bcp); - if (ret != btree_status_t::success) { goto out; } - BT_LOG_ASSERT_CMP(child->is_valid_node(), ==, true, child); - - /* check if left most node has space */ - if (indx == start_indx) { - balanced_size = m_bt_cfg.get_ideal_fill_size(); - left_most_node = child; - if (left_most_node->get_occupied_size(m_bt_cfg) > balanced_size) { - /* first node doesn't have any free space. we can exit now */ - ret = btree_status_t::merge_not_required; - goto out; - } - } else { - bool is_allocated = true; - /* pre allocate the new nodes. We will free the nodes which are not in use later */ - auto new_node = btree_store_t::alloc_node(m_btree_store.get(), child->is_leaf(), is_allocated, child); - if (is_allocated) { - /* we are going to allocate new blkid of all the nodes except the first node. - * Note :- These blkids will leak if we fail or crash before writing entry into - * journal. - */ - old_nodes.push_back(child); - COUNTER_INCREMENT_IF_ELSE(m_metrics, child->is_leaf(), btree_leaf_node_count, btree_int_node_count, - 1); - } - /* Blk IDs can leak if it crash before writing it to a journal */ - if (new_node == nullptr) { - ret = btree_status_t::space_not_avail; - goto out; - } - new_nodes.push_back(new_node); - } -#ifndef NDEBUG - total_child_entries += child->get_total_entries(); - child->get_last_key(&last_debug_ckey); -#endif - child_nodes.push_back(child); - } - - if (end_indx != parent_node->get_total_entries()) { - /* If it is not edge we always preserve the last key in a given merge group of nodes.*/ - parent_node->get_nth_key(end_indx, &last_pkey, true); - last_pkey_valid = true; - } - - merge_node = left_most_node; - /* We can not fail from this point. Nodes will be modified in memory. */ - for (uint32_t i = 0; i < new_nodes.size(); ++i) { - auto occupied_size = merge_node->get_occupied_size(m_bt_cfg); - if (occupied_size < balanced_size) { - uint32_t pull_size = balanced_size - occupied_size; - merge_node->move_in_from_right_by_size(m_bt_cfg, new_nodes[i], pull_size); - if (new_nodes[i]->get_total_entries() == 0) { - /* this node is freed */ - deleted_nodes.push_back(new_nodes[i]); - continue; - } - } - - /* update the last key of merge node in parent node */ - K last_ckey; // last key in child - merge_node->get_last_key(&last_ckey); - BtreeNodeInfo ninfo(merge_node->get_node_id()); - parent_node->update(parent_insert_indx, last_ckey, ninfo); - ++parent_insert_indx; - - merge_node->set_next_bnode(new_nodes[i]->get_node_id()); // link them - merge_node = new_nodes[i]; - if (merge_node != left_most_node) { - /* left most node is not replaced */ - replace_nodes.push_back(merge_node); - } - } - - /* update the latest merge node */ - merge_node->get_last_key(&last_ckey); - if (last_pkey_valid) { - BT_DEBUG_ASSERT_CMP(last_ckey.compare(&last_pkey), <=, 0, parent_node); - last_ckey = last_pkey; - } - - /* update the last key */ - { - BtreeNodeInfo ninfo(merge_node->get_node_id()); - parent_node->update(parent_insert_indx, last_ckey, ninfo); - ++parent_insert_indx; - } - - /* remove the keys which are no longer used */ - if ((parent_insert_indx) <= end_indx) { parent_node->remove(parent_insert_indx, end_indx); } - - /* write the journal entry */ - if (BtreeStoreType == btree_store_type::SSD_BTREE) { - auto j_iob = btree_store_t::make_journal_entry(journal_op::BTREE_MERGE, false /* is_root */, bcp, - {parent_node->get_node_id(), parent_node->get_gen()}); - K child_pkey; - if (start_indx < parent_node->get_total_entries()) { - parent_node->get_nth_key(start_indx, &child_pkey, true); - BT_RELEASE_ASSERT_CMP(start_indx, ==, (parent_insert_indx - 1), parent_node, "it should be last index"); - } - btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::inplace_write, left_most_node, bcp, - child_pkey.get_blob()); - for (auto& node : old_nodes) { - btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::removal, node, bcp); - } - uint32_t insert_indx = 0; - for (auto& node : replace_nodes) { - K child_pkey; - if ((start_indx + insert_indx) < parent_node->get_total_entries()) { - parent_node->get_nth_key(start_indx + insert_indx, &child_pkey, true); - BT_RELEASE_ASSERT_CMP((start_indx + insert_indx), ==, (parent_insert_indx - 1), parent_node, - "it should be last index"); - } - btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::creation, node, bcp, - child_pkey.get_blob()); - ++insert_indx; - } - BT_RELEASE_ASSERT_CMP((start_indx + insert_indx), ==, parent_insert_indx, parent_node, "it should be same"); - btree_store_t::write_journal_entry(m_btree_store.get(), bcp, j_iob); - } - - if (replace_nodes.size() > 0) { - /* write the right most node */ - write_node(replace_nodes[replace_nodes.size() - 1], nullptr, bcp); - if (replace_nodes.size() > 1) { - /* write the middle nodes */ - for (int i = replace_nodes.size() - 2; i >= 0; --i) { - write_node(replace_nodes[i], replace_nodes[i + 1], bcp); - } - } - /* write the left most node */ - write_node(left_most_node, replace_nodes[0], bcp); - } else { - /* write the left most node */ - write_node(left_most_node, nullptr, bcp); - } - - /* write the parent node */ - write_node(parent_node, left_most_node, bcp); - -#ifndef NDEBUG - for (const auto& n : replace_nodes) { - new_entries += n->get_total_entries(); - } - - new_entries += left_most_node->get_total_entries(); - HS_DEBUG_ASSERT_EQ(total_child_entries, new_entries); - - if (replace_nodes.size()) { - replace_nodes[replace_nodes.size() - 1]->get_last_key(&new_last_debug_ckey); - last_node = replace_nodes[replace_nodes.size() - 1]; - } else { - left_most_node->get_last_key(&new_last_debug_ckey); - last_node = left_most_node; - } - if (last_debug_ckey.compare(&new_last_debug_ckey) != 0) { - LOGINFO("{}", last_node->to_string()); - if (deleted_nodes.size() > 0) { LOGINFO("{}", (deleted_nodes[deleted_nodes.size() - 1]->to_string())); } - HS_DEBUG_ASSERT(false, "compared failed"); - } -#endif - /* free nodes. It actually gets freed after cp is completed */ - for (const auto& n : old_nodes) { - free_node(n, (bcp ? bcp->free_blkid_list : nullptr)); - } - for (const auto& n : deleted_nodes) { - free_node(n); - } - ret = btree_status_t::success; - out: -#ifndef NDEBUG - uint32_t freed_entries = deleted_nodes.size(); - uint32_t scan_entries = end_indx - start_indx - freed_entries + 1; - for (uint32_t i = 0; i < scan_entries; ++i) { - if (i < (scan_entries - 1)) { validate_sanity_next_child(parent_node, (uint32_t)start_indx + i); } - validate_sanity_child(parent_node, (uint32_t)start_indx + i); - } -#endif - // Loop again in reverse order to unlock the nodes. freeable nodes need to be unlocked and freed - for (uint32_t i = child_nodes.size() - 1; i != 0; i--) { - unlock_node(child_nodes[i], locktype::LOCKTYPE_WRITE); - } - unlock_node(child_nodes[0], locktype::LOCKTYPE_WRITE); - if (ret != btree_status_t::success) { - /* free the allocated nodes */ - for (const auto& n : new_nodes) { - free_node(n); - } - } - return ret; - } - -#if 0 - btree_status_t merge_node_replay(btree_journal_entry* jentry, const btree_cp_ptr& bcp) { - BtreeNodePtr< K > parent_node = (jentry->is_root) ? read_node(m_root_node) : read_node(jentry->parent_node.node_id); - - // Parent already went ahead of the journal entry, return done - if (parent_node->get_gen() >= jentry->parent_node.node_gen) { return btree_status_t::replay_not_needed; } - } -#endif - - void validate_sanity_child(const BtreeNodePtr< K >& parent_node, uint32_t ind) { - BtreeNodeInfo child_info; - K child_first_key; - K child_last_key; - K parent_key; - - parent_node->get(ind, &child_info, false /* copy */); - BtreeNodePtr< K > child_node = nullptr; - auto ret = read_node(child_info.bnode_id(), child_node); - BT_REL_ASSERT_EQ(ret, btree_status_t::success, "read failed, reason: {}", ret); - if (child_node->get_total_entries() == 0) { - auto parent_entries = parent_node->get_total_entries(); - if (!child_node->is_leaf()) { // leaf node or edge node can have 0 entries - BT_REL_ASSERT_EQ(((parent_node->has_valid_edge() && ind == parent_entries)), true); - } - return; - } - child_node->get_first_key(&child_first_key); - child_node->get_last_key(&child_last_key); - BT_REL_ASSERT_LE(child_first_key.compare(&child_last_key), 0) - if (ind == parent_node->get_total_entries()) { - BT_REL_ASSERT_EQ(parent_node->has_valid_edge(), true); - if (ind > 0) { - parent_node->get_nth_key(ind - 1, &parent_key, false); - BT_REL_ASSERT_GT(child_first_key.compare(&parent_key), 0) - BT_REL_ASSERT_GT(parent_key.compare_start(&child_first_key), 0) - } - } else { - parent_node->get_nth_key(ind, &parent_key, false); - BT_REL_ASSERT_LE(child_first_key.compare(&parent_key), 0) - BT_REL_ASSERT_LE(child_last_key.compare(&parent_key), 0) - BT_REL_ASSERT_GE(parent_key.compare_start(&child_first_key), 0) - BT_REL_ASSERT_GE(parent_key.compare_start(&child_first_key), 0) - if (ind != 0) { - parent_node->get_nth_key(ind - 1, &parent_key, false); - BT_REL_ASSERT_GT(child_first_key.compare(&parent_key), 0) - BT_REL_ASSERT_GT(parent_key.compare_start(&child_first_key), 0) - } - } - } - - void validate_sanity_next_child(const BtreeNodePtr< K >& parent_node, uint32_t ind) { - BtreeNodeInfo child_info; - K child_key; - K parent_key; - - if (parent_node->has_valid_edge()) { - if (ind == parent_node->get_total_entries()) { return; } - } else { - if (ind == parent_node->get_total_entries() - 1) { return; } - } - parent_node->get(ind + 1, &child_info, false /* copy */); - BtreeNodePtr< K > child_node = nullptr; - auto ret = read_node(child_info.bnode_id(), child_node); - HS_RELEASE_ASSERT(ret == btree_status_t::success, "read failed, reason: {}", ret); - if (child_node->get_total_entries() == 0) { - auto parent_entries = parent_node->get_total_entries(); - if (!child_node->is_leaf()) { // leaf node can have 0 entries - HS_ASSERT_CMP(RELEASE, - ((parent_node->has_valid_edge() && ind == parent_entries) || (ind = parent_entries - 1)), - ==, true); - } - return; - } - /* in case of merge next child will never have zero entries otherwise it would have been merged */ - HS_ASSERT_CMP(RELEASE, child_node->get_total_entries(), !=, 0); - child_node->get_first_key(&child_key); - parent_node->get_nth_key(ind, &parent_key, false); - BT_REL_ASSERT_GT(child_key.compare(&parent_key), 0) - BT_REL_ASSERT_GT(parent_key.compare_start(&child_key), 0) - } - - /* Recovery process is different for root node, child node and sibling node depending on how the node - * is accessed. This is the reason to create below three apis separately. - */ - -protected: - BtreeConfig* get_config() { return &m_bt_cfg; } -}; // namespace btree - -// static inline const char* _type_desc(const BtreeNodePtr< K >& n) { return n->is_leaf() ? "L" : "I"; } - -template < btree_store_type BtreeStoreType, typename K, typename V, btree_node_type InteriorNodeType, - btree_node_type LeafNodeType > -thread_local homeds::reserve_vector< btree_locked_node_info, 5 > btree_t::wr_locked_nodes; - -template < btree_store_type BtreeStoreType, typename K, typename V, btree_node_type InteriorNodeType, - btree_node_type LeafNodeType > -thread_local homeds::reserve_vector< btree_locked_node_info, 5 > btree_t::rd_locked_nodes; - -#ifdef SERIALIZABLE_QUERY_IMPLEMENTATION -template < btree_store_type BtreeStoreType, typename K, typename V, btree_node_type InteriorNodeType, - btree_node_type LeafNodeType > -class BtreeLockTrackerImpl : public BtreeLockTracker { -public: - BtreeLockTrackerImpl(btree_t* bt) : m_bt(bt) {} - - virtual ~BtreeLockTrackerImpl() { - while (m_nodes.size()) { - auto& p = m_nodes.top(); - m_bt->unlock_node(p.first, p.second); - m_nodes.pop(); - } - } - - void push(const BtreeNodePtr< K >& node, homeds::thread::locktype locktype) { - m_nodes.emplace(std::make_pair<>(node, locktype)); - } - - std::pair< BtreeNodePtr< K >, homeds::thread::locktype > pop() { - HS_ASSERT_CMP(DEBUG, m_nodes.size(), !=, 0); - std::pair< BtreeNodePtr< K >, homeds::thread::locktype > p; - if (m_nodes.size()) { - p = m_nodes.top(); - m_nodes.pop(); - } else { - p = std::make_pair<>(nullptr, homeds::thread::locktype::LOCKTYPE_NONE); - } - - return p; - } - - BtreeNodePtr< K > top() { return (m_nodes.size == 0) ? nullptr : m_nodes.top().first; } - -private: - btree_t m_bt; - std::stack< std::pair< BtreeNodePtr< K >, homeds::thread::locktype > > m_nodes; -}; -#endif - -} // namespace btree -} // namespace sisl diff --git a/src/btree/rough/sisl_btree_impl.hpp b/src/btree/rough/sisl_btree_impl.hpp deleted file mode 100644 index 107df40e..00000000 --- a/src/btree/rough/sisl_btree_impl.hpp +++ /dev/null @@ -1,1653 +0,0 @@ -#pragma once - -namespace sisl { -namespace btree { -template < typename K, typename V > -class BtreeImpl { -protected: - template < typename K, typename V > - btree_status_t Btree< K, V >::post_order_traversal(locktype_t ltype, - const std::function< void(const BtreeNodePtr< K >&) >& cb) { - BtreeNodePtr< K > root; - btree_status_t ret = read_and_lock_root(m_root_node, root, acq_lock, acq_lock, nullptr); - if (ret != btree_status_t::success) { - m_btree_lock.unlock(); - return ret; - } - - post_order_traversal(root, ltype, cb); - } - - template < typename K, typename V > - btree_status_t Btree< K, V >::post_order_traversal(const BtreeNodePtr< K >& node, locktype_t ltype, - const auto& cb) { - homeds::thread::locktype acq_lock = homeds::thread::LOCKTYPE_WRITE; - uint32_t i = 0; - btree_status_t ret = btree_status_t::success; - - if (!node->is_leaf()) { - BtreeNodeInfo child_info; - while (i <= node->get_total_entries()) { - if (i == node->get_total_entries()) { - if (!node->has_valid_edge()) { break; } - child_info.set_bnode_id(node->get_edge_id()); - } else { - child_info = node->get(i, false /* copy */); - } - - BtreeNodePtr< K > child; - ret = read_and_lock_child(child_info.bnode_id(), child, node, i, acq_lock, acq_lock, nullptr); - if (ret != btree_status_t::success) { return ret; } - ret = post_order_traversal(child, cb); - unlock_node(child, acq_lock); - ++i; - } - } - - if (ret != btree_status_t::success) { return ret; } - cb(node); - return ret; - } - - btree_status_t put_internal(const BtreeMutateRequest& put_req) { - COUNTER_INCREMENT(m_metrics, btree_write_ops_count, 1); - locktype acq_lock = locktype::READ; - int ind = -1; - bool is_leaf = false; - - // THIS_BT_LOG(INFO, base, , "Put called for key = {}, value = {}", k.to_string(), v.to_string()); - - m_btree_lock.read_lock(); - - btree_status_t ret = btree_status_t::success; - retry: - -#ifndef NDEBUG - check_lock_debug(); -#endif - BT_LOG_ASSERT_CMP(rd_locked_nodes.size(), ==, 0, ); - BT_LOG_ASSERT_CMP(wr_locked_nodes.size(), ==, 0, ); - - BtreeNodePtr< K > root; - ret = read_and_lock_root(m_root_node, root, acq_lock, acq_lock); - if (ret != btree_status_t::success) { goto out; } - is_leaf = root->is_leaf(); - - if (root->is_split_needed(m_bt_cfg, put_req)) { - // Time to do the split of root. - unlock_node(root, acq_lock); - m_btree_lock.unlock(); - ret = check_split_root(put_req); - BT_LOG_ASSERT_CMP(rd_locked_nodes.size(), ==, 0, ); - BT_LOG_ASSERT_CMP(wr_locked_nodes.size(), ==, 0, ); - - // We must have gotten a new root, need to start from scratch. - m_btree_lock.read_lock(); - - if (ret != btree_status_t::success) { - LOGERROR("root split failed btree name {}", m_bt_cfg.get_name()); - goto out; - } - - goto retry; - } else if ((is_leaf) && (acq_lock != homeds::thread::LOCKTYPE_WRITE)) { - // Root is a leaf, need to take write lock, instead of read, retry - unlock_node(root, acq_lock); - acq_lock = homeds::thread::LOCKTYPE_WRITE; - goto retry; - } else { - K subrange_start_key, subrange_end_key; - bool start_incl = false, end_incl = false; - if (is_range_update_req(put_req)) { - to_range_update_req(put_req)->get_input_range().copy_start_end_blob(subrange_start_key, start_incl, - subrange_end_key, end_incl); - } - BtreeSearchRange subrange(subrange_start_key, start_incl, subrange_end_key, end_incl); - ret = do_put(root, acq_lock, put_req, ind, subrange); - if (ret == btree_status_t::retry) { - // Need to start from top down again, since there is a race between 2 inserts or deletes. - acq_lock = homeds::thread::locktype_t::READ; - THIS_BT_LOG(TRACE, btree_generics, , "retrying put operation"); - BT_LOG_ASSERT_CMP(rd_locked_nodes.size(), ==, 0, ); - BT_LOG_ASSERT_CMP(wr_locked_nodes.size(), ==, 0, ); - goto retry; - } - } - - out: - m_btree_lock.unlock(); -#ifndef NDEBUG - check_lock_debug(); -#endif - if (ret != btree_status_t::success && ret != btree_status_t::fast_path_not_possible && - ret != btree_status_t::cp_mismatch) { - THIS_BT_LOG(ERROR, base, , "btree put failed {}", ret); - COUNTER_INCREMENT(m_metrics, write_err_cnt, 1); - } - - return ret; - } - - btree_status_t do_get(const BtreeNodePtr< K >& my_node, const BtreeSearchRange& range, BtreeKey* outkey, - BtreeValue* outval) const { - btree_status_t ret = btree_status_t::success; - bool is_child_lock = false; - homeds::thread::locktype child_locktype; - - if (my_node->is_leaf()) { - auto result = my_node->find(range, outkey, outval); - if (result.found) { - ret = btree_status_t::success; - } else { - ret = btree_status_t::not_found; - } - unlock_node(my_node, homeds::thread::locktype::locktype_t::READ); - return ret; - } - - BtreeNodeInfo child_info; - auto result = my_node->find(range, nullptr, &child_info); - ASSERT_IS_VALID_INTERIOR_CHILD_INDX(result, my_node); - - BtreeNodePtr< K > child_node; - child_locktype = homeds::thread::locktype_t::READ; - ret = read_and_lock_child(child_info.bnode_id(), child_node, my_node, result.end_of_search_index, - child_locktype, child_locktype, nullptr); - if (ret != btree_status_t::success) { goto out; } - - unlock_node(my_node, homeds::thread::locktype::locktype_t::READ); - - return (do_get(child_node, range, outkey, outval)); - out: - unlock_node(my_node, homeds::thread::locktype::locktype_t::READ); - return ret; - } - - btree_status_t do_remove(const BtreeNodePtr< K >& my_node, locktype curlock, const BtreeSearchRange& range, - BtreeKey* outkey, BtreeValue* outval) { - btree_status_t ret = btree_status_t::success; - if (my_node->is_leaf()) { - BT_DEBUG_ASSERT_CMP(curlock, ==, LOCKTYPE_WRITE, my_node); - -#ifndef NDEBUG - my_node->validate_key_order(); -#endif - bool is_found = my_node->remove_one(range, outkey, outval); -#ifndef NDEBUG - my_node->validate_key_order(); -#endif - if (is_found) { - write_node(my_node); - COUNTER_DECREMENT(m_metrics, btree_obj_count, 1); - } - - unlock_node(my_node, curlock); - return is_found ? btree_status_t::success : btree_status_t::not_found; - } - - retry: - locktype child_cur_lock = LOCKTYPE_NONE; - - /* range delete is not supported yet */ - // Get the childPtr for given key. - auto [found, ind] = my_node->find(range, nullptr, nullptr); - ASSERT_IS_VALID_INTERIOR_CHILD_INDX(result, my_node); - - BtreeNodeInfo child_info; - BtreeNodePtr< K > child_node; - ret = get_child_and_lock_node(my_node, ind, child_info, child_node, locktype_t::READ, LOCKTYPE_WRITE); - if (ret != btree_status_t::success) { - unlock_node(my_node, curlock); - return ret; - } - - // Check if child node is minimal. - child_cur_lock = child_node->is_leaf() ? LOCKTYPE_WRITE : locktype_t::READ; - if (child_node->is_merge_needed(m_bt_cfg)) { - // If we are unable to upgrade the node, ask the caller to retry. - ret = upgrade_node(my_node, child_node, curlock, child_cur_lock, bcp); - if (ret != btree_status_t::success) { - BT_DEBUG_ASSERT_CMP(curlock, ==, locktype::NONE, my_node) - return ret; - } - BT_DEBUG_ASSERT_CMP(curlock, ==, locktype::WRITE, my_node); - - uint32_t node_end_indx = - my_node->has_valid_edge() ? my_node->get_total_entries() : my_node->get_total_entries() - 1; - uint32_t end_ind = (ind + HS_DYNAMIC_CONFIG(btree->max_nodes_to_rebalance)) < node_end_indx - ? (ind + HS_DYNAMIC_CONFIG(btree->max_nodes_to_rebalance)) - : node_end_indx; - if (end_ind > ind) { - // It is safe to unlock child without upgrade, because child node would not be deleted, since its - // parent (myNode) is being write locked by this thread. In fact upgrading would be a problem, since - // this child might be a middle child in the list of indices, which means we might have to lock one - // in left against the direction of intended locking (which could cause deadlock). - unlock_node(child_node, child_cur_lock); - auto result = merge_nodes(my_node, ind, end_ind); - if (result != btree_status_t::success && result != btree_status_t::merge_not_required) { - // write or read failed - unlock_node(my_node, curlock); - return ret; - } - if (result == btree_status_t::success) { COUNTER_INCREMENT(m_metrics, btree_merge_count, 1); } - goto retry; - } - } - -#ifndef NDEBUG - if (ind != my_node->get_total_entries() && child_node->get_total_entries()) { // not edge - const auto ckey = child_node->get_last_key(); - const auto pkey = my_node->get_nth_key(ind, true); - BT_DEBUG_ASSERT_CMP(ckey.compare(&pkey), <=, 0, my_node); - } - - if (ind > 0 && child_node->get_total_entries()) { // not first child - const auto ckey = child_node->get_first_key(); - const auto pkey = my_node->get_nth_key(ind - 1, true); - BT_DEBUG_ASSERT_CMP(pkey.compare(&ckey), <, 0, my_node); - } -#endif - - unlock_node(my_node, curlock); - return (do_remove(child_node, child_cur_lock, range, outkey, outval)); - - // Warning: Do not access childNode or myNode beyond this point, since it would - // have been unlocked by the recursive function and it could also been deleted. - } - -private: - /* This function does the heavy lifiting of co-ordinating inserts. It is a recursive function which walks - * down the tree. - * - * NOTE: It expects the node it operates to be locked (either read or write) and also the node should not be - * full. - * - * Input: - * myNode = Node it operates on - * curLock = Type of lock held for this node - * put_req = Key to insert - * v = Value to insert - * ind_hint = If we already know which slot to insert to, if not -1 - * put_type = Type of the put (refer to structure btree_put_type) - * is_end_path = set to true only for last path from root to tree, for range put - * op = tracks multi node io. - */ - btree_status_t do_put(const BtreeNodePtr< K >& my_node, btree::locktype curlock, const BtreeMutateRequest& put_req, - int ind_hint, BtreeSearchRange& child_subrange) { - btree_status_t ret = btree_status_t::success; - bool unlocked_already = false; - int curr_ind = -1; - - if (my_node->is_leaf()) { - /* update the leaf node */ - BT_LOG_ASSERT_CMP(curlock, ==, LOCKTYPE_WRITE, my_node); - ret = update_leaf_node(my_node, put_req, child_subrange); - unlock_node(my_node, curlock); - return ret; - } - - bool is_any_child_splitted = false; - - retry: - int start_ind = 0, end_ind = -1; - - /* Get the start and end ind in a parent node for the range updates. For - * non range updates, start ind and end ind are same. - */ - ret = get_start_and_end_ind(my_node, put_req, start_ind, end_ind); - if (ret != btree_status_t::success) { goto out; } - - BT_DEBUG_ASSERT((curlock == locktype_t::READ || curlock == LOCKTYPE_WRITE), my_node, "unexpected locktype {}", - curlock); - curr_ind = start_ind; - - while (curr_ind <= end_ind) { // iterate all matched childrens - -#ifdef _PRERELEASE - if (curr_ind - start_ind > 1 && homestore_flip->test_flip("btree_leaf_node_split")) { - ret = btree_status_t::retry; - goto out; - } -#endif - - homeds::thread::locktype child_cur_lock = homeds::thread::LOCKTYPE_NONE; - - // Get the childPtr for given key. - BtreeNodeInfo child_info; - BtreeNodePtr< K > child_node; - - ret = get_child_and_lock_node(my_node, curr_ind, child_info, child_node, locktype_t::READ, LOCKTYPE_WRITE); - if (ret != btree_status_t::success) { - if (ret == btree_status_t::not_found) { - // Either the node was updated or mynode is freed. Just proceed again from top. - /* XXX: Is this case really possible as we always take the parent lock and never - * release it. - */ - ret = btree_status_t::retry; - } - goto out; - } - - // Directly get write lock for leaf, since its an insert. - child_cur_lock = (child_node->is_leaf()) ? LOCKTYPE_WRITE : locktype_t::READ; - - /* Get subrange if it is a range update */ - K start_key, end_key; - bool start_incl = false, end_incl = false; - if (is_range_update_req(put_req) && child_node->is_leaf()) { - /* We get the subrange only for leaf because this is where we will be inserting keys. In interior - * nodes, keys are always propogated from the lower nodes. - */ - get_subrange(my_node, put_req, curr_ind, start_key, end_key, start_incl, end_incl); - } - BtreeSearchRange subrange(start_key, start_incl, end_key, end_incl); - - /* check if child node is needed to split */ - bool split_occured = false; - ret = check_and_split_node(my_node, put_req, ind_hint, child_node, curlock, child_cur_lock, curr_ind, - split_occured); - if (ret != btree_status_t::success) { goto out; } - if (split_occured) { - ind_hint = -1; // Since split is needed, hint is no longer valid - goto retry; - } - - if (is_range_update_req(put_req) && child_node->is_leaf()) { - THIS_BT_LOG(DEBUG, btree_structures, my_node, "Subrange:s:{},e:{},c:{},nid:{},edgeid:{},sk:{},ek:{}", - start_ind, end_ind, curr_ind, my_node->get_node_id(), my_node->get_edge_id(), - subrange.get_start_key()->to_string(), subrange.get_end_key()->to_string()); - } - -#ifndef NDEBUG - K ckey, pkey; - if (curr_ind != int_cast(my_node->get_total_entries())) { // not edge - pkey = my_node->get_nth_key(curr_ind, true); - if (child_node->get_total_entries() != 0) { - ckey = child_node->get_last_key(); - if (!child_node->is_leaf()) { - HS_DEBUG_ASSERT_EQ(ckey.compare(pkey), 0); - } else { - HS_ASSERT_CMP(DEBUG, ckey.compare(pkey), <=, 0); - } - } - HS_DEBUG_ASSERT_EQ((is_range_update_req(put_req) || k.compare(pkey) <= 0), true); - } - if (curr_ind > 0) { // not first child - pkey = my_node->get_nth_key(curr_ind - 1, true); - if (child_node->get_total_entries() != 0) { - ckey = child_node->get_first_key(); - HS_ASSERT_CMP(DEBUG, pkey.compare(ckey), <=, 0); - } - HS_DEBUG_ASSERT_EQ((is_range_update_req(put_req) || k.compare(pkey) >= 0), true); - } -#endif - if (curr_ind == end_ind) { - // If we have reached the last index, unlock before traversing down, because we no longer need - // this lock. Holding this lock will impact performance unncessarily. - unlock_node(my_node, curlock); - curlock = LOCKTYPE_NONE; - } - -#ifndef NDEBUG - if (child_cur_lock == homeds::thread::LOCKTYPE_WRITE) { - HS_DEBUG_ASSERT_EQ(child_node->m_common_header.is_lock, true); - } -#endif - - ret = do_put(child_node, child_cur_lock, put_req, ind_hint, subrange); - if (ret != btree_status_t::success) { goto out; } - - curr_ind++; - } - out: - if (curlock != LOCKTYPE_NONE) { unlock_node(my_node, curlock); } - return ret; - // Warning: Do not access childNode or myNode beyond this point, since it would - // have been unlocked by the recursive function and it could also been deleted. - } - - void get_all_kvs(std::vector< pair< K, V > >& kvs) const { - // TODO: Isn't it better to do DFS traversal and get kvs instead of collecting all leafs. Its a non-scalable - // operation. - static thread_local std::vector< BtreeNodePtr< K > > leaves; - leaves.clear(); - get_all_leaf_nodes(leaves); - - for (auto& l : leaves) { - l->get_all_kvs(kvs); - } - leaves.clear(); - } - - uint64_t get_btree_node_cnt() const { - uint64_t cnt = 1; /* increment it for root */ - m_btree_lock.read_lock(); - cnt += get_child_node_cnt(m_root_node); - m_btree_lock.unlock(); - return cnt; - } - - uint64_t get_child_node_cnt(bnodeid_t bnodeid) const { - uint64_t cnt{0}; - BtreeNodePtr< K > node; - homeds::thread::locktype acq_lock = homeds::thread::locktype::locktype_t::READ; - - if (read_and_lock_node(bnodeid, node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { return cnt; } - if (!node->is_leaf()) { - uint32_t i = 0; - while (i < node->get_total_entries()) { - BtreeNodeInfo p = node->get(i, false); - cnt += get_child_node_cnt(p.bnode_id()) + 1; - ++i; - } - if (node->has_valid_edge()) { cnt += get_child_node_cnt(node->get_edge_id()) + 1; } - } - unlock_node(node, acq_lock); - return cnt; - } - - /* - * Get all leaf nodes from the read-only tree (CP tree, Snap Tree etc) - * NOTE: Doesn't take any lock - */ - void get_all_leaf_nodes(std::vector< BtreeNodePtr< K > >& leaves) const { - /* TODO: Add a flag to indicate RO tree - * TODO: Check the flag here - */ - get_leaf_nodes(m_root_node, leaves); - } - - // TODO: Remove the locks once we have RO flags - void get_leaf_nodes(bnodeid_t bnodeid, std::vector< BtreeNodePtr< K > >& leaves) const { - BtreeNodePtr< K > node; - if (read_and_lock_node(bnodeid, node, locktype_t::READ, locktype_t::READ, nullptr) != btree_status_t::success) { - return; - } - - if (node->is_leaf()) { - BtreeNodePtr< K > next_node = nullptr; - leaves.push_back(node); - while (node->next_bnode() != empty_bnodeid) { - auto ret = - read_and_lock_sibling(node->next_bnode(), next_node, locktype_t::READ, locktype_t::READ, nullptr); - unlock_node(node, locktype_t::READ); - HS_DEBUG_ASSERT_EQ(ret, btree_status_t::success); - if (ret != btree_status_t::success) { - LOGERROR("Cannot read sibling node for {}", node); - return; - } - HS_DEBUG_ASSERT_EQ(next_node->is_leaf(), true); - leaves.push_back(next_node); - node = next_node; - } - unlock_node(node, locktype_t::READ); - return; - } - - HS_ASSERT_CMP(DEBUG, node->get_total_entries(), >, 0); - if (node->get_total_entries() > 0) { - BtreeNodeInfo p = node->get(0, false); - // XXX If we cannot get rid of locks, lock child and release parent here - get_leaf_nodes(p.bnode_id(), leaves); - } - unlock_node(node, locktype_t::READ); - } - - void to_string(bnodeid_t bnodeid, std::string& buf) const { - BtreeNodePtr< K > node; - - homeds::thread::locktype acq_lock = homeds::thread::locktype::locktype_t::READ; - - if (read_and_lock_node(bnodeid, node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { return; } - fmt::format_to(std::back_inserter(buf), "{}\n", node->to_string(true /* print_friendly */)); - - if (!node->is_leaf()) { - uint32_t i = 0; - while (i < node->get_total_entries()) { - BtreeNodeInfo p; - node->get(i, &p, false); - to_string(p.bnode_id(), buf); - i++; - } - if (node->has_valid_edge()) { to_string(node->get_edge_id(), buf); } - } - unlock_node(node, acq_lock); - } - - /* This function upgrades the node lock and take required steps if things have - * changed during the upgrade. - * - * Inputs: - * myNode - Node to upgrade - * childNode - In case childNode needs to be unlocked. Could be nullptr - * curLock - Input/Output: current lock type - * - * Returns - If successfully able to upgrade, return true, else false. - * - * About Locks: This function expects the myNode to be locked and if childNode is not nullptr, expects - * it to be locked too. If it is able to successfully upgrade it continue to retain its - * old lock. If failed to upgrade, will release all locks. - */ - btree_status_t upgrade_node(const BtreeNodePtr< K >& my_node, BtreeNodePtr< K > child_node, - homeds::thread::locktype& cur_lock, homeds::thread::locktype& child_cur_lock, - const btree_cp_ptr& bcp) { - uint64_t prev_gen; - btree_status_t ret = btree_status_t::success; - homeds::thread::locktype child_lock_type = child_cur_lock; - - if (cur_lock == homeds::thread::LOCKTYPE_WRITE) { goto done; } - - prev_gen = my_node->get_gen(); - if (child_node) { - unlock_node(child_node, child_cur_lock); - child_cur_lock = locktype::LOCKTYPE_NONE; - } - -#ifdef _PRERELEASE - { - auto time = homestore_flip->get_test_flip< uint64_t >("btree_upgrade_delay"); - if (time) { std::this_thread::sleep_for(std::chrono::microseconds{time.get()}); } - } -#endif - ret = lock_node_upgrade(my_node, bcp); - if (ret != btree_status_t::success) { - cur_lock = locktype::LOCKTYPE_NONE; - return ret; - } - - // The node was not changed by anyone else during upgrade. - cur_lock = homeds::thread::LOCKTYPE_WRITE; - - // If the node has been made invalid (probably by mergeNodes) ask caller to start over again, but before - // that cleanup or free this node if there is no one waiting. - if (!my_node->is_valid_node()) { - unlock_node(my_node, homeds::thread::LOCKTYPE_WRITE); - cur_lock = locktype::LOCKTYPE_NONE; - ret = btree_status_t::retry; - goto done; - } - - // If node has been updated, while we have upgraded, ask caller to start all over again. - if (prev_gen != my_node->get_gen()) { - unlock_node(my_node, cur_lock); - cur_lock = locktype::LOCKTYPE_NONE; - ret = btree_status_t::retry; - goto done; - } - - if (child_node) { - ret = lock_and_refresh_node(child_node, child_lock_type, bcp); - if (ret != btree_status_t::success) { - unlock_node(my_node, cur_lock); - cur_lock = locktype::LOCKTYPE_NONE; - child_cur_lock = locktype::LOCKTYPE_NONE; - goto done; - } - child_cur_lock = child_lock_type; - } - -#ifdef _PRERELEASE - { - int is_leaf = 0; - - if (child_node && child_node->is_leaf()) { is_leaf = 1; } - if (homestore_flip->test_flip("btree_upgrade_node_fail", is_leaf)) { - unlock_node(my_node, cur_lock); - cur_lock = locktype::LOCKTYPE_NONE; - if (child_node) { - unlock_node(child_node, child_cur_lock); - child_cur_lock = locktype::LOCKTYPE_NONE; - } - ret = btree_status_t::retry; - goto done; - } - } -#endif - - BT_DEBUG_ASSERT_CMP(my_node->m_common_header.is_lock, ==, 1, my_node); - done: - return ret; - } - - btree_status_t update_leaf_node(const BtreeNodePtr< K >& my_node, const BtreeKey& k, const BtreeValue& v, - btree_put_type put_type, BtreeValue& existing_val, BtreeUpdateRequest< K, V >* bur, - const btree_cp_ptr& bcp, BtreeSearchRange& subrange) { - btree_status_t ret = btree_status_t::success; - if (bur != nullptr) { - // BT_DEBUG_ASSERT_CMP(bur->callback(), !=, nullptr, my_node); // TODO - range req without - // callback implementation - static thread_local std::vector< std::pair< K, V > > s_match; - s_match.clear(); - int start_ind = 0, end_ind = 0; - my_node->get_all(bur->get_input_range(), UINT32_MAX, start_ind, end_ind, &s_match); - - static thread_local std::vector< pair< K, V > > s_replace_kv; - s_replace_kv.clear(); - bur->get_cb_param()->node_version = my_node->get_version(); - ret = bur->callback()(s_match, s_replace_kv, bur->get_cb_param(), subrange); - if (ret != btree_status_t::success) { return ret; } - - HS_ASSERT_CMP(DEBUG, start_ind, <=, end_ind); - if (s_match.size() > 0) { my_node->remove(start_ind, end_ind); } - COUNTER_DECREMENT(m_metrics, btree_obj_count, s_match.size()); - - for (const auto& pair : s_replace_kv) { // insert is based on compare() of BtreeKey - auto status = my_node->insert(pair.first, pair.second); - BT_RELEASE_ASSERT((status == btree_status_t::success), my_node, "unexpected insert failure"); - COUNTER_INCREMENT(m_metrics, btree_obj_count, 1); - } - - /* update cursor in input range */ - auto end_key_ptr = const_cast< BtreeKey* >(subrange.get_end_key()); - bur->get_input_range().set_cursor_key( - end_key_ptr, ([](BtreeKey* end_key) { return std::move(std::make_unique< K >(*((K*)end_key))); })); - if (homestore::vol_test_run) { - // sorted check - for (auto i = 1u; i < my_node->get_total_entries(); i++) { - K curKey, prevKey; - my_node->get_nth_key(i - 1, &prevKey, false); - my_node->get_nth_key(i, &curKey, false); - if (prevKey.compare(&curKey) >= 0) { - LOGINFO("my_node {}", my_node->to_string()); - for (const auto& [k, v] : s_match) { - LOGINFO("match key {} value {}", k.to_string(), v.to_string()); - } - for (const auto& [k, v] : s_replace_kv) { - LOGINFO("replace key {} value {}", k.to_string(), v.to_string()); - } - } - BT_RELEASE_ASSERT_CMP(prevKey.compare(&curKey), <, 0, my_node); - } - } - } else { - if (!my_node->put(k, v, put_type, existing_val)) { ret = btree_status_t::put_failed; } - COUNTER_INCREMENT(m_metrics, btree_obj_count, 1); - } - - write_node(my_node, bcp); - return ret; - } - - btree_status_t get_start_and_end_ind(const BtreeNodePtr< K >& my_node, BtreeUpdateRequest< K, V >* bur, - const BtreeKey& k, int& start_ind, int& end_ind) { - - btree_status_t ret = btree_status_t::success; - if (bur != nullptr) { - /* just get start/end index from get_all. We don't release the parent lock until this - * key range is not inserted from start_ind to end_ind. - */ - my_node->get_all(bur->get_input_range(), UINT32_MAX, start_ind, end_ind); - } else { - auto result = my_node->find(k, nullptr, nullptr, true, true); - end_ind = start_ind = result.end_of_search_index; - ASSERT_IS_VALID_INTERIOR_CHILD_INDX(result, my_node); - } - - if (start_ind > end_ind) { - BT_LOG_ASSERT(false, my_node, "start ind {} greater than end ind {}", start_ind, end_ind); - ret = btree_status_t::retry; - } - return ret; - } - - /* It split the child if a split is required. It releases lock on parent and child_node in case of failure */ - btree_status_t check_and_split_node(const BtreeNodePtr< K >& my_node, BtreeUpdateRequest< K, V >* bur, - const BtreeKey& k, const BtreeValue& v, int ind_hint, btree_put_type put_type, - BtreeNodePtr< K > child_node, homeds::thread::locktype& curlock, - homeds::thread::locktype& child_curlock, int child_ind, bool& split_occured, - const btree_cp_ptr& bcp) { - - split_occured = false; - K split_key; - btree_status_t ret = btree_status_t::success; - auto child_lock_type = child_curlock; - auto none_lock_type = LOCKTYPE_NONE; - -#ifdef _PRERELEASE - boost::optional< int > time; - if (child_node->is_leaf()) { - time = homestore_flip->get_test_flip< int >("btree_delay_and_split_leaf", child_node->get_total_entries()); - } else { - time = homestore_flip->get_test_flip< int >("btree_delay_and_split", child_node->get_total_entries()); - } - if (time && child_node->get_total_entries() > 2) { - std::this_thread::sleep_for(std::chrono::microseconds{time.get()}); - } else -#endif - { - if (!child_node->is_split_needed(m_cfg, k, v, &ind_hint, put_type, bur)) { return ret; } - } - - /* Split needed */ - if (bur) { - - /* In case of range update we might split multiple childs of a parent in a single - * iteration which result into less space in the parent node. - */ -#ifdef _PRERELEASE - if (homestore_flip->test_flip("btree_parent_node_full")) { - ret = btree_status_t::retry; - goto out; - } -#endif - if (my_node->is_split_needed(m_cfg, k, v, &ind_hint, put_type, bur)) { - // restart from root - ret = btree_status_t::retry; - goto out; - } - } - - // Time to split the child, but we need to convert parent to write lock - ret = upgrade_node(my_node, child_node, curlock, child_curlock, bcp); - if (ret != btree_status_t::success) { - THIS_BT_LOG(DEBUG, btree_structures, my_node, "Upgrade of node lock failed, retrying from root"); - BT_LOG_ASSERT_CMP(curlock, ==, homeds::thread::LOCKTYPE_NONE, my_node); - goto out; - } - BT_LOG_ASSERT_CMP(child_curlock, ==, child_lock_type, my_node); - BT_LOG_ASSERT_CMP(curlock, ==, homeds::thread::LOCKTYPE_WRITE, my_node); - - // We need to upgrade the child to WriteLock - ret = upgrade_node(child_node, nullptr, child_curlock, none_lock_type, bcp); - if (ret != btree_status_t::success) { - THIS_BT_LOG(DEBUG, btree_structures, child_node, "Upgrade of child node lock failed, retrying from root"); - BT_LOG_ASSERT_CMP(child_curlock, ==, homeds::thread::LOCKTYPE_NONE, child_node); - goto out; - } - BT_LOG_ASSERT_CMP(none_lock_type, ==, homeds::thread::LOCKTYPE_NONE, my_node); - BT_LOG_ASSERT_CMP(child_curlock, ==, homeds::thread::LOCKTYPE_WRITE, child_node); - - // Real time to split the node and get point at which it was split - ret = split_node(my_node, child_node, child_ind, &split_key, bcp); - if (ret != btree_status_t::success) { goto out; } - - // After split, retry search and walk down. - unlock_node(child_node, homeds::thread::LOCKTYPE_WRITE); - child_curlock = LOCKTYPE_NONE; - COUNTER_INCREMENT(m_metrics, btree_split_count, 1); - split_occured = true; - out: - if (ret != btree_status_t::success) { - if (curlock != LOCKTYPE_NONE) { - unlock_node(my_node, curlock); - curlock = LOCKTYPE_NONE; - } - - if (child_curlock != LOCKTYPE_NONE) { - unlock_node(child_node, child_curlock); - child_curlock = LOCKTYPE_NONE; - } - } - return ret; - } - - /* This function is called for the interior nodes whose childs are leaf nodes to calculate the sub range */ - void get_subrange(const BtreeNodePtr< K >& my_node, BtreeUpdateRequest< K, V >* bur, int curr_ind, - K& subrange_start_key, K& subrange_end_key, bool& subrange_start_inc, bool& subrange_end_inc) { - -#ifndef NDEBUG - if (curr_ind > 0) { - /* start of subrange will always be more then the key in curr_ind - 1 */ - K start_key; - BtreeKey* start_key_ptr = &start_key; - - my_node->get_nth_key(curr_ind - 1, start_key_ptr, false); - HS_ASSERT_CMP(DEBUG, start_key_ptr->compare(bur->get_input_range().get_start_key()), <=, 0); - } -#endif - - // find end of subrange - bool end_inc = true; - K end_key; - BtreeKey* end_key_ptr = &end_key; - - if (curr_ind < (int)my_node->get_total_entries()) { - my_node->get_nth_key(curr_ind, end_key_ptr, false); - if (end_key_ptr->compare(bur->get_input_range().get_end_key()) >= 0) { - /* this is last index to process as end of range is smaller then key in this node */ - end_key_ptr = const_cast< BtreeKey* >(bur->get_input_range().get_end_key()); - end_inc = bur->get_input_range().is_end_inclusive(); - } else { - end_inc = true; - } - } else { - /* it is the edge node. end key is the end of input range */ - BT_LOG_ASSERT_CMP(my_node->has_valid_edge(), ==, true, my_node); - end_key_ptr = const_cast< BtreeKey* >(bur->get_input_range().get_end_key()); - end_inc = bur->get_input_range().is_end_inclusive(); - } - - BtreeSearchRange& input_range = bur->get_input_range(); - auto start_key_ptr = input_range.get_start_key(); - subrange_start_key.copy_blob(start_key_ptr->get_blob()); - subrange_end_key.copy_blob(end_key_ptr->get_blob()); - subrange_start_inc = input_range.is_start_inclusive(); - subrange_end_inc = end_inc; - - auto ret = subrange_start_key.compare(&subrange_end_key); - BT_RELEASE_ASSERT_CMP(ret, <=, 0, my_node); - ret = subrange_start_key.compare(bur->get_input_range().get_end_key()); - BT_RELEASE_ASSERT_CMP(ret, <=, 0, my_node); - /* We don't neeed to update the start at it is updated when entries are inserted in leaf nodes */ - } - - btree_status_t check_split_root(const BtreeMutateRequest& put_req) { - int ind; - K split_key; - BtreeNodePtr< K > child_node = nullptr; - btree_status_t ret = btree_status_t::success; - - m_btree_lock.write_lock(); - BtreeNodePtr< K > root; - - ret = read_and_lock_root(m_root_node, root, locktype::LOCKTYPE_WRITE, locktype::LOCKTYPE_WRITE); - if (ret != btree_status_t::success) { goto done; } - - if (!root->is_split_needed(m_cfg, put_req)) { - unlock_node(root, homeds::thread::LOCKTYPE_WRITE); - goto done; - } - - // Create a new child node and split them - child_node = alloc_interior_node(); - if (child_node == nullptr) { - ret = btree_status_t::space_not_avail; - unlock_node(root, homeds::thread::LOCKTYPE_WRITE); - goto done; - } - - /* it swap the data while keeping the nodeid same */ - btree_store_t::swap_node(m_btree_store.get(), root, child_node); - write_node(child_node); - - THIS_BT_LOG(DEBUG, btree_structures, root, - "Root node is full, swapping contents with child_node {} and split that", - child_node->get_node_id()); - - BT_DEBUG_ASSERT_CMP(root->get_total_entries(), ==, 0, root); - ret = split_node(root, child_node, root->get_total_entries(), &split_key, true); - BT_DEBUG_ASSERT_CMP(m_root_node, ==, root->get_node_id(), root); - - if (ret != btree_status_t::success) { - btree_store_t::swap_node(m_btree_store.get(), child_node, root); - write_node(child_node); - } - - /* unlock child node */ - unlock_node(root, homeds::thread::LOCKTYPE_WRITE); - - if (ret == btree_status_t::success) { COUNTER_INCREMENT(m_metrics, btree_depth, 1); } - done: - m_btree_lock.unlock(); - return ret; - } - - btree_status_t check_collapse_root(const btree_cp_ptr& bcp) { - BtreeNodePtr< K > child_node = nullptr; - btree_status_t ret = btree_status_t::success; - std::vector< BtreeNodePtr< K > > old_nodes; - std::vector< BtreeNodePtr< K > > new_nodes; - - m_btree_lock.write_lock(); - BtreeNodePtr< K > root; - - ret = read_and_lock_root(m_root_node, root, locktype::LOCKTYPE_WRITE, locktype::LOCKTYPE_WRITE, bcp); - if (ret != btree_status_t::success) { goto done; } - - if (root->get_total_entries() != 0 || root->is_leaf() /*some other thread collapsed root already*/) { - unlock_node(root, locktype::LOCKTYPE_WRITE); - goto done; - } - - BT_DEBUG_ASSERT_CMP(root->has_valid_edge(), ==, true, root); - ret = read_node(root->get_edge_id(), child_node); - if (child_node == nullptr) { - unlock_node(root, locktype::LOCKTYPE_WRITE); - goto done; - } - - // Elevate the edge child as root. - btree_store_t::swap_node(m_btree_store.get(), root, child_node); - write_node(root, bcp); - BT_DEBUG_ASSERT_CMP(m_root_node, ==, root->get_node_id(), root); - - old_nodes.push_back(child_node); - - if (BtreeStoreType == btree_store_type::SSD_BTREE) { - auto j_iob = btree_store_t::make_journal_entry(journal_op::BTREE_MERGE, true /* is_root */, bcp); - btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::inplace_write, root, bcp); - btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::removal, child_node, bcp); - btree_store_t::write_journal_entry(m_btree_store.get(), bcp, j_iob); - } - unlock_node(root, locktype::LOCKTYPE_WRITE); - free_node(child_node, (bcp ? bcp->free_blkid_list : nullptr)); - - if (ret == btree_status_t::success) { COUNTER_DECREMENT(m_metrics, btree_depth, 1); } - done: - m_btree_lock.unlock(); - return ret; - } - - btree_status_t split_node(const BtreeNodePtr< K >& parent_node, BtreeNodePtr< K > child_node, uint32_t parent_ind, - BtreeKey* out_split_key, const btree_cp_ptr& bcp, bool root_split = false) { - BtreeNodeInfo ninfo; - BtreeNodePtr< K > child_node1 = child_node; - BtreeNodePtr< K > child_node2 = child_node1->is_leaf() ? alloc_leaf_node() : alloc_interior_node(); - - if (child_node2 == nullptr) { return (btree_status_t::space_not_avail); } - - btree_status_t ret = btree_status_t::success; - - child_node2->set_next_bnode(child_node1->next_bnode()); - child_node1->set_next_bnode(child_node2->get_node_id()); - uint32_t child1_filled_size = m_cfg.get_node_area_size() - child_node1->get_available_size(m_cfg); - - auto split_size = m_cfg.get_split_size(child1_filled_size); - uint32_t res = child_node1->move_out_to_right_by_size(m_cfg, child_node2, split_size); - - BT_RELEASE_ASSERT_CMP(res, >, 0, child_node1, - "Unable to split entries in the child node"); // means cannot split entries - BT_DEBUG_ASSERT_CMP(child_node1->get_total_entries(), >, 0, child_node1); - - // Update the existing parent node entry to point to second child ptr. - bool edge_split = (parent_ind == parent_node->get_total_entries()); - ninfo.set_bnode_id(child_node2->get_node_id()); - parent_node->update(parent_ind, ninfo); - - // Insert the last entry in first child to parent node - child_node1->get_last_key(out_split_key); - ninfo.set_bnode_id(child_node1->get_node_id()); - - /* If key is extent then we always insert the end key in the parent node */ - K out_split_end_key; - out_split_end_key.copy_end_key_blob(out_split_key->get_blob()); - parent_node->insert(out_split_end_key, ninfo); - -#ifndef NDEBUG - K split_key; - child_node2->get_first_key(&split_key); - BT_DEBUG_ASSERT_CMP(split_key.compare(out_split_key), >, 0, child_node2); -#endif - THIS_BT_LOG(DEBUG, btree_structures, parent_node, "Split child_node={} with new_child_node={}, split_key={}", - child_node1->get_node_id(), child_node2->get_node_id(), out_split_key->to_string()); - - if (BtreeStoreType == btree_store_type::SSD_BTREE) { - auto j_iob = btree_store_t::make_journal_entry(journal_op::BTREE_SPLIT, root_split, bcp, - {parent_node->get_node_id(), parent_node->get_gen()}); - btree_store_t::append_node_to_journal( - j_iob, (root_split ? bt_journal_node_op::creation : bt_journal_node_op::inplace_write), child_node1, - bcp, out_split_end_key.get_blob()); - - // For root split or split around the edge, we don't write the key, which will cause replay to insert - // edge - if (edge_split) { - btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::creation, child_node2, bcp); - } else { - K child2_pkey; - parent_node->get_nth_key(parent_ind, &child2_pkey, true); - btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::creation, child_node2, bcp, - child2_pkey.get_blob()); - } - btree_store_t::write_journal_entry(m_btree_store.get(), bcp, j_iob); - } - - // we write right child node, than left and than parent child - write_node(child_node2, nullptr, bcp); - write_node(child_node1, child_node2, bcp); - write_node(parent_node, child_node1, bcp); - - // NOTE: Do not access parentInd after insert, since insert would have - // shifted parentNode to the right. - return ret; - } - - btree_status_t create_btree_replay(btree_journal_entry* jentry, const btree_cp_ptr& bcp) { - if (jentry) { - BT_DEBUG_ASSERT_CMP(jentry->is_root, ==, true, , - "Expected create_btree_replay entry to be root journal entry"); - BT_DEBUG_ASSERT_CMP(jentry->parent_node.get_id(), ==, m_root_node, , "Root node journal entry mismatch"); - } - - // Create a root node by reserving the leaf node - BtreeNodePtr< K > root = reserve_leaf_node(BlkId(m_root_node)); - auto ret = write_node(root, nullptr, bcp); - BT_DEBUG_ASSERT_CMP(ret, ==, btree_status_t::success, , "expecting success in writing root node"); - return btree_status_t::success; - } - - btree_status_t split_node_replay(btree_journal_entry* jentry, const btree_cp_ptr& bcp) { - bnodeid_t id = jentry->is_root ? m_root_node : jentry->parent_node.node_id; - BtreeNodePtr< K > parent_node; - - // read parent node - read_node_or_fail(id, parent_node); - - // Parent already went ahead of the journal entry, return done - if (parent_node->get_gen() >= jentry->parent_node.node_gen) { - THIS_BT_LOG(INFO, base, , - "Journal replay: parent_node gen {} ahead of jentry gen {} is root {} , skipping ", - parent_node->get_gen(), jentry->parent_node.get_gen(), jentry->is_root); - return btree_status_t::replay_not_needed; - } - - // Read the first inplace write node which is the leftmost child and also form child split key from journal - auto j_child_nodes = jentry->get_nodes(); - - BtreeNodePtr< K > child_node1; - if (jentry->is_root) { - // If root is not written yet, parent_node will be pointing child_node1, so create a new parent_node to - // be treated as root here on. - child_node1 = reserve_interior_node(BlkId(j_child_nodes[0]->node_id())); - btree_store_t::swap_node(m_btree_store.get(), parent_node, child_node1); - - THIS_BT_LOG(INFO, btree_generics, , - "Journal replay: root split, so creating child_node id={} and swapping the node with " - "parent_node id={} names {}", - child_node1->get_node_id(), parent_node->get_node_id(), m_cfg.get_name()); - - } else { - read_node_or_fail(j_child_nodes[0]->node_id(), child_node1); - } - - THIS_BT_LOG(INFO, btree_generics, , - "Journal replay: child_node1 => jentry: [id={} gen={}], ondisk: [id={} gen={}] names {}", - j_child_nodes[0]->node_id(), j_child_nodes[0]->node_gen(), child_node1->get_node_id(), - child_node1->get_gen(), m_cfg.get_name()); - if (jentry->is_root) { - BT_RELEASE_ASSERT_CMP(j_child_nodes[0]->type, ==, bt_journal_node_op::creation, , - "Expected first node in journal entry to be new creation for root split"); - } else { - BT_RELEASE_ASSERT_CMP(j_child_nodes[0]->type, ==, bt_journal_node_op::inplace_write, , - "Expected first node in journal entry to be in-place write"); - } - BT_RELEASE_ASSERT_CMP(j_child_nodes[1]->type, ==, bt_journal_node_op::creation, , - "Expected second node in journal entry to be new node creation"); - - // recover child node - bool child_split = recover_child_nodes_in_split(child_node1, j_child_nodes, bcp); - - // recover parent node - recover_parent_node_in_split(parent_node, child_split ? child_node1 : nullptr, j_child_nodes, bcp); - return btree_status_t::success; - } - - bool recover_child_nodes_in_split(const BtreeNodePtr< K >& child_node1, - const std::vector< bt_journal_node_info* >& j_child_nodes, - const btree_cp_ptr& bcp) { - - BtreeNodePtr< K > child_node2; - // Check if child1 is ahead of the generation - if (child_node1->get_gen() >= j_child_nodes[0]->node_gen()) { - // leftmost_node is written, so right node must have been written as well. - read_node_or_fail(child_node1->next_bnode(), child_node2); - - // sanity check for right node - BT_RELEASE_ASSERT_CMP(child_node2->get_gen(), >=, j_child_nodes[1]->node_gen(), child_node2, - "gen cnt should be more than the journal entry"); - // no need to recover child nodes - return false; - } - - K split_key; - split_key.set_blob({j_child_nodes[0]->key_area(), j_child_nodes[0]->key_size}); - child_node2 = child_node1->is_leaf() ? reserve_leaf_node(BlkId(j_child_nodes[1]->node_id())) - : reserve_interior_node(BlkId(j_child_nodes[1]->node_id())); - - // We need to do split based on entries since the left children is also not written yet. - // Find the split key within the child_node1. It is not always found, so we split upto that. - auto ret = child_node1->find(split_key, nullptr, false); - - // sanity check for left mode node before recovery - { - if (!ret.found) { - if (!child_node1->is_leaf()) { - BT_RELEASE_ASSERT(0, , "interior nodes should always have this key if it is written yet"); - } - } - } - - THIS_BT_LOG(INFO, btree_generics, , "Journal replay: split key {}, split indx {} child_node1 {}", - split_key.to_string(), ret.end_of_search_index, child_node1->to_string()); - /* if it is not found than end_of_search_index points to first ind which is greater than split key */ - auto split_ind = ret.end_of_search_index; - if (ret.found) { ++split_ind; } // we don't want to move split key */ - if (child_node1->is_leaf() && split_ind < (int)child_node1->get_total_entries()) { - K key; - child_node1->get_nth_key(split_ind, &key, false); - - if (split_key.compare_start(&key) >= 0) { /* we need to split the key range */ - THIS_BT_LOG(INFO, btree_generics, , "splitting a leaf node key {}", key.to_string()); - V v; - child_node1->get_nth_value(split_ind, &v, false); - vector< pair< K, V > > replace_kv; - child_node1->remove(split_ind, split_ind); - m_split_key_cb(key, v, split_key, replace_kv); - for (auto& pair : replace_kv) { - auto status = child_node1->insert(pair.first, pair.second); - BT_RELEASE_ASSERT((status == btree_status_t::success), child_node1, "unexpected insert failure"); - } - auto ret = child_node1->find(split_key, nullptr, false); - BT_RELEASE_ASSERT((ret.found && (ret.end_of_search_index == split_ind)), child_node1, - "found new indx {}, old split indx{}", ret.end_of_search_index, split_ind); - ++split_ind; - } - } - child_node1->move_out_to_right_by_entries(m_cfg, child_node2, child_node1->get_total_entries() - split_ind); - - child_node2->set_next_bnode(child_node1->next_bnode()); - child_node2->set_gen(j_child_nodes[1]->node_gen()); - - child_node1->set_next_bnode(child_node2->get_node_id()); - child_node1->set_gen(j_child_nodes[0]->node_gen()); - - THIS_BT_LOG(INFO, btree_generics, , "Journal replay: child_node2 {}", child_node2->to_string()); - write_node(child_node2, nullptr, bcp); - write_node(child_node1, child_node2, bcp); - return true; - } - - void recover_parent_node_in_split(const BtreeNodePtr< K >& parent_node, const BtreeNodePtr< K >& child_node1, - std::vector< bt_journal_node_info* >& j_child_nodes, const btree_cp_ptr& bcp) { - - // find child_1 key - K child1_key; // we need to insert child1_key - BT_RELEASE_ASSERT_CMP(j_child_nodes[0]->key_size, !=, 0, , "key size of left mode node is zero"); - child1_key.set_blob({j_child_nodes[0]->key_area(), j_child_nodes[0]->key_size}); - auto child1_node_id = j_child_nodes[0]->node_id(); - - // find split indx - auto ret = parent_node->find(child1_key, nullptr, false); - BT_RELEASE_ASSERT_CMP(ret.found, ==, false, , "child_1 key should not be in this parent"); - auto split_indx = ret.end_of_search_index; - - // find child2_key - K child2_key; // we only need to update child2_key to new node - if (j_child_nodes[1]->key_size != 0) { - child2_key.set_blob({j_child_nodes[1]->key_area(), j_child_nodes[1]->key_size}); - ret = parent_node->find(child2_key, nullptr, false); - BT_RELEASE_ASSERT_CMP(split_indx, ==, ret.end_of_search_index, , "it should be same as split index"); - } else { - // parent should be valid edge it is not a root split - } - auto child2_node_id = j_child_nodes[1]->node_id(); - - // update child2_key value - BtreeNodeInfo ninfo; - ninfo.set_bnode_id(child2_node_id); - parent_node->update(split_indx, ninfo); - - // insert child 1 - ninfo.set_bnode_id(child1_node_id); - K out_split_end_key; - out_split_end_key.copy_end_key_blob(child1_key.get_blob()); - parent_node->insert(out_split_end_key, ninfo); - - // Write the parent node - write_node(parent_node, child_node1, bcp); - - /* do sanity check after recovery split */ - { - validate_sanity_child(parent_node, split_indx); - validate_sanity_next_child(parent_node, split_indx); - } - } - - btree_status_t merge_nodes(const BtreeNodePtr< K >& parent_node, uint32_t start_indx, uint32_t end_indx, - const btree_cp_ptr& bcp) { - btree_status_t ret = btree_status_t::merge_failed; - std::vector< BtreeNodePtr< K > > child_nodes; - std::vector< BtreeNodePtr< K > > old_nodes; - std::vector< BtreeNodePtr< K > > replace_nodes; - std::vector< BtreeNodePtr< K > > new_nodes; - std::vector< BtreeNodePtr< K > > deleted_nodes; - BtreeNodePtr< K > left_most_node; - K last_pkey; // last key of parent node - bool last_pkey_valid = false; - uint32_t balanced_size; - BtreeNodePtr< K > merge_node; - K last_ckey; // last key in child - uint32_t parent_insert_indx = start_indx; -#ifndef NDEBUG - uint32_t total_child_entries = 0; - uint32_t new_entries = 0; - K last_debug_ckey; - K new_last_debug_ckey; - BtreeNodePtr< K > last_node; -#endif - /* Try to take a lock on all nodes participating in merge*/ - for (auto indx = start_indx; indx <= end_indx; ++indx) { - if (indx == parent_node->get_total_entries()) { - BT_LOG_ASSERT(parent_node->has_valid_edge(), parent_node, - "Assertion failure, expected valid edge for parent_node: {}"); - } - - BtreeNodeInfo child_info; - parent_node->get(indx, &child_info, false /* copy */); - - BtreeNodePtr< K > child; - ret = read_and_lock_node(child_info.bnode_id(), child, locktype::LOCKTYPE_WRITE, locktype::LOCKTYPE_WRITE, - bcp); - if (ret != btree_status_t::success) { goto out; } - BT_LOG_ASSERT_CMP(child->is_valid_node(), ==, true, child); - - /* check if left most node has space */ - if (indx == start_indx) { - balanced_size = m_cfg.get_ideal_fill_size(); - left_most_node = child; - if (left_most_node->get_occupied_size(m_cfg) > balanced_size) { - /* first node doesn't have any free space. we can exit now */ - ret = btree_status_t::merge_not_required; - goto out; - } - } else { - bool is_allocated = true; - /* pre allocate the new nodes. We will free the nodes which are not in use later */ - auto new_node = btree_store_t::alloc_node(m_btree_store.get(), child->is_leaf(), is_allocated, child); - if (is_allocated) { - /* we are going to allocate new blkid of all the nodes except the first node. - * Note :- These blkids will leak if we fail or crash before writing entry into - * journal. - */ - old_nodes.push_back(child); - COUNTER_INCREMENT_IF_ELSE(m_metrics, child->is_leaf(), btree_leaf_node_count, btree_int_node_count, - 1); - } - /* Blk IDs can leak if it crash before writing it to a journal */ - if (new_node == nullptr) { - ret = btree_status_t::space_not_avail; - goto out; - } - new_nodes.push_back(new_node); - } -#ifndef NDEBUG - total_child_entries += child->get_total_entries(); - child->get_last_key(&last_debug_ckey); -#endif - child_nodes.push_back(child); - } - - if (end_indx != parent_node->get_total_entries()) { - /* If it is not edge we always preserve the last key in a given merge group of nodes.*/ - parent_node->get_nth_key(end_indx, &last_pkey, true); - last_pkey_valid = true; - } - - merge_node = left_most_node; - /* We can not fail from this point. Nodes will be modified in memory. */ - for (uint32_t i = 0; i < new_nodes.size(); ++i) { - auto occupied_size = merge_node->get_occupied_size(m_cfg); - if (occupied_size < balanced_size) { - uint32_t pull_size = balanced_size - occupied_size; - merge_node->move_in_from_right_by_size(m_cfg, new_nodes[i], pull_size); - if (new_nodes[i]->get_total_entries() == 0) { - /* this node is freed */ - deleted_nodes.push_back(new_nodes[i]); - continue; - } - } - - /* update the last key of merge node in parent node */ - K last_ckey; // last key in child - merge_node->get_last_key(&last_ckey); - BtreeNodeInfo ninfo(merge_node->get_node_id()); - parent_node->update(parent_insert_indx, last_ckey, ninfo); - ++parent_insert_indx; - - merge_node->set_next_bnode(new_nodes[i]->get_node_id()); // link them - merge_node = new_nodes[i]; - if (merge_node != left_most_node) { - /* left most node is not replaced */ - replace_nodes.push_back(merge_node); - } - } - - /* update the latest merge node */ - merge_node->get_last_key(&last_ckey); - if (last_pkey_valid) { - BT_DEBUG_ASSERT_CMP(last_ckey.compare(&last_pkey), <=, 0, parent_node); - last_ckey = last_pkey; - } - - /* update the last key */ - { - BtreeNodeInfo ninfo(merge_node->get_node_id()); - parent_node->update(parent_insert_indx, last_ckey, ninfo); - ++parent_insert_indx; - } - - /* remove the keys which are no longer used */ - if ((parent_insert_indx) <= end_indx) { parent_node->remove(parent_insert_indx, end_indx); } - - /* write the journal entry */ - if (BtreeStoreType == btree_store_type::SSD_BTREE) { - auto j_iob = btree_store_t::make_journal_entry(journal_op::BTREE_MERGE, false /* is_root */, bcp, - {parent_node->get_node_id(), parent_node->get_gen()}); - K child_pkey; - if (start_indx < parent_node->get_total_entries()) { - parent_node->get_nth_key(start_indx, &child_pkey, true); - BT_RELEASE_ASSERT_CMP(start_indx, ==, (parent_insert_indx - 1), parent_node, "it should be last index"); - } - btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::inplace_write, left_most_node, bcp, - child_pkey.get_blob()); - for (auto& node : old_nodes) { - btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::removal, node, bcp); - } - uint32_t insert_indx = 0; - for (auto& node : replace_nodes) { - K child_pkey; - if ((start_indx + insert_indx) < parent_node->get_total_entries()) { - parent_node->get_nth_key(start_indx + insert_indx, &child_pkey, true); - BT_RELEASE_ASSERT_CMP((start_indx + insert_indx), ==, (parent_insert_indx - 1), parent_node, - "it should be last index"); - } - btree_store_t::append_node_to_journal(j_iob, bt_journal_node_op::creation, node, bcp, - child_pkey.get_blob()); - ++insert_indx; - } - BT_RELEASE_ASSERT_CMP((start_indx + insert_indx), ==, parent_insert_indx, parent_node, "it should be same"); - btree_store_t::write_journal_entry(m_btree_store.get(), bcp, j_iob); - } - - if (replace_nodes.size() > 0) { - /* write the right most node */ - write_node(replace_nodes[replace_nodes.size() - 1], nullptr, bcp); - if (replace_nodes.size() > 1) { - /* write the middle nodes */ - for (int i = replace_nodes.size() - 2; i >= 0; --i) { - write_node(replace_nodes[i], replace_nodes[i + 1], bcp); - } - } - /* write the left most node */ - write_node(left_most_node, replace_nodes[0], bcp); - } else { - /* write the left most node */ - write_node(left_most_node, nullptr, bcp); - } - - /* write the parent node */ - write_node(parent_node, left_most_node, bcp); - -#ifndef NDEBUG - for (const auto& n : replace_nodes) { - new_entries += n->get_total_entries(); - } - - new_entries += left_most_node->get_total_entries(); - HS_DEBUG_ASSERT_EQ(total_child_entries, new_entries); - - if (replace_nodes.size()) { - replace_nodes[replace_nodes.size() - 1]->get_last_key(&new_last_debug_ckey); - last_node = replace_nodes[replace_nodes.size() - 1]; - } else { - left_most_node->get_last_key(&new_last_debug_ckey); - last_node = left_most_node; - } - if (last_debug_ckey.compare(&new_last_debug_ckey) != 0) { - LOGINFO("{}", last_node->to_string()); - if (deleted_nodes.size() > 0) { LOGINFO("{}", (deleted_nodes[deleted_nodes.size() - 1]->to_string())); } - HS_DEBUG_ASSERT(false, "compared failed"); - } -#endif - /* free nodes. It actually gets freed after cp is completed */ - for (const auto& n : old_nodes) { - free_node(n, (bcp ? bcp->free_blkid_list : nullptr)); - } - for (const auto& n : deleted_nodes) { - free_node(n); - } - ret = btree_status_t::success; - out: -#ifndef NDEBUG - uint32_t freed_entries = deleted_nodes.size(); - uint32_t scan_entries = end_indx - start_indx - freed_entries + 1; - for (uint32_t i = 0; i < scan_entries; ++i) { - if (i < (scan_entries - 1)) { validate_sanity_next_child(parent_node, (uint32_t)start_indx + i); } - validate_sanity_child(parent_node, (uint32_t)start_indx + i); - } -#endif - // Loop again in reverse order to unlock the nodes. freeable nodes need to be unlocked and freed - for (uint32_t i = child_nodes.size() - 1; i != 0; i--) { - unlock_node(child_nodes[i], locktype::LOCKTYPE_WRITE); - } - unlock_node(child_nodes[0], locktype::LOCKTYPE_WRITE); - if (ret != btree_status_t::success) { - /* free the allocated nodes */ - for (const auto& n : new_nodes) { - free_node(n); - } - } - return ret; - } - -#if 0 - btree_status_t merge_node_replay(btree_journal_entry* jentry, const btree_cp_ptr& bcp) { - BtreeNodePtr< K > parent_node = (jentry->is_root) ? read_node(m_root_node) : read_node(jentry->parent_node.node_id); - - // Parent already went ahead of the journal entry, return done - if (parent_node->get_gen() >= jentry->parent_node.node_gen) { return btree_status_t::replay_not_needed; } - } -#endif - - void validate_sanity_child(const BtreeNodePtr< K >& parent_node, uint32_t ind) { - BtreeNodeInfo child_info; - K child_first_key; - K child_last_key; - K parent_key; - - parent_node->get(ind, &child_info, false /* copy */); - BtreeNodePtr< K > child_node = nullptr; - auto ret = read_node(child_info.bnode_id(), child_node); - BT_REL_ASSERT_EQ(ret, btree_status_t::success, "read failed, reason: {}", ret); - if (child_node->get_total_entries() == 0) { - auto parent_entries = parent_node->get_total_entries(); - if (!child_node->is_leaf()) { // leaf node or edge node can have 0 entries - BT_REL_ASSERT_EQ(((parent_node->has_valid_edge() && ind == parent_entries)), true); - } - return; - } - child_node->get_first_key(&child_first_key); - child_node->get_last_key(&child_last_key); - BT_REL_ASSERT_LE(child_first_key.compare(&child_last_key), 0) - if (ind == parent_node->get_total_entries()) { - BT_REL_ASSERT_EQ(parent_node->has_valid_edge(), true); - if (ind > 0) { - parent_node->get_nth_key(ind - 1, &parent_key, false); - BT_REL_ASSERT_GT(child_first_key.compare(&parent_key), 0) - BT_REL_ASSERT_LT(parent_key.compare_start(&child_first_key), 0) - } - } else { - parent_node->get_nth_key(ind, &parent_key, false); - BT_REL_ASSERT_LE(child_first_key.compare(&parent_key), 0) - BT_REL_ASSERT_LE(child_last_key.compare(&parent_key), 0) - BT_REL_ASSERT_GE(parent_key.compare_start(&child_first_key), 0) - BT_REL_ASSERT_GE(parent_key.compare_start(&child_first_key), 0) - if (ind != 0) { - parent_node->get_nth_key(ind - 1, &parent_key, false); - BT_REL_ASSERT_GT(child_first_key.compare(&parent_key), 0) - BT_REL_ASSERT_LT(parent_key.compare_start(&child_first_key), 0) - } - } - } - - void validate_sanity_next_child(const BtreeNodePtr< K >& parent_node, uint32_t ind) { - BtreeNodeInfo child_info; - K child_key; - K parent_key; - - if (parent_node->has_valid_edge()) { - if (ind == parent_node->get_total_entries()) { return; } - } else { - if (ind == parent_node->get_total_entries() - 1) { return; } - } - parent_node->get(ind + 1, &child_info, false /* copy */); - BtreeNodePtr< K > child_node = nullptr; - auto ret = read_node(child_info.bnode_id(), child_node); - HS_RELEASE_ASSERT(ret == btree_status_t::success, "read failed, reason: {}", ret); - if (child_node->get_total_entries() == 0) { - auto parent_entries = parent_node->get_total_entries(); - if (!child_node->is_leaf()) { // leaf node can have 0 entries - HS_ASSERT_CMP(RELEASE, - ((parent_node->has_valid_edge() && ind == parent_entries) || (ind = parent_entries - 1)), - ==, true); - } - return; - } - /* in case of merge next child will never have zero entries otherwise it would have been merged */ - HS_ASSERT_CMP(RELEASE, child_node->get_total_entries(), !=, 0); - child_node->get_first_key(&child_key); - parent_node->get_nth_key(ind, &parent_key, false); - BT_REL_ASSERT_GT(child_key.compare(&parent_key), 0) - BT_REL_ASSERT_GT(parent_key.compare_start(&child_key), 0) - } - - void print_node(const bnodeid_t& bnodeid) { - std::string buf; - BtreeNodePtr< K > node; - - m_btree_lock.read_lock(); - homeds::thread::locktype acq_lock = homeds::thread::locktype::locktype_t::READ; - if (read_and_lock_node(bnodeid, node, acq_lock, acq_lock, nullptr) != btree_status_t::success) { goto done; } - buf = node->to_string(true /* print_friendly */); - unlock_node(node, acq_lock); - - done: - m_btree_lock.unlock(); - - THIS_BT_LOG(INFO, base, , "Node: <{}>", buf); - } - - void diff(Btree* other, uint32_t param, vector< pair< K, V > >* diff_kv) { - std::vector< pair< K, V > > my_kvs, other_kvs; - - get_all_kvs(&my_kvs); - other->get_all_kvs(&other_kvs); - auto it1 = my_kvs.begin(); - auto it2 = other_kvs.begin(); - - K k1, k2; - V v1, v2; - - if (it1 != my_kvs.end()) { - k1 = it1->first; - v1 = it1->second; - } - if (it2 != other_kvs.end()) { - k2 = it2->first; - v2 = it2->second; - } - - while ((it1 != my_kvs.end()) && (it2 != other_kvs.end())) { - if (k1.preceeds(&k2)) { - /* k1 preceeds k2 - push k1 and continue */ - diff_kv->emplace_back(make_pair(k1, v1)); - it1++; - if (it1 == my_kvs.end()) { break; } - k1 = it1->first; - v1 = it1->second; - } else if (k1.succeeds(&k2)) { - /* k2 preceeds k1 - push k2 and continue */ - diff_kv->emplace_back(make_pair(k2, v2)); - it2++; - if (it2 == other_kvs.end()) { break; } - k2 = it2->first; - v2 = it2->second; - } else { - /* k1 and k2 overlaps */ - std::vector< pair< K, V > > overlap_kvs; - diff_read_next_t to_read = READ_BOTH; - - v1.get_overlap_diff_kvs(&k1, &v1, &k2, &v2, param, to_read, overlap_kvs); - for (auto ovr_it = overlap_kvs.begin(); ovr_it != overlap_kvs.end(); ovr_it++) { - diff_kv->emplace_back(make_pair(ovr_it->first, ovr_it->second)); - } - - switch (to_read) { - case READ_FIRST: - it1++; - if (it1 == my_kvs.end()) { - // Add k2,v2 - diff_kv->emplace_back(make_pair(k2, v2)); - it2++; - break; - } - k1 = it1->first; - v1 = it1->second; - break; - - case READ_SECOND: - it2++; - if (it2 == other_kvs.end()) { - diff_kv->emplace_back(make_pair(k1, v1)); - it1++; - break; - } - k2 = it2->first; - v2 = it2->second; - break; - - case READ_BOTH: - /* No tail part */ - it1++; - if (it1 == my_kvs.end()) { break; } - k1 = it1->first; - v1 = it1->second; - it2++; - if (it2 == my_kvs.end()) { break; } - k2 = it2->first; - v2 = it2->second; - break; - - default: - LOGERROR("ERROR: Getting Overlapping Diff KVS for {}:{}, {}:{}, to_read {}", k1, v1, k2, v2, - to_read); - /* skip both */ - it1++; - if (it1 == my_kvs.end()) { break; } - k1 = it1->first; - v1 = it1->second; - it2++; - if (it2 == my_kvs.end()) { break; } - k2 = it2->first; - v2 = it2->second; - break; - } - } - } - - while (it1 != my_kvs.end()) { - diff_kv->emplace_back(make_pair(it1->first, it1->second)); - it1++; - } - - while (it2 != other_kvs.end()) { - diff_kv->emplace_back(make_pair(it2->first, it2->second)); - it2++; - } - } - - void merge(Btree* other, match_item_cb_t< K, V > merge_cb) { - std::vector< pair< K, V > > other_kvs; - - other->get_all_kvs(&other_kvs); - for (auto it = other_kvs.begin(); it != other_kvs.end(); it++) { - K k = it->first; - V v = it->second; - BRangeCBParam local_param(k, v); - K start(k.start(), 1), end(k.end(), 1); - - auto search_range = BtreeSearchRange(start, true, end, true); - BtreeUpdateRequest< K, V > ureq(search_range, merge_cb, nullptr, (BRangeCBParam*)&local_param); - range_put(k, v, btree_put_type::APPEND_IF_EXISTS_ELSE_INSERT, nullptr, nullptr, ureq); - } - } - - template < btree_store_type BtreeStoreType, typename K, typename V, btree_node_type InteriorNodeType, - btree_node_type LeafNodeType > - thread_local homeds::reserve_vector< btree_locked_node_info, 5 > btree_t::wr_locked_nodes; - - template < btree_store_type BtreeStoreType, typename K, typename V, btree_node_type InteriorNodeType, - btree_node_type LeafNodeType > - thread_local homeds::reserve_vector< btree_locked_node_info, 5 > btree_t::rd_locked_nodes; -}; -} // namespace btree -} // namespace sisl diff --git a/src/btree/simple_node.hpp b/src/btree/simple_node.hpp deleted file mode 100644 index da6c5344..00000000 --- a/src/btree/simple_node.hpp +++ /dev/null @@ -1,301 +0,0 @@ -/********************************************************************************* - * Modifications Copyright 2017-2019 eBay Inc. - * - * Author/Developer(s): Harihara Kadayam - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed - * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - * - *********************************************************************************/ -#pragma once - -#include "btree_node.hpp" -#include "btree_kv.hpp" -#include "btree_internal.hpp" - -using namespace std; -using namespace boost; - -SISL_LOGGING_DECL(btree) - -namespace sisl { -namespace btree { - -template < typename K, typename V > -class SimpleNode : public BtreeNode< K > { -public: - SimpleNode(uint8_t* node_buf, bnodeid_t id, bool init, bool is_leaf, const BtreeConfig& cfg) : - BtreeNode< K >(node_buf, id, init, is_leaf) { - this->set_node_type(btree_node_type::FIXED); - } - - // Insert the key and value in provided index - // Assumption: Node lock is already taken - btree_status_t insert(uint32_t ind, const BtreeKey& key, const BtreeValue& val) override { - uint32_t sz = (this->get_total_entries() - (ind + 1) + 1) * get_nth_obj_size(0); - - if (sz != 0) { std::memmove(get_nth_obj(ind + 1), get_nth_obj(ind), sz); } - this->set_nth_obj(ind, key, val); - this->inc_entries(); - this->inc_gen(); - -#ifndef NDEBUG - validate_sanity(); -#endif - return btree_status_t::success; - } - - void update(uint32_t ind, const BtreeValue& val) override { - set_nth_value(ind, val); - - // TODO: Check if we need to upgrade the gen and impact of doing so with performance. It is especially - // needed for non similar key/value pairs - this->inc_gen(); -#ifndef NDEBUG - validate_sanity(); -#endif - } - - void update(uint32_t ind, const BtreeKey& key, const BtreeValue& val) override { - set_nth_obj(ind, key, val); - this->inc_gen(); - } - - // ind_s and ind_e are inclusive - void remove(uint32_t ind_s, uint32_t ind_e) override { - uint32_t total_entries = this->get_total_entries(); - DEBUG_ASSERT_GE(total_entries, ind_s, "node={}", to_string()); - DEBUG_ASSERT_GE(total_entries, ind_e, "node={}", to_string()); - - if (ind_e == total_entries) { // edge entry - DEBUG_ASSERT((!this->is_leaf() && this->has_valid_edge()), "node={}", to_string()); - // Set the last key/value as edge entry and by decrementing entry count automatically removed the last - // entry. - BtreeNodeInfo new_edge; - get_nth_value(ind_s - 1, &new_edge, false); - this->set_nth_value(total_entries, new_edge); - this->sub_entries(total_entries - ind_s + 1); - } else { - uint32_t sz = (total_entries - ind_e - 1) * get_nth_obj_size(0); - - if (sz != 0) { std::memmove(get_nth_obj(ind_s), get_nth_obj(ind_e + 1), sz); } - this->sub_entries(ind_e - ind_s + 1); - } - this->inc_gen(); -#ifndef NDEBUG - validate_sanity(); -#endif - } - - void append(uint32_t ind, const BtreeKey& key, const BtreeValue& val) override { - RELEASE_ASSERT(false, "Append operation is not supported on simple node"); - } - - uint32_t move_out_to_right_by_entries(const BtreeConfig& cfg, BtreeNode< K >& o, uint32_t nentries) override { - auto& other_node = s_cast< SimpleNode< K, V >& >(o); - - // Minimum of whats to be moved out and how many slots available in other node - nentries = std::min({nentries, this->get_total_entries(), other_node.get_available_entries(cfg)}); - uint32_t sz = nentries * get_nth_obj_size(0); - - if (sz != 0) { - uint32_t othersz = other_node.get_total_entries() * other_node.get_nth_obj_size(0); - std::memmove(other_node.get_nth_obj(nentries), other_node.get_nth_obj(0), othersz); - std::memmove(other_node.get_nth_obj(0), get_nth_obj(this->get_total_entries() - nentries), sz); - } - - other_node.add_entries(nentries); - this->sub_entries(nentries); - - // If there is an edgeEntry in this node, it needs to move to move out as well. - if (!this->is_leaf() && this->has_valid_edge()) { - other_node.set_edge_id(this->get_edge_id()); - this->invalidate_edge(); - } - - other_node.inc_gen(); - this->inc_gen(); - -#ifndef NDEBUG - validate_sanity(); -#endif - return nentries; - } - - uint32_t move_out_to_right_by_size(const BtreeConfig& cfg, BtreeNode< K >& o, uint32_t size) override { - return (get_nth_obj_size(0) * move_out_to_right_by_entries(cfg, o, size / get_nth_obj_size(0))); - } - - uint32_t move_in_from_right_by_entries(const BtreeConfig& cfg, BtreeNode< K >& o, uint32_t nentries) override { - auto& other_node = s_cast< SimpleNode< K, V >& >(o); - - // Minimum of whats to be moved and how many slots available - nentries = std::min({nentries, other_node.get_total_entries(), get_available_entries(cfg)}); - uint32_t sz = nentries * get_nth_obj_size(0); - if (sz != 0) { - uint32_t othersz = (other_node.get_total_entries() - nentries) * other_node.get_nth_obj_size(0); - std::memmove(get_nth_obj(this->get_total_entries()), other_node.get_nth_obj(0), sz); - std::memmove(other_node.get_nth_obj(0), other_node.get_nth_obj(nentries), othersz); - } - - other_node.sub_entries(nentries); - this->add_entries(nentries); - - // If next node does not have any more entries, but only a edge entry - // we need to move that to us, so that if need be next node could be freed. - if ((other_node.get_total_entries() == 0) && other_node.has_valid_edge()) { - DEBUG_ASSERT_EQ(this->has_valid_edge(), false, "node={}", to_string()); - this->set_edge_id(other_node.get_edge_id()); - other_node.invalidate_edge(); - } - - other_node.inc_gen(); - this->inc_gen(); - -#ifndef NDEBUG - validate_sanity(); -#endif - return nentries; - } - - uint32_t move_in_from_right_by_size(const BtreeConfig& cfg, BtreeNode< K >& o, uint32_t size) override { - return (get_nth_obj_size(0) * move_in_from_right_by_entries(cfg, o, size / get_nth_obj_size(0))); - } - - uint32_t get_available_size(const BtreeConfig& cfg) const override { - return (BtreeNode< K >::node_area_size(cfg) - (this->get_total_entries() * get_nth_obj_size(0))); - } - - K get_nth_key(uint32_t ind, bool copy) const override { - DEBUG_ASSERT_LT(ind, this->get_total_entries(), "node={}", to_string()); - sisl::blob b; - b.bytes = (uint8_t*)(this->node_data_area_const() + (get_nth_obj_size(ind) * ind)); - b.size = get_obj_key_size(ind); - return K{b, copy}; - } - - void get_nth_value(uint32_t ind, BtreeValue* out_val, bool copy) const override { - DEBUG_ASSERT_LT(ind, this->get_total_entries(), "node={}", to_string()); - sisl::blob b; - if (ind == this->get_total_entries()) { - RELEASE_ASSERT_EQ(this->is_leaf(), false, "setting value outside bounds on leaf node"); - DEBUG_ASSERT_EQ(this->has_valid_edge(), true, "node={}", to_string()); - b.bytes = const_cast< uint8_t* >(reinterpret_cast< const uint8_t* >(this->get_edge_id())); - b.size = sizeof(bnodeid_t); - } else { - b.bytes = const_cast< uint8_t* >(reinterpret_cast< const uint8_t* >( - this->node_data_area_const() + (get_nth_obj_size(ind) * ind) + get_obj_key_size(ind))); - b.size = V::get_fixed_size(); - } - return out_val->deserialize(b, copy); - } - - /*V get_nth_value(uint32_t ind, bool copy) const { - V val; - get_nth_value(ind, &val, copy); - return val; - }*/ - - std::string to_string(bool print_friendly = false) const override { - auto str = fmt::format("{}id={} nEntries={} {} ", - (print_friendly ? "------------------------------------------------------------\n" : ""), - this->get_node_id(), this->get_total_entries(), (this->is_leaf() ? "LEAF" : "INTERIOR")); - if (!this->is_leaf() && (this->has_valid_edge())) { - fmt::format_to(std::back_inserter(str), "edge_id={} ", this->get_edge_id()); - } - - for (uint32_t i{0}; i < this->get_total_entries(); ++i) { - V val; - get_nth_value(i, &val, false); - fmt::format_to(std::back_inserter(str), "{}Entry{} [Key={} Val={}]", (print_friendly ? "\n\t" : " "), i + 1, - get_nth_key(i, false).to_string(), val.to_string()); - } - return str; - } - -#ifndef NDEBUG - void validate_sanity() { - if (this->get_total_entries() == 0) { return; } - - // validate if keys are in ascending order - uint32_t i{1}; - K prevKey = get_nth_key(0, false); - - while (i < this->get_total_entries()) { - K key = get_nth_key(i, false); - if (i > 0 && prevKey.compare(key) > 0) { - LOGDEBUG("non sorted entry : {} -> {} ", prevKey.to_string(), key.to_string()); - DEBUG_ASSERT(false, "node={}", to_string()); - } - ++i; - prevKey = key; - } - } -#endif - - inline uint32_t get_nth_obj_size(uint32_t ind) const override { - return (get_obj_key_size(ind) + get_obj_value_size(ind)); - } - - int compare_nth_key(const BtreeKey& cmp_key, uint32_t ind) const override { - return get_nth_key(ind, false).compare(cmp_key); - } - - // Simple/Fixed node doesn't need a record to point key/value object - uint16_t get_record_size() const override { return 0; } - - /*int compare_nth_key_range(const BtreeKeyRange& range, uint32_t ind) const override { - return get_nth_key(ind, false).compare_range(range); - }*/ - - /////////////// Other Internal Methods ///////////// - void set_nth_obj(uint32_t ind, const BtreeKey& k, const BtreeValue& v) { - if (ind > this->get_total_entries()) { - set_nth_value(ind, v); - } else { - uint8_t* entry = this->node_data_area() + (get_nth_obj_size(ind) * ind); - sisl::blob key_blob = k.serialize(); - memcpy((void*)entry, key_blob.bytes, key_blob.size); - - sisl::blob val_blob = v.serialize(); - memcpy((void*)(entry + key_blob.size), val_blob.bytes, val_blob.size); - } - } - - uint32_t get_available_entries(const BtreeConfig& cfg) const { - return get_available_size(cfg) / get_nth_obj_size(0); - } - - inline uint32_t get_obj_key_size(uint32_t ind) const { return K::get_fixed_size(); } - - inline uint32_t get_obj_value_size(uint32_t ind) const { return V::get_fixed_size(); } - - uint8_t* get_nth_obj(uint32_t ind) { return (this->node_data_area() + (get_nth_obj_size(ind) * ind)); } - - void set_nth_key(uint32_t ind, BtreeKey* key) { - uint8_t* entry = this->node_data_area() + (get_nth_obj_size(ind) * ind); - sisl::blob b = key->serialize(); - memcpy(entry, b.bytes, b.size); - } - - void set_nth_value(uint32_t ind, const BtreeValue& v) { - sisl::blob b = v.serialize(); - if (ind > this->get_total_entries()) { - RELEASE_ASSERT_EQ(this->is_leaf(), false, "setting value outside bounds on leaf node"); - DEBUG_ASSERT_EQ(b.size, sizeof(bnodeid_t), "Invalid value size being set for non-leaf node"); - this->set_edge_id(*r_cast< bnodeid_t* >(b.bytes)); - } else { - uint8_t* entry = this->node_data_area() + (get_nth_obj_size(ind) * ind) + get_obj_key_size(ind); - std::memcpy(entry, b.bytes, b.size); - } - } -}; -} // namespace btree -} // namespace sisl diff --git a/src/btree/tests/btree_test_kvs.hpp b/src/btree/tests/btree_test_kvs.hpp deleted file mode 100644 index 862e2896..00000000 --- a/src/btree/tests/btree_test_kvs.hpp +++ /dev/null @@ -1,294 +0,0 @@ -#pragma once -#include -#include -#include -#include -#include -#include "../btree_kv.hpp" - -static constexpr uint32_t g_max_keys{6000}; -static constexpr uint32_t g_max_keysize{120}; -static constexpr uint32_t g_max_valsize{120}; -static std::random_device g_rd{}; -static std::default_random_engine g_re{g_rd()}; -static std::uniform_int_distribution< uint32_t > g_randkey_generator{0, g_max_keys}; -static std::uniform_int_distribution< uint32_t > g_randkeysize_generator{2, g_max_keysize}; -static std::uniform_int_distribution< uint32_t > g_randval_generator{1, 30000}; -static std::uniform_int_distribution< uint32_t > g_randvalsize_generator{2, g_max_valsize}; - -static std::map< uint32_t, std::shared_ptr< std::string > > g_key_pool; - -static constexpr std::array< const char, 62 > alphanum{ - '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', - 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', - 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'}; - -static std::string gen_random_string(size_t len, uint32_t preamble = std::numeric_limits< uint32_t >::max()) { - std::string str; - if (preamble != std::numeric_limits< uint32_t >::max()) { - std::stringstream ss; - ss << std::setw(8) << std::setfill('0') << std::hex << preamble; - str += ss.str(); - } - - static thread_local std::random_device rd{}; - static thread_local std::default_random_engine re{rd()}; - std::uniform_int_distribution< size_t > rand_char{0, alphanum.size() - 1}; - for (size_t i{0}; i < len; ++i) { - str += alphanum[rand_char(re)]; - } - str += '\0'; - return str; -} - -using namespace sisl::btree; - -class TestFixedKey : public BtreeKey { -private: - uint32_t m_key{0}; - -public: - TestFixedKey() = default; - TestFixedKey(uint32_t k) : m_key{k} {} - TestFixedKey(const TestFixedKey& other) : TestFixedKey(other.serialize(), true) {} - TestFixedKey(const BtreeKey& other) : TestFixedKey(other.serialize(), true) {} - TestFixedKey(const sisl::blob& b, bool copy) : BtreeKey(), m_key{*(r_cast< const uint32_t* >(b.bytes))} {} - TestFixedKey& operator=(const TestFixedKey& other) { - clone(other); - return *this; - }; - virtual void clone(const BtreeKey& other) override { m_key = ((TestFixedKey&)other).m_key; } - - virtual ~TestFixedKey() = default; - - int compare(const BtreeKey& o) const override { - const TestFixedKey& other = s_cast< const TestFixedKey& >(o); - if (m_key < other.m_key) { - return -1; - } else if (m_key > other.m_key) { - return 1; - } else { - return 0; - } - } - - int compare_range(const BtreeKeyRange& range) const override { - if (m_key == start_key(range)) { - return range.is_start_inclusive() ? 0 : -1; - } else if (m_key < start_key(range)) { - return -1; - } else if (m_key == end_key(range)) { - return range.is_end_inclusive() ? 0 : 1; - } else if (m_key > end_key(range)) { - return 1; - } else { - return 0; - } - } - - sisl::blob serialize() const override { - return sisl::blob{uintptr_cast(const_cast< uint32_t* >(&m_key)), uint32_cast(sizeof(uint32_t))}; - } - uint32_t serialized_size() const override { return get_fixed_size(); } - static uint32_t get_fixed_size() { return (sizeof(uint32_t)); } - std::string to_string() const { return fmt::format("{}", m_key); } - - static uint32_t get_estimate_max_size() { return get_fixed_size(); } - friend std::ostream& operator<<(std::ostream& os, const TestFixedKey& k) { - os << k.to_string(); - return os; - } - - bool operator<(const TestFixedKey& o) const { return (compare(o) < 0); } - bool operator==(const TestFixedKey& other) const { return (compare(other) == 0); } - - uint32_t key() const { return m_key; } - uint32_t start_key(const BtreeKeyRange& range) const { - const TestFixedKey& k = (const TestFixedKey&)(range.start_key()); - return k.m_key; - } - uint32_t end_key(const BtreeKeyRange& range) const { - const TestFixedKey& k = (const TestFixedKey&)(range.end_key()); - return k.m_key; - } -}; - -class TestVarLenKey : public BtreeKey { -private: - uint32_t m_key{0}; - - static std::shared_ptr< std::string > idx_to_key(uint32_t idx) { - auto it = g_key_pool.find(idx); - if (it == g_key_pool.end()) { - const auto& [it, happened] = g_key_pool.emplace( - idx, std::make_shared< std::string >(gen_random_string(g_randkeysize_generator(g_re), idx))); - assert(happened); - return it->second; - } else { - return it->second; - } - } - -public: - TestVarLenKey() = default; - TestVarLenKey(uint32_t k) : BtreeKey(), m_key{k} {} - TestVarLenKey(const BtreeKey& other) : TestVarLenKey(other.serialize(), true) {} - TestVarLenKey(const TestVarLenKey& other) = default; - TestVarLenKey(const sisl::blob& b, bool copy) : BtreeKey() { - std::string data{r_cast< const char* >(b.bytes), b.size}; - std::stringstream ss; - ss << std::hex << data.substr(0, 8); - ss >> m_key; - assert(data == *idx_to_key(m_key)); - } - virtual ~TestVarLenKey() = default; - - virtual void clone(const BtreeKey& other) override { m_key = ((TestVarLenKey&)other).m_key; } - - sisl::blob serialize() const override { - const auto& data = idx_to_key(m_key); - return sisl::blob{(uint8_t*)(data->c_str()), (uint32_t)data->size()}; - } - - uint32_t serialized_size() const override { return idx_to_key(m_key)->size(); } - - static uint32_t get_fixed_size() { - assert(0); - return 0; - } - - static uint32_t get_estimate_max_size() { return g_max_keysize; } - - int compare(const BtreeKey& o) const override { - const TestVarLenKey& other = s_cast< const TestVarLenKey& >(o); - if (m_key < other.m_key) { - return -1; - } else if (m_key > other.m_key) { - return 1; - } else { - return 0; - } - } - - int compare_range(const BtreeKeyRange& range) const override { - if (m_key == start_key(range)) { - return range.is_start_inclusive() ? 0 : -1; - } else if (m_key < start_key(range)) { - return -1; - } else if (m_key == end_key(range)) { - return range.is_end_inclusive() ? 0 : 1; - } else if (m_key > end_key(range)) { - return 1; - } else { - return 0; - } - } - - std::string to_string() const { return fmt::format("{}-{}", m_key, idx_to_key(m_key)->substr(0, 8)); } - - friend std::ostream& operator<<(std::ostream& os, const TestVarLenKey& k) { - os << k.to_string(); - return os; - } - - bool operator<(const TestVarLenKey& o) const { return (compare(o) < 0); } - bool operator==(const TestVarLenKey& other) const { return (compare(other) == 0); } - - uint32_t key() const { return m_key; } - uint32_t start_key(const BtreeKeyRange& range) const { - const TestVarLenKey& k = (const TestVarLenKey&)(range.start_key()); - return k.m_key; - } - uint32_t end_key(const BtreeKeyRange& range) const { - const TestVarLenKey& k = (const TestVarLenKey&)(range.end_key()); - return k.m_key; - } -}; - -class TestFixedValue : public BtreeValue { -private: -public: - TestFixedValue(bnodeid_t val) { assert(0); } - TestFixedValue(uint32_t val) : BtreeValue() { m_val = val; } - TestFixedValue() : TestFixedValue((uint32_t)-1) {} - TestFixedValue(const TestFixedValue& other) : BtreeValue() { m_val = other.m_val; }; - TestFixedValue(const sisl::blob& b, bool copy) : BtreeValue() { m_val = *(r_cast< uint32_t* >(b.bytes)); } - virtual ~TestFixedValue() = default; - - static TestFixedValue generate_rand() { return TestFixedValue{g_randval_generator(g_re)}; } - - TestFixedValue& operator=(const TestFixedValue& other) { - m_val = other.m_val; - return *this; - } - - sisl::blob serialize() const override { - sisl::blob b; - b.bytes = uintptr_cast(const_cast< uint32_t* >(&m_val)); - b.size = sizeof(m_val); - return b; - } - - uint32_t serialized_size() const override { return sizeof(m_val); } - static uint32_t get_fixed_size() { return sizeof(m_val); } - void deserialize(const sisl::blob& b, bool copy) { m_val = *(r_cast< uint32_t* >(b.bytes)); } - - std::string to_string() const override { return fmt::format("{}", m_val); } - - friend ostream& operator<<(ostream& os, const TestFixedValue& v) { - os << v.to_string(); - return os; - } - - // This is not mandatory overridden method for BtreeValue, but for testing comparision - bool operator==(const TestFixedValue& other) const { return (m_val == other.m_val); } - - uint32_t value() const { return m_val; } - -private: - uint32_t m_val; -}; - -class TestVarLenValue : public BtreeValue { -public: - TestVarLenValue(bnodeid_t val) { assert(0); } - TestVarLenValue(const std::string& val) : BtreeValue(), m_val{val} {} - TestVarLenValue() = default; - TestVarLenValue(const TestVarLenValue& other) : BtreeValue() { m_val = other.m_val; }; - TestVarLenValue(const sisl::blob& b, bool copy) : BtreeValue(), m_val{std::string((const char*)b.bytes, b.size)} {} - virtual ~TestVarLenValue() = default; - - TestVarLenValue& operator=(const TestVarLenValue& other) { - m_val = other.m_val; - return *this; - } - - static TestVarLenValue generate_rand() { return TestVarLenValue{gen_random_string(g_randvalsize_generator(g_re))}; } - - sisl::blob serialize() const override { - sisl::blob b; - b.bytes = uintptr_cast(const_cast< char* >(m_val.c_str())); - b.size = m_val.size(); - return b; - } - - uint32_t serialized_size() const override { return (uint32_t)m_val.size(); } - static uint32_t get_fixed_size() { return 0; } - - void deserialize(const sisl::blob& b, bool copy) { m_val = std::string((const char*)b.bytes, b.size); } - - std::string to_string() const override { return fmt::format("{}", m_val); } - - friend ostream& operator<<(ostream& os, const TestVarLenValue& v) { - os << v.to_string(); - return os; - } - - // This is not mandatory overridden method for BtreeValue, but for testing comparision - bool operator==(const TestVarLenValue& other) const { return (m_val == other.m_val); } - - std::string value() const { return m_val; } - -private: - std::string m_val; -}; diff --git a/src/btree/tests/test_btree_node.cpp b/src/btree/tests/test_btree_node.cpp deleted file mode 100644 index 6d71d97f..00000000 --- a/src/btree/tests/test_btree_node.cpp +++ /dev/null @@ -1,347 +0,0 @@ -/********************************************************************************* - * Modifications Copyright 2017-2019 eBay Inc. - * - * Author/Developer(s): Harihara Kadayam - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed - * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - * - *********************************************************************************/ - -#include -#include -#include -#include - -#include "options/options.h" -#include "logging/logging.h" -#include "simple_node.hpp" -#include "varlen_node.hpp" -#include "utility/enum.hpp" -#include "btree_test_kvs.hpp" - -static constexpr uint32_t g_node_size{4096}; -using namespace sisl::btree; -SISL_LOGGING_INIT(btree) - -struct FixedLenNodeTest { - using NodeType = SimpleNode< TestFixedKey, TestFixedValue >; - using KeyType = TestFixedKey; - using ValueType = TestFixedValue; -}; - -struct VarKeySizeNodeTest { - using NodeType = VarKeySizeNode< TestVarLenKey, TestFixedValue >; - using KeyType = TestVarLenKey; - using ValueType = TestFixedValue; -}; - -struct VarValueSizeNodeTest { - using NodeType = VarValueSizeNode< TestFixedKey, TestVarLenValue >; - using KeyType = TestFixedKey; - using ValueType = TestVarLenValue; -}; - -struct VarObjSizeNodeTest { - using NodeType = VarObjSizeNode< TestVarLenKey, TestVarLenValue >; - using KeyType = TestVarLenKey; - using ValueType = TestVarLenValue; -}; - -template < typename TestType > -struct NodeTest : public testing::Test { - using T = TestType; - using K = TestType::KeyType; - using V = TestType::ValueType; - - std::unique_ptr< typename T::NodeType > m_node1; - std::unique_ptr< typename T::NodeType > m_node2; - std::map< K, V > m_shadow_map; - BtreeConfig m_cfg{g_node_size}; - - void SetUp() override { - m_node1 = std::make_unique< typename T::NodeType >(new uint8_t[g_node_size], 1ul, true, true, m_cfg); - m_node2 = std::make_unique< typename T::NodeType >(new uint8_t[g_node_size], 2ul, true, true, m_cfg); - } - - void put(uint32_t k, btree_put_type put_type) { - K key{k}; - V value{V::generate_rand()}; - V existing_v; - bool done = m_node1->put(key, value, put_type, &existing_v); - - bool expected_done{true}; - if (m_shadow_map.find(key) != m_shadow_map.end()) { - expected_done = (put_type != btree_put_type::INSERT_ONLY_IF_NOT_EXISTS); - } - ASSERT_EQ(done, expected_done) << "Expected put of key " << k << " of put_type " << enum_name(put_type) - << " to be " << expected_done; - if (expected_done) { - m_shadow_map.insert(std::make_pair(key, value)); - } else { - const auto r = m_shadow_map.find(key); - ASSERT_NE(r, m_shadow_map.end()) << "Testcase issue, expected inserted slots to be in shadow map"; - ASSERT_EQ(existing_v, r->second) - << "Insert existing value doesn't return correct data for key " << r->first; - } - } - - void update(uint32_t k, bool validate_update = true) { - K key{k}; - V value{V::generate_rand()}; - V existing_v; - const bool done = m_node1->update_one(key, value, &existing_v); - const auto expected_done = (m_shadow_map.find(key) != m_shadow_map.end()); - ASSERT_EQ(done, expected_done) << "Not updated for key=" << k << " where it is expected to"; - - if (done) { - validate_data(key, existing_v); - m_shadow_map[key] = value; - } - - if (validate_update) { validate_specific(k); } - } - - void remove(uint32_t k, bool validate_remove = true) { - K key{k}; - K existing_key; - V existing_value; - const bool shadow_found = (m_shadow_map.find(key) != m_shadow_map.end()); - auto removed_1 = m_node1->remove_one(K{key}, &existing_key, &existing_value); - if (removed_1) { - ASSERT_EQ(key.key(), k) << "Whats removed is different than whats asked for"; - validate_data(key, existing_value); - m_shadow_map.erase(key); - } - - auto removed_2 = m_node2->remove_one(K{key}, &existing_key, &existing_value); - if (removed_2) { - ASSERT_EQ(key.key(), k) << "Whats removed is different than whats asked for"; - validate_data(key, existing_value); - m_shadow_map.erase(key); - } - - ASSERT_EQ(removed_1 || removed_2, shadow_found) << "To remove key=" << k << " is not present in the nodes"; - - if (validate_remove) { validate_specific(k); } - } - - void validate_get_all() const { - uint32_t start_ind{0}; - uint32_t end_ind{0}; - std::vector< std::pair< K, V > > out_vector; - auto ret = m_node1->get_all(BtreeKeyRangeSafe< K >{K{0u}, true, K{g_max_keys}, false}, g_max_keys, start_ind, - end_ind, &out_vector); - ret += m_node2->get_all(BtreeKeyRangeSafe< K >{K{0u}, true, K{g_max_keys}, false}, g_max_keys, start_ind, - end_ind, &out_vector); - - ASSERT_EQ(ret, m_shadow_map.size()) << "Expected number of entries to be same with shadow_map size"; - ASSERT_EQ(out_vector.size(), m_shadow_map.size()) - << "Expected number of entries to be same with shadow_map size"; - - uint64_t idx{0}; - for (auto& [key, value] : m_shadow_map) { - ASSERT_EQ(out_vector[idx].second, value) - << "Range get doesn't return correct data for key=" << key << " idx=" << idx; - ++idx; - } - } - - void validate_get_any(uint32_t start, uint32_t end) const { - K start_key{start}; - K end_key{end}; - K out_k; - V out_v; - auto result = - m_node1->get_any(BtreeKeyRangeSafe< K >{start_key, true, end_key, true}, &out_k, &out_v, true, true); - if (result.first) { - validate_data(out_k, out_v); - } else { - result = - m_node2->get_any(BtreeKeyRangeSafe< K >{start_key, true, end_key, true}, &out_k, &out_v, true, true); - if (result.first) { - validate_data(out_k, out_v); - } else { - const auto r = m_shadow_map.lower_bound(start_key); - const bool found = ((r != m_shadow_map.end()) && (r->first.key() <= end)); - ASSERT_EQ(found, false) << "Node key range=" << start << "-" << end - << " missing, Its present in shadow map at " << r->first; - } - } - } - - void validate_specific(uint32_t k) const { - K key{k}; - V val; - const auto ret1 = m_node1->find(key, &val, true); - if (ret1.first) { - ASSERT_NE(m_shadow_map.find(key), m_shadow_map.end()) - << "Node key " << k << " is present when its expected not to be"; - validate_data(key, val); - } - - const auto ret2 = m_node2->find(key, &val, true); - if (ret2.first) { - ASSERT_NE(m_shadow_map.find(key), m_shadow_map.end()) - << "Node key " << k << " is present when its expected not to be"; - validate_data(key, val); - } - - ASSERT_EQ(ret1.first || ret2.first, m_shadow_map.find(key) != m_shadow_map.end()) - << "Node key " << k << " is incorrect presence compared to shadow map"; - } - -protected: - void put_list(const std::vector< uint32_t >& keys) { - for (const auto& k : keys) { - if (!this->has_room()) { break; } - put(k, btree_put_type::INSERT_ONLY_IF_NOT_EXISTS); - } - } - - void print() const { - LOGDEBUG("Node1:\n {}", m_node1->to_string(true)); - LOGDEBUG("Node2:\n {}", m_node2->to_string(true)); - } - - uint32_t remaining_space() const { return m_node1->get_available_size(m_cfg); } - bool has_room() const { return remaining_space() > (g_max_keysize + g_max_valsize + 32); } - -private: - void validate_data(const K& key, const V& node_val) const { - const auto r = m_shadow_map.find(key); - ASSERT_NE(r, m_shadow_map.end()) << "Node key is not present in shadow map"; - ASSERT_EQ(node_val, r->second) << "Found value in node doesn't return correct data for key=" << r->first; - } -}; - -using NodeTypes = testing::Types< FixedLenNodeTest, VarKeySizeNodeTest, VarValueSizeNodeTest, VarObjSizeNodeTest >; -TYPED_TEST_SUITE(NodeTest, NodeTypes); - -TYPED_TEST(NodeTest, SequentialInsert) { - for (uint32_t i{0}; (i < 100 && this->has_room()); ++i) { - this->put(i, btree_put_type::INSERT_ONLY_IF_NOT_EXISTS); - } - this->print(); - this->validate_get_all(); - this->validate_get_any(0, 2); - this->validate_get_any(3, 3); - this->validate_get_any(98, 102); -} - -TYPED_TEST(NodeTest, ReverseInsert) { - for (uint32_t i{100}; (i > 0 && this->has_room()); --i) { - this->put(i - 1, btree_put_type::INSERT_ONLY_IF_NOT_EXISTS); - } - this->print(); - this->validate_get_all(); - this->validate_get_any(0, 2); - this->validate_get_any(3, 3); - this->validate_get_any(98, 102); -} - -TYPED_TEST(NodeTest, Remove) { - this->put_list({0, 1, 2, g_max_keys / 2, g_max_keys / 2 + 1, g_max_keys / 2 - 1}); - this->remove(0); - this->remove(0); // Remove non-existing - this->remove(1); - this->remove(2); - this->remove(g_max_keys / 2 - 1); - this->print(); - this->validate_get_all(); - this->validate_get_any(0, 2); - this->validate_get_any(3, 3); - this->validate_get_any(g_max_keys / 2, g_max_keys - 1); -} - -TYPED_TEST(NodeTest, Update) { - this->put_list({0, 1, 2, g_max_keys / 2, g_max_keys / 2 + 1, g_max_keys / 2 - 1}); - this->update(1); - this->update(g_max_keys / 2); - this->update(2); - this->remove(0); - this->update(0); // Update non-existing - this->print(); - this->validate_get_all(); -} - -TYPED_TEST(NodeTest, RandomInsertRemoveUpdate) { - uint32_t num_inserted{0}; - while (this->has_room()) { - this->put(g_randkey_generator(g_re), btree_put_type::INSERT_ONLY_IF_NOT_EXISTS); - ++num_inserted; - } - LOGDEBUG("After random insertion of {} objects", num_inserted); - this->print(); - this->validate_get_all(); - - for (uint32_t i{0}; i < num_inserted / 2; ++i) { - const auto k = g_randkey_generator(g_re) % this->m_shadow_map.rbegin()->first.key(); - const auto r = this->m_shadow_map.lower_bound(typename TestFixture::K{k}); - this->remove(r->first.key()); - } - LOGDEBUG("After random removal of {} objects", num_inserted / 2); - this->print(); - this->validate_get_all(); - - uint32_t num_updated{0}; - for (uint32_t i{0}; i < num_inserted / 2 && this->has_room(); ++i) { - const auto k = g_randkey_generator(g_re) % this->m_shadow_map.rbegin()->first.key(); - const auto r = this->m_shadow_map.lower_bound(typename TestFixture::K{k}); - this->update(r->first.key()); - ++num_updated; - } - LOGDEBUG("After update of {} entries", num_updated); - this->print(); - this->validate_get_all(); -} - -TYPED_TEST(NodeTest, Move) { - std::vector< uint32_t > list{0, 1, 2, g_max_keys / 2 - 1}; - this->put_list(list); - this->print(); - - this->m_node1->move_out_to_right_by_entries(this->m_cfg, *this->m_node2, list.size()); - this->m_node1->move_out_to_right_by_entries(this->m_cfg, *this->m_node2, list.size()); // Empty move - ASSERT_EQ(this->m_node1->get_total_entries(), 0u) << "Move out to right has failed"; - ASSERT_EQ(this->m_node2->get_total_entries(), list.size()) << "Move out to right has failed"; - this->validate_get_all(); - - this->m_node1->move_in_from_right_by_entries(this->m_cfg, *this->m_node2, list.size()); - this->m_node1->move_in_from_right_by_entries(this->m_cfg, *this->m_node2, list.size()); // Empty move - ASSERT_EQ(this->m_node2->get_total_entries(), 0u) << "Move in from right has failed"; - ASSERT_EQ(this->m_node1->get_total_entries(), list.size()) << "Move in from right has failed"; - this->validate_get_all(); - - this->m_node1->move_out_to_right_by_entries(this->m_cfg, *this->m_node2, list.size() / 2); - ASSERT_EQ(this->m_node1->get_total_entries(), list.size() / 2) << "Move out half entries to right has failed"; - ASSERT_EQ(this->m_node2->get_total_entries(), list.size() - list.size() / 2) - << "Move out half entries to right has failed"; - this->validate_get_all(); - this->print(); - - ASSERT_EQ(this->m_node1->validate_key_order(), true) << "Key order validation of node1 has failed"; - ASSERT_EQ(this->m_node2->validate_key_order(), true) << "Key order validation of node2 has failed"; -} - -SISL_OPTIONS_ENABLE(logging, test_btree_node) -SISL_OPTION_GROUP(test_btree_node, - (num_iters, "", "num_iters", "number of iterations for rand ops", - ::cxxopts::value< uint32_t >()->default_value("65536"), "number")) - -int main(int argc, char* argv[]) { - ::testing::InitGoogleTest(&argc, argv); - SISL_OPTIONS_LOAD(argc, argv, logging, test_btree_node) - sisl::logging::SetLogger("test_btree_node"); - spdlog::set_pattern("[%D %T%z] [%^%L%$] [%t] %v"); - - auto ret = RUN_ALL_TESTS(); - return ret; -} \ No newline at end of file diff --git a/src/btree/tests/test_mem_btree.cpp b/src/btree/tests/test_mem_btree.cpp deleted file mode 100644 index 27737422..00000000 --- a/src/btree/tests/test_mem_btree.cpp +++ /dev/null @@ -1,151 +0,0 @@ -/********************************************************************************* - * Modifications Copyright 2017-2019 eBay Inc. - * - * Author/Developer(s): Harihara Kadayam - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed - * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - * - *********************************************************************************/ - -#include -#include -#include -#include - -#include "options/options.h" -#include "logging/logging.h" -#include "simple_node.hpp" -#include "varlen_node.hpp" -#include "utility/enum.hpp" -#include "btree_test_kvs.hpp" -#include "../mem_btree.hpp" - -static constexpr uint32_t g_node_size{4096}; -using namespace sisl::btree; -SISL_LOGGING_INIT(btree) - -SISL_OPTIONS_ENABLE(logging, test_mem_btree) -SISL_OPTION_GROUP(test_mem_btree, - (num_iters, "", "num_iters", "number of iterations for rand ops", - ::cxxopts::value< uint32_t >()->default_value("65536"), "number")) - -struct FixedLenBtreeTest { - using BtreeType = MemBtree< TestFixedKey, TestFixedValue >; - using KeyType = TestFixedKey; - using ValueType = TestFixedValue; - static constexpr btree_node_type leaf_node_type = btree_node_type::FIXED; - static constexpr btree_node_type interior_node_type = btree_node_type::FIXED; -}; - -struct VarKeySizeBtreeTest { - using BtreeType = MemBtree< TestVarLenKey, TestFixedValue >; - using KeyType = TestVarLenKey; - using ValueType = TestFixedValue; - static constexpr btree_node_type leaf_node_type = btree_node_type::VAR_KEY; - static constexpr btree_node_type interior_node_type = btree_node_type::VAR_KEY; -}; - -struct VarValueSizeBtreeTest { - using BtreeType = MemBtree< TestFixedKey, TestVarLenValue >; - using KeyType = TestFixedKey; - using ValueType = TestVarLenValue; - static constexpr btree_node_type leaf_node_type = btree_node_type::VAR_VALUE; - static constexpr btree_node_type interior_node_type = btree_node_type::FIXED; -}; - -struct VarObjSizeBtreeTest { - using BtreeType = MemBtree< TestVarLenKey, TestVarLenValue >; - using KeyType = TestVarLenKey; - using ValueType = TestVarLenValue; - static constexpr btree_node_type leaf_node_type = btree_node_type::VAR_OBJECT; - static constexpr btree_node_type interior_node_type = btree_node_type::VAR_OBJECT; -}; - -template < typename TestType > -struct BtreeTest : public testing::Test { - using T = TestType; - using K = TestType::KeyType; - using V = TestType::ValueType; - - std::unique_ptr< typename T::BtreeType > m_bt; - std::map< K, V > m_shadow_map; - BtreeConfig m_cfg{g_node_size}; - - void SetUp() override { - m_cfg.m_leaf_node_type = T::leaf_node_type; - m_cfg.m_int_node_type = T::interior_node_type; - m_bt = std::make_unique< typename T::BtreeType >(m_cfg); - m_bt->init(nullptr); - } - - void put(uint32_t k, btree_put_type put_type) { - std::unique_ptr< V > existing_v; - - BtreeMutateRequest req = BtreeSinglePutRequest{ - std::make_unique< K >(k), std::make_unique< V >(V::generate_rand()), put_type, std::move(existing_v)}; - bool done = (m_bt->put(req) == btree_status_t::success); - - auto& sreq = to_single_put_req(req); - bool expected_done{true}; - if (m_shadow_map.find(*sreq.m_k) != m_shadow_map.end()) { - expected_done = (put_type != btree_put_type::INSERT_ONLY_IF_NOT_EXISTS); - } - ASSERT_EQ(done, expected_done) << "Expected put of key " << k << " of put_type " << enum_name(put_type) - << " to be " << expected_done; - if (expected_done) { - m_shadow_map.insert(std::make_pair((const K&)*sreq.m_k, (const V&)*sreq.m_v)); - } else { - const auto r = m_shadow_map.find(*sreq.m_k); - ASSERT_NE(r, m_shadow_map.end()) << "Testcase issue, expected inserted slots to be in shadow map"; - ASSERT_EQ((const V&)*sreq.m_existing_val, r->second) - << "Insert existing value doesn't return correct data for key " << r->first; - } - } - - void validate_get_all() const { - std::vector< std::pair< K, V > > out_vector; - BtreeQueryRequest qreq{BtreeSearchState{BtreeKeyRangeSafe< K >{K{0u}, true, K{g_max_keys}, false}}}; - auto ret = m_bt->query(qreq, out_vector); - - ASSERT_EQ(ret, btree_status_t::success) << "Expected success on query"; - ASSERT_EQ(out_vector.size(), m_shadow_map.size()) - << "Expected number of entries to be same with shadow_map size"; - - uint64_t idx{0}; - for (auto& [key, value] : m_shadow_map) { - ASSERT_EQ(out_vector[idx].second, value) - << "Range get doesn't return correct data for key=" << key << " idx=" << idx; - ++idx; - } - } - - void print() const { m_bt->print_tree(); } -}; - -using BtreeTypes = testing::Types< FixedLenBtreeTest, VarKeySizeBtreeTest, VarValueSizeBtreeTest, VarObjSizeBtreeTest >; -TYPED_TEST_SUITE(BtreeTest, BtreeTypes); - -TYPED_TEST(BtreeTest, SequentialInsert) { - for (uint32_t i{0}; i < 100; ++i) { - this->put(i, btree_put_type::INSERT_ONLY_IF_NOT_EXISTS); - } - this->print(); - this->validate_get_all(); -} - -int main(int argc, char* argv[]) { - SISL_OPTIONS_LOAD(argc, argv, logging, test_mem_btree) - sisl::logging::SetLogger("test_mem_btree"); - spdlog::set_pattern("[%D %T%z] [%^%L%$] [%t] %v"); - - auto ret = RUN_ALL_TESTS(); - return ret; -} \ No newline at end of file diff --git a/src/btree/varlen_node.hpp b/src/btree/varlen_node.hpp deleted file mode 100644 index e69b370a..00000000 --- a/src/btree/varlen_node.hpp +++ /dev/null @@ -1,695 +0,0 @@ -/********************************************************************************* - * Modifications Copyright 2017-2019 eBay Inc. - * - * Author/Developer(s): Harihara Kadayam, Rishabh Mittal - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed - * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - * - *********************************************************************************/ - -#pragma once - -#include "logging/logging.h" -#include "btree_node.hpp" -#include "btree_kv.hpp" - -SISL_LOGGING_DECL(btree) - -namespace sisl { -namespace btree { -#pragma pack(1) -struct btree_obj_record { - uint16_t m_obj_offset : 14; - uint16_t reserved : 2; -}; - -struct var_node_header { - uint16_t m_tail_arena_offset; // Tail side of the arena where new keys are inserted - uint16_t m_available_space; - uint16_t m_init_available_space; // remember initial node area size to later use for compaction - // TODO: - // We really dont require storing m_init_available_space in each node. - // Instead add method in variant node to fetch config - - uint16_t tail_offset() const { return m_tail_arena_offset; } - uint16_t available_space() const { return m_available_space; } -}; -#pragma pack() - -/** - * Internal format of variable node: - * [var node header][Record][Record].. ... ... [key][value][key][value] - * key and value both can be variying. - */ -template < typename K, typename V > -class VariableNode : public BtreeNode< K > { -public: - VariableNode(uint8_t* node_buf, bnodeid_t id, bool init, bool is_leaf, const BtreeConfig& cfg) : - BtreeNode< K >(node_buf, id, init, is_leaf) { - if (init) { - // Tail arena points to the edge of the node as data arena grows backwards. Entire space is now available - // except for the header itself - get_var_node_header()->m_init_available_space = BtreeNode< K >::node_area_size(cfg); - get_var_node_header()->m_tail_arena_offset = BtreeNode< K >::node_area_size(cfg); - get_var_node_header()->m_available_space = - get_var_node_header()->m_tail_arena_offset - sizeof(var_node_header); - } - } - - virtual ~VariableNode() = default; - - /* Insert the key and value in provided index - * Assumption: Node lock is already taken */ - btree_status_t insert(uint32_t ind, const BtreeKey& key, const BtreeValue& val) override { - LOGTRACEMOD(btree, "{}:{}", key.to_string(), val.to_string()); - auto sz = insert(ind, key.serialize(), val.serialize()); -#ifndef NDEBUG - validate_sanity(); -#endif - if (sz == 0) { return btree_status_t::insert_failed; } - return btree_status_t::success; - } - -#ifndef NDEBUG - void validate_sanity() { - uint32_t i{0}; - // validate if keys are in ascending order - K prevKey; - while (i < this->get_total_entries()) { - K key = get_nth_key(i, false); - uint64_t kp = *(uint64_t*)key.serialize().bytes; - if (i > 0 && prevKey.compare(key) > 0) { - DEBUG_ASSERT(false, "Found non sorted entry: {} -> {}", kp, to_string()); - } - prevKey = key; - ++i; - } - } -#endif - - /* Update a value in a given index to the provided value. It will support change in size of the new value. - * Assumption: Node lock is already taken, size check for the node to support new value is already done */ - void update(uint32_t ind, const BtreeValue& val) override { - // If we are updating the edge value, none of the other logic matter. Just update edge value and move on - if (ind == this->get_total_entries()) { - DEBUG_ASSERT_EQ(this->is_leaf(), false); - this->set_edge_value(val); - this->inc_gen(); - } else { - K key = get_nth_key(ind, true); - update(ind, key, val); - } - } - - // TODO - currently we do not support variable size key - void update(uint32_t ind, const BtreeKey& key, const BtreeValue& val) override { - LOGTRACEMOD(btree, "Update called:{}", to_string()); - DEBUG_ASSERT_LE(ind, this->get_total_entries()); - - // If we are updating the edge value, none of the other logic matter. Just update edge value and move on - if (ind == this->get_total_entries()) { - DEBUG_ASSERT_EQ(this->is_leaf(), false); - this->set_edge_value(val); - this->inc_gen(); - return; - } - - // Determine if we are doing same size update or smaller size update, in that case, reuse the space. - uint16_t nth_key_len = get_nth_key_len(ind); - uint16_t new_obj_size = nth_key_len + val.serialized_size(); - uint16_t cur_obj_size = get_nth_obj_size(ind); - - if (cur_obj_size >= new_obj_size) { - uint8_t* val_ptr = (uint8_t*)get_nth_obj(ind) + nth_key_len; - sisl::blob vblob = val.serialize(); - DEBUG_ASSERT_EQ(vblob.size, val.serialized_size(), - "Serialized size returned different after serialization"); - - // we can avoid memcpy if addresses of val_ptr and vblob.bytes is same. In place update - if (val_ptr != vblob.bytes) { - // TODO - we can reclaim space if new obj size is lower than cur obj size - // Same or smaller size update, just copy the value blob - LOGTRACEMOD(btree, "Not an in-place update, have to copying data of size {}", vblob.size); - memcpy(val_ptr, vblob.bytes, vblob.size); - } else { - // do nothing - LOGTRACEMOD(btree, "In place update, not copying data."); - } - set_nth_value_len(get_nth_record_mutable(ind), vblob.size); - get_var_node_header()->m_available_space += cur_obj_size - new_obj_size; - this->inc_gen(); - return; - } - - remove(ind, ind); - insert(ind, key, val); - LOGTRACEMOD(btree, "Size changed for either key or value. Had to delete and insert :{}", to_string()); - } - - // ind_s and ind_e are inclusive - void remove(uint32_t ind_s, uint32_t ind_e) override { - uint32_t total_entries = this->get_total_entries(); - assert(total_entries >= ind_s); - assert(total_entries >= ind_e); - uint32_t recSize = this->get_record_size(); - uint32_t no_of_elem = ind_e - ind_s + 1; - if (ind_e == this->get_total_entries()) { - assert(!this->is_leaf() && this->has_valid_edge()); - - V last_1_val; - get_nth_value(ind_s - 1, &last_1_val, false); - this->set_edge_value(last_1_val); - - for (uint32_t i = ind_s; i < total_entries; i++) { - get_var_node_header()->m_available_space += get_nth_key_len(i) + get_nth_value_len(i) + recSize; - } - this->sub_entries(total_entries - ind_s + 1); - } else { - // claim available memory - for (uint32_t i = ind_s; i <= ind_e; i++) { - get_var_node_header()->m_available_space += get_nth_key_len(i) + get_nth_value_len(i) + recSize; - } - uint8_t* rec_ptr = get_nth_record_mutable(ind_s); - memmove(rec_ptr, rec_ptr + recSize * no_of_elem, (this->get_total_entries() - ind_e - 1) * recSize); - - this->sub_entries(no_of_elem); - } - this->inc_gen(); - } - - /*V get(uint32_t ind, bool copy) const { - // Need edge index - if (ind == this->get_total_entries()) { - assert(!this->is_leaf()); - assert(this->has_valid_edge()); - return this->get_edge_value(); - } else { - return get_nth_value(ind, copy); - } - }*/ - - uint32_t move_out_to_right_by_entries(const BtreeConfig& cfg, BtreeNode< K >& o, uint32_t nentries) override { - auto& other = static_cast< VariableNode& >(o); - const auto this_gen = this->get_gen(); - const auto other_gen = other.get_gen(); - - const auto this_nentries = this->get_total_entries(); - nentries = std::min(nentries, this_nentries); - if (nentries == 0) { return 0; /* Nothing to move */ } - - const uint32_t start_ind = this_nentries - 1; - const uint32_t end_ind = this_nentries - nentries; - uint32_t ind = start_ind; - bool full_move{false}; - while (ind >= end_ind) { - // Get the ith key and value blob and then remove the entry from here and insert to the other node - sisl::blob kb; - kb.bytes = (uint8_t*)get_nth_obj(ind); - kb.size = get_nth_key_len(ind); - - sisl::blob vb; - vb.bytes = kb.bytes + kb.size; - vb.size = get_nth_value_len(ind); - - auto sz = other.insert(0, kb, vb); - if (!sz) { break; } - if (ind == 0) { - full_move = true; - break; - } - --ind; - } - - if (!this->is_leaf() && (other.get_total_entries() != 0)) { - // Incase this node is an edge node, move the stick to the right hand side node - other.set_edge_id(this->get_edge_id()); - this->invalidate_edge(); - } - remove(full_move ? 0u : ind + 1, start_ind); // Remove all entries in bulk - - // Remove and insert would have set the gen multiple increments, just reset it to increment only by 1 - // TODO: This is bit ugly but needed in-order to avoid repeat the same code again, but see if we can produce - // interface around it. - this->set_gen(this_gen + 1); - other.set_gen(other_gen + 1); - - return (start_ind - ind); - } - - uint32_t move_out_to_right_by_size(const BtreeConfig& cfg, BtreeNode< K >& o, uint32_t size_to_move) override { - auto& other = static_cast< VariableNode& >(o); - uint32_t moved_size = 0U; - auto this_gen = this->get_gen(); - auto other_gen = other.get_gen(); - - uint32_t ind = this->get_total_entries() - 1; - while (ind > 0) { - sisl::blob kb; - kb.bytes = (uint8_t*)get_nth_obj(ind); - kb.size = get_nth_key_len(ind); - - sisl::blob vb; - vb.bytes = kb.bytes + kb.size; - vb.size = get_nth_value_len(ind); - - auto sz = other.insert(0, kb, vb); // Keep on inserting on the first index, thus moving everything to right - if (!sz) break; - moved_size += sz; - --ind; - if ((kb.size + vb.size + this->get_record_size()) > size_to_move) { - // We reached threshold of how much we could move - break; - } - size_to_move -= sz; - } - remove(ind + 1, this->get_total_entries() - 1); - - if (!this->is_leaf() && (other.get_total_entries() != 0)) { - // Incase this node is an edge node, move the stick to the right hand side node - other.set_edge_id(this->get_edge_id()); - this->invalidate_edge(); - } - - // Remove and insert would have set the gen multiple increments, just reset it to increment only by 1 - // TODO: This is bit ugly but needed in-order to avoid repeat the same code again, but see if we can produce - // interface around it. - this->set_gen(this_gen + 1); - other.set_gen(other_gen + 1); - - return moved_size; - } - - uint32_t move_in_from_right_by_entries(const BtreeConfig& cfg, BtreeNode< K >& o, uint32_t nentries) override { - auto& other = static_cast< VariableNode& >(o); - auto this_gen = this->get_gen(); - auto other_gen = other.get_gen(); - nentries = std::min(nentries, other.get_total_entries()); - - if (nentries == 0) { return 0; /* Nothing to move */ } - uint32_t other_ind = 0; - while (nentries) { - // Get the ith key and value blob and then remove the entry from here and insert to the other node - sisl::blob kb; - kb.bytes = (uint8_t*)other.get_nth_obj(other_ind); - kb.size = other.get_nth_key_len(other_ind); - - sisl::blob vb; - vb.bytes = kb.bytes + kb.size; - vb.size = other.get_nth_value_len(other_ind); - - auto sz = insert(this->get_total_entries(), kb, vb); - if (!sz) { break; } - --nentries; - ++other_ind; - } - - other.remove(0, other_ind - 1); // Remove all entries in bulk - assert(other.get_total_entries() == nentries); - - if (!other.is_leaf() && (other.get_total_entries() == 0)) { - // Incase other node is an edge node and we moved all the data into this node, move over the edge info as - // well. - this->set_edge_id(other.get_edge_id()); - other.invalidate_edge(); - } - - // Remove and insert would have set the gen multiple increments, just reset it to increment only by 1 - // TODO: This is bit ugly but needed in-order to avoid repeat the same code again, but see if we can produce - // interface around it. - this->set_gen(this_gen + 1); - other.set_gen(other_gen + 1); - - return (other_ind); - } - - uint32_t move_in_from_right_by_size(const BtreeConfig& cfg, BtreeNode< K >& o, uint32_t size_to_move) override { - auto& other = static_cast< VariableNode& >(o); - uint32_t moved_size = 0U; - auto this_gen = this->get_gen(); - auto other_gen = other.get_gen(); - - uint32_t ind = 0; - while (ind < this->get_total_entries()) { - sisl::blob kb; - kb.bytes = (uint8_t*)other.get_nth_obj(ind); - kb.size = other.get_nth_key_len(ind); - - sisl::blob vb; - vb.bytes = kb.bytes + kb.size; - vb.size = other.get_nth_value_len(ind); - - if ((kb.size + vb.size + other.get_record_size()) > size_to_move) { - // We reached threshold of how much we could move - break; - } - auto sz = insert(this->get_total_entries(), kb, vb); // Keep on inserting on the last index. - if (!sz) break; - moved_size += sz; - ind++; - size_to_move -= sz; - } - if (ind) other.remove(0, ind - 1); - - if (!other.is_leaf() && (other.get_total_entries() == 0)) { - // Incase other node is an edge node and we moved all the data into this node, move over the edge info as - // well. - this->set_edge_id(other.get_edge_id()); - other.invalidate_edge(); - } - - // Remove and insert would have set the gen multiple increments, just reset it to increment only by 1 - // TODO: This is bit ugly but needed in-order to avoid repeat the same code again, but see if we can produce - // interface around it. - this->set_gen(this_gen + 1); - other.set_gen(other_gen + 1); - - return moved_size; - } - void append(uint32_t ind, const BtreeKey& key, const BtreeValue& val) override { - RELEASE_ASSERT(false, "Append operation is not supported on var node"); - } - - uint32_t get_available_size(const BtreeConfig& cfg) const override { - return get_var_node_header_const()->m_available_space; - } - - uint32_t get_nth_obj_size(uint32_t ind) const override { return get_nth_key_len(ind) + get_nth_value_len(ind); } - - void set_nth_key(uint32_t ind, const BtreeKey& key) { - const auto kb = key.serialize(); - assert(ind < this->get_total_entries()); - assert(kb.size == get_nth_key_len(ind)); - memcpy(uintptr_cast(get_nth_obj(ind)), kb.bytes, kb.size); - } - - virtual uint16_t get_nth_key_len(uint32_t ind) const = 0; - virtual uint16_t get_nth_value_len(uint32_t ind) const = 0; - virtual void set_nth_key_len(uint8_t* rec_ptr, uint16_t key_len) = 0; - virtual void set_nth_value_len(uint8_t* rec_ptr, uint16_t value_len) = 0; - - K get_nth_key(uint32_t ind, bool copy) const { - assert(ind < this->get_total_entries()); - sisl::blob b{const_cast< uint8_t* >(get_nth_obj(ind)), get_nth_key_len(ind)}; - return K{b, copy}; - } - - void get_nth_value(uint32_t ind, BtreeValue* out_val, bool copy) const override { - sisl::blob b{const_cast< uint8_t* >(get_nth_obj(ind)) + get_nth_key_len(ind), get_nth_value_len(ind)}; - out_val->deserialize(b, copy); - } - - /*V get_nth_value(uint32_t ind, bool copy) const { - assert(ind < this->get_total_entries()); - sisl::blob b{const_cast< uint8_t* >(get_nth_obj(ind)) + get_nth_key_len(ind), get_nth_value_len(ind)}; - return V{b, copy}; - }*/ - - std::string to_string(bool print_friendly = false) const override { - auto str = fmt::format( - "{}id={} nEntries={} {} free_space={} ", - (print_friendly ? "---------------------------------------------------------------------\n" : ""), - this->get_node_id(), this->get_total_entries(), (this->is_leaf() ? "LEAF" : "INTERIOR"), - get_var_node_header_const()->m_available_space); - if (!this->is_leaf() && (this->has_valid_edge())) { - fmt::format_to(std::back_inserter(str), "edge_id={} ", this->get_edge_id()); - } - for (uint32_t i{0}; i < this->get_total_entries(); ++i) { - V val; - get_nth_value(i, &val, false); - fmt::format_to(std::back_inserter(str), "{}Entry{} [Key={} Val={}]", (print_friendly ? "\n\t" : " "), i + 1, - get_nth_key(i, false).to_string(), val.to_string()); - } - return str; - } - - int compare_nth_key(const BtreeKey& cmp_key, uint32_t ind) const { - return get_nth_key(ind, false).compare(cmp_key); - } - - /*int compare_nth_key_range(const BtreeKeyRange& range, uint32_t ind) const { - return get_nth_key(ind, false).compare_range(range); - }*/ - -protected: - uint32_t insert(uint32_t ind, const sisl::blob& key_blob, const sisl::blob& val_blob) { - assert(ind <= this->get_total_entries()); - LOGTRACEMOD(btree, "{}:{}:{}:{}", ind, get_var_node_header()->tail_offset(), get_arena_free_space(), - get_var_node_header()->available_space()); - uint16_t obj_size = key_blob.size + val_blob.size; - uint16_t to_insert_size = obj_size + this->get_record_size(); - if (to_insert_size > get_var_node_header()->available_space()) { - LOGDEBUGMOD(btree, "insert failed insert size {} available size {}", to_insert_size, - get_var_node_header()->available_space()); - return 0; - } - - // If we don't have enough space in the tail arena area, we need to compact and get the space. - if (to_insert_size > get_arena_free_space()) { - compact(); - assert(to_insert_size <= - get_arena_free_space()); // Expect after compaction to have available space to insert - } - - // Create a room for a new record - uint8_t* rec_ptr = uintptr_cast(get_nth_record_mutable(ind)); - memmove((void*)(rec_ptr + this->get_record_size()), rec_ptr, - (this->get_total_entries() - ind) * this->get_record_size()); - - // Move up the tail area - assert(get_var_node_header()->m_tail_arena_offset > obj_size); - get_var_node_header()->m_tail_arena_offset -= obj_size; - get_var_node_header()->m_available_space -= (obj_size + this->get_record_size()); - - // Create a new record - set_nth_key_len(rec_ptr, key_blob.size); - set_nth_value_len(rec_ptr, val_blob.size); - set_record_data_offset(rec_ptr, get_var_node_header()->m_tail_arena_offset); - - // Copy the contents of key and value in the offset - uint8_t* raw_data_ptr = offset_to_ptr_mutable(get_var_node_header()->m_tail_arena_offset); - memcpy(raw_data_ptr, key_blob.bytes, key_blob.size); - raw_data_ptr += key_blob.size; - memcpy(raw_data_ptr, val_blob.bytes, val_blob.size); - - // Increment the entries and generation number - this->inc_entries(); - this->inc_gen(); - -#ifndef NDEBUG - this->validate_sanity(); -#endif - -#ifdef DEBUG - // print(); -#endif - return to_insert_size; - } - - /* - * This method compacts and provides contiguous tail arena space - * so that available space == tail arena space - * */ - void compact() { -#ifndef NDEBUG - this->validate_sanity(); -#endif - // temp ds to sort records in stack space - struct Record { - uint16_t m_obj_offset; - uint16_t orig_record_index; - }; - - uint32_t no_of_entries = this->get_total_entries(); - if (no_of_entries == 0) { - // this happens when there is only entry and in update, we first remove and than insert - get_var_node_header()->m_tail_arena_offset = get_var_node_header()->m_init_available_space; - LOGTRACEMOD(btree, "Full available size reclaimed"); - return; - } - std::vector< Record > rec; - rec.reserve(no_of_entries); - - uint32_t ind = 0; - while (ind < no_of_entries) { - btree_obj_record* rec_ptr = (btree_obj_record*)(get_nth_record_mutable(ind)); - rec[ind].m_obj_offset = rec_ptr->m_obj_offset; - rec[ind].orig_record_index = ind; - ind++; - } - - // use comparator to sort based on m_obj_offset in desc order - std::sort(rec.begin(), rec.begin() + no_of_entries, - [](Record const& a, Record const& b) -> bool { return b.m_obj_offset < a.m_obj_offset; }); - - uint16_t last_offset = get_var_node_header()->m_init_available_space; - - ind = 0; - uint16_t sparce_space = 0; - // loop records - while (ind < no_of_entries) { - uint16_t total_key_value_len = - get_nth_key_len(rec[ind].orig_record_index) + get_nth_value_len(rec[ind].orig_record_index); - sparce_space = last_offset - (rec[ind].m_obj_offset + total_key_value_len); - if (sparce_space > 0) { - // do compaction - uint8_t* old_key_ptr = (uint8_t*)get_nth_obj(rec[ind].orig_record_index); - uint8_t* raw_data_ptr = old_key_ptr + sparce_space; - memmove(raw_data_ptr, old_key_ptr, total_key_value_len); - - // update original record - btree_obj_record* rec_ptr = (btree_obj_record*)(get_nth_record_mutable(rec[ind].orig_record_index)); - rec_ptr->m_obj_offset += sparce_space; - - last_offset = rec_ptr->m_obj_offset; - - } else { - assert(sparce_space == 0); - last_offset = rec[ind].m_obj_offset; - } - ind++; - } - get_var_node_header()->m_tail_arena_offset = last_offset; -#ifndef NDEBUG - this->validate_sanity(); -#endif - LOGTRACEMOD(btree, "Sparse space reclaimed:{}", sparce_space); - } - - const uint8_t* get_nth_record(uint32_t ind) const { - return this->node_data_area_const() + sizeof(var_node_header) + (ind * this->get_record_size()); - } - uint8_t* get_nth_record_mutable(uint32_t ind) { - return this->node_data_area() + sizeof(var_node_header) + (ind * this->get_record_size()); - } - - const uint8_t* get_nth_obj(uint32_t ind) const { - return offset_to_ptr(((btree_obj_record*)get_nth_record(ind))->m_obj_offset); - } - uint8_t* get_nth_obj_mutable(uint32_t ind) { - return offset_to_ptr_mutable(((btree_obj_record*)get_nth_record(ind))->m_obj_offset); - } - - void set_record_data_offset(uint8_t* rec_ptr, uint16_t offset) { - auto r = (btree_obj_record*)rec_ptr; - r->m_obj_offset = offset; - } - - uint8_t* offset_to_ptr_mutable(uint16_t offset) { return this->node_data_area() + offset; } - - const uint8_t* offset_to_ptr(uint16_t offset) const { return this->node_data_area_const() + offset; } - - ///////////// Other Private Methods ////////////////// - inline var_node_header* get_var_node_header() { return r_cast< var_node_header* >(this->node_data_area()); } - - inline const var_node_header* get_var_node_header_const() const { - return r_cast< const var_node_header* >(this->node_data_area_const()); - } - - uint16_t get_arena_free_space() const { - return get_var_node_header_const()->m_tail_arena_offset - sizeof(var_node_header) - - (this->get_total_entries() * this->get_record_size()); - } -}; - -template < typename K, typename V > -class VarKeySizeNode : public VariableNode< K, V > { -public: - VarKeySizeNode(uint8_t* node_buf, bnodeid_t id, bool init, bool is_leaf, const BtreeConfig& cfg) : - VariableNode< K, V >(node_buf, id, init, is_leaf, cfg) { - this->set_node_type(btree_node_type::VAR_KEY); - } - - uint16_t get_nth_key_len(uint32_t ind) const override { - return r_cast< const var_key_record* >(this->get_nth_record(ind))->m_key_len; - } - uint16_t get_nth_value_len(uint32_t ind) const override { return V::get_fixed_size(); } - uint16_t get_record_size() const override { return sizeof(var_key_record); } - - void set_nth_key_len(uint8_t* rec_ptr, uint16_t key_len) override { - r_cast< var_key_record* >(rec_ptr)->m_key_len = key_len; - } - void set_nth_value_len(uint8_t* rec_ptr, uint16_t value_len) override { assert(value_len == V::get_fixed_size()); } - -private: -#pragma pack(1) - struct var_key_record : public btree_obj_record { - uint16_t m_key_len : 14; - uint16_t reserved : 2; - }; -#pragma pack() -}; - -/***************** Template Specialization for variable value records ******************/ -template < typename K, typename V > -class VarValueSizeNode : public VariableNode< K, V > { -public: - VarValueSizeNode(uint8_t* node_buf, bnodeid_t id, bool init, bool is_leaf, const BtreeConfig& cfg) : - VariableNode< K, V >(node_buf, id, init, is_leaf, cfg) { - this->set_node_type(btree_node_type::VAR_VALUE); - } - - uint16_t get_nth_key_len(uint32_t ind) const override { return K::get_fixed_size(); } - uint16_t get_nth_value_len(uint32_t ind) const override { - return r_cast< const var_value_record* >(this->get_nth_record(ind))->m_value_len; - } - uint16_t get_record_size() const override { return sizeof(var_value_record); } - - void set_nth_key_len(uint8_t* rec_ptr, uint16_t key_len) override { assert(key_len == K::get_fixed_size()); } - void set_nth_value_len(uint8_t* rec_ptr, uint16_t value_len) override { - r_cast< var_value_record* >(rec_ptr)->m_value_len = value_len; - } - -private: -#pragma pack(1) - struct var_value_record : public btree_obj_record { - uint16_t m_value_len : 14; - uint16_t reserved : 2; - }; -#pragma pack() -}; - -/***************** Template Specialization for variable object records ******************/ -template < typename K, typename V > -class VarObjSizeNode : public VariableNode< K, V > { -public: - VarObjSizeNode(uint8_t* node_buf, bnodeid_t id, bool init, bool is_leaf, const BtreeConfig& cfg) : - VariableNode< K, V >(node_buf, id, init, is_leaf, cfg) { - this->set_node_type(btree_node_type::VAR_OBJECT); - } - - uint16_t get_nth_key_len(uint32_t ind) const override { - return r_cast< const var_obj_record* >(this->get_nth_record(ind))->m_key_len; - } - uint16_t get_nth_value_len(uint32_t ind) const override { - return r_cast< const var_obj_record* >(this->get_nth_record(ind))->m_value_len; - } - uint16_t get_record_size() const override { return sizeof(var_obj_record); } - - void set_nth_key_len(uint8_t* rec_ptr, uint16_t key_len) override { - r_cast< var_obj_record* >(rec_ptr)->m_key_len = key_len; - } - void set_nth_value_len(uint8_t* rec_ptr, uint16_t value_len) override { - r_cast< var_obj_record* >(rec_ptr)->m_value_len = value_len; - } - -private: -#pragma pack(1) - struct var_obj_record : public btree_obj_record { - uint16_t m_key_len : 14; - uint16_t reserved : 2; - - uint16_t m_value_len : 14; - uint16_t reserved2 : 2; - }; -#pragma pack() -}; -} // namespace btree -} // namespace sisl diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 78469f6b..4eac4a39 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -28,6 +28,7 @@ target_link_libraries(flip sisl gRPC::grpc++ spdlog::spdlog + nlohmann_json::nlohmann_json ) add_executable(test_flip lib/test_flip.cpp) From b77ec4352dbffaf0d6f18bbeddaf9d04470337d0 Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Thu, 29 Sep 2022 16:35:05 -0700 Subject: [PATCH 138/385] Fixed release build errors because of unused variables --- src/flip/CMakeLists.txt | 2 +- .../client/local/test_flip_local_client.cpp | 140 ++++++++++-------- src/flip/lib/flip_rpc_server.cpp | 13 +- src/flip/lib/test_flip.cpp | 139 +++++++++-------- src/logging/lib/logging.cpp | 26 ++-- src/logging/logging.h | 2 +- 6 files changed, 172 insertions(+), 150 deletions(-) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 4eac4a39..3173450b 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -40,6 +40,6 @@ target_link_libraries(test_flip_server flip cxxopts::cxxopts) add_executable(test_flip_local_client client/local/test_flip_local_client.cpp) target_link_libraries(test_flip_local_client flip cxxopts::cxxopts) - +add_test(NAME TestFlipLocalClient COMMAND test_flip_local_client) diff --git a/src/flip/client/local/test_flip_local_client.cpp b/src/flip/client/local/test_flip_local_client.cpp index 0e37e9ec..8c16f0d8 100644 --- a/src/flip/client/local/test_flip_local_client.cpp +++ b/src/flip/client/local/test_flip_local_client.cpp @@ -20,6 +20,7 @@ #include #include "options/options.h" +#include "logging/logging.h" using namespace flip; @@ -32,11 +33,12 @@ void run_and_validate_noret_flip() { int valid_cmd = 1; int invalid_cmd = -1; - assert(!g_flip.test_flip("noret_flip", invalid_cmd)); - assert(g_flip.test_flip("noret_flip", valid_cmd)); - assert(!g_flip.test_flip("noret_flip", invalid_cmd)); - assert(g_flip.test_flip("noret_flip", valid_cmd)); - assert(!g_flip.test_flip("noret_flip", valid_cmd)); // Not more than 2 + RELEASE_ASSERT(!g_flip.test_flip("noret_flip", invalid_cmd), "notret_flip invalid cmd succeeeded - unexpected"); + RELEASE_ASSERT(g_flip.test_flip("noret_flip", valid_cmd), "notret_flip valid cmd failed - unexpected"); + RELEASE_ASSERT(!g_flip.test_flip("noret_flip", invalid_cmd), "notret_flip valid cmd succeeeded - unexpected"); + RELEASE_ASSERT(g_flip.test_flip("noret_flip", valid_cmd), "notret_flip valid cmd failed - unexpected"); + RELEASE_ASSERT(!g_flip.test_flip("noret_flip", valid_cmd), + "notret_flip valid cmd succeeeded - no more than 2 expected to succeed"); // Not more than 2 } void run_and_validate_ret_flip() { @@ -46,21 +48,21 @@ void run_and_validate_ret_flip() { std::string invalid_dev_name = "/boot/sda"; auto result = g_flip.get_test_flip< std::string >("simval_flip", my_vol, valid_dev_name); - assert(result); - assert(result.get() == "Simulated error value"); + RELEASE_ASSERT(result, "get_test_flip failed for valid conditions - unexpected"); + RELEASE_ASSERT_EQ(result.get(), "Simulated error value", "Incorrect flip returned"); result = g_flip.get_test_flip< std::string >("simval_flip", unknown_vol, valid_dev_name); - assert(!result); + RELEASE_ASSERT(!result, "get_test_flip succeeded for incorrect conditions - unexpected"); result = g_flip.get_test_flip< std::string >("simval_flip", my_vol, invalid_dev_name); - assert(!result); + RELEASE_ASSERT(!result, "get_test_flip succeeded for incorrect conditions - unexpected"); result = g_flip.get_test_flip< std::string >("simval_flip", my_vol, valid_dev_name); - assert(result); - assert(result.get() == "Simulated error value"); + RELEASE_ASSERT(result, "get_test_flip failed for valid conditions - unexpected"); + RELEASE_ASSERT_EQ(result.get(), "Simulated error value", "Incorrect flip returned"); result = g_flip.get_test_flip< std::string >("simval_flip", my_vol, valid_dev_name); - assert(!result); // Not more than 2 + RELEASE_ASSERT(!result, "get_test_flip freq set to 2, but 3rd time hit as well - unexpected"); // Not more than 2 } void run_and_validate_delay_flip() { @@ -71,19 +73,24 @@ void run_and_validate_delay_flip() { long invalid_size_bytes = 4096; std::shared_ptr< std::atomic< int > > closure_calls = std::make_shared< std::atomic< int > >(0); - assert(g_flip.delay_flip( - "delay_flip", [closure_calls]() { (*closure_calls)++; }, valid_cmd, valid_size_bytes1)); - assert(!g_flip.delay_flip( - "delay_flip", [closure_calls]() { (*closure_calls)++; }, invalid_cmd, valid_size_bytes1)); - assert(g_flip.delay_flip( - "delay_flip", [closure_calls]() { (*closure_calls)++; }, valid_cmd, valid_size_bytes2)); - assert(!g_flip.delay_flip( - "delay_flip", [closure_calls]() { (*closure_calls)++; }, valid_cmd, invalid_size_bytes)); - assert(!g_flip.delay_flip( - "delay_flip", [closure_calls]() { (*closure_calls)++; }, valid_cmd, valid_size_bytes1)); + RELEASE_ASSERT(g_flip.delay_flip( + "delay_flip", [closure_calls]() { (*closure_calls)++; }, valid_cmd, valid_size_bytes1), + "delay_flip failed for valid conditions - unexpected"); + RELEASE_ASSERT(!g_flip.delay_flip( + "delay_flip", [closure_calls]() { (*closure_calls)++; }, invalid_cmd, valid_size_bytes1), + "delay_flip succeeded for invalid conditions - unexpected"); + RELEASE_ASSERT(g_flip.delay_flip( + "delay_flip", [closure_calls]() { (*closure_calls)++; }, valid_cmd, valid_size_bytes2), + "delay_flip failed for valid conditions - unexpected"); + RELEASE_ASSERT(!g_flip.delay_flip( + "delay_flip", [closure_calls]() { (*closure_calls)++; }, valid_cmd, invalid_size_bytes), + "delay_flip succeeded for invalid conditions - unexpected"); + RELEASE_ASSERT(!g_flip.delay_flip( + "delay_flip", [closure_calls]() { (*closure_calls)++; }, valid_cmd, valid_size_bytes1), + "delay_flip hit more than the frequency set - unexpected"); sleep(2); - DEBUG_ASSERT_EQ((*closure_calls).load(), 2); + RELEASE_ASSERT_EQ((*closure_calls).load(), 2, "Not all delay flips hit are called back"); } void run_and_validate_delay_return_flip() { @@ -91,49 +98,54 @@ void run_and_validate_delay_return_flip() { double invalid_double = 1.85; std::shared_ptr< std::atomic< int > > closure_calls = std::make_shared< std::atomic< int > >(0); - assert(g_flip.get_delay_flip< std::string >( - "delay_simval_flip", - [closure_calls](std::string error) { - (*closure_calls)++; - DEBUG_ASSERT_EQ(error, "Simulated delayed errval"); - }, - valid_double)); - - assert(!g_flip.get_delay_flip< std::string >( - "delay_simval_flip", - [closure_calls](std::string error) { - assert(0); - (*closure_calls)++; - }, - invalid_double)); - - assert(g_flip.get_delay_flip< std::string >( - "delay_simval_flip", - [closure_calls](std::string error) { - DEBUG_ASSERT_EQ(error, "Simulated delayed errval"); - (*closure_calls)++; - }, - valid_double)); - - assert(!g_flip.get_delay_flip< std::string >( - "delay_simval_flip", - [closure_calls](std::string error) { - assert(0); - (*closure_calls)++; - }, - invalid_double)); - - assert(!g_flip.get_delay_flip< std::string >( - "delay_simval_flip", - [closure_calls](std::string error) { - DEBUG_ASSERT_EQ(error, "Simulated delayed errval"); - (*closure_calls)++; - LOGINFO("Called with error = {}", error); - }, - valid_double)); + RELEASE_ASSERT(g_flip.get_delay_flip< std::string >( + "delay_simval_flip", + [closure_calls](std::string error) { + (*closure_calls)++; + RELEASE_ASSERT_EQ(error, "Simulated delayed errval", "Invalid closure called"); + }, + valid_double), + "delay_flip failed for valid conditions - unexpected"); + + RELEASE_ASSERT(!g_flip.get_delay_flip< std::string >( + "delay_simval_flip", + [closure_calls](std::string error) { + RELEASE_ASSERT(false, "Invalid closure called"); + (*closure_calls)++; + }, + invalid_double), + "delay_flip succeeded for invalid conditions - unexpected"); + + RELEASE_ASSERT(g_flip.get_delay_flip< std::string >( + "delay_simval_flip", + [closure_calls](std::string error) { + RELEASE_ASSERT_EQ(error, "Simulated delayed errval", "Invalid closure called"); + (*closure_calls)++; + }, + valid_double), + "delay_flip failed for valid conditions - unexpected"); + + RELEASE_ASSERT(!g_flip.get_delay_flip< std::string >( + "delay_simval_flip", + [closure_calls](std::string error) { + RELEASE_ASSERT(false, "Invalid closure called"); + (*closure_calls)++; + }, + invalid_double), + "delay_flip succeeded for invalid conditions - unexpected"); + + RELEASE_ASSERT(!g_flip.get_delay_flip< std::string >( + "delay_simval_flip", + [closure_calls](std::string error) { + RELEASE_ASSERT_EQ(error, "Simulated delayed errval", "Invalid closure called"); + (*closure_calls)++; + LOGINFO("Called with error = {}", error); + }, + valid_double), + "delay_flip hit more than the frequency set - unexpected"); sleep(2); - DEBUG_ASSERT_EQ((*closure_calls).load(), 2); + RELEASE_ASSERT_EQ((*closure_calls).load(), 2, "Not all delay flips hit are called back"); } int main(int argc, char* argv[]) { diff --git a/src/flip/lib/flip_rpc_server.cpp b/src/flip/lib/flip_rpc_server.cpp index 94fa2d9e..90512242 100644 --- a/src/flip/lib/flip_rpc_server.cpp +++ b/src/flip/lib/flip_rpc_server.cpp @@ -27,7 +27,7 @@ namespace flip { grpc::Status FlipRPCServer::InjectFault(grpc::ServerContext* context, const FlipSpec* request, FlipResponse* response) { - // LOG(INFO) << "Flipspec request = " << request->DebugString() << "\n"; + LOGTRACEMOD(flip, "Flipspec request = {}", request->DebugString()); flip::Flip::instance().add(*request); response->set_success(true); return grpc::Status::OK; @@ -35,12 +35,12 @@ grpc::Status FlipRPCServer::InjectFault(grpc::ServerContext* context, const Flip grpc::Status FlipRPCServer::GetFaults(grpc::ServerContext* context, const FlipNameRequest* request, FlipListResponse* response) { - // LOG(INFO) << "GetFaults request = " << request->DebugString(); + LOGTRACEMOD(flip, "GetFaults request = {}", request->DebugString()); auto resp = request->name().size() ? flip::Flip::instance().get(request->name()) : flip::Flip::instance().get_all(); for (const auto& r : resp) { response->add_infos()->set_info(r); } - // LOG(INFO) << "GetFaults response = " << response->DebugString(); + LOGTRACEMOD(flip, "GetFaults response = {}", response->DebugString()); return grpc::Status::OK; } @@ -49,7 +49,7 @@ class FlipRPCServiceWrapper : public FlipRPCServer::Service { void print_method_names() { for (auto i = 0; i < 2; ++i) { auto method = (::grpc::internal::RpcServiceMethod*)GetHandler(i); - if (method) { std::cout << "Method name = " << method->name() << "\n"; } + if (method) { LOGINFOMOD(flip, "Method name = {}", method->name()); } } } }; @@ -60,10 +60,9 @@ void FlipRPCServer::rpc_thread() { grpc::ServerBuilder builder; builder.AddListeningPort(server_address, grpc::InsecureServerCredentials()); - builder.RegisterService((FlipRPCServer*)&service); - service.print_method_names(); + builder.RegisterService((FlipRPCServer::Service*)&service); std::unique_ptr< grpc::Server > server(builder.BuildAndStart()); - std::cout << "Server listening on " << server_address << std::endl; + LOGINFOMOD(flip, "Flip GRPC Server listening on {}", server_address); server->Wait(); } diff --git a/src/flip/lib/test_flip.cpp b/src/flip/lib/test_flip.cpp index ace5bc04..9be3963e 100644 --- a/src/flip/lib/test_flip.cpp +++ b/src/flip/lib/test_flip.cpp @@ -45,18 +45,18 @@ void run_and_validate_ret_flip(flip::Flip* flip) { std::string unknown_coll = "unknown_collection"; auto result = flip->get_test_flip< std::string >("ret_fspec", my_coll); - assert(result); - assert(result.get() == "Error simulated value"); + RELEASE_ASSERT(result, "get_test_flip failed for valid conditions - unexpected"); + RELEASE_ASSERT_EQ(result.get(), "Error simulated value", "Incorrect flip returned"); result = flip->get_test_flip< std::string >("ret_fspec", unknown_coll); - assert(!result); + RELEASE_ASSERT(!result, "get_test_flip succeeded for incorrect conditions - unexpected"); result = flip->get_test_flip< std::string >("ret_fspec", my_coll); - assert(result); - assert(result.get() == "Error simulated value"); + RELEASE_ASSERT(result, "get_test_flip failed for valid conditions - unexpected"); + RELEASE_ASSERT_EQ(result.get(), "Error simulated value", "Incorrect flip returned"); result = flip->get_test_flip< std::string >("ret_fspec", my_coll); - assert(!result); // Not more than 2 + RELEASE_ASSERT(!result, "get_test_flip freq set to 2, but 3rd time hit as well - unexpected"); // Not more than 2 } void create_check_fspec(flip::FlipSpec* fspec) { @@ -76,11 +76,14 @@ void run_and_validate_check_flip(flip::Flip* flip) { int valid_cmd = 1; int invalid_cmd = -1; - assert(!flip->test_flip("check_fspec", invalid_cmd)); - assert(flip->test_flip("check_fspec", valid_cmd)); - assert(!flip->test_flip("check_fspec", invalid_cmd)); - assert(flip->test_flip("check_fspec", valid_cmd)); - assert(!flip->test_flip("check_fspec", valid_cmd)); // Not more than 2 + RELEASE_ASSERT(!flip->test_flip("check_fspec", invalid_cmd), + "test_flip succeeded for incorrect conditions - unexpected"); + RELEASE_ASSERT(flip->test_flip("check_fspec", valid_cmd), "test_flip failed for valid conditions - unexpected"); + RELEASE_ASSERT(!flip->test_flip("check_fspec", invalid_cmd), + "test_flip succeeded for incorrect conditions - unexpected"); + RELEASE_ASSERT(flip->test_flip("check_fspec", valid_cmd), "test_flip failed for valid conditions - unexpected"); + RELEASE_ASSERT(!flip->test_flip("check_fspec", valid_cmd), + "test_flip freq set to 2, but 3rd time hit as well - unexpected"); // Not more than 2 } void create_delay_fspec(flip::FlipSpec* fspec) { @@ -102,23 +105,28 @@ void run_and_validate_delay_flip(flip::Flip* flip) { int invalid_cmd = -1; std::shared_ptr< std::atomic< int > > closure_calls = std::make_shared< std::atomic< int > >(0); - assert(flip->delay_flip( - "delay_fspec", [closure_calls]() { (*closure_calls)++; }, valid_cmd)); + RELEASE_ASSERT(flip->delay_flip( + "delay_fspec", [closure_calls]() { (*closure_calls)++; }, valid_cmd), + "delay_flip failed for valid conditions - unexpected"); - assert(!flip->delay_flip( - "delay_fspec", [closure_calls]() { (*closure_calls)++; }, invalid_cmd)); + RELEASE_ASSERT(!flip->delay_flip( + "delay_fspec", [closure_calls]() { (*closure_calls)++; }, invalid_cmd), + "delay_flip succeeded for invalid conditions - unexpected"); - assert(flip->delay_flip( - "delay_fspec", [closure_calls]() { (*closure_calls)++; }, valid_cmd)); + RELEASE_ASSERT(flip->delay_flip( + "delay_fspec", [closure_calls]() { (*closure_calls)++; }, valid_cmd), + "delay_flip failed for valid conditions - unexpected"); - assert(!flip->delay_flip( - "delay_fspec", [closure_calls]() { (*closure_calls)++; }, invalid_cmd)); + RELEASE_ASSERT(!flip->delay_flip( + "delay_fspec", [closure_calls]() { (*closure_calls)++; }, invalid_cmd), + "delay_flip succeeded for invalid conditions - unexpected"); - assert(!flip->delay_flip( - "delay_fspec", [closure_calls]() { (*closure_calls)++; }, valid_cmd)); + RELEASE_ASSERT(!flip->delay_flip( + "delay_fspec", [closure_calls]() { (*closure_calls)++; }, valid_cmd), + "delay_flip hit more than the frequency set - unexpected"); sleep(2); - DEBUG_ASSERT_EQ((*closure_calls).load(), 2); + RELEASE_ASSERT_EQ((*closure_calls).load(), 2, "Not all delay flips hit are called back"); } void create_delay_ret_fspec(flip::FlipSpec* fspec) { @@ -143,49 +151,54 @@ void run_and_validate_delay_return_flip(flip::Flip* flip) { int invalid_cmd = -1; std::shared_ptr< std::atomic< int > > closure_calls = std::make_shared< std::atomic< int > >(0); - assert(flip->get_delay_flip< std::string >( - "delay_ret_fspec", - [closure_calls](std::string error) { - (*closure_calls)++; - DEBUG_ASSERT_EQ(error, "Delayed error simulated value"); - }, - valid_cmd)); - - assert(!flip->get_delay_flip< std::string >( - "delay_ret_fspec", - [closure_calls](std::string error) { - assert(0); - (*closure_calls)++; - }, - invalid_cmd)); - - assert(flip->get_delay_flip< std::string >( - "delay_ret_fspec", - [closure_calls](std::string error) { - DEBUG_ASSERT_EQ(error, "Delayed error simulated value"); - (*closure_calls)++; - }, - valid_cmd)); - - assert(!flip->get_delay_flip< std::string >( - "delay_ret_fspec", - [closure_calls](std::string error) { - assert(0); - (*closure_calls)++; - }, - invalid_cmd)); - - assert(!flip->get_delay_flip< std::string >( - "delay_ret_fspec", - [closure_calls](std::string error) { - DEBUG_ASSERT_EQ(error, "Delayed error simulated value"); - (*closure_calls)++; - LOGINFO("Called with error = {}", error); - }, - valid_cmd)); + RELEASE_ASSERT(flip->get_delay_flip< std::string >( + "delay_ret_fspec", + [closure_calls](std::string error) { + (*closure_calls)++; + DEBUG_ASSERT_EQ(error, "Delayed error simulated value"); + }, + valid_cmd), + "delay_flip failed for valid conditions - unexpected"); + + RELEASE_ASSERT(!flip->get_delay_flip< std::string >( + "delay_ret_fspec", + [closure_calls](std::string error) { + assert(0); + (*closure_calls)++; + }, + invalid_cmd), + "delay_flip succeeded for invalid conditions - unexpected"); + + RELEASE_ASSERT(flip->get_delay_flip< std::string >( + "delay_ret_fspec", + [closure_calls](std::string error) { + DEBUG_ASSERT_EQ(error, "Delayed error simulated value"); + (*closure_calls)++; + }, + valid_cmd), + "delay_flip failed for valid conditions - unexpected"); + + RELEASE_ASSERT(!flip->get_delay_flip< std::string >( + "delay_ret_fspec", + [closure_calls](std::string error) { + assert(0); + (*closure_calls)++; + }, + invalid_cmd), + "delay_flip succeeded for invalid conditions - unexpected"); + + RELEASE_ASSERT(!flip->get_delay_flip< std::string >( + "delay_ret_fspec", + [closure_calls](std::string error) { + DEBUG_ASSERT_EQ(error, "Delayed error simulated value"); + (*closure_calls)++; + LOGINFO("Called with error = {}", error); + }, + valid_cmd), + "delay_flip hit more than the frequency set - unexpected"); sleep(2); - DEBUG_ASSERT_EQ((*closure_calls).load(), 2); + RELEASE_ASSERT_EQ((*closure_calls).load(), 2, "Not all delay flips hit are called back"); } #if 0 diff --git a/src/logging/lib/logging.cpp b/src/logging/lib/logging.cpp index d66edf48..3bef31fb 100644 --- a/src/logging/lib/logging.cpp +++ b/src/logging/lib/logging.cpp @@ -215,15 +215,19 @@ void set_global_logger(N const& name, S const& sinks, S const& crit_sinks) { spdlog::register_logger(glob_critical_logger); } -static void set_module_log_level(const std::string& module_name, const spdlog::level::level_enum level) { - const auto sym{std::string{"module_level_"} + module_name}; - auto* const mod_level{static_cast< spdlog::level::level_enum* >(::dlsym(RTLD_DEFAULT, sym.c_str()))}; +static spdlog::level::level_enum* to_mod_log_level_ptr(const std::string& module_name) { + const auto sym = std::string{"module_level_"} + module_name; + auto* mod_level = static_cast< spdlog::level::level_enum* >(::dlsym(RTLD_DEFAULT, sym.c_str())); if (mod_level == nullptr) { - LOGWARN("Unable to locate the module {} in registered modules", module_name); - return; + std::cout << fmt::format("Unable to locate the module {} in registered modules, error: {}\n", module_name, + dlerror()); } + return mod_level; +} - *mod_level = level; +static void set_module_log_level(const std::string& module_name, const spdlog::level::level_enum level) { + auto* mod_level = to_mod_log_level_ptr(module_name); + if (mod_level != nullptr) { *mod_level = level; } } static std::string setup_modules() { @@ -347,14 +351,8 @@ void SetModuleLogLevel(const std::string& module_name, const spdlog::level::leve } spdlog::level::level_enum GetModuleLogLevel(const std::string& module_name) { - const auto sym{std::string{"module_level_"} + module_name}; - auto* const mod_level{static_cast< spdlog::level::level_enum* >(::dlsym(RTLD_DEFAULT, sym.c_str()))}; - if (mod_level == nullptr) { - LOGWARN("Unable to locate the module {} in registered modules", module_name); - return spdlog::level::level_enum::off; - } - - return *mod_level; + auto* mod_level = to_mod_log_level_ptr(module_name); + return mod_level ? *mod_level : spdlog::level::level_enum::off; } nlohmann::json GetAllModuleLogLevel() { diff --git a/src/logging/logging.h b/src/logging/logging.h index 7593c368..dd7d2e4a 100644 --- a/src/logging/logging.h +++ b/src/logging/logging.h @@ -431,7 +431,7 @@ MODLEVELDEC(_, _, base) #define MODLEVELDEF(r, l, module) \ extern "C" { \ - spdlog::level::level_enum BOOST_PP_CAT(module_level_, module){l}; \ + __attribute__((visibility("default"))) spdlog::level::level_enum BOOST_PP_CAT(module_level_, module){l}; \ } #define MOD_LEVEL_STRING(r, _, module) BOOST_PP_STRINGIZE(module), From a18f7dd3fac7e2931708b9841c3f98c2ec6af9aa Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Mon, 3 Oct 2022 09:33:26 -0700 Subject: [PATCH 139/385] Export flip library along with sisl lib, gcc-10 support for future (#30) --- conanfile.py | 4 +- src/fds/CMakeLists.txt | 6 - src/fds/tests/group_commit.cpp | 379 ------------------------ src/logging/lib/backtrace.cpp | 6 +- src/sisl_version/tests/test_version.cpp | 12 +- 5 files changed, 13 insertions(+), 394 deletions(-) delete mode 100644 src/fds/tests/group_commit.cpp diff --git a/conanfile.py b/conanfile.py index a1403ba8..0fccf35a 100644 --- a/conanfile.py +++ b/conanfile.py @@ -109,6 +109,8 @@ def package(self): copy(self, "*.dylib*", self.build_folder, lib_dir, keep_path=False) copy(self, "*.dll*", self.build_folder, join(self.package_folder, "bin"), keep_path=False) copy(self, "*.so*", self.build_folder, lib_dir, keep_path=False) + copy(self, "*.proto", join(self.source_folder, "src/flip/proto/"), join(self.package_folder, "proto/flip/"), keep_path=False) + copy(self, "*", join(self.source_folder, "src/flip/client/python/"), join(self.package_folder, "bindings/flip/python/"), keep_path=False) hdr_dir = join(self.package_folder, join("include", "sisl")) copy(self, "*.hpp", join(self.source_folder, "src"), hdr_dir, keep_path=True, excludes="flip/*") @@ -120,7 +122,7 @@ def package(self): copy(self, "*.h", join(self.source_folder, "src/flip"), flip_hdr_dir, keep_path=False) def package_info(self): - self.cpp_info.libs = ["sisl"] + self.cpp_info.libs = ["sisl", "flip"] self.cpp_info.cppflags.extend(["-Wno-unused-local-typedefs", "-fconcepts"]) if self.settings.os == "Linux": diff --git a/src/fds/CMakeLists.txt b/src/fds/CMakeLists.txt index 7455e22b..80ab4c40 100644 --- a/src/fds/CMakeLists.txt +++ b/src/fds/CMakeLists.txt @@ -20,12 +20,6 @@ add_executable(test_stream_tracker ${TEST_STREAM_TRACKER_SOURCES}) target_link_libraries(test_stream_tracker sisl ${COMMON_DEPS} GTest::gtest) #add_test(NAME HttpServerTest COMMAND test_http_server) -set(GROUP_COMMIT_SOURCES - tests/group_commit.cpp - ) -add_executable(group_commit ${GROUP_COMMIT_SOURCES}) -target_link_libraries(group_commit sisl ${COMMON_DEPS}) - set(TEST_ATOMIC_STATUS_COUNTER_SOURCES tests/test_atomic_status_counter.cpp ) diff --git a/src/fds/tests/group_commit.cpp b/src/fds/tests/group_commit.cpp deleted file mode 100644 index 06047c2f..00000000 --- a/src/fds/tests/group_commit.cpp +++ /dev/null @@ -1,379 +0,0 @@ -/********************************************************************************* - * Modifications Copyright 2017-2019 eBay Inc. - * - * Author/Developer(s): Harihara Kadayam - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed - * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - * - *********************************************************************************/ -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef __linux__ -#include -#endif - -#include -#include "logging/logging.h" -#include - -#include "memvector.hpp" -#include "obj_allocator.hpp" -#include "stream_tracker.hpp" - -using namespace sisl; - -SISL_LOGGING_INIT(group_commit) - -#pragma pack(1) -struct log_group_header { - uint32_t magic; - uint32_t n_log_records; // Total number of log records - uint32_t group_size; // Total size of this group including this header - uint32_t inline_data_size; // Out of group size how much data is inlined along with log_record_header - uint32_t prev_grp_checksum; - uint32_t cur_grp_checksum; -}; -#pragma pack() - -template < typename charT, typename traits > -std::basic_ostream< charT, traits >& operator<<(std::basic_ostream< charT, traits >& outStream, - const log_group_header& h) { - // copy the stream formatting - std::basic_ostringstream< charT, traits > outStringStream; - outStringStream.copyfmt(outStream); - - // output the date time - const auto s{fmt::format("magic = {} n_log_records = {} group_size = {} inline_data_size = {} " - "prev_grp_checksum = {} cur_grp_checksum = {}", - h.magic, h.n_log_records, h.group_size, h.inline_data_size, h.prev_grp_checksum, - h.cur_grp_checksum)}; - outStringStream << s; - - // print the stream - outStream << outStringStream.str(); - - return outStream; -} - -#pragma pack(1) -struct serialized_log_record { - int64_t log_idx; - uint32_t size : 31; - uint32_t is_inlined : 1; - uint8_t data[1]; -}; -#pragma pack() - -static constexpr uint32_t dma_boundary{512}; - -class LogGroup; -struct log_record { - static constexpr uint32_t inline_size{dma_boundary}; - - serialized_log_record* pers_record{nullptr}; - uint8_t* data_ptr; - uint32_t size; - void* context; - LogGroup* log_group; // Group this record is part of - - log_record(uint8_t* const d, const uint32_t sz, void* const ctx) { - data_ptr = d; - size = sz; - context = ctx; - } - - serialized_log_record* create_serialized(uint8_t* const buf_ptr, const int64_t log_idx) const { - serialized_log_record* const sr{reinterpret_cast< serialized_log_record* >(buf_ptr)}; - sr->log_idx = log_idx; - sr->size = size; - sr->is_inlined = is_inlinebale(); - if (is_inlinebale()) { - std::memcpy(static_cast< void* >(sr->data), static_cast< const void* >(data_ptr), size); - } - return sr; - } - - // NOTE: serialized_log_record contains 1 byte of data already which is why - 1 - size_t inlined_size() const { - return sizeof(serialized_log_record) + (((size > 0) && is_inlinebale()) ? size - 1 : 0); - } - size_t serialized_size() const { return sizeof(serialized_log_record) + ((size > 0) ? (size - 1) : 0); } - bool is_inlinebale() const { return (size <= inline_size); } - static size_t serialized_size(const uint32_t sz) { - return sizeof(serialized_log_record) + ((sz > 0 ? (sz - 1) : 0)); - } -}; - -static constexpr uint32_t flush_idx_frequency{64}; - -struct iovec_wrapper : public iovec { - iovec_wrapper(void* const base, const size_t len) { - iov_base = base; - iov_len = len; - } -}; - -class LogGroup { - template < typename charT, typename traits > - friend std::basic_ostream< charT, traits >& operator<<(std::basic_ostream< charT, traits >& outStream, - const LogGroup& lg); - -public: - static constexpr uint32_t estimated_iovs{10}; - static constexpr size_t inline_log_buf_size{log_record::inline_size * flush_idx_frequency}; - static constexpr size_t max_log_group_size{8192}; - - typedef sisl::FlexArray< iovec_wrapper, estimated_iovs > iovec_array; - friend class LogDev; - - LogGroup() { - m_cur_log_buf = &m_log_buf[0]; - m_cur_buf_len = inline_log_buf_size; - m_cur_buf_pos = sizeof(log_group_header); - m_overflow_log_buf = nullptr; - m_nrecords = 0; - m_total_non_inlined_size = 0; - - m_iovecs.emplace_back(static_cast< void* >(m_cur_log_buf), 0); - } - - void create_overflow_buf(const uint32_t min_needed) { - const uint32_t new_len{std::max(min_needed, m_cur_buf_len * 2)}; - auto new_buf{std::unique_ptr< uint8_t[] >(new uint8_t[new_len])}; - std::memcpy(static_cast< void* >(new_buf.get()), static_cast< const void* >(m_cur_log_buf), m_cur_buf_len); - m_overflow_log_buf = std::move(new_buf); - m_cur_log_buf = m_overflow_log_buf.get(); - m_cur_buf_len = new_len; - m_iovecs[0].iov_base = static_cast< void* >(m_cur_log_buf); - } - - bool can_accomodate(const log_record& record) const { - return ((record.serialized_size() + m_cur_buf_pos + m_total_non_inlined_size) <= max_log_group_size); - } - - bool add_record(const log_record& record, const int64_t log_idx) { - if (!can_accomodate(record)) { - std::cout << "Will exceed max_log_group_size=" << max_log_group_size - << " if we add this record for idx=" << log_idx << " Hence stopping adding in this batch"; - return false; - } - - const auto size{record.inlined_size()}; - if ((m_cur_buf_pos + size) >= m_cur_buf_len) { create_overflow_buf(m_cur_buf_pos + size); } - - // If serialized size is within inline budget and also we have enough room to hold this data, we can copy - // them, instead of having a iovec element. - // std::cout << "size to insert=" << size << " inline_size=" << log_record::inline_size - // << " cur_buf_pos=" << m_cur_buf_pos << " inline_log_buf_size=" << inline_log_buf_size << "\n"; - record.create_serialized(&m_cur_log_buf[m_cur_buf_pos], log_idx); - m_cur_buf_pos += size; - m_iovecs[0].iov_len += size; - if (!record.is_inlinebale()) { - // TODO: Round this up to 512 byte boundary - m_iovecs.emplace_back(static_cast< void* >(record.data_ptr), record.size); - m_total_non_inlined_size += record.size; - } - m_nrecords++; - - return true; - } - - const iovec_array* finish() { - log_group_header* const hdr{header()}; - hdr->magic = 0xDABAF00D; - hdr->n_log_records = m_nrecords; - hdr->inline_data_size = m_cur_buf_pos; - hdr->group_size = hdr->inline_data_size + m_total_non_inlined_size; - hdr->prev_grp_checksum = 0; - hdr->cur_grp_checksum = 0; - - return &m_iovecs; - } - - log_group_header* header() const { return reinterpret_cast< log_group_header* >(m_cur_log_buf); } - iovec_array& iovecs() { return m_iovecs; } - - uint32_t data_size() const { return header()->group_size - sizeof(log_group_header); } - -private: - std::array< uint8_t, inline_log_buf_size > m_log_buf; - uint8_t* m_cur_log_buf{m_log_buf.data()}; - uint32_t m_cur_buf_len{inline_log_buf_size}; - uint32_t m_cur_buf_pos{sizeof(log_group_header)}; - - std::unique_ptr< uint8_t[] > m_overflow_log_buf; - - uint32_t m_nrecords{0}; - uint32_t m_total_non_inlined_size{0}; - - // Info about the final data - iovec_array m_iovecs; - int64_t m_flush_log_idx_from; - int64_t m_flush_log_idx_upto; - uint64_t m_log_dev_offset; -}; - -template < typename charT, typename traits > -std::basic_ostream< charT, traits >& operator<<(std::basic_ostream< charT, traits >& outStream, const LogGroup& lg) { - // copy the stream formatting - std::basic_ostringstream< charT, traits > outStringStream; - outStringStream.copyfmt(outStream); - - // output the date time - const auto s{fmt::format("-----------------------------------------------------------------\n" - "Header: [{}]\nLog_idx_range:[{} - {}] Offset={} non_inlined_size={}\n" - "-----------------------------------------------------------------\n", - *(reinterpret_cast< log_group_header* >(lg.m_cur_log_buf)), lg.m_flush_log_idx_from, - lg.m_flush_log_idx_upto, lg.m_log_dev_offset, lg.m_total_non_inlined_size)}; - outStringStream << s; - - // print the stream - outStream << outStringStream.str(); - - return outStream; -} - -typedef sisl::ObjectAllocator< LogGroup, 100 > LogGroupAllocator; - -class LogDev { -public: - typedef std::function< void(int64_t, uint64_t, void*) > log_append_cb_t; - void register_cb(const log_append_cb_t& cb) { m_append_cb = cb; } - - // static constexpr int64_t flush_threshold_size{ 4096 }; - static constexpr int64_t flush_threshold_size{100}; - static constexpr int64_t flush_data_threshold_size{flush_threshold_size - sizeof(log_group_header)}; - - int64_t append(uint8_t* const data, const uint32_t size, void* const cb_context) { - flush_if_needed(size); - const auto idx{m_log_idx.fetch_add(1, std::memory_order_acq_rel)}; - m_log_records.create(idx, data, size, cb_context); - return idx; - } - - void flush_if_needed(const uint32_t record_size) { - // If after adding the record size, if we have enough to flush, attempt to flush by setting the atomic bool - // variable. - const auto actual_size{record_size ? log_record::serialized_size(record_size) : 0}; - const auto pending_sz{m_pending_flush_size.fetch_add(actual_size, std::memory_order_relaxed) + actual_size}; - if (pending_sz >= flush_data_threshold_size) { - std::cout << "Pending size to flush is " << pending_sz << " greater than flush data threshold " - << flush_data_threshold_size << " Flushing now\n"; - bool expected_flushing = false; - if (m_is_flushing.compare_exchange_strong(expected_flushing, true, std::memory_order_acq_rel)) { - // We were able to win the flushing competition and now we gather all the flush data and reserve a slot. - auto lg = prepare_flush(); - m_pending_flush_size.fetch_sub(lg->data_size(), std::memory_order_relaxed); - std::cout << "After flushing prepared pending size is " << m_pending_flush_size.load() << "\n"; - dummy_do_io(lg->iovecs(), [lg, this](const bool success) { on_flush_completion(lg, success); }); - } - } - } - - LogGroup* prepare_flush() { - int64_t flushing_upto_idx{0}; - - auto* lg{LogGroupAllocator::make_object()}; - m_log_records.foreach_active(m_last_flush_idx + 1, - [&](const int64_t idx, const int64_t upto_idx, const log_record& record) -> bool { - if (lg->add_record(record, idx)) { - flushing_upto_idx = upto_idx; - return true; - } else { - return false; - } - }); - lg->finish(); - lg->m_flush_log_idx_from = m_last_flush_idx + 1; - lg->m_flush_log_idx_upto = flushing_upto_idx; - lg->m_log_dev_offset = reserve(lg->data_size() + sizeof(log_group_header)); - - std::cout << "Flushing upto log_idx = " << flushing_upto_idx << "\n"; - std::cout << "Log Group:\n" << *lg; - return lg; - } - - void on_flush_completion(LogGroup* const lg, const bool is_success) { - assert(is_success); - m_log_records.complete(lg->m_flush_log_idx_from, lg->m_flush_log_idx_upto); - m_last_flush_idx = lg->m_flush_log_idx_upto; - - for (auto idx = lg->m_flush_log_idx_from; idx <= lg->m_flush_log_idx_upto; ++idx) { - auto& record{m_log_records.at(idx)}; - m_append_cb(idx, lg->m_log_dev_offset, record.context); - } -#if 0 - if (upto_idx > (m_last_truncate_idx + LogDev::truncate_idx_frequency)) { - std::cout << "Truncating upto log_idx = " << upto_idx << "\n"; - m_log_records.truncate(); - } -#endif - m_is_flushing.store(false, std::memory_order_release); - LogGroupAllocator::deallocate(lg); - - // Try to do chain flush if its really needed. - flush_if_needed(0); - } - - void dummy_do_io(const LogGroup::iovec_array& iovecs, const std::function< void(bool) >& cb) { - // LOG INFO("iovecs with {} pieces", iovecs.size()); - for (size_t i{0}; i < iovecs.size(); ++i) { - std::cout << "Base = " << iovecs[i].iov_base << " Length = " << iovecs[i].iov_len << "\n"; - // LOGINFO("Base = {} Length = {}", iovec.iov_base, iovec.iov_len); - } - cb(true); - } - - uint64_t reserve(const uint32_t size) { - static uint64_t offset{0}; - auto cur_offset{offset}; - offset += size; - return cur_offset; - } - -public: - static constexpr uint32_t truncate_idx_frequency{flush_idx_frequency * 10}; - -private: - sisl::StreamTracker< log_record > m_log_records; - std::atomic< int64_t > m_log_idx{0}; - std::atomic< int64_t > m_pending_flush_size{0}; - std::atomic< bool > m_is_flushing{false}; - - // sisl::atomic_status_counter< flush_status_t, flush_status_t::Normal > m_flush_status; - int64_t m_last_flush_idx{-1}; - int64_t m_last_truncate_idx{-1}; - uint64_t m_offset{0}; - - log_append_cb_t m_append_cb; -}; - -static void on_append_completion(const int64_t idx, const uint64_t offset, void* const ctx) { - std::cout << "Append completed with log_idx = " << idx << " offset = " << offset << "\n"; -} - -int main(int argc, char* argv[]) { - std::array< std::string, 1024 > s; - LogDev ld; - ld.register_cb(on_append_completion); - - for (size_t i{0}; i < 200; ++i) { - s[i] = std::to_string(i); - ld.append(reinterpret_cast< uint8_t* >(const_cast< char* >(s[i].c_str())), s[i].size() + 1, nullptr); - } -} diff --git a/src/logging/lib/backtrace.cpp b/src/logging/lib/backtrace.cpp index f6019526..55d95129 100644 --- a/src/logging/lib/backtrace.cpp +++ b/src/logging/lib/backtrace.cpp @@ -155,12 +155,12 @@ template < typename... Args > } template < typename... Args > -[[maybe_unused]] void log_message(const char* const format, Args&&... args) { +[[maybe_unused]] void log_message(fmt::format_string< Args... > msg_fmt, Args&&... args) { auto& logger{sisl::logging::GetLogger()}; auto& critical_logger{sisl::logging::GetCriticalLogger()}; - if (logger) { logger->critical(format, std::forward< Args >(args)...); } - if (critical_logger) { critical_logger->critical(format, std::forward< Args >(args)...); } + if (logger) { logger->critical(msg_fmt, std::forward< Args >(args)...); } + if (critical_logger) { critical_logger->critical(msg_fmt, std::forward< Args >(args)...); } } #ifdef __linux__ diff --git a/src/sisl_version/tests/test_version.cpp b/src/sisl_version/tests/test_version.cpp index a432801f..a868cac6 100644 --- a/src/sisl_version/tests/test_version.cpp +++ b/src/sisl_version/tests/test_version.cpp @@ -17,13 +17,15 @@ void entry() { TEST(entryTest, entry) { entry(); - const std::string dummy_ver{fmt::format("{0}", sisl::VersionMgr::getVersion("dummy"))}; - LOGINFO("Dummy ver. {}", dummy_ver); + std::stringstream dummy_ver; + dummy_ver << sisl::VersionMgr::getVersion("dummy"); + LOGINFO("Dummy ver. {}", dummy_ver.str()); - const std::string sisl_ver{fmt::format("{0}", sisl::VersionMgr::getVersion("sisl"))}; - LOGINFO("SISL ver. {}", sisl_ver); + std::stringstream sisl_ver; + sisl_ver << sisl::VersionMgr::getVersion("sisl"); + LOGINFO("SISL ver. {}", sisl_ver.str()); - EXPECT_EQ(dummy_ver, sisl_ver); + EXPECT_EQ(dummy_ver.str(), sisl_ver.str()); auto versions{sisl::VersionMgr::getVersions()}; EXPECT_EQ((int)versions.size(), 2); From 3d8512f9a886a6e8db12b2a0b610d505a53de7ab Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Wed, 5 Oct 2022 08:58:54 -0700 Subject: [PATCH 140/385] Added tests for settings and fixed reload issue with latest flatbuffers (#31) --- conanfile.py | 2 +- src/auth_manager/CMakeLists.txt | 1 + src/auth_manager/tests/dummy_grant.cg | 1 + src/flip/client/local/flip_client.hpp | 2 +- src/flip/{lib => }/flip.hpp | 0 src/flip/{lib => }/flip_rpc_server.hpp | 0 src/settings/CMakeLists.txt | 7 +- src/settings/settings.cpp | 60 ++++++------ src/settings/settings.hpp | 31 +++---- src/settings/tests/test_settings.cpp | 123 ++++++++++++++++++++++--- src/utility/non_null_ptr.hpp | 6 +- 11 files changed, 173 insertions(+), 60 deletions(-) create mode 100644 src/auth_manager/tests/dummy_grant.cg rename src/flip/{lib => }/flip.hpp (100%) rename src/flip/{lib => }/flip_rpc_server.hpp (100%) diff --git a/conanfile.py b/conanfile.py index 0fccf35a..1f92eb9d 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.1.2" + version = "8.2.1" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/src/auth_manager/CMakeLists.txt b/src/auth_manager/CMakeLists.txt index bdf63298..8817af3c 100644 --- a/src/auth_manager/CMakeLists.txt +++ b/src/auth_manager/CMakeLists.txt @@ -18,6 +18,7 @@ target_link_libraries(sisl_auth_manager flatbuffers::flatbuffers jwt-cpp::jwt-cpp ) +set(FLATBUFFERS_FLATC_EXECUTABLE, ${CONAN_BIN_DIRS_FLATBUFFER}/flatc) message("Flatbuffers parser: [${FLATBUFFERS_FLATC_EXECUTABLE}]") settings_gen_cpp(${FLATBUFFERS_FLATC_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/generated/ sisl_auth_manager security_config.fbs) diff --git a/src/auth_manager/tests/dummy_grant.cg b/src/auth_manager/tests/dummy_grant.cg new file mode 100644 index 00000000..e525b7a0 --- /dev/null +++ b/src/auth_manager/tests/dummy_grant.cg @@ -0,0 +1 @@ +dummy cg contents diff --git a/src/flip/client/local/flip_client.hpp b/src/flip/client/local/flip_client.hpp index 94c420a9..10493678 100644 --- a/src/flip/client/local/flip_client.hpp +++ b/src/flip/client/local/flip_client.hpp @@ -15,7 +15,7 @@ * *********************************************************************************/ #pragma once -#include "lib/flip.hpp" +#include namespace flip { class FlipClient { diff --git a/src/flip/lib/flip.hpp b/src/flip/flip.hpp similarity index 100% rename from src/flip/lib/flip.hpp rename to src/flip/flip.hpp diff --git a/src/flip/lib/flip_rpc_server.hpp b/src/flip/flip_rpc_server.hpp similarity index 100% rename from src/flip/lib/flip_rpc_server.hpp rename to src/flip/flip_rpc_server.hpp diff --git a/src/settings/CMakeLists.txt b/src/settings/CMakeLists.txt index 4c5999a4..1c12d099 100644 --- a/src/settings/CMakeLists.txt +++ b/src/settings/CMakeLists.txt @@ -6,10 +6,9 @@ endif() include_directories(BEFORE ..) include_directories(BEFORE .) -include_directories(BEFORE . ${CMAKE_CURRENT_BINARY_DIR}/generated/) +include_directories(BEFORE . ${CMAKE_CURRENT_SOURCE_DIR}/) find_package(FlatBuffers REQUIRED) - set(SETTINGS_SOURCE_FILES settings.cpp ) @@ -23,7 +22,7 @@ set(TEST_SETTINGS_SOURCE_FILES tests/test_settings.cpp ) add_executable(test_settings ${TEST_SETTINGS_SOURCE_FILES}) +set(FLATBUFFERS_FLATC_EXECUTABLE, ${CONAN_BIN_DIRS_FLATBUFFER}/flatc) settings_gen_cpp(${FLATBUFFERS_FLATC_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/generated/ test_settings tests/test_app_schema.fbs) -target_link_libraries(test_settings sisl ${COMMON_DEPS} flatbuffers::flatbuffers) +target_link_libraries(test_settings sisl ${COMMON_DEPS} flatbuffers::flatbuffers GTest::gtest) add_test(NAME SettingsTest COMMAND test_settings) -add_test(NAME SettingsTestOverride COMMAND test_settings --config_path=${CMAKE_BINARY_DIR} --override_config test_app_schema.config.database.databaseHost:myhost.com) diff --git a/src/settings/settings.cpp b/src/settings/settings.cpp index 3e92dab0..a4e2b94b 100644 --- a/src/settings/settings.cpp +++ b/src/settings/settings.cpp @@ -50,38 +50,46 @@ static nlohmann::json kv_path_to_json(const std::vector< std::string >& paths, c return nlohmann::json::parse(json_str); } -SettingsFactoryRegistry::SettingsFactoryRegistry() { +SettingsFactoryRegistry::SettingsFactoryRegistry(const std::string& path, + const std::vector< std::string >& override_cfgs) : + m_config_path{path} { + if (SISL_OPTIONS.count("config_path") != 0) { m_config_path = SISL_OPTIONS["config_path"].as< std::string >(); } + + std::vector< std::string > cfgs; if (SISL_OPTIONS.count("override_config") != 0) { - const auto cfgs{SISL_OPTIONS["override_config"].as< std::vector< std::string > >()}; - for (const auto& cfg : cfgs) { - // Get the entire path along with module name and its value - std::vector< std::string > kv; - boost::split(kv, cfg, boost::is_any_of(":=")); - if (kv.size() < 2) { continue; } - - // Split this and convert to a json string which json library can parse. I am sure - // there are cuter ways to do this, but well someother time.... - std::vector< std::string > paths; - boost::split(paths, kv[0], boost::is_any_of(".")); - if (paths.size() < 2) { continue; } - auto schema_name{std::move(paths.front())}; - paths.erase(std::begin(paths)); - - auto j = kv_path_to_json(paths, kv[1]); // Need a copy constructor here. - const auto it{m_override_cfgs.find(schema_name)}; - if (it != std::cend(m_override_cfgs)) { - it->second.merge_patch(j); - } else { - m_override_cfgs.emplace(std::move(schema_name), std::move(j)); - } + cfgs = SISL_OPTIONS["override_config"].as< std::vector< std::string > >(); + } else { + cfgs = override_cfgs; + } + + for (const auto& cfg : cfgs) { + // Get the entire path along with module name and its value + std::vector< std::string > kv; + boost::split(kv, cfg, boost::is_any_of(":=")); + if (kv.size() < 2) { continue; } + + // Split this and convert to a json string which json library can parse. I am sure + // there are cuter ways to do this, but well someother time.... + std::vector< std::string > paths; + boost::split(paths, kv[0], boost::is_any_of(".")); + if (paths.size() < 2) { continue; } + auto schema_name{std::move(paths.front())}; + paths.erase(std::begin(paths)); + + auto j = kv_path_to_json(paths, kv[1]); // Need a copy constructor here. + const auto it{m_override_cfgs.find(schema_name)}; + if (it != std::cend(m_override_cfgs)) { + it->second.merge_patch(j); + } else { + m_override_cfgs.emplace(std::move(schema_name), std::move(j)); } } } -void SettingsFactoryRegistry::register_factory(const std::string& name, SettingsFactoryBase* const f) { - if (SISL_OPTIONS.count("config_path") == 0) { return; } +void SettingsFactoryRegistry::register_factory(const std::string& name, SettingsFactoryBase* f) { + if (m_config_path.empty()) { return; } - const auto config_file{fmt::format("{}/{}.json", SISL_OPTIONS["config_path"].as< std::string >(), name)}; + const auto config_file{fmt::format("{}/{}.json", m_config_path, name)}; { std::unique_lock lg{m_mtx}; f->set_config_file(config_file); diff --git a/src/settings/settings.hpp b/src/settings/settings.hpp index ff6f64d9..f927fac6 100644 --- a/src/settings/settings.hpp +++ b/src/settings/settings.hpp @@ -123,8 +123,7 @@ static bool diff(const reflection::Schema* schema, const reflection::Object* sch break; } - case reflection::BaseType::Array: - case reflection::BaseType::Union: { + default: { // Please do not use unions or arrays in settings. It's crazy! // LOG_ASSERT(false) << "reflection::BaseType::Union type in settings is not supported"; break; @@ -210,8 +209,7 @@ static bool diff_vector(const reflection::Schema* schema, const reflection::Fiel break; } - case reflection::BaseType::Array: - case reflection::BaseType::Union: { + default: { // Please do not use unions or arrays in settings. It's crazy! // LOG_ASSERT(false) << "reflection::BaseType::Union type in settings is not supported"; break; @@ -235,12 +233,13 @@ class SettingsFactoryBase : public boost::noncopyable { class SettingsFactoryRegistry { public: - static SettingsFactoryRegistry& instance() { - static SettingsFactoryRegistry _inst; + static SettingsFactoryRegistry& instance(const std::string& path = "", + const std::vector< std::string >& override_cfgs = {}) { + static SettingsFactoryRegistry _inst{path, override_cfgs}; return _inst; } - SettingsFactoryRegistry(); + SettingsFactoryRegistry(const std::string& path = "", const std::vector< std::string >& override_cfgs = {}); void register_factory(const std::string& s, SettingsFactoryBase* f); void unregister_factory(const std::string& s); @@ -250,6 +249,7 @@ class SettingsFactoryRegistry { private: mutable std::shared_mutex m_mtx; + std::string m_config_path; std::unordered_map< std::string, SettingsFactoryBase* > m_factories; std::unordered_map< std::string, nlohmann::json > m_override_cfgs; }; @@ -335,9 +335,10 @@ class SettingsFactory : public sisl::SettingsFactoryBase { private: void load(const std::string& config, bool is_config_file) { try { - auto new_settings = parse_config(config, is_config_file); + SettingsT new_settings; + parse_config(config, is_config_file, new_settings); // post_process(true, &new_settings); - m_rcu_data.make_and_exchange(new_settings); + m_rcu_data.make_and_exchange(std::move(new_settings)); } catch (std::exception& e) { throw std::runtime_error(fmt::format("Exception reading config {} (errmsg = {})", (is_config_file ? config : " in json"), e.what())); @@ -346,7 +347,8 @@ class SettingsFactory : public sisl::SettingsFactoryBase { bool reload(const std::string& config, bool is_config_file) { try { - auto new_settings = parse_config(config, is_config_file /* is_config_file */); + SettingsT new_settings; + parse_config(config, is_config_file /* is_config_file */, new_settings); /* post_process may reconfigure some settings, therefore this has to be called before taking diff */ // post_process(false, &new_settings); @@ -354,7 +356,7 @@ class SettingsFactory : public sisl::SettingsFactoryBase { m_current_settings = ""; /* getSettings will return empty briefly before exiting */ return true; } else { - m_rcu_data.make_and_exchange(new_settings); + m_rcu_data.make_and_exchange(std::move(new_settings)); } } catch (std::exception& e) { LOGERROR("Exception reading config {} (errmsg = {})", (is_config_file ? config : " in json"), e.what()); @@ -362,7 +364,7 @@ class SettingsFactory : public sisl::SettingsFactoryBase { return false; } - SettingsT parse_config(const std::string& config, bool is_file) { + void parse_config(const std::string& config, bool is_file, SettingsT& out_settings) { std::string json_config_str; if (is_file) { if (!flatbuffers::LoadFile(config.c_str(), false, &json_config_str)) { @@ -392,11 +394,8 @@ class SettingsFactory : public sisl::SettingsFactoryBase { /* parsing succeeded, update current settings string */ m_current_settings = std::move(json_config_str); - SettingsT settings; flatbuffers::GetRoot< typename SettingsT::TableType >(parser.builder_.GetBufferPointer()) - ->UnPackTo(&settings, nullptr); - - return settings; + ->UnPackTo(&out_settings, nullptr); } bool check_restart_needed(const SettingsT* new_settings, const std::shared_ptr< SettingsT > current_settings) { diff --git a/src/settings/tests/test_settings.cpp b/src/settings/tests/test_settings.cpp index b16c5fc9..22fdddbb 100644 --- a/src/settings/tests/test_settings.cpp +++ b/src/settings/tests/test_settings.cpp @@ -14,28 +14,118 @@ * specific language governing permissions and limitations under the License. * *********************************************************************************/ -#include -#include - +#include +#include #include + #include "logging/logging.h" #include "options/options.h" - -#include "test_app_schema_generated.h" -//#include "generated/test_app_schema_bindump.hpp" -#include "settings/settings.hpp" - -#define MY_SETTINGS_FACTORY SETTINGS_FACTORY(test_app_schema) +#include "generated/test_app_schema_generated.h" +#include "settings.hpp" SISL_OPTIONS_ENABLE(logging, test_settings, config) +SISL_LOGGING_INIT(test_settings, settings) +SETTINGS_INIT(testapp::TestAppSettings, test_app_schema) SISL_OPTION_GROUP(test_settings, (num_threads, "", "num_threads", "number of threads", ::cxxopts::value< uint32_t >()->default_value("1"), "number")) -SISL_LOGGING_INIT(test_settings, settings) -SETTINGS_INIT(testapp::TestAppSettings, test_app_schema) +static const char* g_schema_file{"/tmp/test_app_schema.json"}; + +class SettingsTest : public ::testing::Test { +protected: + void SetUp() override { std::remove(g_schema_file); } + void init(const std::vector< std::string >& override_cfgs = {}) { + auto reg_mem = &sisl::SettingsFactoryRegistry::instance(); + new (reg_mem) sisl::SettingsFactoryRegistry("/tmp", override_cfgs); + + auto fac_mem = &test_app_schema_factory::instance(); + sisl::SettingsFactoryRegistry::instance().unregister_factory("test_app_schema"); + new (fac_mem) test_app_schema_factory(); + } +}; + +TEST_F(SettingsTest, LoadReload) { + init(); + + LOGINFO("Step 1: Validating default load"); + ASSERT_EQ(SETTINGS_VALUE(test_app_schema, config->dbconnection->dbConnectionOptimalLoad), 100UL) + << "Incorrect load of dbConnectionOptimalLoad - default load"; + SETTINGS(test_app_schema, s, { + ASSERT_EQ(s.config.database.databaseHost, "") << "Incorrect load of databaseHost - default load"; + ASSERT_EQ(s.config.database.databasePort, 27017u) << "Incorrect load of databasePort - default load"; + ASSERT_EQ(s.config.database.numThreads, 8u) << "Incorrect load of numThreads - default load"; + }); + sisl::SettingsFactoryRegistry::instance().save_all(); + ASSERT_EQ(std::filesystem::exists("/tmp/test_app_schema.json"), true) << "Expect settings save to create the file"; + ASSERT_EQ(sisl::SettingsFactoryRegistry::instance().reload_all(), false) + << "Incorrectly asking to reload when hotswap variable is changed and reloaded"; + + LOGINFO("Step 2: Reload by dumping the settings to json, edit hotswap variable and reload it"); + nlohmann::json j = nlohmann::json::parse(SETTINGS_FACTORY(test_app_schema).get_json()); + j["config"]["dbconnection"]["dbConnectionOptimalLoad"] = 800; + ASSERT_EQ(SETTINGS_FACTORY(test_app_schema).reload_json(j.dump()), false) + << "Incorrectly asking restart when hotswap variable is changed and reloaded"; + ASSERT_EQ(SETTINGS_VALUE(test_app_schema, config->dbconnection->dbConnectionOptimalLoad), 800UL) + << "Incorrect load of dbConnectionOptimalLoad - after reload json"; + SETTINGS(test_app_schema, s, { + ASSERT_EQ(s.config.database.databaseHost, "") << "Incorrect load of databaseHost - after reload json"; + ASSERT_EQ(s.config.database.databasePort, 27017u) << "Incorrect load of databasePort - after reload json"; + ASSERT_EQ(s.config.database.numThreads, 8u) << "Incorrect load of numThreads - after reload json"; + }); + + LOGINFO("Step 3: Reload by dumping the settings to json, edit non-hotswap variable and dump to file and reload " + "settings"); + j = nlohmann::json::parse(SETTINGS_FACTORY(test_app_schema).get_json()); + j["config"]["database"]["databasePort"] = 25000u; + { + std::ofstream file(g_schema_file); + file << j; + } + ASSERT_EQ(sisl::SettingsFactoryRegistry::instance().reload_all(), true) + << "Incorrectly marking no restart when non-hotswap variable is changed and reloaded"; + + LOGINFO("Step 4: Simulate the app restart and validate new values"); + init(); + ASSERT_EQ(SETTINGS_VALUE(test_app_schema, config->dbconnection->dbConnectionOptimalLoad), 800UL) + << "Incorrect load of dbConnectionOptimalLoad - after restart"; + SETTINGS(test_app_schema, s, { + ASSERT_EQ(s.config.database.databaseHost, "") << "Incorrect load of databaseHost - after reload json"; + ASSERT_EQ(s.config.database.databasePort, 25000u) << "Incorrect load of databasePort - after reload json"; + ASSERT_EQ(s.config.database.numThreads, 8u) << "Incorrect load of numThreads - after reload json"; + }); + sisl::SettingsFactoryRegistry::instance().save_all(); +} + +TEST_F(SettingsTest, OverrideConfig) { + LOGINFO("Step 1: Validating overridden config load"); + init({"test_app_schema.config.database.databaseHost:myhost.com", + "test_app_schema.config.glog.FLAGS_logbuflevel:100"}); + ASSERT_EQ(SETTINGS_VALUE(test_app_schema, config->database->databaseHost), "myhost.com") + << "Incorrect load of databaseHost with override config"; + ASSERT_EQ(SETTINGS_VALUE(test_app_schema, config->glog->FLAGS_logbuflevel), 100) + << "Incorrect load of FLAGS_logbuflevel with override config"; + SETTINGS(test_app_schema, s, { + ASSERT_EQ(s.config.database.databasePort, 27017u) << "Incorrect load of databasePort - default load"; + ASSERT_EQ(s.config.database.numThreads, 8u) << "Incorrect load of numThreads - default load"; + }); + sisl::SettingsFactoryRegistry::instance().save_all(); + + LOGINFO("Step 2: Simulate restart and default load saved the previously overridden config"); + init(); + ASSERT_EQ(SETTINGS_VALUE(test_app_schema, config->database->databaseHost), "myhost.com") + << "Incorrect load of databaseHost with override config"; + ASSERT_EQ(SETTINGS_VALUE(test_app_schema, config->glog->FLAGS_logbuflevel), 100) + << "Incorrect load of FLAGS_logbuflevel with override config"; + SETTINGS(test_app_schema, s, { + ASSERT_EQ(s.config.database.databasePort, 27017u) << "Incorrect load of databasePort - default load"; + ASSERT_EQ(s.config.database.numThreads, 8u) << "Incorrect load of numThreads - default load"; + }); +} + +#if 0 int main(int argc, char* argv[]) { SISL_OPTIONS_LOAD(argc, argv, logging, test_settings, config); sisl::logging::SetLogger("test_settings"); @@ -81,3 +171,14 @@ int main(int argc, char* argv[]) { MY_SETTINGS_FACTORY.save("/tmp/settings_out"); return 0; } +#endif + +int main(int argc, char* argv[]) { + ::testing::InitGoogleTest(&argc, argv); + SISL_OPTIONS_LOAD(argc, argv, logging, test_settings) + sisl::logging::SetLogger("test_settings"); + spdlog::set_pattern("[%D %T%z] [%^%L%$] [%t] %v"); + + auto ret = RUN_ALL_TESTS(); + return ret; +} \ No newline at end of file diff --git a/src/utility/non_null_ptr.hpp b/src/utility/non_null_ptr.hpp index 13db2f8c..95c57198 100644 --- a/src/utility/non_null_ptr.hpp +++ b/src/utility/non_null_ptr.hpp @@ -93,7 +93,11 @@ struct embedded_t : public T { explicit operator bool() const noexcept { return true; } - T* release() noexcept { return nullptr; } + T* release() noexcept { + embedded_t* ret = new embedded_t(); + *ret = *this; + return static_cast< T* >(ret); + } }; template < class T > From 7b703fd1b72c5025164f38e3aeafe159f87677db Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Wed, 5 Oct 2022 09:36:26 -0700 Subject: [PATCH 141/385] Removed duplicate .cmake files and fixed flip CMakelists to use correct one (#32) --- {src/flip/cmake => cmake}/grpc.cmake | 0 {src/flip/cmake => cmake}/protobuf.cmake | 0 src/flip/CMakeLists.txt | 7 - src/flip/cmake/CodeCoverage.cmake | 303 ----------------------- src/flip/cmake/debug_flags.cmake | 69 ------ 5 files changed, 379 deletions(-) rename {src/flip/cmake => cmake}/grpc.cmake (100%) rename {src/flip/cmake => cmake}/protobuf.cmake (100%) delete mode 100644 src/flip/cmake/CodeCoverage.cmake delete mode 100644 src/flip/cmake/debug_flags.cmake diff --git a/src/flip/cmake/grpc.cmake b/cmake/grpc.cmake similarity index 100% rename from src/flip/cmake/grpc.cmake rename to cmake/grpc.cmake diff --git a/src/flip/cmake/protobuf.cmake b/cmake/protobuf.cmake similarity index 100% rename from src/flip/cmake/protobuf.cmake rename to cmake/protobuf.cmake diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 3173450b..33581cce 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -4,13 +4,6 @@ if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQU add_flags("-Wno-unused-parameter -Wno-cast-function-type") endif() -if (${CMAKE_BUILD_TYPE} STREQUAL Debug) - include (cmake/debug_flags.cmake) -endif () -if (${MEMORY_SANITIZER_ON}) - include (cmake/mem_sanitizer.cmake) -endif () - find_package(gRPC REQUIRED) include_directories(BEFORE include) diff --git a/src/flip/cmake/CodeCoverage.cmake b/src/flip/cmake/CodeCoverage.cmake deleted file mode 100644 index 932c3d06..00000000 --- a/src/flip/cmake/CodeCoverage.cmake +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright (c) 2012 - 2017, Lars Bilke -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without modification, -# are permitted provided that the following conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# 3. Neither the name of the copyright holder nor the names of its contributors -# may be used to endorse or promote products derived from this software without -# specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# CHANGES: -# -# 2012-01-31, Lars Bilke -# - Enable Code Coverage -# -# 2013-09-17, Joakim Söderberg -# - Added support for Clang. -# - Some additional usage instructions. -# -# 2016-02-03, Lars Bilke -# - Refactored functions to use named parameters -# -# 2017-06-02, Lars Bilke -# - Merged with modified version from github.com/ufz/ogs -# -# -# USAGE: -# -# 1. Copy this file into your cmake modules path. -# -# 2. Add the following line to your CMakeLists.txt: -# include(CodeCoverage) -# -# 3. Append necessary compiler flags: -# APPEND_COVERAGE_COMPILER_FLAGS() -# -# 4. If you need to exclude additional directories from the report, specify them -# using the COVERAGE_LCOV_EXCLUDES variable before calling SETUP_TARGET_FOR_COVERAGE_LCOV. -# Example: -# set(COVERAGE_LCOV_EXCLUDES 'dir1/*' 'dir2/*') -# -# 5. Use the functions described below to create a custom make target which -# runs your test executable and produces a code coverage report. -# -# 6. Build a Debug build: -# cmake -DCMAKE_BUILD_TYPE=Debug .. -# make -# make my_coverage_target -# - -include(CMakeParseArguments) - -# Check prereqs -find_program( GCOV_PATH gcov ) -find_program( LCOV_PATH NAMES lcov lcov.bat lcov.exe lcov.perl) -find_program( GENHTML_PATH NAMES genhtml genhtml.perl genhtml.bat ) -find_program( GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/scripts/test) -find_program( SIMPLE_PYTHON_EXECUTABLE python ) - -if(NOT GCOV_PATH) - message(FATAL_ERROR "gcov not found! Aborting...") -endif() # NOT GCOV_PATH - -if("${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang") - if("${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS 3) - message(FATAL_ERROR "Clang version must be 3.0.0 or greater! Aborting...") - endif() -elseif(NOT CMAKE_COMPILER_IS_GNUCXX) - message(FATAL_ERROR "Compiler is not GNU gcc! Aborting...") -endif() - -set(COVERAGE_COMPILER_FLAGS "-g -O0 --coverage -fprofile-arcs -ftest-coverage" - CACHE INTERNAL "") - -set(CMAKE_CXX_FLAGS_COVERAGE - ${COVERAGE_COMPILER_FLAGS} - CACHE STRING "Flags used by the C++ compiler during coverage builds." - FORCE ) -set(CMAKE_C_FLAGS_COVERAGE - ${COVERAGE_COMPILER_FLAGS} - CACHE STRING "Flags used by the C compiler during coverage builds." - FORCE ) -set(CMAKE_EXE_LINKER_FLAGS_COVERAGE - "" - CACHE STRING "Flags used for linking binaries during coverage builds." - FORCE ) -set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE - "" - CACHE STRING "Flags used by the shared libraries linker during coverage builds." - FORCE ) -mark_as_advanced( - CMAKE_CXX_FLAGS_COVERAGE - CMAKE_C_FLAGS_COVERAGE - CMAKE_EXE_LINKER_FLAGS_COVERAGE - CMAKE_SHARED_LINKER_FLAGS_COVERAGE ) - -if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug") - message(WARNING "Code coverage results with an optimised (non-Debug) build may be misleading") -endif() # NOT CMAKE_BUILD_TYPE STREQUAL "Debug" - -if(CMAKE_C_COMPILER_ID STREQUAL "GNU") - link_libraries(gcov) -else() - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --coverage") -endif() - -# Defines a target for running and collection code coverage information -# Builds dependencies, runs the given executable and outputs reports. -# NOTE! The executable should always have a ZERO as exit code otherwise -# the coverage generation will not complete. -# -# SETUP_TARGET_FOR_COVERAGE_LCOV( -# NAME testrunner_coverage # New target name -# EXECUTABLE testrunner -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR -# DEPENDENCIES testrunner # Dependencies to build first -# ) -function(SETUP_TARGET_FOR_COVERAGE_LCOV) - - set(options NONE) - set(oneValueArgs NAME) - set(multiValueArgs EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES) - cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - - if(NOT LCOV_PATH) - message(FATAL_ERROR "lcov not found! Aborting...") - endif() # NOT LCOV_PATH - - if(NOT GENHTML_PATH) - message(FATAL_ERROR "genhtml not found! Aborting...") - endif() # NOT GENHTML_PATH - - # Setup target - add_custom_target(${Coverage_NAME} - - # Cleanup lcov - COMMAND ${LCOV_PATH} --directory . --zerocounters - # Create baseline to make sure untouched files show up in the report - COMMAND ${LCOV_PATH} -c -i -d . -o ${Coverage_NAME}.base - - # Run tests - COMMAND ${Coverage_EXECUTABLE} - - # Capturing lcov counters and generating report - COMMAND ${LCOV_PATH} --directory . --capture --output-file ${Coverage_NAME}.info - # add baseline counters - COMMAND ${LCOV_PATH} -a ${Coverage_NAME}.base -a ${Coverage_NAME}.info --output-file ${Coverage_NAME}.total - COMMAND ${LCOV_PATH} --remove ${Coverage_NAME}.total ${COVERAGE_LCOV_EXCLUDES} --output-file ${PROJECT_BINARY_DIR}/${Coverage_NAME}.info.cleaned - COMMAND ${GENHTML_PATH} -o ${Coverage_NAME} ${PROJECT_BINARY_DIR}/${Coverage_NAME}.info.cleaned - COMMAND ${CMAKE_COMMAND} -E remove ${Coverage_NAME}.base ${Coverage_NAME}.total ${PROJECT_BINARY_DIR}/${Coverage_NAME}.info.cleaned - - WORKING_DIRECTORY ${PROJECT_BINARY_DIR} - DEPENDS ${Coverage_DEPENDENCIES} - COMMENT "Resetting code coverage counters to zero.\nProcessing code coverage counters and generating report." - ) - - # Show where to find the lcov info report - add_custom_command(TARGET ${Coverage_NAME} POST_BUILD - COMMAND ; - COMMENT "Lcov code coverage info report saved in ${Coverage_NAME}.info." - ) - - # Show info where to find the report - add_custom_command(TARGET ${Coverage_NAME} POST_BUILD - COMMAND ; - COMMENT "Open ./${Coverage_NAME}/index.html in your browser to view the coverage report." - ) - -endfunction() # SETUP_TARGET_FOR_COVERAGE_LCOV - -# Defines a target for running and collection code coverage information -# Builds dependencies, runs the given executable and outputs reports. -# NOTE! The executable should always have a ZERO as exit code otherwise -# the coverage generation will not complete. -# -# SETUP_TARGET_FOR_COVERAGE_GCOVR_XML( -# NAME ctest_coverage # New target name -# EXECUTABLE ctest -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR -# DEPENDENCIES executable_target # Dependencies to build first -# ) -function(SETUP_TARGET_FOR_COVERAGE_GCOVR_XML) - - set(options NONE) - set(oneValueArgs NAME) - set(multiValueArgs EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES) - cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - - if(NOT SIMPLE_PYTHON_EXECUTABLE) - message(FATAL_ERROR "python not found! Aborting...") - endif() # NOT SIMPLE_PYTHON_EXECUTABLE - - if(NOT GCOVR_PATH) - message(FATAL_ERROR "gcovr not found! Aborting...") - endif() # NOT GCOVR_PATH - - # Combine excludes to several -e arguments - set(GCOVR_EXCLUDES "") - foreach(EXCLUDE ${COVERAGE_GCOVR_EXCLUDES}) - list(APPEND GCOVR_EXCLUDES "-e") - list(APPEND GCOVR_EXCLUDES "${EXCLUDE}") - endforeach() - - add_custom_target(${Coverage_NAME} - # Run tests - ${Coverage_EXECUTABLE} - - # Running gcovr - COMMAND ${GCOVR_PATH} --xml - -r ${PROJECT_SOURCE_DIR} ${GCOVR_EXCLUDES} - --object-directory=${PROJECT_BINARY_DIR} - -o ${Coverage_NAME}.xml - WORKING_DIRECTORY ${PROJECT_BINARY_DIR} - DEPENDS ${Coverage_DEPENDENCIES} - COMMENT "Running gcovr to produce Cobertura code coverage report." - ) - - # Show info where to find the report - add_custom_command(TARGET ${Coverage_NAME} POST_BUILD - COMMAND ; - COMMENT "Cobertura code coverage report saved in ${Coverage_NAME}.xml." - ) - -endfunction() # SETUP_TARGET_FOR_COVERAGE_GCOVR_XML - -# Defines a target for running and collection code coverage information -# Builds dependencies, runs the given executable and outputs reports. -# NOTE! The executable should always have a ZERO as exit code otherwise -# the coverage generation will not complete. -# -# SETUP_TARGET_FOR_COVERAGE_GCOVR_HTML( -# NAME ctest_coverage # New target name -# EXECUTABLE ctest -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR -# DEPENDENCIES executable_target # Dependencies to build first -# ) -function(SETUP_TARGET_FOR_COVERAGE_GCOVR_HTML) - - set(options NONE) - set(oneValueArgs NAME) - set(multiValueArgs EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES) - cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - - if(NOT SIMPLE_PYTHON_EXECUTABLE) - message(FATAL_ERROR "python not found! Aborting...") - endif() # NOT SIMPLE_PYTHON_EXECUTABLE - - if(NOT GCOVR_PATH) - message(FATAL_ERROR "gcovr not found! Aborting...") - endif() # NOT GCOVR_PATH - - # Combine excludes to several -e arguments - set(GCOVR_EXCLUDES "") - foreach(EXCLUDE ${COVERAGE_GCOVR_EXCLUDES}) - list(APPEND GCOVR_EXCLUDES "-e") - list(APPEND GCOVR_EXCLUDES "${EXCLUDE}") - endforeach() - - add_custom_target(${Coverage_NAME} - # Run tests - ${Coverage_EXECUTABLE} - - # Create folder - COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_BINARY_DIR}/${Coverage_NAME} - - # Running gcovr - COMMAND ${GCOVR_PATH} --html --html-details - -r ${PROJECT_SOURCE_DIR} ${GCOVR_EXCLUDES} - --object-directory=${PROJECT_BINARY_DIR} - -o ${Coverage_NAME}/index.html - WORKING_DIRECTORY ${PROJECT_BINARY_DIR} - DEPENDS ${Coverage_DEPENDENCIES} - COMMENT "Running gcovr to produce HTML code coverage report." - ) - - # Show info where to find the report - add_custom_command(TARGET ${Coverage_NAME} POST_BUILD - COMMAND ; - COMMENT "Open ./${Coverage_NAME}/index.html in your browser to view the coverage report." - ) - -endfunction() # SETUP_TARGET_FOR_COVERAGE_GCOVR_HTML - -function(APPEND_COVERAGE_COMPILER_FLAGS) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE) - message(STATUS "Appending code coverage compiler flags: ${COVERAGE_COMPILER_FLAGS}") -endfunction() # APPEND_COVERAGE_COMPILER_FLAGS diff --git a/src/flip/cmake/debug_flags.cmake b/src/flip/cmake/debug_flags.cmake deleted file mode 100644 index a0ad3d3f..00000000 --- a/src/flip/cmake/debug_flags.cmake +++ /dev/null @@ -1,69 +0,0 @@ -# This list is generated from the output of: -# -# gcc -Q --help=optimizers -O0 -# -# with GCC 4.8.4 (Ubuntu 4.8.4-2ubuntu1-14.04.3). Yes, every one of these flags -# is on even with -O0 specified, and nothing changes when you add debugging -# options (-g/-g3/-gdwarf-4/etc.) in there. This should be updated every time -# the version of GCC used to compile changes. -# -# If you add an option here, it is your responsibility to comment it, with the -# following convention (feel free to add your own if there's not one suitable). -# DO YOUR RESEARCH. -# -# CBWITPOB: Can be wrong in the presence of bugs. When are you usually -# debugging? When there's a bug. Optimizations that can be wrong -# in the presence of bugs mean that, for example, you won't see -# a variable be modified when it actually happens--if it's -# modified due to the bug, as far as the debugger is concerned, -# it wasn't modified by the program, and things like conditional -# breakpoints won't work right, unless maybe it's a volatile -# variable. -# Inlining: Although GDB claims to track this correctly with -g3 and inject -# the code while you're stepping, it does not. You'll either be -# missing stack frames, or unable to view locals when you step -# to that frame--even if those locals exist nowhere else (i.e. -# not a function argument or tail return value). -# Eliding: Behavior may not change, but who knows where the values come -# from. -# Hoisting: Your program is not running instructions in the order of the -# code. Again, GDB claims to handle this, but it does not, or at -# least not well. -# Vectorizing: Great optimization, but the simulation of going through for -# loops is far from perfect, especially when you're dealing -# with bugs. -# -# And yes, these optimizations severely effect the quality of the debugging -# experience. Without these, you're lucky to be able to step into 80% of the -# stack, and of that 80%, you'll see anywhere from 50% to 100% of locals -# missing values. With these, I've never seen a stack frame I couldn't step -# into, and never seen when I look at a local. -# -set (REALLY_NO_OPTIMIZATION_FLAGS "-fno-short-enums" )# Binary-incompatible with code compiled otherwise. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-aggressive-loop-optimizations" ) # Changes behavior on overflow. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-branch-count-reg" )# Changes CPU instructions used. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-dce )# Can be wrong in the presence of bugs (CBWITPOB). set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-delete-null-pointer-checks )# CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-dse )# CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-early-inlining )# NO INLINING! Because... set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-gcse-lm )# Changes CPU instructions used. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-inline )# ...inlining also does things like elide locals. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-ira-hoist-pressure )# Might be irrelevant, but NO HOISTING! set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-ivopts )# Elides and changes instructions. CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-jump-tables )# Changes CPU instructions for switch statements. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-move-loop-invariants )# NO HOISTING! set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-peephole )# Exploiting CPU quirks. CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-prefetch-loop-arrays )# Changes CPU instructions, even GCC manual is ambivalent. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-rename-registers" )# Maybe wrong in the presence of bugs? -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-toplevel-reorder" )# Elides unused static variable, reorders globals. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-coalesce-vars" )# Elides temporaries. CBWITPOB. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-cselim" )# Reorders, violates C++ mem model, CBWITPOB. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-forwprop" )# Reorders and changes instructions. CBWITPOB. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-loop-if-convert" )# Reorders and changes instructions. CBWITPOB. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-loop-im" )# Reorders and changes instructions. CBWITPOB. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-loop-optimize" )# Reorders and changes instructions. CBWITPOB. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-phiprop" )# NO HOISTING! Reorders and changes. CBWITPOB. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-pta" )# Less analysis means maybe less interference. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-reassoc" )# Elides and vectories. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-scev-cprop" )# Elides and changes instructions. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-vect-loop-version" )# E&C. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-web" )# E&C. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-slp-vectorize" )# E&C. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fthreadsafe-statics" )# Slightly smaller in code that doesn't need to be TS. - -if (DEFINED ${BUILD_COVERAGE}) - if (${BUILD_COVERAGE}) - include (cmake/CodeCoverage.cmake) - APPEND_COVERAGE_COMPILER_FLAGS() - SETUP_TARGET_FOR_COVERAGE_GCOVR_XML(NAME coverage EXECUTABLE ctest DEPENDENCIES ) - endif () -endif () -set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${REALLY_NO_OPTIMIZATION_FLAGS}") From 20b5925ec167eaadd7b5169cbdbfed43d438b118 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Wed, 5 Oct 2022 11:09:21 -0700 Subject: [PATCH 142/385] Fixed sanitize error of possible mem leaks reported (#33) --- src/flip/flip.hpp | 1 + src/settings/tests/test_settings.cpp | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/src/flip/flip.hpp b/src/flip/flip.hpp index b777ba42..83408be4 100644 --- a/src/flip/flip.hpp +++ b/src/flip/flip.hpp @@ -316,6 +316,7 @@ using io_work = boost::asio::io_service::work; class FlipTimerBase { public: + virtual ~FlipTimerBase() = default; virtual void schedule(boost::posix_time::time_duration delay_us, const std::function< void() >& closure) = 0; }; diff --git a/src/settings/tests/test_settings.cpp b/src/settings/tests/test_settings.cpp index 22fdddbb..9a6cfafa 100644 --- a/src/settings/tests/test_settings.cpp +++ b/src/settings/tests/test_settings.cpp @@ -39,12 +39,20 @@ class SettingsTest : public ::testing::Test { void init(const std::vector< std::string >& override_cfgs = {}) { auto reg_mem = &sisl::SettingsFactoryRegistry::instance(); + sisl::SettingsFactoryRegistry::instance().~SettingsFactoryRegistry(); new (reg_mem) sisl::SettingsFactoryRegistry("/tmp", override_cfgs); auto fac_mem = &test_app_schema_factory::instance(); sisl::SettingsFactoryRegistry::instance().unregister_factory("test_app_schema"); + test_app_schema_factory::instance().~test_app_schema_factory(); new (fac_mem) test_app_schema_factory(); } + + void Teardown() { + test_app_schema_factory::instance().~test_app_schema_factory(); + sisl::SettingsFactoryRegistry::instance().~SettingsFactoryRegistry(); + std::remove(g_schema_file); + } }; TEST_F(SettingsTest, LoadReload) { From 751e7b2b47f8e70e6299795a48bea90e3d93d815 Mon Sep 17 00:00:00 2001 From: "Ravi Akella email = raakella@ebay.com" Date: Fri, 7 Oct 2022 12:08:01 -0700 Subject: [PATCH 143/385] use oss sisl --- CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 402e82c0..aec70d05 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -14,6 +14,7 @@ find_package(sisl REQUIRED) find_package(gRPC REQUIRED) include_directories(BEFORE "include") +include_directories(BEFORE ${CONAN_INCLUDE_DIRS_SISL}/sisl) add_library(${PROJECT_NAME}) target_sources(${PROJECT_NAME} PRIVATE From 6c1efe66d5a22f77d2618a41b54f6747d67cd87d Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Mon, 10 Oct 2022 11:42:00 -0700 Subject: [PATCH 144/385] Link tcmalloc if chosen as part of sisl (#34) --- CMakeLists.txt | 23 ++++++- conanfile.py | 4 +- src/fds/CMakeLists.txt | 9 ++- src/fds/tests/test_tcmalloc_helper.cpp | 93 ++++++++++++++++++++++++++ 4 files changed, 125 insertions(+), 4 deletions(-) create mode 100644 src/fds/tests/test_tcmalloc_helper.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 9d115f28..7ed3545f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -55,6 +55,9 @@ find_package(cpr REQUIRED) find_package(cxxopts REQUIRED) find_package(folly REQUIRED) find_package(GTest REQUIRED) +if (${MALLOC_IMPL} STREQUAL "tcmalloc") + find_package(gperftools REQUIRED) +endif() find_package(jwt-cpp REQUIRED) find_package(nlohmann_json REQUIRED) find_package(prerelease_dummy QUIET) @@ -76,6 +79,12 @@ if (${prerelease_dummy_FOUND}) list (APPEND COMMON_DEPS prerelease_dummy::prerelease_dummy) endif () +if (DEFINED MALLOC_IMPL) + if (${MALLOC_IMPL} STREQUAL "tcmalloc") + list(APPEND COMMON_DEPS gperftools::gperftools) + endif() +endif() + find_program(CCACHE_FOUND ccache) if (CCACHE_FOUND) set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) @@ -134,9 +143,19 @@ add_library(sisl $ $ ) -target_link_libraries(sisl + +list(APPEND SISL_DEPS Folly::Folly - ) +) +if (DEFINED MALLOC_IMPL) + if (${MALLOC_IMPL} STREQUAL "tcmalloc") + list(APPEND SISL_DEPS gperftools::gperftools) + endif() +endif() + +target_link_libraries(sisl + ${SISL_DEPS} +) # build info string(TOUPPER "${CMAKE_BUILD_TYPE}" UC_CMAKE_BUILD_TYPE) diff --git a/conanfile.py b/conanfile.py index 1f92eb9d..2a27acbf 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.2.1" + version = "8.2.2" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") @@ -136,3 +136,5 @@ def package_info(self): self.cpp_info.cppflags.append("-DUSE_JEMALLOC=1") elif self.options.malloc_impl == 'tcmalloc': self.cpp_info.cppflags.append("-DUSING_TCMALLOC=1") + self.cpp_info.libdirs += self.deps_cpp_info["gperftools"].lib_paths + self.cpp_info.libs += ["tcmalloc"] diff --git a/src/fds/CMakeLists.txt b/src/fds/CMakeLists.txt index 80ab4c40..6f56a898 100644 --- a/src/fds/CMakeLists.txt +++ b/src/fds/CMakeLists.txt @@ -71,7 +71,14 @@ if (DEFINED MALLOC_IMPL) tests/test_jemalloc_helper.cpp ) add_executable(test_jemalloc ${TEST_JEMALLOC_SOURCE_FILES}) - target_link_libraries(test_jemalloc sisl jemalloc) + target_link_libraries(test_jemalloc sisl ${COMMON_DEPS} jemalloc GTest::gtest) add_test(NAME TestJemalloc COMMAND test_jemalloc) + elseif (${MALLOC_IMPL} STREQUAL "tcmalloc") + set(TEST_TCMALLOC_SOURCE_FILES + tests/test_tcmalloc_helper.cpp + ) + add_executable(test_tcmalloc ${TEST_TCMALLOC_SOURCE_FILES}) + target_link_libraries(test_tcmalloc sisl ${COMMON_DEPS} GTest::gtest) + add_test(NAME TestTcmalloc COMMAND test_tcmalloc) endif() endif() diff --git a/src/fds/tests/test_tcmalloc_helper.cpp b/src/fds/tests/test_tcmalloc_helper.cpp new file mode 100644 index 00000000..69f2f94d --- /dev/null +++ b/src/fds/tests/test_tcmalloc_helper.cpp @@ -0,0 +1,93 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Bryan Zimmerman + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ +#ifdef USING_TCMALLOC +#include +#include +#include +#include +#include + +#include "logging/logging.h" +#include "options/options.h" + +#include + +#include "utility/thread_buffer.hpp" + +#include "malloc_helper.hpp" + +using namespace sisl; + +SISL_LOGGING_INIT(test_jemalloc) + +namespace { +uint32_t g_num_threads; + +struct TcmallocTest : public testing::Test { +public: + TcmallocTest() : testing::Test{} { LOGINFO("Initializing new TcmallocTest class"); } + TcmallocTest(const TcmallocTest&) = delete; + TcmallocTest(TcmallocTest&&) noexcept = delete; + TcmallocTest& operator=(const TcmallocTest&) = delete; + TcmallocTest& operator=(TcmallocTest&&) noexcept = delete; + virtual ~TcmallocTest() override = default; + +protected: + void SetUp() override {} + void TearDown() override {} + + void MultiThreadedAllocDealloc(const size_t iterations, const size_t mem_count = 1000000) const { + const auto thread_lambda{[&iterations, &mem_count]() { + // allocated/deallocate memory + for (size_t iteration{0}; iteration < iterations; ++iteration) { + std::unique_ptr< uint64_t[] > mem{new uint64_t[mem_count]}; + } + }}; + + std::vector< std::thread > threads; + for (uint32_t thread_num{0}; thread_num < g_num_threads; ++thread_num) { + threads.emplace_back(thread_lambda); + } + + for (auto& alloc_dealloc_thread : threads) { + if (alloc_dealloc_thread.joinable()) alloc_dealloc_thread.join(); + }; + } +}; +} // namespace + +TEST_F(TcmallocTest, GetDirtyPageCount) { MultiThreadedAllocDealloc(100); } + +SISL_OPTIONS_ENABLE(logging, test_tcmalloc) + +SISL_OPTION_GROUP(test_tcmalloc, + (num_threads, "", "num_threads", "number of threads", + ::cxxopts::value< uint32_t >()->default_value("8"), "number")) + +int main(int argc, char* argv[]) { + SISL_OPTIONS_LOAD(argc, argv, logging, test_tcmalloc); + ::testing::InitGoogleTest(&argc, argv); + sisl::logging::SetLogger("test_bitset"); + spdlog::set_pattern("[%D %T%z] [%^%l%$] [%n] [%t] %v"); + + g_num_threads = SISL_OPTIONS["num_threads"].as< uint32_t >(); + + const auto ret{RUN_ALL_TESTS()}; + return ret; +} + +#endif From 5f2ddce833eb123d965b4a25be181413d0cd8a98 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Tue, 11 Oct 2022 12:01:41 -0700 Subject: [PATCH 145/385] Made all internal includes relative path (#35) --- conanfile.py | 8 ++------ src/auth_manager/auth_manager.hpp | 2 +- src/auth_manager/security_config.hpp | 4 ++-- src/cache/evictor.hpp | 2 +- src/cache/lru_evictor.hpp | 2 +- src/cache/range_hashmap.hpp | 6 +++--- src/fds/bitset.hpp | 2 +- src/fds/bitword.hpp | 2 +- src/fds/buffer.hpp | 10 ++++------ src/fds/freelist_allocator.hpp | 2 +- src/fds/id_reserver.hpp | 3 +-- src/fds/malloc_helper.hpp | 6 +++--- src/fds/stream_tracker.hpp | 2 +- src/fds/thread_vector.hpp | 4 ++-- src/fds/utils.hpp | 4 ++-- src/file_watcher/file_watcher.hpp | 2 +- src/flip/flip.hpp | 2 +- src/flip/{client/local => }/flip_client.hpp | 2 +- src/metrics/metrics.hpp | 4 ++-- src/metrics/metrics_group_impl.hpp | 2 +- src/metrics/metrics_rcu.hpp | 2 +- src/metrics/prometheus_reporter.hpp | 2 +- src/settings/settings.hpp | 7 +++---- src/utility/obj_life_counter.hpp | 2 +- src/utility/status_factory.hpp | 2 +- src/utility/thread_buffer.hpp | 7 +++---- src/wisr/wisr_framework.hpp | 4 ++-- 27 files changed, 44 insertions(+), 53 deletions(-) rename src/flip/{client/local => }/flip_client.hpp (99%) diff --git a/conanfile.py b/conanfile.py index 2a27acbf..da451fd8 100644 --- a/conanfile.py +++ b/conanfile.py @@ -113,14 +113,10 @@ def package(self): copy(self, "*", join(self.source_folder, "src/flip/client/python/"), join(self.package_folder, "bindings/flip/python/"), keep_path=False) hdr_dir = join(self.package_folder, join("include", "sisl")) - copy(self, "*.hpp", join(self.source_folder, "src"), hdr_dir, keep_path=True, excludes="flip/*") - copy(self, "*.h", join(self.source_folder, "src"), hdr_dir, keep_path=True, excludes="flip/*") + copy(self, "*.hpp", join(self.source_folder, "src"), hdr_dir, keep_path=True) + copy(self, "*.h", join(self.source_folder, "src"), hdr_dir, keep_path=True) copy(self, "settings_gen.cmake", join(self.source_folder, "cmake"), join(self.package_folder, "cmake"), keep_path=False) - flip_hdr_dir = join(self.package_folder, join("include", "flip")) - copy(self, "*.hpp", join(self.source_folder, "src/flip"), flip_hdr_dir, keep_path=False) - copy(self, "*.h", join(self.source_folder, "src/flip"), flip_hdr_dir, keep_path=False) - def package_info(self): self.cpp_info.libs = ["sisl", "flip"] self.cpp_info.cppflags.extend(["-Wno-unused-local-typedefs", "-fconcepts"]) diff --git a/src/auth_manager/auth_manager.hpp b/src/auth_manager/auth_manager.hpp index 45ad6213..0fbc7c67 100644 --- a/src/auth_manager/auth_manager.hpp +++ b/src/auth_manager/auth_manager.hpp @@ -16,7 +16,7 @@ #pragma GCC diagnostic pop #endif -#include "utility/enum.hpp" +#include "../utility/enum.hpp" #include "security_config.hpp" namespace sisl { diff --git a/src/auth_manager/security_config.hpp b/src/auth_manager/security_config.hpp index cf33d7b1..9a2a043f 100644 --- a/src/auth_manager/security_config.hpp +++ b/src/auth_manager/security_config.hpp @@ -1,6 +1,6 @@ #pragma once -#include "settings/settings.hpp" -#include "options/options.h" +#include "../settings/settings.hpp" +#include "../options/options.h" #include "generated/security_config_generated.h" SETTINGS_INIT(securitycfg::SecuritySettings, security_config) diff --git a/src/cache/evictor.hpp b/src/cache/evictor.hpp index 500fe492..d2714bd0 100644 --- a/src/cache/evictor.hpp +++ b/src/cache/evictor.hpp @@ -19,7 +19,7 @@ #include #include #include -#include "logging/logging.h" +#include "../logging/logging.h" #include "hash_entry_base.hpp" namespace sisl { diff --git a/src/cache/lru_evictor.hpp b/src/cache/lru_evictor.hpp index ea461a48..7430cc67 100644 --- a/src/cache/lru_evictor.hpp +++ b/src/cache/lru_evictor.hpp @@ -21,7 +21,7 @@ #include #include #include -#include "fds/utils.hpp" +#include "../fds/utils.hpp" #include "evictor.hpp" using namespace boost::intrusive; diff --git a/src/cache/range_hashmap.hpp b/src/cache/range_hashmap.hpp index e919eae7..7c7670ad 100644 --- a/src/cache/range_hashmap.hpp +++ b/src/cache/range_hashmap.hpp @@ -31,10 +31,10 @@ #pragma GCC diagnostic pop #endif -#include "fds/buffer.hpp" +#include "../fds/buffer.hpp" #include "hash_entry_base.hpp" -#include "fds/utils.hpp" -#include "utility/enum.hpp" +#include "../fds/utils.hpp" +#include "../utility/enum.hpp" namespace sisl { diff --git a/src/fds/bitset.hpp b/src/fds/bitset.hpp index 240afb7f..b4c579a3 100644 --- a/src/fds/bitset.hpp +++ b/src/fds/bitset.hpp @@ -38,7 +38,7 @@ #pragma GCC diagnostic pop #endif -#include "logging/logging.h" +#include "../logging/logging.h" #include "bitword.hpp" #include "buffer.hpp" diff --git a/src/fds/bitword.hpp b/src/fds/bitword.hpp index aea5d4d8..5d20e1d3 100644 --- a/src/fds/bitword.hpp +++ b/src/fds/bitword.hpp @@ -29,7 +29,7 @@ #include -#include "utility/enum.hpp" +#include "../utility/enum.hpp" namespace sisl { diff --git a/src/fds/buffer.hpp b/src/fds/buffer.hpp index e112f1db..0643b344 100644 --- a/src/fds/buffer.hpp +++ b/src/fds/buffer.hpp @@ -26,9 +26,9 @@ #include #endif -#include -#include -#include +#include "../metrics/metrics.hpp" +#include "../utility/enum.hpp" +#include "../fds/utils.hpp" namespace sisl { struct blob { @@ -104,9 +104,7 @@ class AlignedAllocatorImpl { virtual uint8_t* aligned_pool_alloc(const size_t align, const size_t sz, const sisl::buftag tag) { return aligned_alloc(align, sz, tag); }; - virtual void aligned_pool_free(uint8_t* const b, const size_t sz, const sisl::buftag tag) { - aligned_free(b, tag); - }; + virtual void aligned_pool_free(uint8_t* const b, const size_t sz, const sisl::buftag tag) { aligned_free(b, tag); }; virtual size_t buf_size(uint8_t* buf) const { #ifdef __linux__ diff --git a/src/fds/freelist_allocator.hpp b/src/fds/freelist_allocator.hpp index 9b3fb57c..d0d58973 100644 --- a/src/fds/freelist_allocator.hpp +++ b/src/fds/freelist_allocator.hpp @@ -33,7 +33,7 @@ #pragma GCC diagnostic pop #endif -#include "metrics/metrics.hpp" +#include "../metrics/metrics.hpp" #include "utils.hpp" namespace sisl { diff --git a/src/fds/id_reserver.hpp b/src/fds/id_reserver.hpp index 4c123a2f..ad872a4b 100644 --- a/src/fds/id_reserver.hpp +++ b/src/fds/id_reserver.hpp @@ -21,8 +21,7 @@ #include #include -#include - +#include "../fds/bitset.hpp" #include "bitset.hpp" #include "utils.hpp" diff --git a/src/fds/malloc_helper.hpp b/src/fds/malloc_helper.hpp index e0081353..57a036c1 100644 --- a/src/fds/malloc_helper.hpp +++ b/src/fds/malloc_helper.hpp @@ -36,11 +36,11 @@ #include #endif -#include "logging/logging.h" #include -#include "metrics/histogram_buckets.hpp" -#include "metrics/metrics.hpp" +#include "../logging/logging.h" +#include "../metrics/histogram_buckets.hpp" +#include "../metrics/metrics.hpp" #if defined(USING_TCMALLOC) #include diff --git a/src/fds/stream_tracker.hpp b/src/fds/stream_tracker.hpp index 3366acb2..c061036d 100644 --- a/src/fds/stream_tracker.hpp +++ b/src/fds/stream_tracker.hpp @@ -16,8 +16,8 @@ *********************************************************************************/ #pragma once +#include #include "bitset.hpp" -#include "folly/SharedMutex.h" #include "../metrics/metrics_group_impl.hpp" #include "../metrics/metrics.hpp" diff --git a/src/fds/thread_vector.hpp b/src/fds/thread_vector.hpp index 3628e90d..f55a10df 100644 --- a/src/fds/thread_vector.hpp +++ b/src/fds/thread_vector.hpp @@ -20,8 +20,8 @@ #include #include -#include "wisr/wisr_framework.hpp" -#include "wisr/wisr_ds.hpp" +#include "../wisr/wisr_framework.hpp" +#include "../wisr/wisr_ds.hpp" namespace sisl { diff --git a/src/fds/utils.hpp b/src/fds/utils.hpp index 094c1df7..a71c09fb 100644 --- a/src/fds/utils.hpp +++ b/src/fds/utils.hpp @@ -28,8 +28,8 @@ #include #include -#include "boost/preprocessor/arithmetic/inc.hpp" -#include "boost/preprocessor/repetition/repeat_from_to.hpp" +#include +#include // NOTE: In future should use [[likely]] and [[unlikely]] but not valid syntax in if predicate #if defined __GNUC__ || defined __llvm__ diff --git a/src/file_watcher/file_watcher.hpp b/src/file_watcher/file_watcher.hpp index 6e7b79c9..7959de3c 100644 --- a/src/file_watcher/file_watcher.hpp +++ b/src/file_watcher/file_watcher.hpp @@ -2,7 +2,7 @@ #pragma once #include -#include "logging/logging.h" +#include "../logging/logging.h" #include namespace sisl { diff --git a/src/flip/flip.hpp b/src/flip/flip.hpp index 83408be4..54d8ff86 100644 --- a/src/flip/flip.hpp +++ b/src/flip/flip.hpp @@ -32,7 +32,7 @@ #include "flip_spec.pb.h" #include "flip_rpc_server.hpp" -#include "logging/logging.h" +#include "../logging/logging.h" SISL_LOGGING_DECL(flip) diff --git a/src/flip/client/local/flip_client.hpp b/src/flip/flip_client.hpp similarity index 99% rename from src/flip/client/local/flip_client.hpp rename to src/flip/flip_client.hpp index 10493678..768fee74 100644 --- a/src/flip/client/local/flip_client.hpp +++ b/src/flip/flip_client.hpp @@ -15,7 +15,7 @@ * *********************************************************************************/ #pragma once -#include +#include "flip.hpp" namespace flip { class FlipClient { diff --git a/src/metrics/metrics.hpp b/src/metrics/metrics.hpp index 2768d70a..3fdbb7c0 100644 --- a/src/metrics/metrics.hpp +++ b/src/metrics/metrics.hpp @@ -32,9 +32,9 @@ #include #include #include -#include "logging/logging.h" -#include "options/options.h" +#include "../logging/logging.h" +#include "../options/options.h" #include "metrics_atomic.hpp" #include "metrics_group_impl.hpp" #include "metrics_rcu.hpp" diff --git a/src/metrics/metrics_group_impl.hpp b/src/metrics/metrics_group_impl.hpp index 8f159a95..d7efab69 100644 --- a/src/metrics/metrics_group_impl.hpp +++ b/src/metrics/metrics_group_impl.hpp @@ -32,7 +32,7 @@ #include "histogram_buckets.hpp" #include "prometheus_reporter.hpp" -#include "utility/thread_buffer.hpp" +#include "../utility/thread_buffer.hpp" namespace sisl { using on_gather_cb_t = std::function< void(void) >; diff --git a/src/metrics/metrics_rcu.hpp b/src/metrics/metrics_rcu.hpp index 03c887a3..81a97efc 100644 --- a/src/metrics/metrics_rcu.hpp +++ b/src/metrics/metrics_rcu.hpp @@ -22,7 +22,7 @@ #include #include #include "metrics_tlocal.hpp" -#include "wisr/wisr_framework.hpp" +#include "../wisr/wisr_framework.hpp" namespace sisl { using WisrBufferMetrics = diff --git a/src/metrics/prometheus_reporter.hpp b/src/metrics/prometheus_reporter.hpp index efc497d8..500eb6e1 100644 --- a/src/metrics/prometheus_reporter.hpp +++ b/src/metrics/prometheus_reporter.hpp @@ -33,7 +33,7 @@ #include #pragma GCC diagnostic pop -#include "logging/logging.h" +#include "../logging/logging.h" namespace sisl { diff --git a/src/settings/settings.hpp b/src/settings/settings.hpp index f927fac6..3ee5316b 100644 --- a/src/settings/settings.hpp +++ b/src/settings/settings.hpp @@ -29,10 +29,9 @@ #include -#include "logging/logging.h" -#include "options/options.h" - -#include "utility/urcu_helper.hpp" +#include "../logging/logging.h" +#include "../options/options.h" +#include "../utility/urcu_helper.hpp" #define SETTINGS_INIT(schema_type, schema_name) \ extern unsigned char schema_name##_fbs[]; \ diff --git a/src/utility/obj_life_counter.hpp b/src/utility/obj_life_counter.hpp index 8c689967..a3fc30a3 100644 --- a/src/utility/obj_life_counter.hpp +++ b/src/utility/obj_life_counter.hpp @@ -27,7 +27,7 @@ #if defined(__linux__) || defined(__APPLE__) #include #endif -#include +#include "../metrics/metrics.hpp" namespace sisl { diff --git a/src/utility/status_factory.hpp b/src/utility/status_factory.hpp index 9a59ccc1..070ecffb 100644 --- a/src/utility/status_factory.hpp +++ b/src/utility/status_factory.hpp @@ -16,7 +16,7 @@ *********************************************************************************/ #pragma once #include -#include "logging/logging.h" +#include "../logging/logging.h" namespace sisl { template < typename StatusT > diff --git a/src/utility/thread_buffer.hpp b/src/utility/thread_buffer.hpp index 11ebf1b0..efc6b03c 100644 --- a/src/utility/thread_buffer.hpp +++ b/src/utility/thread_buffer.hpp @@ -34,10 +34,9 @@ #include -#include "fds/flexarray.hpp" -#include "fds/sparse_vector.hpp" -#include "utility/atomic_counter.hpp" - +#include "../fds/flexarray.hpp" +#include "../fds/sparse_vector.hpp" +#include "atomic_counter.hpp" #include "enum.hpp" #include "urcu_helper.hpp" diff --git a/src/wisr/wisr_framework.hpp b/src/wisr/wisr_framework.hpp index 22991dd3..4b90ebaf 100644 --- a/src/wisr/wisr_framework.hpp +++ b/src/wisr/wisr_framework.hpp @@ -19,8 +19,8 @@ #include #include -#include "utility/thread_buffer.hpp" -#include "utility/urcu_helper.hpp" +#include "../utility/thread_buffer.hpp" +#include "../utility/urcu_helper.hpp" namespace sisl { From 499c1adbdd5e875fa4b47883e98cb4139d0392e7 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Tue, 11 Oct 2022 12:27:14 -0700 Subject: [PATCH 146/385] Updated jenkins matrix to streamline internal ebay builds (#36) --- .jenkins/Jenkinsfile | 100 +++++++++++++++++++------------------------ 1 file changed, 44 insertions(+), 56 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 060262e5..4bca57b4 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -62,60 +62,25 @@ pipeline { axes { axis { name 'BUILD_TYPE' - values 'sanitize', 'debug', 'test' + values 'sanitize', 'release', 'debug', 'test' } axis { name 'COVERAGE' values 'False' } - axis { - name 'PRERELEASE' - values 'True', 'False' - } - axis { - name 'ALLOC_IMPL' - values 'tcmalloc', 'libc' - } } - excludes { exclude { - axis { - name 'BUILD_TYPE' - values 'sanitize', 'test' - } - axis { - name 'COVERAGE' - values 'True' - } } + excludes { exclude { - axis { - name 'PRERELEASE' - values 'False' - } - axis { - name 'BUILD_TYPE' - values 'debug', 'sanitize' - } - } - exclude { - axis { - name 'BUILD_TYPE' - values 'test' - } - axis { - name 'ALLOC_IMPL' - values 'libc' + axis { + name 'BUILD_TYPE' + values 'sanitize', 'test', 'release' + } + axis { + name 'COVERAGE' + values 'True' + } } } - exclude { - axis { - name 'BUILD_TYPE' - values 'sanitize' - } - axis { - name 'ALLOC_IMPL' - values 'tcmalloc' - } - } } stages { stage('Adjust Tag for Master/PR') { @@ -128,40 +93,51 @@ pipeline { } } - stage("Build") { + stage("PR Build") { when { allOf { expression { "${COVERAGE}" == 'False' } expression { "${BUILD_TYPE}" != 'sanitize' } + expression { "${BUILD_TYPE}" != 'debug' } + expression { { branch "PR_*" } } } } steps { - sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o malloc_impl=${ALLOC_IMPL} -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" + def PRERELEASE_OPT = 'True' + if ("${BUILD_TYPE}" == 'release') { + PRERELEASE_OPT = 'False' + } + sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE_OPT} -o malloc_impl=tcmalloc -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" } } - stage("Sanitize") { + stage("Branch Build") { when { allOf { - expression { "${BUILD_TYPE}" == 'sanitize' } + expression { "${COVERAGE}" == 'False' } + expression { "${BUILD_TYPE}" != 'sanitize' } + expression { not { branch "PR_*" } } } } steps { - sh "conan create ${BUILD_MISSING} -o malloc_impl=${ALLOC_IMPL} -o sanitize=True -pr debug . ${PROJECT}/${TAG}" + def PRERELEASE_OPT = 'True' + if ("${BUILD_TYPE}" == 'release') { + PRERELEASE_OPT = 'False' + } + sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE_OPT} -o malloc_impl=tcmalloc -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" } } - stage("Deploy") { + stage("Sanitize") { when { allOf { - expression { "${COVERAGE}" == 'False' } - expression { "${BUILD_TYPE}" != 'sanitize' } - expression { not { branch "PR_*" } } + expression { "${BUILD_TYPE}" == 'sanitize' } } } steps { - sh "conan user -r ebay-local -p ${ARTIFACTORY_PASS} _service_sds" - sh "conan upload ${PROJECT}/${TAG} -c --all -r ebay-local" + sh "conan create ${BUILD_MISSING} -o malloc_impl=libc -o sanitize=True -pr debug . ${PROJECT}/${TAG}" } } + stage('Coverage') { when { not { anyOf { branch "${STABLE_BRANCH}" expression { "${COVERAGE}" == 'False' } + expression { not { branch "PR_*" } } } } } stages { @@ -196,6 +172,18 @@ pipeline { } } } + + stage("Deploy") { + when { allOf { + expression { "${COVERAGE}" == 'False' } + expression { "${BUILD_TYPE}" != 'sanitize' } + expression { not { branch "PR_*" } } + } } + steps { + sh "conan user -r ebay-local -p ${ARTIFACTORY_PASS} _service_sds" + sh "conan upload ${PROJECT}/${TAG} -c --all -r ebay-local" + } + } } } } From eab1414f5c7596f89d7191409ba0d9426dab8b82 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Tue, 11 Oct 2022 12:31:01 -0700 Subject: [PATCH 147/385] fixed jenkinsfile incorrect version (#37) --- .jenkins/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 4bca57b4..d23051ed 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -98,7 +98,7 @@ pipeline { expression { "${COVERAGE}" == 'False' } expression { "${BUILD_TYPE}" != 'sanitize' } expression { "${BUILD_TYPE}" != 'debug' } - expression { { branch "PR_*" } } + expression { branch == "PR_*" } } } steps { def PRERELEASE_OPT = 'True' From b02b34667b543db22cf73429aa9fa7a09ce48e11 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Tue, 11 Oct 2022 12:35:01 -0700 Subject: [PATCH 148/385] fixed jenkinsfile incorrect version - 2 (#38) --- .jenkins/Jenkinsfile | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index d23051ed..d718598c 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -101,11 +101,13 @@ pipeline { expression { branch == "PR_*" } } } steps { - def PRERELEASE_OPT = 'True' - if ("${BUILD_TYPE}" == 'release') { - PRERELEASE_OPT = 'False' + script { + def PRERELEASE_OPT = 'True' + if ("${BUILD_TYPE}" == 'release') { + PRERELEASE_OPT = 'False' + } + sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE_OPT} -o malloc_impl=tcmalloc -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" } - sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE_OPT} -o malloc_impl=tcmalloc -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" } } @@ -116,11 +118,13 @@ pipeline { expression { not { branch "PR_*" } } } } steps { - def PRERELEASE_OPT = 'True' - if ("${BUILD_TYPE}" == 'release') { - PRERELEASE_OPT = 'False' + script { + def PRERELEASE_OPT = 'True' + if ("${BUILD_TYPE}" == 'release') { + PRERELEASE_OPT = 'False' + } + sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE_OPT} -o malloc_impl=tcmalloc -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" } - sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE_OPT} -o malloc_impl=tcmalloc -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" } } From 0903f2a75e635267c03783acf46bdc816fe52987 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Tue, 11 Oct 2022 14:30:57 -0700 Subject: [PATCH 149/385] fixed jenkinsfile incorrect version - 3 (#39) --- .jenkins/Jenkinsfile | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index d718598c..a58471ba 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -102,11 +102,13 @@ pipeline { } } steps { script { - def PRERELEASE_OPT = 'True' + def PRERELEASE = 'True' + def BUILD_PROFILE = ${BUILD_TYPE} if ("${BUILD_TYPE}" == 'release') { - PRERELEASE_OPT = 'False' + PRERELEASE = 'False' + BUILD_PROFILE = "test" } - sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE_OPT} -o malloc_impl=tcmalloc -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" + sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o malloc_impl=tcmalloc -pr ${BUILD_PROFILE} . ${PROJECT}/${TAG}" } } } @@ -119,11 +121,13 @@ pipeline { } } steps { script { - def PRERELEASE_OPT = 'True' + def PRERELEASE = 'True' + def BUILD_PROFILE = ${BUILD_TYPE} if ("${BUILD_TYPE}" == 'release') { - PRERELEASE_OPT = 'False' + PRERELEASE = 'False' + BUILD_PROFILE = "test" } - sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE_OPT} -o malloc_impl=tcmalloc -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" + sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o malloc_impl=tcmalloc -pr ${BUILD_PROFILE} . ${PROJECT}/${TAG}" } } } From d392e94cb5bae82b975a0bded1cbfd7628d88bca Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Tue, 11 Oct 2022 17:19:47 -0700 Subject: [PATCH 150/385] Seperate PR and Branch build in ebay internal build steps (#40) --- .jenkins/Jenkinsfile | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index a58471ba..d5f8e1da 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -95,15 +95,15 @@ pipeline { stage("PR Build") { when { allOf { - expression { "${COVERAGE}" == 'False' } - expression { "${BUILD_TYPE}" != 'sanitize' } - expression { "${BUILD_TYPE}" != 'debug' } - expression { branch == "PR_*" } + expression { "${COVERAGE}" == "False" } + expression { "${BUILD_TYPE}" != "sanitize" } + expression { "${BUILD_TYPE}" != "debug" } + expression { BRANCH_NAME ==~ /^PR_*/ } } } steps { script { def PRERELEASE = 'True' - def BUILD_PROFILE = ${BUILD_TYPE} + def BUILD_PROFILE = "${BUILD_TYPE}" if ("${BUILD_TYPE}" == 'release') { PRERELEASE = 'False' BUILD_PROFILE = "test" @@ -115,15 +115,15 @@ pipeline { stage("Branch Build") { when { allOf { - expression { "${COVERAGE}" == 'False' } - expression { "${BUILD_TYPE}" != 'sanitize' } + expression { "${COVERAGE}" == "False" } + expression { "${BUILD_TYPE}" != "sanitize" } expression { not { branch "PR_*" } } } } steps { script { def PRERELEASE = 'True' - def BUILD_PROFILE = ${BUILD_TYPE} - if ("${BUILD_TYPE}" == 'release') { + def BUILD_PROFILE = "${BUILD_TYPE}" + if ("${BUILD_TYPE}" == "release") { PRERELEASE = 'False' BUILD_PROFILE = "test" } From 76bc416670a13129b14f213099a3fda70299cdd5 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Tue, 11 Oct 2022 19:49:07 -0700 Subject: [PATCH 151/385] Missed some headers to make it full path, fixed them (#42) --- conanfile.py | 2 +- src/auth_manager/security_config.fbs | 2 +- src/flip/client/local/test_flip_local_client.cpp | 4 ++-- src/flip/flip.hpp | 2 +- src/flip/flip_rpc_server.hpp | 4 ++-- src/flip/lib/test_flip.cpp | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/conanfile.py b/conanfile.py index da451fd8..4eddf0ae 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.2.2" + version = "8.2.3" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/src/auth_manager/security_config.fbs b/src/auth_manager/security_config.fbs index def2fac2..2c62a436 100644 --- a/src/auth_manager/security_config.fbs +++ b/src/auth_manager/security_config.fbs @@ -1,4 +1,4 @@ -native_include "utility/non_null_ptr.hpp"; +native_include "../../utility/non_null_ptr.hpp"; namespace securitycfg; attribute "hotswap"; diff --git a/src/flip/client/local/test_flip_local_client.cpp b/src/flip/client/local/test_flip_local_client.cpp index 8c16f0d8..37f46dad 100644 --- a/src/flip/client/local/test_flip_local_client.cpp +++ b/src/flip/client/local/test_flip_local_client.cpp @@ -14,8 +14,8 @@ * specific language governing permissions and limitations under the License. * *********************************************************************************/ -#include "flip_spec.pb.h" -#include "flip_client.hpp" +#include "../../proto/flip_spec.pb.h" +#include "../../flip_client.hpp" #include #include diff --git a/src/flip/flip.hpp b/src/flip/flip.hpp index 54d8ff86..f839a0ea 100644 --- a/src/flip/flip.hpp +++ b/src/flip/flip.hpp @@ -30,7 +30,7 @@ #include #include -#include "flip_spec.pb.h" +#include "proto/flip_spec.pb.h" #include "flip_rpc_server.hpp" #include "../logging/logging.h" diff --git a/src/flip/flip_rpc_server.hpp b/src/flip/flip_rpc_server.hpp index 41c4471e..8168ae1f 100644 --- a/src/flip/flip_rpc_server.hpp +++ b/src/flip/flip_rpc_server.hpp @@ -16,8 +16,8 @@ *********************************************************************************/ #pragma once -#include "flip_spec.pb.h" -#include "flip_server.grpc.pb.h" +#include "proto/flip_spec.pb.h" +#include "proto/flip_server.grpc.pb.h" namespace flip { class FlipRPCServer final : public FlipServer::Service { diff --git a/src/flip/lib/test_flip.cpp b/src/flip/lib/test_flip.cpp index 9be3963e..4770569e 100644 --- a/src/flip/lib/test_flip.cpp +++ b/src/flip/lib/test_flip.cpp @@ -14,7 +14,7 @@ * specific language governing permissions and limitations under the License. * *********************************************************************************/ -#include "flip_spec.pb.h" +#include "../proto/flip_spec.pb.h" #include "flip.hpp" #include #include From 81f31f0cb36e6baf78f69a6ace3ade567a9e3eea Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Tue, 11 Oct 2022 19:53:06 -0700 Subject: [PATCH 152/385] Use correct PR tag for internal jenkins build --- .jenkins/Jenkinsfile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index d5f8e1da..23f1e54c 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -98,7 +98,7 @@ pipeline { expression { "${COVERAGE}" == "False" } expression { "${BUILD_TYPE}" != "sanitize" } expression { "${BUILD_TYPE}" != "debug" } - expression { BRANCH_NAME ==~ /^PR_*/ } + expression { (env.BRANCH_NAME =~ /PR_/) } } } steps { script { @@ -117,7 +117,7 @@ pipeline { when { allOf { expression { "${COVERAGE}" == "False" } expression { "${BUILD_TYPE}" != "sanitize" } - expression { not { branch "PR_*" } } + expression { !(env.BRANCH_NAME =~ /PR_/) } } } steps { script { @@ -145,7 +145,7 @@ pipeline { when { not { anyOf { branch "${STABLE_BRANCH}" expression { "${COVERAGE}" == 'False' } - expression { not { branch "PR_*" } } + expression { !(env.BRANCH_NAME =~ /PR_/) } } } } stages { @@ -185,7 +185,7 @@ pipeline { when { allOf { expression { "${COVERAGE}" == 'False' } expression { "${BUILD_TYPE}" != 'sanitize' } - expression { not { branch "PR_*" } } + expression { !(env.BRANCH_NAME =~ /PR_/) } } } steps { sh "conan user -r ebay-local -p ${ARTIFACTORY_PASS} _service_sds" From 8f260dc40345aa1bcdefdb3d04c466fcbc06fd31 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Wed, 12 Oct 2022 09:46:27 -0700 Subject: [PATCH 153/385] Put correct PR search pattern in internal jenkins build (#43) --- .jenkins/Jenkinsfile | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 23f1e54c..b08ca244 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -98,7 +98,7 @@ pipeline { expression { "${COVERAGE}" == "False" } expression { "${BUILD_TYPE}" != "sanitize" } expression { "${BUILD_TYPE}" != "debug" } - expression { (env.BRANCH_NAME =~ /PR_/) } + expression { (env.BRANCH_NAME =~ /PR-/) } } } steps { script { @@ -117,7 +117,7 @@ pipeline { when { allOf { expression { "${COVERAGE}" == "False" } expression { "${BUILD_TYPE}" != "sanitize" } - expression { !(env.BRANCH_NAME =~ /PR_/) } + expression { !(env.BRANCH_NAME =~ /PR-/) } } } steps { script { @@ -132,7 +132,7 @@ pipeline { } } - stage("Sanitize") { + stage("Sanitize PR/Branch Build") { when { allOf { expression { "${BUILD_TYPE}" == 'sanitize' } } } @@ -145,7 +145,7 @@ pipeline { when { not { anyOf { branch "${STABLE_BRANCH}" expression { "${COVERAGE}" == 'False' } - expression { !(env.BRANCH_NAME =~ /PR_/) } + expression { !(env.BRANCH_NAME =~ /PR-/) } } } } stages { @@ -185,7 +185,7 @@ pipeline { when { allOf { expression { "${COVERAGE}" == 'False' } expression { "${BUILD_TYPE}" != 'sanitize' } - expression { !(env.BRANCH_NAME =~ /PR_/) } + expression { !(env.BRANCH_NAME =~ /PR-/) } } } steps { sh "conan user -r ebay-local -p ${ARTIFACTORY_PASS} _service_sds" From b051e4857b88911e9bf02def714d6fb18af28ebe Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Thu, 13 Oct 2022 11:07:22 -0700 Subject: [PATCH 154/385] Made the malloc_impl options explicitly in conanfile itself (#44) --- .jenkins/Jenkinsfile | 43 ++++++++++++------------------------------- conanfile.py | 9 +++++++-- 2 files changed, 19 insertions(+), 33 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index b08ca244..a061a297 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -93,51 +93,32 @@ pipeline { } } - stage("PR Build") { + stage("Build") { when { allOf { expression { "${COVERAGE}" == "False" } - expression { "${BUILD_TYPE}" != "sanitize" } - expression { "${BUILD_TYPE}" != "debug" } - expression { (env.BRANCH_NAME =~ /PR-/) } } } steps { script { def PRERELEASE = 'True' def BUILD_PROFILE = "${BUILD_TYPE}" - if ("${BUILD_TYPE}" == 'release') { - PRERELEASE = 'False' - BUILD_PROFILE = "test" + def SANITIZE = 'False' + + if ("${BUILD_TYPE}" == 'sanitize') { + SANITIZE = 'True' + BUILD_PROFILE = "debug" } - sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o malloc_impl=tcmalloc -pr ${BUILD_PROFILE} . ${PROJECT}/${TAG}" - } - } - } - stage("Branch Build") { - when { allOf { - expression { "${COVERAGE}" == "False" } - expression { "${BUILD_TYPE}" != "sanitize" } - expression { !(env.BRANCH_NAME =~ /PR-/) } - } } - steps { - script { - def PRERELEASE = 'True' - def BUILD_PROFILE = "${BUILD_TYPE}" if ("${BUILD_TYPE}" == "release") { PRERELEASE = 'False' BUILD_PROFILE = "test" } - sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o malloc_impl=tcmalloc -pr ${BUILD_PROFILE} . ${PROJECT}/${TAG}" - } - } - } - stage("Sanitize PR/Branch Build") { - when { allOf { - expression { "${BUILD_TYPE}" == 'sanitize' } - } } - steps { - sh "conan create ${BUILD_MISSING} -o malloc_impl=libc -o sanitize=True -pr debug . ${PROJECT}/${TAG}" + if (("${env.BRANCH_NAME}" =~ /PR-/) && ("$BUILD_TYPE" == "debug")) { + sh "echo Skipping debug build for PR branch" + } else { + sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o sanitize=${SANITIZE} -pr ${BUILD_PROFILE} . ${PROJECT}/${TAG}" + } + } } } diff --git a/conanfile.py b/conanfile.py index 4eddf0ae..62c38338 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.2.3" + version = "8.2.4" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") @@ -31,7 +31,7 @@ class SISLConan(ConanFile): 'coverage': False, 'sanitize': False, 'prerelease': True, - 'malloc_impl': 'libc', + 'malloc_impl': 'tcmalloc', } generators = "cmake", "cmake_find_package" @@ -77,6 +77,11 @@ def validate(self): def configure(self): if self.options.shared: del self.options.fPIC + if self.settings.build_type == "Debug": + if self.options.coverage and self.options.sanitize: + raise ConanInvalidConfiguration("Sanitizer does not work with Code Coverage!") + if self.options.coverage or self.options.sanitize: + self.options.malloc_impl = 'libc' def build(self): cmake = CMake(self) From 42d84f06717a61f872458311977cef0426bdb004 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Thu, 13 Oct 2022 12:12:15 -0700 Subject: [PATCH 155/385] Deploy Sanitize build also for branch build --- .jenkins/Jenkinsfile | 1 - 1 file changed, 1 deletion(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index a061a297..deb674f6 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -165,7 +165,6 @@ pipeline { stage("Deploy") { when { allOf { expression { "${COVERAGE}" == 'False' } - expression { "${BUILD_TYPE}" != 'sanitize' } expression { !(env.BRANCH_NAME =~ /PR-/) } } } steps { From 7d829cec33fed70ec7325cf739a948c76fd1b73d Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Thu, 13 Oct 2022 15:22:28 -0700 Subject: [PATCH 156/385] Make conan update packages during build --- .jenkins/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index deb674f6..2646885b 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -116,7 +116,7 @@ pipeline { if (("${env.BRANCH_NAME}" =~ /PR-/) && ("$BUILD_TYPE" == "debug")) { sh "echo Skipping debug build for PR branch" } else { - sh "conan create ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o sanitize=${SANITIZE} -pr ${BUILD_PROFILE} . ${PROJECT}/${TAG}" + sh "conan create -u ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o sanitize=${SANITIZE} -pr ${BUILD_PROFILE} . ${PROJECT}/${TAG}" } } } From 42fca03874a6a4f61b154ace4099c5b9d2edd70a Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 20 Oct 2022 17:00:29 -0700 Subject: [PATCH 157/385] Make github default libc since gperftools does not exist. --- .github/workflows/build_with_conan.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_with_conan.yml b/.github/workflows/build_with_conan.yml index b92509ba..c7800353 100644 --- a/.github/workflows/build_with_conan.yml +++ b/.github/workflows/build_with_conan.yml @@ -41,7 +41,7 @@ jobs: - name: Install dependencies # Build your program with the given configuration run: | - conan install -o prerelease=False -s build_type=${{ matrix.build-type }} --build missing . + conan install -o malloc_impl=libc -o prerelease=False -s build_type=${{ matrix.build-type }} --build missing . - name: Build # Build your program with the given configuration From 177766090afc586645c539ec694f0deec98eef87 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 20 Oct 2022 17:14:05 -0700 Subject: [PATCH 158/385] FlatBuffers renamed to flatbuffers. --- src/auth_manager/CMakeLists.txt | 2 +- src/settings/CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/auth_manager/CMakeLists.txt b/src/auth_manager/CMakeLists.txt index 8817af3c..c2980617 100644 --- a/src/auth_manager/CMakeLists.txt +++ b/src/auth_manager/CMakeLists.txt @@ -5,7 +5,7 @@ add_flags("-Wno-unused-parameter") include_directories(BEFORE ..) include_directories(BEFORE .) -find_package(FlatBuffers REQUIRED) +find_package(flatbuffers REQUIRED) find_package(Pistache REQUIRED) set(AUTH_MGR_SOURCE_FILES diff --git a/src/settings/CMakeLists.txt b/src/settings/CMakeLists.txt index 1c12d099..11975ffe 100644 --- a/src/settings/CMakeLists.txt +++ b/src/settings/CMakeLists.txt @@ -8,7 +8,7 @@ include_directories(BEFORE ..) include_directories(BEFORE .) include_directories(BEFORE . ${CMAKE_CURRENT_SOURCE_DIR}/) -find_package(FlatBuffers REQUIRED) +find_package(flatbuffers REQUIRED) set(SETTINGS_SOURCE_FILES settings.cpp ) From 52285a817a7e8ba39b6d048fe08115e5946a95eb Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 21 Oct 2022 09:40:47 -0700 Subject: [PATCH 159/385] Disable CBMutex test. --- src/fds/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/fds/CMakeLists.txt b/src/fds/CMakeLists.txt index 6f56a898..c59ed51b 100644 --- a/src/fds/CMakeLists.txt +++ b/src/fds/CMakeLists.txt @@ -63,7 +63,7 @@ set(TEST_CBMUTEX_SOURCE_FILES ) add_executable(test_cb_mutex ${TEST_CBMUTEX_SOURCE_FILES}) target_link_libraries(test_cb_mutex sisl ${COMMON_DEPS} GTest::gtest) -add_test(NAME TestCBMutex COMMAND test_cb_mutex) +#add_test(NAME TestCBMutex COMMAND test_cb_mutex) if (DEFINED MALLOC_IMPL) if (${MALLOC_IMPL} STREQUAL "jemalloc") From 8b94c0a36fd75653e3be5f2e1f76a7ff76499987 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Thu, 27 Oct 2022 14:57:37 -0700 Subject: [PATCH 160/385] add default ssl ca file from env variable (#46) Co-authored-by: Ravi Akella email = raakella@ebay.com --- conanfile.py | 2 +- src/auth_manager/security_config.hpp | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index 62c38338..02b63d0d 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.2.4" + version = "8.2.5" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/src/auth_manager/security_config.hpp b/src/auth_manager/security_config.hpp index 9a2a043f..7923891d 100644 --- a/src/auth_manager/security_config.hpp +++ b/src/auth_manager/security_config.hpp @@ -26,6 +26,7 @@ class SecurityDynamicConfig { inline static const std::string default_app_env{get_env("APP_ENV")}; inline static const std::string default_ssl_cert_file{get_env("SSL_CERT")}; inline static const std::string default_ssl_key_file{get_env("SSL_KEY")}; + inline static const std::string default_ssl_ca_file{get_env("SSL_CA")}; inline static const std::string default_tf_token_url{get_env("TOKEN_URL")}; inline static const std::string default_issuer{get_env("TOKEN_ISSUER")}; inline static const std::string default_server{get_env("TOKEN_SERVER")}; @@ -46,6 +47,11 @@ class SecurityDynamicConfig { ssl_key_file = default_ssl_key_file; is_modified = true; } + auto& ssl_ca_file = s.ssl_ca_file; + if (ssl_ca_file.empty()) { + ssl_ca_file = default_ssl_ca_file; + is_modified = true; + } auto& server = s.trf_client->server; if (server.empty()) { server = std::string_view(default_server); From 0e59a8febf4fd6b488fb9360c258d89ad814b022 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Mon, 31 Oct 2022 12:15:40 -0700 Subject: [PATCH 161/385] populate app_env config properly (#47) Co-authored-by: Ravi Akella email = raakella@ebay.com --- src/auth_manager/security_config.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/auth_manager/security_config.hpp b/src/auth_manager/security_config.hpp index 7923891d..41789ab9 100644 --- a/src/auth_manager/security_config.hpp +++ b/src/auth_manager/security_config.hpp @@ -88,7 +88,7 @@ class SecurityDynamicConfig { is_modified = true; } auto& app_env = s.trf_client->app_env; - if (app_name.empty()) { + if (app_env.empty()) { app_env = std::string_view(default_app_env); is_modified = true; } From d217c29fc04f17c727b5cae654cf0299ef3ac24c Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 1 Nov 2022 08:15:47 -0700 Subject: [PATCH 162/385] Need to propagate sanitize flags when option is enabled. --- conanfile.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/conanfile.py b/conanfile.py index 02b63d0d..c03be5b5 100644 --- a/conanfile.py +++ b/conanfile.py @@ -133,6 +133,11 @@ def package_info(self): self.cpp_info.system_libs.extend(["dl", "pthread"]) self.cpp_info.exelinkflags.extend(["-export-dynamic"]) + if self.options.sanitize: + self.cpp_info.sharedlinkflags.append("-fsanitize=address") + self.cpp_info.exelinkflags.append("-fsanitize=address") + self.cpp_info.sharedlinkflags.append("-fsanitize=undefined") + self.cpp_info.exelinkflags.append("-fsanitize=undefined") if self.options.malloc_impl == 'jemalloc': self.cpp_info.cppflags.append("-DUSE_JEMALLOC=1") elif self.options.malloc_impl == 'tcmalloc': From 7875910c5366f931c93ca944306afd4197e10213 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Tue, 1 Nov 2022 22:46:49 -0700 Subject: [PATCH 163/385] trf token server response 'expires_in' format is unsigned int. Protect trf client data memebers using shared mutex (#49) Co-authored-by: Ravi Akella email = raakella@ebay.com --- conanfile.py | 2 +- src/auth_manager/tests/AuthTest.cpp | 3 +-- src/auth_manager/trf_client.cpp | 23 +++++++++++++++++------ src/auth_manager/trf_client.hpp | 10 ++++++++-- 4 files changed, 27 insertions(+), 11 deletions(-) diff --git a/conanfile.py b/conanfile.py index c03be5b5..1d271473 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.2.5" + version = "8.2.6" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/src/auth_manager/tests/AuthTest.cpp b/src/auth_manager/tests/AuthTest.cpp index 2c320709..bd81de10 100644 --- a/src/auth_manager/tests/AuthTest.cpp +++ b/src/auth_manager/tests/AuthTest.cpp @@ -156,7 +156,6 @@ class MockTrfClient : public TrfClient { void set_expiry(std::chrono::system_clock::time_point tp) { m_expiry = tp; } std::string get_access_token() { return m_access_token; } - std::string get_token_type() { return m_token_type; } }; static void load_trf_settings() { @@ -226,7 +225,7 @@ static void set_token_response(const std::string& raw_token) { raw_token + "\",\n" " \"token_type\": \"Bearer\",\n" - " \"expires_in\": \"2000\",\n" + " \"expires_in\": 2000,\n" " \"refresh_token\": \"dummy_refresh_token\"\n" "}"; } diff --git a/src/auth_manager/trf_client.cpp b/src/auth_manager/trf_client.cpp index 6ef8fbbb..3219b483 100644 --- a/src/auth_manager/trf_client.cpp +++ b/src/auth_manager/trf_client.cpp @@ -65,24 +65,35 @@ void TrfClient::request_with_grant_token() { session.SetTimeout(std::chrono::milliseconds{5000}); const auto resp{session.Post()}; if (resp.error || resp.status_code != 200) { - // TODO: log error, rest call failed + LOGDEBUG("request grant token from server failed, error: {}, status code: {}", resp.error.message, + resp.status_code); return; } try { const nlohmann::json resp_json = nlohmann::json::parse(resp.text); - const std::string expires_in{resp_json["expires_in"]}; - m_expiry = std::chrono::system_clock::now() + std::chrono::seconds(std::stoi(expires_in)); + m_expiry = std::chrono::system_clock::now() + std::chrono::seconds(resp_json["expires_in"]); m_access_token = resp_json["access_token"]; m_token_type = resp_json["token_type"]; } catch ([[maybe_unused]] const nlohmann::detail::exception& e) { - // TODO: log error, parsing failed - return; + LOGDEBUG("parsing token response failed, what: {}", e.what()); } } std::string TrfClient::get_token() { - if (m_access_token.empty() || access_token_expired()) { request_with_grant_token(); } + { + std::shared_lock< std::shared_mutex > lock(m_mtx); + if (!(m_access_token.empty() || access_token_expired())) { return m_access_token; } + } + + // Not a frequent code path, occurs for the first time or when token expires + std::unique_lock< std::shared_mutex > lock(m_mtx); + request_with_grant_token(); return m_access_token; } + +std::string TrfClient::get_token_type() { + std::shared_lock< std::shared_mutex > lock(m_mtx); + return m_token_type; +} } // namespace sisl diff --git a/src/auth_manager/trf_client.hpp b/src/auth_manager/trf_client.hpp index e9eba20c..1eab98d1 100644 --- a/src/auth_manager/trf_client.hpp +++ b/src/auth_manager/trf_client.hpp @@ -16,9 +16,11 @@ class TrfClient { public: TrfClient(); std::string get_token(); + std::string get_token_type(); std::string get_typed_token() { - const auto token_str{get_token()}; - return fmt::format("{} {}", m_token_type, token_str); + // get_token needs to be called first which might potentially set token type + const auto token{get_token()}; + return fmt::format("{} {}", get_token_type(), token); } private: @@ -30,7 +32,11 @@ class TrfClient { } static bool get_file_contents(const std::string& file_name, std::string& contents); +private: + std::shared_mutex m_mtx; + protected: + // acquire unique lock before calling virtual void request_with_grant_token(); protected: From 3c4f32e27ff0fefd96883a72a3f3ae47d41ffb2e Mon Sep 17 00:00:00 2001 From: "Ravi Akella email = raakella@ebay.com" Date: Wed, 2 Nov 2022 13:30:32 -0700 Subject: [PATCH 164/385] fix Auth test --- tests/unit/auth_test.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/auth_test.cpp b/tests/unit/auth_test.cpp index 72236ac0..27632250 100644 --- a/tests/unit/auth_test.cpp +++ b/tests/unit/auth_test.cpp @@ -31,7 +31,7 @@ static void set_token_response(const std::string& raw_token) { raw_token + "\",\n" " \"token_type\": \"Bearer\",\n" - " \"expires_in\": \"2000\",\n" + " \"expires_in\": 2000,\n" " \"refresh_token\": \"dummy_refresh_token\"\n" "}"; } From 6e1fec52637138a87e825cc9f0669494119f04a8 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 4 Nov 2022 08:34:30 -0700 Subject: [PATCH 165/385] Update spdlog, some removals of relative include paths. --- conanfile.py | 2 +- src/auth_manager/security_config.fbs | 2 +- src/flip/CMakeLists.txt | 2 +- src/flip/client/local/test_flip_local_client.cpp | 4 ++-- src/flip/lib/test_flip.cpp | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/conanfile.py b/conanfile.py index 1d271473..d7519a90 100644 --- a/conanfile.py +++ b/conanfile.py @@ -57,7 +57,7 @@ def requirements(self): self.requires("jwt-cpp/0.4.0") self.requires("nlohmann_json/3.10.5") self.requires("zmarok-semver/1.1.0") - self.requires("spdlog/1.10.0") + self.requires("spdlog/1.11.0") self.requires("userspace-rcu/0.11.4") self.requires("prometheus-cpp/1.0.1") self.requires("fmt/8.1.1", override=True) diff --git a/src/auth_manager/security_config.fbs b/src/auth_manager/security_config.fbs index 2c62a436..def2fac2 100644 --- a/src/auth_manager/security_config.fbs +++ b/src/auth_manager/security_config.fbs @@ -1,4 +1,4 @@ -native_include "../../utility/non_null_ptr.hpp"; +native_include "utility/non_null_ptr.hpp"; namespace securitycfg; attribute "hotswap"; diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 33581cce..53d7cbb4 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -7,7 +7,7 @@ endif() find_package(gRPC REQUIRED) include_directories(BEFORE include) -include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/proto) +include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}) include_directories(BEFORE ..) include_directories(BEFORE .) diff --git a/src/flip/client/local/test_flip_local_client.cpp b/src/flip/client/local/test_flip_local_client.cpp index 37f46dad..8e99e867 100644 --- a/src/flip/client/local/test_flip_local_client.cpp +++ b/src/flip/client/local/test_flip_local_client.cpp @@ -14,8 +14,8 @@ * specific language governing permissions and limitations under the License. * *********************************************************************************/ -#include "../../proto/flip_spec.pb.h" -#include "../../flip_client.hpp" +#include "proto/flip_spec.pb.h" +#include "flip_client.hpp" #include #include diff --git a/src/flip/lib/test_flip.cpp b/src/flip/lib/test_flip.cpp index 4770569e..b9e4c0f8 100644 --- a/src/flip/lib/test_flip.cpp +++ b/src/flip/lib/test_flip.cpp @@ -14,7 +14,7 @@ * specific language governing permissions and limitations under the License. * *********************************************************************************/ -#include "../proto/flip_spec.pb.h" +#include "proto/flip_spec.pb.h" #include "flip.hpp" #include #include From d9ae38fd5c0ef1aaba91d77fe0230df5a2c220fc Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 4 Nov 2022 10:51:38 -0700 Subject: [PATCH 166/385] Publish libc variants for Debug and RelWithDebInfo,PRERELEASE=Fase builds. --- .jenkins/Jenkinsfile | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 2646885b..91137a59 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -64,6 +64,10 @@ pipeline { name 'BUILD_TYPE' values 'sanitize', 'release', 'debug', 'test' } + axis { + name 'ALLOC' + values 'libc', 'tcmalloc' + } axis { name 'COVERAGE' values 'False' @@ -80,6 +84,16 @@ pipeline { values 'True' } } + exclude { + axis { + name 'BUILD_TYPE' + values 'sanitize', 'test' + } + axis { + name 'ALLOC' + values 'libc' + } + } } stages { @@ -116,7 +130,7 @@ pipeline { if (("${env.BRANCH_NAME}" =~ /PR-/) && ("$BUILD_TYPE" == "debug")) { sh "echo Skipping debug build for PR branch" } else { - sh "conan create -u ${BUILD_MISSING} -o prerelease=${PRERELEASE} -o sanitize=${SANITIZE} -pr ${BUILD_PROFILE} . ${PROJECT}/${TAG}" + sh "conan create -u ${BUILD_MISSING} -o malloc_impl=${ALLOC} -o prerelease=${PRERELEASE} -o sanitize=${SANITIZE} -pr ${BUILD_PROFILE} . ${PROJECT}/${TAG}" } } } From 0f9ef2653f77aa3929cabaf892936d6e2475ff37 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 4 Nov 2022 15:19:44 -0600 Subject: [PATCH 167/385] Buildable on MacOS. --- CMakeLists.txt | 64 ++++++++++++++++++++++------------ README.md | 4 +++ cmake/debug_flags.cmake | 41 ++++++++++++---------- conanfile.py | 13 ++++--- src/logging/backtrace.h | 1 - src/logging/lib/backtrace.cpp | 6 ++-- src/logging/lib/logging.cpp | 5 +-- src/logging/lib/stacktrace.cpp | 18 ++++++++++ 8 files changed, 100 insertions(+), 52 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7ed3545f..4db2c433 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -53,7 +53,6 @@ find_package(benchmark REQUIRED) find_package(Boost REQUIRED) find_package(cpr REQUIRED) find_package(cxxopts REQUIRED) -find_package(folly REQUIRED) find_package(GTest REQUIRED) if (${MALLOC_IMPL} STREQUAL "tcmalloc") find_package(gperftools REQUIRED) @@ -65,7 +64,12 @@ find_package(prometheus-cpp REQUIRED) find_package(zmarok-semver REQUIRED) find_package(spdlog REQUIRED) find_package(Threads REQUIRED) -find_package(userspace-rcu REQUIRED) + +# Linux Specific dependencies +if(${CMAKE_HOST_SYSTEM_NAME} STREQUAL Linux) + find_package(folly REQUIRED) + find_package(userspace-rcu REQUIRED) +endif() list (APPEND COMMON_DEPS Boost::headers @@ -73,8 +77,13 @@ list (APPEND COMMON_DEPS nlohmann_json::nlohmann_json prometheus-cpp::prometheus-cpp spdlog::spdlog - userspace-rcu::userspace-rcu ) +if(${CMAKE_HOST_SYSTEM_NAME} STREQUAL Linux) + list (APPEND COMMON_DEPS + userspace-rcu::userspace-rcu + ) +endif() + if (${prerelease_dummy_FOUND}) list (APPEND COMMON_DEPS prerelease_dummy::prerelease_dummy) endif () @@ -118,35 +127,46 @@ include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/src/auth_manager) include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/src/settings) #add_subdirectory (src/btree) -add_subdirectory (src/cache) add_subdirectory (src/logging) add_subdirectory (src/options) -add_subdirectory (src/wisr) -add_subdirectory (src/metrics) -add_subdirectory (src/fds) -add_subdirectory (src/settings) -add_subdirectory (src/utility) add_subdirectory (src/sisl_version) -add_subdirectory (src/auth_manager) -add_subdirectory (src/file_watcher) -add_subdirectory (src/flip) + +# These sub-libraries currently do not support MacOS due to dependencies +# on Folly and pistache. It is unknown if Windows is supported... +list(APPEND POSIX_LIBRARIES ) +list(APPEND SISL_DEPS ) +if(${CMAKE_HOST_SYSTEM_NAME} STREQUAL Linux) + add_subdirectory (src/auth_manager) + add_subdirectory (src/cache) + add_subdirectory (src/fds) + add_subdirectory (src/file_watcher) + add_subdirectory (src/flip) + add_subdirectory (src/metrics) + add_subdirectory (src/settings) + add_subdirectory (src/utility) + add_subdirectory (src/wisr) + + list(APPEND POSIX_LIBRARIES + $ + $ + $ + $ + $ + $ + $ + ) + list(APPEND SISL_DEPS + Folly::Folly + ) +endif() add_library(sisl - $ - $ + ${POSIX_LIBRARIES} $ $ $ - $ - $ - $ - $ - $ ) -list(APPEND SISL_DEPS - Folly::Folly -) if (DEFINED MALLOC_IMPL) if (${MALLOC_IMPL} STREQUAL "tcmalloc") list(APPEND SISL_DEPS gperftools::gperftools) diff --git a/README.md b/README.md index 7a31b6ba..cf6f02f6 100644 --- a/README.md +++ b/README.md @@ -19,6 +19,8 @@ sent to prometheus whichever caller choose from. It is meant to scale with multi metrics. The collection is extremely fast <<5ns per metric, but pay penalty during metrics result gathering which is rare. It uses Wisr framework which will be detailed next +*Lacks MacOS support* + ### Wisr WISR stands for Waitfree Inserts Snoozy Rest. This is a framework and data structures on top of this framework which provides @@ -33,6 +35,8 @@ More details in the Wisr README under [src/wisr/README.md] This is a bunch of data structures meant for high performance or specific use cases. Each of these structures are detailed in their corresponding source files. Some of the major data structures are listed below: +*Lacks MacOS support* + #### Bitset A high performance bitset to have various functionalities to scan the contiguous 1s, 0s, set/reset multiple bits without iterating over every bit, ability to serialize/deserialize bitsets, atomically update concurrent bits, ability to dynamically resize and shrink. It diff --git a/cmake/debug_flags.cmake b/cmake/debug_flags.cmake index 45c1e7f3..6dab2ec2 100644 --- a/cmake/debug_flags.cmake +++ b/cmake/debug_flags.cmake @@ -39,25 +39,28 @@ # missing values. With these, I've never seen a stack frame I couldn't step # into, and never seen when I look at a local. # -set (REALLY_NO_OPTIMIZATION_FLAGS "-fno-short-enums" )# Binary-incompatible with code compiled otherwise. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-aggressive-loop-optimizations" ) # Changes behavior on overflow. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-branch-count-reg" )# Changes CPU instructions used. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-dce )# Can be wrong in the presence of bugs (CBWITPOB). set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-delete-null-pointer-checks )# CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-dse )# CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-early-inlining )# NO INLINING! Because... set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-gcse-lm )# Changes CPU instructions used. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-inline )# ...inlining also does things like elide locals. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-ira-hoist-pressure )# Might be irrelevant, but NO HOISTING! set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-ivopts )# Elides and changes instructions. CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-jump-tables )# Changes CPU instructions for switch statements. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-move-loop-invariants )# NO HOISTING! set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-peephole )# Exploiting CPU quirks. CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-prefetch-loop-arrays )# Changes CPU instructions, even GCC manual is ambivalent. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-rename-registers" )# Maybe wrong in the presence of bugs? -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-toplevel-reorder" )# Elides unused static variable, reorders globals. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-coalesce-vars" )# Elides temporaries. CBWITPOB. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-cselim" )# Reorders, violates C++ mem model, CBWITPOB. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-forwprop" )# Reorders and changes instructions. CBWITPOB. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-loop-if-convert" )# Reorders and changes instructions. CBWITPOB. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-loop-im" )# Reorders and changes instructions. CBWITPOB. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-loop-optimize" )# Reorders and changes instructions. CBWITPOB. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-phiprop" )# NO HOISTING! Reorders and changes. CBWITPOB. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-pta" )# Less analysis means maybe less interference. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-reassoc" )# Elides and vectories. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-scev-cprop" )# Elides and changes instructions. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-vect-loop-version" )# E&C. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-web" )# E&C. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-slp-vectorize" )# E&C. -set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fthreadsafe-statics" )# Slightly smaller in code that doesn't need to be TS. +set (REALLY_NO_OPTIMIZATION_FLAGS ) +if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU")) + set (REALLY_NO_OPTIMIZATION_FLAGS "-fno-short-enums" )# Binary-incompatible with code compiled otherwise. + set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-aggressive-loop-optimizations" ) # Changes behavior on overflow. + set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-branch-count-reg" )# Changes CPU instructions used. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-dce )# Can be wrong in the presence of bugs (CBWITPOB). set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-delete-null-pointer-checks )# CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-dse )# CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-early-inlining )# NO INLINING! Because... set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-gcse-lm )# Changes CPU instructions used. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-inline )# ...inlining also does things like elide locals. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-ira-hoist-pressure )# Might be irrelevant, but NO HOISTING! set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-ivopts )# Elides and changes instructions. CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-jump-tables )# Changes CPU instructions for switch statements. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-move-loop-invariants )# NO HOISTING! set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-peephole )# Exploiting CPU quirks. CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-prefetch-loop-arrays )# Changes CPU instructions, even GCC manual is ambivalent. + set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-rename-registers" )# Maybe wrong in the presence of bugs? + set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-toplevel-reorder" )# Elides unused static variable, reorders globals. + set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-coalesce-vars" )# Elides temporaries. CBWITPOB. + set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-cselim" )# Reorders, violates C++ mem model, CBWITPOB. + set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-forwprop" )# Reorders and changes instructions. CBWITPOB. + set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-loop-if-convert" )# Reorders and changes instructions. CBWITPOB. + set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-loop-im" )# Reorders and changes instructions. CBWITPOB. + set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-loop-optimize" )# Reorders and changes instructions. CBWITPOB. + set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-phiprop" )# NO HOISTING! Reorders and changes. CBWITPOB. + set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-pta" )# Less analysis means maybe less interference. + set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-reassoc" )# Elides and vectories. + set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-scev-cprop" )# Elides and changes instructions. + set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-vect-loop-version" )# E&C. + set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-web" )# E&C. + set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-slp-vectorize" )# E&C. + set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fthreadsafe-statics" )# Slightly smaller in code that doesn't need to be TS. +endif() if (DEFINED CONAN_BUILD_COVERAGE) if (${CONAN_BUILD_COVERAGE}) diff --git a/conanfile.py b/conanfile.py index d7519a90..661845c7 100644 --- a/conanfile.py +++ b/conanfile.py @@ -40,7 +40,8 @@ class SISLConan(ConanFile): def build_requirements(self): self.build_requires("benchmark/1.6.1") self.build_requires("gtest/1.11.0") - self.build_requires("pistache/cci.20201127") + if self.settings.os in ["Linux"]: + self.build_requires("pistache/cci.20201127") def requirements(self): # Custom packages @@ -52,14 +53,16 @@ def requirements(self): self.requires("cpr/1.8.1") self.requires("cxxopts/2.2.1") self.requires("flatbuffers/1.12.0") - self.requires("folly/2022.01.31.00") + if self.settings.os in ["Linux"]: + self.requires("folly/2022.01.31.00") self.requires("grpc/1.48.0") self.requires("jwt-cpp/0.4.0") self.requires("nlohmann_json/3.10.5") - self.requires("zmarok-semver/1.1.0") - self.requires("spdlog/1.11.0") - self.requires("userspace-rcu/0.11.4") self.requires("prometheus-cpp/1.0.1") + self.requires("spdlog/1.11.0") + if self.settings.os in ["Linux"]: + self.requires("userspace-rcu/0.11.4") + self.requires("zmarok-semver/1.1.0") self.requires("fmt/8.1.1", override=True) self.requires("libevent/2.1.12", override=True) self.requires("openssl/1.1.1q", override=True) diff --git a/src/logging/backtrace.h b/src/logging/backtrace.h index 4b654600..5e862d89 100644 --- a/src/logging/backtrace.h +++ b/src/logging/backtrace.h @@ -81,7 +81,6 @@ constexpr uint64_t pipe_timeout_ms{15000}; // 15 seconds. Addr2line can be extr [[maybe_unused]] extern size_t stack_interpret_apple(const void* const* const stack_ptr, const char* const* const stack_msg, const size_t stack_size, char* const output_buf, const size_t output_buflen, - const bool trim_internal, [[maybe_unused]] const bool trim_internal); #else [[maybe_unused]] extern size_t stack_interpret_other(const void* const* const stack_ptr, diff --git a/src/logging/lib/backtrace.cpp b/src/logging/lib/backtrace.cpp index 55d95129..1ac215ff 100644 --- a/src/logging/lib/backtrace.cpp +++ b/src/logging/lib/backtrace.cpp @@ -563,7 +563,7 @@ size_t stack_interpret_linux_file(const void* const* const stack_ptr, FILE* cons #ifdef __APPLE__ size_t stack_interpret_apple([[maybe_unused]] const void* const* const stack_ptr, const char* const* const stack_msg, - const size_t stack_size, char* const output_buf, const size_t output_buflen, , + const size_t stack_size, char* const output_buf, const size_t output_buflen, [[maybe_unused]] const bool trim_internal) { size_t cur_len{0}; @@ -639,14 +639,14 @@ size_t stack_interpret_apple([[maybe_unused]] const void* const* const stack_ptr ss << std::hex << load_base; ss << " -o " << exec_full_path; ss << " " << address; - const std::unique_ptr< FILE, std::function< void(FILE* const) > > fp{::popen(ss.str().c_str() "r"), + const std::unique_ptr< FILE, std::function< void(FILE* const) > > fp{::popen(ss.str().c_str(), "r"), [](FILE* const ptr) { if (ptr) ::pclose(ptr); }}; if (!fp) continue; std::array< char, 4096 > atos_cstr; - std::fgets(atos_cstr.data(), atos_cstr.size() - 1, fp); + std::fgets(atos_cstr.data(), atos_cstr.size() - 1, fp.get()); const std::string atos_str{atos_cstr.data()}; size_t d_pos{atos_str.find(" (in ")}; diff --git a/src/logging/lib/logging.cpp b/src/logging/lib/logging.cpp index 3bef31fb..88effd46 100644 --- a/src/logging/lib/logging.cpp +++ b/src/logging/lib/logging.cpp @@ -26,14 +26,15 @@ #include #if defined(__linux__) || defined(__APPLE__) +#if defined(__APPLE__) +#undef _POSIX_C_SOURCE +#endif #include #include #endif #if defined(__linux__) #include -#elif defined(__APPLE__) -#include #endif #include "options/options.h" diff --git a/src/logging/lib/stacktrace.cpp b/src/logging/lib/stacktrace.cpp index f96711b4..095464f7 100644 --- a/src/logging/lib/stacktrace.cpp +++ b/src/logging/lib/stacktrace.cpp @@ -64,8 +64,10 @@ static bool exit_in_progress() { pthread_t current_id{tracing_thread_id.load()}; pthread_t new_id{pthread_self()}; +#ifndef __APPLE__ if (logger) { logger->critical("Thread num: {} entered exit handler\n", new_id); } if (critical_logger) { critical_logger->critical("Thread num: {} entered exit handler\n", new_id); } +#endif if (current_id == new_id) { // we are already marked in exit handler @@ -192,11 +194,19 @@ static void log_stack_trace_all_threads() { if (signal_thread) { const auto log_failure{[&logger, &critical_logger, &thread_count, &thread_id](const char* const msg) { if (logger) { +#ifndef __APPLE__ logger->critical("Thread ID: {}, Thread num: {} - {}\n", thread_id, thread_count, msg); +#else + logger->critical("Thread num: {} - {}\n", thread_count, msg); +#endif logger->flush(); } if (critical_logger) { +#ifndef __APPLE__ critical_logger->critical("Thread ID: {}, Thread num: {} - {}\n", thread_id, thread_count, msg); +#else + critical_logger->critical("Thread num: {} - {}\n", thread_count, msg); +#endif critical_logger->flush(); } }}; @@ -234,12 +244,20 @@ static void log_stack_trace_all_threads() { } if (logger) { +#ifndef __APPLE__ logger->critical("Thread ID: {}, Thread num: {}\n{}", thread_id, thread_count, g_stacktrace_buff.data()); +#else + logger->critical("Thread num: {}\n{}", thread_count, g_stacktrace_buff.data()); +#endif logger->flush(); } if (critical_logger) { +#ifndef __APPLE__ critical_logger->critical("Thread ID: {}, Thread num: {}\n{}", thread_id, thread_count, g_stacktrace_buff.data()); +#else + critical_logger->critical("Thread num: {}\n{}", thread_count, g_stacktrace_buff.data()); +#endif critical_logger->flush(); } }}; From 33e1cd9d6c14c6edbda89e805c0ef8e639461d8d Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 7 Nov 2022 08:48:07 -0700 Subject: [PATCH 168/385] Move exported header into public interface subdir. --- CMakeLists.txt | 7 +++++-- {src => include/sisl}/utility/non_null_ptr.hpp | 0 src/auth_manager/security_config.fbs | 2 +- src/settings/tests/test_app_schema.fbs | 2 +- 4 files changed, 7 insertions(+), 4 deletions(-) rename {src => include/sisl}/utility/non_null_ptr.hpp (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 4db2c433..c37b6617 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -123,8 +123,11 @@ if(UNIX) add_flags("-D_POSIX_C_SOURCE=200809L -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE") endif() -include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/src/auth_manager) -include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/src/settings) +include_directories(BEFORE + ${CMAKE_CURRENT_SOURCE_DIR}/include + ${CMAKE_CURRENT_BINARY_DIR}/src/auth_manager + ${CMAKE_CURRENT_BINARY_DIR}/src/settings +) #add_subdirectory (src/btree) add_subdirectory (src/logging) diff --git a/src/utility/non_null_ptr.hpp b/include/sisl/utility/non_null_ptr.hpp similarity index 100% rename from src/utility/non_null_ptr.hpp rename to include/sisl/utility/non_null_ptr.hpp diff --git a/src/auth_manager/security_config.fbs b/src/auth_manager/security_config.fbs index def2fac2..e560455b 100644 --- a/src/auth_manager/security_config.fbs +++ b/src/auth_manager/security_config.fbs @@ -1,4 +1,4 @@ -native_include "utility/non_null_ptr.hpp"; +native_include "sisl/utility/non_null_ptr.hpp"; namespace securitycfg; attribute "hotswap"; diff --git a/src/settings/tests/test_app_schema.fbs b/src/settings/tests/test_app_schema.fbs index b72c60ed..feceb2b6 100644 --- a/src/settings/tests/test_app_schema.fbs +++ b/src/settings/tests/test_app_schema.fbs @@ -1,4 +1,4 @@ -native_include "utility/non_null_ptr.hpp"; +native_include "sisl/utility/non_null_ptr.hpp"; namespace testapp; From ac4e02b7c016af83f98e733cde14f984419ed442 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 7 Nov 2022 09:08:23 -0700 Subject: [PATCH 169/385] Add missing export --- conanfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index 661845c7..6425536b 100644 --- a/conanfile.py +++ b/conanfile.py @@ -35,7 +35,7 @@ class SISLConan(ConanFile): } generators = "cmake", "cmake_find_package" - exports_sources = ("CMakeLists.txt", "cmake/*", "src/*", "LICENSE") + exports_sources = ("CMakeLists.txt", "cmake/*", "include/*", "src/*", "LICENSE") def build_requirements(self): self.build_requires("benchmark/1.6.1") From b3c01b369c41bb7e00fe1fdde7e7991f4afcbef2 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 7 Nov 2022 09:31:22 -0700 Subject: [PATCH 170/385] Copy missing headers from new API dir. --- conanfile.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/conanfile.py b/conanfile.py index 6425536b..fd89262c 100644 --- a/conanfile.py +++ b/conanfile.py @@ -120,9 +120,12 @@ def package(self): copy(self, "*.proto", join(self.source_folder, "src/flip/proto/"), join(self.package_folder, "proto/flip/"), keep_path=False) copy(self, "*", join(self.source_folder, "src/flip/client/python/"), join(self.package_folder, "bindings/flip/python/"), keep_path=False) - hdr_dir = join(self.package_folder, join("include", "sisl")) - copy(self, "*.hpp", join(self.source_folder, "src"), hdr_dir, keep_path=True) - copy(self, "*.h", join(self.source_folder, "src"), hdr_dir, keep_path=True) + hdr_dir = join(self.package_folder, "include") + copy(self, "*.h*", join(self.source_folder, "include"), hdr_dir, keep_path=True) + + old_hdr_dir = join(self.package_folder, "include", "sisl") + copy(self, "*.hpp", join(self.source_folder, "src"), old_hdr_dir, keep_path=True) + copy(self, "*.h", join(self.source_folder, "src"), old_hdr_dir, keep_path=True) copy(self, "settings_gen.cmake", join(self.source_folder, "cmake"), join(self.package_folder, "cmake"), keep_path=False) def package_info(self): From e7ca26d9c6dfb57b71959d03cb58121accb40129 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 7 Nov 2022 10:03:54 -0700 Subject: [PATCH 171/385] Begin moving all public (API) headers to conform to standard CMake layout. --- .gitignore | 1 + {src => include/sisl}/logging/logging.h | 1 + {src => include/sisl}/options/options.h | 0 src/auth_manager/security_config.hpp | 2 +- src/cache/evictor.hpp | 2 +- src/cache/tests/test_range_cache.cpp | 4 +-- src/cache/tests/test_range_hashmap.cpp | 4 +-- src/fds/bitset.hpp | 2 +- src/fds/malloc_helper.hpp | 2 +- src/fds/tests/obj_allocator_benchmark.cpp | 4 +-- src/fds/tests/test_atomic_status_counter.cpp | 4 +-- src/fds/tests/test_bitset.cpp | 4 +-- src/fds/tests/test_bitword.cpp | 4 +-- src/fds/tests/test_cb_mutex.cpp | 4 +-- src/fds/tests/test_idreserver.cpp | 4 +-- src/fds/tests/test_jemalloc_helper.cpp | 4 +-- src/fds/tests/test_obj_allocator.cpp | 4 +-- src/fds/tests/test_tcmalloc_helper.cpp | 4 +-- src/file_watcher/file_watcher.hpp | 2 +- src/file_watcher/file_watcher_test.cpp | 2 +- .../client/local/test_flip_local_client.cpp | 4 +-- src/flip/flip.hpp | 2 +- src/flip/lib/test_flip.cpp | 2 +- src/flip/lib/test_flip_server.cpp | 2 +- src/logging/lib/backtrace.cpp | 2 +- src/logging/lib/logging.cpp | 4 +-- src/logging/lib/stacktrace.cpp | 2 +- src/logging/test/example.cpp | 4 +-- src/metrics/metrics.cpp | 2 +- src/metrics/metrics.hpp | 4 +-- src/metrics/metrics_atomic.cpp | 2 +- src/metrics/metrics_group_impl.cpp | 2 +- src/metrics/metrics_rcu.cpp | 2 +- src/metrics/metrics_tlocal.cpp | 2 +- src/metrics/prometheus_reporter.hpp | 2 +- src/metrics/tests/farm_test.cpp | 2 +- src/metrics/tests/functionality_test.cpp | 2 +- src/metrics/tests/wrapper_test.cpp | 2 +- src/options/lib/options.cpp | 2 +- src/options/tests/basic.cpp | 2 +- src/settings/settings.cpp | 2 +- src/settings/settings.hpp | 4 +-- src/settings/tests/test_settings.cpp | 6 ++-- src/sisl_version/tests/test_version.cpp | 4 +-- src/utility/status_factory.hpp | 2 +- src/utility/tests/test_atomic_counter.cpp | 4 +-- src/utility/tests/test_objlife_counter.cpp | 4 +-- test_package/CMakeLists.txt | 11 ++++++++ test_package/conanfile.py | 18 ++++++++++++ test_package/example_decl.cpp | 7 +++++ test_package/test_package.cpp | 28 +++++++++++++++++++ 51 files changed, 131 insertions(+), 65 deletions(-) rename {src => include/sisl}/logging/logging.h (99%) rename {src => include/sisl}/options/options.h (100%) create mode 100644 test_package/CMakeLists.txt create mode 100644 test_package/conanfile.py create mode 100644 test_package/example_decl.cpp create mode 100644 test_package/test_package.cpp diff --git a/.gitignore b/.gitignore index bd9e5aef..5d114c73 100644 --- a/.gitignore +++ b/.gitignore @@ -93,6 +93,7 @@ src/.tags* # build generated products build/** +test_package/build debug/** release/** cmake-*/** diff --git a/src/logging/logging.h b/include/sisl/logging/logging.h similarity index 99% rename from src/logging/logging.h rename to include/sisl/logging/logging.h index dd7d2e4a..3b244526 100644 --- a/src/logging/logging.h +++ b/include/sisl/logging/logging.h @@ -28,6 +28,7 @@ #include #include #include +#include #include #include diff --git a/src/options/options.h b/include/sisl/options/options.h similarity index 100% rename from src/options/options.h rename to include/sisl/options/options.h diff --git a/src/auth_manager/security_config.hpp b/src/auth_manager/security_config.hpp index 41789ab9..550774aa 100644 --- a/src/auth_manager/security_config.hpp +++ b/src/auth_manager/security_config.hpp @@ -1,6 +1,6 @@ #pragma once #include "../settings/settings.hpp" -#include "../options/options.h" +#include #include "generated/security_config_generated.h" SETTINGS_INIT(securitycfg::SecuritySettings, security_config) diff --git a/src/cache/evictor.hpp b/src/cache/evictor.hpp index d2714bd0..e4643479 100644 --- a/src/cache/evictor.hpp +++ b/src/cache/evictor.hpp @@ -19,7 +19,7 @@ #include #include #include -#include "../logging/logging.h" +#include #include "hash_entry_base.hpp" namespace sisl { diff --git a/src/cache/tests/test_range_cache.cpp b/src/cache/tests/test_range_cache.cpp index 5f5b283b..c0b7fd3a 100644 --- a/src/cache/tests/test_range_cache.cpp +++ b/src/cache/tests/test_range_cache.cpp @@ -15,7 +15,7 @@ * *********************************************************************************/ #include -#include "options/options.h" +#include #include #include #include @@ -27,7 +27,7 @@ #include #endif -#include "logging/logging.h" +#include #include "range_cache.hpp" #include "lru_evictor.hpp" #include "utility/enum.hpp" diff --git a/src/cache/tests/test_range_hashmap.cpp b/src/cache/tests/test_range_hashmap.cpp index f2910c77..f5312669 100644 --- a/src/cache/tests/test_range_hashmap.cpp +++ b/src/cache/tests/test_range_hashmap.cpp @@ -15,12 +15,12 @@ * *********************************************************************************/ #include -#include "options/options.h" +#include #include #include #include -#include "logging/logging.h" +#include #include "fds/bitset.hpp" #include "range_hashmap.hpp" #include "utility/enum.hpp" diff --git a/src/fds/bitset.hpp b/src/fds/bitset.hpp index b4c579a3..1141615c 100644 --- a/src/fds/bitset.hpp +++ b/src/fds/bitset.hpp @@ -38,7 +38,7 @@ #pragma GCC diagnostic pop #endif -#include "../logging/logging.h" +#include #include "bitword.hpp" #include "buffer.hpp" diff --git a/src/fds/malloc_helper.hpp b/src/fds/malloc_helper.hpp index 57a036c1..c2323eb2 100644 --- a/src/fds/malloc_helper.hpp +++ b/src/fds/malloc_helper.hpp @@ -38,7 +38,7 @@ #include -#include "../logging/logging.h" +#include #include "../metrics/histogram_buckets.hpp" #include "../metrics/metrics.hpp" diff --git a/src/fds/tests/obj_allocator_benchmark.cpp b/src/fds/tests/obj_allocator_benchmark.cpp index 37605ea9..30f8e7d9 100644 --- a/src/fds/tests/obj_allocator_benchmark.cpp +++ b/src/fds/tests/obj_allocator_benchmark.cpp @@ -22,8 +22,8 @@ #include #include -#include "logging/logging.h" -#include "options/options.h" +#include +#include #include "metrics/metrics.hpp" #include "obj_allocator.hpp" diff --git a/src/fds/tests/test_atomic_status_counter.cpp b/src/fds/tests/test_atomic_status_counter.cpp index 6f80b08f..24f84d60 100644 --- a/src/fds/tests/test_atomic_status_counter.cpp +++ b/src/fds/tests/test_atomic_status_counter.cpp @@ -14,8 +14,8 @@ * specific language governing permissions and limitations under the License. * *********************************************************************************/ -#include "logging/logging.h" -#include "options/options.h" +#include +#include #include diff --git a/src/fds/tests/test_bitset.cpp b/src/fds/tests/test_bitset.cpp index 2f4515c7..660e5167 100644 --- a/src/fds/tests/test_bitset.cpp +++ b/src/fds/tests/test_bitset.cpp @@ -21,8 +21,8 @@ #include #include -#include "logging/logging.h" -#include "options/options.h" +#include +#include #include diff --git a/src/fds/tests/test_bitword.cpp b/src/fds/tests/test_bitword.cpp index 53a48054..bf06a9d5 100644 --- a/src/fds/tests/test_bitword.cpp +++ b/src/fds/tests/test_bitword.cpp @@ -20,8 +20,8 @@ #include #include -#include "logging/logging.h" -#include "options/options.h" +#include +#include #include diff --git a/src/fds/tests/test_cb_mutex.cpp b/src/fds/tests/test_cb_mutex.cpp index f07d594f..a0a1d36b 100644 --- a/src/fds/tests/test_cb_mutex.cpp +++ b/src/fds/tests/test_cb_mutex.cpp @@ -17,8 +17,8 @@ #include #include #include -#include "logging/logging.h" -#include "options/options.h" +#include +#include #include #pragma GCC diagnostic push diff --git a/src/fds/tests/test_idreserver.cpp b/src/fds/tests/test_idreserver.cpp index 251e9dc1..ea4fc607 100644 --- a/src/fds/tests/test_idreserver.cpp +++ b/src/fds/tests/test_idreserver.cpp @@ -18,8 +18,8 @@ #include #include -#include "logging/logging.h" -#include "options/options.h" +#include +#include #include "id_reserver.hpp" diff --git a/src/fds/tests/test_jemalloc_helper.cpp b/src/fds/tests/test_jemalloc_helper.cpp index 18d1e2ca..4bdbfbbe 100644 --- a/src/fds/tests/test_jemalloc_helper.cpp +++ b/src/fds/tests/test_jemalloc_helper.cpp @@ -23,8 +23,8 @@ #include #include -#include "logging/logging.h" -#include "options/options.h" +#include +#include #include diff --git a/src/fds/tests/test_obj_allocator.cpp b/src/fds/tests/test_obj_allocator.cpp index 06849f2b..f0996d39 100644 --- a/src/fds/tests/test_obj_allocator.cpp +++ b/src/fds/tests/test_obj_allocator.cpp @@ -17,8 +17,8 @@ #include #include -#include "logging/logging.h" -#include "options/options.h" +#include +#include #include "obj_allocator.hpp" diff --git a/src/fds/tests/test_tcmalloc_helper.cpp b/src/fds/tests/test_tcmalloc_helper.cpp index 69f2f94d..9947afc0 100644 --- a/src/fds/tests/test_tcmalloc_helper.cpp +++ b/src/fds/tests/test_tcmalloc_helper.cpp @@ -21,8 +21,8 @@ #include #include -#include "logging/logging.h" -#include "options/options.h" +#include +#include #include diff --git a/src/file_watcher/file_watcher.hpp b/src/file_watcher/file_watcher.hpp index 7959de3c..7eff1b6e 100644 --- a/src/file_watcher/file_watcher.hpp +++ b/src/file_watcher/file_watcher.hpp @@ -2,7 +2,7 @@ #pragma once #include -#include "../logging/logging.h" +#include #include namespace sisl { diff --git a/src/file_watcher/file_watcher_test.cpp b/src/file_watcher/file_watcher_test.cpp index c5059958..7d20e64c 100644 --- a/src/file_watcher/file_watcher_test.cpp +++ b/src/file_watcher/file_watcher_test.cpp @@ -10,7 +10,7 @@ #include #include "file_watcher.hpp" -#include "options/options.h" +#include SISL_LOGGING_INIT(test_file_watcher) SISL_OPTIONS_ENABLE(logging) diff --git a/src/flip/client/local/test_flip_local_client.cpp b/src/flip/client/local/test_flip_local_client.cpp index 8e99e867..dba24d98 100644 --- a/src/flip/client/local/test_flip_local_client.cpp +++ b/src/flip/client/local/test_flip_local_client.cpp @@ -19,8 +19,8 @@ #include #include -#include "options/options.h" -#include "logging/logging.h" +#include +#include using namespace flip; diff --git a/src/flip/flip.hpp b/src/flip/flip.hpp index f839a0ea..0142850d 100644 --- a/src/flip/flip.hpp +++ b/src/flip/flip.hpp @@ -32,7 +32,7 @@ #include "proto/flip_spec.pb.h" #include "flip_rpc_server.hpp" -#include "../logging/logging.h" +#include SISL_LOGGING_DECL(flip) diff --git a/src/flip/lib/test_flip.cpp b/src/flip/lib/test_flip.cpp index b9e4c0f8..af3e5126 100644 --- a/src/flip/lib/test_flip.cpp +++ b/src/flip/lib/test_flip.cpp @@ -19,7 +19,7 @@ #include #include -#include "options/options.h" +#include SISL_LOGGING_INIT(flip) SISL_OPTIONS_ENABLE(logging) diff --git a/src/flip/lib/test_flip_server.cpp b/src/flip/lib/test_flip_server.cpp index 1cb6b25a..3e2b631e 100644 --- a/src/flip/lib/test_flip_server.cpp +++ b/src/flip/lib/test_flip_server.cpp @@ -16,7 +16,7 @@ *********************************************************************************/ #include "flip.hpp" -#include "options/options.h" +#include SISL_LOGGING_INIT(flip) diff --git a/src/logging/lib/backtrace.cpp b/src/logging/lib/backtrace.cpp index 1ac215ff..9ad62b47 100644 --- a/src/logging/lib/backtrace.cpp +++ b/src/logging/lib/backtrace.cpp @@ -39,7 +39,7 @@ #include #endif -#include "logging.h" +#include #include "backtrace.h" diff --git a/src/logging/lib/logging.cpp b/src/logging/lib/logging.cpp index 88effd46..5b4f3e3f 100644 --- a/src/logging/lib/logging.cpp +++ b/src/logging/lib/logging.cpp @@ -15,7 +15,7 @@ * *********************************************************************************/ -#include "logging.h" +#include #include #include @@ -37,7 +37,7 @@ #include #endif -#include "options/options.h" +#include #include #include #include diff --git a/src/logging/lib/stacktrace.cpp b/src/logging/lib/stacktrace.cpp index 095464f7..f7656c87 100644 --- a/src/logging/lib/stacktrace.cpp +++ b/src/logging/lib/stacktrace.cpp @@ -32,8 +32,8 @@ #include #endif +#include #include "backtrace.h" -#include "logging.h" namespace { constexpr uint64_t backtrace_timeout_ms{4 * backtrace_detail::pipe_timeout_ms}; diff --git a/src/logging/test/example.cpp b/src/logging/test/example.cpp index 17abe762..907dd6f2 100644 --- a/src/logging/test/example.cpp +++ b/src/logging/test/example.cpp @@ -20,9 +20,9 @@ #include #include -#include "options/options.h" +#include -#include "logging.h" +#include SISL_LOGGING_INIT(my_module) diff --git a/src/metrics/metrics.cpp b/src/metrics/metrics.cpp index 34910d73..26c94599 100644 --- a/src/metrics/metrics.cpp +++ b/src/metrics/metrics.cpp @@ -14,7 +14,7 @@ * specific language governing permissions and limitations under the License. * *********************************************************************************/ -#include "logging/logging.h" +#include #include "metrics.hpp" diff --git a/src/metrics/metrics.hpp b/src/metrics/metrics.hpp index 3fdbb7c0..e3c01f9b 100644 --- a/src/metrics/metrics.hpp +++ b/src/metrics/metrics.hpp @@ -33,8 +33,8 @@ #include #include -#include "../logging/logging.h" -#include "../options/options.h" +#include +#include #include "metrics_atomic.hpp" #include "metrics_group_impl.hpp" #include "metrics_rcu.hpp" diff --git a/src/metrics/metrics_atomic.cpp b/src/metrics/metrics_atomic.cpp index 9cecebee..9a2c1240 100644 --- a/src/metrics/metrics_atomic.cpp +++ b/src/metrics/metrics_atomic.cpp @@ -18,7 +18,7 @@ #include #include -#include "logging/logging.h" +#include #include "metrics_atomic.hpp" diff --git a/src/metrics/metrics_group_impl.cpp b/src/metrics/metrics_group_impl.cpp index e21d0af2..34e8e4a7 100644 --- a/src/metrics/metrics_group_impl.cpp +++ b/src/metrics/metrics_group_impl.cpp @@ -26,7 +26,7 @@ #endif #include -#include "logging/logging.h" +#include #include "metrics_group_impl.hpp" #include "metrics.hpp" diff --git a/src/metrics/metrics_rcu.cpp b/src/metrics/metrics_rcu.cpp index f70de42f..e02c3202 100644 --- a/src/metrics/metrics_rcu.cpp +++ b/src/metrics/metrics_rcu.cpp @@ -15,7 +15,7 @@ * *********************************************************************************/ #include "metrics_rcu.hpp" -#include "logging/logging.h" +#include namespace sisl { diff --git a/src/metrics/metrics_tlocal.cpp b/src/metrics/metrics_tlocal.cpp index f0d40671..d04b711e 100644 --- a/src/metrics/metrics_tlocal.cpp +++ b/src/metrics/metrics_tlocal.cpp @@ -18,7 +18,7 @@ #include #include -#include "logging/logging.h" +#include #include "metrics_tlocal.hpp" diff --git a/src/metrics/prometheus_reporter.hpp b/src/metrics/prometheus_reporter.hpp index 500eb6e1..a51bddb4 100644 --- a/src/metrics/prometheus_reporter.hpp +++ b/src/metrics/prometheus_reporter.hpp @@ -33,7 +33,7 @@ #include #pragma GCC diagnostic pop -#include "../logging/logging.h" +#include namespace sisl { diff --git a/src/metrics/tests/farm_test.cpp b/src/metrics/tests/farm_test.cpp index a84f581a..8658efb8 100644 --- a/src/metrics/tests/farm_test.cpp +++ b/src/metrics/tests/farm_test.cpp @@ -23,7 +23,7 @@ #include #include -#include "logging/logging.h" +#include #include "metrics.hpp" diff --git a/src/metrics/tests/functionality_test.cpp b/src/metrics/tests/functionality_test.cpp index 35fafbd4..59d7be76 100644 --- a/src/metrics/tests/functionality_test.cpp +++ b/src/metrics/tests/functionality_test.cpp @@ -23,7 +23,7 @@ #include #include -#include "logging/logging.h" +#include #include "../metrics.hpp" #include "../metrics_group_impl.hpp" diff --git a/src/metrics/tests/wrapper_test.cpp b/src/metrics/tests/wrapper_test.cpp index c22eeb84..05aeb09c 100644 --- a/src/metrics/tests/wrapper_test.cpp +++ b/src/metrics/tests/wrapper_test.cpp @@ -20,7 +20,7 @@ #include #include "metrics.hpp" #include -#include "options/options.h" +#include SISL_LOGGING_INIT(vmod_metrics_framework) diff --git a/src/options/lib/options.cpp b/src/options/lib/options.cpp index f25c8a4f..33346a9f 100644 --- a/src/options/lib/options.cpp +++ b/src/options/lib/options.cpp @@ -14,6 +14,6 @@ * specific language governing permissions and limitations under the License. * *********************************************************************************/ -#include "options/options.h" +#include SISL_OPTION_GROUP(main, (help, "h", "help", "Help message", ::cxxopts::value< bool >(), "")) diff --git a/src/options/tests/basic.cpp b/src/options/tests/basic.cpp index c24cbc0d..7102121c 100644 --- a/src/options/tests/basic.cpp +++ b/src/options/tests/basic.cpp @@ -20,7 +20,7 @@ #include #include -#include "options.h" +#include SISL_OPTION_GROUP(logging, (verbosity, "v", "verbosity", "Verbosity level (0-5)", diff --git a/src/settings/settings.cpp b/src/settings/settings.cpp index a4e2b94b..7d03e73d 100644 --- a/src/settings/settings.cpp +++ b/src/settings/settings.cpp @@ -22,7 +22,7 @@ #include -#include "options/options.h" +#include #include "settings.hpp" diff --git a/src/settings/settings.hpp b/src/settings/settings.hpp index 3ee5316b..ae2c9fb0 100644 --- a/src/settings/settings.hpp +++ b/src/settings/settings.hpp @@ -29,8 +29,8 @@ #include -#include "../logging/logging.h" -#include "../options/options.h" +#include +#include #include "../utility/urcu_helper.hpp" #define SETTINGS_INIT(schema_type, schema_name) \ diff --git a/src/settings/tests/test_settings.cpp b/src/settings/tests/test_settings.cpp index 9a6cfafa..68fa1e63 100644 --- a/src/settings/tests/test_settings.cpp +++ b/src/settings/tests/test_settings.cpp @@ -18,8 +18,8 @@ #include #include -#include "logging/logging.h" -#include "options/options.h" +#include +#include #include "generated/test_app_schema_generated.h" #include "settings.hpp" @@ -189,4 +189,4 @@ int main(int argc, char* argv[]) { auto ret = RUN_ALL_TESTS(); return ret; -} \ No newline at end of file +} diff --git a/src/sisl_version/tests/test_version.cpp b/src/sisl_version/tests/test_version.cpp index a868cac6..f2e1e845 100644 --- a/src/sisl_version/tests/test_version.cpp +++ b/src/sisl_version/tests/test_version.cpp @@ -1,6 +1,6 @@ #include -#include "logging/logging.h" -#include "options/options.h" +#include +#include #include #include diff --git a/src/utility/status_factory.hpp b/src/utility/status_factory.hpp index 070ecffb..aedef943 100644 --- a/src/utility/status_factory.hpp +++ b/src/utility/status_factory.hpp @@ -16,7 +16,7 @@ *********************************************************************************/ #pragma once #include -#include "../logging/logging.h" +#include namespace sisl { template < typename StatusT > diff --git a/src/utility/tests/test_atomic_counter.cpp b/src/utility/tests/test_atomic_counter.cpp index 066b0293..2ec6efda 100644 --- a/src/utility/tests/test_atomic_counter.cpp +++ b/src/utility/tests/test_atomic_counter.cpp @@ -5,8 +5,8 @@ #include #include -#include "logging/logging.h" -#include "options/options.h" +#include +#include #include diff --git a/src/utility/tests/test_objlife_counter.cpp b/src/utility/tests/test_objlife_counter.cpp index 1b96a503..a7e9351c 100644 --- a/src/utility/tests/test_objlife_counter.cpp +++ b/src/utility/tests/test_objlife_counter.cpp @@ -7,8 +7,8 @@ #include #include -#include "logging/logging.h" -#include "options/options.h" +#include +#include #include "fds/buffer.hpp" diff --git a/test_package/CMakeLists.txt b/test_package/CMakeLists.txt new file mode 100644 index 00000000..cad2f97c --- /dev/null +++ b/test_package/CMakeLists.txt @@ -0,0 +1,11 @@ +cmake_minimum_required(VERSION 3.11) +project(test_package) + +include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) +conan_basic_setup(TARGETS) + +find_package(sisl CONFIG REQUIRED) + +add_executable(${PROJECT_NAME} test_package.cpp example_decl.cpp) +target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_17) +target_link_libraries(${PROJECT_NAME} sisl::sisl) diff --git a/test_package/conanfile.py b/test_package/conanfile.py new file mode 100644 index 00000000..9ebf2674 --- /dev/null +++ b/test_package/conanfile.py @@ -0,0 +1,18 @@ +from conans import ConanFile +from conan.tools.build import cross_building +from conans import CMake +import os + +class TestPackageConan(ConanFile): + settings = "os", "compiler", "build_type", "arch" + generators = "cmake", "cmake_find_package_multi" + + def build(self): + cmake = CMake(self) + cmake.configure() + cmake.build() + + def test(self): + if not cross_building(self): + bin_path = os.path.join("bin", "test_package") + self.run(bin_path, run_environment=True) diff --git a/test_package/example_decl.cpp b/test_package/example_decl.cpp new file mode 100644 index 00000000..e6580b2b --- /dev/null +++ b/test_package/example_decl.cpp @@ -0,0 +1,7 @@ +#include + +SISL_LOGGING_DECL(my_module) + +void example_decl() { + LOGINFOMOD(my_module, "Example def!"); +} diff --git a/test_package/test_package.cpp b/test_package/test_package.cpp new file mode 100644 index 00000000..c82da06f --- /dev/null +++ b/test_package/test_package.cpp @@ -0,0 +1,28 @@ +#include +#include + +SISL_LOGGING_INIT(my_module) + +SISL_OPTIONS_ENABLE(logging) + +extern void example_decl(); + +int main(int argc, char** argv) { + SISL_OPTIONS_LOAD(argc, argv, logging) + sisl::logging::SetLogger(std::string(argv[0])); + spdlog::set_pattern("[%D %T%z] [%^%l%$] [%n] [%t] %v"); + + LOGTRACE("Trace"); + LOGDEBUG("Debug"); + LOGINFO("Info"); + LOGWARN("Warning"); + LOGERROR("Error"); + LOGCRITICAL("Critical"); + + example_decl(); + + auto custom_logger = sisl::logging::CreateCustomLogger("test_package", "_custom", false /*stdout*/, true /*stderr*/); + LOGINFOMOD_USING_LOGGER(my_module, custom_logger, "hello world"); + DEBUG_ASSERT(true, "Always True"); + return 0; +} From aa7f3f06184b011d04ce0a186201d8bbe4b51826 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 7 Nov 2022 10:16:56 -0700 Subject: [PATCH 172/385] Fix Jenkins options for test_package. --- .jenkins/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 91137a59..5306dd73 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -130,7 +130,7 @@ pipeline { if (("${env.BRANCH_NAME}" =~ /PR-/) && ("$BUILD_TYPE" == "debug")) { sh "echo Skipping debug build for PR branch" } else { - sh "conan create -u ${BUILD_MISSING} -o malloc_impl=${ALLOC} -o prerelease=${PRERELEASE} -o sanitize=${SANITIZE} -pr ${BUILD_PROFILE} . ${PROJECT}/${TAG}" + sh "conan create -u ${BUILD_MISSING} -o ${PROJECT}:malloc_impl=${ALLOC} -o ${PROJECT}:prerelease=${PRERELEASE} -o ${PROJECT}:sanitize=${SANITIZE} -pr ${BUILD_PROFILE} . ${PROJECT}/${TAG}" } } } From 119faf69374ff1572989489f1006f6eb84a3745a Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 7 Nov 2022 12:17:48 -0700 Subject: [PATCH 173/385] Standardize version library. --- CMakeLists.txt | 4 +-- {src => include/sisl}/version.hpp | 0 src/logging/CMakeLists.txt | 12 ++++---- src/options/CMakeLists.txt | 28 +++++++------------ src/{sisl_version => version}/CMakeLists.txt | 16 ++++------- .../tests/test_version.cpp | 2 +- src/{sisl_version => version}/version.cpp | 2 +- 7 files changed, 24 insertions(+), 40 deletions(-) rename {src => include/sisl}/version.hpp (100%) rename src/{sisl_version => version}/CMakeLists.txt (60%) rename src/{sisl_version => version}/tests/test_version.cpp (97%) rename src/{sisl_version => version}/version.cpp (98%) diff --git a/CMakeLists.txt b/CMakeLists.txt index c37b6617..19f4e6df 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -125,14 +125,12 @@ endif() include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/include - ${CMAKE_CURRENT_BINARY_DIR}/src/auth_manager - ${CMAKE_CURRENT_BINARY_DIR}/src/settings ) #add_subdirectory (src/btree) add_subdirectory (src/logging) add_subdirectory (src/options) -add_subdirectory (src/sisl_version) +add_subdirectory (src/version) # These sub-libraries currently do not support MacOS due to dependencies # on Folly and pistache. It is unknown if Windows is supported... diff --git a/src/version.hpp b/include/sisl/version.hpp similarity index 100% rename from src/version.hpp rename to include/sisl/version.hpp diff --git a/src/logging/CMakeLists.txt b/src/logging/CMakeLists.txt index 680c2ce7..882cc777 100644 --- a/src/logging/CMakeLists.txt +++ b/src/logging/CMakeLists.txt @@ -4,19 +4,17 @@ if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQU add_flags("-Wno-unused-parameter -Wno-cast-function-type") endif() -include_directories(BEFORE ..) -include_directories(BEFORE .) - -set(LOGGING_SOURCE_FILES +add_library(sisl_logging OBJECT) +target_sources(sisl_logging PRIVATE lib/backtrace.cpp lib/logging.cpp lib/stacktrace.cpp ) -add_library(sisl_logging OBJECT ${LOGGING_SOURCE_FILES}) +target_include_directories(sisl_logging BEFORE PRIVATE .) target_link_libraries(sisl_logging ${COMMON_DEPS}) -set(TEST_LOGGING_FILES +add_executable(logging_example) +target_sources(logging_example PRIVATE test/example.cpp ) -add_executable(logging_example ${TEST_LOGGING_FILES}) target_link_libraries(logging_example sisl ${COMMON_DEPS}) diff --git a/src/options/CMakeLists.txt b/src/options/CMakeLists.txt index f181f962..5b24b5ce 100644 --- a/src/options/CMakeLists.txt +++ b/src/options/CMakeLists.txt @@ -1,28 +1,20 @@ cmake_minimum_required (VERSION 3.10) -include_directories(BEFORE include) - -file(GLOB API_HEADERS include/*.h) -file(GLOB LIB_HEADERS lib/*.h) -file(GLOB LIB_SOURCES lib/*.cpp) - -include_directories(BEFORE ..) -include_directories(BEFORE .) - -add_library(sisl_options OBJECT - ${API_HEADERS} - ${LIB_HEADERS} - ${LIB_SOURCES} - ) +add_library(sisl_options OBJECT) +target_sources(sisl_options PRIVATE + lib/options.cpp +) target_link_libraries(sisl_options ${COMMON_DEPS}) -set(BASIC_TEST_SOURCES tests/basic.cpp) -add_executable(basic_test ${BASIC_TEST_SOURCES}) +add_executable(basic_test) +target_sources(basic_test PRIVATE + tests/basic.cpp +) target_link_libraries(basic_test sisl ${COMMON_DEPS} GTest::gtest) -set(extra_args "") + if (DEFINED CONAN_BUILD_COVERAGE) if (${CONAN_BUILD_COVERAGE}) - set(extra_args "--gtest_output=xml:/output/test_basic.xml") + list(APPEND extra_args "--gtest_output=xml:/output/test_basic.xml") endif () endif () add_test(NAME BasicTest COMMAND basic_test ${extra_args}) diff --git a/src/sisl_version/CMakeLists.txt b/src/version/CMakeLists.txt similarity index 60% rename from src/sisl_version/CMakeLists.txt rename to src/version/CMakeLists.txt index f6aab51a..5fd9f2e9 100644 --- a/src/sisl_version/CMakeLists.txt +++ b/src/version/CMakeLists.txt @@ -4,19 +4,15 @@ if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQU add_flags("-Wno-attributes") # needed for C++ 20 folly compilation endif() -include_directories(BEFORE ..) -include_directories(BEFORE .) - -set(VERSION_SOURCE_FILES +add_library(sisl_version OBJECT) +target_sources(sisl_version PRIVATE version.cpp - ) -add_library(sisl_version OBJECT ${VERSION_SOURCE_FILES}) +) target_link_libraries(sisl_version ${COMMON_DEPS} zmarok-semver::zmarok-semver) -target_include_directories(sisl_version PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) -set(TEST_VERSION_SOURCE_FILES +add_executable(test_version) +target_sources(test_version PRIVATE tests/test_version.cpp - ) -add_executable(test_version ${TEST_VERSION_SOURCE_FILES}) +) target_link_libraries(test_version sisl ${COMMON_DEPS} zmarok-semver::zmarok-semver GTest::gtest) add_test(NAME VersionTest COMMAND test_version) diff --git a/src/sisl_version/tests/test_version.cpp b/src/version/tests/test_version.cpp similarity index 97% rename from src/sisl_version/tests/test_version.cpp rename to src/version/tests/test_version.cpp index f2e1e845..ee8f5f2b 100644 --- a/src/sisl_version/tests/test_version.cpp +++ b/src/version/tests/test_version.cpp @@ -1,4 +1,4 @@ -#include +#include #include #include #include diff --git a/src/sisl_version/version.cpp b/src/version/version.cpp similarity index 98% rename from src/sisl_version/version.cpp rename to src/version/version.cpp index 4326b94b..faf1abd7 100644 --- a/src/sisl_version/version.cpp +++ b/src/version/version.cpp @@ -14,7 +14,7 @@ * specific language governing permissions and limitations under the License. * *********************************************************************************/ -#include "version.hpp" +#include #include namespace sisl { From 3de5e8ea0a849522ee3a9c5dee8bf55cd782c2ef Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 8 Nov 2022 12:56:27 -0700 Subject: [PATCH 174/385] Fix include paths for when build occurs in seperate build directory. --- CMakeLists.txt | 1 - src/auth_manager/CMakeLists.txt | 59 +++++++++----------- src/auth_manager/tests/AuthTest.cpp | 4 +- src/auth_manager/tests/basic_http_server.hpp | 4 +- src/settings/CMakeLists.txt | 31 +++++----- 5 files changed, 47 insertions(+), 52 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 19f4e6df..e306931a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -154,7 +154,6 @@ if(${CMAKE_HOST_SYSTEM_NAME} STREQUAL Linux) $ $ $ - $ ) list(APPEND SISL_DEPS Folly::Folly diff --git a/src/auth_manager/CMakeLists.txt b/src/auth_manager/CMakeLists.txt index c2980617..ad813117 100644 --- a/src/auth_manager/CMakeLists.txt +++ b/src/auth_manager/CMakeLists.txt @@ -1,50 +1,45 @@ cmake_minimum_required (VERSION 3.10) -add_flags("-Wno-unused-parameter") - -include_directories(BEFORE ..) -include_directories(BEFORE .) - find_package(flatbuffers REQUIRED) find_package(Pistache REQUIRED) -set(AUTH_MGR_SOURCE_FILES +add_library(sisl_auth_manager OBJECT) +target_sources(sisl_auth_manager PRIVATE auth_manager.cpp - ) -add_library(sisl_auth_manager OBJECT ${AUTH_MGR_SOURCE_FILES}) + trf_client.cpp + ) +if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) + target_include_directories(sisl_auth_manager BEFORE PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) +endif() +target_include_directories(sisl_auth_manager BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries(sisl_auth_manager ${COMMON_DEPS} cpr::cpr flatbuffers::flatbuffers jwt-cpp::jwt-cpp ) -set(FLATBUFFERS_FLATC_EXECUTABLE, ${CONAN_BIN_DIRS_FLATBUFFER}/flatc) -message("Flatbuffers parser: [${FLATBUFFERS_FLATC_EXECUTABLE}]") -settings_gen_cpp(${FLATBUFFERS_FLATC_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/generated/ sisl_auth_manager security_config.fbs) +settings_gen_cpp( + ${FLATBUFFERS_FLATC_EXECUTABLE} + ${CMAKE_CURRENT_BINARY_DIR}/generated/ + sisl_auth_manager + security_config.fbs + ) -set(TRF_CLIENT_SOURCE_FILES - trf_client.cpp - ) -add_library(sisl_trf_client OBJECT ${TRF_CLIENT_SOURCE_FILES}) -target_link_libraries(sisl_trf_client +add_executable(test_auth_mgr) +target_sources(test_auth_mgr PRIVATE + tests/AuthTest.cpp + ) +if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) + target_include_directories(test_auth_mgr BEFORE PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) +endif() +target_include_directories(test_auth_mgr BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) +target_link_libraries(test_auth_mgr + sisl ${COMMON_DEPS} cpr::cpr + pistache::pistache flatbuffers::flatbuffers + jwt-cpp::jwt-cpp + GTest::gmock ) -settings_gen_cpp(${FLATBUFFERS_FLATC_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/generated/ sisl_trf_client security_config.fbs) - -set(AUTH_DEPS - sisl - ${COMMON_DEPS} - cpr::cpr - pistache::pistache - flatbuffers::flatbuffers - jwt-cpp::jwt-cpp - GTest::gmock - ) - -add_executable(test_auth_mgr - tests/AuthTest.cpp - ) -target_link_libraries(test_auth_mgr ${AUTH_DEPS}) add_test(NAME test_auth_mgr COMMAND test_auth_mgr) diff --git a/src/auth_manager/tests/AuthTest.cpp b/src/auth_manager/tests/AuthTest.cpp index bd81de10..4138db8a 100644 --- a/src/auth_manager/tests/AuthTest.cpp +++ b/src/auth_manager/tests/AuthTest.cpp @@ -8,8 +8,8 @@ #include #include -#include "auth_manager/auth_manager.hpp" -#include "auth_manager/trf_client.hpp" +#include "auth_manager.hpp" +#include "trf_client.hpp" #include "test_token.hpp" #include "basic_http_server.hpp" diff --git a/src/auth_manager/tests/basic_http_server.hpp b/src/auth_manager/tests/basic_http_server.hpp index f5453045..92aac0c2 100644 --- a/src/auth_manager/tests/basic_http_server.hpp +++ b/src/auth_manager/tests/basic_http_server.hpp @@ -40,11 +40,11 @@ class TokenApi : public APIBase { Pistache::Rest::Routes::bind(&TokenApi::get_token_handler, this)); } - void get_token_handler(const Pistache::Rest::Request& request, Pistache::Http::ResponseWriter response) { + void get_token_handler(const Pistache::Rest::Request&, Pistache::Http::ResponseWriter response) { this->get_token_impl(response); } virtual void get_token_impl(Pistache::Http::ResponseWriter& response) = 0; virtual ~TokenApi() { Pistache::Rest::Routes::Remove(m_router, Pistache::Http::Method::Post, "/token"); } -}; \ No newline at end of file +}; diff --git a/src/settings/CMakeLists.txt b/src/settings/CMakeLists.txt index 11975ffe..faa03181 100644 --- a/src/settings/CMakeLists.txt +++ b/src/settings/CMakeLists.txt @@ -1,28 +1,29 @@ cmake_minimum_required (VERSION 3.10) -if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")) - add_flags("-Wno-attributes") # needed for C++ 20 folly compilation -endif() - -include_directories(BEFORE ..) -include_directories(BEFORE .) -include_directories(BEFORE . ${CMAKE_CURRENT_SOURCE_DIR}/) - find_package(flatbuffers REQUIRED) -set(SETTINGS_SOURCE_FILES + +add_library(sisl_settings OBJECT) +target_sources(sisl_settings PRIVATE settings.cpp ) -add_library(sisl_settings OBJECT ${SETTINGS_SOURCE_FILES}) target_link_libraries(sisl_settings ${COMMON_DEPS} flatbuffers::flatbuffers ) -set(TEST_SETTINGS_SOURCE_FILES +add_executable(test_settings) +target_sources(test_settings PRIVATE tests/test_settings.cpp - ) -add_executable(test_settings ${TEST_SETTINGS_SOURCE_FILES}) -set(FLATBUFFERS_FLATC_EXECUTABLE, ${CONAN_BIN_DIRS_FLATBUFFER}/flatc) -settings_gen_cpp(${FLATBUFFERS_FLATC_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/generated/ test_settings tests/test_app_schema.fbs) + ) +settings_gen_cpp( + ${FLATBUFFERS_FLATC_EXECUTABLE} + ${CMAKE_CURRENT_BINARY_DIR}/generated/ + test_settings + tests/test_app_schema.fbs + ) +if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) + target_include_directories(test_settings BEFORE PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) +endif() +target_include_directories(test_settings BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries(test_settings sisl ${COMMON_DEPS} flatbuffers::flatbuffers GTest::gtest) add_test(NAME SettingsTest COMMAND test_settings) From f95cdf4943c3457f01e9948853761ef029643de9 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Tue, 8 Nov 2022 12:01:29 -0800 Subject: [PATCH 175/385] use release build when ALLOC type is libc. This flavor is required for OM (#59) Co-authored-by: Ravi Akella email = raakella@ebay.com --- .jenkins/Jenkinsfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 5306dd73..0f748138 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -124,7 +124,9 @@ pipeline { if ("${BUILD_TYPE}" == "release") { PRERELEASE = 'False' - BUILD_PROFILE = "test" + if ("${ALLOC}" != 'libc') { + BUILD_PROFILE = "test" + } } if (("${env.BRANCH_NAME}" =~ /PR-/) && ("$BUILD_TYPE" == "debug")) { From f6e7bbdb4209fb0b18c0678d6139e363d248951d Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 9 Nov 2022 10:25:28 -0700 Subject: [PATCH 176/385] Remove all relative ("../") includes --- .../sisl}/auth_manager/auth_manager.hpp | 2 +- .../sisl}/auth_manager/security_config.hpp | 2 +- .../sisl}/auth_manager/trf_client.hpp | 0 {src => include/sisl}/fds/bitset.hpp | 0 {src => include/sisl}/fds/bitword.hpp | 2 +- {src => include/sisl}/fds/buffer.hpp | 6 ++--- {src => include/sisl}/fds/flexarray.hpp | 0 .../sisl}/fds/freelist_allocator.hpp | 2 +- {src => include/sisl}/fds/id_reserver.hpp | 1 - {src => include/sisl}/fds/obj_allocator.hpp | 0 {src => include/sisl}/fds/sparse_vector.hpp | 0 {src => include/sisl}/fds/stream_tracker.hpp | 5 ++-- {src => include/sisl}/fds/thread_vector.hpp | 4 ++-- {src => include/sisl}/fds/utils.hpp | 0 {src => include/sisl}/flip/flip.hpp | 0 {src => include/sisl}/flip/flip_client.hpp | 0 .../sisl}/flip/flip_rpc_server.hpp | 0 .../sisl}/metrics/histogram_buckets.hpp | 0 {src => include/sisl}/metrics/metrics.hpp | 0 .../sisl}/metrics/metrics_atomic.hpp | 0 .../sisl}/metrics/metrics_group_impl.hpp | 3 ++- {src => include/sisl}/metrics/metrics_rcu.hpp | 6 +++-- .../sisl}/metrics/metrics_tlocal.hpp | 0 .../sisl}/metrics/prometheus_reporter.hpp | 0 {src => include/sisl}/metrics/reporter.hpp | 0 {src => include/sisl}/settings/settings.hpp | 2 +- .../sisl}/utility/atomic_counter.hpp | 0 {src => include/sisl}/utility/enum.hpp | 0 .../sisl}/utility/obj_life_counter.hpp | 2 +- .../sisl}/utility/status_factory.hpp | 2 +- .../sisl}/utility/thread_buffer.hpp | 4 ++-- .../sisl}/utility/thread_factory.hpp | 0 {src => include/sisl}/utility/urcu_helper.hpp | 0 {src => include/sisl}/wisr/wisr_ds.hpp | 0 {src => include/sisl}/wisr/wisr_framework.hpp | 4 ++-- src/auth_manager/auth_manager.cpp | 2 +- src/auth_manager/tests/AuthTest.cpp | 4 ++-- src/auth_manager/trf_client.cpp | 2 +- src/cache/CMakeLists.txt | 1 - src/cache/lru_evictor.hpp | 2 +- src/cache/range_hashmap.hpp | 7 +++--- src/cache/tests/test_range_cache.cpp | 2 +- src/cache/tests/test_range_hashmap.cpp | 4 ++-- src/fds/CMakeLists.txt | 1 - src/fds/buffer.cpp | 2 +- src/fds/malloc_helper.hpp | 4 ++-- src/fds/memvector.hpp | 2 +- src/fds/tests/obj_allocator_benchmark.cpp | 8 +++---- src/fds/tests/test_bitset.cpp | 2 +- src/fds/tests/test_bitword.cpp | 2 +- src/fds/tests/test_cb_mutex.cpp | 2 +- src/fds/tests/test_idreserver.cpp | 2 +- src/fds/tests/test_jemalloc_helper.cpp | 7 +++--- src/fds/tests/test_obj_allocator.cpp | 6 ++--- src/fds/tests/test_stream_tracker.cpp | 7 +++--- src/fds/tests/test_tcmalloc_helper.cpp | 7 +++--- src/file_watcher/CMakeLists.txt | 1 - src/file_watcher/file_watcher.cpp | 2 +- src/flip/CMakeLists.txt | 23 +++++++++++++------ .../client/local/test_flip_local_client.cpp | 2 +- src/flip/lib/flip_rpc_server.cpp | 4 ++-- src/flip/lib/test_flip.cpp | 2 +- src/flip/lib/test_flip_server.cpp | 2 +- src/flip/proto/CMakeLists.txt | 9 ++++---- src/logging/CMakeLists.txt | 7 +++--- src/logging/{lib => }/backtrace.cpp | 0 src/logging/{lib => }/logging.cpp | 0 src/logging/{lib => }/stacktrace.cpp | 0 src/metrics/CMakeLists.txt | 1 - src/metrics/metrics.cpp | 2 +- src/metrics/metrics_atomic.cpp | 2 +- src/metrics/metrics_group_impl.cpp | 6 ++--- src/metrics/metrics_rcu.cpp | 2 +- src/metrics/metrics_tlocal.cpp | 2 +- src/metrics/tests/farm_test.cpp | 2 +- src/metrics/tests/functionality_test.cpp | 4 ++-- src/metrics/tests/metrics_benchmark.cpp | 5 ++-- src/metrics/tests/wrapper_test.cpp | 4 +++- src/settings/README.md | 2 +- src/settings/settings.cpp | 2 +- src/settings/tests/test_settings.cpp | 2 +- src/utility/CMakeLists.txt | 1 - src/utility/tests/test_atomic_counter.cpp | 2 +- src/utility/tests/test_enum.cpp | 4 ++-- src/utility/tests/test_objlife_counter.cpp | 5 ++-- src/utility/tests/test_status_factory.cpp | 3 ++- src/utility/tests/test_thread_buffer.cpp | 2 +- src/wisr/CMakeLists.txt | 1 - src/wisr/tests/test_wisr_vector.cpp | 4 ++-- src/wisr/tests/wisr_deque_benchmark.cpp | 4 ++-- .../tests/wisr_intrusive_slist_benchmark.cpp | 4 ++-- src/wisr/tests/wisr_list_benchmark.cpp | 4 ++-- src/wisr/tests/wisr_vector_benchmark.cpp | 4 ++-- 93 files changed, 127 insertions(+), 122 deletions(-) rename {src => include/sisl}/auth_manager/auth_manager.hpp (96%) rename {src => include/sisl}/auth_manager/security_config.hpp (99%) rename {src => include/sisl}/auth_manager/trf_client.hpp (100%) rename {src => include/sisl}/fds/bitset.hpp (100%) rename {src => include/sisl}/fds/bitword.hpp (99%) rename {src => include/sisl}/fds/buffer.hpp (99%) rename {src => include/sisl}/fds/flexarray.hpp (100%) rename {src => include/sisl}/fds/freelist_allocator.hpp (99%) rename {src => include/sisl}/fds/id_reserver.hpp (99%) rename {src => include/sisl}/fds/obj_allocator.hpp (100%) rename {src => include/sisl}/fds/sparse_vector.hpp (100%) rename {src => include/sisl}/fds/stream_tracker.hpp (99%) rename {src => include/sisl}/fds/thread_vector.hpp (97%) rename {src => include/sisl}/fds/utils.hpp (100%) rename {src => include/sisl}/flip/flip.hpp (100%) rename {src => include/sisl}/flip/flip_client.hpp (100%) rename {src => include/sisl}/flip/flip_rpc_server.hpp (100%) rename {src => include/sisl}/metrics/histogram_buckets.hpp (100%) rename {src => include/sisl}/metrics/metrics.hpp (100%) rename {src => include/sisl}/metrics/metrics_atomic.hpp (100%) rename {src => include/sisl}/metrics/metrics_group_impl.hpp (99%) rename {src => include/sisl}/metrics/metrics_rcu.hpp (98%) rename {src => include/sisl}/metrics/metrics_tlocal.hpp (100%) rename {src => include/sisl}/metrics/prometheus_reporter.hpp (100%) rename {src => include/sisl}/metrics/reporter.hpp (100%) rename {src => include/sisl}/settings/settings.hpp (99%) rename {src => include/sisl}/utility/atomic_counter.hpp (100%) rename {src => include/sisl}/utility/enum.hpp (100%) rename {src => include/sisl}/utility/obj_life_counter.hpp (99%) rename {src => include/sisl}/utility/status_factory.hpp (98%) rename {src => include/sisl}/utility/thread_buffer.hpp (99%) rename {src => include/sisl}/utility/thread_factory.hpp (100%) rename {src => include/sisl}/utility/urcu_helper.hpp (100%) rename {src => include/sisl}/wisr/wisr_ds.hpp (100%) rename {src => include/sisl}/wisr/wisr_framework.hpp (98%) rename src/logging/{lib => }/backtrace.cpp (100%) rename src/logging/{lib => }/logging.cpp (100%) rename src/logging/{lib => }/stacktrace.cpp (100%) diff --git a/src/auth_manager/auth_manager.hpp b/include/sisl/auth_manager/auth_manager.hpp similarity index 96% rename from src/auth_manager/auth_manager.hpp rename to include/sisl/auth_manager/auth_manager.hpp index 0fbc7c67..bf5ea957 100644 --- a/src/auth_manager/auth_manager.hpp +++ b/include/sisl/auth_manager/auth_manager.hpp @@ -16,7 +16,7 @@ #pragma GCC diagnostic pop #endif -#include "../utility/enum.hpp" +#include #include "security_config.hpp" namespace sisl { diff --git a/src/auth_manager/security_config.hpp b/include/sisl/auth_manager/security_config.hpp similarity index 99% rename from src/auth_manager/security_config.hpp rename to include/sisl/auth_manager/security_config.hpp index 550774aa..fa0f14dc 100644 --- a/src/auth_manager/security_config.hpp +++ b/include/sisl/auth_manager/security_config.hpp @@ -1,6 +1,6 @@ #pragma once -#include "../settings/settings.hpp" #include +#include #include "generated/security_config_generated.h" SETTINGS_INIT(securitycfg::SecuritySettings, security_config) diff --git a/src/auth_manager/trf_client.hpp b/include/sisl/auth_manager/trf_client.hpp similarity index 100% rename from src/auth_manager/trf_client.hpp rename to include/sisl/auth_manager/trf_client.hpp diff --git a/src/fds/bitset.hpp b/include/sisl/fds/bitset.hpp similarity index 100% rename from src/fds/bitset.hpp rename to include/sisl/fds/bitset.hpp diff --git a/src/fds/bitword.hpp b/include/sisl/fds/bitword.hpp similarity index 99% rename from src/fds/bitword.hpp rename to include/sisl/fds/bitword.hpp index 5d20e1d3..d65d021b 100644 --- a/src/fds/bitword.hpp +++ b/include/sisl/fds/bitword.hpp @@ -29,7 +29,7 @@ #include -#include "../utility/enum.hpp" +#include namespace sisl { diff --git a/src/fds/buffer.hpp b/include/sisl/fds/buffer.hpp similarity index 99% rename from src/fds/buffer.hpp rename to include/sisl/fds/buffer.hpp index 0643b344..66fad1bd 100644 --- a/src/fds/buffer.hpp +++ b/include/sisl/fds/buffer.hpp @@ -26,9 +26,9 @@ #include #endif -#include "../metrics/metrics.hpp" -#include "../utility/enum.hpp" -#include "../fds/utils.hpp" +#include +#include +#include "utils.hpp" namespace sisl { struct blob { diff --git a/src/fds/flexarray.hpp b/include/sisl/fds/flexarray.hpp similarity index 100% rename from src/fds/flexarray.hpp rename to include/sisl/fds/flexarray.hpp diff --git a/src/fds/freelist_allocator.hpp b/include/sisl/fds/freelist_allocator.hpp similarity index 99% rename from src/fds/freelist_allocator.hpp rename to include/sisl/fds/freelist_allocator.hpp index d0d58973..60879f39 100644 --- a/src/fds/freelist_allocator.hpp +++ b/include/sisl/fds/freelist_allocator.hpp @@ -33,7 +33,7 @@ #pragma GCC diagnostic pop #endif -#include "../metrics/metrics.hpp" +#include #include "utils.hpp" namespace sisl { diff --git a/src/fds/id_reserver.hpp b/include/sisl/fds/id_reserver.hpp similarity index 99% rename from src/fds/id_reserver.hpp rename to include/sisl/fds/id_reserver.hpp index ad872a4b..b18f592b 100644 --- a/src/fds/id_reserver.hpp +++ b/include/sisl/fds/id_reserver.hpp @@ -21,7 +21,6 @@ #include #include -#include "../fds/bitset.hpp" #include "bitset.hpp" #include "utils.hpp" diff --git a/src/fds/obj_allocator.hpp b/include/sisl/fds/obj_allocator.hpp similarity index 100% rename from src/fds/obj_allocator.hpp rename to include/sisl/fds/obj_allocator.hpp diff --git a/src/fds/sparse_vector.hpp b/include/sisl/fds/sparse_vector.hpp similarity index 100% rename from src/fds/sparse_vector.hpp rename to include/sisl/fds/sparse_vector.hpp diff --git a/src/fds/stream_tracker.hpp b/include/sisl/fds/stream_tracker.hpp similarity index 99% rename from src/fds/stream_tracker.hpp rename to include/sisl/fds/stream_tracker.hpp index c061036d..2ae80c44 100644 --- a/src/fds/stream_tracker.hpp +++ b/include/sisl/fds/stream_tracker.hpp @@ -17,9 +17,10 @@ #pragma once #include +#include +#include + #include "bitset.hpp" -#include "../metrics/metrics_group_impl.hpp" -#include "../metrics/metrics.hpp" namespace sisl { class StreamTrackerMetrics : public MetricsGroupWrapper { diff --git a/src/fds/thread_vector.hpp b/include/sisl/fds/thread_vector.hpp similarity index 97% rename from src/fds/thread_vector.hpp rename to include/sisl/fds/thread_vector.hpp index f55a10df..59e335a9 100644 --- a/src/fds/thread_vector.hpp +++ b/include/sisl/fds/thread_vector.hpp @@ -20,8 +20,8 @@ #include #include -#include "../wisr/wisr_framework.hpp" -#include "../wisr/wisr_ds.hpp" +#include +#include namespace sisl { diff --git a/src/fds/utils.hpp b/include/sisl/fds/utils.hpp similarity index 100% rename from src/fds/utils.hpp rename to include/sisl/fds/utils.hpp diff --git a/src/flip/flip.hpp b/include/sisl/flip/flip.hpp similarity index 100% rename from src/flip/flip.hpp rename to include/sisl/flip/flip.hpp diff --git a/src/flip/flip_client.hpp b/include/sisl/flip/flip_client.hpp similarity index 100% rename from src/flip/flip_client.hpp rename to include/sisl/flip/flip_client.hpp diff --git a/src/flip/flip_rpc_server.hpp b/include/sisl/flip/flip_rpc_server.hpp similarity index 100% rename from src/flip/flip_rpc_server.hpp rename to include/sisl/flip/flip_rpc_server.hpp diff --git a/src/metrics/histogram_buckets.hpp b/include/sisl/metrics/histogram_buckets.hpp similarity index 100% rename from src/metrics/histogram_buckets.hpp rename to include/sisl/metrics/histogram_buckets.hpp diff --git a/src/metrics/metrics.hpp b/include/sisl/metrics/metrics.hpp similarity index 100% rename from src/metrics/metrics.hpp rename to include/sisl/metrics/metrics.hpp diff --git a/src/metrics/metrics_atomic.hpp b/include/sisl/metrics/metrics_atomic.hpp similarity index 100% rename from src/metrics/metrics_atomic.hpp rename to include/sisl/metrics/metrics_atomic.hpp diff --git a/src/metrics/metrics_group_impl.hpp b/include/sisl/metrics/metrics_group_impl.hpp similarity index 99% rename from src/metrics/metrics_group_impl.hpp rename to include/sisl/metrics/metrics_group_impl.hpp index d7efab69..7af5f661 100644 --- a/src/metrics/metrics_group_impl.hpp +++ b/include/sisl/metrics/metrics_group_impl.hpp @@ -30,9 +30,10 @@ #include +#include + #include "histogram_buckets.hpp" #include "prometheus_reporter.hpp" -#include "../utility/thread_buffer.hpp" namespace sisl { using on_gather_cb_t = std::function< void(void) >; diff --git a/src/metrics/metrics_rcu.hpp b/include/sisl/metrics/metrics_rcu.hpp similarity index 98% rename from src/metrics/metrics_rcu.hpp rename to include/sisl/metrics/metrics_rcu.hpp index 81a97efc..54092c63 100644 --- a/src/metrics/metrics_rcu.hpp +++ b/include/sisl/metrics/metrics_rcu.hpp @@ -16,13 +16,15 @@ *********************************************************************************/ #pragma once -#include "histogram_buckets.hpp" #include #include #include #include + +#include + +#include "histogram_buckets.hpp" #include "metrics_tlocal.hpp" -#include "../wisr/wisr_framework.hpp" namespace sisl { using WisrBufferMetrics = diff --git a/src/metrics/metrics_tlocal.hpp b/include/sisl/metrics/metrics_tlocal.hpp similarity index 100% rename from src/metrics/metrics_tlocal.hpp rename to include/sisl/metrics/metrics_tlocal.hpp diff --git a/src/metrics/prometheus_reporter.hpp b/include/sisl/metrics/prometheus_reporter.hpp similarity index 100% rename from src/metrics/prometheus_reporter.hpp rename to include/sisl/metrics/prometheus_reporter.hpp diff --git a/src/metrics/reporter.hpp b/include/sisl/metrics/reporter.hpp similarity index 100% rename from src/metrics/reporter.hpp rename to include/sisl/metrics/reporter.hpp diff --git a/src/settings/settings.hpp b/include/sisl/settings/settings.hpp similarity index 99% rename from src/settings/settings.hpp rename to include/sisl/settings/settings.hpp index ae2c9fb0..512025da 100644 --- a/src/settings/settings.hpp +++ b/include/sisl/settings/settings.hpp @@ -31,7 +31,7 @@ #include #include -#include "../utility/urcu_helper.hpp" +#include #define SETTINGS_INIT(schema_type, schema_name) \ extern unsigned char schema_name##_fbs[]; \ diff --git a/src/utility/atomic_counter.hpp b/include/sisl/utility/atomic_counter.hpp similarity index 100% rename from src/utility/atomic_counter.hpp rename to include/sisl/utility/atomic_counter.hpp diff --git a/src/utility/enum.hpp b/include/sisl/utility/enum.hpp similarity index 100% rename from src/utility/enum.hpp rename to include/sisl/utility/enum.hpp diff --git a/src/utility/obj_life_counter.hpp b/include/sisl/utility/obj_life_counter.hpp similarity index 99% rename from src/utility/obj_life_counter.hpp rename to include/sisl/utility/obj_life_counter.hpp index a3fc30a3..640309ae 100644 --- a/src/utility/obj_life_counter.hpp +++ b/include/sisl/utility/obj_life_counter.hpp @@ -27,7 +27,7 @@ #if defined(__linux__) || defined(__APPLE__) #include #endif -#include "../metrics/metrics.hpp" +#include namespace sisl { diff --git a/src/utility/status_factory.hpp b/include/sisl/utility/status_factory.hpp similarity index 98% rename from src/utility/status_factory.hpp rename to include/sisl/utility/status_factory.hpp index aedef943..e8c8382c 100644 --- a/src/utility/status_factory.hpp +++ b/include/sisl/utility/status_factory.hpp @@ -15,7 +15,7 @@ * *********************************************************************************/ #pragma once -#include +#include "urcu_helper.hpp" #include namespace sisl { diff --git a/src/utility/thread_buffer.hpp b/include/sisl/utility/thread_buffer.hpp similarity index 99% rename from src/utility/thread_buffer.hpp rename to include/sisl/utility/thread_buffer.hpp index efc6b03c..093e3515 100644 --- a/src/utility/thread_buffer.hpp +++ b/include/sisl/utility/thread_buffer.hpp @@ -34,8 +34,8 @@ #include -#include "../fds/flexarray.hpp" -#include "../fds/sparse_vector.hpp" +#include +#include #include "atomic_counter.hpp" #include "enum.hpp" #include "urcu_helper.hpp" diff --git a/src/utility/thread_factory.hpp b/include/sisl/utility/thread_factory.hpp similarity index 100% rename from src/utility/thread_factory.hpp rename to include/sisl/utility/thread_factory.hpp diff --git a/src/utility/urcu_helper.hpp b/include/sisl/utility/urcu_helper.hpp similarity index 100% rename from src/utility/urcu_helper.hpp rename to include/sisl/utility/urcu_helper.hpp diff --git a/src/wisr/wisr_ds.hpp b/include/sisl/wisr/wisr_ds.hpp similarity index 100% rename from src/wisr/wisr_ds.hpp rename to include/sisl/wisr/wisr_ds.hpp diff --git a/src/wisr/wisr_framework.hpp b/include/sisl/wisr/wisr_framework.hpp similarity index 98% rename from src/wisr/wisr_framework.hpp rename to include/sisl/wisr/wisr_framework.hpp index 4b90ebaf..08919e80 100644 --- a/src/wisr/wisr_framework.hpp +++ b/include/sisl/wisr/wisr_framework.hpp @@ -19,8 +19,8 @@ #include #include -#include "../utility/thread_buffer.hpp" -#include "../utility/urcu_helper.hpp" +#include +#include namespace sisl { diff --git a/src/auth_manager/auth_manager.cpp b/src/auth_manager/auth_manager.cpp index 89663a54..38396cca 100644 --- a/src/auth_manager/auth_manager.cpp +++ b/src/auth_manager/auth_manager.cpp @@ -3,7 +3,7 @@ #include -#include "auth_manager.hpp" +#include "sisl/auth_manager/auth_manager.hpp" namespace sisl { diff --git a/src/auth_manager/tests/AuthTest.cpp b/src/auth_manager/tests/AuthTest.cpp index 4138db8a..f998a17f 100644 --- a/src/auth_manager/tests/AuthTest.cpp +++ b/src/auth_manager/tests/AuthTest.cpp @@ -8,8 +8,8 @@ #include #include -#include "auth_manager.hpp" -#include "trf_client.hpp" +#include "sisl/auth_manager/auth_manager.hpp" +#include "sisl/auth_manager/trf_client.hpp" #include "test_token.hpp" #include "basic_http_server.hpp" diff --git a/src/auth_manager/trf_client.cpp b/src/auth_manager/trf_client.cpp index 3219b483..49d43cfe 100644 --- a/src/auth_manager/trf_client.cpp +++ b/src/auth_manager/trf_client.cpp @@ -7,7 +7,7 @@ #include #include -#include "trf_client.hpp" +#include "sisl/auth_manager/trf_client.hpp" namespace sisl { TrfClient::TrfClient() { validate_grant_path(); } diff --git a/src/cache/CMakeLists.txt b/src/cache/CMakeLists.txt index d7961930..4b075a99 100644 --- a/src/cache/CMakeLists.txt +++ b/src/cache/CMakeLists.txt @@ -4,7 +4,6 @@ if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQU add_flags("-Wno-unused-parameter -Wno-cast-function-type") endif() -include_directories(BEFORE ..) include_directories(BEFORE .) set(CACHE_SOURCE_FILES diff --git a/src/cache/lru_evictor.hpp b/src/cache/lru_evictor.hpp index 7430cc67..f5628ef5 100644 --- a/src/cache/lru_evictor.hpp +++ b/src/cache/lru_evictor.hpp @@ -21,7 +21,7 @@ #include #include #include -#include "../fds/utils.hpp" +#include #include "evictor.hpp" using namespace boost::intrusive; diff --git a/src/cache/range_hashmap.hpp b/src/cache/range_hashmap.hpp index 7c7670ad..7e9a487e 100644 --- a/src/cache/range_hashmap.hpp +++ b/src/cache/range_hashmap.hpp @@ -31,10 +31,11 @@ #pragma GCC diagnostic pop #endif -#include "../fds/buffer.hpp" +#include +#include +#include + #include "hash_entry_base.hpp" -#include "../fds/utils.hpp" -#include "../utility/enum.hpp" namespace sisl { diff --git a/src/cache/tests/test_range_cache.cpp b/src/cache/tests/test_range_cache.cpp index c0b7fd3a..a8a19f7f 100644 --- a/src/cache/tests/test_range_cache.cpp +++ b/src/cache/tests/test_range_cache.cpp @@ -28,9 +28,9 @@ #endif #include +#include #include "range_cache.hpp" #include "lru_evictor.hpp" -#include "utility/enum.hpp" using namespace sisl; SISL_LOGGING_INIT(test_rangecache) diff --git a/src/cache/tests/test_range_hashmap.cpp b/src/cache/tests/test_range_hashmap.cpp index f5312669..3ad5e70b 100644 --- a/src/cache/tests/test_range_hashmap.cpp +++ b/src/cache/tests/test_range_hashmap.cpp @@ -21,9 +21,9 @@ #include #include -#include "fds/bitset.hpp" +#include +#include "sisl/fds/bitset.hpp" #include "range_hashmap.hpp" -#include "utility/enum.hpp" using namespace sisl; SISL_LOGGING_INIT(test_hashmap) diff --git a/src/fds/CMakeLists.txt b/src/fds/CMakeLists.txt index c59ed51b..044aa0e9 100644 --- a/src/fds/CMakeLists.txt +++ b/src/fds/CMakeLists.txt @@ -4,7 +4,6 @@ if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQU add_flags("-Wno-unused-parameter -Wno-cast-function-type") endif() -include_directories(BEFORE ..) include_directories(BEFORE .) set(BUFFER_SOURCE_FILES diff --git a/src/fds/buffer.cpp b/src/fds/buffer.cpp index d694755d..ff46f5fa 100644 --- a/src/fds/buffer.cpp +++ b/src/fds/buffer.cpp @@ -15,7 +15,7 @@ * *********************************************************************************/ #include -#include "buffer.hpp" +#include "sisl/fds/buffer.hpp" namespace sisl { uint8_t* AlignedAllocatorImpl::aligned_alloc(const size_t align, const size_t sz, const sisl::buftag tag) { diff --git a/src/fds/malloc_helper.hpp b/src/fds/malloc_helper.hpp index c2323eb2..8b948c4a 100644 --- a/src/fds/malloc_helper.hpp +++ b/src/fds/malloc_helper.hpp @@ -39,8 +39,8 @@ #include #include -#include "../metrics/histogram_buckets.hpp" -#include "../metrics/metrics.hpp" +#include +#include #if defined(USING_TCMALLOC) #include diff --git a/src/fds/memvector.hpp b/src/fds/memvector.hpp index 92557557..f4834bb3 100644 --- a/src/fds/memvector.hpp +++ b/src/fds/memvector.hpp @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include "buffer.hpp" diff --git a/src/fds/tests/obj_allocator_benchmark.cpp b/src/fds/tests/obj_allocator_benchmark.cpp index 30f8e7d9..7fca13ec 100644 --- a/src/fds/tests/obj_allocator_benchmark.cpp +++ b/src/fds/tests/obj_allocator_benchmark.cpp @@ -22,11 +22,11 @@ #include #include -#include -#include +#include "sisl/logging/logging.h" +#include "sisl/options/options.h" -#include "metrics/metrics.hpp" -#include "obj_allocator.hpp" +#include "sisl/metrics/metrics.hpp" +#include "sisl/fds/obj_allocator.hpp" SISL_LOGGING_INIT(HOMESTORE_LOG_MODS) RCU_REGISTER_INIT diff --git a/src/fds/tests/test_bitset.cpp b/src/fds/tests/test_bitset.cpp index 660e5167..25fa3cbd 100644 --- a/src/fds/tests/test_bitset.cpp +++ b/src/fds/tests/test_bitset.cpp @@ -26,7 +26,7 @@ #include -#include "bitset.hpp" +#include "sisl/fds/bitset.hpp" using namespace sisl; diff --git a/src/fds/tests/test_bitword.cpp b/src/fds/tests/test_bitword.cpp index bf06a9d5..7ac61772 100644 --- a/src/fds/tests/test_bitword.cpp +++ b/src/fds/tests/test_bitword.cpp @@ -25,7 +25,7 @@ #include -#include "bitword.hpp" +#include "sisl/fds/bitword.hpp" using namespace sisl; diff --git a/src/fds/tests/test_cb_mutex.cpp b/src/fds/tests/test_cb_mutex.cpp index a0a1d36b..ff72f644 100644 --- a/src/fds/tests/test_cb_mutex.cpp +++ b/src/fds/tests/test_cb_mutex.cpp @@ -27,7 +27,7 @@ #pragma GCC diagnostic pop #include "callback_mutex.hpp" -#include "utils.hpp" +#include "sisl/fds/utils.hpp" SISL_LOGGING_INIT(test_cb_mutex) diff --git a/src/fds/tests/test_idreserver.cpp b/src/fds/tests/test_idreserver.cpp index ea4fc607..a519b32d 100644 --- a/src/fds/tests/test_idreserver.cpp +++ b/src/fds/tests/test_idreserver.cpp @@ -21,7 +21,7 @@ #include #include -#include "id_reserver.hpp" +#include "sisl/fds/id_reserver.hpp" #include diff --git a/src/fds/tests/test_jemalloc_helper.cpp b/src/fds/tests/test_jemalloc_helper.cpp index 4bdbfbbe..b164c495 100644 --- a/src/fds/tests/test_jemalloc_helper.cpp +++ b/src/fds/tests/test_jemalloc_helper.cpp @@ -23,13 +23,12 @@ #include #include -#include -#include +#include "sisl/logging/logging.h" +#include "sisl/options/options.h" +#include "sisl/utility/thread_buffer.hpp" #include -#include "utility/thread_buffer.hpp" - #include "malloc_helper.hpp" using namespace sisl; diff --git a/src/fds/tests/test_obj_allocator.cpp b/src/fds/tests/test_obj_allocator.cpp index f0996d39..f1624dd9 100644 --- a/src/fds/tests/test_obj_allocator.cpp +++ b/src/fds/tests/test_obj_allocator.cpp @@ -17,10 +17,10 @@ #include #include -#include -#include +#include "sisl/logging/logging.h" +#include "sisl/options/options.h" -#include "obj_allocator.hpp" +#include "sisl/fds/obj_allocator.hpp" SISL_LOGGING_INIT(HOMESTORE_LOG_MODS) diff --git a/src/fds/tests/test_stream_tracker.cpp b/src/fds/tests/test_stream_tracker.cpp index 092165a8..eb0c55a1 100644 --- a/src/fds/tests/test_stream_tracker.cpp +++ b/src/fds/tests/test_stream_tracker.cpp @@ -19,12 +19,11 @@ #include #include - -#include "fds/thread_vector.hpp" -#include "stream_tracker.hpp" - #include +#include "sisl/fds/thread_vector.hpp" +#include "sisl/fds/stream_tracker.hpp" + using namespace sisl; SISL_LOGGING_INIT(test_stream_tracker) diff --git a/src/fds/tests/test_tcmalloc_helper.cpp b/src/fds/tests/test_tcmalloc_helper.cpp index 9947afc0..f4b71834 100644 --- a/src/fds/tests/test_tcmalloc_helper.cpp +++ b/src/fds/tests/test_tcmalloc_helper.cpp @@ -21,13 +21,12 @@ #include #include -#include -#include +#include "sisl/logging/logging.h" +#include "sisl/options/options.h" +#include "sisl/utility/thread_buffer.hpp" #include -#include "utility/thread_buffer.hpp" - #include "malloc_helper.hpp" using namespace sisl; diff --git a/src/file_watcher/CMakeLists.txt b/src/file_watcher/CMakeLists.txt index 8efb5f48..80fc3020 100644 --- a/src/file_watcher/CMakeLists.txt +++ b/src/file_watcher/CMakeLists.txt @@ -2,7 +2,6 @@ cmake_minimum_required (VERSION 3.10) add_flags("-Wno-unused-parameter") -include_directories(BEFORE ..) include_directories(BEFORE .) set(FILE_WATCHER_SOURCE_FILES diff --git a/src/file_watcher/file_watcher.cpp b/src/file_watcher/file_watcher.cpp index 7bd8db42..f83fd590 100644 --- a/src/file_watcher/file_watcher.cpp +++ b/src/file_watcher/file_watcher.cpp @@ -7,7 +7,7 @@ #include #include "file_watcher.hpp" -#include "utility/thread_factory.hpp" +#include "sisl/utility/thread_factory.hpp" namespace sisl { namespace fs = std::filesystem; diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 53d7cbb4..62114d6a 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -6,17 +6,16 @@ endif() find_package(gRPC REQUIRED) -include_directories(BEFORE include) -include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}) -include_directories(BEFORE ..) -include_directories(BEFORE .) - add_subdirectory (proto) add_library(flip lib/flip_rpc_server.cpp $ ) +if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) + target_include_directories(flip BEFORE PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) +endif() +target_include_directories(flip BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries(flip sisl gRPC::grpc++ @@ -26,13 +25,23 @@ target_link_libraries(flip add_executable(test_flip lib/test_flip.cpp) target_link_libraries(test_flip flip cxxopts::cxxopts) +if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) + target_include_directories(test_flip BEFORE PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) +endif() +target_include_directories(test_flip BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) add_test(NAME TestFlip COMMAND test_flip) add_executable(test_flip_server lib/test_flip_server.cpp) +if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) + target_include_directories(test_flip_server BEFORE PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) +endif() +target_include_directories(test_flip_server BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries(test_flip_server flip cxxopts::cxxopts) add_executable(test_flip_local_client client/local/test_flip_local_client.cpp) +if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) + target_include_directories(test_flip_local_client BEFORE PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) +endif() +target_include_directories(test_flip_local_client BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries(test_flip_local_client flip cxxopts::cxxopts) add_test(NAME TestFlipLocalClient COMMAND test_flip_local_client) - - diff --git a/src/flip/client/local/test_flip_local_client.cpp b/src/flip/client/local/test_flip_local_client.cpp index dba24d98..b7d8c5bc 100644 --- a/src/flip/client/local/test_flip_local_client.cpp +++ b/src/flip/client/local/test_flip_local_client.cpp @@ -15,7 +15,7 @@ * *********************************************************************************/ #include "proto/flip_spec.pb.h" -#include "flip_client.hpp" +#include "sisl/flip/flip_client.hpp" #include #include diff --git a/src/flip/lib/flip_rpc_server.cpp b/src/flip/lib/flip_rpc_server.cpp index 90512242..d03c2e6d 100644 --- a/src/flip/lib/flip_rpc_server.cpp +++ b/src/flip/lib/flip_rpc_server.cpp @@ -22,8 +22,8 @@ #include #include -#include "flip_rpc_server.hpp" -#include "flip.hpp" +#include "sisl/flip/flip_rpc_server.hpp" +#include "sisl/flip/flip.hpp" namespace flip { grpc::Status FlipRPCServer::InjectFault(grpc::ServerContext* context, const FlipSpec* request, FlipResponse* response) { diff --git a/src/flip/lib/test_flip.cpp b/src/flip/lib/test_flip.cpp index af3e5126..2861f5ba 100644 --- a/src/flip/lib/test_flip.cpp +++ b/src/flip/lib/test_flip.cpp @@ -15,7 +15,7 @@ * *********************************************************************************/ #include "proto/flip_spec.pb.h" -#include "flip.hpp" +#include "sisl/flip/flip.hpp" #include #include diff --git a/src/flip/lib/test_flip_server.cpp b/src/flip/lib/test_flip_server.cpp index 3e2b631e..9f395282 100644 --- a/src/flip/lib/test_flip_server.cpp +++ b/src/flip/lib/test_flip_server.cpp @@ -14,7 +14,7 @@ * specific language governing permissions and limitations under the License. * *********************************************************************************/ -#include "flip.hpp" +#include "sisl/flip/flip.hpp" #include diff --git a/src/flip/proto/CMakeLists.txt b/src/flip/proto/CMakeLists.txt index 0e0310b2..bb08c7f4 100644 --- a/src/flip/proto/CMakeLists.txt +++ b/src/flip/proto/CMakeLists.txt @@ -4,11 +4,6 @@ add_library(flip_proto OBJECT flip_server.proto flip_spec.proto ) -target_link_libraries(flip_proto - protobuf::libprotobuf - gRPC::grpc++ - ) - protobuf_generate(LANGUAGE cpp TARGET flip_proto PROTOS flip_spec.proto) protobuf_generate(LANGUAGE cpp TARGET flip_proto PROTOS flip_server.proto) protobuf_generate( @@ -17,3 +12,7 @@ protobuf_generate( GENERATE_EXTENSIONS .grpc.pb.h .grpc.pb.cc PLUGIN protoc-gen-grpc=$ ) +target_link_libraries(flip_proto + protobuf::libprotobuf + gRPC::grpc++ + ) diff --git a/src/logging/CMakeLists.txt b/src/logging/CMakeLists.txt index 882cc777..220ec409 100644 --- a/src/logging/CMakeLists.txt +++ b/src/logging/CMakeLists.txt @@ -6,11 +6,10 @@ endif() add_library(sisl_logging OBJECT) target_sources(sisl_logging PRIVATE - lib/backtrace.cpp - lib/logging.cpp - lib/stacktrace.cpp + backtrace.cpp + logging.cpp + stacktrace.cpp ) -target_include_directories(sisl_logging BEFORE PRIVATE .) target_link_libraries(sisl_logging ${COMMON_DEPS}) add_executable(logging_example) diff --git a/src/logging/lib/backtrace.cpp b/src/logging/backtrace.cpp similarity index 100% rename from src/logging/lib/backtrace.cpp rename to src/logging/backtrace.cpp diff --git a/src/logging/lib/logging.cpp b/src/logging/logging.cpp similarity index 100% rename from src/logging/lib/logging.cpp rename to src/logging/logging.cpp diff --git a/src/logging/lib/stacktrace.cpp b/src/logging/stacktrace.cpp similarity index 100% rename from src/logging/lib/stacktrace.cpp rename to src/logging/stacktrace.cpp diff --git a/src/metrics/CMakeLists.txt b/src/metrics/CMakeLists.txt index 0720f949..977b9f33 100644 --- a/src/metrics/CMakeLists.txt +++ b/src/metrics/CMakeLists.txt @@ -4,7 +4,6 @@ if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQU add_flags("-Wno-attributes") # needed for C++ 20 folly compilation endif() -include_directories(BEFORE ..) include_directories(BEFORE .) set(METRICS_SOURCE_FILES diff --git a/src/metrics/metrics.cpp b/src/metrics/metrics.cpp index 26c94599..553e122c 100644 --- a/src/metrics/metrics.cpp +++ b/src/metrics/metrics.cpp @@ -16,7 +16,7 @@ *********************************************************************************/ #include -#include "metrics.hpp" +#include "sisl/metrics/metrics.hpp" THREAD_BUFFER_INIT diff --git a/src/metrics/metrics_atomic.cpp b/src/metrics/metrics_atomic.cpp index 9a2c1240..ad5c5b01 100644 --- a/src/metrics/metrics_atomic.cpp +++ b/src/metrics/metrics_atomic.cpp @@ -20,7 +20,7 @@ #include -#include "metrics_atomic.hpp" +#include "sisl/metrics/metrics_atomic.hpp" namespace sisl { diff --git a/src/metrics/metrics_group_impl.cpp b/src/metrics/metrics_group_impl.cpp index 34e8e4a7..1a9e64c4 100644 --- a/src/metrics/metrics_group_impl.cpp +++ b/src/metrics/metrics_group_impl.cpp @@ -28,9 +28,9 @@ #include #include -#include "metrics_group_impl.hpp" -#include "metrics.hpp" -#include "metrics_tlocal.hpp" +#include "sisl/metrics/metrics_group_impl.hpp" +#include "sisl/metrics/metrics.hpp" +#include "sisl/metrics/metrics_tlocal.hpp" namespace sisl { diff --git a/src/metrics/metrics_rcu.cpp b/src/metrics/metrics_rcu.cpp index e02c3202..bd69c34f 100644 --- a/src/metrics/metrics_rcu.cpp +++ b/src/metrics/metrics_rcu.cpp @@ -14,7 +14,7 @@ * specific language governing permissions and limitations under the License. * *********************************************************************************/ -#include "metrics_rcu.hpp" +#include "sisl/metrics/metrics_rcu.hpp" #include namespace sisl { diff --git a/src/metrics/metrics_tlocal.cpp b/src/metrics/metrics_tlocal.cpp index d04b711e..795d2be3 100644 --- a/src/metrics/metrics_tlocal.cpp +++ b/src/metrics/metrics_tlocal.cpp @@ -20,7 +20,7 @@ #include -#include "metrics_tlocal.hpp" +#include "sisl/metrics/metrics_tlocal.hpp" namespace sisl { diff --git a/src/metrics/tests/farm_test.cpp b/src/metrics/tests/farm_test.cpp index 8658efb8..451bda93 100644 --- a/src/metrics/tests/farm_test.cpp +++ b/src/metrics/tests/farm_test.cpp @@ -25,7 +25,7 @@ #include #include -#include "metrics.hpp" +#include "sisl/metrics/metrics.hpp" constexpr size_t ITERATIONS{3}; diff --git a/src/metrics/tests/functionality_test.cpp b/src/metrics/tests/functionality_test.cpp index 59d7be76..8cd9b357 100644 --- a/src/metrics/tests/functionality_test.cpp +++ b/src/metrics/tests/functionality_test.cpp @@ -25,8 +25,8 @@ #include #include -#include "../metrics.hpp" -#include "../metrics_group_impl.hpp" +#include "sisl/metrics/metrics.hpp" +#include "sisl/metrics/metrics_group_impl.hpp" constexpr size_t ITERATIONS{2}; diff --git a/src/metrics/tests/metrics_benchmark.cpp b/src/metrics/tests/metrics_benchmark.cpp index 611cf0be..8d06c361 100644 --- a/src/metrics/tests/metrics_benchmark.cpp +++ b/src/metrics/tests/metrics_benchmark.cpp @@ -14,12 +14,13 @@ * specific language governing permissions and limitations under the License. * *********************************************************************************/ -#include #include -#include "metrics.hpp" #include +#include #include +#include "sisl/metrics/metrics.hpp" + SISL_LOGGING_INIT(vmod_metrics_framework) RCU_REGISTER_INIT diff --git a/src/metrics/tests/wrapper_test.cpp b/src/metrics/tests/wrapper_test.cpp index 05aeb09c..42f2a298 100644 --- a/src/metrics/tests/wrapper_test.cpp +++ b/src/metrics/tests/wrapper_test.cpp @@ -18,10 +18,12 @@ #include #include #include -#include "metrics.hpp" + #include #include +#include "sisl/metrics/metrics.hpp" + SISL_LOGGING_INIT(vmod_metrics_framework) RCU_REGISTER_INIT diff --git a/src/settings/README.md b/src/settings/README.md index 8c5f8f99..89641d29 100644 --- a/src/settings/README.md +++ b/src/settings/README.md @@ -75,7 +75,7 @@ settings_gen_cpp(${FLATBUFFERS_FLATC_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/gen In your main include code or separate code, add the following lines outside your namespace definition ```c++ -#include +#include #include "generated/homeblks_config_generated.h" // <--- Format is diff --git a/src/settings/settings.cpp b/src/settings/settings.cpp index 7d03e73d..2221810d 100644 --- a/src/settings/settings.cpp +++ b/src/settings/settings.cpp @@ -24,7 +24,7 @@ #include -#include "settings.hpp" +#include "sisl/settings/settings.hpp" SISL_OPTION_GROUP(config, (config_path, "", "config_path", "Path to dynamic config of app", cxxopts::value< std::string >(), diff --git a/src/settings/tests/test_settings.cpp b/src/settings/tests/test_settings.cpp index 68fa1e63..0354b83c 100644 --- a/src/settings/tests/test_settings.cpp +++ b/src/settings/tests/test_settings.cpp @@ -21,7 +21,7 @@ #include #include #include "generated/test_app_schema_generated.h" -#include "settings.hpp" +#include "sisl/settings/settings.hpp" SISL_OPTIONS_ENABLE(logging, test_settings, config) SISL_LOGGING_INIT(test_settings, settings) diff --git a/src/utility/CMakeLists.txt b/src/utility/CMakeLists.txt index ea33be93..09db905e 100644 --- a/src/utility/CMakeLists.txt +++ b/src/utility/CMakeLists.txt @@ -2,7 +2,6 @@ cmake_minimum_required (VERSION 3.10) add_flags("-Wno-unused-parameter") -include_directories(BEFORE ..) include_directories(BEFORE .) set(TEST_ATOMIC_COUNTER_SOURCES diff --git a/src/utility/tests/test_atomic_counter.cpp b/src/utility/tests/test_atomic_counter.cpp index 2ec6efda..f627eb37 100644 --- a/src/utility/tests/test_atomic_counter.cpp +++ b/src/utility/tests/test_atomic_counter.cpp @@ -10,7 +10,7 @@ #include -#include "atomic_counter.hpp" +#include "sisl/utility/atomic_counter.hpp" using namespace sisl; diff --git a/src/utility/tests/test_enum.cpp b/src/utility/tests/test_enum.cpp index 3155b179..51543df1 100644 --- a/src/utility/tests/test_enum.cpp +++ b/src/utility/tests/test_enum.cpp @@ -9,8 +9,8 @@ #include -#include "thread_buffer.hpp" -#include "utility/enum.hpp" +#include "sisl/utility/thread_buffer.hpp" +#include "sisl/utility/enum.hpp" class EnumTest : public testing::Test { public: diff --git a/src/utility/tests/test_objlife_counter.cpp b/src/utility/tests/test_objlife_counter.cpp index a7e9351c..f9c0e721 100644 --- a/src/utility/tests/test_objlife_counter.cpp +++ b/src/utility/tests/test_objlife_counter.cpp @@ -10,9 +10,8 @@ #include #include -#include "fds/buffer.hpp" - -#include "obj_life_counter.hpp" +#include "sisl/fds/buffer.hpp" +#include "sisl/utility/obj_life_counter.hpp" SISL_LOGGING_INIT(test_objlife) diff --git a/src/utility/tests/test_status_factory.cpp b/src/utility/tests/test_status_factory.cpp index 9d170862..b0f3c0d1 100644 --- a/src/utility/tests/test_status_factory.cpp +++ b/src/utility/tests/test_status_factory.cpp @@ -1,10 +1,11 @@ #include -#include #include #include #include #include +#include "sisl/utility/urcu_helper.hpp" + RCU_REGISTER_INIT #define ITERATIONS 10000 diff --git a/src/utility/tests/test_thread_buffer.cpp b/src/utility/tests/test_thread_buffer.cpp index ce12c36c..e3cf4fb7 100644 --- a/src/utility/tests/test_thread_buffer.cpp +++ b/src/utility/tests/test_thread_buffer.cpp @@ -13,7 +13,7 @@ #include -#include "utility/thread_buffer.hpp" +#include "sisl/utility/thread_buffer.hpp" THREAD_BUFFER_INIT RCU_REGISTER_INIT diff --git a/src/wisr/CMakeLists.txt b/src/wisr/CMakeLists.txt index b3d601fe..465532ee 100644 --- a/src/wisr/CMakeLists.txt +++ b/src/wisr/CMakeLists.txt @@ -2,7 +2,6 @@ cmake_minimum_required (VERSION 3.10) add_flags("-Wno-unused-parameter") -include_directories(BEFORE ..) include_directories(BEFORE .) set(WISR_VECTOR_TEST diff --git a/src/wisr/tests/test_wisr_vector.cpp b/src/wisr/tests/test_wisr_vector.cpp index 383e1b93..4d2803eb 100644 --- a/src/wisr/tests/test_wisr_vector.cpp +++ b/src/wisr/tests/test_wisr_vector.cpp @@ -27,8 +27,8 @@ #include -#include "utility/thread_buffer.hpp" -#include "wisr/wisr_ds.hpp" +#include "sisl/utility/thread_buffer.hpp" +#include "sisl/wisr/wisr_ds.hpp" THREAD_BUFFER_INIT RCU_REGISTER_INIT diff --git a/src/wisr/tests/wisr_deque_benchmark.cpp b/src/wisr/tests/wisr_deque_benchmark.cpp index e16d938d..70ec3649 100644 --- a/src/wisr/tests/wisr_deque_benchmark.cpp +++ b/src/wisr/tests/wisr_deque_benchmark.cpp @@ -21,8 +21,8 @@ #include -#include "utility/thread_buffer.hpp" -#include "wisr/wisr_ds.hpp" +#include "sisl/utility/thread_buffer.hpp" +#include "sisl/wisr/wisr_ds.hpp" THREAD_BUFFER_INIT RCU_REGISTER_INIT diff --git a/src/wisr/tests/wisr_intrusive_slist_benchmark.cpp b/src/wisr/tests/wisr_intrusive_slist_benchmark.cpp index 87b382d2..d8ce6abf 100644 --- a/src/wisr/tests/wisr_intrusive_slist_benchmark.cpp +++ b/src/wisr/tests/wisr_intrusive_slist_benchmark.cpp @@ -24,8 +24,8 @@ #include -#include "utility/thread_buffer.hpp" -#include "wisr/wisr_ds.hpp" +#include "sisl/utility/thread_buffer.hpp" +#include "sisl/wisr/wisr_ds.hpp" THREAD_BUFFER_INIT RCU_REGISTER_INIT diff --git a/src/wisr/tests/wisr_list_benchmark.cpp b/src/wisr/tests/wisr_list_benchmark.cpp index d2220050..633e5fef 100644 --- a/src/wisr/tests/wisr_list_benchmark.cpp +++ b/src/wisr/tests/wisr_list_benchmark.cpp @@ -20,8 +20,8 @@ #include -#include "utility/thread_buffer.hpp" -#include "wisr/wisr_ds.hpp" +#include "sisl/utility/thread_buffer.hpp" +#include "sisl/wisr/wisr_ds.hpp" THREAD_BUFFER_INIT RCU_REGISTER_INIT diff --git a/src/wisr/tests/wisr_vector_benchmark.cpp b/src/wisr/tests/wisr_vector_benchmark.cpp index 3735d076..ed05ee07 100644 --- a/src/wisr/tests/wisr_vector_benchmark.cpp +++ b/src/wisr/tests/wisr_vector_benchmark.cpp @@ -21,8 +21,8 @@ #include -#include "utility/thread_buffer.hpp" -#include "wisr/wisr_ds.hpp" +#include "sisl/utility/thread_buffer.hpp" +#include "sisl/wisr/wisr_ds.hpp" THREAD_BUFFER_INIT RCU_REGISTER_INIT From ce24dead42503278127f3410bedb27545883a739 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 9 Nov 2022 11:14:32 -0700 Subject: [PATCH 177/385] Continue to clean CMake files. --- CMakeLists.txt | 52 +------------ conanfile.py | 2 +- include/sisl/fds/buffer.hpp | 4 +- include/sisl/fds/stream_tracker.hpp | 2 +- src/CMakeLists.txt | 51 +++++++++++++ src/auth_manager/CMakeLists.txt | 15 ++-- src/cache/CMakeLists.txt | 28 +++---- src/fds/CMakeLists.txt | 73 ++++++++----------- src/fds/malloc_helper.hpp | 4 +- src/fds/tests/obj_allocator_benchmark.cpp | 4 +- src/fds/tests/test_cb_mutex.cpp | 2 +- src/fds/tests/test_obj_allocator.cpp | 2 +- src/file_watcher/CMakeLists.txt | 18 ++--- src/flip/CMakeLists.txt | 45 +++++------- .../client/local/test_flip_local_client.cpp | 4 +- src/flip/lib/flip_rpc_server.cpp | 4 +- src/flip/lib/test_flip.cpp | 4 +- src/logging/CMakeLists.txt | 10 +-- src/metrics/CMakeLists.txt | 32 ++++---- src/options/CMakeLists.txt | 6 +- src/settings/CMakeLists.txt | 4 +- src/utility/CMakeLists.txt | 36 +++++---- src/version/CMakeLists.txt | 10 +-- src/wisr/CMakeLists.txt | 36 +++++---- 24 files changed, 208 insertions(+), 240 deletions(-) create mode 100644 src/CMakeLists.txt diff --git a/CMakeLists.txt b/CMakeLists.txt index e306931a..216549e8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required (VERSION 3.10) +cmake_minimum_required (VERSION 3.11) project (sisl) option(DEBUG_CMAKE "Debug CMake messages option" OFF) @@ -127,55 +127,7 @@ include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/include ) -#add_subdirectory (src/btree) -add_subdirectory (src/logging) -add_subdirectory (src/options) -add_subdirectory (src/version) - -# These sub-libraries currently do not support MacOS due to dependencies -# on Folly and pistache. It is unknown if Windows is supported... -list(APPEND POSIX_LIBRARIES ) -list(APPEND SISL_DEPS ) -if(${CMAKE_HOST_SYSTEM_NAME} STREQUAL Linux) - add_subdirectory (src/auth_manager) - add_subdirectory (src/cache) - add_subdirectory (src/fds) - add_subdirectory (src/file_watcher) - add_subdirectory (src/flip) - add_subdirectory (src/metrics) - add_subdirectory (src/settings) - add_subdirectory (src/utility) - add_subdirectory (src/wisr) - - list(APPEND POSIX_LIBRARIES - $ - $ - $ - $ - $ - $ - ) - list(APPEND SISL_DEPS - Folly::Folly - ) -endif() - -add_library(sisl - ${POSIX_LIBRARIES} - $ - $ - $ - ) - -if (DEFINED MALLOC_IMPL) - if (${MALLOC_IMPL} STREQUAL "tcmalloc") - list(APPEND SISL_DEPS gperftools::gperftools) - endif() -endif() - -target_link_libraries(sisl - ${SISL_DEPS} -) +add_subdirectory(src) # build info string(TOUPPER "${CMAKE_BUILD_TYPE}" UC_CMAKE_BUILD_TYPE) diff --git a/conanfile.py b/conanfile.py index fd89262c..dccbc9dd 100644 --- a/conanfile.py +++ b/conanfile.py @@ -130,7 +130,7 @@ def package(self): def package_info(self): self.cpp_info.libs = ["sisl", "flip"] - self.cpp_info.cppflags.extend(["-Wno-unused-local-typedefs", "-fconcepts"]) + self.cpp_info.cppflags.extend(["-fconcepts"]) if self.settings.os == "Linux": self.cpp_info.cppflags.append("-D_POSIX_C_SOURCE=200809L") diff --git a/include/sisl/fds/buffer.hpp b/include/sisl/fds/buffer.hpp index 66fad1bd..ec88a773 100644 --- a/include/sisl/fds/buffer.hpp +++ b/include/sisl/fds/buffer.hpp @@ -104,7 +104,7 @@ class AlignedAllocatorImpl { virtual uint8_t* aligned_pool_alloc(const size_t align, const size_t sz, const sisl::buftag tag) { return aligned_alloc(align, sz, tag); }; - virtual void aligned_pool_free(uint8_t* const b, const size_t sz, const sisl::buftag tag) { aligned_free(b, tag); }; + virtual void aligned_pool_free(uint8_t* const b, const size_t, const sisl::buftag tag) { aligned_free(b, tag); }; virtual size_t buf_size(uint8_t* buf) const { #ifdef __linux__ @@ -205,7 +205,7 @@ struct io_blob : public blob { aligned ? sisl_aligned_free(blob::bytes, tag) : std::free(blob::bytes); } - void buf_realloc(const size_t new_size, const uint32_t align_size = 512, const buftag tag = buftag::common) { + void buf_realloc(const size_t new_size, const uint32_t align_size = 512, [[maybe_unused]] const buftag tag = buftag::common) { uint8_t* new_buf{nullptr}; if (aligned) { // aligned before, so do not need check for new align size, once aligned will be aligned on realloc also diff --git a/include/sisl/fds/stream_tracker.hpp b/include/sisl/fds/stream_tracker.hpp index 2ae80c44..d0ef7d8f 100644 --- a/include/sisl/fds/stream_tracker.hpp +++ b/include/sisl/fds/stream_tracker.hpp @@ -44,7 +44,7 @@ class StreamTracker { public: static constexpr size_t alloc_blk_size = 10000; static constexpr size_t compaction_threshold = alloc_blk_size / 2; - static constexpr auto null_processor = [](auto... x) -> bool { return true; }; + static constexpr auto null_processor = []([[maybe_unused]] auto... x) -> bool { return true; }; static_assert(std::is_trivially_copyable< T >::value, "Cannot use StreamTracker for non-trivally copyable classes"); diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt new file mode 100644 index 00000000..5c59d9b5 --- /dev/null +++ b/src/CMakeLists.txt @@ -0,0 +1,51 @@ +cmake_minimum_required (VERSION 3.11) + +#add_subdirectory (btree) +add_subdirectory (logging) +add_subdirectory (options) +add_subdirectory (version) + +# These sub-libraries currently do not support MacOS due to dependencies +# on Folly and pistache. It is unknown if Windows is supported... +list(APPEND POSIX_LIBRARIES ) +list(APPEND SISL_DEPS ) +if(${CMAKE_HOST_SYSTEM_NAME} STREQUAL Linux) + add_subdirectory (auth_manager) + add_subdirectory (cache) + add_subdirectory (fds) + add_subdirectory (file_watcher) + add_subdirectory (flip) + add_subdirectory (metrics) + add_subdirectory (settings) + add_subdirectory (utility) + add_subdirectory (wisr) + + list(APPEND POSIX_LIBRARIES + $ + $ + $ + $ + $ + $ + ) + list(APPEND SISL_DEPS + Folly::Folly + ) +endif() + +add_library(sisl + ${POSIX_LIBRARIES} + $ + $ + $ + ) + +if (DEFINED MALLOC_IMPL) + if (${MALLOC_IMPL} STREQUAL "tcmalloc") + list(APPEND SISL_DEPS gperftools::gperftools) + endif() +endif() + +target_link_libraries(sisl + ${SISL_DEPS} +) diff --git a/src/auth_manager/CMakeLists.txt b/src/auth_manager/CMakeLists.txt index ad813117..68a242fa 100644 --- a/src/auth_manager/CMakeLists.txt +++ b/src/auth_manager/CMakeLists.txt @@ -1,17 +1,18 @@ -cmake_minimum_required (VERSION 3.10) +cmake_minimum_required (VERSION 3.11) find_package(flatbuffers REQUIRED) find_package(Pistache REQUIRED) +if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) + include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}) +endif() +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}) + add_library(sisl_auth_manager OBJECT) target_sources(sisl_auth_manager PRIVATE auth_manager.cpp trf_client.cpp ) -if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) - target_include_directories(sisl_auth_manager BEFORE PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) -endif() -target_include_directories(sisl_auth_manager BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries(sisl_auth_manager ${COMMON_DEPS} cpr::cpr @@ -29,10 +30,6 @@ add_executable(test_auth_mgr) target_sources(test_auth_mgr PRIVATE tests/AuthTest.cpp ) -if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) - target_include_directories(test_auth_mgr BEFORE PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) -endif() -target_include_directories(test_auth_mgr BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries(test_auth_mgr sisl ${COMMON_DEPS} diff --git a/src/cache/CMakeLists.txt b/src/cache/CMakeLists.txt index 4b075a99..707deb4b 100644 --- a/src/cache/CMakeLists.txt +++ b/src/cache/CMakeLists.txt @@ -1,25 +1,21 @@ -cmake_minimum_required (VERSION 3.10) +cmake_minimum_required (VERSION 3.11) -if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")) - add_flags("-Wno-unused-parameter -Wno-cast-function-type") -endif() - -include_directories(BEFORE .) - -set(CACHE_SOURCE_FILES +add_library(sisl_cache OBJECT) +target_sources(sisl_cache PRIVATE lru_evictor.cpp - ) -add_library(sisl_cache OBJECT ${CACHE_SOURCE_FILES}) + ) target_link_libraries(sisl_cache ${COMMON_DEPS}) -set(TEST_RANGEHASH_SOURCE_FILES +add_executable(test_range_hashmap) +target_sources(test_range_hashmap PRIVATE tests/test_range_hashmap.cpp - ) -add_executable(test_range_hashmap ${TEST_RANGEHASH_SOURCE_FILES}) + ) +target_include_directories(test_range_hashmap BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries(test_range_hashmap sisl ${COMMON_DEPS} Folly::Folly GTest::gtest) -set(TEST_RANGECACHE_SOURCE_FILES +add_executable(test_range_cache) +target_sources(test_range_cache PRIVATE tests/test_range_cache.cpp - ) -add_executable(test_range_cache ${TEST_RANGECACHE_SOURCE_FILES}) + ) +target_include_directories(test_range_cache BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries(test_range_cache sisl ${COMMON_DEPS} Folly::Folly GTest::gtest) diff --git a/src/fds/CMakeLists.txt b/src/fds/CMakeLists.txt index 044aa0e9..86392b74 100644 --- a/src/fds/CMakeLists.txt +++ b/src/fds/CMakeLists.txt @@ -1,82 +1,73 @@ -cmake_minimum_required (VERSION 3.10) +cmake_minimum_required (VERSION 3.11) -if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")) - add_flags("-Wno-unused-parameter -Wno-cast-function-type") -endif() - -include_directories(BEFORE .) +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}) -set(BUFFER_SOURCE_FILES +add_library(sisl_buffer OBJECT) +target_sources(sisl_buffer PRIVATE buffer.cpp - ) -add_library(sisl_buffer OBJECT ${BUFFER_SOURCE_FILES}) + ) target_link_libraries(sisl_buffer ${COMMON_DEPS}) -set(TEST_STREAM_TRACKER_SOURCES +add_executable(test_stream_tracker) +target_sources(test_stream_tracker PRIVATE tests/test_stream_tracker.cpp - ) -add_executable(test_stream_tracker ${TEST_STREAM_TRACKER_SOURCES}) + ) target_link_libraries(test_stream_tracker sisl ${COMMON_DEPS} GTest::gtest) -#add_test(NAME HttpServerTest COMMAND test_http_server) -set(TEST_ATOMIC_STATUS_COUNTER_SOURCES +add_executable(test_atomic_status_counter) +target_sources(test_atomic_status_counter PRIVATE tests/test_atomic_status_counter.cpp - ) -add_executable(test_atomic_status_counter ${TEST_ATOMIC_STATUS_COUNTER_SOURCES}) + ) target_link_libraries(test_atomic_status_counter sisl ${COMMON_DEPS} GTest::gtest atomic) add_test(NAME atomic_status_counter COMMAND test_atomic_status_counter) -set(TEST_BITSET_SOURCES +add_executable(test_bitset) +target_sources(test_bitset PRIVATE tests/test_bitset.cpp - ) -add_executable(test_bitset ${TEST_BITSET_SOURCES}) + ) target_link_libraries(test_bitset sisl ${COMMON_DEPS} GTest::gtest) add_test(NAME bitset COMMAND test_bitset) -set(TEST_BITWORD_SOURCES +add_executable(test_bitword) +target_sources(test_bitword PRIVATE tests/test_bitword.cpp - ) -add_executable(test_bitword ${TEST_BITWORD_SOURCES}) + ) target_link_libraries(test_bitword sisl ${COMMON_DEPS} GTest::gtest) -if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")) - add_flags("-Wno-attributes") # needed for C++ 20 folly compilation -endif() - -set(OBJ_ALLOCATOR_BENCHMARK_FILES +add_executable(obj_allocator_benchmark) +target_sources(obj_allocator_benchmark PRIVATE tests/obj_allocator_benchmark.cpp - ) -add_executable(obj_allocator_benchmark ${OBJ_ALLOCATOR_BENCHMARK_FILES}) + ) target_link_libraries(obj_allocator_benchmark sisl ${COMMON_DEPS} benchmark::benchmark) add_test(NAME ObjAllocatorBenchmark COMMAND obj_allocator_benchmark) -set(TEST_OBJALLOCATOR_SOURCE_FILES +add_executable(test_obj_allocator) +target_sources(test_obj_allocator PRIVATE tests/test_obj_allocator.cpp - ) -add_executable(test_obj_allocator ${TEST_OBJALLOCATOR_SOURCE_FILES}) + ) target_link_libraries(test_obj_allocator sisl ${COMMON_DEPS}) add_test(NAME ObjAlloc COMMAND test_obj_allocator) -set(TEST_CBMUTEX_SOURCE_FILES +add_executable(test_cb_mutex) +target_sources(test_cb_mutex PRIVATE tests/test_cb_mutex.cpp - ) -add_executable(test_cb_mutex ${TEST_CBMUTEX_SOURCE_FILES}) + ) target_link_libraries(test_cb_mutex sisl ${COMMON_DEPS} GTest::gtest) #add_test(NAME TestCBMutex COMMAND test_cb_mutex) if (DEFINED MALLOC_IMPL) if (${MALLOC_IMPL} STREQUAL "jemalloc") - set(TEST_JEMALLOC_SOURCE_FILES + add_executable(test_jemalloc) + target_sources(test_jemalloc PRIVATE tests/test_jemalloc_helper.cpp - ) - add_executable(test_jemalloc ${TEST_JEMALLOC_SOURCE_FILES}) + ) target_link_libraries(test_jemalloc sisl ${COMMON_DEPS} jemalloc GTest::gtest) add_test(NAME TestJemalloc COMMAND test_jemalloc) elseif (${MALLOC_IMPL} STREQUAL "tcmalloc") - set(TEST_TCMALLOC_SOURCE_FILES + add_executable(test_tcmalloc) + target_sources(test_tcmalloc PRIVATE tests/test_tcmalloc_helper.cpp - ) - add_executable(test_tcmalloc ${TEST_TCMALLOC_SOURCE_FILES}) + ) target_link_libraries(test_tcmalloc sisl ${COMMON_DEPS} GTest::gtest) add_test(NAME TestTcmalloc COMMAND test_tcmalloc) endif() diff --git a/src/fds/malloc_helper.hpp b/src/fds/malloc_helper.hpp index 8b948c4a..18742629 100644 --- a/src/fds/malloc_helper.hpp +++ b/src/fds/malloc_helper.hpp @@ -297,7 +297,7 @@ static size_t get_jemalloc_muzzy_page_count() { #endif /* Get the application total allocated memory. Relies on jemalloc. Returns 0 for other allocator. */ -[[maybe_unused]] static size_t get_total_memory(const bool refresh = true) { +[[maybe_unused]] static size_t get_total_memory([[maybe_unused]] const bool refresh = true) { size_t allocated{0}; #ifndef USING_TCMALLOC @@ -321,7 +321,7 @@ static size_t get_jemalloc_muzzy_page_count() { } #if defined(USING_TCMALLOC) -static void update_tcmalloc_range_stats(void* const arg, const base::MallocRange* const range) { +static void update_tcmalloc_range_stats([[maybe_unused]] void* const arg, const base::MallocRange* const range) { // LOGINFO("Range: address={}, length={}, Type={}, fraction={}", range->address, range->length, range->type, // range->fraction); diff --git a/src/fds/tests/obj_allocator_benchmark.cpp b/src/fds/tests/obj_allocator_benchmark.cpp index 7fca13ec..751640e6 100644 --- a/src/fds/tests/obj_allocator_benchmark.cpp +++ b/src/fds/tests/obj_allocator_benchmark.cpp @@ -49,7 +49,7 @@ void test_malloc(benchmark::State& state) { static thread_local std::random_device rd{}; static thread_local std::default_random_engine engine{rd()}; - for (auto [[maybe_unused]] si : state) { // Loops up to iteration count + for ([[maybe_unused]] auto si : state) { // Loops up to iteration count my_request* req; benchmark::DoNotOptimize(req = new my_request()); req->m_a = 10; @@ -69,7 +69,7 @@ void test_obj_alloc(benchmark::State& state) { uint64_t counter{0}; static thread_local std::random_device rd{}; static thread_local std::default_random_engine engine{rd()}; - for (auto [[maybe_unused]] si : state) { // Loops up to iteration count + for ([[maybe_unused]] auto si : state) { // Loops up to iteration count my_request* req; benchmark::DoNotOptimize(req = sisl::ObjectAllocator< my_request >::make_object()); req->m_a = 10; diff --git a/src/fds/tests/test_cb_mutex.cpp b/src/fds/tests/test_cb_mutex.cpp index ff72f644..b811ff43 100644 --- a/src/fds/tests/test_cb_mutex.cpp +++ b/src/fds/tests/test_cb_mutex.cpp @@ -69,7 +69,7 @@ class CBMutexTest : public testing::Test { template < typename I = MutexImpl > typename std::enable_if< !sisl::CallbackMutex< I >::shared_mode_supported, void >::type - thread_shared_fn(uint64_t count_per_thread) { + thread_shared_fn(uint64_t) { assert(0); } diff --git a/src/fds/tests/test_obj_allocator.cpp b/src/fds/tests/test_obj_allocator.cpp index f1624dd9..54086aff 100644 --- a/src/fds/tests/test_obj_allocator.cpp +++ b/src/fds/tests/test_obj_allocator.cpp @@ -42,7 +42,7 @@ class Node { }; } // namespace -int main(int argc, char** argv) { +int main() { Node< uint64_t >* const ptr1{sisl::ObjectAllocator< Node< uint64_t > >::make_object(~static_cast< uint64_t >(0))}; std::cout << "ptr1 = " << static_cast< const void* >(ptr1) << " Id = " << ptr1->get_id() << std::endl; sisl::ObjectAllocator< Node< uint64_t > >::deallocate(ptr1); diff --git a/src/file_watcher/CMakeLists.txt b/src/file_watcher/CMakeLists.txt index 80fc3020..7078cd12 100644 --- a/src/file_watcher/CMakeLists.txt +++ b/src/file_watcher/CMakeLists.txt @@ -1,18 +1,14 @@ -cmake_minimum_required (VERSION 3.10) +cmake_minimum_required (VERSION 3.11) -add_flags("-Wno-unused-parameter") - -include_directories(BEFORE .) - -set(FILE_WATCHER_SOURCE_FILES +add_library(sisl_file_watcher OBJECT) +target_sources(sisl_file_watcher PRIVATE file_watcher.cpp - ) -add_library(sisl_file_watcher OBJECT ${FILE_WATCHER_SOURCE_FILES}) + ) target_link_libraries(sisl_file_watcher ${COMMON_DEPS}) -set(TEST_FILE_WATCHER_SOURCES +add_executable(test_file_watcher) +target_sources(test_file_watcher PRIVATE file_watcher_test.cpp - ) -add_executable(test_file_watcher ${TEST_FILE_WATCHER_SOURCES}) + ) target_link_libraries(test_file_watcher sisl ${COMMON_DEPS} GTest::gtest GTest::gmock) add_test(NAME test_file_watcher COMMAND test_file_watcher) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 62114d6a..7fc5013b 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -1,21 +1,19 @@ -cmake_minimum_required(VERSION 3.10) - -if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")) - add_flags("-Wno-unused-parameter -Wno-cast-function-type") -endif() +cmake_minimum_required(VERSION 3.11) find_package(gRPC REQUIRED) add_subdirectory (proto) -add_library(flip +if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) + include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}) +endif() +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}) + +add_library(flip) +target_sources(flip PRIVATE lib/flip_rpc_server.cpp $ ) -if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) - target_include_directories(flip BEFORE PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) -endif() -target_include_directories(flip BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries(flip sisl gRPC::grpc++ @@ -23,25 +21,22 @@ target_link_libraries(flip nlohmann_json::nlohmann_json ) -add_executable(test_flip lib/test_flip.cpp) +add_executable(test_flip) +target_sources(test_flip PRIVATE + lib/test_flip.cpp + ) target_link_libraries(test_flip flip cxxopts::cxxopts) -if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) - target_include_directories(test_flip BEFORE PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) -endif() -target_include_directories(test_flip BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) add_test(NAME TestFlip COMMAND test_flip) -add_executable(test_flip_server lib/test_flip_server.cpp) -if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) - target_include_directories(test_flip_server BEFORE PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) -endif() -target_include_directories(test_flip_server BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) +add_executable(test_flip_server) +target_sources(test_flip_server PRIVATE + lib/test_flip_server.cpp + ) target_link_libraries(test_flip_server flip cxxopts::cxxopts) -add_executable(test_flip_local_client client/local/test_flip_local_client.cpp) -if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) - target_include_directories(test_flip_local_client BEFORE PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) -endif() -target_include_directories(test_flip_local_client BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) +add_executable(test_flip_local_client) +target_sources(test_flip_local_client PRIVATE + client/local/test_flip_local_client.cpp + ) target_link_libraries(test_flip_local_client flip cxxopts::cxxopts) add_test(NAME TestFlipLocalClient COMMAND test_flip_local_client) diff --git a/src/flip/client/local/test_flip_local_client.cpp b/src/flip/client/local/test_flip_local_client.cpp index b7d8c5bc..a98e79a2 100644 --- a/src/flip/client/local/test_flip_local_client.cpp +++ b/src/flip/client/local/test_flip_local_client.cpp @@ -109,7 +109,7 @@ void run_and_validate_delay_return_flip() { RELEASE_ASSERT(!g_flip.get_delay_flip< std::string >( "delay_simval_flip", - [closure_calls](std::string error) { + [closure_calls](std::string) { RELEASE_ASSERT(false, "Invalid closure called"); (*closure_calls)++; }, @@ -127,7 +127,7 @@ void run_and_validate_delay_return_flip() { RELEASE_ASSERT(!g_flip.get_delay_flip< std::string >( "delay_simval_flip", - [closure_calls](std::string error) { + [closure_calls](std::string) { RELEASE_ASSERT(false, "Invalid closure called"); (*closure_calls)++; }, diff --git a/src/flip/lib/flip_rpc_server.cpp b/src/flip/lib/flip_rpc_server.cpp index d03c2e6d..2a4a2b4e 100644 --- a/src/flip/lib/flip_rpc_server.cpp +++ b/src/flip/lib/flip_rpc_server.cpp @@ -26,14 +26,14 @@ #include "sisl/flip/flip.hpp" namespace flip { -grpc::Status FlipRPCServer::InjectFault(grpc::ServerContext* context, const FlipSpec* request, FlipResponse* response) { +grpc::Status FlipRPCServer::InjectFault(grpc::ServerContext*, const FlipSpec* request, FlipResponse* response) { LOGTRACEMOD(flip, "Flipspec request = {}", request->DebugString()); flip::Flip::instance().add(*request); response->set_success(true); return grpc::Status::OK; } -grpc::Status FlipRPCServer::GetFaults(grpc::ServerContext* context, const FlipNameRequest* request, +grpc::Status FlipRPCServer::GetFaults(grpc::ServerContext*, const FlipNameRequest* request, FlipListResponse* response) { LOGTRACEMOD(flip, "GetFaults request = {}", request->DebugString()); auto resp = request->name().size() ? flip::Flip::instance().get(request->name()) : flip::Flip::instance().get_all(); diff --git a/src/flip/lib/test_flip.cpp b/src/flip/lib/test_flip.cpp index 2861f5ba..485bcfa6 100644 --- a/src/flip/lib/test_flip.cpp +++ b/src/flip/lib/test_flip.cpp @@ -162,7 +162,7 @@ void run_and_validate_delay_return_flip(flip::Flip* flip) { RELEASE_ASSERT(!flip->get_delay_flip< std::string >( "delay_ret_fspec", - [closure_calls](std::string error) { + [closure_calls](std::string) { assert(0); (*closure_calls)++; }, @@ -180,7 +180,7 @@ void run_and_validate_delay_return_flip(flip::Flip* flip) { RELEASE_ASSERT(!flip->get_delay_flip< std::string >( "delay_ret_fspec", - [closure_calls](std::string error) { + [closure_calls](std::string) { assert(0); (*closure_calls)++; }, diff --git a/src/logging/CMakeLists.txt b/src/logging/CMakeLists.txt index 220ec409..97747fc4 100644 --- a/src/logging/CMakeLists.txt +++ b/src/logging/CMakeLists.txt @@ -1,19 +1,15 @@ -cmake_minimum_required (VERSION 3.10) - -if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")) - add_flags("-Wno-unused-parameter -Wno-cast-function-type") -endif() +cmake_minimum_required (VERSION 3.11) add_library(sisl_logging OBJECT) target_sources(sisl_logging PRIVATE backtrace.cpp logging.cpp stacktrace.cpp - ) + ) target_link_libraries(sisl_logging ${COMMON_DEPS}) add_executable(logging_example) target_sources(logging_example PRIVATE test/example.cpp - ) + ) target_link_libraries(logging_example sisl ${COMMON_DEPS}) diff --git a/src/metrics/CMakeLists.txt b/src/metrics/CMakeLists.txt index 977b9f33..a27db4f3 100644 --- a/src/metrics/CMakeLists.txt +++ b/src/metrics/CMakeLists.txt @@ -1,35 +1,37 @@ -cmake_minimum_required (VERSION 3.10) +cmake_minimum_required (VERSION 3.11) -if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")) - add_flags("-Wno-attributes") # needed for C++ 20 folly compilation -endif() +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}) -include_directories(BEFORE .) - -set(METRICS_SOURCE_FILES +add_library(sisl_metrics OBJECT) +target_sources(sisl_metrics PRIVATE metrics.cpp metrics_atomic.cpp metrics_group_impl.cpp metrics_rcu.cpp metrics_tlocal.cpp - ) -add_library(sisl_metrics OBJECT ${METRICS_SOURCE_FILES}) + ) target_link_libraries(sisl_metrics ${COMMON_DEPS} Folly::Folly ) -set(FARM_TEST_SOURCES tests/farm_test.cpp) -add_executable(metrics_farm_test ${FARM_TEST_SOURCES}) +add_executable(metrics_farm_test) +target_sources(metrics_farm_test PRIVATE + tests/farm_test.cpp + ) target_link_libraries(metrics_farm_test sisl ${COMMON_DEPS} GTest::gtest) add_test(NAME MetricsFarmTest COMMAND metrics_farm_test) -set(WRAPPER_TEST_SOURCES tests/wrapper_test.cpp) -add_executable(metrics_wrapper_test ${WRAPPER_TEST_SOURCES}) +add_executable(metrics_wrapper_test) +target_sources(metrics_wrapper_test PRIVATE + tests/wrapper_test.cpp + ) target_link_libraries(metrics_wrapper_test sisl ${COMMON_DEPS} GTest::gtest) add_test(NAME MetricsWrapperTest COMMAND metrics_wrapper_test) -set(METRICS_BENCHMARK_SOURCES tests/metrics_benchmark.cpp) -add_executable(metrics_benchmark ${METRICS_BENCHMARK_SOURCES}) +add_executable(metrics_benchmark) +target_sources(metrics_benchmark PRIVATE + tests/metrics_benchmark.cpp + ) target_link_libraries(metrics_benchmark sisl ${COMMON_DEPS} benchmark::benchmark) add_test(NAME MetricsBenchmarkTest COMMAND metrics_benchmark) diff --git a/src/options/CMakeLists.txt b/src/options/CMakeLists.txt index 5b24b5ce..2f1c38a7 100644 --- a/src/options/CMakeLists.txt +++ b/src/options/CMakeLists.txt @@ -1,15 +1,15 @@ -cmake_minimum_required (VERSION 3.10) +cmake_minimum_required (VERSION 3.11) add_library(sisl_options OBJECT) target_sources(sisl_options PRIVATE lib/options.cpp -) + ) target_link_libraries(sisl_options ${COMMON_DEPS}) add_executable(basic_test) target_sources(basic_test PRIVATE tests/basic.cpp -) + ) target_link_libraries(basic_test sisl ${COMMON_DEPS} GTest::gtest) if (DEFINED CONAN_BUILD_COVERAGE) diff --git a/src/settings/CMakeLists.txt b/src/settings/CMakeLists.txt index faa03181..deb6f94f 100644 --- a/src/settings/CMakeLists.txt +++ b/src/settings/CMakeLists.txt @@ -1,11 +1,11 @@ -cmake_minimum_required (VERSION 3.10) +cmake_minimum_required (VERSION 3.11) find_package(flatbuffers REQUIRED) add_library(sisl_settings OBJECT) target_sources(sisl_settings PRIVATE settings.cpp - ) + ) target_link_libraries(sisl_settings ${COMMON_DEPS} flatbuffers::flatbuffers diff --git a/src/utility/CMakeLists.txt b/src/utility/CMakeLists.txt index 09db905e..cfe00c51 100644 --- a/src/utility/CMakeLists.txt +++ b/src/utility/CMakeLists.txt @@ -1,42 +1,40 @@ -cmake_minimum_required (VERSION 3.10) +cmake_minimum_required (VERSION 3.11) -add_flags("-Wno-unused-parameter") +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}) -include_directories(BEFORE .) - -set(TEST_ATOMIC_COUNTER_SOURCES +add_executable(test_atomic_counter) +target_sources(test_atomic_counter PRIVATE tests/test_atomic_counter.cpp - ) -add_executable(test_atomic_counter ${TEST_ATOMIC_COUNTER_SOURCES}) + ) target_link_libraries(test_atomic_counter sisl ${COMMON_DEPS} GTest::gtest) add_test(NAME atomic_counter COMMAND test_atomic_counter) -set(TEST_THREAD_BUFFER +add_executable(test_thread_buffer) +target_sources(test_thread_buffer PRIVATE tests/test_thread_buffer.cpp - ) -add_executable(test_thread_buffer ${TEST_THREAD_BUFFER}) + ) target_link_libraries(test_thread_buffer ${COMMON_DEPS} GTest::gtest) add_test(NAME ThreadBufferTest COMMAND test_thread_buffer) -set(TEST_STATUS_FACTORY +add_executable(test_status_factory) +target_sources(test_status_factory PRIVATE tests/test_status_factory.cpp - ) -add_executable(test_status_factory ${TEST_STATUS_FACTORY}) + ) target_link_libraries(test_status_factory ${COMMON_DEPS} benchmark::benchmark) add_test(NAME StatusFactoryTest COMMAND test_status_factory) -set(TEST_ENUM +add_executable(test_enum) +target_sources(test_enum PRIVATE tests/test_enum.cpp - ) -add_executable(test_enum ${TEST_ENUM}) + ) target_link_libraries(test_enum ${COMMON_DEPS} GTest::gtest) add_test(NAME EnumTest COMMAND test_enum) if (${prerelease_dummy_FOUND}) - set(TEST_OBJLIFE + add_executable(test_objlife) + target_sources(test_objlife PRIVATE tests/test_objlife_counter.cpp - ) - add_executable(test_objlife ${TEST_OBJLIFE}) + ) target_link_libraries(test_objlife sisl ${COMMON_DEPS} GTest::gtest) add_test(NAME ObjLifeTest COMMAND test_objlife) endif () diff --git a/src/version/CMakeLists.txt b/src/version/CMakeLists.txt index 5fd9f2e9..5e84678d 100644 --- a/src/version/CMakeLists.txt +++ b/src/version/CMakeLists.txt @@ -1,18 +1,14 @@ -cmake_minimum_required (VERSION 3.10) - -if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") OR (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")) - add_flags("-Wno-attributes") # needed for C++ 20 folly compilation -endif() +cmake_minimum_required (VERSION 3.11) add_library(sisl_version OBJECT) target_sources(sisl_version PRIVATE version.cpp -) + ) target_link_libraries(sisl_version ${COMMON_DEPS} zmarok-semver::zmarok-semver) add_executable(test_version) target_sources(test_version PRIVATE tests/test_version.cpp -) + ) target_link_libraries(test_version sisl ${COMMON_DEPS} zmarok-semver::zmarok-semver GTest::gtest) add_test(NAME VersionTest COMMAND test_version) diff --git a/src/wisr/CMakeLists.txt b/src/wisr/CMakeLists.txt index 465532ee..5afe6e81 100644 --- a/src/wisr/CMakeLists.txt +++ b/src/wisr/CMakeLists.txt @@ -1,36 +1,34 @@ -cmake_minimum_required (VERSION 3.10) +cmake_minimum_required (VERSION 3.11) -add_flags("-Wno-unused-parameter") +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}) -include_directories(BEFORE .) - -set(WISR_VECTOR_TEST +add_executable(wisr_vector_test) +target_sources(wisr_vector_test PRIVATE tests/test_wisr_vector.cpp - ) -add_executable(wisr_vector_test ${WISR_VECTOR_TEST}) + ) target_link_libraries(wisr_vector_test ${COMMON_DEPS} benchmark::benchmark GTest::gtest) add_test(NAME WisrVectorTest COMMAND wisr_vector_test) -set(WISR_VECTOR_BENCHMARK +add_executable(wisr_vector_benchmark) +target_sources(wisr_vector_benchmark PRIVATE tests/wisr_vector_benchmark.cpp - ) -add_executable(wisr_vector_benchmark ${WISR_VECTOR_BENCHMARK}) + ) target_link_libraries(wisr_vector_benchmark ${COMMON_DEPS} benchmark::benchmark) -set(WISR_LIST_BENCHMARK +add_executable(wisr_list_benchmark) +target_sources(wisr_list_benchmark PRIVATE tests/wisr_list_benchmark.cpp - ) -add_executable(wisr_list_benchmark ${WISR_LIST_BENCHMARK}) + ) target_link_libraries(wisr_list_benchmark ${COMMON_DEPS} benchmark::benchmark) -set(WISR_DEQUE_BENCHMARK +add_executable(wisr_deque_benchmark) +target_sources(wisr_deque_benchmark PRIVATE tests/wisr_deque_benchmark.cpp - ) -add_executable(wisr_deque_benchmark ${WISR_DEQUE_BENCHMARK}) + ) target_link_libraries(wisr_deque_benchmark ${COMMON_DEPS} benchmark::benchmark) -set(WISR_INTRUSIVE_SLIST_BENCHMARK +add_executable(wisr_intrusive_slist_benchmark) +target_sources(wisr_intrusive_slist_benchmark PRIVATE tests/wisr_intrusive_slist_benchmark.cpp - ) -add_executable(wisr_intrusive_slist_benchmark ${WISR_INTRUSIVE_SLIST_BENCHMARK}) + ) target_link_libraries(wisr_intrusive_slist_benchmark ${COMMON_DEPS} benchmark::benchmark) From 98765f50a6070a0d1840d4a793d99deeaa304603 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 9 Nov 2022 11:15:43 -0700 Subject: [PATCH 178/385] Remove flip from non-linux builds. --- conanfile.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index dccbc9dd..390c1075 100644 --- a/conanfile.py +++ b/conanfile.py @@ -129,10 +129,11 @@ def package(self): copy(self, "settings_gen.cmake", join(self.source_folder, "cmake"), join(self.package_folder, "cmake"), keep_path=False) def package_info(self): - self.cpp_info.libs = ["sisl", "flip"] + self.cpp_info.libs = ["sisl"] self.cpp_info.cppflags.extend(["-fconcepts"]) if self.settings.os == "Linux": + self.cpp_info.libs.append("flip") self.cpp_info.cppflags.append("-D_POSIX_C_SOURCE=200809L") self.cpp_info.cppflags.append("-D_FILE_OFFSET_BITS=64") self.cpp_info.cppflags.append("-D_LARGEFILE64") From 00d0b673dac197867b143310e48367b233085719 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 9 Nov 2022 11:23:09 -0700 Subject: [PATCH 179/385] Fix release build. --- src/flip/lib/test_flip.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/flip/lib/test_flip.cpp b/src/flip/lib/test_flip.cpp index 485bcfa6..a151f518 100644 --- a/src/flip/lib/test_flip.cpp +++ b/src/flip/lib/test_flip.cpp @@ -153,7 +153,7 @@ void run_and_validate_delay_return_flip(flip::Flip* flip) { RELEASE_ASSERT(flip->get_delay_flip< std::string >( "delay_ret_fspec", - [closure_calls](std::string error) { + [closure_calls]([[maybe_unused]] std::string error) { (*closure_calls)++; DEBUG_ASSERT_EQ(error, "Delayed error simulated value"); }, @@ -171,7 +171,7 @@ void run_and_validate_delay_return_flip(flip::Flip* flip) { RELEASE_ASSERT(flip->get_delay_flip< std::string >( "delay_ret_fspec", - [closure_calls](std::string error) { + [closure_calls]([[maybe_unused]] std::string error) { DEBUG_ASSERT_EQ(error, "Delayed error simulated value"); (*closure_calls)++; }, From f7aa76c14932a341e21b21abca6c6967321bb469 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 9 Nov 2022 11:44:17 -0700 Subject: [PATCH 180/385] No fconcepts attribute in CLang. --- conanfile.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index 390c1075..7395674f 100644 --- a/conanfile.py +++ b/conanfile.py @@ -130,7 +130,9 @@ def package(self): def package_info(self): self.cpp_info.libs = ["sisl"] - self.cpp_info.cppflags.extend(["-fconcepts"]) + + if self.settings.compiler == "gcc": + self.cpp_info.cppflags.extend(["-fconcepts"]) if self.settings.os == "Linux": self.cpp_info.libs.append("flip") From 0787bf0fb21e2ecbee38c88d76f00f400cfc26fb Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 9 Nov 2022 11:50:43 -0700 Subject: [PATCH 181/385] Remove unused cmake files. --- cmake/grpc.cmake | 57 -------------------------------- cmake/protobuf.cmake | 77 -------------------------------------------- 2 files changed, 134 deletions(-) delete mode 100644 cmake/grpc.cmake delete mode 100644 cmake/protobuf.cmake diff --git a/cmake/grpc.cmake b/cmake/grpc.cmake deleted file mode 100644 index 14df71de..00000000 --- a/cmake/grpc.cmake +++ /dev/null @@ -1,57 +0,0 @@ -find_program(GRPC_CPP_PLUGIN grpc_cpp_plugin) # Get full path to plugin - -function(PROTOBUF_GENERATE_GRPC_CPP SRCS HDRS) - if(NOT ARGN) - message(SEND_ERROR "Error: PROTOBUF_GENERATE_GRPC_CPP() called without any proto files") - return() - endif() - - if(PROTOBUF_GENERATE_CPP_APPEND_PATH) # This variable is common for all types of output. - # Create an include path for each file specified - foreach(FIL ${ARGN}) - get_filename_component(ABS_FIL ${FIL} ABSOLUTE) - get_filename_component(ABS_PATH ${ABS_FIL} PATH) - list(FIND _protobuf_include_path ${ABS_PATH} _contains_already) - if(${_contains_already} EQUAL -1) - list(APPEND _protobuf_include_path -I ${ABS_PATH}) - endif() - endforeach() - else() - set(_protobuf_include_path -I ${CMAKE_CURRENT_SOURCE_DIR}) - endif() - - if(DEFINED PROTOBUF_IMPORT_DIRS) - foreach(DIR ${Protobuf_IMPORT_DIRS}) - get_filename_component(ABS_PATH ${DIR} ABSOLUTE) - list(FIND _protobuf_include_path ${ABS_PATH} _contains_already) - if(${_contains_already} EQUAL -1) - list(APPEND _protobuf_include_path -I ${ABS_PATH}) - endif() - endforeach() - endif() - - set(${SRCS}) - set(${HDRS}) - foreach(FIL ${ARGN}) - get_filename_component(ABS_FIL ${FIL} ABSOLUTE) - get_filename_component(FIL_WE ${FIL} NAME_WE) - - list(APPEND ${SRCS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.cc") - list(APPEND ${HDRS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.h") - - add_custom_command( - OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.cc" - "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.h" - COMMAND ${Protobuf_PROTOC_EXECUTABLE} - ARGS --grpc_out=${CMAKE_CURRENT_BINARY_DIR} - --plugin=protoc-gen-grpc=${GRPC_CPP_PLUGIN} - ${_protobuf_include_path} ${ABS_FIL} - DEPENDS ${ABS_FIL} ${Protobuf_PROTOC_EXECUTABLE} - COMMENT "Running gRPC C++ protocol buffer compiler on ${FIL}" - VERBATIM) - endforeach() - - set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE) - set(${SRCS} ${${SRCS}} PARENT_SCOPE) - set(${HDRS} ${${HDRS}} PARENT_SCOPE) -endfunction() diff --git a/cmake/protobuf.cmake b/cmake/protobuf.cmake deleted file mode 100644 index a507eaee..00000000 --- a/cmake/protobuf.cmake +++ /dev/null @@ -1,77 +0,0 @@ - -# protobuf_generate -# -------------------------- -# -# Add custom commands to process ``.proto`` files to C++ using protoc and -# GRPC plugin: -# -# protobuf_generate( <*.proto files>) -# -# ``ARGN`` -# ``.proto`` files -# -macro(m_protobuf_generate _target) - message(STATUS "inside protobuf_generate_grpc_cpp") - if(NOT TARGET ${_target}) - message(SEND_ERROR "protobuf_generate requires target as first argument") - return() - endif() - if(NOT ${ARGC} GREATER 1) - message(SEND_ERROR "Error: PROTOBUF_GENERATE_GRPC_CPP() called without any proto files as arguments") - return() - endif() - if(NOT _generated_headers) - set(_generated_headers) - endif() - # set(_protobuf_include_path -I . ) - foreach(FIL ${ARGN}) - get_filename_component(ABS_FIL ${FIL} ABSOLUTE) - message(STATUS "protobuf_generate_grpc_cpp: processing ${ABS_FIL}") - get_filename_component(FIL_WE ${FIL} NAME_WE) - file(RELATIVE_PATH REL_FIL ${CMAKE_CURRENT_SOURCE_DIR}/proto ${ABS_FIL}) - get_filename_component(REL_DIR ${REL_FIL} DIRECTORY) - set(RELFIL_WE "${REL_DIR}/${FIL_WE}") - - set(_GEN_HEADERS - ${_gRPC_PROTO_GENS_DIR}/${RELFIL_WE}.grpc.pb.h; - ${_gRPC_PROTO_GENS_DIR}/${RELFIL_WE}.pb.h -# ${_gRPC_PROTO_GENS_DIR}/${RELFIL_WE}_mock.grpc.pb.h - ) - list(APPEND _generated_headers "${_GEN_HEADERS}") - set(_GEN_SOURCES - ${_gRPC_PROTO_GENS_DIR}/${RELFIL_WE}.grpc.pb.cc - ${_gRPC_PROTO_GENS_DIR}/${RELFIL_WE}.pb.cc - ) - #add generated sources to the sources of _target - target_sources(${_target} PRIVATE ${_GEN_SOURCES}) - set_source_files_properties(${_GEN_SOURCES} ${_GEN_HEADERS} PROPERTIES GENERATED TRUE) - - add_custom_command( - OUTPUT ${_GEN_SOURCES} ${_GEN_HEADERS} - COMMAND ${deps_prefix}/bin/protoc - ARGS --grpc_out=generate_mock_code=false:${_gRPC_PROTO_GENS_DIR} - --cpp_out=${_gRPC_PROTO_GENS_DIR} - --plugin=protoc-gen-grpc=${deps_prefix}/bin/grpc_cpp_plugin - -I . - ${REL_FIL} - DEPENDS ${FIL} - WORKING_DIRECTORY ${_PROTO_IMPORT_DIR} - COMMENT "Running gRPC C++ protocol buffer compiler on ${FIL}" - VERBATIM - ) - message(STATUS "protoc will generate ${_GEN_SOURCES} and ${_GEN_HEADERS}") - #since some of the headers generated by this command are also included in hand-written sources make this command run before the _target - get_property(_sources TARGET ${_target} PROPERTY SOURCES) - set_source_files_properties(${_sources} PROPERTIES OBJECT_DEPENDS "${_GEN_HEADERS}") - - # foreach(_source ${_sources}) - # message(STATUS "setting files ${_source} to depend on ${_GEN_HEADERS}") - # set_source_files_properties(${_source} PROPERTIES OBJECT_DEPENDS "${_GEN_HEADERS}") - # endforeach() - - # #since some of the headers generated by this command also included in hand-written sources make this command run before the _target - # set(_custom_target_name "touch-protoc-${FIL_WE}.proto") - # add_custom_target(${_custom_target_name} touch ${_custom_target_name} DEPENDS ${_GEN_SRCS_CC}) - # add_dependencies(${_target} ${_custom_target_name}) - endforeach() -endmacro() From 3a76f24b28c8bfd0b9ce59b638ee3d128aa7bda1 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 9 Nov 2022 12:06:06 -0700 Subject: [PATCH 182/385] Remove pistache from CLang builds. --- conanfile.py | 2 +- src/CMakeLists.txt | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/conanfile.py b/conanfile.py index 7395674f..dc096341 100644 --- a/conanfile.py +++ b/conanfile.py @@ -40,7 +40,7 @@ class SISLConan(ConanFile): def build_requirements(self): self.build_requires("benchmark/1.6.1") self.build_requires("gtest/1.11.0") - if self.settings.os in ["Linux"]: + if self.settings.compiler in ["gcc"]: self.build_requires("pistache/cci.20201127") def requirements(self): diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 5c59d9b5..03e5d67d 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -9,8 +9,13 @@ add_subdirectory (version) # on Folly and pistache. It is unknown if Windows is supported... list(APPEND POSIX_LIBRARIES ) list(APPEND SISL_DEPS ) -if(${CMAKE_HOST_SYSTEM_NAME} STREQUAL Linux) +if(${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") add_subdirectory (auth_manager) + list(APPEND POSIX_LIBRARIES + $ + ) +endif() +if(${CMAKE_HOST_SYSTEM_NAME} STREQUAL Linux) add_subdirectory (cache) add_subdirectory (fds) add_subdirectory (file_watcher) @@ -21,7 +26,6 @@ if(${CMAKE_HOST_SYSTEM_NAME} STREQUAL Linux) add_subdirectory (wisr) list(APPEND POSIX_LIBRARIES - $ $ $ $ From a5d3d2558d293f25d6eaa7cdbdf8d357e9ca655f Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 10 Nov 2022 08:54:57 -0700 Subject: [PATCH 183/385] Final re-structuring. Limit exported headers to public API. --- conanfile.py | 9 ++++----- {src => include/sisl}/fds/atomic_status_counter.hpp | 0 {src => include/sisl}/fds/compress.hpp | 0 {src => include/sisl}/fds/malloc_helper.hpp | 0 {src => include/sisl}/fds/vector_pool.hpp | 0 {src => include/sisl}/file_watcher/file_watcher.hpp | 0 src/fds/callback_mutex.hpp | 2 +- src/fds/tests/test_atomic_status_counter.cpp | 2 +- src/fds/tests/test_jemalloc_helper.cpp | 2 +- src/fds/tests/test_tcmalloc_helper.cpp | 2 +- src/file_watcher/file_watcher.cpp | 2 +- src/file_watcher/file_watcher_test.cpp | 2 +- src/options/CMakeLists.txt | 2 +- src/options/{lib => }/options.cpp | 0 14 files changed, 11 insertions(+), 12 deletions(-) rename {src => include/sisl}/fds/atomic_status_counter.hpp (100%) rename {src => include/sisl}/fds/compress.hpp (100%) rename {src => include/sisl}/fds/malloc_helper.hpp (100%) rename {src => include/sisl}/fds/vector_pool.hpp (100%) rename {src => include/sisl}/file_watcher/file_watcher.hpp (100%) rename src/options/{lib => }/options.cpp (100%) diff --git a/conanfile.py b/conanfile.py index dc096341..015b08ac 100644 --- a/conanfile.py +++ b/conanfile.py @@ -120,12 +120,11 @@ def package(self): copy(self, "*.proto", join(self.source_folder, "src/flip/proto/"), join(self.package_folder, "proto/flip/"), keep_path=False) copy(self, "*", join(self.source_folder, "src/flip/client/python/"), join(self.package_folder, "bindings/flip/python/"), keep_path=False) - hdr_dir = join(self.package_folder, "include") - copy(self, "*.h*", join(self.source_folder, "include"), hdr_dir, keep_path=True) + copy(self, "*.h*", join(self.source_folder, "include"), join(self.package_folder, "include"), keep_path=True) - old_hdr_dir = join(self.package_folder, "include", "sisl") - copy(self, "*.hpp", join(self.source_folder, "src"), old_hdr_dir, keep_path=True) - copy(self, "*.h", join(self.source_folder, "src"), old_hdr_dir, keep_path=True) + gen_dir = join(self.package_folder, "include", "sisl") + copy(self, "*.pb.h", join(self.build_folder, "src"), gen_dir, keep_path=True) + copy(self, "*security_config_generated.h", join(self.build_folder, "src"), gen_dir, keep_path=True) copy(self, "settings_gen.cmake", join(self.source_folder, "cmake"), join(self.package_folder, "cmake"), keep_path=False) def package_info(self): diff --git a/src/fds/atomic_status_counter.hpp b/include/sisl/fds/atomic_status_counter.hpp similarity index 100% rename from src/fds/atomic_status_counter.hpp rename to include/sisl/fds/atomic_status_counter.hpp diff --git a/src/fds/compress.hpp b/include/sisl/fds/compress.hpp similarity index 100% rename from src/fds/compress.hpp rename to include/sisl/fds/compress.hpp diff --git a/src/fds/malloc_helper.hpp b/include/sisl/fds/malloc_helper.hpp similarity index 100% rename from src/fds/malloc_helper.hpp rename to include/sisl/fds/malloc_helper.hpp diff --git a/src/fds/vector_pool.hpp b/include/sisl/fds/vector_pool.hpp similarity index 100% rename from src/fds/vector_pool.hpp rename to include/sisl/fds/vector_pool.hpp diff --git a/src/file_watcher/file_watcher.hpp b/include/sisl/file_watcher/file_watcher.hpp similarity index 100% rename from src/file_watcher/file_watcher.hpp rename to include/sisl/file_watcher/file_watcher.hpp diff --git a/src/fds/callback_mutex.hpp b/src/fds/callback_mutex.hpp index 60a7f30d..e99125db 100644 --- a/src/fds/callback_mutex.hpp +++ b/src/fds/callback_mutex.hpp @@ -17,7 +17,7 @@ #include #include #include -#include "vector_pool.hpp" +#include "sisl/fds/vector_pool.hpp" #include // Generate the metafunction diff --git a/src/fds/tests/test_atomic_status_counter.cpp b/src/fds/tests/test_atomic_status_counter.cpp index 24f84d60..b960c75d 100644 --- a/src/fds/tests/test_atomic_status_counter.cpp +++ b/src/fds/tests/test_atomic_status_counter.cpp @@ -19,7 +19,7 @@ #include -#include "atomic_status_counter.hpp" +#include "sisl/fds/atomic_status_counter.hpp" using namespace sisl; diff --git a/src/fds/tests/test_jemalloc_helper.cpp b/src/fds/tests/test_jemalloc_helper.cpp index b164c495..88ca01c2 100644 --- a/src/fds/tests/test_jemalloc_helper.cpp +++ b/src/fds/tests/test_jemalloc_helper.cpp @@ -29,7 +29,7 @@ #include -#include "malloc_helper.hpp" +#include "sisl/fds/malloc_helper.hpp" using namespace sisl; diff --git a/src/fds/tests/test_tcmalloc_helper.cpp b/src/fds/tests/test_tcmalloc_helper.cpp index f4b71834..ad63453d 100644 --- a/src/fds/tests/test_tcmalloc_helper.cpp +++ b/src/fds/tests/test_tcmalloc_helper.cpp @@ -27,7 +27,7 @@ #include -#include "malloc_helper.hpp" +#include "sisl/fds/malloc_helper.hpp" using namespace sisl; diff --git a/src/file_watcher/file_watcher.cpp b/src/file_watcher/file_watcher.cpp index f83fd590..b34c7d42 100644 --- a/src/file_watcher/file_watcher.cpp +++ b/src/file_watcher/file_watcher.cpp @@ -6,7 +6,7 @@ #include #include -#include "file_watcher.hpp" +#include "sisl/file_watcher/file_watcher.hpp" #include "sisl/utility/thread_factory.hpp" namespace sisl { diff --git a/src/file_watcher/file_watcher_test.cpp b/src/file_watcher/file_watcher_test.cpp index 7d20e64c..b6081405 100644 --- a/src/file_watcher/file_watcher_test.cpp +++ b/src/file_watcher/file_watcher_test.cpp @@ -9,7 +9,7 @@ #include #include -#include "file_watcher.hpp" +#include "sisl/file_watcher/file_watcher.hpp" #include SISL_LOGGING_INIT(test_file_watcher) diff --git a/src/options/CMakeLists.txt b/src/options/CMakeLists.txt index 2f1c38a7..03cb3e05 100644 --- a/src/options/CMakeLists.txt +++ b/src/options/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required (VERSION 3.11) add_library(sisl_options OBJECT) target_sources(sisl_options PRIVATE - lib/options.cpp + options.cpp ) target_link_libraries(sisl_options ${COMMON_DEPS}) diff --git a/src/options/lib/options.cpp b/src/options/options.cpp similarity index 100% rename from src/options/lib/options.cpp rename to src/options/options.cpp From 168bc5336e07957f44c985dc1a85c59b0ef50a5a Mon Sep 17 00:00:00 2001 From: "Ravi Akella email = raakella@ebay.com" Date: Sun, 23 Oct 2022 02:56:11 -0700 Subject: [PATCH 184/385] Add generic serveice move authorization to grpc server --- include/grpc_helper/generic_service.hpp | 102 ++++++++++++++++++++++++ include/grpc_helper/rpc_call.hpp | 41 ++-------- include/grpc_helper/rpc_common.hpp | 24 ++---- include/grpc_helper/rpc_server.hpp | 20 ++++- lib/rpc_server.cpp | 45 +++++++++++ 5 files changed, 176 insertions(+), 56 deletions(-) create mode 100644 include/grpc_helper/generic_service.hpp diff --git a/include/grpc_helper/generic_service.hpp b/include/grpc_helper/generic_service.hpp new file mode 100644 index 00000000..2d82b693 --- /dev/null +++ b/include/grpc_helper/generic_service.hpp @@ -0,0 +1,102 @@ +#pragma once + +#include +#include "rpc_call.hpp" + +namespace grpc_helper { + +using generic_rpc_handler_cb_t = std::function< bool(const boost::intrusive_ptr< GenericRpcData >&) >; + +/** + * Callbacks are registered by a name. The client generic stub uses the method name to call the RPC + * We assume the Request and Response types are grpc::ByteBuffer + * The user is responsible to serialize / deserialize their messages to and from grpc::ByteBuffer + */ + +class GenericRpcStaticInfo : public RpcStaticInfoBase { +public: + GenericRpcStaticInfo(GrpcServer* server, size_t idx) : m_server{server}, m_rpc_idx{idx} {} + + GrpcServer* m_server; + grpc::AsyncGenericService m_generic_service; + size_t m_rpc_idx; +}; + +class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcData > { +public: + static RpcDataAbstract* make(GenericRpcStaticInfo* rpc_info, size_t queue_idx) { + return new GenericRpcData(rpc_info, queue_idx); + } + + RpcDataAbstract* create_new() override { return new GenericRpcData(m_rpc_info, m_queue_idx); } + void set_status(grpc::Status status) { m_retstatus = status; } + + ~GenericRpcData() override = default; + + size_t get_rpc_idx() const override { return m_rpc_info->m_rpc_idx; } + + void enqueue_call_request(::grpc::ServerCompletionQueue& cq) override { + m_rpc_info->m_generic_service.RequestCall(&m_ctx, &m_stream, &cq, &cq, + static_cast< void* >(m_request_received_tag.ref())); + } + + GenericRpcData(GenericRpcStaticInfo* rpc_info, size_t queue_idx) : + RpcDataAbstract{queue_idx}, m_rpc_info{rpc_info}, m_stream(&m_ctx) {} + +private: + GenericRpcStaticInfo* m_rpc_info; + grpc::GenericServerAsyncReaderWriter m_stream; + grpc::GenericServerContext m_ctx; + grpc::ByteBuffer m_request; + grpc::ByteBuffer m_response; + std::atomic_bool m_is_canceled{false}; + grpc::Status m_retstatus{grpc::Status::OK}; + +private: + RpcDataAbstract* on_request_received(bool ok) { + bool in_shutdown = RPCHelper::has_server_shutdown(m_rpc_info->m_server); + + if (ok && !m_is_canceled.load(std::memory_order_relaxed)) { + m_stream.Read(&m_request, static_cast< void* >(m_buf_read_tag.ref())); + } + + return in_shutdown ? nullptr : create_new(); + } + + RpcDataAbstract* on_buf_read(bool ok) { + RPCHelper::run_generic_handler_cb(m_rpc_info->m_server, m_ctx.method(), + boost::intrusive_ptr< GenericRpcData >{this}); + m_stream.Write(m_response, static_cast< void* >(m_buf_write_tag.ref())); + return nullptr; + } + + RpcDataAbstract* on_buf_write(bool ok) { + m_stream.Finish(m_retstatus, static_cast< void* >(m_request_completed_tag.ref())); + return nullptr; + } + + RpcDataAbstract* on_request_completed(bool ok) { + if (m_ctx.IsCancelled()) { m_is_canceled.store(true, std::memory_order_release); } + return nullptr; + } + + struct RpcTagImpl : public RpcTag { + using callback_type = RpcDataAbstract* (GenericRpcData::*)(bool ok); + RpcTagImpl(GenericRpcData* rpc, callback_type cb) : RpcTag{rpc}, m_callback{cb} {} + + RpcDataAbstract* do_process(bool ok) override { + return (static_cast< GenericRpcData* >(m_rpc_data)->*m_callback)(ok); + } + + callback_type m_callback; + }; + + // Used as void* completion markers from grpc to indicate different events of interest for a + // Call. + RpcTagImpl m_request_received_tag{this, &GenericRpcData::on_request_received}; + RpcTagImpl m_buf_read_tag{this, &GenericRpcData::on_buf_read}; + RpcTagImpl m_buf_write_tag{this, &GenericRpcData::on_buf_write}; + RpcTagImpl m_request_completed_tag{this, &GenericRpcData::on_request_completed}; +}; + +} // namespace grpc_helper \ No newline at end of file diff --git a/include/grpc_helper/rpc_call.hpp b/include/grpc_helper/rpc_call.hpp index baea9dd9..576ea7eb 100644 --- a/include/grpc_helper/rpc_call.hpp +++ b/include/grpc_helper/rpc_call.hpp @@ -13,7 +13,6 @@ #include #include #include -#include #include "rpc_common.hpp" SISL_LOGGING_DECL(grpc_server) @@ -117,15 +116,14 @@ class RpcStaticInfo : public RpcStaticInfoBase { public: RpcStaticInfo(GrpcServer* server, typename ServiceT::AsyncService& svc, const request_call_cb_t& call_cb, const rpc_handler_cb_t& rpc_cb, const rpc_completed_cb_t& comp_cb, size_t idx, - const std::string& name, std::shared_ptr< sisl::AuthManager > auth_mgr) : + const std::string& name) : m_server{server}, m_svc{svc}, m_req_call_cb{call_cb}, m_handler_cb{rpc_cb}, m_comp_cb{comp_cb}, m_rpc_idx{idx}, - m_rpc_name{name}, - m_auth_mgr{auth_mgr} {} + m_rpc_name{name} {} GrpcServer* m_server; typename ServiceT::AsyncService& m_svc; @@ -134,7 +132,6 @@ class RpcStaticInfo : public RpcStaticInfoBase { rpc_completed_cb_t m_comp_cb; size_t m_rpc_idx; std::string m_rpc_name; - std::shared_ptr< sisl::AuthManager > m_auth_mgr; }; /** @@ -225,38 +222,10 @@ class RpcData : public RpcDataAbstract, sisl::ObjLifeCounter< RpcData< ServiceT, private: bool do_authorization() { - bool ret{true}; - // Auth is enabled if auth mgr is not null - if (m_rpc_info->m_auth_mgr) { - auto& client_headers = m_ctx.client_metadata(); - if (auto it = client_headers.find("authorization"); it != client_headers.end()) { - const std::string bearer{"Bearer "}; - if (it->second.starts_with(bearer)) { - auto token_ref = it->second.substr(bearer.size()); - std::string raw_token{token_ref.begin(), token_ref.end()}; - std::string msg; - m_retstatus = grpc::Status( - RPCHelper::to_grpc_statuscode(m_rpc_info->m_auth_mgr->verify(raw_token, msg)), msg); - ret = m_retstatus.error_code() == grpc::StatusCode::OK; - } else { - m_retstatus = - grpc::Status(grpc::StatusCode::UNAUTHENTICATED, - grpc::string("authorization header value does not start with 'Bearer '")); - RPC_SERVER_LOG(ERROR, - "authorization header value does not start with Bearer, client_req_context={}, " - "from peer={}", - get_client_req_context(), get_peer_info()); - } - } else { - m_retstatus = - grpc::Status(grpc::StatusCode::UNAUTHENTICATED, grpc::string("missing header authorization")); - ret = false; - RPC_SERVER_LOG(ERROR, "missing header authorization, client_req_context={}, from peer={}", - get_client_req_context(), get_peer_info()); - } - } - return ret; + m_retstatus = RPCHelper::do_authorization(m_rpc_info->m_server, &m_ctx); + return m_retstatus.error_code() == grpc::StatusCode::OK; } + // The implementation of this method should dispatch the request for processing by calling // do_start_request_processing One reference on `this` is transferred to the callee, and the // callee is responsible for releasing it (typically via `RpcData::send_response(..)`). diff --git a/include/grpc_helper/rpc_common.hpp b/include/grpc_helper/rpc_common.hpp index ba00a45a..e26abaeb 100644 --- a/include/grpc_helper/rpc_common.hpp +++ b/include/grpc_helper/rpc_common.hpp @@ -2,26 +2,12 @@ namespace grpc_helper { class GrpcServer; +class GenericRpcData; struct RPCHelper { static bool has_server_shutdown(const GrpcServer* server); - - static grpc::StatusCode to_grpc_statuscode(const sisl::AuthVerifyStatus status) { - grpc::StatusCode ret; - switch (status) { - case sisl::AuthVerifyStatus::OK: - ret = grpc::StatusCode::OK; - break; - case sisl::AuthVerifyStatus::UNAUTH: - ret = grpc::StatusCode::UNAUTHENTICATED; - break; - case sisl::AuthVerifyStatus::FORBIDDEN: - ret = grpc::StatusCode::PERMISSION_DENIED; - break; - default: - ret = grpc::StatusCode::UNKNOWN; - break; - } - return ret; - } + static void run_generic_handler_cb(const GrpcServer* server, const std::string& method, + const boost::intrusive_ptr< GenericRpcData >& rpc_data); + static grpc::Status do_authorization(const GrpcServer* server, const grpc::ServerContext* srv_ctx); + static grpc::StatusCode to_grpc_statuscode(const sisl::AuthVerifyStatus status); }; } // namespace grpc_helper diff --git a/include/grpc_helper/rpc_server.hpp b/include/grpc_helper/rpc_server.hpp index ef1f7e39..d427824f 100644 --- a/include/grpc_helper/rpc_server.hpp +++ b/include/grpc_helper/rpc_server.hpp @@ -10,6 +10,7 @@ #include #include #include "rpc_call.hpp" +#include "generic_service.hpp" namespace grpc_helper { @@ -74,7 +75,7 @@ class GrpcServer : private boost::noncopyable { std::unique_lock lg(m_rpc_registry_mtx); rpc_idx = m_rpc_registry.size(); m_rpc_registry.emplace_back(new RpcStaticInfo< ServiceT, ReqT, RespT, false >( - this, *svc, request_call_cb, rpc_handler, done_handler, rpc_idx, name, m_auth_mgr)); + this, *svc, request_call_cb, rpc_handler, done_handler, rpc_idx, name)); // Register one call per cq. for (auto i = 0u; i < m_cqs.size(); ++i) { @@ -96,6 +97,22 @@ class GrpcServer : private boost::noncopyable { }); } + void run_generic_handler_cb(const std::string& rpc_name, + const boost::intrusive_ptr< GenericRpcData >& rpc_data) const { + auto it = m_generic_rpc_registry.find(rpc_name); + if (it == m_generic_rpc_registry.end()) { + LOGMSG_ASSERT(false, "generic RPC not registered"); + return; + } + (it->second)(rpc_data); + } + + bool is_auth_enabled() const { return m_auth_mgr != nullptr; } + + sisl::AuthVerifyStatus auth_verify(const std::string& token, std::string& msg) const { + return m_auth_mgr->verify(token, msg); + } + private: void handle_rpcs(uint32_t thread_num, const rpc_thread_start_cb_t& thread_start_cb); @@ -112,5 +129,6 @@ class GrpcServer : private boost::noncopyable { std::mutex m_rpc_registry_mtx; std::vector< std::unique_ptr< RpcStaticInfoBase > > m_rpc_registry; std::shared_ptr< sisl::AuthManager > m_auth_mgr; + std::unordered_map< std::string, generic_rpc_handler_cb_t > m_generic_rpc_registry; }; } // namespace grpc_helper diff --git a/lib/rpc_server.cpp b/lib/rpc_server.cpp index cf069664..08459213 100644 --- a/lib/rpc_server.cpp +++ b/lib/rpc_server.cpp @@ -128,8 +128,53 @@ void GrpcServer::shutdown() { } } +// RPCHelper static methods + bool RPCHelper::has_server_shutdown(const GrpcServer* server) { return (server->m_state.load(std::memory_order_acquire) != ServerState::RUNNING); } +void RPCHelper::run_generic_handler_cb(const GrpcServer* server, const std::string& method, + const boost::intrusive_ptr< GenericRpcData >& rpc_data) { + server->run_generic_handler_cb(method, rpc_data); +} + +grpc::Status RPCHelper::do_authorization(const GrpcServer* server, const grpc::ServerContext* srv_ctx) { + if (!server->is_auth_enabled()) { return grpc::Status(); } + auto& client_headers = srv_ctx->client_metadata(); + if (auto it = client_headers.find("authorization"); it != client_headers.end()) { + const std::string bearer{"Bearer "}; + if (it->second.starts_with(bearer)) { + auto token_ref = it->second.substr(bearer.size()); + std::string raw_token{token_ref.begin(), token_ref.end()}; + std::string msg; + return grpc::Status(RPCHelper::to_grpc_statuscode(server->auth_verify(raw_token, msg)), msg); + } else { + return grpc::Status(grpc::StatusCode::UNAUTHENTICATED, + grpc::string("authorization header value does not start with 'Bearer '")); + } + } else { + return grpc::Status(grpc::StatusCode::UNAUTHENTICATED, grpc::string("missing header authorization")); + } +} + +grpc::StatusCode RPCHelper::to_grpc_statuscode(const sisl::AuthVerifyStatus status) { + grpc::StatusCode ret; + switch (status) { + case sisl::AuthVerifyStatus::OK: + ret = grpc::StatusCode::OK; + break; + case sisl::AuthVerifyStatus::UNAUTH: + ret = grpc::StatusCode::UNAUTHENTICATED; + break; + case sisl::AuthVerifyStatus::FORBIDDEN: + ret = grpc::StatusCode::PERMISSION_DENIED; + break; + default: + ret = grpc::StatusCode::UNKNOWN; + break; + } + return ret; +} + } // namespace grpc_helper From 2ad39b8575822a2b13a71ad6c588660844e711db Mon Sep 17 00:00:00 2001 From: "Ravi Akella email = raakella@ebay.com" Date: Mon, 24 Oct 2022 11:09:02 -0700 Subject: [PATCH 185/385] add generic client have option to make generic callback async --- include/grpc_helper/generic_service.hpp | 38 +++++++++++------- include/grpc_helper/rpc_client.hpp | 43 ++++++++++++++++++++ include/grpc_helper/rpc_common.hpp | 4 +- include/grpc_helper/rpc_server.hpp | 52 +++++++++++++++++++++---- lib/rpc_server.cpp | 6 +-- 5 files changed, 118 insertions(+), 25 deletions(-) diff --git a/include/grpc_helper/generic_service.hpp b/include/grpc_helper/generic_service.hpp index 2d82b693..14e7ddab 100644 --- a/include/grpc_helper/generic_service.hpp +++ b/include/grpc_helper/generic_service.hpp @@ -5,7 +5,7 @@ namespace grpc_helper { -using generic_rpc_handler_cb_t = std::function< bool(const boost::intrusive_ptr< GenericRpcData >&) >; +using generic_rpc_handler_cb_t = std::function< bool(boost::intrusive_ptr< GenericRpcData >&) >; /** * Callbacks are registered by a name. The client generic stub uses the method name to call the RPC @@ -15,11 +15,11 @@ using generic_rpc_handler_cb_t = std::function< bool(const boost::intrusive_ptr< class GenericRpcStaticInfo : public RpcStaticInfoBase { public: - GenericRpcStaticInfo(GrpcServer* server, size_t idx) : m_server{server}, m_rpc_idx{idx} {} + GenericRpcStaticInfo(GrpcServer* server, grpc::AsyncGenericService* service) : + m_server{server}, m_generic_service{service} {} GrpcServer* m_server; - grpc::AsyncGenericService m_generic_service; - size_t m_rpc_idx; + grpc::AsyncGenericService* m_generic_service; }; class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcData > { @@ -33,30 +33,41 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD ~GenericRpcData() override = default; - size_t get_rpc_idx() const override { return m_rpc_info->m_rpc_idx; } + // There is only one generic static rpc data for all rpcs. + size_t get_rpc_idx() const override { return 0; } void enqueue_call_request(::grpc::ServerCompletionQueue& cq) override { - m_rpc_info->m_generic_service.RequestCall(&m_ctx, &m_stream, &cq, &cq, - static_cast< void* >(m_request_received_tag.ref())); + m_rpc_info->m_generic_service->RequestCall(m_ctx, &m_stream, &cq, &cq, + static_cast< void* >(m_request_received_tag.ref())); } + void send_response() { m_stream.Write(m_response, static_cast< void* >(m_buf_write_tag.ref())); } + GenericRpcData(GenericRpcStaticInfo* rpc_info, size_t queue_idx) : - RpcDataAbstract{queue_idx}, m_rpc_info{rpc_info}, m_stream(&m_ctx) {} + RpcDataAbstract{queue_idx}, m_rpc_info{rpc_info}, m_stream(m_ctx) {} private: GenericRpcStaticInfo* m_rpc_info; grpc::GenericServerAsyncReaderWriter m_stream; - grpc::GenericServerContext m_ctx; + grpc::GenericServerContext* m_ctx; grpc::ByteBuffer m_request; grpc::ByteBuffer m_response; std::atomic_bool m_is_canceled{false}; grpc::Status m_retstatus{grpc::Status::OK}; private: + bool do_authorization() { + m_retstatus = RPCHelper::do_authorization(m_rpc_info->m_server, m_ctx); + return m_retstatus.error_code() == grpc::StatusCode::OK; + } + RpcDataAbstract* on_request_received(bool ok) { bool in_shutdown = RPCHelper::has_server_shutdown(m_rpc_info->m_server); if (ok && !m_is_canceled.load(std::memory_order_relaxed)) { + if (!do_authorization()) { + m_stream.Finish(m_retstatus, static_cast< void* >(m_request_completed_tag.ref())); + } m_stream.Read(&m_request, static_cast< void* >(m_buf_read_tag.ref())); } @@ -64,9 +75,10 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD } RpcDataAbstract* on_buf_read(bool ok) { - RPCHelper::run_generic_handler_cb(m_rpc_info->m_server, m_ctx.method(), - boost::intrusive_ptr< GenericRpcData >{this}); - m_stream.Write(m_response, static_cast< void* >(m_buf_write_tag.ref())); + auto this_rpc_data = boost::intrusive_ptr< GenericRpcData >{this}; + if (RPCHelper::run_generic_handler_cb(m_rpc_info->m_server, m_ctx->method(), this_rpc_data)) { + send_response(); + } return nullptr; } @@ -76,7 +88,7 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD } RpcDataAbstract* on_request_completed(bool ok) { - if (m_ctx.IsCancelled()) { m_is_canceled.store(true, std::memory_order_release); } + if (m_ctx->IsCancelled()) { m_is_canceled.store(true, std::memory_order_release); } return nullptr; } diff --git a/include/grpc_helper/rpc_client.hpp b/include/grpc_helper/rpc_client.hpp index 78c9e562..e2371ece 100644 --- a/include/grpc_helper/rpc_client.hpp +++ b/include/grpc_helper/rpc_client.hpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -50,6 +51,7 @@ template < typename ReqT, typename RespT > class ClientRpcDataInternal : public ClientRpcDataAbstract { public: using ResponseReaderPtr = std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< RespT > >; + using GenericResponseReaderPtr = std::unique_ptr< grpc::GenericClientAsyncResponseReader >; /* Allow GrpcAsyncClient and its inner classes to use * ClientCallData. @@ -86,6 +88,7 @@ class ClientRpcDataInternal : public ClientRpcDataAbstract { ::grpc::ClientContext m_context; ::grpc::Status m_status; ResponseReaderPtr m_resp_reader_ptr; + GenericResponseReaderPtr m_generic_resp_reader_ptr; }; template < typename ReqT, typename RespT > @@ -292,6 +295,39 @@ class GrpcAsyncClient : public GrpcBaseClient { ::grpc::CompletionQueue* cq() { return &m_worker->cq(); } }; + /** + * GenericAsyncStub is a wrapper of the grpc::GenericStub which + * provides the interface to call generic methods by name. + * We assume the Request and Response types are grpc::ByteBuffer. + * + * Please use GrpcAsyncClient::make_generic_stub() to create GenericAsyncStub. + */ + + struct GenericAsyncStub { + GenericAsyncStub(std::unique_ptr< grpc::GenericStub > stub, GrpcAsyncClientWorker* worker, + std::shared_ptr< sisl::TrfClient > trf_client) : + m_generic_stub(std::move(stub)), m_worker(worker), m_trf_client(trf_client) {} + + void call_unary(const grpc::ByteBuffer& request, const std::string& method, + const unary_callback_t< grpc::ByteBuffer >& callback, uint32_t deadline) { + auto data = new ClientRpcDataInternal< grpc::ByteBuffer, grpc::ByteBuffer >(callback); + data->set_deadline(deadline); + if (m_trf_client) { data->add_metadata("authorization", m_trf_client->get_typed_token()); } + // Note that async unary RPCs don't post a CQ tag in call + data->m_generic_resp_reader_ptr = m_generic_stub->PrepareUnaryCall(&data->context(), method, request, cq()); + data->m_generic_resp_reader_ptr->StartCall(); + // CQ tag posted here + data->m_generic_resp_reader_ptr->Finish(&data->reply(), &data->status(), (void*)data); + return; + } + + std::unique_ptr< grpc::GenericStub > m_generic_stub; + GrpcAsyncClientWorker* m_worker; + std::shared_ptr< sisl::TrfClient > m_trf_client; + + grpc::CompletionQueue* cq() { return &m_worker->cq(); } + }; + template < typename T, typename... Ts > static auto make(Ts&&... params) { return std::make_unique< T >(std::forward< Ts >(params)...); @@ -304,6 +340,13 @@ class GrpcAsyncClient : public GrpcBaseClient { return std::make_unique< AsyncStub< ServiceT > >(ServiceT::NewStub(m_channel), w, m_trf_client); } + + auto make_generic_stub(const std::string& worker) { + auto w = GrpcAsyncClientWorker::get_worker(worker); + if (w == nullptr) { throw std::runtime_error("worker thread not available"); } + + return std::make_unique< GenericAsyncStub >(std::make_unique< grpc::GenericStub >(m_channel), w, m_trf_client); + } }; } // namespace grpc_helper diff --git a/include/grpc_helper/rpc_common.hpp b/include/grpc_helper/rpc_common.hpp index e26abaeb..f3e9b463 100644 --- a/include/grpc_helper/rpc_common.hpp +++ b/include/grpc_helper/rpc_common.hpp @@ -5,8 +5,8 @@ class GrpcServer; class GenericRpcData; struct RPCHelper { static bool has_server_shutdown(const GrpcServer* server); - static void run_generic_handler_cb(const GrpcServer* server, const std::string& method, - const boost::intrusive_ptr< GenericRpcData >& rpc_data); + static bool run_generic_handler_cb(const GrpcServer* server, const std::string& method, + boost::intrusive_ptr< GenericRpcData >& rpc_data); static grpc::Status do_authorization(const GrpcServer* server, const grpc::ServerContext* srv_ctx); static grpc::StatusCode to_grpc_statuscode(const sisl::AuthVerifyStatus status); }; diff --git a/include/grpc_helper/rpc_server.hpp b/include/grpc_helper/rpc_server.hpp index d427824f..0dce7e97 100644 --- a/include/grpc_helper/rpc_server.hpp +++ b/include/grpc_helper/rpc_server.hpp @@ -97,20 +97,55 @@ class GrpcServer : private boost::noncopyable { }); } - void run_generic_handler_cb(const std::string& rpc_name, - const boost::intrusive_ptr< GenericRpcData >& rpc_data) const { + bool is_auth_enabled() const { return m_auth_mgr != nullptr; } + + sisl::AuthVerifyStatus auth_verify(const std::string& token, std::string& msg) const { + return m_auth_mgr->verify(token, msg); + } + + // generic service methods + + bool run_generic_handler_cb(const std::string& rpc_name, boost::intrusive_ptr< GenericRpcData >& rpc_data) const { auto it = m_generic_rpc_registry.find(rpc_name); if (it == m_generic_rpc_registry.end()) { LOGMSG_ASSERT(false, "generic RPC not registered"); - return; + // respond immediately + return true; + ; } - (it->second)(rpc_data); + return (it->second)(rpc_data); } - bool is_auth_enabled() const { return m_auth_mgr != nullptr; } + bool register_async_generic_service() { + DEBUG_ASSERT_EQ(ServerState::INITED, m_state, "register service in non-INITED state"); + if (m_generic_service_registered) { + LOGMSG_ASSERT(false, "Duplicate register generic async service"); + return false; + } + m_builder.RegisterAsyncGenericService(m_generic_service.get()); + m_generic_rpc_static_info = std::make_unique< GenericRpcStaticInfo >(this, m_generic_service.get()); + return true; + } - sisl::AuthVerifyStatus auth_verify(const std::string& token, std::string& msg) const { - return m_auth_mgr->verify(token, msg); + bool register_generic_rpc(const std::string& name, const generic_rpc_handler_cb_t& rpc_handler) { + DEBUG_ASSERT_EQ(ServerState::RUNNING, m_state, "register service in non-INITED state"); + + if (!m_generic_service_registered) { + LOGMSG_ASSERT(false, "RPC registration attempted before generic service is registered"); + return false; + } + if (m_generic_rpc_registry.find(name) != m_generic_rpc_registry.end()) { + LOGMSG_ASSERT(false, "duplicate generic RPC registration attempted"); + return false; + } + m_generic_rpc_registry.emplace(std::make_pair(name, rpc_handler)); + + // Register one call per cq. + for (auto i = 0u; i < m_cqs.size(); ++i) { + auto rpc_call = GenericRpcData::make(m_generic_rpc_static_info.get(), i); + rpc_call->enqueue_call_request(*m_cqs[i]); + } + return true; } private: @@ -129,6 +164,9 @@ class GrpcServer : private boost::noncopyable { std::mutex m_rpc_registry_mtx; std::vector< std::unique_ptr< RpcStaticInfoBase > > m_rpc_registry; std::shared_ptr< sisl::AuthManager > m_auth_mgr; + std::unique_ptr< grpc::AsyncGenericService > m_generic_service; + std::unique_ptr< GenericRpcStaticInfo > m_generic_rpc_static_info; + bool m_generic_service_registered{false}; std::unordered_map< std::string, generic_rpc_handler_cb_t > m_generic_rpc_registry; }; } // namespace grpc_helper diff --git a/lib/rpc_server.cpp b/lib/rpc_server.cpp index 08459213..43ba3852 100644 --- a/lib/rpc_server.cpp +++ b/lib/rpc_server.cpp @@ -134,9 +134,9 @@ bool RPCHelper::has_server_shutdown(const GrpcServer* server) { return (server->m_state.load(std::memory_order_acquire) != ServerState::RUNNING); } -void RPCHelper::run_generic_handler_cb(const GrpcServer* server, const std::string& method, - const boost::intrusive_ptr< GenericRpcData >& rpc_data) { - server->run_generic_handler_cb(method, rpc_data); +bool RPCHelper::run_generic_handler_cb(const GrpcServer* server, const std::string& method, + boost::intrusive_ptr< GenericRpcData >& rpc_data) { + return server->run_generic_handler_cb(method, rpc_data); } grpc::Status RPCHelper::do_authorization(const GrpcServer* server, const grpc::ServerContext* srv_ctx) { From 2509901e0ee25ffa59a395e944546b5cdcc6799e Mon Sep 17 00:00:00 2001 From: "Ravi Akella email = raakella@ebay.com" Date: Tue, 25 Oct 2022 12:53:32 -0700 Subject: [PATCH 186/385] Add functional test to generic client and server Add true async generic test case --- include/grpc_helper/generic_service.hpp | 28 +++-- include/grpc_helper/rpc_client.hpp | 24 +++- include/grpc_helper/rpc_server.hpp | 2 + tests/function/echo_async_client.cpp | 154 ++++++++++++++++++++++-- 4 files changed, 185 insertions(+), 23 deletions(-) diff --git a/include/grpc_helper/generic_service.hpp b/include/grpc_helper/generic_service.hpp index 14e7ddab..6e82fadc 100644 --- a/include/grpc_helper/generic_service.hpp +++ b/include/grpc_helper/generic_service.hpp @@ -36,35 +36,37 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD // There is only one generic static rpc data for all rpcs. size_t get_rpc_idx() const override { return 0; } + const grpc::ByteBuffer& request() const { return m_request; } + grpc::ByteBuffer& response() { return m_response; } + void enqueue_call_request(::grpc::ServerCompletionQueue& cq) override { - m_rpc_info->m_generic_service->RequestCall(m_ctx, &m_stream, &cq, &cq, + m_rpc_info->m_generic_service->RequestCall(&m_ctx, &m_stream, &cq, &cq, static_cast< void* >(m_request_received_tag.ref())); } void send_response() { m_stream.Write(m_response, static_cast< void* >(m_buf_write_tag.ref())); } GenericRpcData(GenericRpcStaticInfo* rpc_info, size_t queue_idx) : - RpcDataAbstract{queue_idx}, m_rpc_info{rpc_info}, m_stream(m_ctx) {} + RpcDataAbstract{queue_idx}, m_rpc_info{rpc_info}, m_stream(&m_ctx) {} private: GenericRpcStaticInfo* m_rpc_info; grpc::GenericServerAsyncReaderWriter m_stream; - grpc::GenericServerContext* m_ctx; + grpc::GenericServerContext m_ctx; grpc::ByteBuffer m_request; grpc::ByteBuffer m_response; - std::atomic_bool m_is_canceled{false}; grpc::Status m_retstatus{grpc::Status::OK}; private: bool do_authorization() { - m_retstatus = RPCHelper::do_authorization(m_rpc_info->m_server, m_ctx); + m_retstatus = RPCHelper::do_authorization(m_rpc_info->m_server, &m_ctx); return m_retstatus.error_code() == grpc::StatusCode::OK; } RpcDataAbstract* on_request_received(bool ok) { bool in_shutdown = RPCHelper::has_server_shutdown(m_rpc_info->m_server); - if (ok && !m_is_canceled.load(std::memory_order_relaxed)) { + if (ok) { if (!do_authorization()) { m_stream.Finish(m_retstatus, static_cast< void* >(m_request_completed_tag.ref())); } @@ -76,21 +78,21 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD RpcDataAbstract* on_buf_read(bool ok) { auto this_rpc_data = boost::intrusive_ptr< GenericRpcData >{this}; - if (RPCHelper::run_generic_handler_cb(m_rpc_info->m_server, m_ctx->method(), this_rpc_data)) { - send_response(); - } + // take a ref before the handler cb is called. + // unref is called in send_response which is handled by us (in case of sync calls) + // or by the handler (for async calls) + ref(); + if (RPCHelper::run_generic_handler_cb(m_rpc_info->m_server, m_ctx.method(), this_rpc_data)) { send_response(); } return nullptr; } RpcDataAbstract* on_buf_write(bool ok) { m_stream.Finish(m_retstatus, static_cast< void* >(m_request_completed_tag.ref())); + unref(); return nullptr; } - RpcDataAbstract* on_request_completed(bool ok) { - if (m_ctx->IsCancelled()) { m_is_canceled.store(true, std::memory_order_release); } - return nullptr; - } + RpcDataAbstract* on_request_completed(bool ok) { return nullptr; } struct RpcTagImpl : public RpcTag { using callback_type = RpcDataAbstract* (GenericRpcData::*)(bool ok); diff --git a/include/grpc_helper/rpc_client.hpp b/include/grpc_helper/rpc_client.hpp index e2371ece..63465fe5 100644 --- a/include/grpc_helper/rpc_client.hpp +++ b/include/grpc_helper/rpc_client.hpp @@ -42,6 +42,15 @@ using req_builder_cb_t = std::function< void(ReqT&) >; template < typename RespT > using unary_callback_t = std::function< void(RespT&, ::grpc::Status& status) >; +template < typename ReqT, typename RespT > +class ClientRpcDataInternal; + +using GenericClientRpcData = ClientRpcData< grpc::ByteBuffer, grpc::ByteBuffer >; +using generic_rpc_comp_cb_t = rpc_comp_cb_t< grpc::ByteBuffer, grpc::ByteBuffer >; +using generic_req_builder_cb_t = req_builder_cb_t< grpc::ByteBuffer >; +using generic_unary_callback_t = unary_callback_t< grpc::ByteBuffer >; +using GenericClientRpcDataInternal = ClientRpcDataInternal< grpc::ByteBuffer, grpc::ByteBuffer >; + /** * The specialized 'ClientRpcDataInternal' per gRPC call, it stores * the response handler function @@ -309,8 +318,8 @@ class GrpcAsyncClient : public GrpcBaseClient { m_generic_stub(std::move(stub)), m_worker(worker), m_trf_client(trf_client) {} void call_unary(const grpc::ByteBuffer& request, const std::string& method, - const unary_callback_t< grpc::ByteBuffer >& callback, uint32_t deadline) { - auto data = new ClientRpcDataInternal< grpc::ByteBuffer, grpc::ByteBuffer >(callback); + const generic_unary_callback_t& callback, uint32_t deadline) { + auto data = new GenericClientRpcDataInternal(callback); data->set_deadline(deadline); if (m_trf_client) { data->add_metadata("authorization", m_trf_client->get_typed_token()); } // Note that async unary RPCs don't post a CQ tag in call @@ -321,6 +330,17 @@ class GrpcAsyncClient : public GrpcBaseClient { return; } + void call_rpc(const generic_req_builder_cb_t& builder_cb, const std::string& method, + const generic_rpc_comp_cb_t& done_cb, uint32_t deadline) { + auto cd = new GenericClientRpcData(done_cb); + builder_cb(cd->m_req); + cd->set_deadline(deadline); + if (m_trf_client) { cd->add_metadata("authorization", m_trf_client->get_typed_token()); } + cd->m_generic_resp_reader_ptr = m_generic_stub->PrepareUnaryCall(&cd->context(), method, cd->m_req, cq()); + cd->m_generic_resp_reader_ptr->StartCall(); + cd->m_generic_resp_reader_ptr->Finish(&cd->reply(), &cd->status(), (void*)cd); + } + std::unique_ptr< grpc::GenericStub > m_generic_stub; GrpcAsyncClientWorker* m_worker; std::shared_ptr< sisl::TrfClient > m_trf_client; diff --git a/include/grpc_helper/rpc_server.hpp b/include/grpc_helper/rpc_server.hpp index 0dce7e97..4302afca 100644 --- a/include/grpc_helper/rpc_server.hpp +++ b/include/grpc_helper/rpc_server.hpp @@ -122,8 +122,10 @@ class GrpcServer : private boost::noncopyable { LOGMSG_ASSERT(false, "Duplicate register generic async service"); return false; } + m_generic_service = std::make_unique< grpc::AsyncGenericService >(); m_builder.RegisterAsyncGenericService(m_generic_service.get()); m_generic_rpc_static_info = std::make_unique< GenericRpcStaticInfo >(this, m_generic_service.get()); + m_generic_service_registered = true; return true; } diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp index 3e69d5f5..e88b3a2e 100644 --- a/tests/function/echo_async_client.cpp +++ b/tests/function/echo_async_client.cpp @@ -17,9 +17,61 @@ using namespace grpc_helper; using namespace ::grpc_helper_test; using namespace std::placeholders; +struct DataMessage { + int m_seqno; + std::string m_buf; + + DataMessage() = default; + DataMessage(const int n, const std::string& buf) : m_seqno{n}, m_buf{buf} {} + + void SerializeToString(std::string& str_buf) const { + // first char denotes number of digits in seq_no + str_buf.append(std::to_string(numDigits(m_seqno))); + // append the seqno + str_buf.append(std::to_string(m_seqno)); + // append the data buffer + str_buf.append(m_buf); + } + void DeserializeFromString(const std::string& str_buf) { + int num_dig = str_buf[0] - '0'; + m_seqno = std::stoi(str_buf.substr(1, num_dig)); + m_buf = str_buf.substr(1 + num_dig); + } + + static int numDigits(int n) { + int ret = 0; + for (; n > 0; ret++) { + n /= 10; + } + return ret; + } +}; + +static void DeserializeFromByteBuffer(const grpc::ByteBuffer& buffer, DataMessage& msg) { + std::vector< grpc::Slice > slices; + (void)buffer.Dump(&slices); + std::string buf; + buf.reserve(buffer.Length()); + for (auto s = slices.begin(); s != slices.end(); s++) { + buf.append(reinterpret_cast< const char* >(s->begin()), s->size()); + } + msg.DeserializeFromString(buf); +} +static void SerializeToByteBuffer(grpc::ByteBuffer& buffer, const DataMessage& msg) { + std::string buf; + msg.SerializeToString(buf); + buffer.Clear(); + grpc::Slice slice(buf); + grpc::ByteBuffer tmp(&slice, 1); + buffer.Swap(&tmp); +} + +static const std::string GENERIC_CLIENT_MESSAGE{"I am a super client!"}; +static const std::string GENERIC_METHOD{"SendData"}; + class TestClient { public: - static constexpr int GRPC_CALL_COUNT = 100; + static constexpr int GRPC_CALL_COUNT = 400; const std::string WORKER_NAME{"Worker-1"}; void validate_echo_reply(const EchoRequest& req, EchoReply& reply, ::grpc::Status& status) { @@ -44,6 +96,19 @@ class TestClient { } } + void validate_generic_reply(const DataMessage& req, grpc::ByteBuffer& reply, ::grpc::Status& status) { + RELEASE_ASSERT_EQ(status.ok(), true, "generic request {} failed, status {}: {}", req.m_seqno, + status.error_code(), status.error_message()); + DataMessage svr_msg; + DeserializeFromByteBuffer(reply, svr_msg); + RELEASE_ASSERT_EQ(req.m_seqno, svr_msg.m_seqno); + RELEASE_ASSERT_EQ(req.m_buf, svr_msg.m_buf); + { + std::unique_lock lk(m_wait_mtx); + if (--m_generic_counter == 0) { m_cv.notify_all(); } + } + } + void run(const std::string& server_address) { auto client = std::make_unique< GrpcAsyncClient >(server_address, "", ""); client->init(); @@ -51,10 +116,14 @@ class TestClient { auto echo_stub = client->make_stub< EchoService >(WORKER_NAME); auto ping_stub = client->make_stub< PingService >(WORKER_NAME); + auto generic_stub = client->make_generic_stub(WORKER_NAME); + + m_echo_counter = static_cast< int >(GRPC_CALL_COUNT / 2); + // all numbers divisible by 3 but not 2 + m_ping_counter = static_cast< int >((GRPC_CALL_COUNT - 3) / 6) + 1; + m_generic_counter = GRPC_CALL_COUNT - m_echo_counter - m_ping_counter; - m_ping_counter = GRPC_CALL_COUNT; - m_echo_counter = GRPC_CALL_COUNT; - for (int i = 1; i <= GRPC_CALL_COUNT * 2; ++i) { + for (int i = 1; i <= GRPC_CALL_COUNT; ++i) { if ((i % 2) == 0) { if ((i % 4) == 0) { EchoRequest req; @@ -74,8 +143,9 @@ class TestClient { }, 1); } - } else { - if ((i % 3) == 0) { + } else if ((i % 3) == 0) { + // divide all numbers divisible by 3 and not by 2 into two equal buckets + if ((((i + 3) / 6) % 2) == 0) { PingRequest req; req.set_seqno(i); ping_stub->call_unary< PingRequest, PingReply >( @@ -92,19 +162,42 @@ class TestClient { }, 1); } + } else { + // divide all numbers not divisible by 2 and 3 into two equal buckets + if (((i + 1) % 6) == 0) { + DataMessage req(i, GENERIC_CLIENT_MESSAGE); + grpc::ByteBuffer cli_buf; + SerializeToByteBuffer(cli_buf, req); + generic_stub->call_unary( + cli_buf, GENERIC_METHOD, + [req, this](grpc::ByteBuffer& reply, ::grpc::Status& status) { + validate_generic_reply(req, reply, status); + }, + 1); + } else { + DataMessage data_msg(i, GENERIC_CLIENT_MESSAGE); + generic_stub->call_rpc([data_msg](grpc::ByteBuffer& req) { SerializeToByteBuffer(req, data_msg); }, + GENERIC_METHOD, + [data_msg, this](GenericClientRpcData& cd) { + validate_generic_reply(data_msg, cd.reply(), cd.status()); + }, + 1); + } } } } void wait() { std::unique_lock lk(m_wait_mtx); - m_cv.wait(lk, [this]() { return ((m_echo_counter == 0) && (m_ping_counter == 0)); }); + m_cv.wait(lk, + [this]() { return ((m_echo_counter == 0) && (m_ping_counter == 0) && (m_generic_counter == 0)); }); GrpcAsyncClientWorker::shutdown_all(); } private: int m_echo_counter; int m_ping_counter; + int m_generic_counter; std::mutex m_wait_mtx; std::condition_variable m_cv; }; @@ -179,6 +272,45 @@ class TestServer { } }; + class GenericServiceImpl final { + std::atomic< uint32_t > num_calls = 0ul; + + static void set_response(const grpc::ByteBuffer& req, grpc::ByteBuffer& resp) { + DataMessage cli_request; + DeserializeFromByteBuffer(req, cli_request); + RELEASE_ASSERT((cli_request.m_buf == GENERIC_CLIENT_MESSAGE), "Could not parse response buffer"); + SerializeToByteBuffer(resp, cli_request); + } + + public: + bool receive_data(boost::intrusive_ptr< GenericRpcData >& rpc_data) { + if ((++num_calls % 2) == 0) { + LOGDEBUGMOD(grpc_server, "respond async generic request, call_num {}", num_calls); + auto t = std::thread([rpc = rpc_data] { + set_response(rpc->request(), rpc->response()); + rpc->response() = rpc->response(); + rpc->send_response(); + }); + t.detach(); + return false; + } + set_response(rpc_data->request(), rpc_data->response()); + return true; + } + + void register_service(GrpcServer* server) { + auto const res = server->register_async_generic_service(); + RELEASE_ASSERT(res, "Failed to Register Service"); + } + + void register_rpcs(GrpcServer* server) { + LOGINFO("register rpc calls"); + auto const res = + server->register_generic_rpc(GENERIC_METHOD, std::bind(&GenericServiceImpl::receive_data, this, _1)); + RELEASE_ASSERT(res, "register generic rpc failed"); + } + }; + void start(const std::string& server_address) { LOGINFO("Start echo and ping server on {}...", server_address); m_grpc_server = GrpcServer::make(server_address, 4, "", ""); @@ -188,11 +320,15 @@ class TestServer { m_ping_impl = new PingServiceImpl(); m_ping_impl->register_service(m_grpc_server); + m_generic_impl = new GenericServiceImpl(); + m_generic_impl->register_service(m_grpc_server); + m_grpc_server->run(); LOGINFO("Server listening on {}", server_address); m_echo_impl->register_rpcs(m_grpc_server); m_ping_impl->register_rpcs(m_grpc_server); + m_generic_impl->register_rpcs(m_grpc_server); } void shutdown() { @@ -201,12 +337,14 @@ class TestServer { delete m_grpc_server; delete m_echo_impl; delete m_ping_impl; + delete m_generic_impl; } private: GrpcServer* m_grpc_server = nullptr; EchoServiceImpl* m_echo_impl = nullptr; PingServiceImpl* m_ping_impl = nullptr; + GenericServiceImpl* m_generic_impl = nullptr; }; SISL_LOGGING_INIT(logging, grpc_server) @@ -217,7 +355,7 @@ int main(int argc, char** argv) { sisl::logging::SetLogger("async_client"); TestServer server; - std::string server_address("0.0.0.0:50051"); + std::string server_address("0.0.0.0:50052"); server.start(server_address); TestClient client; From 0e82f2abebca938dda61620f75de37b63302ac3d Mon Sep 17 00:00:00 2001 From: shosseinimotlagh Date: Fri, 28 Oct 2022 09:35:23 -0700 Subject: [PATCH 187/385] FIX Sanity review comments --- include/grpc_helper/generic_service.hpp | 2 +- include/grpc_helper/rpc_common.hpp | 2 +- include/grpc_helper/rpc_server.hpp | 33 ++++++++++++++++--------- lib/rpc_server.cpp | 7 +++--- tests/function/echo_async_client.cpp | 1 - 5 files changed, 27 insertions(+), 18 deletions(-) diff --git a/include/grpc_helper/generic_service.hpp b/include/grpc_helper/generic_service.hpp index 6e82fadc..5b94624c 100644 --- a/include/grpc_helper/generic_service.hpp +++ b/include/grpc_helper/generic_service.hpp @@ -29,7 +29,7 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD } RpcDataAbstract* create_new() override { return new GenericRpcData(m_rpc_info, m_queue_idx); } - void set_status(grpc::Status status) { m_retstatus = status; } + void set_status(grpc::Status& status) { m_retstatus = status; } ~GenericRpcData() override = default; diff --git a/include/grpc_helper/rpc_common.hpp b/include/grpc_helper/rpc_common.hpp index f3e9b463..74748e3c 100644 --- a/include/grpc_helper/rpc_common.hpp +++ b/include/grpc_helper/rpc_common.hpp @@ -5,7 +5,7 @@ class GrpcServer; class GenericRpcData; struct RPCHelper { static bool has_server_shutdown(const GrpcServer* server); - static bool run_generic_handler_cb(const GrpcServer* server, const std::string& method, + static bool run_generic_handler_cb(GrpcServer* server, const std::string& method, boost::intrusive_ptr< GenericRpcData >& rpc_data); static grpc::Status do_authorization(const GrpcServer* server, const grpc::ServerContext* srv_ctx); static grpc::StatusCode to_grpc_statuscode(const sisl::AuthVerifyStatus status); diff --git a/include/grpc_helper/rpc_server.hpp b/include/grpc_helper/rpc_server.hpp index 4302afca..2f238e6f 100644 --- a/include/grpc_helper/rpc_server.hpp +++ b/include/grpc_helper/rpc_server.hpp @@ -105,15 +105,20 @@ class GrpcServer : private boost::noncopyable { // generic service methods - bool run_generic_handler_cb(const std::string& rpc_name, boost::intrusive_ptr< GenericRpcData >& rpc_data) const { - auto it = m_generic_rpc_registry.find(rpc_name); - if (it == m_generic_rpc_registry.end()) { - LOGMSG_ASSERT(false, "generic RPC not registered"); - // respond immediately - return true; - ; + bool run_generic_handler_cb(const std::string& rpc_name, boost::intrusive_ptr< GenericRpcData >& rpc_data) { + generic_rpc_handler_cb_t cb; + { + std::shared_lock< std::shared_mutex > lock(m_generic_rpc_registry_mtx); + auto it = m_generic_rpc_registry.find(rpc_name); + if (it == m_generic_rpc_registry.end()) { + LOGMSG_ASSERT(false, "generic RPC not registered"); + // respond immediately + return true; + ; + } + cb = it->second; } - return (it->second)(rpc_data); + return cb(rpc_data); } bool register_async_generic_service() { @@ -136,11 +141,14 @@ class GrpcServer : private boost::noncopyable { LOGMSG_ASSERT(false, "RPC registration attempted before generic service is registered"); return false; } - if (m_generic_rpc_registry.find(name) != m_generic_rpc_registry.end()) { - LOGMSG_ASSERT(false, "duplicate generic RPC registration attempted"); - return false; + + { + std::unique_lock< std::shared_mutex > lock(m_generic_rpc_registry_mtx); + if (auto [it, happened]{m_generic_rpc_registry.emplace(name, rpc_handler)}; !happened) { + LOGMSG_ASSERT(false, "duplicate generic RPC registration attempted"); + return false; + } } - m_generic_rpc_registry.emplace(std::make_pair(name, rpc_handler)); // Register one call per cq. for (auto i = 0u; i < m_cqs.size(); ++i) { @@ -170,5 +178,6 @@ class GrpcServer : private boost::noncopyable { std::unique_ptr< GenericRpcStaticInfo > m_generic_rpc_static_info; bool m_generic_service_registered{false}; std::unordered_map< std::string, generic_rpc_handler_cb_t > m_generic_rpc_registry; + std::shared_mutex m_generic_rpc_registry_mtx; }; } // namespace grpc_helper diff --git a/lib/rpc_server.cpp b/lib/rpc_server.cpp index 43ba3852..9f589e38 100644 --- a/lib/rpc_server.cpp +++ b/lib/rpc_server.cpp @@ -134,7 +134,7 @@ bool RPCHelper::has_server_shutdown(const GrpcServer* server) { return (server->m_state.load(std::memory_order_acquire) != ServerState::RUNNING); } -bool RPCHelper::run_generic_handler_cb(const GrpcServer* server, const std::string& method, +bool RPCHelper::run_generic_handler_cb(GrpcServer* server, const std::string& method, boost::intrusive_ptr< GenericRpcData >& rpc_data) { return server->run_generic_handler_cb(method, rpc_data); } @@ -146,9 +146,10 @@ grpc::Status RPCHelper::do_authorization(const GrpcServer* server, const grpc::S const std::string bearer{"Bearer "}; if (it->second.starts_with(bearer)) { auto token_ref = it->second.substr(bearer.size()); - std::string raw_token{token_ref.begin(), token_ref.end()}; std::string msg; - return grpc::Status(RPCHelper::to_grpc_statuscode(server->auth_verify(raw_token, msg)), msg); + return grpc::Status(RPCHelper::to_grpc_statuscode( + server->auth_verify(std::string(token_ref.begin(), token_ref.end()), msg)), + msg); } else { return grpc::Status(grpc::StatusCode::UNAUTHENTICATED, grpc::string("authorization header value does not start with 'Bearer '")); diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp index e88b3a2e..bfb1c257 100644 --- a/tests/function/echo_async_client.cpp +++ b/tests/function/echo_async_client.cpp @@ -288,7 +288,6 @@ class TestServer { LOGDEBUGMOD(grpc_server, "respond async generic request, call_num {}", num_calls); auto t = std::thread([rpc = rpc_data] { set_response(rpc->request(), rpc->response()); - rpc->response() = rpc->response(); rpc->send_response(); }); t.detach(); From 8556ceacfff52150b9b479e4441c227aa48e6092 Mon Sep 17 00:00:00 2001 From: "Ravi Akella email = raakella@ebay.com" Date: Thu, 10 Nov 2022 13:50:30 -0700 Subject: [PATCH 188/385] add unit tests for generic service --- CMakeLists.txt | 1 - include/grpc_helper/generic_service.hpp | 3 +- include/grpc_helper/rpc_server.hpp | 9 +-- tests/unit/auth_test.cpp | 93 +++++++++++++++++++++++++ 4 files changed, 100 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index aec70d05..402e82c0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -14,7 +14,6 @@ find_package(sisl REQUIRED) find_package(gRPC REQUIRED) include_directories(BEFORE "include") -include_directories(BEFORE ${CONAN_INCLUDE_DIRS_SISL}/sisl) add_library(${PROJECT_NAME}) target_sources(${PROJECT_NAME} PRIVATE diff --git a/include/grpc_helper/generic_service.hpp b/include/grpc_helper/generic_service.hpp index 5b94624c..4d64ee00 100644 --- a/include/grpc_helper/generic_service.hpp +++ b/include/grpc_helper/generic_service.hpp @@ -69,8 +69,9 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD if (ok) { if (!do_authorization()) { m_stream.Finish(m_retstatus, static_cast< void* >(m_request_completed_tag.ref())); + } else { + m_stream.Read(&m_request, static_cast< void* >(m_buf_read_tag.ref())); } - m_stream.Read(&m_request, static_cast< void* >(m_buf_read_tag.ref())); } return in_shutdown ? nullptr : create_new(); diff --git a/include/grpc_helper/rpc_server.hpp b/include/grpc_helper/rpc_server.hpp index 2f238e6f..571666ec 100644 --- a/include/grpc_helper/rpc_server.hpp +++ b/include/grpc_helper/rpc_server.hpp @@ -111,10 +111,11 @@ class GrpcServer : private boost::noncopyable { std::shared_lock< std::shared_mutex > lock(m_generic_rpc_registry_mtx); auto it = m_generic_rpc_registry.find(rpc_name); if (it == m_generic_rpc_registry.end()) { - LOGMSG_ASSERT(false, "generic RPC not registered"); + auto status{grpc::Status(grpc::StatusCode::UNIMPLEMENTED, + fmt::format("generic RPC {} not registered", rpc_name))}; + rpc_data->set_status(status); // respond immediately return true; - ; } cb = it->second; } @@ -124,7 +125,7 @@ class GrpcServer : private boost::noncopyable { bool register_async_generic_service() { DEBUG_ASSERT_EQ(ServerState::INITED, m_state, "register service in non-INITED state"); if (m_generic_service_registered) { - LOGMSG_ASSERT(false, "Duplicate register generic async service"); + LOGWARN("Duplicate register generic async service"); return false; } m_generic_service = std::make_unique< grpc::AsyncGenericService >(); @@ -145,7 +146,7 @@ class GrpcServer : private boost::noncopyable { { std::unique_lock< std::shared_mutex > lock(m_generic_rpc_registry_mtx); if (auto [it, happened]{m_generic_rpc_registry.emplace(name, rpc_handler)}; !happened) { - LOGMSG_ASSERT(false, "duplicate generic RPC registration attempted"); + LOGWARN("duplicate generic RPC {} registration attempted", name); return false; } } diff --git a/tests/unit/auth_test.cpp b/tests/unit/auth_test.cpp index 27632250..299663d4 100644 --- a/tests/unit/auth_test.cpp +++ b/tests/unit/auth_test.cpp @@ -35,6 +35,7 @@ static void set_token_response(const std::string& raw_token) { " \"refresh_token\": \"dummy_refresh_token\"\n" "}"; } +static const std::string GENERIC_METHOD{"generic_method"}; class EchoServiceImpl final { public: @@ -85,9 +86,12 @@ class AuthBaseTest : public ::testing::Test { m_grpc_server = GrpcServer::make(server_address, auth_mgr, 4, "", ""); m_echo_impl = new EchoServiceImpl(); m_echo_impl->register_service(m_grpc_server); + m_grpc_server->register_async_generic_service(); m_grpc_server->run(); LOGINFO("Server listening on {}", server_address); m_echo_impl->register_rpcs(m_grpc_server); + m_grpc_server->register_generic_rpc(GENERIC_METHOD, + [](boost::intrusive_ptr< GenericRpcData >& rpc_data) { return true; }); } void process_echo_reply() { @@ -110,13 +114,31 @@ class AuthBaseTest : public ::testing::Test { } } + void call_async_generic_rpc(grpc::Status& status) { + grpc::ByteBuffer req; + m_generic_stub->call_unary( + req, GENERIC_METHOD, + [&status, this](grpc::ByteBuffer&, ::grpc::Status& status_) { + status = status_; + m_generic_received.store(true); + m_cv.notify_all(); + }, + 1000000); + { + std::unique_lock lk(m_wait_mtx); + m_cv.wait(lk, [this]() { return m_generic_received.load(); }); + } + } + protected: std::shared_ptr< AuthManager > m_auth_mgr; EchoServiceImpl* m_echo_impl = nullptr; GrpcServer* m_grpc_server = nullptr; std::unique_ptr< GrpcAsyncClient > m_async_grpc_client; std::unique_ptr< GrpcAsyncClient::AsyncStub< EchoService > > m_echo_stub; + std::unique_ptr< GrpcAsyncClient::GenericAsyncStub > m_generic_stub; std::atomic_bool m_echo_received{false}; + std::atomic_bool m_generic_received{false}; std::mutex m_wait_mtx; std::condition_variable m_cv; }; @@ -132,6 +154,7 @@ class AuthDisableTest : public AuthBaseTest { m_async_grpc_client->init(); GrpcAsyncClientWorker::create_worker("worker-1", 4); m_echo_stub = m_async_grpc_client->make_stub< EchoService >("worker-1"); + m_generic_stub = m_async_grpc_client->make_generic_stub("worker-1"); } void TearDown() override { AuthBaseTest::TearDown(); } @@ -146,6 +169,10 @@ TEST_F(AuthDisableTest, allow_on_disabled_mode) { call_async_echo(req, reply, status); EXPECT_TRUE(status.ok()); EXPECT_EQ(req.message(), reply.message()); + + grpc::Status generic_status; + call_async_generic_rpc(status); + EXPECT_TRUE(generic_status.ok()); } static auto const grant_path = std::string{"dummy_grant.cg"}; @@ -183,6 +210,7 @@ class AuthServerOnlyTest : public AuthBaseTest { m_async_grpc_client->init(); GrpcAsyncClientWorker::create_worker("worker-2", 4); m_echo_stub = m_async_grpc_client->make_stub< EchoService >("worker-2"); + m_generic_stub = m_async_grpc_client->make_generic_stub("worker-2"); } void TearDown() override { @@ -201,6 +229,10 @@ TEST_F(AuthServerOnlyTest, fail_on_no_client_auth) { EXPECT_FALSE(status.ok()); EXPECT_EQ(status.error_code(), grpc::UNAUTHENTICATED); EXPECT_EQ(status.error_message(), "missing header authorization"); + + grpc::Status generic_status; + call_async_generic_rpc(generic_status); + EXPECT_EQ(generic_status.error_code(), grpc::UNAUTHENTICATED); } class TokenApiImpl : public TokenApi { @@ -236,6 +268,7 @@ class AuthEnableTest : public AuthBaseTest { m_async_grpc_client->init(); GrpcAsyncClientWorker::create_worker("worker-3", 4); m_echo_stub = m_async_grpc_client->make_stub< EchoService >("worker-3"); + m_generic_stub = m_async_grpc_client->make_generic_stub("worker-3"); } void TearDown() override { @@ -259,6 +292,10 @@ TEST_F(AuthEnableTest, allow_with_auth) { call_async_echo(req, reply, status); EXPECT_TRUE(status.ok()); EXPECT_EQ(req.message(), reply.message()); + + grpc::Status generic_status; + call_async_generic_rpc(status); + EXPECT_TRUE(generic_status.ok()); } // sync client @@ -290,6 +327,62 @@ TEST_F(AuthEnableTest, allow_sync_client_with_auth) { EXPECT_EQ(req.message(), reply.message()); } +void validate_generic_reply(const std::string& method, ::grpc::Status& status) { + if (method == "method1" || method == "method2") { + EXPECT_TRUE(status.ok()); + } else { + EXPECT_EQ(status.error_code(), grpc::UNIMPLEMENTED); + } +} + +TEST(GenericServiceDeathTest, basic_test) { + testing::GTEST_FLAG(death_test_style) = "threadsafe"; + auto g_grpc_server = GrpcServer::make("0.0.0.0:56789", nullptr, 1, "", ""); + // register rpc before generic service is registered + ASSERT_DEATH(g_grpc_server->register_generic_rpc( + "method1", [](boost::intrusive_ptr< GenericRpcData >& rpc_data) { return true; }), + "Assertion .* failed"); + ASSERT_TRUE(g_grpc_server->register_async_generic_service()); + // duplicate register + EXPECT_FALSE(g_grpc_server->register_async_generic_service()); + // register rpc before server is run + ASSERT_DEATH(g_grpc_server->register_generic_rpc( + "method1", [](boost::intrusive_ptr< GenericRpcData >& rpc_data) { return true; }), + "Assertion .* failed"); + g_grpc_server->run(); + EXPECT_TRUE(g_grpc_server->register_generic_rpc( + "method1", [](boost::intrusive_ptr< GenericRpcData >& rpc_data) { return true; })); + EXPECT_TRUE(g_grpc_server->register_generic_rpc( + "method2", [](boost::intrusive_ptr< GenericRpcData >& rpc_data) { return true; })); + // re-register method 1 + EXPECT_FALSE(g_grpc_server->register_generic_rpc( + "method1", [](boost::intrusive_ptr< GenericRpcData >& rpc_data) { return true; })); + + auto client = std::make_unique< GrpcAsyncClient >("0.0.0.0:56789", "", ""); + client->init(); + GrpcAsyncClientWorker::create_worker("generic_worker", 1); + auto generic_stub = client->make_generic_stub("generic_worker"); + grpc::ByteBuffer cli_buf; + generic_stub->call_unary( + cli_buf, "method1", + [method = "method1"](grpc::ByteBuffer& reply, ::grpc::Status& status) { + validate_generic_reply(method, status); + }, + 1); + generic_stub->call_unary( + cli_buf, "method2", + [method = "method2"](grpc::ByteBuffer& reply, ::grpc::Status& status) { + validate_generic_reply(method, status); + }, + 1); + generic_stub->call_unary( + cli_buf, "method_unknown", + [method = "method_unknown"](grpc::ByteBuffer& reply, ::grpc::Status& status) { + validate_generic_reply(method, status); + }, + 1); +} + } // namespace grpc_helper::testing int main(int argc, char* argv[]) { From 43a84c237832ba597d9aff46410f7031821553f9 Mon Sep 17 00:00:00 2001 From: "Ravi Akella email = raakella@ebay.com" Date: Thu, 10 Nov 2022 16:31:11 -0700 Subject: [PATCH 189/385] move method definitions to cpp files fix Death test for release build --- include/grpc_helper/rpc_client.hpp | 29 ++---------- include/grpc_helper/rpc_server.hpp | 68 ++++------------------------ lib/rpc_client.cpp | 32 +++++++++++++ lib/rpc_server.cpp | 68 ++++++++++++++++++++++++++++ tests/function/echo_async_client.cpp | 1 + tests/unit/auth_test.cpp | 13 +++++- 6 files changed, 124 insertions(+), 87 deletions(-) diff --git a/include/grpc_helper/rpc_client.hpp b/include/grpc_helper/rpc_client.hpp index 63465fe5..e64e1da5 100644 --- a/include/grpc_helper/rpc_client.hpp +++ b/include/grpc_helper/rpc_client.hpp @@ -318,28 +318,10 @@ class GrpcAsyncClient : public GrpcBaseClient { m_generic_stub(std::move(stub)), m_worker(worker), m_trf_client(trf_client) {} void call_unary(const grpc::ByteBuffer& request, const std::string& method, - const generic_unary_callback_t& callback, uint32_t deadline) { - auto data = new GenericClientRpcDataInternal(callback); - data->set_deadline(deadline); - if (m_trf_client) { data->add_metadata("authorization", m_trf_client->get_typed_token()); } - // Note that async unary RPCs don't post a CQ tag in call - data->m_generic_resp_reader_ptr = m_generic_stub->PrepareUnaryCall(&data->context(), method, request, cq()); - data->m_generic_resp_reader_ptr->StartCall(); - // CQ tag posted here - data->m_generic_resp_reader_ptr->Finish(&data->reply(), &data->status(), (void*)data); - return; - } + const generic_unary_callback_t& callback, uint32_t deadline); void call_rpc(const generic_req_builder_cb_t& builder_cb, const std::string& method, - const generic_rpc_comp_cb_t& done_cb, uint32_t deadline) { - auto cd = new GenericClientRpcData(done_cb); - builder_cb(cd->m_req); - cd->set_deadline(deadline); - if (m_trf_client) { cd->add_metadata("authorization", m_trf_client->get_typed_token()); } - cd->m_generic_resp_reader_ptr = m_generic_stub->PrepareUnaryCall(&cd->context(), method, cd->m_req, cq()); - cd->m_generic_resp_reader_ptr->StartCall(); - cd->m_generic_resp_reader_ptr->Finish(&cd->reply(), &cd->status(), (void*)cd); - } + const generic_rpc_comp_cb_t& done_cb, uint32_t deadline); std::unique_ptr< grpc::GenericStub > m_generic_stub; GrpcAsyncClientWorker* m_worker; @@ -361,12 +343,7 @@ class GrpcAsyncClient : public GrpcBaseClient { return std::make_unique< AsyncStub< ServiceT > >(ServiceT::NewStub(m_channel), w, m_trf_client); } - auto make_generic_stub(const std::string& worker) { - auto w = GrpcAsyncClientWorker::get_worker(worker); - if (w == nullptr) { throw std::runtime_error("worker thread not available"); } - - return std::make_unique< GenericAsyncStub >(std::make_unique< grpc::GenericStub >(m_channel), w, m_trf_client); - } + std::unique_ptr< GenericAsyncStub > make_generic_stub(const std::string& worker); }; } // namespace grpc_helper diff --git a/include/grpc_helper/rpc_server.hpp b/include/grpc_helper/rpc_server.hpp index 571666ec..4bbd298a 100644 --- a/include/grpc_helper/rpc_server.hpp +++ b/include/grpc_helper/rpc_server.hpp @@ -10,9 +10,11 @@ #include #include #include "rpc_call.hpp" -#include "generic_service.hpp" namespace grpc_helper { +class GenericRpcData; +class GenericRpcStaticInfo; +using generic_rpc_handler_cb_t = std::function< bool(boost::intrusive_ptr< GenericRpcData >&) >; using rpc_thread_start_cb_t = std::function< void(uint32_t) >; @@ -97,67 +99,13 @@ class GrpcServer : private boost::noncopyable { }); } - bool is_auth_enabled() const { return m_auth_mgr != nullptr; } - - sisl::AuthVerifyStatus auth_verify(const std::string& token, std::string& msg) const { - return m_auth_mgr->verify(token, msg); - } + bool is_auth_enabled() const; + sisl::AuthVerifyStatus auth_verify(const std::string& token, std::string& msg) const; // generic service methods - - bool run_generic_handler_cb(const std::string& rpc_name, boost::intrusive_ptr< GenericRpcData >& rpc_data) { - generic_rpc_handler_cb_t cb; - { - std::shared_lock< std::shared_mutex > lock(m_generic_rpc_registry_mtx); - auto it = m_generic_rpc_registry.find(rpc_name); - if (it == m_generic_rpc_registry.end()) { - auto status{grpc::Status(grpc::StatusCode::UNIMPLEMENTED, - fmt::format("generic RPC {} not registered", rpc_name))}; - rpc_data->set_status(status); - // respond immediately - return true; - } - cb = it->second; - } - return cb(rpc_data); - } - - bool register_async_generic_service() { - DEBUG_ASSERT_EQ(ServerState::INITED, m_state, "register service in non-INITED state"); - if (m_generic_service_registered) { - LOGWARN("Duplicate register generic async service"); - return false; - } - m_generic_service = std::make_unique< grpc::AsyncGenericService >(); - m_builder.RegisterAsyncGenericService(m_generic_service.get()); - m_generic_rpc_static_info = std::make_unique< GenericRpcStaticInfo >(this, m_generic_service.get()); - m_generic_service_registered = true; - return true; - } - - bool register_generic_rpc(const std::string& name, const generic_rpc_handler_cb_t& rpc_handler) { - DEBUG_ASSERT_EQ(ServerState::RUNNING, m_state, "register service in non-INITED state"); - - if (!m_generic_service_registered) { - LOGMSG_ASSERT(false, "RPC registration attempted before generic service is registered"); - return false; - } - - { - std::unique_lock< std::shared_mutex > lock(m_generic_rpc_registry_mtx); - if (auto [it, happened]{m_generic_rpc_registry.emplace(name, rpc_handler)}; !happened) { - LOGWARN("duplicate generic RPC {} registration attempted", name); - return false; - } - } - - // Register one call per cq. - for (auto i = 0u; i < m_cqs.size(); ++i) { - auto rpc_call = GenericRpcData::make(m_generic_rpc_static_info.get(), i); - rpc_call->enqueue_call_request(*m_cqs[i]); - } - return true; - } + bool run_generic_handler_cb(const std::string& rpc_name, boost::intrusive_ptr< GenericRpcData >& rpc_data); + bool register_async_generic_service(); + bool register_generic_rpc(const std::string& name, const generic_rpc_handler_cb_t& rpc_handler); private: void handle_rpcs(uint32_t thread_num, const rpc_thread_start_cb_t& thread_start_cb); diff --git a/lib/rpc_client.cpp b/lib/rpc_client.cpp index 35b1e571..2026e216 100644 --- a/lib/rpc_client.cpp +++ b/lib/rpc_client.cpp @@ -112,4 +112,36 @@ void GrpcAsyncClientWorker::shutdown_all() { it.second.reset(); } } + +void GrpcAsyncClient::GenericAsyncStub::call_unary(const grpc::ByteBuffer& request, const std::string& method, + const generic_unary_callback_t& callback, uint32_t deadline) { + auto data = new GenericClientRpcDataInternal(callback); + data->set_deadline(deadline); + if (m_trf_client) { data->add_metadata("authorization", m_trf_client->get_typed_token()); } + // Note that async unary RPCs don't post a CQ tag in call + data->m_generic_resp_reader_ptr = m_generic_stub->PrepareUnaryCall(&data->context(), method, request, cq()); + data->m_generic_resp_reader_ptr->StartCall(); + // CQ tag posted here + data->m_generic_resp_reader_ptr->Finish(&data->reply(), &data->status(), (void*)data); + return; +} + +void GrpcAsyncClient::GenericAsyncStub::call_rpc(const generic_req_builder_cb_t& builder_cb, const std::string& method, + const generic_rpc_comp_cb_t& done_cb, uint32_t deadline) { + auto cd = new GenericClientRpcData(done_cb); + builder_cb(cd->m_req); + cd->set_deadline(deadline); + if (m_trf_client) { cd->add_metadata("authorization", m_trf_client->get_typed_token()); } + cd->m_generic_resp_reader_ptr = m_generic_stub->PrepareUnaryCall(&cd->context(), method, cd->m_req, cq()); + cd->m_generic_resp_reader_ptr->StartCall(); + cd->m_generic_resp_reader_ptr->Finish(&cd->reply(), &cd->status(), (void*)cd); +} + +std::unique_ptr< GrpcAsyncClient::GenericAsyncStub > GrpcAsyncClient::make_generic_stub(const std::string& worker) { + auto w = GrpcAsyncClientWorker::get_worker(worker); + if (w == nullptr) { throw std::runtime_error("worker thread not available"); } + + return std::make_unique< GrpcAsyncClient::GenericAsyncStub >(std::make_unique< grpc::GenericStub >(m_channel), w, + m_trf_client); +} } // namespace grpc_helper diff --git a/lib/rpc_server.cpp b/lib/rpc_server.cpp index 9f589e38..d4b14ae7 100644 --- a/lib/rpc_server.cpp +++ b/lib/rpc_server.cpp @@ -5,6 +5,7 @@ */ #include +#include "grpc_helper/generic_service.hpp" #include "utils.hpp" #ifdef _POSIX_THREADS @@ -128,6 +129,73 @@ void GrpcServer::shutdown() { } } +bool GrpcServer::is_auth_enabled() const { return m_auth_mgr != nullptr; } + +sisl::AuthVerifyStatus GrpcServer::auth_verify(const std::string& token, std::string& msg) const { + return m_auth_mgr->verify(token, msg); +} + +bool GrpcServer::run_generic_handler_cb(const std::string& rpc_name, boost::intrusive_ptr< GenericRpcData >& rpc_data) { + generic_rpc_handler_cb_t cb; + { + std::shared_lock< std::shared_mutex > lock(m_generic_rpc_registry_mtx); + auto it = m_generic_rpc_registry.find(rpc_name); + if (it == m_generic_rpc_registry.end()) { + auto status{ + grpc::Status(grpc::StatusCode::UNIMPLEMENTED, fmt::format("generic RPC {} not registered", rpc_name))}; + rpc_data->set_status(status); + // respond immediately + return true; + } + cb = it->second; + } + return cb(rpc_data); +} + +bool GrpcServer::register_async_generic_service() { + if (m_state.load() != ServerState::INITED) { + LOGMSG_ASSERT(false, "register service in non-INITED state"); + return false; + } + + if (m_generic_service_registered) { + LOGWARN("Duplicate register generic async service"); + return false; + } + m_generic_service = std::make_unique< grpc::AsyncGenericService >(); + m_builder.RegisterAsyncGenericService(m_generic_service.get()); + m_generic_rpc_static_info = std::make_unique< GenericRpcStaticInfo >(this, m_generic_service.get()); + m_generic_service_registered = true; + return true; +} + +bool GrpcServer::register_generic_rpc(const std::string& name, const generic_rpc_handler_cb_t& rpc_handler) { + if (m_state.load() != ServerState::RUNNING) { + LOGMSG_ASSERT(false, "register service in non-INITED state"); + return false; + } + + if (!m_generic_service_registered) { + LOGMSG_ASSERT(false, "RPC registration attempted before generic service is registered"); + return false; + } + + { + std::unique_lock< std::shared_mutex > lock(m_generic_rpc_registry_mtx); + if (auto [it, happened]{m_generic_rpc_registry.emplace(name, rpc_handler)}; !happened) { + LOGWARN("duplicate generic RPC {} registration attempted", name); + return false; + } + } + + // Register one call per cq. + for (auto i = 0u; i < m_cqs.size(); ++i) { + auto rpc_call = GenericRpcData::make(m_generic_rpc_static_info.get(), i); + rpc_call->enqueue_call_request(*m_cqs[i]); + } + return true; +} + // RPCHelper static methods bool RPCHelper::has_server_shutdown(const GrpcServer* server) { diff --git a/tests/function/echo_async_client.cpp b/tests/function/echo_async_client.cpp index bfb1c257..8bf11068 100644 --- a/tests/function/echo_async_client.cpp +++ b/tests/function/echo_async_client.cpp @@ -11,6 +11,7 @@ #include "grpc_helper/rpc_client.hpp" #include "grpc_helper/rpc_server.hpp" +#include "grpc_helper/generic_service.hpp" #include "grpc_helper_test.grpc.pb.h" using namespace grpc_helper; diff --git a/tests/unit/auth_test.cpp b/tests/unit/auth_test.cpp index 299663d4..149e2fe0 100644 --- a/tests/unit/auth_test.cpp +++ b/tests/unit/auth_test.cpp @@ -123,7 +123,7 @@ class AuthBaseTest : public ::testing::Test { m_generic_received.store(true); m_cv.notify_all(); }, - 1000000); + 1); { std::unique_lock lk(m_wait_mtx); m_cv.wait(lk, [this]() { return m_generic_received.load(); }); @@ -339,16 +339,27 @@ TEST(GenericServiceDeathTest, basic_test) { testing::GTEST_FLAG(death_test_style) = "threadsafe"; auto g_grpc_server = GrpcServer::make("0.0.0.0:56789", nullptr, 1, "", ""); // register rpc before generic service is registered +#ifndef NDEBUG ASSERT_DEATH(g_grpc_server->register_generic_rpc( "method1", [](boost::intrusive_ptr< GenericRpcData >& rpc_data) { return true; }), "Assertion .* failed"); +#else + EXPECT_FALSE(g_grpc_server->register_generic_rpc( + "method1", [](boost::intrusive_ptr< GenericRpcData >& rpc_data) { return true; })); +#endif + ASSERT_TRUE(g_grpc_server->register_async_generic_service()); // duplicate register EXPECT_FALSE(g_grpc_server->register_async_generic_service()); // register rpc before server is run +#ifndef NDEBUG ASSERT_DEATH(g_grpc_server->register_generic_rpc( "method1", [](boost::intrusive_ptr< GenericRpcData >& rpc_data) { return true; }), "Assertion .* failed"); +#else + EXPECT_FALSE(g_grpc_server->register_generic_rpc( + "method1", [](boost::intrusive_ptr< GenericRpcData >& rpc_data) { return true; })); +#endif g_grpc_server->run(); EXPECT_TRUE(g_grpc_server->register_generic_rpc( "method1", [](boost::intrusive_ptr< GenericRpcData >& rpc_data) { return true; })); From a082b4f9fc50f092bba4541eea41195a09b92557 Mon Sep 17 00:00:00 2001 From: "Ravi Akella email = raakella@ebay.com" Date: Tue, 22 Nov 2022 13:49:01 -0700 Subject: [PATCH 190/385] update pistache --- tests/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 3bae7590..995c4076 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,7 +1,7 @@ cmake_minimum_required (VERSION 3.11) find_package(GTest REQUIRED) -find_package(pistache REQUIRED) +find_package(Pistache REQUIRED) add_subdirectory(proto) From 39d529e9ed62e50871d603f5dbcbaa5bfe572882 Mon Sep 17 00:00:00 2001 From: "Ravi Akella email = raakella@ebay.com" Date: Sun, 27 Nov 2022 16:42:46 -0700 Subject: [PATCH 191/385] load root file contents in client and server --- lib/rpc_client.cpp | 6 +++++- lib/rpc_server.cpp | 7 ++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/lib/rpc_client.cpp b/lib/rpc_client.cpp index 2026e216..fefcd6bd 100644 --- a/lib/rpc_client.cpp +++ b/lib/rpc_client.cpp @@ -14,7 +14,11 @@ GrpcBaseClient::GrpcBaseClient(const std::string& server_addr, const std::shared void GrpcBaseClient::init() { ::grpc::SslCredentialsOptions ssl_opts; if (!m_ssl_cert.empty()) { - if (load_ssl_cert(m_ssl_cert, ssl_opts.pem_root_certs)) { + if (load_ssl_cert(m_ssl_cert, ssl_opts.pem_cert_chain)) { + // Quick fix to load root file in ssl creds. + // root files do not expire for a very long time + // TODO: handle root file rotation + load_ssl_cert(SECURITY_DYNAMIC_CONFIG(ssl_ca_file), ssl_opts.pem_root_certs); if (!m_target_domain.empty()) { ::grpc::ChannelArguments channel_args; channel_args.SetSslTargetNameOverride(m_target_domain); diff --git a/lib/rpc_server.cpp b/lib/rpc_server.cpp index d4b14ae7..30338a05 100644 --- a/lib/rpc_server.cpp +++ b/lib/rpc_server.cpp @@ -31,6 +31,7 @@ GrpcServer::GrpcServer(const std::string& listen_addr, uint32_t threads, const s if (!ssl_cert.empty() && !ssl_key.empty()) { std::string key_contents; std::string cert_contents; + std::string root_contents; if (!get_file_contents(ssl_cert, cert_contents)) { throw std::runtime_error("Unable to load ssl certification for grpc server"); @@ -38,10 +39,14 @@ GrpcServer::GrpcServer(const std::string& listen_addr, uint32_t threads, const s if (!get_file_contents(ssl_key, key_contents)) { throw std::runtime_error("Unable to load ssl key for grpc server"); } + // Quick fix to load root file in ssl creds. + // root files do not expire for a very long time + // TODO: handle root file rotation + get_file_contents(SECURITY_DYNAMIC_CONFIG(ssl_ca_file), root_contents); ::grpc::SslServerCredentialsOptions::PemKeyCertPair pkcp = {key_contents, cert_contents}; ::grpc::SslServerCredentialsOptions ssl_opts; - ssl_opts.pem_root_certs = ""; + ssl_opts.pem_root_certs = root_contents; ssl_opts.pem_key_cert_pairs.push_back(pkcp); m_builder.AddListeningPort(listen_addr, ::grpc::SslServerCredentials(ssl_opts)); From 08d70b1dcb7c86b4310f2e1fb99c81fd3711c9c7 Mon Sep 17 00:00:00 2001 From: "Ravi Akella email = raakella@ebay.com" Date: Mon, 28 Nov 2022 00:03:02 -0700 Subject: [PATCH 192/385] revert PR --- lib/rpc_client.cpp | 6 +----- lib/rpc_server.cpp | 7 +------ 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/lib/rpc_client.cpp b/lib/rpc_client.cpp index fefcd6bd..2026e216 100644 --- a/lib/rpc_client.cpp +++ b/lib/rpc_client.cpp @@ -14,11 +14,7 @@ GrpcBaseClient::GrpcBaseClient(const std::string& server_addr, const std::shared void GrpcBaseClient::init() { ::grpc::SslCredentialsOptions ssl_opts; if (!m_ssl_cert.empty()) { - if (load_ssl_cert(m_ssl_cert, ssl_opts.pem_cert_chain)) { - // Quick fix to load root file in ssl creds. - // root files do not expire for a very long time - // TODO: handle root file rotation - load_ssl_cert(SECURITY_DYNAMIC_CONFIG(ssl_ca_file), ssl_opts.pem_root_certs); + if (load_ssl_cert(m_ssl_cert, ssl_opts.pem_root_certs)) { if (!m_target_domain.empty()) { ::grpc::ChannelArguments channel_args; channel_args.SetSslTargetNameOverride(m_target_domain); diff --git a/lib/rpc_server.cpp b/lib/rpc_server.cpp index 30338a05..d4b14ae7 100644 --- a/lib/rpc_server.cpp +++ b/lib/rpc_server.cpp @@ -31,7 +31,6 @@ GrpcServer::GrpcServer(const std::string& listen_addr, uint32_t threads, const s if (!ssl_cert.empty() && !ssl_key.empty()) { std::string key_contents; std::string cert_contents; - std::string root_contents; if (!get_file_contents(ssl_cert, cert_contents)) { throw std::runtime_error("Unable to load ssl certification for grpc server"); @@ -39,14 +38,10 @@ GrpcServer::GrpcServer(const std::string& listen_addr, uint32_t threads, const s if (!get_file_contents(ssl_key, key_contents)) { throw std::runtime_error("Unable to load ssl key for grpc server"); } - // Quick fix to load root file in ssl creds. - // root files do not expire for a very long time - // TODO: handle root file rotation - get_file_contents(SECURITY_DYNAMIC_CONFIG(ssl_ca_file), root_contents); ::grpc::SslServerCredentialsOptions::PemKeyCertPair pkcp = {key_contents, cert_contents}; ::grpc::SslServerCredentialsOptions ssl_opts; - ssl_opts.pem_root_certs = root_contents; + ssl_opts.pem_root_certs = ""; ssl_opts.pem_key_cert_pairs.push_back(pkcp); m_builder.AddListeningPort(listen_addr, ::grpc::SslServerCredentials(ssl_opts)); From 656e3c11158a06d934cbc94491e8bc1516fa05a6 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 28 Nov 2022 10:01:23 -0700 Subject: [PATCH 193/385] Update Options library to 3.x --- conanfile.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/conanfile.py b/conanfile.py index 015b08ac..d021fff7 100644 --- a/conanfile.py +++ b/conanfile.py @@ -38,7 +38,7 @@ class SISLConan(ConanFile): exports_sources = ("CMakeLists.txt", "cmake/*", "include/*", "src/*", "LICENSE") def build_requirements(self): - self.build_requires("benchmark/1.6.1") + self.build_requires("benchmark/1.7.0") self.build_requires("gtest/1.11.0") if self.settings.compiler in ["gcc"]: self.build_requires("pistache/cci.20201127") @@ -51,7 +51,7 @@ def requirements(self): # Generic packages (conan-center) self.requires("boost/1.79.0") self.requires("cpr/1.8.1") - self.requires("cxxopts/2.2.1") + self.requires("cxxopts/3.0.0") self.requires("flatbuffers/1.12.0") if self.settings.os in ["Linux"]: self.requires("folly/2022.01.31.00") From 1305b265daaaa74c7322055a2298e8121a216470 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 28 Nov 2022 16:09:51 -0700 Subject: [PATCH 194/385] Revert options update, segfaults. --- conanfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index d021fff7..31a50f63 100644 --- a/conanfile.py +++ b/conanfile.py @@ -51,7 +51,7 @@ def requirements(self): # Generic packages (conan-center) self.requires("boost/1.79.0") self.requires("cpr/1.8.1") - self.requires("cxxopts/3.0.0") + self.requires("cxxopts/2.2.1") self.requires("flatbuffers/1.12.0") if self.settings.os in ["Linux"]: self.requires("folly/2022.01.31.00") From bea1796c11b1af7446e0b3dbdf55884cf2850b5f Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 7 Dec 2022 11:43:56 -0700 Subject: [PATCH 195/385] Added CHANGELOG.md --- CHANGELOG.md | 15 +++++++++++++++ conanfile.py | 6 +++--- 2 files changed, 18 insertions(+), 3 deletions(-) create mode 100644 CHANGELOG.md diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..3243b80f --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,15 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +### Added + +### Fixed + +- Moved SISL code to github.com; start Changelog + +[Unreleased]: https://github.com/eBay/IOManager/compare/c5b7966...HEAD diff --git a/conanfile.py b/conanfile.py index 31a50f63..2373a591 100644 --- a/conanfile.py +++ b/conanfile.py @@ -110,15 +110,15 @@ def build(self): def package(self): lib_dir = join(self.package_folder, "lib") - copy(self, "LICENSE", self.source_folder, join(self.package_folder, "licenses/"), keep_path=False) + copy(self, "LICENSE", self.source_folder, join(self.package_folder, "licenses"), keep_path=False) copy(self, "*.lib", self.build_folder, lib_dir, keep_path=False) copy(self, "*.a", self.build_folder, lib_dir, keep_path=False) copy(self, "*.so*", self.build_folder, lib_dir, keep_path=False) copy(self, "*.dylib*", self.build_folder, lib_dir, keep_path=False) copy(self, "*.dll*", self.build_folder, join(self.package_folder, "bin"), keep_path=False) copy(self, "*.so*", self.build_folder, lib_dir, keep_path=False) - copy(self, "*.proto", join(self.source_folder, "src/flip/proto/"), join(self.package_folder, "proto/flip/"), keep_path=False) - copy(self, "*", join(self.source_folder, "src/flip/client/python/"), join(self.package_folder, "bindings/flip/python/"), keep_path=False) + copy(self, "*.proto", join(self.source_folder, "src", "flip", "proto"), join(self.package_folder, "proto", "flip"), keep_path=False) + copy(self, "*", join(self.source_folder, "src", "flip", "client", "python"), join(self.package_folder, "bindings", "flip", "python"), keep_path=False) copy(self, "*.h*", join(self.source_folder, "include"), join(self.package_folder, "include"), keep_path=True) From 6cad84a939d5d3f0a1660861555790f63e25d590 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Wed, 7 Dec 2022 23:44:40 -0800 Subject: [PATCH 196/385] New Simple cache utility to store individual key/value instead of range key (#66) --- src/.clang-format => .clang-format | 0 conanfile.py | 2 +- {src => include/sisl}/cache/evictor.hpp | 3 +- .../sisl}/cache/hash_entry_base.hpp | 0 {src => include/sisl}/cache/lru_evictor.hpp | 2 +- {src => include/sisl}/cache/range_cache.hpp | 4 +- {src => include/sisl}/cache/range_hashmap.hpp | 3 +- include/sisl/cache/simple_cache.hpp | 104 +++++++ include/sisl/cache/simple_hashmap.hpp | 273 ++++++++++++++++++ src/CMakeLists.txt | 1 - src/auth_manager/CMakeLists.txt | 2 +- src/cache/CMakeLists.txt | 10 + src/cache/lru_evictor.cpp | 128 ++++---- src/cache/tests/test_range_cache.cpp | 6 +- src/cache/tests/test_range_hashmap.cpp | 88 +----- src/cache/tests/test_simple_cache.cpp | 200 +++++++++++++ src/fds/CMakeLists.txt | 6 +- src/fds/tests/test_stream_tracker.cpp | 4 +- src/file_watcher/CMakeLists.txt | 2 +- src/flip/CMakeLists.txt | 4 +- src/metrics/CMakeLists.txt | 6 +- src/options/CMakeLists.txt | 2 +- src/settings/CMakeLists.txt | 2 +- src/utility/CMakeLists.txt | 10 +- src/version/CMakeLists.txt | 2 +- src/wisr/CMakeLists.txt | 2 +- 26 files changed, 689 insertions(+), 177 deletions(-) rename src/.clang-format => .clang-format (100%) rename {src => include/sisl}/cache/evictor.hpp (97%) rename {src => include/sisl}/cache/hash_entry_base.hpp (100%) rename {src => include/sisl}/cache/lru_evictor.hpp (99%) rename {src => include/sisl}/cache/range_cache.hpp (98%) rename {src => include/sisl}/cache/range_hashmap.hpp (99%) create mode 100644 include/sisl/cache/simple_cache.hpp create mode 100644 include/sisl/cache/simple_hashmap.hpp create mode 100644 src/cache/tests/test_simple_cache.cpp diff --git a/src/.clang-format b/.clang-format similarity index 100% rename from src/.clang-format rename to .clang-format diff --git a/conanfile.py b/conanfile.py index 2373a591..d18d5c37 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.2.6" + version = "8.2.7" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/src/cache/evictor.hpp b/include/sisl/cache/evictor.hpp similarity index 97% rename from src/cache/evictor.hpp rename to include/sisl/cache/evictor.hpp index e4643479..da2fb1b8 100644 --- a/src/cache/evictor.hpp +++ b/include/sisl/cache/evictor.hpp @@ -20,7 +20,8 @@ #include #include #include -#include "hash_entry_base.hpp" +#include +#include namespace sisl { typedef ValueEntryBase CacheRecord; diff --git a/src/cache/hash_entry_base.hpp b/include/sisl/cache/hash_entry_base.hpp similarity index 100% rename from src/cache/hash_entry_base.hpp rename to include/sisl/cache/hash_entry_base.hpp diff --git a/src/cache/lru_evictor.hpp b/include/sisl/cache/lru_evictor.hpp similarity index 99% rename from src/cache/lru_evictor.hpp rename to include/sisl/cache/lru_evictor.hpp index f5628ef5..c33f173f 100644 --- a/src/cache/lru_evictor.hpp +++ b/include/sisl/cache/lru_evictor.hpp @@ -22,7 +22,7 @@ #include #include #include -#include "evictor.hpp" +#include using namespace boost::intrusive; diff --git a/src/cache/range_cache.hpp b/include/sisl/cache/range_cache.hpp similarity index 98% rename from src/cache/range_cache.hpp rename to include/sisl/cache/range_cache.hpp index f0ec7c06..cd2488db 100644 --- a/src/cache/range_cache.hpp +++ b/include/sisl/cache/range_cache.hpp @@ -17,8 +17,8 @@ #pragma once #include -#include "evictor.hpp" -#include "range_hashmap.hpp" +#include +#include namespace sisl { diff --git a/src/cache/range_hashmap.hpp b/include/sisl/cache/range_hashmap.hpp similarity index 99% rename from src/cache/range_hashmap.hpp rename to include/sisl/cache/range_hashmap.hpp index 7e9a487e..654fccc7 100644 --- a/src/cache/range_hashmap.hpp +++ b/include/sisl/cache/range_hashmap.hpp @@ -34,8 +34,7 @@ #include #include #include - -#include "hash_entry_base.hpp" +#include namespace sisl { diff --git a/include/sisl/cache/simple_cache.hpp b/include/sisl/cache/simple_cache.hpp new file mode 100644 index 00000000..851a8378 --- /dev/null +++ b/include/sisl/cache/simple_cache.hpp @@ -0,0 +1,104 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ +#pragma once + +#include +#include +#include + +using namespace std::placeholders; + +namespace sisl { + +template < typename K, typename V > +class SimpleCache { +private: + std::shared_ptr< Evictor > m_evictor; + key_extractor_cb_t< K, V > m_key_extract_cb; + SimpleHashMap< K, V > m_map; + uint32_t m_record_family_id; + uint32_t m_per_value_size; + + static thread_local std::set< K > t_failed_keys; + +public: + SimpleCache(const std::shared_ptr< Evictor >& evictor, uint32_t num_buckets, uint32_t per_val_size, + key_extractor_cb_t< K, V >&& extract_cb, Evictor::can_evict_cb_t evict_cb = nullptr) : + m_evictor{evictor}, + m_key_extract_cb{std::move(extract_cb)}, + m_map{num_buckets, m_key_extract_cb, std::bind(&SimpleCache< K, V >::on_hash_operation, this, _1, _2, _3)}, + m_per_value_size{per_val_size} { + m_record_family_id = m_evictor->register_record_family(std::move(evict_cb)); + } + + ~SimpleCache() { m_evictor->unregister_record_family(m_record_family_id); } + + bool insert(const V& value) { + K k = m_key_extract_cb(value); + return m_map.insert(k, value); + } + + bool upsert(const V& value) { + K k = m_key_extract_cb(value); + return m_map.upsert(k, value); + } + + bool remove(const K& key, V& out_val) { return m_map.erase(key, out_val); } + + bool get(const K& key, V& out_val) { return m_map.get(key, out_val); } + +private: + void on_hash_operation(const CacheRecord& r, const K& key, const hash_op_t op) { + CacheRecord& record = const_cast< CacheRecord& >(r); + const auto hash_code = SimpleHashMap< K, V >::compute_hash(key); + + switch (op) { + case hash_op_t::CREATE: + record.set_record_family(m_record_family_id); + record.set_size(m_per_value_size); + if (!m_evictor->add_record(hash_code, record)) { + // We were not able to evict any, so mark this record and we will erase them upon all callbacks are done + t_failed_keys.insert(key); + } + break; + + case hash_op_t::DELETE: + if (t_failed_keys.size()) { + // Check if this is a delete of failed keys, if so lets not add it to record + if (t_failed_keys.find(key) != t_failed_keys.end()) { return; } + } + m_evictor->remove_record(hash_code, record); + break; + + case hash_op_t::ACCESS: + m_evictor->record_accessed(hash_code, record); + break; + + case hash_op_t::RESIZE: { + DEBUG_ASSERT(false, "Don't expect RESIZE operation for simple cache entries"); + break; + } + default: + DEBUG_ASSERT(false, "Invalid hash_op"); + break; + } + } +}; + +template < typename K, typename V > +thread_local std::set< K > SimpleCache< K, V >::t_failed_keys; +} // namespace sisl diff --git a/include/sisl/cache/simple_hashmap.hpp b/include/sisl/cache/simple_hashmap.hpp new file mode 100644 index 00000000..8d3bd099 --- /dev/null +++ b/include/sisl/cache/simple_hashmap.hpp @@ -0,0 +1,273 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ +#pragma once + +#include +#include +#include +#include +#if defined __clang__ or defined __GNUC__ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpedantic" +#pragma GCC diagnostic ignored "-Wattributes" +#endif +#include +#if defined __clang__ or defined __GNUC__ +#pragma GCC diagnostic pop +#endif + +#include +#include +#include + +namespace sisl { + +template < typename K, typename V > +class SimpleHashBucket; + +ENUM(hash_op_t, uint8_t, CREATE, ACCESS, DELETE, RESIZE) + +template < typename K > +using key_access_cb_t = std::function< void(const ValueEntryBase&, const K&, const hash_op_t) >; + +template < typename K, typename V > +using key_extractor_cb_t = std::function< K(const V&) >; + +static constexpr size_t s_start_seed = 0; // TODO: Pickup a better seed + +///////////////////////////////////////////// RangeHashMap Declaration /////////////////////////////////// +template < typename K, typename V > +class SimpleHashMap { +private: + uint32_t m_nbuckets; + SimpleHashBucket< K, V >* m_buckets; + key_extractor_cb_t< K, V > m_key_extract_cb; + key_access_cb_t< K > m_key_access_cb; + + static thread_local SimpleHashMap< K, V >* s_cur_hash_map; + +#ifdef GLOBAL_HASHSET_LOCK + mutable std::mutex m; +#endif + +public: + SimpleHashMap(uint32_t nBuckets, const key_extractor_cb_t< K, V >& key_extractor, + key_access_cb_t< K > access_cb = nullptr); + ~SimpleHashMap(); + + bool insert(const K& key, const V& value); + bool upsert(const K& key, const V& value); + bool get(const K& input_key, V& out_val); + bool erase(const K& key, V& out_val); + + static void set_current_instance(SimpleHashMap< K, V >* hmap) { s_cur_hash_map = hmap; } + static SimpleHashMap< K, V >* get_current_instance() { return s_cur_hash_map; } + static key_access_cb_t< K >& get_access_cb() { return get_current_instance()->m_key_access_cb; } + static key_extractor_cb_t< K, V >& extractor_cb() { return get_current_instance()->m_key_extract_cb; } + + template < typename... Args > + static void call_access_cb(Args&&... args) { + if (get_current_instance()->m_key_access_cb) { + (get_current_instance()->m_key_access_cb)(std::forward< Args >(args)...); + } + } + static size_t compute_hash(const K& key) { + size_t seed = s_start_seed; + boost::hash_combine(seed, key); + return seed; + } + +private: + SimpleHashBucket< K, V >& get_bucket(const K& key) const; + SimpleHashBucket< K, V >& get_bucket(size_t hash_code) const; +}; + +///////////////////////////////////////////// MultiEntryHashNode Definitions /////////////////////////////////// +template < typename V > +struct SingleEntryHashNode : public ValueEntryBase, public boost::intrusive::slist_base_hook<> { + V m_value; + SingleEntryHashNode(const V& value) : m_value{value} {} +}; + +///////////////////////////////////////////// ValueEntryRange Definitions /////////////////////////////////// + +template < typename K, typename V > +thread_local sisl::SimpleHashMap< K, V >* sisl::SimpleHashMap< K, V >::s_cur_hash_map{nullptr}; + +///////////////////////////////////////////// SimpleHashBucket Definitions /////////////////////////////////// +template < typename K, typename V > +class SimpleHashBucket { +private: +#ifndef GLOBAL_HASHSET_LOCK + mutable folly::SharedMutexWritePriority m_lock; +#endif + typedef boost::intrusive::slist< SingleEntryHashNode< V > > hash_node_list_t; + hash_node_list_t m_list; + +public: + SimpleHashBucket() = default; + + ~SimpleHashBucket() { + auto it{m_list.begin()}; + while (it != m_list.end()) { + SingleEntryHashNode< V >* n = &*it; + it = m_list.erase(it); + delete n; + } + } + + bool insert(const K& input_key, const V& input_value, bool overwrite_ok) { +#ifndef GLOBAL_HASHSET_LOCK + folly::SharedMutexWritePriority::WriteHolder holder(m_lock); +#endif + SingleEntryHashNode< V >* n = nullptr; + auto it = m_list.begin(); + for (auto itend{m_list.end()}; it != itend; ++it) { + const K k = SimpleHashMap< K, V >::extractor_cb()(it->m_value); + if (input_key > k) { + break; + } else if (input_key == k) { + n = &*it; + } + } + + if (n == nullptr) { + n = new SingleEntryHashNode< V >(input_value); + m_list.insert(it, *n); + access_cb(*n, input_key, hash_op_t::CREATE); + return true; + } else { + if (overwrite_ok) { + n->m_value = input_value; + access_cb(*n, input_key, hash_op_t::ACCESS); + } + return false; + } + } + + bool get(const K& input_key, V& out_val) { +#ifndef GLOBAL_HASHSET_LOCK + folly::SharedMutexWritePriority::ReadHolder holder(m_lock); +#endif + bool found{false}; + for (const auto& n : m_list) { + const K k = SimpleHashMap< K, V >::extractor_cb()(n.m_value); + if (input_key > k) { + break; + } else if (input_key == k) { + out_val = n.m_value; + found = true; + access_cb(n, input_key, hash_op_t::ACCESS); + break; + } + } + return found; + } + + bool erase(const K& input_key, V& out_val) { +#ifndef GLOBAL_HASHSET_LOCK + folly::SharedMutexWritePriority::WriteHolder holder(m_lock); +#endif + SingleEntryHashNode< V >* n = nullptr; + + auto it = m_list.begin(); + for (auto itend{m_list.end()}; it != itend; ++it) { + const K k = SimpleHashMap< K, V >::extractor_cb()(it->m_value); + if (input_key > k) { + break; + } else if (input_key == k) { + n = &*it; + break; + } + } + + if (n) { + access_cb(*n, input_key, hash_op_t::DELETE); + out_val = n->m_value; + m_list.erase(it); + delete n; + return true; + } + return false; + } + +private: + static void access_cb(const SingleEntryHashNode< V >& node, const K& key, hash_op_t op) { + SimpleHashMap< K, V >::call_access_cb((const ValueEntryBase&)node, key, op); + } +}; + +///////////////////////////////////////////// RangeHashMap Definitions /////////////////////////////////// +template < typename K, typename V > +SimpleHashMap< K, V >::SimpleHashMap(uint32_t nBuckets, const key_extractor_cb_t< K, V >& extract_cb, + key_access_cb_t< K > access_cb) : + m_nbuckets{nBuckets}, m_key_extract_cb{extract_cb}, m_key_access_cb{std::move(access_cb)} { + m_buckets = new SimpleHashBucket< K, V >[nBuckets]; +} + +template < typename K, typename V > +SimpleHashMap< K, V >::~SimpleHashMap() { + delete[] m_buckets; +} + +template < typename K, typename V > +bool SimpleHashMap< K, V >::insert(const K& key, const V& value) { +#ifdef GLOBAL_HASHSET_LOCK + std::lock_guard< std::mutex > lk(m); +#endif + set_current_instance(this); + return get_bucket(key).insert(key, value, false /* overwrite_ok */); +} + +template < typename K, typename V > +bool SimpleHashMap< K, V >::upsert(const K& key, const V& value) { +#ifdef GLOBAL_HASHSET_LOCK + std::lock_guard< std::mutex > lk(m); +#endif + set_current_instance(this); + return get_bucket(key).insert(key, value, true /* overwrite_ok */); +} + +template < typename K, typename V > +bool SimpleHashMap< K, V >::get(const K& key, V& out_val) { +#ifdef GLOBAL_HASHSET_LOCK + std::lock_guard< std::mutex > lk(m); +#endif + set_current_instance(this); + return get_bucket(key).get(key, out_val); +} + +template < typename K, typename V > +bool SimpleHashMap< K, V >::erase(const K& key, V& out_val) { +#ifdef GLOBAL_HASHSET_LOCK + std::lock_guard< std::mutex > lk(m); +#endif + set_current_instance(this); + return get_bucket(key).erase(key, out_val); +} + +template < typename K, typename V > +SimpleHashBucket< K, V >& SimpleHashMap< K, V >::get_bucket(const K& key) const { + return (m_buckets[compute_hash(key) % m_nbuckets]); +} + +template < typename K, typename V > +SimpleHashBucket< K, V >& SimpleHashMap< K, V >::get_bucket(size_t hash_code) const { + return (m_buckets[hash_code % m_nbuckets]); +} + +} // namespace sisl diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 03e5d67d..68e19dd1 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,6 +1,5 @@ cmake_minimum_required (VERSION 3.11) -#add_subdirectory (btree) add_subdirectory (logging) add_subdirectory (options) add_subdirectory (version) diff --git a/src/auth_manager/CMakeLists.txt b/src/auth_manager/CMakeLists.txt index 68a242fa..aa554c36 100644 --- a/src/auth_manager/CMakeLists.txt +++ b/src/auth_manager/CMakeLists.txt @@ -39,4 +39,4 @@ target_link_libraries(test_auth_mgr jwt-cpp::jwt-cpp GTest::gmock ) -add_test(NAME test_auth_mgr COMMAND test_auth_mgr) +add_test(NAME AuthManager COMMAND test_auth_mgr) diff --git a/src/cache/CMakeLists.txt b/src/cache/CMakeLists.txt index 707deb4b..4805d6be 100644 --- a/src/cache/CMakeLists.txt +++ b/src/cache/CMakeLists.txt @@ -12,6 +12,7 @@ target_sources(test_range_hashmap PRIVATE ) target_include_directories(test_range_hashmap BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries(test_range_hashmap sisl ${COMMON_DEPS} Folly::Folly GTest::gtest) +add_test(NAME RangeHashMap COMMAND test_range_hashmap --num_iters 10000) add_executable(test_range_cache) target_sources(test_range_cache PRIVATE @@ -19,3 +20,12 @@ target_sources(test_range_cache PRIVATE ) target_include_directories(test_range_cache BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries(test_range_cache sisl ${COMMON_DEPS} Folly::Folly GTest::gtest) +add_test(NAME RangeCache COMMAND test_range_cache --num_iters 1000) + +add_executable(test_simple_cache) +target_sources(test_simple_cache PRIVATE + tests/test_simple_cache.cpp + ) +target_include_directories(test_simple_cache BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) +target_link_libraries(test_simple_cache sisl ${COMMON_DEPS} Folly::Folly GTest::gtest) +add_test(NAME SimpleCache COMMAND test_simple_cache --num_iters 1000) \ No newline at end of file diff --git a/src/cache/lru_evictor.cpp b/src/cache/lru_evictor.cpp index 7e6949f4..1ba796d5 100644 --- a/src/cache/lru_evictor.cpp +++ b/src/cache/lru_evictor.cpp @@ -8,93 +8,103 @@ * You may obtain a copy of the License at * https://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software distributed - * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. + * Unless required by applicable law or agreed to in writing, software + *distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + *WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + *License for the specific language governing permissions and limitations under + *the License. * *********************************************************************************/ -#include "lru_evictor.hpp" +#include namespace sisl { -LRUEvictor::LRUEvictor(const int64_t max_size, const uint32_t num_partitions) : Evictor(max_size, num_partitions) { - m_partitions = std::make_unique< LRUPartition[] >(num_partitions); - for (uint32_t i{0}; i < num_partitions; ++i) { - m_partitions[i].init(this, i, uint64_cast(max_size / num_partitions)); - } +LRUEvictor::LRUEvictor(const int64_t max_size, const uint32_t num_partitions) + : Evictor(max_size, num_partitions) { + m_partitions = std::make_unique(num_partitions); + for (uint32_t i{0}; i < num_partitions; ++i) { + m_partitions[i].init(this, i, uint64_cast(max_size / num_partitions)); + } } -bool LRUEvictor::add_record(uint64_t hash_code, CacheRecord& record) { - return get_partition(hash_code).add_record(record); +bool LRUEvictor::add_record(uint64_t hash_code, CacheRecord &record) { + return get_partition(hash_code).add_record(record); } -void LRUEvictor::remove_record(uint64_t hash_code, CacheRecord& record) { - get_partition(hash_code).remove_record(record); +void LRUEvictor::remove_record(uint64_t hash_code, CacheRecord &record) { + get_partition(hash_code).remove_record(record); } -void LRUEvictor::record_accessed(uint64_t hash_code, CacheRecord& record) { - get_partition(hash_code).record_accessed(record); +void LRUEvictor::record_accessed(uint64_t hash_code, CacheRecord &record) { + get_partition(hash_code).record_accessed(record); } -void LRUEvictor::record_resized(uint64_t hash_code, const CacheRecord& record, uint32_t old_size) { - get_partition(hash_code).record_resized(record, old_size); +void LRUEvictor::record_resized(uint64_t hash_code, const CacheRecord &record, + uint32_t old_size) { + get_partition(hash_code).record_resized(record, old_size); } -bool LRUEvictor::LRUPartition::add_record(CacheRecord& record) { - std::unique_lock guard{m_list_guard}; - if (will_fill(record.size()) > m_max_size) { - if (!do_evict(record.record_family_id(), record.size())) { return false; } +bool LRUEvictor::LRUPartition::add_record(CacheRecord &record) { + std::unique_lock guard{m_list_guard}; + if (will_fill(record.size()) > m_max_size) { + if (!do_evict(record.record_family_id(), record.size())) { + return false; } - m_list.push_back(record); - m_filled_size += record.size(); - return true; + } + m_list.push_back(record); + m_filled_size += record.size(); + return true; } -void LRUEvictor::LRUPartition::remove_record(CacheRecord& record) { - std::unique_lock guard{m_list_guard}; - auto it = m_list.iterator_to(record); - m_filled_size -= record.size(); - m_list.erase(it); +void LRUEvictor::LRUPartition::remove_record(CacheRecord &record) { + std::unique_lock guard{m_list_guard}; + auto it = m_list.iterator_to(record); + m_filled_size -= record.size(); + m_list.erase(it); } -void LRUEvictor::LRUPartition::record_accessed(CacheRecord& record) { - std::unique_lock guard{m_list_guard}; - m_list.erase(m_list.iterator_to(record)); - m_list.push_back(record); +void LRUEvictor::LRUPartition::record_accessed(CacheRecord &record) { + std::unique_lock guard{m_list_guard}; + m_list.erase(m_list.iterator_to(record)); + m_list.push_back(record); } -void LRUEvictor::LRUPartition::record_resized(const CacheRecord& record, const uint32_t old_size) { - std::unique_lock guard{m_list_guard}; - m_filled_size -= (record.size() - old_size); +void LRUEvictor::LRUPartition::record_resized(const CacheRecord &record, + const uint32_t old_size) { + std::unique_lock guard{m_list_guard}; + m_filled_size -= (record.size() - old_size); } -bool LRUEvictor::LRUPartition::do_evict(const uint32_t record_fid, const uint32_t needed_size) { - size_t count{0}; +bool LRUEvictor::LRUPartition::do_evict(const uint32_t record_fid, + const uint32_t needed_size) { + size_t count{0}; - auto it = std::begin(m_list); - while (will_fill(needed_size) && (it != std::end(m_list))) { - CacheRecord& rec = *it; + auto it = std::begin(m_list); + while (will_fill(needed_size) && (it != std::end(m_list))) { + CacheRecord &rec = *it; - /* return the next element */ - if (!rec.is_pinned() && m_evictor->can_evict_cb(record_fid)(rec)) { - m_filled_size -= rec.size(); - it = m_list.erase(it); - } else { - ++count; - it = std::next(it); - } + /* return the next element */ + if (!rec.is_pinned() && m_evictor->can_evict_cb(record_fid)(rec)) { + m_filled_size -= rec.size(); + it = m_list.erase(it); + } else { + ++count; + it = std::next(it); } + } - if (count) { LOGDEBUG("LRU ejection had to skip {} entries", count); } - if (is_full()) { - // No available candidate to evict - LOGERROR("No cache space available: Eviction partition={} as total_entries={} rejected eviction request to add " - "size={}, already filled={}", - m_partition_num, m_list.size(), needed_size, m_filled_size); - return false; - } + if (count) { + LOGDEBUG("LRU ejection had to skip {} entries", count); + } + if (is_full()) { + // No available candidate to evict + LOGERROR("No cache space available: Eviction partition={} as " + "total_entries={} rejected eviction request to add " + "size={}, already filled={}", + m_partition_num, m_list.size(), needed_size, m_filled_size); + return false; + } - return true; + return true; } } // namespace sisl diff --git a/src/cache/tests/test_range_cache.cpp b/src/cache/tests/test_range_cache.cpp index a8a19f7f..cd68a6a5 100644 --- a/src/cache/tests/test_range_cache.cpp +++ b/src/cache/tests/test_range_cache.cpp @@ -15,7 +15,6 @@ * *********************************************************************************/ #include -#include #include #include #include @@ -28,9 +27,10 @@ #endif #include +#include #include -#include "range_cache.hpp" -#include "lru_evictor.hpp" +#include +#include using namespace sisl; SISL_LOGGING_INIT(test_rangecache) diff --git a/src/cache/tests/test_range_hashmap.cpp b/src/cache/tests/test_range_hashmap.cpp index 3ad5e70b..033593c1 100644 --- a/src/cache/tests/test_range_hashmap.cpp +++ b/src/cache/tests/test_range_hashmap.cpp @@ -23,97 +23,11 @@ #include #include #include "sisl/fds/bitset.hpp" -#include "range_hashmap.hpp" +#include using namespace sisl; SISL_LOGGING_INIT(test_hashmap) -#if 0 -struct TestRangeKey { - uint64_t m_num{0}; - uint32_t m_count{0}; - - TestRangeKey() = default; - TestRangeKey(const uint64_t n, const uint32_t c) : m_num{n}, m_count{c} {} - - std::pair< koffset_t, uint32_t > get_subset_from(const TestRangeKey& base) { - return std::make_pair<>(m_num - base.m_num, base.m_num + base.m_count - m_num); - } - - TestRangeKey get_base_key(const uint32_t split_boundary) { - return TestRangeKey{sisl::round_down(key.m_num, split_boundary), split_boundary}; - } - - // Take the range key and split them by modulo and then returning n different keyviews of it - static void split(const TestRangeKey& key, const uint32_t split_boundary, std::vector< KeyView >& out_views) { - auto base_num = sisl::round_down(key.m_num, split_boundary); - - auto base_key = key.get_base_key(split_boundary); - auto [offset, sub_size] = key.get_subset_from(base_key); - if (sub_size > key.m_count) { sub_size = key.m_count; } - out_views.emplace_back(base_key, offset, sub_size); - - auto remain_count = key.m_count - sub_size; - while (remain_count > 0) { - base_key.m_num += split_boundary; - size = std::min(split_boundary, remain_count); - out_views.emplace_back(base_key, 0, size); - remain_count -= size; - } - } - - static sisl::blob get_blob(const TestRangeKey& k) { - return sisl::blob{r_cast< uint8_t* >(&k), sizeof(TestRangeKey)}; - } - - static int compare(const TestRangeKey& k1, const TestRangeKey& k2) { - if (k2.m_num < k1.m_num) { - return -1; - } else if (k2.m_num > k1.m_num) { - return 1; - } else if (k2.m_count < k1.m_count) { - return -1; - } else if (k2.m_count > k1.m_count) { - return 1; - } else { - return 0; - } - } - - std::string to_string() const { return fmt::format("[{}-{}]", m_num, m_num + m_count - 1); } -}; - -struct TestRangeValue { -public: - TestRangeValue(const uint64_t& d1, const uint64_t& offset = 0) : m_base{d1}, m_offset{offset}, m_refcount{1} {} - TestRangeValue(const TestRangeValue& v) = default; - - bool operator==(const TestRangeValue& v) const { return (m_base == v.m_base); } - static std::string to_string(const TestRangeValue& v) { return fmt::format("{}", v.m_base + v.m_offset); } - - static void extract(TestRangeValue* v, const koffset_range_t& extract_range, uint8_t* new_buf) { - if (new_buf != nullptr) { - new (new_buf) TestRangeValue(v->m_base, v->m_offset + extract_range.second); - } else { - v->m_offset += extract_range.first; - } - } - - static bool can_erase(const TestRangeKey& k, const TestRangeValue& v) { return (--v.m_refcount == 0); } - - static void update(const TestRangeKey& base_key, const koffset_range_t range, TestRangeValue* value) { - ++value->m_refcount; - } - -private: - uint64_t m_base; - uint64_t m_offset; - int m_refcount; -}; - -DECLARE_RELOCATABLE(TestRangeValue) -#endif - static uint32_t g_max_offset; static constexpr uint32_t per_val_size = 128; diff --git a/src/cache/tests/test_simple_cache.cpp b/src/cache/tests/test_simple_cache.cpp new file mode 100644 index 00000000..6718a0cd --- /dev/null +++ b/src/cache/tests/test_simple_cache.cpp @@ -0,0 +1,200 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ +#include +#include +#include +#include +#include +#include + +#ifdef __linux__ +#include +#include +#endif + +#include +#include +#include +#include +#include + +using namespace sisl; +SISL_LOGGING_INIT(test_simplecache) + +static constexpr uint32_t g_val_size{512}; +static thread_local std::random_device g_rd{}; +static thread_local std::default_random_engine g_re{g_rd()}; + +struct Entry { + Entry(uint32_t id, const std::string& contents = "") : m_id{id}, m_contents{contents} {} + + uint32_t m_id; + std::string m_contents; +}; + +static constexpr std::array< const char, 62 > alphanum{ + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', + 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', + 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'}; + +static std::string gen_random_string(size_t len) { + std::string str; + static thread_local std::random_device rd{}; + static thread_local std::default_random_engine re{rd()}; + std::uniform_int_distribution< size_t > rand_char{0, alphanum.size() - 1}; + for (size_t i{0}; i < len; ++i) { + str += alphanum[rand_char(re)]; + } + str += '\0'; + return str; +} + +struct SimpleCacheTest : public testing::Test { +protected: + std::shared_ptr< Evictor > m_evictor; + std::unique_ptr< SimpleCache< uint32_t, std::shared_ptr< Entry > > > m_cache; + std::unordered_map< uint32_t, std::string > m_shadow_map; + + uint64_t m_cache_misses{0}; + uint64_t m_cache_hits{0}; + uint32_t m_total_keys; + +protected: + void SetUp() override { + const auto cache_size = SISL_OPTIONS["cache_size_mb"].as< uint32_t >() * 1024 * 1024; + m_evictor = std::make_unique< LRUEvictor >(cache_size, 8); + m_cache = std::make_unique< SimpleCache< uint32_t, std::shared_ptr< Entry > > >( + m_evictor, // Evictor to evict used entries + cache_size / 4096, // Total number of buckets + g_val_size, // Value size + [](const std::shared_ptr< Entry >& e) -> uint32_t { return e->m_id; }, // Method to extract key + nullptr // Method to prevent eviction + ); + + const auto cache_pct = SISL_OPTIONS["cache_pct"].as< uint32_t >(); + const auto total_data_size = (100 * cache_size) / cache_pct; + m_total_keys = total_data_size / g_val_size; + LOGINFO("Initializing cache_size={} MB, cache_pct={}, total_data_size={}", + SISL_OPTIONS["cache_size_mb"].as< uint32_t >(), cache_pct, total_data_size); + } + + void TearDown() override { + m_evictor.reset(); + m_cache.reset(); + } + + void write(uint32_t id) { + const std::string data = gen_random_string(g_val_size); + const auto [it, expected_insert] = m_shadow_map.insert_or_assign(id, data); + + bool inserted = m_cache->upsert(std::make_shared< Entry >(id, data)); + ASSERT_EQ(inserted, expected_insert) + << "Mismatch about existence of key=" << id << " between shadow_map and cache"; + } + + void read(uint32_t id) { + const auto it = m_shadow_map.find(id); + bool expected_found = (it != m_shadow_map.end()); + + std::shared_ptr< Entry > e = std::make_shared< Entry >(0); + bool found = m_cache->get(id, e); + if (found) { + ASSERT_EQ(expected_found, true) << "Object key=" << id << " is deleted, but still found in cache"; + ASSERT_EQ(e->m_contents, it->second) << "Contents for key=" << id << " mismatch"; + ++m_cache_hits; + } else if (expected_found) { + bool inserted = m_cache->insert(std::make_shared< Entry >(id, it->second)); + ASSERT_EQ(inserted, true) << "Unable to insert to the cache for key=" << id; + ++m_cache_misses; + } + } + + void remove(uint32_t id) { + const auto it = m_shadow_map.find(id); + bool expected_found = (it != m_shadow_map.end()); + + std::shared_ptr< Entry > removed_e = std::make_shared< Entry >(0); + bool removed = m_cache->remove(id, removed_e); + if (removed) { + ASSERT_EQ(expected_found, true) + << "Object for key=" << id << " is deleted already, but still found in cache"; + ASSERT_EQ(removed_e->m_contents, it->second) << "Contents for key=" << id << " mismatch prior to removal"; + ++m_cache_hits; + } else { + ++m_cache_misses; + } + + m_shadow_map.erase(id); + } +}; + +VENUM(op_t, uint8_t, READ = 0, WRITE = 1, REMOVE = 2) + +TEST_F(SimpleCacheTest, RandomData) { + static std::uniform_int_distribution< uint8_t > op_generator{0, 2}; + static std::uniform_int_distribution< uint32_t > key_generator{0, this->m_total_keys}; + + uint32_t nread_ops{0}; + uint32_t nwrite_ops{0}; + uint32_t nremove_ops{0}; + + auto num_iters = SISL_OPTIONS["num_iters"].as< uint32_t >(); + LOGINFO("INFO: Do random read/write operations on all chunks for {} iters", num_iters); + for (uint32_t i{0}; i < num_iters; ++i) { + const op_t op = s_cast< op_t >(op_generator(g_re)); + const uint32_t id = key_generator(g_re); + + LOGDEBUG("INFO: Doing op={} for key=({})", enum_name(op), id); + switch (op) { + case op_t::READ: + read(id); + ++nread_ops; + break; + case op_t::WRITE: + write(id); + ++nwrite_ops; + break; + case op_t::REMOVE: + remove(id); + ++nremove_ops; + break; + } + } + const auto cache_ops = m_cache_hits + m_cache_misses; + LOGINFO("Executed read_ops={}, write_ops={} remove_ops={}", nread_ops, nwrite_ops, nremove_ops); + LOGINFO("Cache hits={} ({}%) Cache Misses={} ({}%)", m_cache_hits, (100 * (double)m_cache_hits) / cache_ops, + m_cache_misses, (100 * (double)m_cache_misses) / cache_ops); +} + +SISL_OPTIONS_ENABLE(logging, test_simplecache) +SISL_OPTION_GROUP(test_simplecache, + (cache_size_mb, "", "cache_size_mb", "cache size in mb", + ::cxxopts::value< uint32_t >()->default_value("100"), "number"), + (cache_pct, "", "cache_pct", "percentage of cache", + ::cxxopts::value< uint32_t >()->default_value("50"), "number"), + (num_iters, "", "num_iters", "number of iterations for rand ops", + ::cxxopts::value< uint32_t >()->default_value("65536"), "number")) + +int main(int argc, char* argv[]) { + ::testing::InitGoogleTest(&argc, argv); + SISL_OPTIONS_LOAD(argc, argv, logging, test_simplecache) + sisl::logging::SetLogger("test_simplecache"); + spdlog::set_pattern("[%D %T%z] [%^%L%$] [%t] %v"); + + auto ret = RUN_ALL_TESTS(); + return ret; +} diff --git a/src/fds/CMakeLists.txt b/src/fds/CMakeLists.txt index 86392b74..f4f84d38 100644 --- a/src/fds/CMakeLists.txt +++ b/src/fds/CMakeLists.txt @@ -13,26 +13,28 @@ target_sources(test_stream_tracker PRIVATE tests/test_stream_tracker.cpp ) target_link_libraries(test_stream_tracker sisl ${COMMON_DEPS} GTest::gtest) +add_test(NAME StreamTracker COMMAND test_stream_tracker) add_executable(test_atomic_status_counter) target_sources(test_atomic_status_counter PRIVATE tests/test_atomic_status_counter.cpp ) target_link_libraries(test_atomic_status_counter sisl ${COMMON_DEPS} GTest::gtest atomic) -add_test(NAME atomic_status_counter COMMAND test_atomic_status_counter) +add_test(NAME AtomicStatusCounter COMMAND test_atomic_status_counter) add_executable(test_bitset) target_sources(test_bitset PRIVATE tests/test_bitset.cpp ) target_link_libraries(test_bitset sisl ${COMMON_DEPS} GTest::gtest) -add_test(NAME bitset COMMAND test_bitset) +add_test(NAME Bitset COMMAND test_bitset) add_executable(test_bitword) target_sources(test_bitword PRIVATE tests/test_bitword.cpp ) target_link_libraries(test_bitword sisl ${COMMON_DEPS} GTest::gtest) +add_test(NAME Bitword COMMAND test_bitset) add_executable(obj_allocator_benchmark) target_sources(obj_allocator_benchmark PRIVATE diff --git a/src/fds/tests/test_stream_tracker.cpp b/src/fds/tests/test_stream_tracker.cpp index eb0c55a1..448f7c49 100644 --- a/src/fds/tests/test_stream_tracker.cpp +++ b/src/fds/tests/test_stream_tracker.cpp @@ -52,7 +52,7 @@ struct StreamTrackerTest : public testing::Test { StreamTrackerTest() {} size_t get_mem_size() { auto json = MetricsFarm::getInstance().get_result_in_json(); - return (size_t)json["StreamTracker"]["StreamTracker"]["Gauges"]["Total Memsize for stream tracker"]; + return (size_t)json["StreamTracker"]["StreamTracker_2"]["Gauges"]["Total Memsize for stream tracker"]; } }; } // namespace @@ -108,7 +108,7 @@ TEST_F(StreamTrackerTest, ForceRealloc) { m_tracker.create_and_complete(i, gen(s_engine)); } EXPECT_EQ(m_tracker.completed_upto(), far_idx); - EXPECT_EQ(get_mem_size(), prev_size); + EXPECT_EQ(get_mem_size(), prev_size * 2); } int main(int argc, char* argv[]) { diff --git a/src/file_watcher/CMakeLists.txt b/src/file_watcher/CMakeLists.txt index 7078cd12..36cb0bb9 100644 --- a/src/file_watcher/CMakeLists.txt +++ b/src/file_watcher/CMakeLists.txt @@ -11,4 +11,4 @@ target_sources(test_file_watcher PRIVATE file_watcher_test.cpp ) target_link_libraries(test_file_watcher sisl ${COMMON_DEPS} GTest::gtest GTest::gmock) -add_test(NAME test_file_watcher COMMAND test_file_watcher) +add_test(NAME FileWatcher COMMAND test_file_watcher) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 7fc5013b..34640c19 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -26,7 +26,7 @@ target_sources(test_flip PRIVATE lib/test_flip.cpp ) target_link_libraries(test_flip flip cxxopts::cxxopts) -add_test(NAME TestFlip COMMAND test_flip) +add_test(NAME Flip COMMAND test_flip) add_executable(test_flip_server) target_sources(test_flip_server PRIVATE @@ -39,4 +39,4 @@ target_sources(test_flip_local_client PRIVATE client/local/test_flip_local_client.cpp ) target_link_libraries(test_flip_local_client flip cxxopts::cxxopts) -add_test(NAME TestFlipLocalClient COMMAND test_flip_local_client) +add_test(NAME FlipLocalClient COMMAND test_flip_local_client) diff --git a/src/metrics/CMakeLists.txt b/src/metrics/CMakeLists.txt index a27db4f3..f14e62f7 100644 --- a/src/metrics/CMakeLists.txt +++ b/src/metrics/CMakeLists.txt @@ -20,18 +20,18 @@ target_sources(metrics_farm_test PRIVATE tests/farm_test.cpp ) target_link_libraries(metrics_farm_test sisl ${COMMON_DEPS} GTest::gtest) -add_test(NAME MetricsFarmTest COMMAND metrics_farm_test) +add_test(NAME MetricsFarm COMMAND metrics_farm_test) add_executable(metrics_wrapper_test) target_sources(metrics_wrapper_test PRIVATE tests/wrapper_test.cpp ) target_link_libraries(metrics_wrapper_test sisl ${COMMON_DEPS} GTest::gtest) -add_test(NAME MetricsWrapperTest COMMAND metrics_wrapper_test) +add_test(NAME MetricsWrapper COMMAND metrics_wrapper_test) add_executable(metrics_benchmark) target_sources(metrics_benchmark PRIVATE tests/metrics_benchmark.cpp ) target_link_libraries(metrics_benchmark sisl ${COMMON_DEPS} benchmark::benchmark) -add_test(NAME MetricsBenchmarkTest COMMAND metrics_benchmark) +add_test(NAME MetricsBenchmark COMMAND metrics_benchmark) diff --git a/src/options/CMakeLists.txt b/src/options/CMakeLists.txt index 03cb3e05..4fc99004 100644 --- a/src/options/CMakeLists.txt +++ b/src/options/CMakeLists.txt @@ -17,4 +17,4 @@ if (DEFINED CONAN_BUILD_COVERAGE) list(APPEND extra_args "--gtest_output=xml:/output/test_basic.xml") endif () endif () -add_test(NAME BasicTest COMMAND basic_test ${extra_args}) +add_test(NAME OptionsBasics COMMAND basic_test ${extra_args}) diff --git a/src/settings/CMakeLists.txt b/src/settings/CMakeLists.txt index deb6f94f..e429df32 100644 --- a/src/settings/CMakeLists.txt +++ b/src/settings/CMakeLists.txt @@ -26,4 +26,4 @@ if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) endif() target_include_directories(test_settings BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries(test_settings sisl ${COMMON_DEPS} flatbuffers::flatbuffers GTest::gtest) -add_test(NAME SettingsTest COMMAND test_settings) +add_test(NAME Settings COMMAND test_settings) diff --git a/src/utility/CMakeLists.txt b/src/utility/CMakeLists.txt index cfe00c51..0b95585d 100644 --- a/src/utility/CMakeLists.txt +++ b/src/utility/CMakeLists.txt @@ -7,28 +7,28 @@ target_sources(test_atomic_counter PRIVATE tests/test_atomic_counter.cpp ) target_link_libraries(test_atomic_counter sisl ${COMMON_DEPS} GTest::gtest) -add_test(NAME atomic_counter COMMAND test_atomic_counter) +add_test(NAME AtomicCounter COMMAND test_atomic_counter) add_executable(test_thread_buffer) target_sources(test_thread_buffer PRIVATE tests/test_thread_buffer.cpp ) target_link_libraries(test_thread_buffer ${COMMON_DEPS} GTest::gtest) -add_test(NAME ThreadBufferTest COMMAND test_thread_buffer) +add_test(NAME ThreadBuffer COMMAND test_thread_buffer) add_executable(test_status_factory) target_sources(test_status_factory PRIVATE tests/test_status_factory.cpp ) target_link_libraries(test_status_factory ${COMMON_DEPS} benchmark::benchmark) -add_test(NAME StatusFactoryTest COMMAND test_status_factory) +add_test(NAME StatusFactory COMMAND test_status_factory) add_executable(test_enum) target_sources(test_enum PRIVATE tests/test_enum.cpp ) target_link_libraries(test_enum ${COMMON_DEPS} GTest::gtest) -add_test(NAME EnumTest COMMAND test_enum) +add_test(NAME Enum COMMAND test_enum) if (${prerelease_dummy_FOUND}) add_executable(test_objlife) @@ -36,5 +36,5 @@ if (${prerelease_dummy_FOUND}) tests/test_objlife_counter.cpp ) target_link_libraries(test_objlife sisl ${COMMON_DEPS} GTest::gtest) - add_test(NAME ObjLifeTest COMMAND test_objlife) + add_test(NAME ObjLife COMMAND test_objlife) endif () diff --git a/src/version/CMakeLists.txt b/src/version/CMakeLists.txt index 5e84678d..cdf75f4b 100644 --- a/src/version/CMakeLists.txt +++ b/src/version/CMakeLists.txt @@ -11,4 +11,4 @@ target_sources(test_version PRIVATE tests/test_version.cpp ) target_link_libraries(test_version sisl ${COMMON_DEPS} zmarok-semver::zmarok-semver GTest::gtest) -add_test(NAME VersionTest COMMAND test_version) +add_test(NAME Version COMMAND test_version) diff --git a/src/wisr/CMakeLists.txt b/src/wisr/CMakeLists.txt index 5afe6e81..e0121243 100644 --- a/src/wisr/CMakeLists.txt +++ b/src/wisr/CMakeLists.txt @@ -7,7 +7,7 @@ target_sources(wisr_vector_test PRIVATE tests/test_wisr_vector.cpp ) target_link_libraries(wisr_vector_test ${COMMON_DEPS} benchmark::benchmark GTest::gtest) -add_test(NAME WisrVectorTest COMMAND wisr_vector_test) +add_test(NAME WisrVector COMMAND wisr_vector_test) add_executable(wisr_vector_benchmark) target_sources(wisr_vector_benchmark PRIVATE From d615530e02b2572a8fea9a911de8e42bba66c3cc Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Thu, 8 Dec 2022 08:45:11 -0800 Subject: [PATCH 197/385] Disabling range cache test until CI build succeeds this test (#68) --- src/cache/CMakeLists.txt | 4 +- src/cache/tests/test_range_hashmap.cpp | 222 ------------------------- 2 files changed, 2 insertions(+), 224 deletions(-) diff --git a/src/cache/CMakeLists.txt b/src/cache/CMakeLists.txt index 4805d6be..b214cb4e 100644 --- a/src/cache/CMakeLists.txt +++ b/src/cache/CMakeLists.txt @@ -20,7 +20,7 @@ target_sources(test_range_cache PRIVATE ) target_include_directories(test_range_cache BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries(test_range_cache sisl ${COMMON_DEPS} Folly::Folly GTest::gtest) -add_test(NAME RangeCache COMMAND test_range_cache --num_iters 1000) +#add_test(NAME RangeCache COMMAND test_range_cache --num_iters 1000) add_executable(test_simple_cache) target_sources(test_simple_cache PRIVATE @@ -28,4 +28,4 @@ target_sources(test_simple_cache PRIVATE ) target_include_directories(test_simple_cache BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries(test_simple_cache sisl ${COMMON_DEPS} Folly::Folly GTest::gtest) -add_test(NAME SimpleCache COMMAND test_simple_cache --num_iters 1000) \ No newline at end of file +add_test(NAME SimpleCache COMMAND test_simple_cache --num_iters 1000) diff --git a/src/cache/tests/test_range_hashmap.cpp b/src/cache/tests/test_range_hashmap.cpp index 033593c1..9231b1c3 100644 --- a/src/cache/tests/test_range_hashmap.cpp +++ b/src/cache/tests/test_range_hashmap.cpp @@ -185,228 +185,6 @@ TEST_F(RangeHashMapTest, RandomEverythingTest) { nblks_read, ninsert_ops, nblks_inserted, nerase_ops, nblks_erased); } -#if 0 -struct HashNodeTest : public testing::Test { -public: - virtual ~HashNodeTest() override = default; - -protected: - void SetUp() override { m_node = MultiEntryHashNode< TestRangeKey, TestRangeValue >::alloc_node(TestRangeKey{}); } - void TearDown() override { delete m_node; } - - void insert_range(const uint8_t start, const uint8_t end, const bool expected_success) { - bool is_resized; - uint64_t d = g_rand_generator(g_re); - auto val = TestRangeValue{d, 0}; - - std::tie(m_node, is_resized) = MultiEntryHashNode< TestRangeKey, TestRangeValue >::resize_if_needed(m_node, 1); - auto [v, success] = m_node->try_emplace(std::make_pair<>(start, end), val); - - ASSERT_EQ(expected_success, success) << "emplace returned unexpected status"; - if (success) { - auto i{start}; - while (true) { - m_shadow_map.insert({i, val}); - m_inserted_slots.set_bit(i); - if (i == end) break; - ++i; - } - } - } - - void validate_range(const uint8_t start, const uint8_t end) const { - std::vector< const MultiEntryHashNode< TestRangeKey, TestRangeValue >::val_entry_info* > entries; - int n = m_node->find(koffset_range_t{start, end}, entries); - ASSERT_EQ(entries.size(), s_cast< size_t >(n)) << "find return does not match vector entries"; - - for (const auto& e : entries) { - for (auto o{e->range.first}; o < e->range.second; ++o) { - auto it = m_shadow_map.find(o); - ASSERT_EQ(*(e->get_value_const()), it->second) << "Value mismatch for offset=" << (int)o; - } - } - } - - void erase_range(const uint8_t start, const uint8_t end, const uint8_t expected_count) { - bool is_resized; - std::tie(m_node, is_resized) = MultiEntryHashNode< TestRangeKey, TestRangeValue >::resize_if_needed(m_node, 1); - auto erased_count = m_node->erase(std::make_pair<>(start, end), TestRangeValue::extract); - ASSERT_EQ(erased_count, expected_count) - << "erase return of count does not match expected for range" << start << "-" << end; - if (erased_count == 0) { return; } - - auto i{start}; - while (true) { - m_shadow_map.erase(i); - m_inserted_slots.reset_bit(i); - if (i == end) break; - ++i; - } - } - - void validate_all(const uint8_t in_count_of = 8) { - LOGDEBUG("INFO: Read it back (and validate) in range of {}", in_count_of); - for (uint16_t k{0}; k <= 256 - in_count_of; k += in_count_of) { - validate_range(k, k + in_count_of); - } - m_node->validate_keys(); - } - - std::pair< koffset_t, koffset_t > pick_to_erase(koffset_t max_nblks) { - assert(m_shadow_map.size() != 0); - auto start{g_offset_generator(g_re)}; - uint64_t prev{start}; - koffset_t count{0}; - - max_nblks = std::min((g_max_offset - start) + 1, s_cast< uint32_t >(max_nblks)); - do { - auto b = m_inserted_slots.get_next_set_bit(prev); - if (b == prev) { - ++prev; - ++count; - } else if (count > 0) { - break; - } else if (b == sisl::Bitset::npos) { - start = 0; - prev = 0; - } else { - start = b; - prev = b + 1; - count = 1; - } - } while (count < max_nblks); - - assert(count > 0); - return std::make_pair(s_cast< koffset_t >(start), s_cast< koffset_t >(start + count - 1)); - } - - std::pair< koffset_t, koffset_t > pick_to_insert(const koffset_t max_nblks) { - assert(m_shadow_map.size() < g_max_offset + 1); - auto start_offset{g_offset_generator(g_re)}; - auto bb = m_inserted_slots.get_next_contiguous_n_reset_bits(start_offset, std::nullopt, 1, max_nblks); - if (bb.nbits == 0) { bb = m_inserted_slots.get_next_contiguous_n_reset_bits(0, std::nullopt, 1, max_nblks); } - assert(bb.nbits > 0); - return std::make_pair(s_cast< koffset_t >(bb.start_bit), s_cast< koffset_t >(bb.start_bit + bb.nbits - 1)); - } - -protected: - MultiEntryHashNode< TestRangeKey, TestRangeValue >* m_node; - std::unordered_map< uint8_t, TestRangeValue > m_shadow_map; - sisl::Bitset m_inserted_slots{g_max_offset + 1}; -}; - -TEST_F(HashNodeTest, SequentialTest) { - LOGINFO("INFO: Insert all items in the range of 4"); - for (uint16_t k{0}; k <= 252; k += 4) { - insert_range(k, k + 3, true); - insert_range(k, k + 1, false); - } - validate_all(); - - LOGINFO("INFO: Erase the middle of the range"); - for (uint16_t k{0}; k <= 252; k += 4) { - erase_range(k + 1, k + 2, 2); - } - validate_all(); - - LOGINFO("INFO: Erase the last in the range of 4"); - for (uint16_t k{0}; k <= 252; k += 4) { - erase_range(k + 3, k + 3, 1); - } - validate_all(); - - LOGINFO("INFO: ReInsert 2nd in the range"); - for (uint16_t k{0}; k <= 252; k += 4) { - insert_range(k + 1, k + 1, true); - } - validate_all(); - - LOGINFO("INFO: ReInsert 3rd in the range"); - for (uint16_t k{0}; k <= 252; k += 4) { - insert_range(k + 2, k + 2, true); - } - validate_all(); - - LOGINFO("Node details after test: {}", m_node->to_string()); -} - -TEST_F(HashNodeTest, RandomValidWriteTest) { - LOGINFO("INFO: Insert all items in the range of 4"); - uint32_t offset{0}; - while (offset < g_max_offset) { - const auto sz{g_size_generator(g_re)}; - const koffset_t s{s_cast< koffset_t >(offset)}; - LOGTRACE("Inserting range {} to {} cur_offset={}", s, - s + std::min(sz, s_cast< koffset_t >(g_max_offset - offset)), offset); - insert_range(s, s + std::min(sz, s_cast< koffset_t >(g_max_offset - offset)), true); - offset += sz + 1; - } - validate_all(); - LOGINFO("Node details after all insert: {}", m_node->to_string()); - - auto num_iters{SISL_OPTIONS["num_iters"].as< uint64_t >()}; - LOGINFO("INFO: Insert/Erase valid entries randomly for {} iterations", num_iters); - for (uint64_t i{0}; i < num_iters; ++i) { - if (m_shadow_map.size() < g_max_offset + 1) { - const auto [s, e] = pick_to_insert(g_size_generator(g_re)); - LOGTRACE("Inserting [{}-{}]:", s, e); - insert_range(s, e, true); - LOGTRACE("After insert node: {}", m_node->to_string()); - m_node->validate_keys(); - } - if (m_shadow_map.size() > 0) { - const auto [s, e] = pick_to_erase(g_size_generator(g_re)); - LOGTRACE("Erasing [{}-{}]:", s, e); - erase_range(s, e, e - s + 1); - LOGTRACE("After erase node: {}", m_node->to_string()); - m_node->validate_keys(); - } - } - LOGINFO("Node details after test: {}", m_node->to_string()); -} - -TEST_F(HashNodeTest, RandomEverythingTest) { - enum class op_t : uint8_t { read = 0, insert = 1, erase = 2 }; - uint32_t nread_ops{0}, ninsert_ops{0}, nerase_ops{0}; - uint32_t nblks_read{0}, nblks_inserted{0}, nblks_erased{0}; - - auto num_iters{SISL_OPTIONS["num_iters"].as< uint64_t >()}; - LOGINFO("INFO: Do completely random read/insert/erase operations with both valid and invalid entries for {} iters", - num_iters); - for (uint64_t i{0}; i < num_iters; ++i) { - const op_t op{s_cast< op_t >(g_op_generator(g_re))}; - const koffset_t offset{g_offset_generator(g_re)}; - koffset_t size{g_size_generator(g_re)}; - if (g_max_offset - offset + 1 < size) { size = g_max_offset - offset + 1; } - - switch (op) { - case op_t::read: - validate_range(offset, offset + size - 1); - nblks_read += m_inserted_slots.get_set_count(offset, offset + size - 1); - ++nread_ops; - break; - case op_t::insert: { - auto expected_inserts = size - m_inserted_slots.get_set_count(offset, offset + size - 1); - insert_range(offset, offset + size - 1, m_inserted_slots.is_bits_reset(offset, size)); - nblks_inserted += expected_inserts; - ++ninsert_ops; - break; - } - case op_t::erase: { - auto expected_erases = m_inserted_slots.get_set_count(offset, offset + size - 1); - erase_range(offset, offset + size - 1, expected_erases); - nblks_erased += expected_erases; - ++nerase_ops; - break; - } - } - } - LOGINFO("Node details after test: {}", m_node->to_string()); - LOGINFO("Executed read_ops={}, blks_read={} insert_ops={} blks_inserted={} erase_ops={} blks_erased={}", nread_ops, - nblks_read, ninsert_ops, nblks_inserted, nerase_ops, nblks_erased); -} -#endif - SISL_OPTIONS_ENABLE(logging, test_hashmap) SISL_OPTION_GROUP(test_hashmap, (max_offset, "", "max_offset", "max number of offset", From 379735e03bd83588598fcdac657bd5497844d289 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 13 Dec 2022 11:49:33 -0700 Subject: [PATCH 198/385] Change all libc builds to PRERELEASE=False --- .jenkins/Jenkinsfile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 0f748138..adaa623b 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -122,9 +122,11 @@ pipeline { BUILD_PROFILE = "debug" } - if ("${BUILD_TYPE}" == "release") { + if ("${ALLOC}" == 'libc') { PRERELEASE = 'False' - if ("${ALLOC}" != 'libc') { + } else { + if ("${BUILD_TYPE}" == "release") { + PRERELEASE = 'False' BUILD_PROFILE = "test" } } From cfee29b30c8504fd63188922923e954ace3cd011 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 13 Dec 2022 12:31:00 -0700 Subject: [PATCH 199/385] Disable range_hash_map test, leak detected. --- src/cache/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cache/CMakeLists.txt b/src/cache/CMakeLists.txt index b214cb4e..06b1f035 100644 --- a/src/cache/CMakeLists.txt +++ b/src/cache/CMakeLists.txt @@ -12,7 +12,7 @@ target_sources(test_range_hashmap PRIVATE ) target_include_directories(test_range_hashmap BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries(test_range_hashmap sisl ${COMMON_DEPS} Folly::Folly GTest::gtest) -add_test(NAME RangeHashMap COMMAND test_range_hashmap --num_iters 10000) +#add_test(NAME RangeHashMap COMMAND test_range_hashmap --num_iters 10000) add_executable(test_range_cache) target_sources(test_range_cache PRIVATE From 7aea98eb3854fd5e958cc304e03c2891533268ee Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 15 Dec 2022 08:17:11 -0700 Subject: [PATCH 200/385] Update libcurl to 7.86.0 --- CHANGELOG.md | 11 ++++++++--- conanfile.py | 1 + 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3243b80f..3230d402 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,12 +4,17 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [Unreleased] +## Unreleased + +- Override libcurl to 7.86.0 + +## 8.2.7 ### Added -### Fixed +### Changed - Moved SISL code to github.com; start Changelog -[Unreleased]: https://github.com/eBay/IOManager/compare/c5b7966...HEAD +[Unreleased]: https://github.com/eBay/IOManager/compare/v8.2.7...HEAD +[8.2.7]: https://github.com/eBay/IOManager/compare/c5b7966...v8.2.7 diff --git a/conanfile.py b/conanfile.py index d18d5c37..b7bc737a 100644 --- a/conanfile.py +++ b/conanfile.py @@ -64,6 +64,7 @@ def requirements(self): self.requires("userspace-rcu/0.11.4") self.requires("zmarok-semver/1.1.0") self.requires("fmt/8.1.1", override=True) + self.requires("libcurl/7.86.0", override=True) self.requires("libevent/2.1.12", override=True) self.requires("openssl/1.1.1q", override=True) self.requires("xz_utils/5.2.5", override=True) From 874d35d4c786370a626058ae35808f67cd6c3146 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 15 Dec 2022 08:18:27 -0700 Subject: [PATCH 201/385] More cleanup to recipe and CHANGELOG. --- CHANGELOG.md | 4 ++-- conanfile.py | 22 +++++++++++++--------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3230d402..a8f11bfa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,12 +6,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +### Changed + - Override libcurl to 7.86.0 ## 8.2.7 -### Added - ### Changed - Moved SISL code to github.com; start Changelog diff --git a/conanfile.py b/conanfile.py index b7bc737a..aa172a26 100644 --- a/conanfile.py +++ b/conanfile.py @@ -48,31 +48,35 @@ def requirements(self): if self.options.prerelease: self.requires("prerelease_dummy/1.0.1") + # Memory allocation + if self.options.malloc_impl == "tcmalloc": + self.requires("gperftools/2.7.0") + elif self.options.malloc_impl == "jemalloc": + self.requires("jemalloc/5.2.1") + + # Linux Specific Support + if self.settings.os in ["Linux"]: + self.requires("folly/2022.01.31.00") + self.requires("userspace-rcu/0.11.4") + # Generic packages (conan-center) self.requires("boost/1.79.0") self.requires("cpr/1.8.1") self.requires("cxxopts/2.2.1") self.requires("flatbuffers/1.12.0") - if self.settings.os in ["Linux"]: - self.requires("folly/2022.01.31.00") self.requires("grpc/1.48.0") self.requires("jwt-cpp/0.4.0") self.requires("nlohmann_json/3.10.5") self.requires("prometheus-cpp/1.0.1") self.requires("spdlog/1.11.0") - if self.settings.os in ["Linux"]: - self.requires("userspace-rcu/0.11.4") self.requires("zmarok-semver/1.1.0") + self.requires("fmt/8.1.1", override=True) - self.requires("libcurl/7.86.0", override=True) + self.requires("libcurl/7.86.0", override=True) self.requires("libevent/2.1.12", override=True) self.requires("openssl/1.1.1q", override=True) self.requires("xz_utils/5.2.5", override=True) self.requires("zlib/1.2.12", override=True) - if self.options.malloc_impl == "jemalloc": - self.requires("jemalloc/5.2.1") - elif self.options.malloc_impl == "tcmalloc": - self.requires("gperftools/2.7.0") def validate(self): if self.info.settings.compiler.cppstd: From c61e80f1c05111c7ddfa18336980690b2caeb287 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 15 Dec 2022 08:53:18 -0700 Subject: [PATCH 202/385] Cleanup build and recipe files. --- CMakeLists.txt | 13 +------------ lib/CMakeLists.txt | 14 ++++++++++++++ {include => lib}/utils.hpp | 0 tests/proto/CMakeLists.txt | 6 +++--- 4 files changed, 18 insertions(+), 15 deletions(-) create mode 100644 lib/CMakeLists.txt rename {include => lib}/utils.hpp (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 402e82c0..952d849c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,7 +1,6 @@ cmake_minimum_required (VERSION 3.11) set(CMAKE_CXX_STANDARD 17) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-declarations") set(PACKAGE_REVISION "0.0.0+unknown") if (DEFINED CONAN_PACKAGE_NAME) @@ -15,17 +14,7 @@ find_package(gRPC REQUIRED) include_directories(BEFORE "include") -add_library(${PROJECT_NAME}) -target_sources(${PROJECT_NAME} PRIVATE - lib/rpc_server.cpp - lib/rpc_client.cpp - ) -target_link_libraries(${PROJECT_NAME} - gRPC::grpc++ - sisl::sisl - Boost::Boost - ) - +add_subdirectory(lib) add_subdirectory(tests) # build info diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt new file mode 100644 index 00000000..aa44a113 --- /dev/null +++ b/lib/CMakeLists.txt @@ -0,0 +1,14 @@ +cmake_minimum_required (VERSION 3.11) + +set(CMAKE_CXX_STANDARD 17) + +add_library(${PROJECT_NAME}) +target_sources(${PROJECT_NAME} PRIVATE + rpc_server.cpp + rpc_client.cpp + ) +target_link_libraries(${PROJECT_NAME} + gRPC::grpc++ + sisl::sisl + Boost::Boost + ) diff --git a/include/utils.hpp b/lib/utils.hpp similarity index 100% rename from include/utils.hpp rename to lib/utils.hpp diff --git a/tests/proto/CMakeLists.txt b/tests/proto/CMakeLists.txt index d36a2a63..1226be7b 100644 --- a/tests/proto/CMakeLists.txt +++ b/tests/proto/CMakeLists.txt @@ -1,14 +1,14 @@ cmake_minimum_required(VERSION 3.11) -add_library(test_proto OBJECT +add_library(test_proto OBJECT) +target_sources(test_proto PRIVATE grpc_helper_test.proto ) target_link_libraries(test_proto - protobuf::libprotobuf gRPC::grpc++ ) -protobuf_generate(LANGUAGE cpp TARGET test_proto PROTOS grpc_helper_test.proto) +protobuf_generate(LANGUAGE cpp TARGET test_proto PROTOS) protobuf_generate( TARGET test_proto LANGUAGE grpc From b9d2f613ecbc094dc6fcb3edabc15aea07e11359 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 20 Dec 2022 12:39:17 -0700 Subject: [PATCH 203/385] Import grpc_helper --- CMakeLists.txt | 30 ------------------- .../grpc_helper/generic_service.hpp | 0 include/{ => sisl}/grpc_helper/rpc_call.hpp | 0 include/{ => sisl}/grpc_helper/rpc_client.hpp | 0 include/{ => sisl}/grpc_helper/rpc_common.hpp | 0 include/{ => sisl}/grpc_helper/rpc_server.hpp | 0 {lib => src/grpc_helper}/CMakeLists.txt | 0 {lib => src/grpc_helper}/rpc_client.cpp | 0 {lib => src/grpc_helper}/rpc_server.cpp | 0 .../grpc_helper/tests}/CMakeLists.txt | 0 .../tests}/function/CMakeLists.txt | 0 .../tests}/function/echo_async_client.cpp | 0 .../tests}/function/echo_server.cpp | 0 .../tests}/function/echo_sync_client.cpp | 0 .../grpc_helper/tests}/proto/CMakeLists.txt | 0 .../tests}/proto/grpc_helper_test.proto | 0 .../grpc_helper/tests}/unit/CMakeLists.txt | 0 .../grpc_helper/tests}/unit/auth_test.cpp | 0 .../tests}/unit/basic_http_server.hpp | 0 .../grpc_helper/tests}/unit/test_token.hpp | 0 {lib => src/grpc_helper}/utils.hpp | 0 21 files changed, 30 deletions(-) delete mode 100644 CMakeLists.txt rename include/{ => sisl}/grpc_helper/generic_service.hpp (100%) rename include/{ => sisl}/grpc_helper/rpc_call.hpp (100%) rename include/{ => sisl}/grpc_helper/rpc_client.hpp (100%) rename include/{ => sisl}/grpc_helper/rpc_common.hpp (100%) rename include/{ => sisl}/grpc_helper/rpc_server.hpp (100%) rename {lib => src/grpc_helper}/CMakeLists.txt (100%) rename {lib => src/grpc_helper}/rpc_client.cpp (100%) rename {lib => src/grpc_helper}/rpc_server.cpp (100%) rename {tests => src/grpc_helper/tests}/CMakeLists.txt (100%) rename {tests => src/grpc_helper/tests}/function/CMakeLists.txt (100%) rename {tests => src/grpc_helper/tests}/function/echo_async_client.cpp (100%) rename {tests => src/grpc_helper/tests}/function/echo_server.cpp (100%) rename {tests => src/grpc_helper/tests}/function/echo_sync_client.cpp (100%) rename {tests => src/grpc_helper/tests}/proto/CMakeLists.txt (100%) rename {tests => src/grpc_helper/tests}/proto/grpc_helper_test.proto (100%) rename {tests => src/grpc_helper/tests}/unit/CMakeLists.txt (100%) rename {tests => src/grpc_helper/tests}/unit/auth_test.cpp (100%) rename {tests => src/grpc_helper/tests}/unit/basic_http_server.hpp (100%) rename {tests => src/grpc_helper/tests}/unit/test_token.hpp (100%) rename {lib => src/grpc_helper}/utils.hpp (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt deleted file mode 100644 index 952d849c..00000000 --- a/CMakeLists.txt +++ /dev/null @@ -1,30 +0,0 @@ -cmake_minimum_required (VERSION 3.11) - -set(CMAKE_CXX_STANDARD 17) - -set(PACKAGE_REVISION "0.0.0+unknown") -if (DEFINED CONAN_PACKAGE_NAME) - set(PACKAGE_REVISION "${CONAN_PACKAGE_VERSION}") -endif () -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CPP_WARNINGS} -DPACKAGE_NAME=${PROJECT_NAME} -DPACKAGE_VERSION=${PACKAGE_REVISION}") - -find_package(Threads REQUIRED) -find_package(sisl REQUIRED) -find_package(gRPC REQUIRED) - -include_directories(BEFORE "include") - -add_subdirectory(lib) -add_subdirectory(tests) - -# build info -string(TOUPPER "${CMAKE_BUILD_TYPE}" UC_CMAKE_BUILD_TYPE) -message(STATUS "Build configuration: ${CMAKE_BUILD_TYPE}") -message(STATUS "C compiler info: ${CMAKE_C_COMPILER_ID} ${CMAKE_C_COMPILER_VERSION} ${CMAKE_C_COMPILER_EXTERNAL_TOOLCHAIN}") -message(STATUS "C++ compiler info: ${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION} ${CMAKE_CXX_COMPILER_EXTERNAL_TOOLCHAIN}") -message(STATUS "C flags: ${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${UC_CMAKE_BUILD_TYPE}}") -message(STATUS "C++ flags: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${UC_CMAKE_BUILD_TYPE}}") -message(STATUS "Linker flags (executable): ${CMAKE_EXE_LINKER_FLAGS} ${CMAKE_EXE_LINKER_FLAGS_${UC_CMAKE_BUILD_TYPE}}") -message(STATUS "Linker flags (shared): ${CMAKE_SHARED_LINKER_FLAGS} ${CMAKE_SHARED_LINKER_FLAGS_${UC_CMAKE_BUILD_TYPE}}") -message(STATUS "Linker flags (module): ${CMAKE_MODULE_LINKER_FLAGS} ${CMAKE_MODULE_LINKER_FLAGS_${UC_CMAKE_BUILD_TYPE}}") -message(STATUS "Linker flags (static): ${CMAKE_STATIC_LINKER_FLAGS} ${CMAKE_STATIC_LINKER_FLAGS_${UC_CMAKE_BUILD_TYPE}}") diff --git a/include/grpc_helper/generic_service.hpp b/include/sisl/grpc_helper/generic_service.hpp similarity index 100% rename from include/grpc_helper/generic_service.hpp rename to include/sisl/grpc_helper/generic_service.hpp diff --git a/include/grpc_helper/rpc_call.hpp b/include/sisl/grpc_helper/rpc_call.hpp similarity index 100% rename from include/grpc_helper/rpc_call.hpp rename to include/sisl/grpc_helper/rpc_call.hpp diff --git a/include/grpc_helper/rpc_client.hpp b/include/sisl/grpc_helper/rpc_client.hpp similarity index 100% rename from include/grpc_helper/rpc_client.hpp rename to include/sisl/grpc_helper/rpc_client.hpp diff --git a/include/grpc_helper/rpc_common.hpp b/include/sisl/grpc_helper/rpc_common.hpp similarity index 100% rename from include/grpc_helper/rpc_common.hpp rename to include/sisl/grpc_helper/rpc_common.hpp diff --git a/include/grpc_helper/rpc_server.hpp b/include/sisl/grpc_helper/rpc_server.hpp similarity index 100% rename from include/grpc_helper/rpc_server.hpp rename to include/sisl/grpc_helper/rpc_server.hpp diff --git a/lib/CMakeLists.txt b/src/grpc_helper/CMakeLists.txt similarity index 100% rename from lib/CMakeLists.txt rename to src/grpc_helper/CMakeLists.txt diff --git a/lib/rpc_client.cpp b/src/grpc_helper/rpc_client.cpp similarity index 100% rename from lib/rpc_client.cpp rename to src/grpc_helper/rpc_client.cpp diff --git a/lib/rpc_server.cpp b/src/grpc_helper/rpc_server.cpp similarity index 100% rename from lib/rpc_server.cpp rename to src/grpc_helper/rpc_server.cpp diff --git a/tests/CMakeLists.txt b/src/grpc_helper/tests/CMakeLists.txt similarity index 100% rename from tests/CMakeLists.txt rename to src/grpc_helper/tests/CMakeLists.txt diff --git a/tests/function/CMakeLists.txt b/src/grpc_helper/tests/function/CMakeLists.txt similarity index 100% rename from tests/function/CMakeLists.txt rename to src/grpc_helper/tests/function/CMakeLists.txt diff --git a/tests/function/echo_async_client.cpp b/src/grpc_helper/tests/function/echo_async_client.cpp similarity index 100% rename from tests/function/echo_async_client.cpp rename to src/grpc_helper/tests/function/echo_async_client.cpp diff --git a/tests/function/echo_server.cpp b/src/grpc_helper/tests/function/echo_server.cpp similarity index 100% rename from tests/function/echo_server.cpp rename to src/grpc_helper/tests/function/echo_server.cpp diff --git a/tests/function/echo_sync_client.cpp b/src/grpc_helper/tests/function/echo_sync_client.cpp similarity index 100% rename from tests/function/echo_sync_client.cpp rename to src/grpc_helper/tests/function/echo_sync_client.cpp diff --git a/tests/proto/CMakeLists.txt b/src/grpc_helper/tests/proto/CMakeLists.txt similarity index 100% rename from tests/proto/CMakeLists.txt rename to src/grpc_helper/tests/proto/CMakeLists.txt diff --git a/tests/proto/grpc_helper_test.proto b/src/grpc_helper/tests/proto/grpc_helper_test.proto similarity index 100% rename from tests/proto/grpc_helper_test.proto rename to src/grpc_helper/tests/proto/grpc_helper_test.proto diff --git a/tests/unit/CMakeLists.txt b/src/grpc_helper/tests/unit/CMakeLists.txt similarity index 100% rename from tests/unit/CMakeLists.txt rename to src/grpc_helper/tests/unit/CMakeLists.txt diff --git a/tests/unit/auth_test.cpp b/src/grpc_helper/tests/unit/auth_test.cpp similarity index 100% rename from tests/unit/auth_test.cpp rename to src/grpc_helper/tests/unit/auth_test.cpp diff --git a/tests/unit/basic_http_server.hpp b/src/grpc_helper/tests/unit/basic_http_server.hpp similarity index 100% rename from tests/unit/basic_http_server.hpp rename to src/grpc_helper/tests/unit/basic_http_server.hpp diff --git a/tests/unit/test_token.hpp b/src/grpc_helper/tests/unit/test_token.hpp similarity index 100% rename from tests/unit/test_token.hpp rename to src/grpc_helper/tests/unit/test_token.hpp diff --git a/lib/utils.hpp b/src/grpc_helper/utils.hpp similarity index 100% rename from lib/utils.hpp rename to src/grpc_helper/utils.hpp From f49bac3b9e850ee85a6fca27e92a930620dbea17 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 20 Dec 2022 13:53:43 -0700 Subject: [PATCH 204/385] sisl_grpc depends on sisl_auth_manager --- src/grpc/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/src/grpc/CMakeLists.txt b/src/grpc/CMakeLists.txt index b1a6d163..888f786b 100644 --- a/src/grpc/CMakeLists.txt +++ b/src/grpc/CMakeLists.txt @@ -13,6 +13,7 @@ target_sources(sisl_grpc PRIVATE rpc_client.cpp ) target_link_libraries(sisl_grpc + sisl_auth_manager gRPC::grpc++ cpr::cpr flatbuffers::flatbuffers From cc36265a70d922c622ce5eb00dac9ccadcad6933 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 20 Dec 2022 14:06:00 -0700 Subject: [PATCH 205/385] Added Apache-2.0 header to grpc files. --- include/sisl/grpc/generic_service.hpp | 14 ++++++++++++++ include/sisl/grpc/rpc_call.hpp | 14 ++++++++++++++ include/sisl/grpc/rpc_client.hpp | 14 ++++++++++++++ include/sisl/grpc/rpc_common.hpp | 14 ++++++++++++++ include/sisl/grpc/rpc_server.hpp | 14 ++++++++++++++ src/grpc/rpc_client.cpp | 14 ++++++++++++++ src/grpc/rpc_server.cpp | 18 +++++++++++++----- src/grpc/tests/function/echo_async_client.cpp | 14 ++++++++++++++ src/grpc/tests/function/echo_server.cpp | 18 +++++++++++++----- src/grpc/tests/function/echo_sync_client.cpp | 18 +++++++++++++----- src/grpc/tests/proto/grpc_helper_test.proto | 15 ++++++++++++++- src/grpc/tests/unit/auth_test.cpp | 14 ++++++++++++++ src/grpc/tests/unit/basic_http_server.hpp | 14 ++++++++++++++ src/grpc/tests/unit/test_token.hpp | 14 ++++++++++++++ src/grpc/utils.hpp | 14 ++++++++++++++ 15 files changed, 207 insertions(+), 16 deletions(-) diff --git a/include/sisl/grpc/generic_service.hpp b/include/sisl/grpc/generic_service.hpp index b5d59e56..a530fc49 100644 --- a/include/sisl/grpc/generic_service.hpp +++ b/include/sisl/grpc/generic_service.hpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #pragma once #include diff --git a/include/sisl/grpc/rpc_call.hpp b/include/sisl/grpc/rpc_call.hpp index f72a99de..2e4d5767 100644 --- a/include/sisl/grpc/rpc_call.hpp +++ b/include/sisl/grpc/rpc_call.hpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #pragma once #include diff --git a/include/sisl/grpc/rpc_client.hpp b/include/sisl/grpc/rpc_client.hpp index 8d5f9355..4da34005 100644 --- a/include/sisl/grpc/rpc_client.hpp +++ b/include/sisl/grpc/rpc_client.hpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #pragma once #include diff --git a/include/sisl/grpc/rpc_common.hpp b/include/sisl/grpc/rpc_common.hpp index bf2a0326..8c726df6 100644 --- a/include/sisl/grpc/rpc_common.hpp +++ b/include/sisl/grpc/rpc_common.hpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #pragma once namespace sisl { diff --git a/include/sisl/grpc/rpc_server.hpp b/include/sisl/grpc/rpc_server.hpp index 7c84c71f..a813b717 100644 --- a/include/sisl/grpc/rpc_server.hpp +++ b/include/sisl/grpc/rpc_server.hpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #pragma once #include diff --git a/src/grpc/rpc_client.cpp b/src/grpc/rpc_client.cpp index 0ea2c155..cc1c0baa 100644 --- a/src/grpc/rpc_client.cpp +++ b/src/grpc/rpc_client.cpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #include "sisl/grpc/rpc_client.hpp" #include "utils.hpp" diff --git a/src/grpc/rpc_server.cpp b/src/grpc/rpc_server.cpp index 1239a33e..e23af456 100644 --- a/src/grpc/rpc_server.cpp +++ b/src/grpc/rpc_server.cpp @@ -1,9 +1,17 @@ -/* - * server.cpp +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. * - * Created on: Oct 24, 2018 - */ - + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #include "sisl/grpc/rpc_server.hpp" #include "sisl/grpc/generic_service.hpp" #include "utils.hpp" diff --git a/src/grpc/tests/function/echo_async_client.cpp b/src/grpc/tests/function/echo_async_client.cpp index ce2655b2..dbd8b3fc 100644 --- a/src/grpc/tests/function/echo_async_client.cpp +++ b/src/grpc/tests/function/echo_async_client.cpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #include #include #include diff --git a/src/grpc/tests/function/echo_server.cpp b/src/grpc/tests/function/echo_server.cpp index 4e0726f2..f2a8290c 100644 --- a/src/grpc/tests/function/echo_server.cpp +++ b/src/grpc/tests/function/echo_server.cpp @@ -1,9 +1,17 @@ -/* - * echo_server.cpp +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. * - * Created on: Sep 22, 2018 - */ - + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #include #include #include diff --git a/src/grpc/tests/function/echo_sync_client.cpp b/src/grpc/tests/function/echo_sync_client.cpp index 3d904eb4..8ccc9e86 100644 --- a/src/grpc/tests/function/echo_sync_client.cpp +++ b/src/grpc/tests/function/echo_sync_client.cpp @@ -1,9 +1,17 @@ -/* - * echo_sync_client.cpp +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. * - * Created on: Sep 22, 2018 - */ - + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #include #include #include diff --git a/src/grpc/tests/proto/grpc_helper_test.proto b/src/grpc/tests/proto/grpc_helper_test.proto index 500816ae..d5844389 100644 --- a/src/grpc/tests/proto/grpc_helper_test.proto +++ b/src/grpc/tests/proto/grpc_helper_test.proto @@ -1,4 +1,17 @@ - +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ syntax = "proto3"; package grpc_helper_test; diff --git a/src/grpc/tests/unit/auth_test.cpp b/src/grpc/tests/unit/auth_test.cpp index 756c8d37..dc146c22 100644 --- a/src/grpc/tests/unit/auth_test.cpp +++ b/src/grpc/tests/unit/auth_test.cpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #include #include #include diff --git a/src/grpc/tests/unit/basic_http_server.hpp b/src/grpc/tests/unit/basic_http_server.hpp index 9087a5b9..f01038a1 100644 --- a/src/grpc/tests/unit/basic_http_server.hpp +++ b/src/grpc/tests/unit/basic_http_server.hpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #include #include #include diff --git a/src/grpc/tests/unit/test_token.hpp b/src/grpc/tests/unit/test_token.hpp index 629e2530..a50bcdad 100644 --- a/src/grpc/tests/unit/test_token.hpp +++ b/src/grpc/tests/unit/test_token.hpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #pragma once namespace sisl::grpc::testing { diff --git a/src/grpc/utils.hpp b/src/grpc/utils.hpp index cbefdae0..1f4b4bc7 100644 --- a/src/grpc/utils.hpp +++ b/src/grpc/utils.hpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #pragma once #include From 3099fe44f74bab40ee57c2fdc795f9b60327c45e Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 20 Dec 2022 14:08:17 -0700 Subject: [PATCH 206/385] Remove unused vars --- include/sisl/utility/obj_life_counter.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/sisl/utility/obj_life_counter.hpp b/include/sisl/utility/obj_life_counter.hpp index 640309ae..a37c2891 100644 --- a/include/sisl/utility/obj_life_counter.hpp +++ b/include/sisl/utility/obj_life_counter.hpp @@ -173,9 +173,9 @@ class ObjCounterRegistry { return instance; } - static void register_obj(const char* name, pair_of_atomic_ptrs ptrs) {} + static void register_obj(const char*, pair_of_atomic_ptrs) {} - static void foreach (const std::function< void(const std::string&, int64_t, int64_t) >& closure) {} + static void foreach (const std::function< void(const std::string&, int64_t, int64_t) >&) {} static inline void enable_metrics_reporting() {} }; #endif // _PRERELEASE From 853d58c48146310459f29904ef23dc4b3f4e2ffa Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 21 Dec 2022 14:12:22 -0700 Subject: [PATCH 207/385] Use Ubuntu 22.04 image for CI. --- .github/workflows/build_with_conan.yml | 2 +- .jenkins/Jenkinsfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_with_conan.yml b/.github/workflows/build_with_conan.yml index c7800353..f91292a7 100644 --- a/.github/workflows/build_with_conan.yml +++ b/.github/workflows/build_with_conan.yml @@ -15,7 +15,7 @@ jobs: # The CMake configure and build commands are platform agnostic and should work equally well on Windows or Mac. # You can convert this to a matrix build if you need cross-platform coverage. # See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 strategy: fail-fast: false matrix: diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index adaa623b..eeae0e88 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -58,7 +58,7 @@ pipeline { stage('Build') { failFast true matrix { - agent { label 'sds-builder' } + agent { docker { image 'hub.tess.io/sds/sds_tess_build:4.00' } } axes { axis { name 'BUILD_TYPE' From cff171380651aa530eacfad9debfa9a62a71ea9e Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Thu, 22 Dec 2022 15:13:04 -0800 Subject: [PATCH 208/385] Streamtracker rollback functionality (#72) --- conanfile.py | 2 +- include/sisl/fds/stream_tracker.hpp | 14 ++++++- src/fds/tests/test_stream_tracker.cpp | 59 ++++++++++++++++++++++++++- 3 files changed, 71 insertions(+), 4 deletions(-) diff --git a/conanfile.py b/conanfile.py index 75c99019..156f2ec2 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.3.1" + version = "8.3.2" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/include/sisl/fds/stream_tracker.hpp b/include/sisl/fds/stream_tracker.hpp index d0ef7d8f..879461fa 100644 --- a/include/sisl/fds/stream_tracker.hpp +++ b/include/sisl/fds/stream_tracker.hpp @@ -74,7 +74,7 @@ class StreamTracker { template < class... Args > int64_t create(int64_t idx, Args&&... args) { return do_update( - idx, [](T& data) { return false; }, true /* replace */, std::forward< Args >(args)...); + idx, []([[maybe_unused]] T& data) { return false; }, true /* replace */, std::forward< Args >(args)...); } template < class... Args > @@ -88,6 +88,18 @@ class StreamTracker { m_comp_slot_bits.set_bits(start_bit, end_idx - start_idx + 1); } + void rollback(int64_t new_end_idx) { + folly::SharedMutexWritePriority::ReadHolder holder(m_lock); + if ((new_end_idx < m_slot_ref_idx) || + (new_end_idx >= (m_slot_ref_idx + int64_cast(m_active_slot_bits.size())))) { + throw std::out_of_range("Slot idx is not in range"); + } + + auto new_end_bit = new_end_idx - m_slot_ref_idx; + m_active_slot_bits.reset_bits(new_end_bit + 1, m_active_slot_bits.size() - new_end_bit - 1); + m_comp_slot_bits.reset_bits(new_end_bit + 1, m_comp_slot_bits.size() - new_end_bit - 1); + } + T& at(int64_t idx) const { folly::SharedMutexWritePriority::ReadHolder holder(m_lock); if (idx < m_slot_ref_idx) { throw std::out_of_range("Slot idx is not in range"); } diff --git a/src/fds/tests/test_stream_tracker.cpp b/src/fds/tests/test_stream_tracker.cpp index 448f7c49..bfe832e8 100644 --- a/src/fds/tests/test_stream_tracker.cpp +++ b/src/fds/tests/test_stream_tracker.cpp @@ -32,6 +32,8 @@ namespace { struct TestData { TestData(int val) : m_value(val) {} int m_value = 0; + + bool operator==(const TestData& other) const { return (m_value == other.m_value); } }; struct StreamTrackerTest : public testing::Test { @@ -60,7 +62,7 @@ struct StreamTrackerTest : public testing::Test { TEST_F(StreamTrackerTest, SimpleCompletions) { static std::random_device s_rd{}; static std::default_random_engine s_engine{s_rd()}; - std::uniform_int_distribution< int64_t > gen{0, 999}; + std::uniform_int_distribution< int > gen{0, 999}; for (auto i = 0; i < 100; ++i) { m_tracker.create_and_complete(i, gen(s_engine)); @@ -96,7 +98,7 @@ TEST_F(StreamTrackerTest, SimpleCompletions) { TEST_F(StreamTrackerTest, ForceRealloc) { static std::random_device s_rd{}; static std::default_random_engine s_engine{s_rd()}; - std::uniform_int_distribution< int64_t > gen{0, 999}; + std::uniform_int_distribution< int > gen{0, 999}; auto prev_size = get_mem_size(); auto far_idx = (int64_t)StreamTracker< TestData >::alloc_blk_size + 1; @@ -111,6 +113,59 @@ TEST_F(StreamTrackerTest, ForceRealloc) { EXPECT_EQ(get_mem_size(), prev_size * 2); } +TEST_F(StreamTrackerTest, Rollback) { + static std::random_device s_rd{}; + static std::default_random_engine s_engine{s_rd()}; + std::uniform_int_distribution< int > gen{0, 999}; + + for (auto i = 0; i < 200; ++i) { + m_tracker.create(i, gen(s_engine)); + } + EXPECT_EQ(m_tracker.active_upto(), 199); + EXPECT_EQ(m_tracker.completed_upto(), -1); + m_tracker.complete(0, 99); + EXPECT_EQ(m_tracker.active_upto(), 199); + EXPECT_EQ(m_tracker.completed_upto(), 99); + + m_tracker.rollback(169); + EXPECT_EQ(m_tracker.active_upto(), 169); + EXPECT_EQ(m_tracker.completed_upto(), 99); + + m_tracker.complete(100, 169); + EXPECT_EQ(m_tracker.active_upto(), 169); + EXPECT_EQ(m_tracker.completed_upto(), 169); + + auto new_val1 = gen(s_engine); + auto new_val2 = gen(s_engine); + m_tracker.create(170, new_val1); + m_tracker.create(172, new_val2); + EXPECT_EQ(m_tracker.active_upto(), 170); + EXPECT_EQ(m_tracker.completed_upto(), 169); + m_tracker.complete(170, 170); + EXPECT_EQ(m_tracker.completed_upto(), 170); + m_tracker.create_and_complete(171, new_val2); + m_tracker.complete(172, 172); + + EXPECT_EQ(m_tracker.completed_upto(), 172); + EXPECT_EQ(m_tracker.at(170), TestData{new_val1}); + EXPECT_EQ(m_tracker.at(171), TestData{new_val2}); + EXPECT_EQ(m_tracker.at(172), TestData{new_val2}); + + bool exception_hit{false}; + m_tracker.truncate(80); + try { + m_tracker.rollback(1); + } catch (const std::out_of_range& e) { exception_hit = true; } + EXPECT_EQ(exception_hit, true); + + exception_hit = false; + m_tracker.truncate(173); + try { + m_tracker.rollback(1); + } catch (const std::out_of_range& e) { exception_hit = true; } + EXPECT_EQ(exception_hit, true); +} + int main(int argc, char* argv[]) { ::testing::InitGoogleTest(&argc, argv); auto ret = RUN_ALL_TESTS(); From 2589ee1a8707b5d6b0fa11252e7b23c05147549e Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 3 Jan 2023 15:07:22 -0700 Subject: [PATCH 209/385] Use Ubuntu 2204 in CI. --- .jenkins/Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index eeae0e88..894495b6 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -1,5 +1,5 @@ pipeline { - agent any + agent { label 'sds-builder-2204' } environment { ARTIFACTORY_PASS = credentials('ARTIFACTORY_PASS') @@ -58,7 +58,7 @@ pipeline { stage('Build') { failFast true matrix { - agent { docker { image 'hub.tess.io/sds/sds_tess_build:4.00' } } + agent { label 'sds-builder-2204' } axes { axis { name 'BUILD_TYPE' From 6930e99842de0bd74182991c2e413a332aa1355b Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Mon, 9 Jan 2023 10:19:21 -0800 Subject: [PATCH 210/385] resolve deadlock in file watcher on_modified_event (#75) * resolve deadlock in file watcher on_modified_event * improve unit test Co-authored-by: Ravi Akella email = raakella@ebay.com --- conanfile.py | 2 +- include/sisl/file_watcher/file_watcher.hpp | 2 +- src/file_watcher/file_watcher.cpp | 12 +-- src/file_watcher/file_watcher_test.cpp | 87 +++++++++++++++------- 4 files changed, 68 insertions(+), 35 deletions(-) diff --git a/conanfile.py b/conanfile.py index 156f2ec2..3a81893f 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.3.2" + version = "8.3.3" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/include/sisl/file_watcher/file_watcher.hpp b/include/sisl/file_watcher/file_watcher.hpp index 7eff1b6e..af205293 100644 --- a/include/sisl/file_watcher/file_watcher.hpp +++ b/include/sisl/file_watcher/file_watcher.hpp @@ -41,7 +41,7 @@ class FileWatcher { private: int m_inotify_fd; std::map< std::string, FileInfo > m_files; - std::mutex m_files_lock; + mutable std::mutex m_files_lock; std::unique_ptr< std::thread > m_fw_thread; // This is used to notify poll loop to exit int m_pipefd[2] = {-1, -1}; diff --git a/src/file_watcher/file_watcher.cpp b/src/file_watcher/file_watcher.cpp index b34c7d42..cf259889 100644 --- a/src/file_watcher/file_watcher.cpp +++ b/src/file_watcher/file_watcher.cpp @@ -186,16 +186,18 @@ void FileWatcher::handle_events() { } void FileWatcher::on_modified_event(const int wd, const bool is_deleted) { - auto lk = std::unique_lock< std::mutex >(m_files_lock); FileInfo file_info; get_fileinfo(wd, file_info); if (is_deleted) { + // There is a corner case (very unlikely) where a new listener + // regestered for this filepath after the current delete event was triggered. + { + auto lk = std::unique_lock< std::mutex >(m_files_lock); + m_files.erase(file_info.m_filepath); + } for (const auto& [id, handler] : file_info.m_handlers) { handler(file_info.m_filepath, true); } - // There is a corner case (very unlikely) where a new listener - // regestered for this filepath after the current delete event was triggered. - m_files.erase(file_info.m_filepath); return; } @@ -243,8 +245,8 @@ bool FileWatcher::get_file_contents(const std::string& file_name, std::string& c return false; } -// Hold the m_files_lock before calling this method. void FileWatcher::get_fileinfo(const int wd, FileInfo& file_info) const { + auto lk = std::unique_lock< std::mutex >(m_files_lock); for (const auto& [file_path, file] : m_files) { if (file.m_wd == wd) { file_info = file; diff --git a/src/file_watcher/file_watcher_test.cpp b/src/file_watcher/file_watcher_test.cpp index b6081405..beeb0693 100644 --- a/src/file_watcher/file_watcher_test.cpp +++ b/src/file_watcher/file_watcher_test.cpp @@ -21,55 +21,86 @@ using namespace ::testing; class FileWatcherTest : public ::testing::Test { public: - std::shared_ptr< FileWatcher > file_watcher; virtual void SetUp() override { - file_watcher = std::make_shared< FileWatcher >(); - EXPECT_TRUE(file_watcher->start()); + m_file_change_params.file_watcher = std::make_shared< FileWatcher >(); + EXPECT_TRUE(m_file_change_params.file_watcher->start()); } - virtual void TearDown() override { EXPECT_TRUE(file_watcher->stop()); } + virtual void TearDown() override { + EXPECT_TRUE(m_file_change_params.file_watcher->stop()); + std::remove(m_file_change_params.file_str.c_str()); + } - std::mutex file_change_lock; - std::condition_variable file_change_cv; + struct FileChangeParams { + std::shared_ptr< FileWatcher > file_watcher; + std::string file_str; + std::mutex file_change_lock; + std::condition_variable file_change_cv; + bool is_deleted; + bool cb_called; + }; + FileChangeParams m_file_change_params; }; +void monitor_file_changes(FileWatcherTest::FileChangeParams& file_change_params) { + EXPECT_TRUE(file_change_params.file_watcher->register_listener( + file_change_params.file_str, "basic_test_listener", + [&file_change_params](const std::string filepath, const bool deleted) { + EXPECT_EQ(file_change_params.file_str, filepath); + { + std::lock_guard< std::mutex > lg(file_change_params.file_change_lock); + file_change_params.is_deleted = deleted; + file_change_params.cb_called = true; + } + if (deleted) { + std::ofstream file_of{file_change_params.file_str}; + monitor_file_changes(file_change_params); + } + file_change_params.file_change_cv.notify_one(); + })); +} + TEST_F(FileWatcherTest, basic_watcher) { const auto file_path = fs::current_path() / "basic_test.txt"; // remove if exists and then create a new file fs::remove(file_path); - const std::string file_str{file_path.string()}; - std::ofstream file_of{file_str}; - bool is_deleted = true; - bool cb_called = false; + m_file_change_params.file_str = file_path.string(); + std::ofstream file_of{m_file_change_params.file_str}; + m_file_change_params.is_deleted = true; + m_file_change_params.cb_called = false; - EXPECT_TRUE(file_watcher->register_listener( - file_str, "basic_test_listener", - [this, &is_deleted, &cb_called, &file_str](const std::string filepath, const bool deleted) { - EXPECT_EQ(file_str, filepath); - { - std::lock_guard< std::mutex > lg(file_change_lock); - is_deleted = deleted; - cb_called = true; - } - file_change_cv.notify_one(); - })); + monitor_file_changes(m_file_change_params); // edit the file file_of << "Hello World!"; file_of.flush(); { - auto lk = std::unique_lock< std::mutex >(file_change_lock); - EXPECT_TRUE(file_change_cv.wait_for(lk, std::chrono::milliseconds(500), [&cb_called]() { return cb_called; })); - EXPECT_FALSE(is_deleted); - cb_called = false; // set it false for the next iteration of the test + auto lk = std::unique_lock< std::mutex >(m_file_change_params.file_change_lock); + EXPECT_TRUE(m_file_change_params.file_change_cv.wait_for(lk, std::chrono::milliseconds(500), + [this]() { return m_file_change_params.cb_called; })); + EXPECT_FALSE(m_file_change_params.is_deleted); + m_file_change_params.cb_called = false; // set it false for the next iteration of the test } // remove the file fs::remove(file_path); { - auto lk = std::unique_lock< std::mutex >(file_change_lock); - EXPECT_TRUE(file_change_cv.wait_for(lk, std::chrono::milliseconds(500), [&cb_called]() { return cb_called; })); - EXPECT_TRUE(is_deleted); + auto lk = std::unique_lock< std::mutex >(m_file_change_params.file_change_lock); + EXPECT_TRUE(m_file_change_params.file_change_cv.wait_for(lk, std::chrono::milliseconds(500), + [this]() { return m_file_change_params.cb_called; })); + EXPECT_TRUE(m_file_change_params.is_deleted); + m_file_change_params.cb_called = false; // set it false for the next iteration of the test + } + + std::ofstream file_of1{m_file_change_params.file_str}; + file_of1 << "Hello World Again!"; + file_of1.flush(); + { + auto lk = std::unique_lock< std::mutex >(m_file_change_params.file_change_lock); + EXPECT_TRUE(m_file_change_params.file_change_cv.wait_for(lk, std::chrono::milliseconds(500), + [this]() { return m_file_change_params.cb_called; })); + EXPECT_FALSE(m_file_change_params.is_deleted); + m_file_change_params.cb_called = false; // set it false for the next iteration of the test } } From 9e235cfeb5ba91f9ff0838d3ba07440e87cfb458 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Mon, 9 Jan 2023 15:11:40 -0800 Subject: [PATCH 211/385] resolve the bug in multiple listeners case in file watcher (#76) Co-authored-by: Ravi Akella email = raakella@ebay.com --- conanfile.py | 2 +- src/file_watcher/file_watcher.cpp | 2 +- src/file_watcher/file_watcher_test.cpp | 56 ++++++++++++++++++-------- 3 files changed, 41 insertions(+), 19 deletions(-) diff --git a/conanfile.py b/conanfile.py index 3a81893f..6ad2d2dc 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.3.3" + version = "8.3.4" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/src/file_watcher/file_watcher.cpp b/src/file_watcher/file_watcher.cpp index cf259889..138239af 100644 --- a/src/file_watcher/file_watcher.cpp +++ b/src/file_watcher/file_watcher.cpp @@ -69,7 +69,7 @@ bool FileWatcher::register_listener(const std::string& file_path, const std::str { auto lk = std::unique_lock< std::mutex >(m_files_lock); if (const auto it{m_files.find(file_path)}; it != m_files.end()) { - auto file_info = it->second; + auto& file_info = it->second; file_info.m_handlers.emplace(listener_id, file_event_handler); LOGDEBUG("File path {} exists, adding the handler cb for the listener {}", file_path, listener_id); return true; diff --git a/src/file_watcher/file_watcher_test.cpp b/src/file_watcher/file_watcher_test.cpp index beeb0693..4681936c 100644 --- a/src/file_watcher/file_watcher_test.cpp +++ b/src/file_watcher/file_watcher_test.cpp @@ -37,24 +37,24 @@ class FileWatcherTest : public ::testing::Test { std::mutex file_change_lock; std::condition_variable file_change_cv; bool is_deleted; - bool cb_called; + int cb_call_count; }; FileChangeParams m_file_change_params; }; -void monitor_file_changes(FileWatcherTest::FileChangeParams& file_change_params) { +void monitor_file_changes(FileWatcherTest::FileChangeParams& file_change_params, const std::string& listener) { EXPECT_TRUE(file_change_params.file_watcher->register_listener( - file_change_params.file_str, "basic_test_listener", - [&file_change_params](const std::string filepath, const bool deleted) { + file_change_params.file_str, listener, + [&file_change_params, listener](const std::string filepath, const bool deleted) { EXPECT_EQ(file_change_params.file_str, filepath); { std::lock_guard< std::mutex > lg(file_change_params.file_change_lock); file_change_params.is_deleted = deleted; - file_change_params.cb_called = true; + file_change_params.cb_call_count--; } if (deleted) { std::ofstream file_of{file_change_params.file_str}; - monitor_file_changes(file_change_params); + monitor_file_changes(file_change_params, listener); } file_change_params.file_change_cv.notify_one(); })); @@ -67,29 +67,29 @@ TEST_F(FileWatcherTest, basic_watcher) { m_file_change_params.file_str = file_path.string(); std::ofstream file_of{m_file_change_params.file_str}; m_file_change_params.is_deleted = true; - m_file_change_params.cb_called = false; + m_file_change_params.cb_call_count = 1; - monitor_file_changes(m_file_change_params); + monitor_file_changes(m_file_change_params, "basic_listener"); // edit the file file_of << "Hello World!"; file_of.flush(); { auto lk = std::unique_lock< std::mutex >(m_file_change_params.file_change_lock); - EXPECT_TRUE(m_file_change_params.file_change_cv.wait_for(lk, std::chrono::milliseconds(500), - [this]() { return m_file_change_params.cb_called; })); + EXPECT_TRUE(m_file_change_params.file_change_cv.wait_for( + lk, std::chrono::milliseconds(500), [this]() { return m_file_change_params.cb_call_count == 0; })); EXPECT_FALSE(m_file_change_params.is_deleted); - m_file_change_params.cb_called = false; // set it false for the next iteration of the test + m_file_change_params.cb_call_count = 1; // set it 1 for the next iteration of the test } // remove the file fs::remove(file_path); { auto lk = std::unique_lock< std::mutex >(m_file_change_params.file_change_lock); - EXPECT_TRUE(m_file_change_params.file_change_cv.wait_for(lk, std::chrono::milliseconds(500), - [this]() { return m_file_change_params.cb_called; })); + EXPECT_TRUE(m_file_change_params.file_change_cv.wait_for( + lk, std::chrono::milliseconds(500), [this]() { return m_file_change_params.cb_call_count == 0; })); EXPECT_TRUE(m_file_change_params.is_deleted); - m_file_change_params.cb_called = false; // set it false for the next iteration of the test + m_file_change_params.cb_call_count = 1; // set it 1 for the next iteration of the test } std::ofstream file_of1{m_file_change_params.file_str}; @@ -97,10 +97,32 @@ TEST_F(FileWatcherTest, basic_watcher) { file_of1.flush(); { auto lk = std::unique_lock< std::mutex >(m_file_change_params.file_change_lock); - EXPECT_TRUE(m_file_change_params.file_change_cv.wait_for(lk, std::chrono::milliseconds(500), - [this]() { return m_file_change_params.cb_called; })); + EXPECT_TRUE(m_file_change_params.file_change_cv.wait_for( + lk, std::chrono::milliseconds(500), [this]() { return m_file_change_params.cb_call_count == 0; })); + EXPECT_FALSE(m_file_change_params.is_deleted); + } +} + +TEST_F(FileWatcherTest, multiple_watchers) { + const auto file_path = fs::current_path() / "basic_test.txt"; + // remove if exists and then create a new file + fs::remove(file_path); + m_file_change_params.file_str = file_path.string(); + std::ofstream file_of{m_file_change_params.file_str}; + m_file_change_params.is_deleted = true; + m_file_change_params.cb_call_count = 2; + + monitor_file_changes(m_file_change_params, "basic_listener1"); + monitor_file_changes(m_file_change_params, "basic_listener2"); + + // edit the file + file_of << "Hello World!"; + file_of.flush(); + { + auto lk = std::unique_lock< std::mutex >(m_file_change_params.file_change_lock); + EXPECT_TRUE(m_file_change_params.file_change_cv.wait_for( + lk, std::chrono::milliseconds(500), [this]() { return m_file_change_params.cb_call_count == 0; })); EXPECT_FALSE(m_file_change_params.is_deleted); - m_file_change_params.cb_called = false; // set it false for the next iteration of the test } } From 280796f00ad48cee6a6ee803d24615858ba9b011 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 12 Jan 2023 14:38:54 -0700 Subject: [PATCH 212/385] Build For GCC-9 variants as well. --- .jenkins/Jenkinsfile | 47 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 894495b6..6ff82a01 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -193,6 +193,53 @@ pipeline { } } } + + stage('Build-GCC9') { + failFast true + matrix { + agent { label 'sds-builder' } + axes { + axis { + name 'BUILD_TYPE' + values 'release', 'debug' + } + } + + stages { + stage('Adjust Tag for Master/PR') { + when { not { anyOf { + branch "${TESTING_BRANCH}" + branch "${STABLE_BRANCH}" + } } } + steps { + sh(script: "sed -Ei 's, version = .*\"([[:digit:]]+\\.[[:digit:]]+\\.[[:digit:]]+).*, version = \"\\1-${env.BUILD_NUMBER}\",' conanfile.py") + } + } + + stage("Build") { + steps { + script { + if (("${env.BRANCH_NAME}" =~ /PR-/) && ("$BUILD_TYPE" == "debug")) { + sh "echo Skipping debug build for PR branch" + } else { + sh "conan create -u ${BUILD_MISSING} -o ${PROJECT}:malloc_impl=libc -o ${PROJECT}:prerelease=False -o ${PROJECT}:sanitize=False -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" + } + } + } + } + + stage("Deploy") { + when { + expression { !(env.BRANCH_NAME =~ /PR-/) } + } + steps { + sh "conan user -r ebay-local -p ${ARTIFACTORY_PASS} _service_sds" + sh "conan upload ${PROJECT}/${TAG} -c --all -r ebay-local" + } + } + } + } + } } post { From 72a87fc2be7f09851671f7e8ac6eff15a4bc23bd Mon Sep 17 00:00:00 2001 From: Yaming Kuang Date: Fri, 13 Jan 2023 12:03:12 -0700 Subject: [PATCH 213/385] SDSTOR-xxxx add sg_list struct to sisl --- conanfile.py | 2 +- include/sisl/fds/buffer.hpp | 49 +++++++++++++++++++++++++++++++--- src/fds/CMakeLists.txt | 8 ++++++ src/fds/tests/test_sg_list.cpp | 41 ++++++++++++++++++++++++++++ 4 files changed, 95 insertions(+), 5 deletions(-) create mode 100644 src/fds/tests/test_sg_list.cpp diff --git a/conanfile.py b/conanfile.py index 6ad2d2dc..a3afd802 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.3.4" + version = "8.3.5" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/include/sisl/fds/buffer.hpp b/include/sisl/fds/buffer.hpp index ec88a773..078e5b34 100644 --- a/include/sisl/fds/buffer.hpp +++ b/include/sisl/fds/buffer.hpp @@ -24,12 +24,12 @@ #include #ifdef __linux__ #include +#include #endif #include #include #include "utils.hpp" - namespace sisl { struct blob { uint8_t* bytes; @@ -39,6 +39,44 @@ struct blob { blob(uint8_t* const b, const uint32_t s) : bytes{b}, size{s} {} }; +struct sg_list { + uint64_t size; // total size of data pointed by iovs; + std::vector< iovec > iovs; +}; + +struct sg_iterator { + sg_iterator(const std::vector< iovec >& v) : m_input_iovs{v} { assert(v.size() > 0); } + + std::vector< iovec > next_iovs(uint32_t size) { + std::vector< iovec > ret_iovs; + uint64_t remain_size = size; + + while ((remain_size > 0) && (m_cur_index < m_input_iovs.size())) { + const auto& inp_iov = m_input_iovs[m_cur_index]; + iovec this_iov; + this_iov.iov_base = static_cast< uint8_t* >(inp_iov.iov_base) + m_cur_offset; + if (remain_size < inp_iov.iov_len - m_cur_offset) { + this_iov.iov_len = remain_size; + m_cur_offset += remain_size; + } else { + this_iov.iov_len = inp_iov.iov_len - m_cur_offset; + ++m_cur_index; + m_cur_offset = 0; + } + + ret_iovs.push_back(this_iov); + assert(remain_size >= this_iov.iov_len); + remain_size -= this_iov.iov_len; + } + + return ret_iovs; + } + + const std::vector< iovec >& m_input_iovs; + uint64_t m_cur_offset{0}; + size_t m_cur_index{0}; +}; + // typedef size_t buftag_t; // TODO: Ideally we want this to be registration, but this tag needs to be used as template @@ -192,7 +230,8 @@ struct io_blob : public blob { buf_alloc(sz, align_size, tag); } io_blob(uint8_t* const bytes, const uint32_t size, const bool is_aligned) : - blob(bytes, size), aligned{is_aligned} {} + blob(bytes, size), + aligned{is_aligned} {} ~io_blob() = default; void buf_alloc(const size_t sz, const uint32_t align_size = 512, const buftag tag = buftag::common) { @@ -205,7 +244,8 @@ struct io_blob : public blob { aligned ? sisl_aligned_free(blob::bytes, tag) : std::free(blob::bytes); } - void buf_realloc(const size_t new_size, const uint32_t align_size = 512, [[maybe_unused]] const buftag tag = buftag::common) { + void buf_realloc(const size_t new_size, const uint32_t align_size = 512, + [[maybe_unused]] const buftag tag = buftag::common) { uint8_t* new_buf{nullptr}; if (aligned) { // aligned before, so do not need check for new align size, once aligned will be aligned on realloc also @@ -231,7 +271,8 @@ struct io_blob : public blob { */ struct byte_array_impl : public io_blob { byte_array_impl(const uint32_t sz, const uint32_t alignment = 0, const buftag tag = buftag::common) : - io_blob(sz, alignment, tag), m_tag{tag} {} + io_blob(sz, alignment, tag), + m_tag{tag} {} byte_array_impl(uint8_t* const bytes, const uint32_t size, const bool is_aligned) : io_blob(bytes, size, is_aligned) {} ~byte_array_impl() { io_blob::buf_free(m_tag); } diff --git a/src/fds/CMakeLists.txt b/src/fds/CMakeLists.txt index f4f84d38..bf3e4611 100644 --- a/src/fds/CMakeLists.txt +++ b/src/fds/CMakeLists.txt @@ -57,6 +57,14 @@ target_sources(test_cb_mutex PRIVATE target_link_libraries(test_cb_mutex sisl ${COMMON_DEPS} GTest::gtest) #add_test(NAME TestCBMutex COMMAND test_cb_mutex) +add_executable(test_sg_list) +target_sources(test_sg_list PRIVATE + tests/test_sg_list.cpp + ) +target_link_libraries(test_sg_list sisl ${COMMON_DEPS} GTest::gtest) +add_test(NAME SgList COMMAND test_sg_list) + + if (DEFINED MALLOC_IMPL) if (${MALLOC_IMPL} STREQUAL "jemalloc") add_executable(test_jemalloc) diff --git a/src/fds/tests/test_sg_list.cpp b/src/fds/tests/test_sg_list.cpp new file mode 100644 index 00000000..b33405a5 --- /dev/null +++ b/src/fds/tests/test_sg_list.cpp @@ -0,0 +1,41 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ + +#include +#include "sisl/fds/buffer.hpp" + +SISL_LOGGING_INIT(test_sg_list) +SISL_OPTIONS_ENABLE(logging, test_sg_list) +SISL_OPTION_GROUP(test_sg_list, + (num_threads, "", "num_threads", "number of threads", + ::cxxopts::value< uint32_t >()->default_value("8"), "number")) + +struct SgListTest : public testing::Test {}; + +// a test case that make sure iterator works as expected; +TEST_F(SgListTest, TestIterator) { + // TO Be Implemented; +} + +int main(int argc, char* argv[]) { + int parsed_argc{argc}; + ::testing::InitGoogleTest(&parsed_argc, argv); + SISL_OPTIONS_LOAD(parsed_argc, argv, logging, test_sg_list); + sisl::logging::SetLogger("test_sg_list"); + spdlog::set_pattern("[%D %T%z] [%^%l%$] [%n] [%t] %v"); + + const auto ret{RUN_ALL_TESTS()}; + return ret; +} From 2ba19babf991e9b7e7410d8617baee7f9a761b6e Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Sat, 21 Jan 2023 09:09:39 -0800 Subject: [PATCH 214/385] nlohmann json package version upgrade --- conanfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index a3afd802..18ae17e1 100644 --- a/conanfile.py +++ b/conanfile.py @@ -66,7 +66,7 @@ def requirements(self): self.requires("flatbuffers/1.12.0") self.requires("grpc/1.48.0") self.requires("jwt-cpp/0.4.0") - self.requires("nlohmann_json/3.10.5") + self.requires("nlohmann_json/3.11.2") self.requires("prometheus-cpp/1.0.1") self.requires("spdlog/1.11.0") self.requires("zmarok-semver/1.1.0") From 9e6cde36af6a055cf632bceafe10c4d3ac69d130 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Sun, 22 Jan 2023 20:19:50 -0800 Subject: [PATCH 215/385] resolve declaration error (#79) Co-authored-by: Ravi Akella email = raakella@ebay.com --- conanfile.py | 2 +- include/sisl/grpc/rpc_common.hpp | 1 + src/file_watcher/file_watcher_test.cpp | 8 ++++---- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/conanfile.py b/conanfile.py index 18ae17e1..64bd6da9 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.3.5" + version = "8.3.6" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/include/sisl/grpc/rpc_common.hpp b/include/sisl/grpc/rpc_common.hpp index 8c726df6..64be224e 100644 --- a/include/sisl/grpc/rpc_common.hpp +++ b/include/sisl/grpc/rpc_common.hpp @@ -17,6 +17,7 @@ namespace sisl { class GrpcServer; class GenericRpcData; +enum class AuthVerifyStatus : uint8_t; struct RPCHelper { static bool has_server_shutdown(const GrpcServer* server); static bool run_generic_handler_cb(GrpcServer* server, const std::string& method, diff --git a/src/file_watcher/file_watcher_test.cpp b/src/file_watcher/file_watcher_test.cpp index 4681936c..6fc2572a 100644 --- a/src/file_watcher/file_watcher_test.cpp +++ b/src/file_watcher/file_watcher_test.cpp @@ -77,7 +77,7 @@ TEST_F(FileWatcherTest, basic_watcher) { { auto lk = std::unique_lock< std::mutex >(m_file_change_params.file_change_lock); EXPECT_TRUE(m_file_change_params.file_change_cv.wait_for( - lk, std::chrono::milliseconds(500), [this]() { return m_file_change_params.cb_call_count == 0; })); + lk, std::chrono::milliseconds(1500), [this]() { return m_file_change_params.cb_call_count == 0; })); EXPECT_FALSE(m_file_change_params.is_deleted); m_file_change_params.cb_call_count = 1; // set it 1 for the next iteration of the test } @@ -87,7 +87,7 @@ TEST_F(FileWatcherTest, basic_watcher) { { auto lk = std::unique_lock< std::mutex >(m_file_change_params.file_change_lock); EXPECT_TRUE(m_file_change_params.file_change_cv.wait_for( - lk, std::chrono::milliseconds(500), [this]() { return m_file_change_params.cb_call_count == 0; })); + lk, std::chrono::milliseconds(1500), [this]() { return m_file_change_params.cb_call_count == 0; })); EXPECT_TRUE(m_file_change_params.is_deleted); m_file_change_params.cb_call_count = 1; // set it 1 for the next iteration of the test } @@ -98,7 +98,7 @@ TEST_F(FileWatcherTest, basic_watcher) { { auto lk = std::unique_lock< std::mutex >(m_file_change_params.file_change_lock); EXPECT_TRUE(m_file_change_params.file_change_cv.wait_for( - lk, std::chrono::milliseconds(500), [this]() { return m_file_change_params.cb_call_count == 0; })); + lk, std::chrono::milliseconds(1500), [this]() { return m_file_change_params.cb_call_count == 0; })); EXPECT_FALSE(m_file_change_params.is_deleted); } } @@ -121,7 +121,7 @@ TEST_F(FileWatcherTest, multiple_watchers) { { auto lk = std::unique_lock< std::mutex >(m_file_change_params.file_change_lock); EXPECT_TRUE(m_file_change_params.file_change_cv.wait_for( - lk, std::chrono::milliseconds(500), [this]() { return m_file_change_params.cb_call_count == 0; })); + lk, std::chrono::milliseconds(1500), [this]() { return m_file_change_params.cb_call_count == 0; })); EXPECT_FALSE(m_file_change_params.is_deleted); } } From 34dd2f44ea1d0baf91e1783363019996f04c3374 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Tue, 24 Jan 2023 10:33:05 -0800 Subject: [PATCH 216/385] ad manual token parser (#81) Co-authored-by: Ravi Akella email = raakella@ebay.com --- conanfile.py | 2 +- include/sisl/auth_manager/trf_client.hpp | 3 +++ src/auth_manager/tests/AuthTest.cpp | 32 +++++++++++++++++------- src/auth_manager/tests/dummy_grant.cg | 1 - src/auth_manager/trf_client.cpp | 30 +++++++++++++++++++++- src/file_watcher/file_watcher_test.cpp | 2 ++ 6 files changed, 58 insertions(+), 12 deletions(-) delete mode 100644 src/auth_manager/tests/dummy_grant.cg diff --git a/conanfile.py b/conanfile.py index 64bd6da9..2e0e846e 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.3.6" + version = "8.3.7" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/include/sisl/auth_manager/trf_client.hpp b/include/sisl/auth_manager/trf_client.hpp index 1eab98d1..9632b7e7 100644 --- a/include/sisl/auth_manager/trf_client.hpp +++ b/include/sisl/auth_manager/trf_client.hpp @@ -38,6 +38,9 @@ class TrfClient { protected: // acquire unique lock before calling virtual void request_with_grant_token(); + void parse_response(const std::string& resp); + static std::string get_string(const std::string& resp, const std::string& pattern); + static std::string get_quoted_string(const std::string& resp, const std::string& pattern); protected: std::string m_access_token; diff --git a/src/auth_manager/tests/AuthTest.cpp b/src/auth_manager/tests/AuthTest.cpp index f998a17f..7447a346 100644 --- a/src/auth_manager/tests/AuthTest.cpp +++ b/src/auth_manager/tests/AuthTest.cpp @@ -156,6 +156,9 @@ class MockTrfClient : public TrfClient { void set_expiry(std::chrono::system_clock::time_point tp) { m_expiry = tp; } std::string get_access_token() { return m_access_token; } + std::chrono::system_clock::time_point get_expiry() { return m_expiry; } + + void parse_token(const std::string& resp) { TrfClient::parse_response(resp); } }; static void load_trf_settings() { @@ -220,14 +223,8 @@ static const std::string trf_token_server_ip{"127.0.0.1"}; static const uint32_t trf_token_server_port{12346}; static std::string token_response; static void set_token_response(const std::string& raw_token) { - token_response = "{\n" - " \"access_token\": \"" + - raw_token + - "\",\n" - " \"token_type\": \"Bearer\",\n" - " \"expires_in\": 2000,\n" - " \"refresh_token\": \"dummy_refresh_token\"\n" - "}"; + token_response = "{\"access_token\":\"" + raw_token + + "\",\"token_type\":\"Bearer\",\"expires_in\":2000,\"refresh_token\":\"dummy_refresh_token\"}\n"; } class TokenApiImpl : public TokenApi { @@ -256,7 +253,10 @@ class TrfClientTest : public ::testing::Test { APIBase::start(); } - virtual void TearDown() override { APIBase::stop(); } + virtual void TearDown() override { + APIBase::stop(); + remove_grant_path(); + } private: std::unique_ptr< TokenApiImpl > m_token_server; @@ -297,6 +297,20 @@ TEST_F(TrfClientTest, request_with_grant_token) { EXPECT_EQ(raw_token, mock_trf_client.get_access_token()); EXPECT_EQ("Bearer", mock_trf_client.get_token_type()); } + +TEST(TrfClientParseTest, parse_token) { + load_trf_settings(); + MockTrfClient mock_trf_client; + const auto raw_token{TestToken().sign_rs256()}; + set_token_response(raw_token); + EXPECT_TRUE(mock_trf_client.get_access_token().empty()); + EXPECT_TRUE(mock_trf_client.get_token_type().empty()); + mock_trf_client.parse_token(token_response); + EXPECT_EQ(raw_token, mock_trf_client.get_access_token()); + EXPECT_EQ("Bearer", mock_trf_client.get_token_type()); + EXPECT_TRUE(mock_trf_client.get_expiry() > std::chrono::system_clock::now()); + remove_grant_path(); +} } // namespace sisl::testing using namespace sisl; diff --git a/src/auth_manager/tests/dummy_grant.cg b/src/auth_manager/tests/dummy_grant.cg deleted file mode 100644 index e525b7a0..00000000 --- a/src/auth_manager/tests/dummy_grant.cg +++ /dev/null @@ -1 +0,0 @@ -dummy cg contents diff --git a/src/auth_manager/trf_client.cpp b/src/auth_manager/trf_client.cpp index 49d43cfe..4d539180 100644 --- a/src/auth_manager/trf_client.cpp +++ b/src/auth_manager/trf_client.cpp @@ -76,10 +76,38 @@ void TrfClient::request_with_grant_token() { m_access_token = resp_json["access_token"]; m_token_type = resp_json["token_type"]; } catch ([[maybe_unused]] const nlohmann::detail::exception& e) { - LOGDEBUG("parsing token response failed, what: {}", e.what()); + LOGERROR("parsing token response using json failed, what: {}; trying to parse manually", e.what()); + parse_response(resp.text); } } +void TrfClient::parse_response(const std::string& resp) { + try { + static std::string token1{"{\"access_token\":"}; + static std::string token2{"\"token_type\":"}; + static std::string token3{"\"expires_in\":"}; + + if (m_access_token = get_quoted_string(resp, token1); m_access_token.empty()) { return; } + if (m_token_type = get_quoted_string(resp, token2); m_access_token.empty()) { return; } + auto expiry_str = get_string(resp, token3); + if (expiry_str.empty()) { return; } + m_expiry = std::chrono::system_clock::now() + std::chrono::seconds(std::stol(expiry_str)); + } catch (const std::exception& e) { LOGDEBUG("failed to parse pattern, what: {}", e.what()); } +} + +std::string TrfClient::get_string(const std::string& resp, const std::string& pattern) { + auto n = resp.find(pattern); + if (n == std::string::npos) { return ""; } + auto n1 = resp.find(',', n); + if (n1 == std::string::npos) { return ""; } + return resp.substr(n + pattern.length(), n1 - n - pattern.length()); +} + +std::string TrfClient::get_quoted_string(const std::string& resp, const std::string& pattern) { + auto quoted_string{get_string(resp, pattern)}; + return quoted_string.substr(1, quoted_string.length() - 2); +} + std::string TrfClient::get_token() { { std::shared_lock< std::shared_mutex > lock(m_mtx); diff --git a/src/file_watcher/file_watcher_test.cpp b/src/file_watcher/file_watcher_test.cpp index 6fc2572a..1071d7ff 100644 --- a/src/file_watcher/file_watcher_test.cpp +++ b/src/file_watcher/file_watcher_test.cpp @@ -92,6 +92,7 @@ TEST_F(FileWatcherTest, basic_watcher) { m_file_change_params.cb_call_count = 1; // set it 1 for the next iteration of the test } + /* TODO fix this in CI. std::ofstream file_of1{m_file_change_params.file_str}; file_of1 << "Hello World Again!"; file_of1.flush(); @@ -101,6 +102,7 @@ TEST_F(FileWatcherTest, basic_watcher) { lk, std::chrono::milliseconds(1500), [this]() { return m_file_change_params.cb_call_count == 0; })); EXPECT_FALSE(m_file_change_params.is_deleted); } + */ } TEST_F(FileWatcherTest, multiple_watchers) { From fd02a263108b96b1a40735fd0488fcedd1f81c32 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 25 Jan 2023 09:19:16 -0700 Subject: [PATCH 217/385] Remove need for GCC-9 builds. --- .jenkins/Jenkinsfile | 47 -------------------------------------------- 1 file changed, 47 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 6ff82a01..894495b6 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -193,53 +193,6 @@ pipeline { } } } - - stage('Build-GCC9') { - failFast true - matrix { - agent { label 'sds-builder' } - axes { - axis { - name 'BUILD_TYPE' - values 'release', 'debug' - } - } - - stages { - stage('Adjust Tag for Master/PR') { - when { not { anyOf { - branch "${TESTING_BRANCH}" - branch "${STABLE_BRANCH}" - } } } - steps { - sh(script: "sed -Ei 's, version = .*\"([[:digit:]]+\\.[[:digit:]]+\\.[[:digit:]]+).*, version = \"\\1-${env.BUILD_NUMBER}\",' conanfile.py") - } - } - - stage("Build") { - steps { - script { - if (("${env.BRANCH_NAME}" =~ /PR-/) && ("$BUILD_TYPE" == "debug")) { - sh "echo Skipping debug build for PR branch" - } else { - sh "conan create -u ${BUILD_MISSING} -o ${PROJECT}:malloc_impl=libc -o ${PROJECT}:prerelease=False -o ${PROJECT}:sanitize=False -pr ${BUILD_TYPE} . ${PROJECT}/${TAG}" - } - } - } - } - - stage("Deploy") { - when { - expression { !(env.BRANCH_NAME =~ /PR-/) } - } - steps { - sh "conan user -r ebay-local -p ${ARTIFACTORY_PASS} _service_sds" - sh "conan upload ${PROJECT}/${TAG} -c --all -r ebay-local" - } - } - } - } - } } post { From 4e5d86aeb561c32ea5f5cdb16ae3871786e3800e Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Thu, 26 Jan 2023 16:00:55 -0800 Subject: [PATCH 218/385] Cherrypick fixes (#83) * resolve deadlock in file watcher on_modified_event * improve unit test * resolve the bug in multiple listeners case in file watcher * ad manual token parser * bump conan version Co-authored-by: Ravi Akella email = raakella@ebay.com --- conanfile.py | 2 +- include/sisl/auth_manager/trf_client.hpp | 3 + include/sisl/file_watcher/file_watcher.hpp | 2 +- src/auth_manager/tests/AuthTest.cpp | 32 ++++-- src/auth_manager/tests/dummy_grant.cg | 1 - src/auth_manager/trf_client.cpp | 30 +++++- src/file_watcher/file_watcher.cpp | 14 +-- src/file_watcher/file_watcher_test.cpp | 113 +++++++++++++++------ 8 files changed, 149 insertions(+), 48 deletions(-) delete mode 100644 src/auth_manager/tests/dummy_grant.cg diff --git a/conanfile.py b/conanfile.py index d18d5c37..cb903135 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.2.7" + version = "8.2.8" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/include/sisl/auth_manager/trf_client.hpp b/include/sisl/auth_manager/trf_client.hpp index 1eab98d1..9632b7e7 100644 --- a/include/sisl/auth_manager/trf_client.hpp +++ b/include/sisl/auth_manager/trf_client.hpp @@ -38,6 +38,9 @@ class TrfClient { protected: // acquire unique lock before calling virtual void request_with_grant_token(); + void parse_response(const std::string& resp); + static std::string get_string(const std::string& resp, const std::string& pattern); + static std::string get_quoted_string(const std::string& resp, const std::string& pattern); protected: std::string m_access_token; diff --git a/include/sisl/file_watcher/file_watcher.hpp b/include/sisl/file_watcher/file_watcher.hpp index 7eff1b6e..af205293 100644 --- a/include/sisl/file_watcher/file_watcher.hpp +++ b/include/sisl/file_watcher/file_watcher.hpp @@ -41,7 +41,7 @@ class FileWatcher { private: int m_inotify_fd; std::map< std::string, FileInfo > m_files; - std::mutex m_files_lock; + mutable std::mutex m_files_lock; std::unique_ptr< std::thread > m_fw_thread; // This is used to notify poll loop to exit int m_pipefd[2] = {-1, -1}; diff --git a/src/auth_manager/tests/AuthTest.cpp b/src/auth_manager/tests/AuthTest.cpp index f998a17f..7447a346 100644 --- a/src/auth_manager/tests/AuthTest.cpp +++ b/src/auth_manager/tests/AuthTest.cpp @@ -156,6 +156,9 @@ class MockTrfClient : public TrfClient { void set_expiry(std::chrono::system_clock::time_point tp) { m_expiry = tp; } std::string get_access_token() { return m_access_token; } + std::chrono::system_clock::time_point get_expiry() { return m_expiry; } + + void parse_token(const std::string& resp) { TrfClient::parse_response(resp); } }; static void load_trf_settings() { @@ -220,14 +223,8 @@ static const std::string trf_token_server_ip{"127.0.0.1"}; static const uint32_t trf_token_server_port{12346}; static std::string token_response; static void set_token_response(const std::string& raw_token) { - token_response = "{\n" - " \"access_token\": \"" + - raw_token + - "\",\n" - " \"token_type\": \"Bearer\",\n" - " \"expires_in\": 2000,\n" - " \"refresh_token\": \"dummy_refresh_token\"\n" - "}"; + token_response = "{\"access_token\":\"" + raw_token + + "\",\"token_type\":\"Bearer\",\"expires_in\":2000,\"refresh_token\":\"dummy_refresh_token\"}\n"; } class TokenApiImpl : public TokenApi { @@ -256,7 +253,10 @@ class TrfClientTest : public ::testing::Test { APIBase::start(); } - virtual void TearDown() override { APIBase::stop(); } + virtual void TearDown() override { + APIBase::stop(); + remove_grant_path(); + } private: std::unique_ptr< TokenApiImpl > m_token_server; @@ -297,6 +297,20 @@ TEST_F(TrfClientTest, request_with_grant_token) { EXPECT_EQ(raw_token, mock_trf_client.get_access_token()); EXPECT_EQ("Bearer", mock_trf_client.get_token_type()); } + +TEST(TrfClientParseTest, parse_token) { + load_trf_settings(); + MockTrfClient mock_trf_client; + const auto raw_token{TestToken().sign_rs256()}; + set_token_response(raw_token); + EXPECT_TRUE(mock_trf_client.get_access_token().empty()); + EXPECT_TRUE(mock_trf_client.get_token_type().empty()); + mock_trf_client.parse_token(token_response); + EXPECT_EQ(raw_token, mock_trf_client.get_access_token()); + EXPECT_EQ("Bearer", mock_trf_client.get_token_type()); + EXPECT_TRUE(mock_trf_client.get_expiry() > std::chrono::system_clock::now()); + remove_grant_path(); +} } // namespace sisl::testing using namespace sisl; diff --git a/src/auth_manager/tests/dummy_grant.cg b/src/auth_manager/tests/dummy_grant.cg deleted file mode 100644 index e525b7a0..00000000 --- a/src/auth_manager/tests/dummy_grant.cg +++ /dev/null @@ -1 +0,0 @@ -dummy cg contents diff --git a/src/auth_manager/trf_client.cpp b/src/auth_manager/trf_client.cpp index 49d43cfe..4d539180 100644 --- a/src/auth_manager/trf_client.cpp +++ b/src/auth_manager/trf_client.cpp @@ -76,10 +76,38 @@ void TrfClient::request_with_grant_token() { m_access_token = resp_json["access_token"]; m_token_type = resp_json["token_type"]; } catch ([[maybe_unused]] const nlohmann::detail::exception& e) { - LOGDEBUG("parsing token response failed, what: {}", e.what()); + LOGERROR("parsing token response using json failed, what: {}; trying to parse manually", e.what()); + parse_response(resp.text); } } +void TrfClient::parse_response(const std::string& resp) { + try { + static std::string token1{"{\"access_token\":"}; + static std::string token2{"\"token_type\":"}; + static std::string token3{"\"expires_in\":"}; + + if (m_access_token = get_quoted_string(resp, token1); m_access_token.empty()) { return; } + if (m_token_type = get_quoted_string(resp, token2); m_access_token.empty()) { return; } + auto expiry_str = get_string(resp, token3); + if (expiry_str.empty()) { return; } + m_expiry = std::chrono::system_clock::now() + std::chrono::seconds(std::stol(expiry_str)); + } catch (const std::exception& e) { LOGDEBUG("failed to parse pattern, what: {}", e.what()); } +} + +std::string TrfClient::get_string(const std::string& resp, const std::string& pattern) { + auto n = resp.find(pattern); + if (n == std::string::npos) { return ""; } + auto n1 = resp.find(',', n); + if (n1 == std::string::npos) { return ""; } + return resp.substr(n + pattern.length(), n1 - n - pattern.length()); +} + +std::string TrfClient::get_quoted_string(const std::string& resp, const std::string& pattern) { + auto quoted_string{get_string(resp, pattern)}; + return quoted_string.substr(1, quoted_string.length() - 2); +} + std::string TrfClient::get_token() { { std::shared_lock< std::shared_mutex > lock(m_mtx); diff --git a/src/file_watcher/file_watcher.cpp b/src/file_watcher/file_watcher.cpp index b34c7d42..138239af 100644 --- a/src/file_watcher/file_watcher.cpp +++ b/src/file_watcher/file_watcher.cpp @@ -69,7 +69,7 @@ bool FileWatcher::register_listener(const std::string& file_path, const std::str { auto lk = std::unique_lock< std::mutex >(m_files_lock); if (const auto it{m_files.find(file_path)}; it != m_files.end()) { - auto file_info = it->second; + auto& file_info = it->second; file_info.m_handlers.emplace(listener_id, file_event_handler); LOGDEBUG("File path {} exists, adding the handler cb for the listener {}", file_path, listener_id); return true; @@ -186,16 +186,18 @@ void FileWatcher::handle_events() { } void FileWatcher::on_modified_event(const int wd, const bool is_deleted) { - auto lk = std::unique_lock< std::mutex >(m_files_lock); FileInfo file_info; get_fileinfo(wd, file_info); if (is_deleted) { + // There is a corner case (very unlikely) where a new listener + // regestered for this filepath after the current delete event was triggered. + { + auto lk = std::unique_lock< std::mutex >(m_files_lock); + m_files.erase(file_info.m_filepath); + } for (const auto& [id, handler] : file_info.m_handlers) { handler(file_info.m_filepath, true); } - // There is a corner case (very unlikely) where a new listener - // regestered for this filepath after the current delete event was triggered. - m_files.erase(file_info.m_filepath); return; } @@ -243,8 +245,8 @@ bool FileWatcher::get_file_contents(const std::string& file_name, std::string& c return false; } -// Hold the m_files_lock before calling this method. void FileWatcher::get_fileinfo(const int wd, FileInfo& file_info) const { + auto lk = std::unique_lock< std::mutex >(m_files_lock); for (const auto& [file_path, file] : m_files) { if (file.m_wd == wd) { file_info = file; diff --git a/src/file_watcher/file_watcher_test.cpp b/src/file_watcher/file_watcher_test.cpp index b6081405..9dbcb1e6 100644 --- a/src/file_watcher/file_watcher_test.cpp +++ b/src/file_watcher/file_watcher_test.cpp @@ -21,55 +21,110 @@ using namespace ::testing; class FileWatcherTest : public ::testing::Test { public: - std::shared_ptr< FileWatcher > file_watcher; virtual void SetUp() override { - file_watcher = std::make_shared< FileWatcher >(); - EXPECT_TRUE(file_watcher->start()); + m_file_change_params.file_watcher = std::make_shared< FileWatcher >(); + EXPECT_TRUE(m_file_change_params.file_watcher->start()); } - virtual void TearDown() override { EXPECT_TRUE(file_watcher->stop()); } + virtual void TearDown() override { + EXPECT_TRUE(m_file_change_params.file_watcher->stop()); + std::remove(m_file_change_params.file_str.c_str()); + } - std::mutex file_change_lock; - std::condition_variable file_change_cv; + struct FileChangeParams { + std::shared_ptr< FileWatcher > file_watcher; + std::string file_str; + std::mutex file_change_lock; + std::condition_variable file_change_cv; + bool is_deleted; + int cb_call_count; + }; + FileChangeParams m_file_change_params; }; +void monitor_file_changes(FileWatcherTest::FileChangeParams& file_change_params, const std::string& listener) { + EXPECT_TRUE(file_change_params.file_watcher->register_listener( + file_change_params.file_str, listener, + [&file_change_params, listener](const std::string filepath, const bool deleted) { + EXPECT_EQ(file_change_params.file_str, filepath); + { + std::lock_guard< std::mutex > lg(file_change_params.file_change_lock); + file_change_params.is_deleted = deleted; + file_change_params.cb_call_count--; + } + if (deleted) { + std::ofstream file_of{file_change_params.file_str}; + monitor_file_changes(file_change_params, listener); + } + file_change_params.file_change_cv.notify_one(); + })); +} + TEST_F(FileWatcherTest, basic_watcher) { const auto file_path = fs::current_path() / "basic_test.txt"; // remove if exists and then create a new file fs::remove(file_path); - const std::string file_str{file_path.string()}; - std::ofstream file_of{file_str}; - bool is_deleted = true; - bool cb_called = false; - - EXPECT_TRUE(file_watcher->register_listener( - file_str, "basic_test_listener", - [this, &is_deleted, &cb_called, &file_str](const std::string filepath, const bool deleted) { - EXPECT_EQ(file_str, filepath); - { - std::lock_guard< std::mutex > lg(file_change_lock); - is_deleted = deleted; - cb_called = true; - } - file_change_cv.notify_one(); - })); + m_file_change_params.file_str = file_path.string(); + std::ofstream file_of{m_file_change_params.file_str}; + m_file_change_params.is_deleted = true; + m_file_change_params.cb_call_count = 1; + + monitor_file_changes(m_file_change_params, "basic_listener"); // edit the file file_of << "Hello World!"; file_of.flush(); { - auto lk = std::unique_lock< std::mutex >(file_change_lock); - EXPECT_TRUE(file_change_cv.wait_for(lk, std::chrono::milliseconds(500), [&cb_called]() { return cb_called; })); - EXPECT_FALSE(is_deleted); - cb_called = false; // set it false for the next iteration of the test + auto lk = std::unique_lock< std::mutex >(m_file_change_params.file_change_lock); + EXPECT_TRUE(m_file_change_params.file_change_cv.wait_for( + lk, std::chrono::milliseconds(1500), [this]() { return m_file_change_params.cb_call_count == 0; })); + EXPECT_FALSE(m_file_change_params.is_deleted); + m_file_change_params.cb_call_count = 1; // set it 1 for the next iteration of the test } // remove the file fs::remove(file_path); { - auto lk = std::unique_lock< std::mutex >(file_change_lock); - EXPECT_TRUE(file_change_cv.wait_for(lk, std::chrono::milliseconds(500), [&cb_called]() { return cb_called; })); - EXPECT_TRUE(is_deleted); + auto lk = std::unique_lock< std::mutex >(m_file_change_params.file_change_lock); + EXPECT_TRUE(m_file_change_params.file_change_cv.wait_for( + lk, std::chrono::milliseconds(1500), [this]() { return m_file_change_params.cb_call_count == 0; })); + EXPECT_TRUE(m_file_change_params.is_deleted); + m_file_change_params.cb_call_count = 1; // set it 1 for the next iteration of the test + } + + /* TODO fix this in CI. + std::ofstream file_of1{m_file_change_params.file_str}; + file_of1 << "Hello World Again!"; + file_of1.flush(); + { + auto lk = std::unique_lock< std::mutex >(m_file_change_params.file_change_lock); + EXPECT_TRUE(m_file_change_params.file_change_cv.wait_for( + lk, std::chrono::milliseconds(1500), [this]() { return m_file_change_params.cb_call_count == 0; })); + EXPECT_FALSE(m_file_change_params.is_deleted); + } + */ +} + +TEST_F(FileWatcherTest, multiple_watchers) { + const auto file_path = fs::current_path() / "basic_test.txt"; + // remove if exists and then create a new file + fs::remove(file_path); + m_file_change_params.file_str = file_path.string(); + std::ofstream file_of{m_file_change_params.file_str}; + m_file_change_params.is_deleted = true; + m_file_change_params.cb_call_count = 2; + + monitor_file_changes(m_file_change_params, "basic_listener1"); + monitor_file_changes(m_file_change_params, "basic_listener2"); + + // edit the file + file_of << "Hello World!"; + file_of.flush(); + { + auto lk = std::unique_lock< std::mutex >(m_file_change_params.file_change_lock); + EXPECT_TRUE(m_file_change_params.file_change_cv.wait_for( + lk, std::chrono::milliseconds(1500), [this]() { return m_file_change_params.cb_call_count == 0; })); + EXPECT_FALSE(m_file_change_params.is_deleted); } } From 9ed347d4289fb211e5abe3bddb4658d77c90a3aa Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 27 Jan 2023 08:45:33 -0700 Subject: [PATCH 219/385] Instruct Github actions to build PRs to stable channel as well. --- .github/workflows/build_with_conan.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_with_conan.yml b/.github/workflows/build_with_conan.yml index c7800353..c6561f9a 100644 --- a/.github/workflows/build_with_conan.yml +++ b/.github/workflows/build_with_conan.yml @@ -2,9 +2,13 @@ name: Conan Build on: push: - branches: [ master ] + branches: + - master + - 'stable/v*' pull_request: - branches: [ master ] + branches: + - master + - 'stable/v*' #env: # Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.) From 6916e2867ca23570f42de248605323d5c6876f48 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Fri, 27 Jan 2023 07:57:23 -0800 Subject: [PATCH 220/385] Merge stable into master (#84) * Cherrypick fixes (#83) * resolve deadlock in file watcher on_modified_event * improve unit test * resolve the bug in multiple listeners case in file watcher * ad manual token parser * bump conan version Co-authored-by: Ravi Akella email = raakella@ebay.com * bump version --------- Co-authored-by: Ravi Akella email = raakella@ebay.com --- conanfile.py | 2 +- src/file_watcher/file_watcher_test.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/conanfile.py b/conanfile.py index 2e0e846e..ea1d0945 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.3.7" + version = "8.3.8" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/src/file_watcher/file_watcher_test.cpp b/src/file_watcher/file_watcher_test.cpp index 1071d7ff..9dbcb1e6 100644 --- a/src/file_watcher/file_watcher_test.cpp +++ b/src/file_watcher/file_watcher_test.cpp @@ -92,7 +92,7 @@ TEST_F(FileWatcherTest, basic_watcher) { m_file_change_params.cb_call_count = 1; // set it 1 for the next iteration of the test } - /* TODO fix this in CI. + /* TODO fix this in CI. std::ofstream file_of1{m_file_change_params.file_str}; file_of1 << "Hello World Again!"; file_of1.flush(); From ed773dbb0c7e53f46b29374e477f4be3dc1db99b Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Mon, 30 Jan 2023 19:19:01 -0800 Subject: [PATCH 221/385] Add api to get non-contiguos stream entries from StreamTracker (#86) --- conanfile.py | 2 +- include/sisl/fds/buffer.hpp | 6 ++++++ include/sisl/fds/stream_tracker.hpp | 23 ++++++++++++++++++----- 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/conanfile.py b/conanfile.py index ea1d0945..bd174a61 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.3.8" + version = "8.3.9" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/include/sisl/fds/buffer.hpp b/include/sisl/fds/buffer.hpp index 078e5b34..8ee1c053 100644 --- a/include/sisl/fds/buffer.hpp +++ b/include/sisl/fds/buffer.hpp @@ -264,6 +264,10 @@ struct io_blob : public blob { blob::size = new_size; blob::bytes = new_buf; } + + static io_blob from_string(const std::string& s) { + return io_blob{r_cast< uint8_t* >(const_cast< char* >(s.data())), uint32_cast(s.size()), false}; + } }; /* An extension to blob where the buffer it holds is allocated by constructor and freed during destruction. The only @@ -356,6 +360,8 @@ struct byte_view { void set_size(const uint32_t sz) { m_view.size = sz; } void validate() { assert((m_base_buf->bytes + m_base_buf->size) >= (m_view.bytes + m_view.size)); } + std::string get_string() const { return std::string(r_cast< const char* >(bytes()), uint64_cast(size())); } + private: byte_array m_base_buf; blob m_view; diff --git a/include/sisl/fds/stream_tracker.hpp b/include/sisl/fds/stream_tracker.hpp index 879461fa..3cff7e64 100644 --- a/include/sisl/fds/stream_tracker.hpp +++ b/include/sisl/fds/stream_tracker.hpp @@ -188,9 +188,10 @@ class StreamTracker { return m_slot_ref_idx - 1; } - void foreach_completed(int64_t start_idx, const auto& cb) { _foreach(start_idx, true /* completed */, cb); } - - void foreach_active(int64_t start_idx, const auto& cb) { _foreach(start_idx, false /* completed */, cb); } + void foreach_contiguous_completed(int64_t start_idx, const auto& cb) { _foreach_contiguous(start_idx, true, cb); } + void foreach_contiguous_active(int64_t start_idx, const auto& cb) { _foreach_contiguous(start_idx, false, cb); } + void foreach_all_completed(int64_t start_idx, const auto& cb) { _foreach_all(start_idx, true, cb); } + void foreach_all_active(int64_t start_idx, const auto& cb) { _foreach_all(start_idx, false, cb); } int64_t completed_upto(int64_t search_hint_idx = 0) const { folly::SharedMutexWritePriority::ReadHolder holder(m_lock); @@ -307,15 +308,27 @@ class StreamTracker { } } - void _foreach(int64_t start_idx, bool completed, const auto& cb) { + void _foreach_contiguous(int64_t start_idx, bool completed_only, const auto& cb) { folly::SharedMutexWritePriority::ReadHolder holder(m_lock); - auto upto = _upto(completed, start_idx); + auto upto = _upto(completed_only, start_idx); for (auto idx = start_idx; idx <= upto; ++idx) { auto proceed = cb(idx, upto, *(get_slot_data(idx - m_slot_ref_idx))); if (!proceed) break; } } + void _foreach_all(int64_t start_idx, bool completed_only, const auto& cb) { + folly::SharedMutexWritePriority::ReadHolder holder(m_lock); + auto search_bit = std::max(0l, (start_idx - m_slot_ref_idx)); + do { + search_bit = completed_only ? m_comp_slot_bits.get_next_set_bit(search_bit) + : m_active_slot_bits.get_next_set_bit(search_bit); + if (search_bit == AtomicBitset::npos) { break; } + if (!cb(search_bit + m_slot_ref_idx, *(get_slot_data(search_bit)))) { break; } + ++search_bit; + } while (true); + } + T* get_slot_data(int64_t nbit) const { return &(m_slot_data[nbit + m_data_skip_count]); } private: From 241fb30bf5b891af009070ab10b64cdca734dd37 Mon Sep 17 00:00:00 2001 From: Yaming Kuang Date: Tue, 31 Jan 2023 11:20:08 -0700 Subject: [PATCH 222/385] bump sisl master version to 9.0.1 --- conanfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index a3afd802..f52a7b0e 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.3.5" + version = "9.0.1" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") From eea7a947adcf5d734200088a23ea282c81432a0b Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Thu, 2 Feb 2023 10:49:21 -0800 Subject: [PATCH 223/385] improve logging (#88) Co-authored-by: Ravi Akella email = raakella@ebay.com --- conanfile.py | 2 +- include/sisl/file_watcher/file_watcher.hpp | 1 + src/auth_manager/trf_client.cpp | 6 +++--- src/file_watcher/file_watcher.cpp | 19 ++++++++++++------- 4 files changed, 17 insertions(+), 11 deletions(-) diff --git a/conanfile.py b/conanfile.py index cb903135..ebe61e08 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.2.8" + version = "8.2.9" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/include/sisl/file_watcher/file_watcher.hpp b/include/sisl/file_watcher/file_watcher.hpp index af205293..109028d0 100644 --- a/include/sisl/file_watcher/file_watcher.hpp +++ b/include/sisl/file_watcher/file_watcher.hpp @@ -35,6 +35,7 @@ class FileWatcher { void handle_events(); void get_fileinfo(const int wd, FileInfo& file_info) const; void on_modified_event(const int wd, const bool is_deleted); + bool remove_watcher(FileInfo& file_info); static bool get_file_contents(const std::string& file_name, std::string& contents); static bool check_file_size(const std::string& file_path); diff --git a/src/auth_manager/trf_client.cpp b/src/auth_manager/trf_client.cpp index 4d539180..92e5f062 100644 --- a/src/auth_manager/trf_client.cpp +++ b/src/auth_manager/trf_client.cpp @@ -65,7 +65,7 @@ void TrfClient::request_with_grant_token() { session.SetTimeout(std::chrono::milliseconds{5000}); const auto resp{session.Post()}; if (resp.error || resp.status_code != 200) { - LOGDEBUG("request grant token from server failed, error: {}, status code: {}", resp.error.message, + LOGERROR("request grant token from server failed, error: {}, status code: {}", resp.error.message, resp.status_code); return; } @@ -76,7 +76,7 @@ void TrfClient::request_with_grant_token() { m_access_token = resp_json["access_token"]; m_token_type = resp_json["token_type"]; } catch ([[maybe_unused]] const nlohmann::detail::exception& e) { - LOGERROR("parsing token response using json failed, what: {}; trying to parse manually", e.what()); + LOGDEBUG("parsing token response using json failed, what: {}; trying to parse manually", e.what()); parse_response(resp.text); } } @@ -92,7 +92,7 @@ void TrfClient::parse_response(const std::string& resp) { auto expiry_str = get_string(resp, token3); if (expiry_str.empty()) { return; } m_expiry = std::chrono::system_clock::now() + std::chrono::seconds(std::stol(expiry_str)); - } catch (const std::exception& e) { LOGDEBUG("failed to parse pattern, what: {}", e.what()); } + } catch (const std::exception& e) { LOGERROR("failed to parse response: {}, what: {}", resp, e.what()); } } std::string TrfClient::get_string(const std::string& resp, const std::string& pattern) { diff --git a/src/file_watcher/file_watcher.cpp b/src/file_watcher/file_watcher.cpp index 138239af..05145ba1 100644 --- a/src/file_watcher/file_watcher.cpp +++ b/src/file_watcher/file_watcher.cpp @@ -110,19 +110,24 @@ bool FileWatcher::unregister_listener(const std::string& file_path, const std::s file_info.m_handlers.erase(listener_id); if (file_info.m_handlers.empty()) { - if (auto err = inotify_rm_watch(m_inotify_fd, file_info.m_wd); err != 0) { - LOGERROR("inotify rm failed for file path {}, listener id {} errno: {}", file_path, listener_id, errno); + if (!remove_watcher(file_info)) { + LOGDEBUG("inotify rm failed for file path {}, listener id {} errno: {}", file_path, listener_id, errno); return false; } - m_files.erase(file_path); } return true; } +bool FileWatcher::remove_watcher(FileInfo& file_info) { + if (auto err = inotify_rm_watch(m_inotify_fd, file_info.m_wd); err != 0) { return false; } + m_files.erase(file_info.m_filepath); + return true; +} + bool FileWatcher::stop() { // signal event loop to break and wait for the thread to join // event value does not matter, this is just generating an event at the read end of the pipe - LOGINFO("Stopping file watcher event loop."); + LOGDEBUG("Stopping file watcher event loop."); int event = 1; int ret; do { @@ -134,7 +139,7 @@ bool FileWatcher::stop() { return false; } - LOGINFO("Waiting for file watcher thread to join.."); + LOGDEBUG("Waiting for file watcher thread to join.."); if (m_fw_thread && m_fw_thread->joinable()) { try { m_fw_thread->join(); @@ -143,7 +148,7 @@ bool FileWatcher::stop() { return false; } } - LOGINFO("file watcher thread joined."); + LOGINFO("file watcher stopped."); return true; } @@ -193,7 +198,7 @@ void FileWatcher::on_modified_event(const int wd, const bool is_deleted) { // regestered for this filepath after the current delete event was triggered. { auto lk = std::unique_lock< std::mutex >(m_files_lock); - m_files.erase(file_info.m_filepath); + remove_watcher(file_info); } for (const auto& [id, handler] : file_info.m_handlers) { handler(file_info.m_filepath, true); From 2a70d80318f509cde47dbcd7e877c5986f61222e Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 14 Feb 2023 08:23:21 -0700 Subject: [PATCH 224/385] Updated FOSS (#89) - Updated pistache/0.0.5 - Updated prometheus/1.1.0 - Updated cpr/1.9.3 --- CHANGELOG.md | 14 ++++++++------ conanfile.py | 12 ++++++------ src/auth_manager/CMakeLists.txt | 2 +- src/grpc/tests/unit/CMakeLists.txt | 2 +- 4 files changed, 16 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a8f11bfa..c3d5f659 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,17 +4,19 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## Unreleased +## [Unreleased] -### Changed +### Dependency Changes -- Override libcurl to 7.86.0 +- Updated: cpr/1.9.3 +- Updated: pistache/0.0.5 +- Updated: prometheus-cpp/1.1.0 -## 8.2.7 +## [8.x] ### Changed - Moved SISL code to github.com; start Changelog -[Unreleased]: https://github.com/eBay/IOManager/compare/v8.2.7...HEAD -[8.2.7]: https://github.com/eBay/IOManager/compare/c5b7966...v8.2.7 +[Unreleased]: https://github.com/eBay/sisl/compare/stable/v8.x...HEAD +[8.x]: https://github.com/eBay/sisl/compare/v5.0.10...stable/v8.x diff --git a/conanfile.py b/conanfile.py index ce37100a..c1c698ca 100644 --- a/conanfile.py +++ b/conanfile.py @@ -39,10 +39,8 @@ class SISLConan(ConanFile): exports_sources = ("CMakeLists.txt", "cmake/*", "include/*", "src/*", "LICENSE") def build_requirements(self): - self.build_requires("benchmark/1.7.0") - self.build_requires("gtest/1.11.0") - if self.settings.compiler in ["gcc"]: - self.build_requires("pistache/cci.20201127") + self.build_requires("benchmark/1.7.1") + self.build_requires("gtest/1.13.0") def requirements(self): # Custom packages @@ -62,15 +60,17 @@ def requirements(self): # Generic packages (conan-center) self.requires("boost/1.79.0") - self.requires("cpr/1.8.1") + self.requires("cpr/1.9.3") self.requires("cxxopts/2.2.1") self.requires("flatbuffers/1.12.0") self.requires("grpc/1.48.0") self.requires("jwt-cpp/0.4.0") self.requires("nlohmann_json/3.11.2") - self.requires("prometheus-cpp/1.0.1") + self.requires("prometheus-cpp/1.1.0") self.requires("spdlog/1.11.0") self.requires("zmarok-semver/1.1.0") + if self.settings.compiler in ["gcc"]: + self.requires("pistache/0.0.5") self.requires("fmt/8.1.1", override=True) self.requires("libcurl/7.86.0", override=True) diff --git a/src/auth_manager/CMakeLists.txt b/src/auth_manager/CMakeLists.txt index aa554c36..ff83dc6d 100644 --- a/src/auth_manager/CMakeLists.txt +++ b/src/auth_manager/CMakeLists.txt @@ -34,7 +34,7 @@ target_link_libraries(test_auth_mgr sisl ${COMMON_DEPS} cpr::cpr - pistache::pistache + Pistache::Pistache flatbuffers::flatbuffers jwt-cpp::jwt-cpp GTest::gmock diff --git a/src/grpc/tests/unit/CMakeLists.txt b/src/grpc/tests/unit/CMakeLists.txt index 1e82a780..bd163d99 100644 --- a/src/grpc/tests/unit/CMakeLists.txt +++ b/src/grpc/tests/unit/CMakeLists.txt @@ -7,7 +7,7 @@ add_executable(auth_test target_link_libraries(auth_test sisl sisl_grpc - pistache::pistache + Pistache::Pistache GTest::gmock ${COMMON_DEPS} ) From 3c28314847bfde11f917abfd2b9e24481817f91b Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 14 Feb 2023 08:43:37 -0700 Subject: [PATCH 225/385] De-parallelize Jenkins CI pipeline. --- .jenkins/Jenkinsfile | 186 ++++++++++++------------------------------- 1 file changed, 52 insertions(+), 134 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 894495b6..42224f3e 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -5,16 +5,14 @@ pipeline { ARTIFACTORY_PASS = credentials('ARTIFACTORY_PASS') CONAN_USER = 'oss' TARGET_BRANCH = 'master' - TESTING_BRANCH = 'testing/v*' STABLE_BRANCH = 'stable/v*' } stages { stage('Adjust Tag for Master/PR') { - when { not { anyOf { - branch "${TESTING_BRANCH}" + when { not { branch "${STABLE_BRANCH}" - } } } + } } steps { script { sh(script: "sed -Ei 's, version = .*\"([[:digit:]]+\\.[[:digit:]]+\\.[[:digit:]]+).*, version = \"\\1-${env.BUILD_NUMBER}\",' conanfile.py") @@ -23,10 +21,9 @@ pipeline { } } stage('Adjust for Testing/Stable') { - when { anyOf { - branch "${TESTING_BRANCH}" + when { branch "${STABLE_BRANCH}" - } } + } steps { script { BUILD_MISSING = "" @@ -55,144 +52,65 @@ pipeline { } } - stage('Build') { - failFast true - matrix { - agent { label 'sds-builder-2204' } - axes { - axis { - name 'BUILD_TYPE' - values 'sanitize', 'release', 'debug', 'test' - } - axis { - name 'ALLOC' - values 'libc', 'tcmalloc' - } - axis { - name 'COVERAGE' - values 'False' - } - } - excludes { - exclude { - axis { - name 'BUILD_TYPE' - values 'sanitize', 'test', 'release' - } - axis { - name 'COVERAGE' - values 'True' - } - } - exclude { - axis { - name 'BUILD_TYPE' - values 'sanitize', 'test' - } - axis { - name 'ALLOC' - values 'libc' - } - } - } +/* Commented out until unit tests are available + stage('Coverage') { + when { not { + branch "${STABLE_BRANCH}" + } } - stages { - stage('Adjust Tag for Master/PR') { - when { not { anyOf { - branch "${TESTING_BRANCH}" - branch "${STABLE_BRANCH}" - } } } - steps { - sh(script: "sed -Ei 's, version = .*\"([[:digit:]]+\\.[[:digit:]]+\\.[[:digit:]]+).*, version = \"\\1-${env.BUILD_NUMBER}\",' conanfile.py") + stages { + stage("Adjust Sonar Branch") { + when { + not { + branch "${TARGET_BRANCH}" } } - - stage("Build") { - when { allOf { - expression { "${COVERAGE}" == "False" } - } } - steps { - script { - def PRERELEASE = 'True' - def BUILD_PROFILE = "${BUILD_TYPE}" - def SANITIZE = 'False' - - if ("${BUILD_TYPE}" == 'sanitize') { - SANITIZE = 'True' - BUILD_PROFILE = "debug" - } - - if ("${ALLOC}" == 'libc') { - PRERELEASE = 'False' - } else { - if ("${BUILD_TYPE}" == "release") { - PRERELEASE = 'False' - BUILD_PROFILE = "test" - } - } - - if (("${env.BRANCH_NAME}" =~ /PR-/) && ("$BUILD_TYPE" == "debug")) { - sh "echo Skipping debug build for PR branch" - } else { - sh "conan create -u ${BUILD_MISSING} -o ${PROJECT}:malloc_impl=${ALLOC} -o ${PROJECT}:prerelease=${PRERELEASE} -o ${PROJECT}:sanitize=${SANITIZE} -pr ${BUILD_PROFILE} . ${PROJECT}/${TAG}" - } - } - } + steps { + sh "echo \"sonar.branch.target=${TARGET_BRANCH}\" >> sonar-project.properties" } - - stage('Coverage') { - when { not { anyOf { - branch "${STABLE_BRANCH}" - expression { "${COVERAGE}" == 'False' } - expression { !(env.BRANCH_NAME =~ /PR-/) } - } } } - - stages { - stage("Adjust Sonar Branch") { - when { - not { - branch "${TARGET_BRANCH}" - } - } - steps { - sh "echo \"sonar.branch.target=${TARGET_BRANCH}\" >> sonar-project.properties" - } - } - stage("Code Coverage") { - steps { - slackSend channel: '#sds-ci', message: "*${PROJECT}:${TAG}* is undergoing Code Coverage." - sh "echo \"sonar.branch.name=${BRANCH_NAME}\" >> sonar-project.properties" - sh "conan install -pr ${BUILD_TYPE} ${BUILD_MISSING} -o ${PROJECT}:coverage=True ." - sh "build-wrapper-linux-x86-64 --out-dir /tmp/sonar conan build ." - sh "find . -name \"*.gcno\" -exec gcov {} \\;" - withSonarQubeEnv('sds-sonar') { - sh "sonar-scanner -Dsonar.projectBaseDir=. -Dsonar.projectVersion=\"${VER}\"" - } - } - } - stage("Quality Gate") { - steps { - timeout(time: 30, unit: 'MINUTES') { - waitForQualityGate abortPipeline: true - } - } - } + } + stage("Code Coverage") { + steps { + slackSend channel: '#sds-ci', message: "*${PROJECT}:${TAG}* is undergoing Code Coverage." + sh "echo \"sonar.branch.name=${BRANCH_NAME}\" >> sonar-project.properties" + sh "conan install -o sisl:prerelease=True -pr debug ${BUILD_MISSING} -o ${PROJECT}:coverage=True ." + sh "build-wrapper-linux-x86-64 --out-dir /tmp/sonar conan build ." + sh "find . -name \"*.gcno\" -exec gcov {} \\;" + withSonarQubeEnv('sds-sonar') { + sh "sonar-scanner -Dsonar.projectBaseDir=. -Dsonar.projectVersion=\"${VER}\"" } } - - stage("Deploy") { - when { allOf { - expression { "${COVERAGE}" == 'False' } - expression { !(env.BRANCH_NAME =~ /PR-/) } - } } - steps { - sh "conan user -r ebay-local -p ${ARTIFACTORY_PASS} _service_sds" - sh "conan upload ${PROJECT}/${TAG} -c --all -r ebay-local" + } + stage("Quality Gate") { + steps { + timeout(time: 5, unit: 'MINUTES') { + waitForQualityGate abortPipeline: false } } } } } +*/ + stage("Compile") { + steps { + sh "conan create ${BUILD_MISSING} -o sisl:prerelease=False -o ${PROJECT}:sanitize=True -pr debug . ${PROJECT}/${TAG}" + sh "conan remove -f ${PROJECT}/${TAG}" + sh "conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG}" + sh "conan create ${BUILD_MISSING} -pr debug -o sisl:prerelease=False . ${PROJECT}/${TAG}" + sh "conan create ${BUILD_MISSING} -pr test . ${PROJECT}/${TAG}" + sh "conan create ${BUILD_MISSING} -pr test -o sisl:prerelease=False -pr test . ${PROJECT}/${TAG}" + } + } + + stage("Deploy") { + when { + expression { !(env.BRANCH_NAME =~ /PR-/) } + } + steps { + sh "conan user -r ebay-local -p ${ARTIFACTORY_PASS} _service_sds" + sh "conan upload ${PROJECT}/${TAG} -c --all -r ebay-local" + } + } } post { From f82b11790a0ffc706a5e18adaaeb589187d525f9 Mon Sep 17 00:00:00 2001 From: Yaming Kuang <1477567+yamingk@users.noreply.github.com> Date: Wed, 15 Feb 2023 14:38:28 -0800 Subject: [PATCH 226/385] SDSTOR-8778 unaligned iov size test case (#90) * SDSTOR-8778 unaligned iov size test case * replace std::vector with folly::small_vector * incorperate comments --- conanfile.py | 2 +- include/sisl/fds/buffer.hpp | 14 ++++--- src/fds/CMakeLists.txt | 2 +- src/fds/tests/test_sg_list.cpp | 76 ++++++++++++++++++++++++++++++++-- 4 files changed, 83 insertions(+), 11 deletions(-) diff --git a/conanfile.py b/conanfile.py index c1c698ca..9e8e3d8e 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "9.0.1" + version = "9.0.2" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" diff --git a/include/sisl/fds/buffer.hpp b/include/sisl/fds/buffer.hpp index 8ee1c053..462c294c 100644 --- a/include/sisl/fds/buffer.hpp +++ b/include/sisl/fds/buffer.hpp @@ -26,10 +26,11 @@ #include #include #endif - +#include #include #include #include "utils.hpp" + namespace sisl { struct blob { uint8_t* bytes; @@ -39,16 +40,17 @@ struct blob { blob(uint8_t* const b, const uint32_t s) : bytes{b}, size{s} {} }; +using sg_iovs_t = folly::small_vector< iovec, 4 >; struct sg_list { uint64_t size; // total size of data pointed by iovs; - std::vector< iovec > iovs; + sg_iovs_t iovs; }; struct sg_iterator { - sg_iterator(const std::vector< iovec >& v) : m_input_iovs{v} { assert(v.size() > 0); } + sg_iterator(const sg_iovs_t& v) : m_input_iovs{v} { assert(v.size() > 0); } - std::vector< iovec > next_iovs(uint32_t size) { - std::vector< iovec > ret_iovs; + sg_iovs_t next_iovs(uint32_t size) { + sg_iovs_t ret_iovs; uint64_t remain_size = size; while ((remain_size > 0) && (m_cur_index < m_input_iovs.size())) { @@ -72,7 +74,7 @@ struct sg_iterator { return ret_iovs; } - const std::vector< iovec >& m_input_iovs; + const sg_iovs_t& m_input_iovs; uint64_t m_cur_offset{0}; size_t m_cur_index{0}; }; diff --git a/src/fds/CMakeLists.txt b/src/fds/CMakeLists.txt index bf3e4611..c1468ad3 100644 --- a/src/fds/CMakeLists.txt +++ b/src/fds/CMakeLists.txt @@ -6,7 +6,7 @@ add_library(sisl_buffer OBJECT) target_sources(sisl_buffer PRIVATE buffer.cpp ) -target_link_libraries(sisl_buffer ${COMMON_DEPS}) +target_link_libraries(sisl_buffer Folly::Folly ${COMMON_DEPS}) add_executable(test_stream_tracker) target_sources(test_stream_tracker PRIVATE diff --git a/src/fds/tests/test_sg_list.cpp b/src/fds/tests/test_sg_list.cpp index b33405a5..fa93e22c 100644 --- a/src/fds/tests/test_sg_list.cpp +++ b/src/fds/tests/test_sg_list.cpp @@ -24,9 +24,79 @@ SISL_OPTION_GROUP(test_sg_list, struct SgListTest : public testing::Test {}; -// a test case that make sure iterator works as expected; -TEST_F(SgListTest, TestIterator) { - // TO Be Implemented; +// the iterator request size is same as iov size for each iov; +TEST_F(SgListTest, TestIteratorAlignedSize) { + + sisl::sg_iovs_t iovs; + iovs.push_back(iovec{nullptr, 1024}); + iovs.push_back(iovec{nullptr, 512}); + iovs.push_back(iovec{nullptr, 2048}); + iovs.push_back(iovec{nullptr, 512}); + uint32_t iov_size_total = 0; + for (const auto& v : iovs) { + iov_size_total += v.iov_len; + } + + sisl::sg_list sg; + sg.size = iov_size_total; + sg.iovs = iovs; + + sisl::sg_iterator sg_it{sg.iovs}; + std::vector< uint32_t > bids_size_vec{1024, 512, 2048, 512}; + uint32_t bids_size_total = 0; + for (const auto s : bids_size_vec) { + bids_size_total += s; + } + + assert(iov_size_total == bids_size_total); + + uint32_t itr_size_total = 0; + for (const auto& size : bids_size_vec) { + const auto iovs = sg_it.next_iovs(size); + for (const auto& iov : iovs) { + itr_size_total += iov.iov_len; + } + } + + assert(itr_size_total == bids_size_total); +} + +// +// the iterator request size is unaligned with iov len, but total size is same; +// +TEST_F(SgListTest, TestIteratorUnalignedSize) { + sisl::sg_iovs_t iovs; + iovs.push_back(iovec{nullptr, 1024}); + iovs.push_back(iovec{nullptr, 512}); + iovs.push_back(iovec{nullptr, 2048}); + iovs.push_back(iovec{nullptr, 512}); + uint32_t iov_size_total = 0; + for (const auto& v : iovs) { + iov_size_total += v.iov_len; + } + + sisl::sg_list sg; + sg.size = iov_size_total; + sg.iovs = iovs; + + sisl::sg_iterator sg_it{sg.iovs}; + std::vector< uint32_t > bids_size_vec{512, 1024, 1024, 512, 512, 512}; + uint32_t bids_size_total = 0; + for (const auto s : bids_size_vec) { + bids_size_total += s; + } + + assert(iov_size_total == bids_size_total); + + uint32_t itr_size_total = 0; + for (const auto& size : bids_size_vec) { + const auto iovs = sg_it.next_iovs(size); + for (const auto& iov : iovs) { + itr_size_total += iov.iov_len; + } + } + + assert(itr_size_total == bids_size_total); } int main(int argc, char* argv[]) { From 3565cee16cadcc68d606727b015e5c279840428a Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 22 Feb 2023 09:15:02 -0700 Subject: [PATCH 227/385] Remove standard spec in gRPC module. --- src/grpc/CMakeLists.txt | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/grpc/CMakeLists.txt b/src/grpc/CMakeLists.txt index 888f786b..e59bf9ef 100644 --- a/src/grpc/CMakeLists.txt +++ b/src/grpc/CMakeLists.txt @@ -3,8 +3,6 @@ cmake_minimum_required (VERSION 3.11) find_package(flatbuffers REQUIRED) find_package(gRPC REQUIRED) -set(CMAKE_CXX_STANDARD 17) - include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/../auth_manager) add_library(sisl_grpc OBJECT) From 758d57095be84e3091eed1b711ece2fe47cd6afc Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 22 Feb 2023 09:34:01 -0700 Subject: [PATCH 228/385] Revert "Remove standard spec in gRPC module." This reverts commit 3565cee16cadcc68d606727b015e5c279840428a. --- src/grpc/CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/grpc/CMakeLists.txt b/src/grpc/CMakeLists.txt index e59bf9ef..888f786b 100644 --- a/src/grpc/CMakeLists.txt +++ b/src/grpc/CMakeLists.txt @@ -3,6 +3,8 @@ cmake_minimum_required (VERSION 3.11) find_package(flatbuffers REQUIRED) find_package(gRPC REQUIRED) +set(CMAKE_CXX_STANDARD 17) + include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/../auth_manager) add_library(sisl_grpc OBJECT) From 952884cf4b0902cee57bf382624f052bf0d783ee Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 3 Mar 2023 09:03:55 -0700 Subject: [PATCH 229/385] Apparently we use the sanitized builds downstream. --- .jenkins/Jenkinsfile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 42224f3e..2bed99e7 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -93,8 +93,7 @@ pipeline { */ stage("Compile") { steps { - sh "conan create ${BUILD_MISSING} -o sisl:prerelease=False -o ${PROJECT}:sanitize=True -pr debug . ${PROJECT}/${TAG}" - sh "conan remove -f ${PROJECT}/${TAG}" + sh "conan create ${BUILD_MISSING} -o ${PROJECT}:sanitize=True -pr debug . ${PROJECT}/${TAG}" sh "conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG}" sh "conan create ${BUILD_MISSING} -pr debug -o sisl:prerelease=False . ${PROJECT}/${TAG}" sh "conan create ${BUILD_MISSING} -pr test . ${PROJECT}/${TAG}" From 359dc919ded489cbabed5e78b3f40155664db91b Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 10 Mar 2023 16:18:50 -0700 Subject: [PATCH 230/385] Pin conan to 1.x series. (#93) --- .github/workflows/build_with_conan.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_with_conan.yml b/.github/workflows/build_with_conan.yml index c4ea3769..800a96d1 100644 --- a/.github/workflows/build_with_conan.yml +++ b/.github/workflows/build_with_conan.yml @@ -35,7 +35,7 @@ jobs: - name: Install Conan run: | python -m pip install --upgrade pip - python -m pip install conan + python -m pip install conan~=1.0 - name: Configure Conan # Configure conan profiles for build runner From fffaa9acf8802db766387723e2727aada183cf7b Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Tue, 14 Mar 2023 11:18:18 -0700 Subject: [PATCH 231/385] =?UTF-8?q?Add=20move=20offset=20api=20to=20sg=20l?= =?UTF-8?q?ist=20iterator.=20Add=20sglist=20to=20ioblob=20convers=E2=80=A6?= =?UTF-8?q?=20(#91)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add move offset api to sg list iterator. Add sglist to ioblob conversion method * resolve memory leak in uint test * review comments --------- Co-authored-by: Ravi Akella email = raakella@ebay.com --- conanfile.py | 2 +- include/sisl/fds/buffer.hpp | 27 ++++++++- src/fds/tests/test_sg_list.cpp | 106 ++++++++++++++++++++++++++++++--- 3 files changed, 125 insertions(+), 10 deletions(-) diff --git a/conanfile.py b/conanfile.py index 9e8e3d8e..ca38a99e 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "9.0.2" + version = "9.1.1" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" diff --git a/include/sisl/fds/buffer.hpp b/include/sisl/fds/buffer.hpp index 462c294c..41ebd7ca 100644 --- a/include/sisl/fds/buffer.hpp +++ b/include/sisl/fds/buffer.hpp @@ -51,7 +51,7 @@ struct sg_iterator { sg_iovs_t next_iovs(uint32_t size) { sg_iovs_t ret_iovs; - uint64_t remain_size = size; + auto remain_size = size; while ((remain_size > 0) && (m_cur_index < m_input_iovs.size())) { const auto& inp_iov = m_input_iovs[m_cur_index]; @@ -74,6 +74,19 @@ struct sg_iterator { return ret_iovs; } + void move_offset(const uint32_t size) { + auto remain_size = size; + const auto input_iovs_size = m_input_iovs.size(); + for (; (remain_size > 0) && (m_cur_index < input_iovs_size); ++m_cur_index, m_cur_offset = 0) { + const auto& inp_iov = m_input_iovs[m_cur_index]; + if (remain_size < inp_iov.iov_len - m_cur_offset) { + m_cur_offset += remain_size; + return; + } + remain_size -= inp_iov.iov_len - m_cur_offset; + } + } + const sg_iovs_t& m_input_iovs; uint64_t m_cur_offset{0}; size_t m_cur_index{0}; @@ -224,6 +237,9 @@ class aligned_shared_ptr : public std::shared_ptr< T > { aligned_shared_ptr(T* p) : std::shared_ptr< T >(p) {} }; +struct io_blob; +using io_blob_list_t = folly::small_vector< sisl::io_blob, 4 >; + struct io_blob : public blob { bool aligned{false}; @@ -270,6 +286,15 @@ struct io_blob : public blob { static io_blob from_string(const std::string& s) { return io_blob{r_cast< uint8_t* >(const_cast< char* >(s.data())), uint32_cast(s.size()), false}; } + + static io_blob_list_t sg_list_to_ioblob_list(const sg_list& sglist) { + io_blob_list_t ret_list; + for (const auto& iov : sglist.iovs) { + ret_list.emplace_back(r_cast< uint8_t* >(const_cast< void* >(iov.iov_base)), uint32_cast(iov.iov_len), + false); + } + return ret_list; + } }; /* An extension to blob where the buffer it holds is allocated by constructor and freed during destruction. The only diff --git a/src/fds/tests/test_sg_list.cpp b/src/fds/tests/test_sg_list.cpp index fa93e22c..e94e89da 100644 --- a/src/fds/tests/test_sg_list.cpp +++ b/src/fds/tests/test_sg_list.cpp @@ -14,6 +14,7 @@ *********************************************************************************/ #include +#include #include "sisl/fds/buffer.hpp" SISL_LOGGING_INIT(test_sg_list) @@ -22,11 +23,10 @@ SISL_OPTION_GROUP(test_sg_list, (num_threads, "", "num_threads", "number of threads", ::cxxopts::value< uint32_t >()->default_value("8"), "number")) -struct SgListTest : public testing::Test {}; +static constexpr uint32_t SZ{sizeof(uint32_t)}; // the iterator request size is same as iov size for each iov; -TEST_F(SgListTest, TestIteratorAlignedSize) { - +TEST(SgListTestBasic, TestIteratorAlignedSize) { sisl::sg_iovs_t iovs; iovs.push_back(iovec{nullptr, 1024}); iovs.push_back(iovec{nullptr, 512}); @@ -48,7 +48,7 @@ TEST_F(SgListTest, TestIteratorAlignedSize) { bids_size_total += s; } - assert(iov_size_total == bids_size_total); + ASSERT_EQ(iov_size_total, bids_size_total); uint32_t itr_size_total = 0; for (const auto& size : bids_size_vec) { @@ -58,13 +58,13 @@ TEST_F(SgListTest, TestIteratorAlignedSize) { } } - assert(itr_size_total == bids_size_total); + ASSERT_EQ(itr_size_total, bids_size_total); } // // the iterator request size is unaligned with iov len, but total size is same; // -TEST_F(SgListTest, TestIteratorUnalignedSize) { +TEST(SgListTestBasic, TestIteratorUnalignedSize) { sisl::sg_iovs_t iovs; iovs.push_back(iovec{nullptr, 1024}); iovs.push_back(iovec{nullptr, 512}); @@ -86,7 +86,7 @@ TEST_F(SgListTest, TestIteratorUnalignedSize) { bids_size_total += s; } - assert(iov_size_total == bids_size_total); + ASSERT_EQ(iov_size_total, bids_size_total); uint32_t itr_size_total = 0; for (const auto& size : bids_size_vec) { @@ -96,7 +96,97 @@ TEST_F(SgListTest, TestIteratorUnalignedSize) { } } - assert(itr_size_total == bids_size_total); + ASSERT_EQ(itr_size_total, bids_size_total); +} + +class SgListTestOffset : public testing::Test { +public: + void SetUp() override { + + for (uint16_t i = 0; i < 8; ++i) { + data_vec.emplace_back(get_random_num()); + sgl.iovs.emplace_back(iovec{new uint32_t(data_vec[i]), SZ}); + } + sgl.size = SZ * 8; + } + + void TearDown() override { + for (auto& iov : sgl.iovs) { + auto data_ptr = r_cast< uint32_t* >(iov.iov_base); + delete data_ptr; + } + } + + static uint32_t get_random_num() { + static std::random_device dev; + static std::mt19937 rng(dev()); + std::uniform_int_distribution< std::mt19937::result_type > dist(1001u, 99999u); + return dist(rng); + } + + std::vector< uint32_t > data_vec; + sisl::sg_list sgl{0, {}}; +}; + +TEST_F(SgListTestOffset, TestMoveOffsetAligned) { + // test next_iovs and sg_list_to_ioblob_list + sisl::sg_iterator sgitr{sgl.iovs}; + auto ioblob_list = sisl::io_blob::sg_list_to_ioblob_list(sgl); + ASSERT_EQ(sgl.iovs.size(), ioblob_list.size()); + ASSERT_EQ(sgl.iovs.size(), data_vec.size()); + for (uint16_t i = 0; i < data_vec.size(); ++i) { + auto const iovs = sgitr.next_iovs(SZ); + ASSERT_EQ(iovs.size(), 1); + auto rand_num = r_cast< uint32_t* >(iovs[0].iov_base); + EXPECT_EQ(*rand_num, data_vec[i]); + + rand_num = r_cast< uint32_t* >(ioblob_list[i].bytes); + EXPECT_EQ(*rand_num, data_vec[i]); + EXPECT_EQ(ioblob_list[i].size, SZ); + } + + sisl::sg_iterator sgitr1{sgl.iovs}; + // test move_offset + for (uint16_t i = 0; i < data_vec.size(); ++i) { + if (i % 2 == 0) { + sgitr1.move_offset(SZ); + continue; + } + auto const iovs = sgitr1.next_iovs(SZ); + ASSERT_EQ(iovs.size(), 1); + auto rand_num = r_cast< uint32_t* >(iovs[0].iov_base); + EXPECT_EQ(*rand_num, data_vec[i]); + } +} + +TEST_F(SgListTestOffset, TestMoveOffsetUnaligned) { + // total size should be SZ * 8 + std::vector< uint32_t > size_vec{SZ, 3 * SZ, SZ / 2, SZ / 4, 2 * SZ, SZ / 4 + SZ}; + uint32_t itr_size_total{0}; + sisl::sg_iterator sgitr{sgl.iovs}; + for (auto const& s : size_vec) { + auto const iovs = sgitr.next_iovs(s); + for (const auto& iov : iovs) { + itr_size_total += iov.iov_len; + } + } + EXPECT_EQ(itr_size_total, sgl.size); + + sisl::sg_iterator sgitr1{sgl.iovs}; + uint32_t itr_size_offset{0}; + uint32_t itr_size_offset_target{0}; + for (uint16_t i = 0; i < size_vec.size(); ++i) { + if (i % 2 == 0) { + sgitr1.move_offset(size_vec[i]); + continue; + } + auto const iovs = sgitr1.next_iovs(size_vec[i]); + for (const auto& iov : iovs) { + itr_size_offset += iov.iov_len; + } + itr_size_offset_target += size_vec[i]; + } + EXPECT_EQ(itr_size_offset_target, itr_size_offset); } int main(int argc, char* argv[]) { From faf36448cc674f2867cd92ec3283f0d821ef74cc Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Tue, 4 Apr 2023 20:19:30 -0700 Subject: [PATCH 232/385] Improvements on enum, callbackmutex, logging (#94) * Fixed a possible use-after-free during destruction of global static during exit in logging * Added CallbackMutex to support shared lock as well * Added scoped enum to allow enums to be declared within class scope --- CMakeLists.txt | 9 ++++ conanfile.py | 2 +- include/sisl/logging/logging.h | 22 +++++++--- include/sisl/utility/enum.hpp | 53 +++++++++++++---------- src/fds/callback_mutex.hpp | 37 +++++++++++----- src/fds/tests/obj_allocator_benchmark.cpp | 3 +- src/fds/tests/test_cb_mutex.cpp | 23 +++++----- src/logging/logging.cpp | 31 +++++++------ src/logging/stacktrace.cpp | 4 +- src/utility/tests/test_enum.cpp | 46 ++++++++++++-------- 10 files changed, 143 insertions(+), 87 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 216549e8..17c0b472 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -57,6 +57,11 @@ find_package(GTest REQUIRED) if (${MALLOC_IMPL} STREQUAL "tcmalloc") find_package(gperftools REQUIRED) endif() + +if (${MALLOC_IMPL} STREQUAL "jemalloc") + find_package(jemalloc REQUIRED) +endif() + find_package(jwt-cpp REQUIRED) find_package(nlohmann_json REQUIRED) find_package(prerelease_dummy QUIET) @@ -92,6 +97,10 @@ if (DEFINED MALLOC_IMPL) if (${MALLOC_IMPL} STREQUAL "tcmalloc") list(APPEND COMMON_DEPS gperftools::gperftools) endif() + + if (${MALLOC_IMPL} STREQUAL "jemalloc") + list(APPEND COMMON_DEPS jemalloc::jemalloc) + endif() endif() find_program(CCACHE_FOUND ccache) diff --git a/conanfile.py b/conanfile.py index ca38a99e..4ca1b435 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "9.1.1" + version = "9.1.2" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" diff --git a/include/sisl/logging/logging.h b/include/sisl/logging/logging.h index 3b244526..802f414b 100644 --- a/include/sisl/logging/logging.h +++ b/include/sisl/logging/logging.h @@ -376,6 +376,20 @@ static constexpr uint32_t max_stacktrace_size() { return static_cast< uint32_t > #define SIGUSR4 SIGUSR2 #endif +class LoggerThreadContext; + +class LoggerThreadRegistry { +public: + std::mutex m_logger_thread_mutex; + std::unordered_set< LoggerThreadContext* > m_logger_thread_set; + +public: + void add_logger_thread(LoggerThreadContext* ctx); + void remove_logger_thread(LoggerThreadContext* ctx); + + static std::shared_ptr< LoggerThreadRegistry > instance(); +}; + class LoggerThreadContext { public: LoggerThreadContext(const LoggerThreadContext&) = delete; @@ -386,16 +400,10 @@ class LoggerThreadContext { static LoggerThreadContext& instance(); - static void add_logger_thread(LoggerThreadContext* const ctx); - - static void remove_logger_thread(LoggerThreadContext* const ctx); - - static std::mutex s_logger_thread_mutex; - static std::unordered_set< LoggerThreadContext* > s_logger_thread_set; - std::shared_ptr< spdlog::logger > m_logger; std::shared_ptr< spdlog::logger > m_critical_logger; pthread_t m_thread_id; + std::shared_ptr< LoggerThreadRegistry > m_logger_thread_registry; // Take reference to avoid singleton destruction private: LoggerThreadContext(); diff --git a/include/sisl/utility/enum.hpp b/include/sisl/utility/enum.hpp index bcc16e16..a8b4c8cf 100644 --- a/include/sisl/utility/enum.hpp +++ b/include/sisl/utility/enum.hpp @@ -109,11 +109,16 @@ class EnumSupportBase { }; #define VENUM(EnumName, Underlying, ...) ENUM(EnumName, Underlying, __VA_ARGS__) +#define ENUM(EnumName, Underlying, ...) BASE_ENUM(EnumName, EnumName, Underlying, __VA_ARGS__) +#define SCOPED_ENUM_DEF(Scope, EnumName, Underlying, ...) BASE_ENUM(Scope::EnumName, EnumName, Underlying, __VA_ARGS__) +#define SCOPED_ENUM_DECL(EnumName, Underlying) \ + enum class EnumName : Underlying; \ + struct EnumName##Support; -#define ENUM(EnumName, Underlying, ...) \ - enum class EnumName : Underlying { __VA_ARGS__ }; \ +#define BASE_ENUM(FQEnumName, EnumName, Underlying, ...) \ + enum class FQEnumName : Underlying { __VA_ARGS__ }; \ \ - struct EnumName##Support : EnumSupportBase< EnumName > { \ + struct FQEnumName##Support : EnumSupportBase< EnumName > { \ typedef EnumName enum_type; \ typedef std::underlying_type_t< enum_type > underlying_type; \ EnumName##Support(const std::string tokens) : EnumSupportBase< enum_type >{tokens} {}; \ @@ -127,37 +132,39 @@ class EnumSupportBase { return s_instance; \ }; \ }; \ - [[nodiscard]] inline EnumName##Support::enum_type operator|(const EnumName##Support::enum_type a, \ - const EnumName##Support::enum_type b) { \ - return static_cast< EnumName##Support::enum_type >(static_cast< EnumName##Support::underlying_type >(a) | \ - static_cast< EnumName##Support::underlying_type >(b)); \ + [[nodiscard]] inline FQEnumName##Support::enum_type operator|(const FQEnumName##Support::enum_type a, \ + const FQEnumName##Support::enum_type b) { \ + return static_cast< FQEnumName##Support::enum_type >(static_cast< FQEnumName##Support::underlying_type >(a) | \ + static_cast< FQEnumName##Support::underlying_type >(b)); \ } \ - [[nodiscard]] inline EnumName##Support::enum_type operator&(const EnumName##Support::enum_type a, \ - const EnumName##Support::enum_type b) { \ - return static_cast< EnumName##Support::enum_type >(static_cast< EnumName##Support::underlying_type >(a) & \ - static_cast< EnumName##Support::underlying_type >(b)); \ + [[nodiscard]] inline FQEnumName##Support::enum_type operator&(const FQEnumName##Support::enum_type a, \ + const FQEnumName##Support::enum_type b) { \ + return static_cast< FQEnumName##Support::enum_type >(static_cast< FQEnumName##Support::underlying_type >(a) & \ + static_cast< FQEnumName##Support::underlying_type >(b)); \ } \ - [[maybe_unused]] inline EnumName##Support::enum_type operator|=(EnumName##Support::enum_type& a, \ - const EnumName##Support::enum_type b) { \ - return a = static_cast< EnumName##Support::enum_type >(static_cast< EnumName##Support::underlying_type >(a) | \ - static_cast< EnumName##Support::underlying_type >(b)); \ + [[maybe_unused]] inline FQEnumName##Support::enum_type operator|=(FQEnumName##Support::enum_type& a, \ + const FQEnumName##Support::enum_type b) { \ + return a = static_cast< FQEnumName##Support::enum_type >( \ + static_cast< FQEnumName##Support::underlying_type >(a) | \ + static_cast< FQEnumName##Support::underlying_type >(b)); \ } \ - [[maybe_unused]] inline EnumName##Support::enum_type operator&=(EnumName##Support::enum_type& a, \ - const EnumName##Support::enum_type b) { \ - return a = static_cast< EnumName##Support::enum_type >(static_cast< EnumName##Support::underlying_type >(a) & \ - static_cast< EnumName##Support::underlying_type >(b)); \ + [[maybe_unused]] inline FQEnumName##Support::enum_type operator&=(FQEnumName##Support::enum_type& a, \ + const FQEnumName##Support::enum_type b) { \ + return a = static_cast< FQEnumName##Support::enum_type >( \ + static_cast< FQEnumName##Support::underlying_type >(a) & \ + static_cast< FQEnumName##Support::underlying_type >(b)); \ } \ template < typename charT, typename traits > \ std::basic_ostream< charT, traits >& operator<<(std::basic_ostream< charT, traits >& out_stream, \ - const EnumName##Support::enum_type es) { \ + const FQEnumName##Support::enum_type es) { \ std::basic_ostringstream< charT, traits > out_stream_copy{}; \ out_stream_copy.copyfmt(out_stream); \ - out_stream_copy << EnumName##Support::instance().get_name(es); \ + out_stream_copy << FQEnumName##Support::instance().get_name(es); \ out_stream << out_stream_copy.str(); \ return out_stream; \ } \ - [[nodiscard]] inline const std::string& enum_name(const EnumName##Support::enum_type es) { \ - return EnumName##Support::instance().get_name(es); \ + [[nodiscard]] inline const std::string& enum_name(const FQEnumName##Support::enum_type es) { \ + return FQEnumName##Support::instance().get_name(es); \ } #endif // SISL_ENUM_HPP diff --git a/src/fds/callback_mutex.hpp b/src/fds/callback_mutex.hpp index e99125db..64c847b7 100644 --- a/src/fds/callback_mutex.hpp +++ b/src/fds/callback_mutex.hpp @@ -18,6 +18,7 @@ #include #include #include "sisl/fds/vector_pool.hpp" +#include #include // Generate the metafunction @@ -38,8 +39,8 @@ class _cb_wait_q { _cb_wait_q() = default; ~_cb_wait_q() = default; - void add_cb(const post_lock_cb_t& cb) { - std::unique_lock< std::mutex > l(m_waitq_mutex); + void add_cb(post_lock_cb_t&& cb) { + folly::SharedMutexWritePriority::WriteHolder holder{m_waitq_lock}; if (m_wait_q == nullptr) { m_wait_q = sisl::VectorPool< post_lock_cb_t >::alloc(); } m_wait_q->emplace_back(std::move(cb)); } @@ -47,12 +48,12 @@ class _cb_wait_q { bool drain_cb() { std::vector< post_lock_cb_t >* wait_q{nullptr}; { - std::unique_lock< std::mutex > l(m_waitq_mutex); + folly::SharedMutexWritePriority::WriteHolder holder{m_waitq_lock}; std::swap(wait_q, m_wait_q); } if (wait_q) { - for (auto& cb : *wait_q) { + for (const auto& cb : *wait_q) { cb(); } sisl::VectorPool< post_lock_cb_t >::free(wait_q); @@ -60,8 +61,13 @@ class _cb_wait_q { return (wait_q != nullptr); } + bool empty() const { + folly::SharedMutexWritePriority::ReadHolder holder{m_waitq_lock}; + return ((m_wait_q == nullptr) || (m_wait_q->empty())); + } + private: - std::mutex m_waitq_mutex; + mutable folly::SharedMutexWritePriority m_waitq_lock; std::vector< post_lock_cb_t >* m_wait_q{nullptr}; }; @@ -71,7 +77,7 @@ class CallbackMutex { explicit CallbackMutex() = default; ~CallbackMutex() = default; - bool try_lock(const post_lock_cb_t& cb) { + bool try_lock(post_lock_cb_t&& cb) { if (m_base_mutex.try_lock()) { cb(); return true; @@ -81,7 +87,7 @@ class CallbackMutex { } template < class I = MutexImpl > - typename std::enable_if< try_lock_shared_check< I >, bool >::type try_lock_shared(const post_lock_cb_t& cb) { + typename std::enable_if< try_lock_shared_check< I >, bool >::type try_lock_shared(post_lock_cb_t&& cb) { if (m_base_mutex.try_lock_shared()) { cb(); return true; @@ -99,6 +105,15 @@ class CallbackMutex { template < class I = MutexImpl > typename std::enable_if< unlock_shared_check< I >, void >::type unlock_shared() { m_base_mutex.unlock_shared(); + if (!m_q.empty()) { + // If Q is not empty, try to lock the base mutex (which callers wait on) and if successful, + // we can drain the q. If unsuccessful, ignore it, because next unlock on elements in the q + // will do the same + if (m_base_mutex.try_lock()) { + m_q.drain_cb(); + m_base_mutex.unlock(); + } + } } template < class I = MutexImpl > @@ -127,8 +142,8 @@ class CallbackMutex { template < typename MutexImpl > class CBUniqueLock { public: - CBUniqueLock(CallbackMutex< MutexImpl >& cb_mtx, const post_lock_cb_t& cb) : m_cb_mtx{cb_mtx} { - m_locked = m_cb_mtx.try_lock(cb); + CBUniqueLock(CallbackMutex< MutexImpl >& cb_mtx, post_lock_cb_t&& cb) : m_cb_mtx{cb_mtx} { + m_locked = m_cb_mtx.try_lock(std::move(cb)); } ~CBUniqueLock() { @@ -143,8 +158,8 @@ class CBUniqueLock { template < typename MutexImpl > class CBSharedLock { public: - CBSharedLock(CallbackMutex< MutexImpl >& cb_mtx, const post_lock_cb_t& cb) : m_cb_mtx{cb_mtx} { - m_locked = m_cb_mtx.try_lock_shared(cb); + CBSharedLock(CallbackMutex< MutexImpl >& cb_mtx, post_lock_cb_t&& cb) : m_cb_mtx{cb_mtx} { + m_locked = m_cb_mtx.try_lock_shared(std::move(cb)); } ~CBSharedLock() { diff --git a/src/fds/tests/obj_allocator_benchmark.cpp b/src/fds/tests/obj_allocator_benchmark.cpp index 751640e6..570f6d30 100644 --- a/src/fds/tests/obj_allocator_benchmark.cpp +++ b/src/fds/tests/obj_allocator_benchmark.cpp @@ -33,7 +33,7 @@ RCU_REGISTER_INIT namespace { std::mutex s_print_mutex; -constexpr size_t ITERATIONS{1000000}; +constexpr size_t ITERATIONS{10000000}; constexpr size_t THREADS{8}; struct my_request { @@ -79,6 +79,7 @@ void test_obj_alloc(benchmark::State& state) { counter += req->m_d; sisl::ObjectAllocator< my_request >::deallocate(req); } + { std::scoped_lock< std::mutex > lock{s_print_mutex}; std::cout << "Counter = " << counter << std::endl; diff --git a/src/fds/tests/test_cb_mutex.cpp b/src/fds/tests/test_cb_mutex.cpp index b811ff43..b1e10b35 100644 --- a/src/fds/tests/test_cb_mutex.cpp +++ b/src/fds/tests/test_cb_mutex.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -68,8 +69,7 @@ class CBMutexTest : public testing::Test { } template < typename I = MutexImpl > - typename std::enable_if< !sisl::CallbackMutex< I >::shared_mode_supported, void >::type - thread_shared_fn(uint64_t) { + typename std::enable_if< !sisl::CallbackMutex< I >::shared_mode_supported, void >::type thread_shared_fn(uint64_t) { assert(0); } @@ -84,24 +84,27 @@ class CBMutexTest : public testing::Test { shared_threads = std::max(1u, num_threads - unique_threads); } - std::vector< std::thread* > threads; + std::vector< std::thread > threads; for (uint32_t i{0}; i < unique_threads; ++i) { - threads.push_back(new std::thread(bind_this(CBMutexTest::thread_unique_fn, 1), num_iters / num_threads)); + threads.emplace_back( + std::move(std::thread(bind_this(CBMutexTest::thread_unique_fn, 1), num_iters / num_threads))); } for (uint32_t i{0}; i < shared_threads; ++i) { - threads.push_back(new std::thread(bind_this(CBMutexTest::thread_shared_fn<>, 1), num_iters / num_threads)); + threads.emplace_back( + std::move(std::thread(bind_this(CBMutexTest::thread_shared_fn<>, 1), num_iters / num_threads))); } - for (auto t : threads) { - t->join(); - delete (t); + for (auto& t : threads) { + t.join(); + // delete (t); } } }; using testing::Types; -// typedef Types< std::mutex, std::shared_mutex, folly::SharedMutex > Implementations; -typedef Types< std::mutex > Implementations; +typedef Types< std::mutex, std::shared_mutex, folly::SharedMutex > Implementations; +// typedef Types< std::mutex > Implementations; +// typedef Types< std::mutex, folly::SharedMutex > Implementations; TYPED_TEST_SUITE(CBMutexTest, Implementations); TYPED_TEST(CBMutexTest, LockUnlockTest) { this->run_lock_unlock(); } diff --git a/src/logging/logging.cpp b/src/logging/logging.cpp index 5b4f3e3f..070fc5bf 100644 --- a/src/logging/logging.cpp +++ b/src/logging/logging.cpp @@ -78,31 +78,34 @@ static constexpr size_t MAX_MODULES{100}; static std::array< const char*, MAX_MODULES > glob_enabled_mods{"base"}; static size_t glob_num_mods{1}; -/****************************** LoggerThreadContext ******************************/ -std::mutex LoggerThreadContext::s_logger_thread_mutex; -std::unordered_set< LoggerThreadContext* > LoggerThreadContext::s_logger_thread_set; +/****************************** LoggerThreadRegistry ******************************/ +std::shared_ptr< LoggerThreadRegistry > LoggerThreadRegistry::instance() { + static std::shared_ptr< LoggerThreadRegistry > inst{std::make_shared< LoggerThreadRegistry >()}; + return inst; +} -LoggerThreadContext::LoggerThreadContext() { - m_thread_id = pthread_self(); - LoggerThreadContext::add_logger_thread(this); +void LoggerThreadRegistry::add_logger_thread(LoggerThreadContext* ctx) { + std::unique_lock l{m_logger_thread_mutex}; + m_logger_thread_set.insert(ctx); } -LoggerThreadContext::~LoggerThreadContext() { LoggerThreadContext::remove_logger_thread(this); } +void LoggerThreadRegistry::remove_logger_thread(LoggerThreadContext* ctx) { + std::unique_lock l{m_logger_thread_mutex}; + m_logger_thread_set.erase(ctx); +} +/****************************** LoggerThreadContext ******************************/ LoggerThreadContext& LoggerThreadContext::instance() { static thread_local LoggerThreadContext inst{}; return inst; } -void LoggerThreadContext::add_logger_thread(LoggerThreadContext* const ctx) { - std::unique_lock l{s_logger_thread_mutex}; - s_logger_thread_set.insert(ctx); +LoggerThreadContext::LoggerThreadContext() : + m_thread_id{pthread_self()}, m_logger_thread_registry{LoggerThreadRegistry::instance()} { + m_logger_thread_registry->add_logger_thread(this); } -void LoggerThreadContext::remove_logger_thread(LoggerThreadContext* const ctx) { - std::unique_lock l{s_logger_thread_mutex}; - s_logger_thread_set.erase(ctx); -} +LoggerThreadContext::~LoggerThreadContext() { m_logger_thread_registry->remove_logger_thread(this); } /******************************** InitModules *********************************/ void InitModules::init_modules(std::initializer_list< const char* > mods_list) { diff --git a/src/logging/stacktrace.cpp b/src/logging/stacktrace.cpp index f7656c87..de43d142 100644 --- a/src/logging/stacktrace.cpp +++ b/src/logging/stacktrace.cpp @@ -185,7 +185,7 @@ static void bt_dumper([[maybe_unused]] const SignalType signal_number) { } static void log_stack_trace_all_threads() { - std::unique_lock logger_lock{LoggerThreadContext::s_logger_thread_mutex}; + std::unique_lock logger_lock{LoggerThreadRegistry::instance()->m_logger_thread_mutex}; auto& logger{GetLogger()}; auto& critical_logger{GetCriticalLogger()}; size_t thread_count{1}; @@ -267,7 +267,7 @@ static void log_stack_trace_all_threads() { ++thread_count; // dump other threads - for (auto* const ctx : LoggerThreadContext::s_logger_thread_set) { + for (auto* ctx : LoggerThreadRegistry::instance()->m_logger_thread_set) { if (ctx == &logger_thread_ctx) { continue; } dump_thread(true, ctx->m_thread_id); ++thread_count; diff --git a/src/utility/tests/test_enum.cpp b/src/utility/tests/test_enum.cpp index 51543df1..c01ef0c7 100644 --- a/src/utility/tests/test_enum.cpp +++ b/src/utility/tests/test_enum.cpp @@ -9,7 +9,6 @@ #include -#include "sisl/utility/thread_buffer.hpp" #include "sisl/utility/enum.hpp" class EnumTest : public testing::Test { @@ -65,7 +64,7 @@ TEST_F(EnumTest, enum_unsigned_test) { ASSERT_EQ(enum_name(unsigned_enum::val2), "val2"); } -ENUM(signed_enum_value, int16_t, val1=-10, val2=-20) +ENUM(signed_enum_value, int16_t, val1 = -10, val2 = -20) TEST_F(EnumTest, enum_signed_value_test) { auto enum_lambda{[](const signed_enum_value& val) { switch (val) { @@ -84,7 +83,7 @@ TEST_F(EnumTest, enum_signed_value_test) { ASSERT_EQ(enum_name(signed_enum_value::val2), "val2"); } -ENUM(unsigned_enum_value, uint16_t, val1=10, val2=20, val3=1<<4, val4 = +30, val5 = 40u) +ENUM(unsigned_enum_value, uint16_t, val1 = 10, val2 = 20, val3 = 1 << 4, val4 = +30, val5 = 40u) TEST_F(EnumTest, enum_unsigned_value_test) { auto enum_lambda{[](const unsigned_enum_value& val) { switch (val) { @@ -113,11 +112,6 @@ TEST_F(EnumTest, enum_unsigned_value_test) { ASSERT_EQ(enum_name(unsigned_enum_value::val3), "val3"); ASSERT_EQ(enum_name(unsigned_enum_value::val4), "val4"); ASSERT_EQ(enum_name(unsigned_enum_value::val5), "val5"); - //ASSERT_EQ(enum_value("val1"), unsigned_enum_value::val1); - //ASSERT_EQ(enum_value("val2"), unsigned_enum_value::val2); - //ASSERT_EQ(enum_value("val3"), unsigned_enum_value::val3); - //ASSERT_EQ(enum_value("val4"), unsigned_enum_value::val4); - //ASSERT_EQ(enum_value("val5"), unsigned_enum_value::val5); } ENUM(signed_enum_mixed, int16_t, val1 = -10, val2) @@ -137,11 +131,9 @@ TEST_F(EnumTest, enum_signed_mixed_test) { ASSERT_EQ(enum_lambda(signed_enum_mixed::val2), -9); ASSERT_EQ(enum_name(signed_enum_mixed::val1), "val1"); ASSERT_EQ(enum_name(signed_enum_mixed::val2), "val2"); - //ASSERT_EQ(enum_value("val1"), signed_enum_mixed::val1); - //ASSERT_EQ(enum_value("val2") ,signed_enum_mixed::val2); } -ENUM(unsigned_enum_mixed, uint16_t, val1 = 10, val2, val3 = 1<<2) +ENUM(unsigned_enum_mixed, uint16_t, val1 = 10, val2, val3 = 1 << 2) TEST_F(EnumTest, enum_unsigned_mixed_test) { auto enum_lambda{[](const unsigned_enum_mixed& val) { switch (val) { @@ -162,12 +154,9 @@ TEST_F(EnumTest, enum_unsigned_mixed_test) { ASSERT_EQ(enum_name(unsigned_enum_mixed::val1), "val1"); ASSERT_EQ(enum_name(unsigned_enum_mixed::val2), "val2"); ASSERT_EQ(enum_name(unsigned_enum_mixed::val3), "val3"); - //ASSERT_EQ(enum_value("val1"), unsigned_enum_value::val1); - //ASSERT_EQ(enum_value("val2"), unsigned_enum_value::val2); - //ASSERT_EQ(enum_value("val3"), unsigned_enum_value::val3); } -ENUM(unsigned_enum2, uint16_t, val1=0x1, val2=0x2, val3=0x3) +ENUM(unsigned_enum2, uint16_t, val1 = 0x1, val2 = 0x2, val3 = 0x3) TEST_F(EnumTest, enum_unsigned_test_bit_ops) { auto enum_lambda{[](const unsigned_enum2& val) { switch (val) { @@ -188,9 +177,6 @@ TEST_F(EnumTest, enum_unsigned_test_bit_ops) { ASSERT_EQ(enum_name(unsigned_enum2::val1), "val1"); ASSERT_EQ(enum_name(unsigned_enum2::val2), "val2"); ASSERT_EQ(enum_name(unsigned_enum2::val3), "val3"); - //ASSERT_EQ(enum_value("val1"), unsigned_enum2::val1); - //ASSERT_EQ(enum_value("val2"), unsigned_enum2::val2); - //ASSERT_EQ(enum_value("val3"), unsigned_enum2::val3); ASSERT_EQ(unsigned_enum2::val1 | unsigned_enum2::val2, unsigned_enum2::val3); ASSERT_EQ(unsigned_enum2::val1 & unsigned_enum2::val3, unsigned_enum2::val1); @@ -202,6 +188,30 @@ TEST_F(EnumTest, enum_unsigned_test_bit_ops) { ASSERT_EQ(val2, unsigned_enum2::val2); } +class Container { +public: + SCOPED_ENUM_DECL(signed_enum, int16_t) +}; + +SCOPED_ENUM_DEF(Container, signed_enum, int16_t, val1, val2) +TEST_F(EnumTest, scoped_enum) { + auto enum_lambda{[](const Container::signed_enum& val) { + switch (val) { + case Container::signed_enum::val1: + return 1; + case Container::signed_enum::val2: + return 2; + default: + return 0; + }; + }}; + + ASSERT_EQ(enum_lambda(Container::signed_enum::val1), 1); + ASSERT_EQ(enum_lambda(Container::signed_enum::val2), 2); + ASSERT_EQ(enum_name(Container::signed_enum::val1), "val1"); + ASSERT_EQ(enum_name(Container::signed_enum::val2), "val2"); +} + int main(int argc, char* argv[]) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); From 90f32476b91bb370463b282b21831ed2ae309158 Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Tue, 4 Apr 2023 20:21:59 -0700 Subject: [PATCH 233/385] Threadbuffer destruction order control --- include/sisl/utility/thread_buffer.hpp | 22 +++++++++++----------- include/sisl/utility/thread_factory.hpp | 1 + 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/include/sisl/utility/thread_buffer.hpp b/include/sisl/utility/thread_buffer.hpp index 093e3515..48da6f49 100644 --- a/include/sisl/utility/thread_buffer.hpp +++ b/include/sisl/utility/thread_buffer.hpp @@ -36,9 +36,9 @@ #include #include -#include "atomic_counter.hpp" -#include "enum.hpp" -#include "urcu_helper.hpp" +#include +#include +#include namespace sisl { @@ -243,14 +243,10 @@ class ThreadRegistry { // std::vector< thread_state_cb_t > m_registered_notifiers; }; -#define thread_registry ThreadRegistry::instance() - class ThreadLocalContext { public: - ThreadLocalContext() { + ThreadLocalContext() : thread_registry{ThreadRegistry::get_instance_ptr()} { this_thread_num = thread_registry->attach(); - // LOGINFO("Created new ThreadLocalContext with thread_num = {}, my_thread_num = {}", this_thread_num, - // my_thread_num()); } ThreadLocalContext(const ThreadLocalContext&) = delete; ThreadLocalContext(ThreadLocalContext&&) noexcept = delete; @@ -270,8 +266,9 @@ class ThreadLocalContext { } static uint64_t get_context(const uint32_t context_id) { return instance()->user_contexts[context_id]; } +public: static thread_local ThreadLocalContext inst; - + std::shared_ptr< ThreadRegistry > thread_registry; // Take reference to control destruction order uint32_t this_thread_num; std::array< uint64_t, 5 > user_contexts; // To store any user contexts }; @@ -285,7 +282,9 @@ class ThreadBuffer { public: template < class... Args1 > ThreadBuffer(Args1&&... args) : - m_args(std::forward< Args1 >(args)...), m_thread_slots(ThreadRegistry::max_tracked_threads()) { + m_args(std::forward< Args1 >(args)...), + thread_registry{ThreadRegistry::get_instance_ptr()}, + m_thread_slots(ThreadRegistry::max_tracked_threads()) { m_buffers.reserve(ThreadRegistry::max_tracked_threads()); m_notify_idx = thread_registry->register_for_sc_notification( std::bind(&ThreadBuffer::on_thread_state_change, this, std::placeholders::_1, std::placeholders::_2)); @@ -436,10 +435,11 @@ class ThreadBuffer { private: sisl::sparse_vector< std::unique_ptr< T > > m_buffers; std::tuple< Args... > m_args; + std::shared_ptr< ThreadRegistry > thread_registry; // Take reference to control destruction order std::shared_mutex m_expand_mutex; boost::dynamic_bitset<> m_thread_slots; std::vector< std::unique_ptr< T > > m_exited_buffers; // Holds buffers whose threads already exited - uint64_t m_notify_idx = 0; + uint64_t m_notify_idx{0}; }; template < typename T, typename... Args > diff --git a/include/sisl/utility/thread_factory.hpp b/include/sisl/utility/thread_factory.hpp index 480921ba..b010b866 100644 --- a/include/sisl/utility/thread_factory.hpp +++ b/include/sisl/utility/thread_factory.hpp @@ -60,6 +60,7 @@ std::unique_ptr< std::thread > make_unique_thread(const std::string name, F&& f, template < class... Args > std::thread named_thread(const std::string name, Args&&... args) { auto t = std::thread(std::forward< Args >(args)...); + #ifdef _POSIX_THREADS #ifndef __APPLE__ auto tname = name.substr(0, 15); From a14473e1e8cd3ec8fbd742e8785b802fd0fea822 Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Tue, 4 Apr 2023 20:22:47 -0700 Subject: [PATCH 234/385] Create a link called 'latest' to the last logdir --- src/logging/logging.cpp | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/src/logging/logging.cpp b/src/logging/logging.cpp index 070fc5bf..fd4cdc0c 100644 --- a/src/logging/logging.cpp +++ b/src/logging/logging.cpp @@ -138,18 +138,30 @@ std::shared_ptr< spdlog::logger >& GetCriticalLogger() { } static std::filesystem::path get_base_dir() { - const auto cwd{std::filesystem::current_path()}; - auto p{cwd / "logs"}; + namespace fs = std::filesystem; + const auto cwd = fs::current_path(); + const auto log_dir{cwd / "logs"}; + // Construct a unique directory path based on the current time auto const current_time{std::chrono::system_clock::now()}; auto const current_t{std::chrono::system_clock::to_time_t(current_time)}; auto const current_tm{std::localtime(¤t_t)}; std::array< char, PATH_MAX > c_time; if (std::strftime(c_time.data(), c_time.size(), "%F_%R", current_tm)) { - p /= c_time.data(); - std::filesystem::create_directories(p); + const fs::path cur_log_dir = log_dir / c_time.data(); + fs::create_directories(cur_log_dir); + + const fs::path sym_path = log_dir / "latest"; + try { + if (fs::is_symlink(sym_path)) { fs::remove(sym_path); } + fs::create_directory_symlink(cur_log_dir, sym_path); + } catch (std::exception& e) { + LOGINFO("Unable to create latest symlink={} to log dir={}, ignoring symlink creation\n", sym_path, log_dir); + } + return cur_log_dir; + } else { + return log_dir; } - return p; } static std::filesystem::path log_path(std::string const& name) { From 46878b657c37c0da4456256b534a62d4e773f190 Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Tue, 4 Apr 2023 20:23:31 -0700 Subject: [PATCH 235/385] More helper cast macros --- include/sisl/fds/utils.hpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/sisl/fds/utils.hpp b/include/sisl/fds/utils.hpp index a71c09fb..0832a76e 100644 --- a/include/sisl/fds/utils.hpp +++ b/include/sisl/fds/utils.hpp @@ -224,10 +224,13 @@ static int spaceship_oper(const T& left, const T& right) { #define uintptr_cast reinterpret_cast< uint8_t* > #define voidptr_cast reinterpret_cast< void* > +#define charptr_cast reinterpret_cast< char* > +#define c_charptr_cast reinterpret_cast< const char* > #define int_cast static_cast< int > #define uint32_cast static_cast< uint32_t > #define int64_cast static_cast< int64_t > #define uint64_cast static_cast< uint64_t > +#define size_cast static_cast< size_t > } // namespace sisl From 705709b625cc09900fab6b3021a0c0d24f618ab6 Mon Sep 17 00:00:00 2001 From: "Kadayam, Harihara(hkadayam)" Date: Tue, 4 Apr 2023 20:25:11 -0700 Subject: [PATCH 236/385] Flip supporting remove_flip runtime and some syntax sugar for Flipclient --- conanfile.py | 2 +- include/sisl/flip/flip.hpp | 183 ++++++++++++-------------- include/sisl/flip/flip_client.hpp | 9 ++ include/sisl/flip/flip_rpc_server.hpp | 2 + src/flip/lib/flip_rpc_server.cpp | 9 +- src/flip/proto/flip_server.proto | 7 + 6 files changed, 109 insertions(+), 103 deletions(-) diff --git a/conanfile.py b/conanfile.py index 4ca1b435..edb74a8e 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "9.1.2" + version = "9.1.3" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" diff --git a/include/sisl/flip/flip.hpp b/include/sisl/flip/flip.hpp index 0142850d..d0caad81 100644 --- a/include/sisl/flip/flip.hpp +++ b/include/sisl/flip/flip.hpp @@ -133,6 +133,13 @@ struct val_converter< std::string > { } }; +template <> +struct val_converter< const std::string > { + std::string operator()(const ParamValue& val) { + return (val.kind_case() == ParamValue::kStringValue) ? val.string_value() : ""; + } +}; + template <> struct val_converter< const char* > { const char* operator()(const ParamValue& val) { @@ -172,15 +179,6 @@ struct to_proto_converter< int > { void operator()(const int& val, ParamValue* out_pval) { out_pval->set_int_value(val); } }; -#if 0 -template <> -struct val_converter { - const int operator()(const ParamValue &val) { - return (val.kind_case() == ParamValue::kIntValue) ? val.int_value() : 0; - } -}; -#endif - template <> struct to_proto_converter< long > { void operator()(const long& val, ParamValue* out_pval) { out_pval->set_long_value(val); } @@ -196,6 +194,11 @@ struct to_proto_converter< std::string > { void operator()(const std::string& val, ParamValue* out_pval) { out_pval->set_string_value(val); } }; +template <> +struct to_proto_converter< const std::string > { + void operator()(const std::string& val, ParamValue* out_pval) { out_pval->set_string_value(val); } +}; + template <> struct to_proto_converter< const char* > { void operator()(const char*& val, ParamValue* out_pval) { out_pval->set_string_value(val); } @@ -273,6 +276,43 @@ struct compare_val< std::string > { } } }; + +template <> +struct compare_val< const std::string > { + bool operator()(const std::string& val1, const std::string& val2, Operator oper) { + switch (oper) { + case Operator::DONT_CARE: + return true; + + case Operator::EQUAL: + return (val1 == val2); + + case Operator::NOT_EQUAL: + return (val1 != val2); + + case Operator::GREATER_THAN: + return (val1 > val2); + + case Operator::LESS_THAN: + return (val1 < val2); + + case Operator::GREATER_THAN_OR_EQUAL: + return (val1 >= val2); + + case Operator::LESS_THAN_OR_EQUAL: + return (val1 <= val2); + + case Operator::REG_EX: { + const std::regex re(val2); + return (std::sregex_iterator(val1.begin(), val1.end(), re) != std::sregex_iterator()); + } + + default: + return false; + } + } +}; + template <> struct compare_val< const char* > { bool operator()(const char*& val1, const char*& val2, Operator oper) { @@ -317,12 +357,14 @@ using io_work = boost::asio::io_service::work; class FlipTimerBase { public: virtual ~FlipTimerBase() = default; - virtual void schedule(boost::posix_time::time_duration delay_us, const std::function< void() >& closure) = 0; + virtual void schedule(const std::string& timer_name, boost::posix_time::time_duration delay_us, + const std::function< void() >& closure) = 0; + virtual void cancel(const std::string& timer_name) = 0; }; class FlipTimerAsio : public FlipTimerBase { public: - FlipTimerAsio() : m_timer_count(0) {} + FlipTimerAsio() {} ~FlipTimerAsio() { if (m_timer_thread != nullptr) { m_work.reset(); @@ -330,26 +372,29 @@ class FlipTimerAsio : public FlipTimerBase { } } - void schedule(boost::posix_time::time_duration delay_us, const std::function< void() >& closure) override { + void schedule(const std::string& timer_name, boost::posix_time::time_duration delay_us, + const std::function< void() >& closure) override { std::unique_lock< std::mutex > lk(m_thr_mutex); - ++m_timer_count; if (m_work == nullptr) { m_work = std::make_unique< io_work >(m_svc); m_timer_thread = std::make_unique< std::thread >(std::bind(&FlipTimerAsio::timer_thr, this)); } auto t = std::make_shared< deadline_timer >(m_svc, delay_us); - t->async_wait([this, closure, t](const boost::system::error_code& e) { + t->async_wait([this, closure, t, timer_name](const boost::system::error_code& e) { if (e) { LOGERRORMOD(flip, "Error in timer routine, message {}", e.message()); } else { closure(); } - std::unique_lock< std::mutex > lk(m_thr_mutex); - --m_timer_count; + remove_timer(timer_name, t); }); + + m_timer_instances.insert(std::make_pair(timer_name, std::move(t))); } + void cancel(const std::string& timer_name) { remove_timer(timer_name, nullptr); } + void timer_thr() { size_t executed = 0; executed = m_svc.run(); @@ -357,12 +402,25 @@ class FlipTimerAsio : public FlipTimerBase { (void)executed; } +private: + void remove_timer(const std::string& timer_name, const std::shared_ptr< deadline_timer >& t) { + std::unique_lock< std::mutex > lk(m_thr_mutex); + auto range = m_timer_instances.equal_range(timer_name); + for (auto it = range.first; it != range.second;) { + if ((t == nullptr) || (it->second == t)) { + it = m_timer_instances.erase(it); + } else { + ++it; + } + } + } + private: io_service m_svc; std::unique_ptr< io_work > m_work; std::mutex m_thr_mutex; - int32_t m_timer_count; std::unique_ptr< std::thread > m_timer_thread; + std::multimap< std::string, std::shared_ptr< deadline_timer > > m_timer_instances; }; static constexpr int TEST_ONLY = 0; @@ -424,35 +482,17 @@ class Flip { for (const auto& it : m_flip_specs) { const auto& inst = it.second; res.emplace_back(inst.to_string()); -#if 0 - for (auto it = inst_range.first; it != inst_range.second; ++it) { - const auto& inst = inst_range->second; - res.emplace_back(inst.to_string()); - } -#endif } return res; } -#if 0 - bool add_flip(std::string flip_name, std::vector conditions, FlipAction& action, - uint32_t count, uint8_t percent) { - FlipSpec fspec; - *(fspec.mutable_flip_name()) = "delay_ret_fspec"; - auto cond = fspec->mutable_conditions()->Add(); - *cond->mutable_name() = "cmd_type"; - cond->set_oper(flip::Operator::EQUAL); - cond->mutable_value()->set_int_value(2); - - fspec->mutable_flip_action()->mutable_delay_returns()->set_delay_in_usec(100000); - fspec->mutable_flip_action()->mutable_delay_returns()->mutable_return_()->set_string_value("Delayed error simulated value"); - - auto freq = fspec->mutable_flip_frequency(); - freq->set_count(2); - freq->set_percent(100); + uint32_t remove(const std::string& flip_name) { + std::unique_lock< std::shared_mutex > lock(m_mutex); + auto nremoved = m_flip_specs.erase(flip_name); + m_timer->cancel(flip_name); + return static_cast< uint32_t >(nremoved); } -#endif template < class... Args > bool test_flip(std::string flip_name, Args&&... args) { @@ -478,7 +518,7 @@ class Flip { if (ret == boost::none) return false; // Not a hit uint64_t delay_usec = boost::get< uint64_t >(ret.get()); - get_timer().schedule(boost::posix_time::microseconds(delay_usec), closure); + get_timer().schedule(flip_name, boost::posix_time::microseconds(delay_usec), closure); return true; } @@ -491,7 +531,7 @@ class Flip { auto param = boost::get< delayed_return_param< T > >(ret.get()); LOGDEBUGMOD(flip, "Returned param delay = {} val = {}", param.delay_usec, param.val); - get_timer().schedule(boost::posix_time::microseconds(param.delay_usec), + get_timer().schedule(flip_name, boost::posix_time::microseconds(param.delay_usec), [closure, param]() { closure(param.val); }); return true; } @@ -621,65 +661,6 @@ class Flip { FlipTimerBase& get_timer() { return *m_timer; } -#if 0 - template< typename T > - bool compare_val(T &val1, T &val2, Operator oper) { - switch (oper) { - case Operator::DONT_CARE: - return true; - - case Operator::EQUAL: - return (val1 == val2); - - case Operator::NOT_EQUAL: - return (val1 != val2); - - case Operator::GREATER_THAN: - return (val1 > val2); - - case Operator::LESS_THAN: - return (val1 < val2); - - case Operator::GREATER_THAN_OR_EQUAL: - return (val1 >= val2); - - case Operator::LESS_THAN_OR_EQUAL: - return (val1 <= val2); - - default: - return false; - } - } - - template<> - bool compare_val(const char *&val1, const char *&val2, Operator oper) { - switch (oper) { - case Operator::DONT_CARE: - return true; - - case Operator::EQUAL: - return (val1 && val2 && (strcmp(val1, val2) == 0)) || (!val1 && !val2); - - case Operator::NOT_EQUAL: - return (val1 && val2 && (strcmp(val1, val2) != 0)) || (!val1 && val2) || (val1 && !val2); - - case Operator::GREATER_THAN: - return (val1 && val2 && (strcmp(val1, val2) > 0)) || (val1 && !val2); - - case Operator::LESS_THAN: - return (val1 && val2 && (strcmp(val1, val2) < 0)) || (!val1 && val2); - - case Operator::GREATER_THAN_OR_EQUAL: - return (val1 && val2 && (strcmp(val1, val2) >= 0)) || (val1 && !val2) || (!val1 && !val2); - - case Operator::LESS_THAN_OR_EQUAL: - return (val1 && val2 && (strcmp(val1, val2) <= 0)) || (!val1 && val2) || (!val1 && !val2); - - default: - return false; - } - } -#endif private: std::multimap< std::string, flip_instance, flip_name_compare > m_flip_specs; std::shared_mutex m_mutex; diff --git a/include/sisl/flip/flip_client.hpp b/include/sisl/flip/flip_client.hpp index 768fee74..ec258c45 100644 --- a/include/sisl/flip/flip_client.hpp +++ b/include/sisl/flip/flip_client.hpp @@ -30,6 +30,13 @@ class FlipClient { to_proto_converter< T >()(value, out_condition->mutable_value()); } + template < typename T > + FlipCondition create_condition(const std::string& param_name, flip::Operator oper, const T& value) { + FlipCondition fcond; + create_condition(param_name, oper, value, &fcond); + return fcond; + } + bool inject_noreturn_flip(std::string flip_name, const std::vector< FlipCondition >& conditions, const FlipFrequency& freq) { FlipSpec fspec; @@ -77,6 +84,8 @@ class FlipClient { return true; } + uint32_t remove_flip(const std::string& flip_name) { return m_flip->remove(flip_name); } + private: void _create_flip_spec(std::string flip_name, const std::vector< FlipCondition >& conditions, const FlipFrequency& freq, FlipSpec& out_fspec) { diff --git a/include/sisl/flip/flip_rpc_server.hpp b/include/sisl/flip/flip_rpc_server.hpp index 8168ae1f..ede9cd0a 100644 --- a/include/sisl/flip/flip_rpc_server.hpp +++ b/include/sisl/flip/flip_rpc_server.hpp @@ -25,6 +25,8 @@ class FlipRPCServer final : public FlipServer::Service { grpc::Status InjectFault(grpc::ServerContext* context, const FlipSpec* request, FlipResponse* response) override; grpc::Status GetFaults(grpc::ServerContext* context, const FlipNameRequest* request, FlipListResponse* response) override; + grpc::Status RemoveFault(grpc::ServerContext*, const FlipRemoveRequest* request, + FlipRemoveResponse* response) override; static void rpc_thread(); }; } // namespace flip diff --git a/src/flip/lib/flip_rpc_server.cpp b/src/flip/lib/flip_rpc_server.cpp index 2a4a2b4e..890f7e43 100644 --- a/src/flip/lib/flip_rpc_server.cpp +++ b/src/flip/lib/flip_rpc_server.cpp @@ -27,7 +27,7 @@ namespace flip { grpc::Status FlipRPCServer::InjectFault(grpc::ServerContext*, const FlipSpec* request, FlipResponse* response) { - LOGTRACEMOD(flip, "Flipspec request = {}", request->DebugString()); + LOGTRACEMOD(flip, "InjectFault request = {}", request->DebugString()); flip::Flip::instance().add(*request); response->set_success(true); return grpc::Status::OK; @@ -44,6 +44,13 @@ grpc::Status FlipRPCServer::GetFaults(grpc::ServerContext*, const FlipNameReques return grpc::Status::OK; } +grpc::Status FlipRPCServer::RemoveFault(grpc::ServerContext*, const FlipRemoveRequest* request, + FlipRemoveResponse* response) { + LOGTRACEMOD(flip, "RemoveFault request = {}", request->DebugString()); + response->set_num_removed(flip::Flip::instance().remove(request->name())); + return grpc::Status::OK; +} + class FlipRPCServiceWrapper : public FlipRPCServer::Service { public: void print_method_names() { diff --git a/src/flip/proto/flip_server.proto b/src/flip/proto/flip_server.proto index 79e3ae71..42ac0bd8 100644 --- a/src/flip/proto/flip_server.proto +++ b/src/flip/proto/flip_server.proto @@ -12,10 +12,17 @@ message FlipListResponse { message FlipNameRequest { string name = 1; } +message FlipRemoveRequest { string name = 1; } + +message FlipRemoveResponse { uint32 num_removed = 1; } + service FlipServer { // Inject a fault rpc rpc InjectFault(flip.FlipSpec) returns (flip.FlipResponse); // Get details about one or all faults rpc GetFaults(FlipNameRequest) returns (FlipListResponse); + + // Remove a fault added earlier + rpc RemoveFault(FlipRemoveRequest) returns (FlipRemoveResponse); } \ No newline at end of file From f6413452a458be4691a21b5b85eec9b7becc48e0 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 20 Dec 2022 12:40:58 -0700 Subject: [PATCH 237/385] Merge remote-tracking branch 'grpc/master' --- conanfile.py | 2 +- .../{grpc_helper => grpc}/generic_service.hpp | 12 ++-- .../sisl/{grpc_helper => grpc}/rpc_call.hpp | 6 +- .../sisl/{grpc_helper => grpc}/rpc_client.hpp | 6 +- .../sisl/{grpc_helper => grpc}/rpc_common.hpp | 4 +- .../sisl/{grpc_helper => grpc}/rpc_server.hpp | 6 +- src/CMakeLists.txt | 2 + src/grpc/CMakeLists.txt | 23 +++++++ src/{grpc_helper => grpc}/rpc_client.cpp | 6 +- src/{grpc_helper => grpc}/rpc_server.cpp | 8 +-- .../tests/CMakeLists.txt | 0 .../tests/function/CMakeLists.txt | 8 ++- .../tests/function/echo_async_client.cpp | 8 +-- .../tests/function/echo_server.cpp | 4 +- .../tests/function/echo_sync_client.cpp | 2 +- .../tests/proto/CMakeLists.txt | 0 .../tests/proto/grpc_helper_test.proto | 0 .../tests/unit/CMakeLists.txt | 5 +- .../tests/unit/auth_test.cpp | 62 +++++++++---------- .../tests/unit/basic_http_server.hpp | 6 +- .../tests/unit/test_token.hpp | 4 +- src/{grpc_helper => grpc}/utils.hpp | 4 +- src/grpc_helper/CMakeLists.txt | 14 ----- 23 files changed, 104 insertions(+), 88 deletions(-) rename include/sisl/{grpc_helper => grpc}/generic_service.hpp (95%) rename include/sisl/{grpc_helper => grpc}/rpc_call.hpp (99%) rename include/sisl/{grpc_helper => grpc}/rpc_client.hpp (99%) rename include/sisl/{grpc_helper => grpc}/rpc_common.hpp (90%) rename include/sisl/{grpc_helper => grpc}/rpc_server.hpp (98%) create mode 100644 src/grpc/CMakeLists.txt rename src/{grpc_helper => grpc}/rpc_client.cpp (98%) rename src/{grpc_helper => grpc}/rpc_server.cpp (98%) rename src/{grpc_helper => grpc}/tests/CMakeLists.txt (100%) rename src/{grpc_helper => grpc}/tests/function/CMakeLists.txt (87%) rename src/{grpc_helper => grpc}/tests/function/echo_async_client.cpp (98%) rename src/{grpc_helper => grpc}/tests/function/echo_server.cpp (98%) rename src/{grpc_helper => grpc}/tests/function/echo_sync_client.cpp (99%) rename src/{grpc_helper => grpc}/tests/proto/CMakeLists.txt (100%) rename src/{grpc_helper => grpc}/tests/proto/grpc_helper_test.proto (100%) rename src/{grpc_helper => grpc}/tests/unit/CMakeLists.txt (85%) rename src/{grpc_helper => grpc}/tests/unit/auth_test.cpp (87%) rename src/{grpc_helper => grpc}/tests/unit/basic_http_server.hpp (89%) rename src/{grpc_helper => grpc}/tests/unit/test_token.hpp (98%) rename src/{grpc_helper => grpc}/utils.hpp (88%) delete mode 100644 src/grpc_helper/CMakeLists.txt diff --git a/conanfile.py b/conanfile.py index ebe61e08..72dc1252 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.2.9" + version = "8.3.1" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/include/sisl/grpc_helper/generic_service.hpp b/include/sisl/grpc/generic_service.hpp similarity index 95% rename from include/sisl/grpc_helper/generic_service.hpp rename to include/sisl/grpc/generic_service.hpp index 4d64ee00..b5d59e56 100644 --- a/include/sisl/grpc_helper/generic_service.hpp +++ b/include/sisl/grpc/generic_service.hpp @@ -3,7 +3,7 @@ #include #include "rpc_call.hpp" -namespace grpc_helper { +namespace sisl { using generic_rpc_handler_cb_t = std::function< bool(boost::intrusive_ptr< GenericRpcData >&) >; @@ -77,7 +77,7 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD return in_shutdown ? nullptr : create_new(); } - RpcDataAbstract* on_buf_read(bool ok) { + RpcDataAbstract* on_buf_read(bool ) { auto this_rpc_data = boost::intrusive_ptr< GenericRpcData >{this}; // take a ref before the handler cb is called. // unref is called in send_response which is handled by us (in case of sync calls) @@ -87,16 +87,16 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD return nullptr; } - RpcDataAbstract* on_buf_write(bool ok) { + RpcDataAbstract* on_buf_write(bool ) { m_stream.Finish(m_retstatus, static_cast< void* >(m_request_completed_tag.ref())); unref(); return nullptr; } - RpcDataAbstract* on_request_completed(bool ok) { return nullptr; } + RpcDataAbstract* on_request_completed(bool) { return nullptr; } struct RpcTagImpl : public RpcTag { - using callback_type = RpcDataAbstract* (GenericRpcData::*)(bool ok); + using callback_type = RpcDataAbstract* (GenericRpcData::*)(bool); RpcTagImpl(GenericRpcData* rpc, callback_type cb) : RpcTag{rpc}, m_callback{cb} {} RpcDataAbstract* do_process(bool ok) override { @@ -114,4 +114,4 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD RpcTagImpl m_request_completed_tag{this, &GenericRpcData::on_request_completed}; }; -} // namespace grpc_helper \ No newline at end of file +} // namespace sisl diff --git a/include/sisl/grpc_helper/rpc_call.hpp b/include/sisl/grpc/rpc_call.hpp similarity index 99% rename from include/sisl/grpc_helper/rpc_call.hpp rename to include/sisl/grpc/rpc_call.hpp index 576ea7eb..f72a99de 100644 --- a/include/sisl/grpc_helper/rpc_call.hpp +++ b/include/sisl/grpc/rpc_call.hpp @@ -26,7 +26,7 @@ SISL_LOGGING_DECL(grpc_server) }), \ msg, ##__VA_ARGS__); -namespace grpc_helper { +namespace sisl { class RpcDataAbstract : public boost::intrusive_ref_counter< RpcDataAbstract, boost::thread_safe_counter > { public: RpcDataAbstract(size_t queue_idx) : @@ -163,7 +163,7 @@ class RpcData : public RpcDataAbstract, sisl::ObjLifeCounter< RpcData< ServiceT, // applicable) // NOTE: this function MUST `unref()` this call template < bool mode = streaming > - std::enable_if_t< !mode, void > send_response(bool is_last = true) { + std::enable_if_t< !mode, void > send_response([[maybe_unused]] bool is_last = true) { do_non_streaming_send(); } @@ -410,4 +410,4 @@ class RpcData : public RpcDataAbstract, sisl::ObjLifeCounter< RpcData< ServiceT, RpcTagImpl m_completed_tag{this, &RpcData::on_request_completed}; }; -} // namespace grpc_helper +} // namespace sisl::grpc diff --git a/include/sisl/grpc_helper/rpc_client.hpp b/include/sisl/grpc/rpc_client.hpp similarity index 99% rename from include/sisl/grpc_helper/rpc_client.hpp rename to include/sisl/grpc/rpc_client.hpp index e64e1da5..8d5f9355 100644 --- a/include/sisl/grpc_helper/rpc_client.hpp +++ b/include/sisl/grpc/rpc_client.hpp @@ -19,7 +19,7 @@ #include #include -namespace grpc_helper { +namespace sisl { /** * A interface for handling gRPC async response @@ -156,7 +156,7 @@ class GrpcSyncClient : public GrpcBaseClient { } }; -ENUM(ClientState, uint8_t, VOID, INIT, RUNNING, SHUTTING_DOWN, TERMINATED); +ENUM(ClientState, uint8_t, VOID, INIT, RUNNING, SHUTTING_DOWN, TERMINATED) /** * One GrpcBaseClient can have multiple stub @@ -346,4 +346,4 @@ class GrpcAsyncClient : public GrpcBaseClient { std::unique_ptr< GenericAsyncStub > make_generic_stub(const std::string& worker); }; -} // namespace grpc_helper +} // namespace sisl::grpc diff --git a/include/sisl/grpc_helper/rpc_common.hpp b/include/sisl/grpc/rpc_common.hpp similarity index 90% rename from include/sisl/grpc_helper/rpc_common.hpp rename to include/sisl/grpc/rpc_common.hpp index 74748e3c..bf2a0326 100644 --- a/include/sisl/grpc_helper/rpc_common.hpp +++ b/include/sisl/grpc/rpc_common.hpp @@ -1,6 +1,6 @@ #pragma once -namespace grpc_helper { +namespace sisl { class GrpcServer; class GenericRpcData; struct RPCHelper { @@ -10,4 +10,4 @@ struct RPCHelper { static grpc::Status do_authorization(const GrpcServer* server, const grpc::ServerContext* srv_ctx); static grpc::StatusCode to_grpc_statuscode(const sisl::AuthVerifyStatus status); }; -} // namespace grpc_helper +} // namespace sisl::grpc diff --git a/include/sisl/grpc_helper/rpc_server.hpp b/include/sisl/grpc/rpc_server.hpp similarity index 98% rename from include/sisl/grpc_helper/rpc_server.hpp rename to include/sisl/grpc/rpc_server.hpp index 4bbd298a..7c84c71f 100644 --- a/include/sisl/grpc_helper/rpc_server.hpp +++ b/include/sisl/grpc/rpc_server.hpp @@ -11,14 +11,14 @@ #include #include "rpc_call.hpp" -namespace grpc_helper { +namespace sisl { class GenericRpcData; class GenericRpcStaticInfo; using generic_rpc_handler_cb_t = std::function< bool(boost::intrusive_ptr< GenericRpcData >&) >; using rpc_thread_start_cb_t = std::function< void(uint32_t) >; -ENUM(ServerState, uint8_t, VOID, INITED, RUNNING, SHUTTING_DOWN, TERMINATED); +ENUM(ServerState, uint8_t, VOID, INITED, RUNNING, SHUTTING_DOWN, TERMINATED) class GrpcServer : private boost::noncopyable { friend class RPCHelper; @@ -129,4 +129,4 @@ class GrpcServer : private boost::noncopyable { std::unordered_map< std::string, generic_rpc_handler_cb_t > m_generic_rpc_registry; std::shared_mutex m_generic_rpc_registry_mtx; }; -} // namespace grpc_helper +} // namespace sisl::grpc diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 68e19dd1..9ec2d5c7 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,5 +1,6 @@ cmake_minimum_required (VERSION 3.11) +add_subdirectory (grpc) add_subdirectory (logging) add_subdirectory (options) add_subdirectory (version) @@ -38,6 +39,7 @@ endif() add_library(sisl ${POSIX_LIBRARIES} + $ $ $ $ diff --git a/src/grpc/CMakeLists.txt b/src/grpc/CMakeLists.txt new file mode 100644 index 00000000..b1a6d163 --- /dev/null +++ b/src/grpc/CMakeLists.txt @@ -0,0 +1,23 @@ +cmake_minimum_required (VERSION 3.11) + +find_package(flatbuffers REQUIRED) +find_package(gRPC REQUIRED) + +set(CMAKE_CXX_STANDARD 17) + +include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/../auth_manager) + +add_library(sisl_grpc OBJECT) +target_sources(sisl_grpc PRIVATE + rpc_server.cpp + rpc_client.cpp + ) +target_link_libraries(sisl_grpc + gRPC::grpc++ + cpr::cpr + flatbuffers::flatbuffers + jwt-cpp::jwt-cpp + ${COMMON_DEPS} + ) + +add_subdirectory(tests) diff --git a/src/grpc_helper/rpc_client.cpp b/src/grpc/rpc_client.cpp similarity index 98% rename from src/grpc_helper/rpc_client.cpp rename to src/grpc/rpc_client.cpp index 2026e216..0ea2c155 100644 --- a/src/grpc_helper/rpc_client.cpp +++ b/src/grpc/rpc_client.cpp @@ -1,7 +1,7 @@ -#include "grpc_helper/rpc_client.hpp" +#include "sisl/grpc/rpc_client.hpp" #include "utils.hpp" -namespace grpc_helper { +namespace sisl { GrpcBaseClient::GrpcBaseClient(const std::string& server_addr, const std::string& target_domain, const std::string& ssl_cert) : @@ -144,4 +144,4 @@ std::unique_ptr< GrpcAsyncClient::GenericAsyncStub > GrpcAsyncClient::make_gener return std::make_unique< GrpcAsyncClient::GenericAsyncStub >(std::make_unique< grpc::GenericStub >(m_channel), w, m_trf_client); } -} // namespace grpc_helper +} // namespace sisl::grpc diff --git a/src/grpc_helper/rpc_server.cpp b/src/grpc/rpc_server.cpp similarity index 98% rename from src/grpc_helper/rpc_server.cpp rename to src/grpc/rpc_server.cpp index d4b14ae7..1239a33e 100644 --- a/src/grpc_helper/rpc_server.cpp +++ b/src/grpc/rpc_server.cpp @@ -4,8 +4,8 @@ * Created on: Oct 24, 2018 */ -#include -#include "grpc_helper/generic_service.hpp" +#include "sisl/grpc/rpc_server.hpp" +#include "sisl/grpc/generic_service.hpp" #include "utils.hpp" #ifdef _POSIX_THREADS @@ -18,7 +18,7 @@ extern "C" { #include -namespace grpc_helper { +namespace sisl { GrpcServer::GrpcServer(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, const std::string& ssl_cert) : GrpcServer::GrpcServer(listen_addr, threads, ssl_key, ssl_cert, nullptr) {} @@ -246,4 +246,4 @@ grpc::StatusCode RPCHelper::to_grpc_statuscode(const sisl::AuthVerifyStatus stat return ret; } -} // namespace grpc_helper +} // namespace sisl::grpc diff --git a/src/grpc_helper/tests/CMakeLists.txt b/src/grpc/tests/CMakeLists.txt similarity index 100% rename from src/grpc_helper/tests/CMakeLists.txt rename to src/grpc/tests/CMakeLists.txt diff --git a/src/grpc_helper/tests/function/CMakeLists.txt b/src/grpc/tests/function/CMakeLists.txt similarity index 87% rename from src/grpc_helper/tests/function/CMakeLists.txt rename to src/grpc/tests/function/CMakeLists.txt index ed65373e..50994d06 100644 --- a/src/grpc_helper/tests/function/CMakeLists.txt +++ b/src/grpc/tests/function/CMakeLists.txt @@ -7,8 +7,10 @@ target_sources(echo_server PRIVATE $ ) target_link_libraries(echo_server - grpc_helper + sisl + sisl_grpc GTest::gtest + ${COMMON_DEPS} ) add_test(NAME Echo_Ping_Server COMMAND echo_server) @@ -19,7 +21,9 @@ target_sources(echo_async_client PRIVATE $ ) target_link_libraries(echo_async_client - grpc_helper + sisl + sisl_grpc GTest::gtest + ${COMMON_DEPS} ) add_test(NAME Echo_Ping_Async_Client_Server COMMAND echo_async_client) diff --git a/src/grpc_helper/tests/function/echo_async_client.cpp b/src/grpc/tests/function/echo_async_client.cpp similarity index 98% rename from src/grpc_helper/tests/function/echo_async_client.cpp rename to src/grpc/tests/function/echo_async_client.cpp index 8bf11068..ce2655b2 100644 --- a/src/grpc_helper/tests/function/echo_async_client.cpp +++ b/src/grpc/tests/function/echo_async_client.cpp @@ -9,12 +9,12 @@ #include #include -#include "grpc_helper/rpc_client.hpp" -#include "grpc_helper/rpc_server.hpp" -#include "grpc_helper/generic_service.hpp" +#include "sisl/grpc/rpc_client.hpp" +#include "sisl/grpc/rpc_server.hpp" +#include "sisl/grpc/generic_service.hpp" #include "grpc_helper_test.grpc.pb.h" -using namespace grpc_helper; +using namespace sisl; using namespace ::grpc_helper_test; using namespace std::placeholders; diff --git a/src/grpc_helper/tests/function/echo_server.cpp b/src/grpc/tests/function/echo_server.cpp similarity index 98% rename from src/grpc_helper/tests/function/echo_server.cpp rename to src/grpc/tests/function/echo_server.cpp index 5762f3eb..4e0726f2 100644 --- a/src/grpc_helper/tests/function/echo_server.cpp +++ b/src/grpc/tests/function/echo_server.cpp @@ -16,11 +16,11 @@ #include #include -#include "grpc_helper/rpc_server.hpp" +#include "sisl/grpc/rpc_server.hpp" #include "grpc_helper_test.grpc.pb.h" using namespace ::grpc; -using namespace grpc_helper; +using namespace sisl; using namespace ::grpc_helper_test; using namespace std::placeholders; diff --git a/src/grpc_helper/tests/function/echo_sync_client.cpp b/src/grpc/tests/function/echo_sync_client.cpp similarity index 99% rename from src/grpc_helper/tests/function/echo_sync_client.cpp rename to src/grpc/tests/function/echo_sync_client.cpp index 12664991..3d904eb4 100644 --- a/src/grpc_helper/tests/function/echo_sync_client.cpp +++ b/src/grpc/tests/function/echo_sync_client.cpp @@ -19,7 +19,7 @@ #include "sds_grpc_test.grpc.pb.h" using namespace ::grpc; -using namespace ::sds::grpc; +using namespace ::sisl; using namespace ::sds_grpc_test; using namespace std::placeholders; diff --git a/src/grpc_helper/tests/proto/CMakeLists.txt b/src/grpc/tests/proto/CMakeLists.txt similarity index 100% rename from src/grpc_helper/tests/proto/CMakeLists.txt rename to src/grpc/tests/proto/CMakeLists.txt diff --git a/src/grpc_helper/tests/proto/grpc_helper_test.proto b/src/grpc/tests/proto/grpc_helper_test.proto similarity index 100% rename from src/grpc_helper/tests/proto/grpc_helper_test.proto rename to src/grpc/tests/proto/grpc_helper_test.proto diff --git a/src/grpc_helper/tests/unit/CMakeLists.txt b/src/grpc/tests/unit/CMakeLists.txt similarity index 85% rename from src/grpc_helper/tests/unit/CMakeLists.txt rename to src/grpc/tests/unit/CMakeLists.txt index 2780676c..1e82a780 100644 --- a/src/grpc_helper/tests/unit/CMakeLists.txt +++ b/src/grpc/tests/unit/CMakeLists.txt @@ -5,9 +5,10 @@ add_executable(auth_test $ ) target_link_libraries(auth_test - grpc_helper + sisl + sisl_grpc pistache::pistache - gRPC::grpc++ GTest::gmock + ${COMMON_DEPS} ) add_test(NAME Auth_Test COMMAND auth_test) diff --git a/src/grpc_helper/tests/unit/auth_test.cpp b/src/grpc/tests/unit/auth_test.cpp similarity index 87% rename from src/grpc_helper/tests/unit/auth_test.cpp rename to src/grpc/tests/unit/auth_test.cpp index 149e2fe0..756c8d37 100644 --- a/src/grpc_helper/tests/unit/auth_test.cpp +++ b/src/grpc/tests/unit/auth_test.cpp @@ -8,15 +8,15 @@ #include #include "basic_http_server.hpp" -#include "grpc_helper/rpc_client.hpp" -#include "grpc_helper/rpc_server.hpp" +#include "sisl/grpc/rpc_client.hpp" +#include "sisl/grpc/rpc_server.hpp" #include "grpc_helper_test.grpc.pb.h" #include "test_token.hpp" SISL_LOGGING_INIT(logging, grpc_server) SISL_OPTIONS_ENABLE(logging) -namespace grpc_helper::testing { +namespace sisl::grpc::testing { using namespace sisl; using namespace ::grpc_helper_test; using namespace ::testing; @@ -91,7 +91,7 @@ class AuthBaseTest : public ::testing::Test { LOGINFO("Server listening on {}", server_address); m_echo_impl->register_rpcs(m_grpc_server); m_grpc_server->register_generic_rpc(GENERIC_METHOD, - [](boost::intrusive_ptr< GenericRpcData >& rpc_data) { return true; }); + [](boost::intrusive_ptr< GenericRpcData >&) { return true; }); } void process_echo_reply() { @@ -99,10 +99,10 @@ class AuthBaseTest : public ::testing::Test { m_cv.notify_all(); } - void call_async_echo(EchoRequest& req, EchoReply& reply, grpc::Status& status) { + void call_async_echo(EchoRequest& req, EchoReply& reply, ::grpc::Status& status) { m_echo_stub->call_unary< EchoRequest, EchoReply >( req, &EchoService::StubInterface::AsyncEcho, - [&reply, &status, this](EchoReply& reply_, grpc::Status& status_) { + [&reply, &status, this](EchoReply& reply_, ::grpc::Status& status_) { reply = reply_; status = status_; process_echo_reply(); @@ -114,11 +114,11 @@ class AuthBaseTest : public ::testing::Test { } } - void call_async_generic_rpc(grpc::Status& status) { - grpc::ByteBuffer req; + void call_async_generic_rpc(::grpc::Status& status) { + ::grpc::ByteBuffer req; m_generic_stub->call_unary( req, GENERIC_METHOD, - [&status, this](grpc::ByteBuffer&, ::grpc::Status& status_) { + [&status, this](::grpc::ByteBuffer&, ::grpc::Status& status_) { status = status_; m_generic_received.store(true); m_cv.notify_all(); @@ -165,12 +165,12 @@ TEST_F(AuthDisableTest, allow_on_disabled_mode) { // server sets the same message as response req.set_message("dummy_msg"); EchoReply reply; - grpc::Status status; + ::grpc::Status status; call_async_echo(req, reply, status); EXPECT_TRUE(status.ok()); EXPECT_EQ(req.message(), reply.message()); - grpc::Status generic_status; + ::grpc::Status generic_status; call_async_generic_rpc(status); EXPECT_TRUE(generic_status.ok()); } @@ -224,15 +224,15 @@ TEST_F(AuthServerOnlyTest, fail_on_no_client_auth) { // server sets the same message as response req.set_message("dummy_msg"); EchoReply reply; - grpc::Status status; + ::grpc::Status status; call_async_echo(req, reply, status); EXPECT_FALSE(status.ok()); - EXPECT_EQ(status.error_code(), grpc::UNAUTHENTICATED); + EXPECT_EQ(status.error_code(), ::grpc::UNAUTHENTICATED); EXPECT_EQ(status.error_message(), "missing header authorization"); - grpc::Status generic_status; + ::grpc::Status generic_status; call_async_generic_rpc(generic_status); - EXPECT_EQ(generic_status.error_code(), grpc::UNAUTHENTICATED); + EXPECT_EQ(generic_status.error_code(), ::grpc::UNAUTHENTICATED); } class TokenApiImpl : public TokenApi { @@ -288,12 +288,12 @@ TEST_F(AuthEnableTest, allow_with_auth) { EchoRequest req; req.set_message("dummy_msg"); EchoReply reply; - grpc::Status status; + ::grpc::Status status; call_async_echo(req, reply, status); EXPECT_TRUE(status.ok()); EXPECT_EQ(req.message(), reply.message()); - grpc::Status generic_status; + ::grpc::Status generic_status; call_async_generic_rpc(status); EXPECT_TRUE(generic_status.ok()); } @@ -331,7 +331,7 @@ void validate_generic_reply(const std::string& method, ::grpc::Status& status) { if (method == "method1" || method == "method2") { EXPECT_TRUE(status.ok()); } else { - EXPECT_EQ(status.error_code(), grpc::UNIMPLEMENTED); + EXPECT_EQ(status.error_code(), ::grpc::UNIMPLEMENTED); } } @@ -341,11 +341,11 @@ TEST(GenericServiceDeathTest, basic_test) { // register rpc before generic service is registered #ifndef NDEBUG ASSERT_DEATH(g_grpc_server->register_generic_rpc( - "method1", [](boost::intrusive_ptr< GenericRpcData >& rpc_data) { return true; }), + "method1", [](boost::intrusive_ptr< GenericRpcData >&) { return true; }), "Assertion .* failed"); #else EXPECT_FALSE(g_grpc_server->register_generic_rpc( - "method1", [](boost::intrusive_ptr< GenericRpcData >& rpc_data) { return true; })); + "method1", [](boost::intrusive_ptr< GenericRpcData >&) { return true; })); #endif ASSERT_TRUE(g_grpc_server->register_async_generic_service()); @@ -354,53 +354,53 @@ TEST(GenericServiceDeathTest, basic_test) { // register rpc before server is run #ifndef NDEBUG ASSERT_DEATH(g_grpc_server->register_generic_rpc( - "method1", [](boost::intrusive_ptr< GenericRpcData >& rpc_data) { return true; }), + "method1", [](boost::intrusive_ptr< GenericRpcData >&) { return true; }), "Assertion .* failed"); #else EXPECT_FALSE(g_grpc_server->register_generic_rpc( - "method1", [](boost::intrusive_ptr< GenericRpcData >& rpc_data) { return true; })); + "method1", [](boost::intrusive_ptr< GenericRpcData >&) { return true; })); #endif g_grpc_server->run(); EXPECT_TRUE(g_grpc_server->register_generic_rpc( - "method1", [](boost::intrusive_ptr< GenericRpcData >& rpc_data) { return true; })); + "method1", [](boost::intrusive_ptr< GenericRpcData >&) { return true; })); EXPECT_TRUE(g_grpc_server->register_generic_rpc( - "method2", [](boost::intrusive_ptr< GenericRpcData >& rpc_data) { return true; })); + "method2", [](boost::intrusive_ptr< GenericRpcData >&) { return true; })); // re-register method 1 EXPECT_FALSE(g_grpc_server->register_generic_rpc( - "method1", [](boost::intrusive_ptr< GenericRpcData >& rpc_data) { return true; })); + "method1", [](boost::intrusive_ptr< GenericRpcData >&) { return true; })); auto client = std::make_unique< GrpcAsyncClient >("0.0.0.0:56789", "", ""); client->init(); GrpcAsyncClientWorker::create_worker("generic_worker", 1); auto generic_stub = client->make_generic_stub("generic_worker"); - grpc::ByteBuffer cli_buf; + ::grpc::ByteBuffer cli_buf; generic_stub->call_unary( cli_buf, "method1", - [method = "method1"](grpc::ByteBuffer& reply, ::grpc::Status& status) { + [method = "method1"](::grpc::ByteBuffer&, ::grpc::Status& status) { validate_generic_reply(method, status); }, 1); generic_stub->call_unary( cli_buf, "method2", - [method = "method2"](grpc::ByteBuffer& reply, ::grpc::Status& status) { + [method = "method2"](::grpc::ByteBuffer&, ::grpc::Status& status) { validate_generic_reply(method, status); }, 1); generic_stub->call_unary( cli_buf, "method_unknown", - [method = "method_unknown"](grpc::ByteBuffer& reply, ::grpc::Status& status) { + [method = "method_unknown"](::grpc::ByteBuffer&, ::grpc::Status& status) { validate_generic_reply(method, status); }, 1); } -} // namespace grpc_helper::testing +} // namespace sisl::grpc::testing int main(int argc, char* argv[]) { ::testing::InitGoogleMock(&argc, argv); SISL_OPTIONS_LOAD(argc, argv, logging) sisl::logging::SetLogger("auth_test"); int ret{RUN_ALL_TESTS()}; - grpc_helper::GrpcAsyncClientWorker::shutdown_all(); + sisl::GrpcAsyncClientWorker::shutdown_all(); return ret; } diff --git a/src/grpc_helper/tests/unit/basic_http_server.hpp b/src/grpc/tests/unit/basic_http_server.hpp similarity index 89% rename from src/grpc_helper/tests/unit/basic_http_server.hpp rename to src/grpc/tests/unit/basic_http_server.hpp index 1c15d85e..9087a5b9 100644 --- a/src/grpc_helper/tests/unit/basic_http_server.hpp +++ b/src/grpc/tests/unit/basic_http_server.hpp @@ -42,11 +42,11 @@ class TokenApi : public APIBase { Pistache::Rest::Routes::bind(&TokenApi::get_key_handler, this)); } - void get_token_handler(const Pistache::Rest::Request& request, Pistache::Http::ResponseWriter response) { + void get_token_handler(const Pistache::Rest::Request&, Pistache::Http::ResponseWriter response) { this->get_token_impl(response); } - void get_key_handler(const Pistache::Rest::Request& request, Pistache::Http::ResponseWriter response) { + void get_key_handler(const Pistache::Rest::Request&, Pistache::Http::ResponseWriter response) { this->get_key_impl(response); } @@ -58,4 +58,4 @@ class TokenApi : public APIBase { Pistache::Rest::Routes::Remove(m_router, Pistache::Http::Method::Post, "/token"); Pistache::Rest::Routes::Remove(m_router, Pistache::Http::Method::Get, "/download_key"); } -}; \ No newline at end of file +}; diff --git a/src/grpc_helper/tests/unit/test_token.hpp b/src/grpc/tests/unit/test_token.hpp similarity index 98% rename from src/grpc_helper/tests/unit/test_token.hpp rename to src/grpc/tests/unit/test_token.hpp index 416c962c..629e2530 100644 --- a/src/grpc_helper/tests/unit/test_token.hpp +++ b/src/grpc/tests/unit/test_token.hpp @@ -1,6 +1,6 @@ #pragma once -namespace grpc_helper::testing { +namespace sisl::grpc::testing { // public and private keys for unit test static const std::string rsa_pub_key = "-----BEGIN PUBLIC KEY-----\n" @@ -69,4 +69,4 @@ struct TestToken { private: token_t token; }; -} // namespace grpc_helper::testing \ No newline at end of file +} // namespace sisl::grpc::testing diff --git a/src/grpc_helper/utils.hpp b/src/grpc/utils.hpp similarity index 88% rename from src/grpc_helper/utils.hpp rename to src/grpc/utils.hpp index d579fe41..cbefdae0 100644 --- a/src/grpc_helper/utils.hpp +++ b/src/grpc/utils.hpp @@ -3,7 +3,7 @@ #include #include -namespace grpc_helper { +namespace sisl { static bool get_file_contents(const std::string& file_name, std::string& contents) { try { @@ -15,4 +15,4 @@ static bool get_file_contents(const std::string& file_name, std::string& content return false; } -} // namespace grpc_helper \ No newline at end of file +} // namespace sisl::grpc diff --git a/src/grpc_helper/CMakeLists.txt b/src/grpc_helper/CMakeLists.txt deleted file mode 100644 index aa44a113..00000000 --- a/src/grpc_helper/CMakeLists.txt +++ /dev/null @@ -1,14 +0,0 @@ -cmake_minimum_required (VERSION 3.11) - -set(CMAKE_CXX_STANDARD 17) - -add_library(${PROJECT_NAME}) -target_sources(${PROJECT_NAME} PRIVATE - rpc_server.cpp - rpc_client.cpp - ) -target_link_libraries(${PROJECT_NAME} - gRPC::grpc++ - sisl::sisl - Boost::Boost - ) From e12ac411b5347bea39a695ae6f13c6868f05650d Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 20 Dec 2022 13:53:43 -0700 Subject: [PATCH 238/385] sisl_grpc depends on sisl_auth_manager --- src/grpc/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/src/grpc/CMakeLists.txt b/src/grpc/CMakeLists.txt index b1a6d163..888f786b 100644 --- a/src/grpc/CMakeLists.txt +++ b/src/grpc/CMakeLists.txt @@ -13,6 +13,7 @@ target_sources(sisl_grpc PRIVATE rpc_client.cpp ) target_link_libraries(sisl_grpc + sisl_auth_manager gRPC::grpc++ cpr::cpr flatbuffers::flatbuffers From 67a59bc231dd735d08ba0b5d6fb00f3c98eb423e Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 20 Dec 2022 14:06:00 -0700 Subject: [PATCH 239/385] Added Apache-2.0 header to grpc files. --- include/sisl/grpc/generic_service.hpp | 14 ++++++++++++++ include/sisl/grpc/rpc_call.hpp | 14 ++++++++++++++ include/sisl/grpc/rpc_client.hpp | 14 ++++++++++++++ include/sisl/grpc/rpc_common.hpp | 14 ++++++++++++++ include/sisl/grpc/rpc_server.hpp | 14 ++++++++++++++ src/grpc/rpc_client.cpp | 14 ++++++++++++++ src/grpc/rpc_server.cpp | 18 +++++++++++++----- src/grpc/tests/function/echo_async_client.cpp | 14 ++++++++++++++ src/grpc/tests/function/echo_server.cpp | 18 +++++++++++++----- src/grpc/tests/function/echo_sync_client.cpp | 18 +++++++++++++----- src/grpc/tests/proto/grpc_helper_test.proto | 15 ++++++++++++++- src/grpc/tests/unit/auth_test.cpp | 14 ++++++++++++++ src/grpc/tests/unit/basic_http_server.hpp | 14 ++++++++++++++ src/grpc/tests/unit/test_token.hpp | 14 ++++++++++++++ src/grpc/utils.hpp | 14 ++++++++++++++ 15 files changed, 207 insertions(+), 16 deletions(-) diff --git a/include/sisl/grpc/generic_service.hpp b/include/sisl/grpc/generic_service.hpp index b5d59e56..a530fc49 100644 --- a/include/sisl/grpc/generic_service.hpp +++ b/include/sisl/grpc/generic_service.hpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #pragma once #include diff --git a/include/sisl/grpc/rpc_call.hpp b/include/sisl/grpc/rpc_call.hpp index f72a99de..2e4d5767 100644 --- a/include/sisl/grpc/rpc_call.hpp +++ b/include/sisl/grpc/rpc_call.hpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #pragma once #include diff --git a/include/sisl/grpc/rpc_client.hpp b/include/sisl/grpc/rpc_client.hpp index 8d5f9355..4da34005 100644 --- a/include/sisl/grpc/rpc_client.hpp +++ b/include/sisl/grpc/rpc_client.hpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #pragma once #include diff --git a/include/sisl/grpc/rpc_common.hpp b/include/sisl/grpc/rpc_common.hpp index bf2a0326..8c726df6 100644 --- a/include/sisl/grpc/rpc_common.hpp +++ b/include/sisl/grpc/rpc_common.hpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #pragma once namespace sisl { diff --git a/include/sisl/grpc/rpc_server.hpp b/include/sisl/grpc/rpc_server.hpp index 7c84c71f..a813b717 100644 --- a/include/sisl/grpc/rpc_server.hpp +++ b/include/sisl/grpc/rpc_server.hpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #pragma once #include diff --git a/src/grpc/rpc_client.cpp b/src/grpc/rpc_client.cpp index 0ea2c155..cc1c0baa 100644 --- a/src/grpc/rpc_client.cpp +++ b/src/grpc/rpc_client.cpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #include "sisl/grpc/rpc_client.hpp" #include "utils.hpp" diff --git a/src/grpc/rpc_server.cpp b/src/grpc/rpc_server.cpp index 1239a33e..e23af456 100644 --- a/src/grpc/rpc_server.cpp +++ b/src/grpc/rpc_server.cpp @@ -1,9 +1,17 @@ -/* - * server.cpp +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. * - * Created on: Oct 24, 2018 - */ - + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #include "sisl/grpc/rpc_server.hpp" #include "sisl/grpc/generic_service.hpp" #include "utils.hpp" diff --git a/src/grpc/tests/function/echo_async_client.cpp b/src/grpc/tests/function/echo_async_client.cpp index ce2655b2..dbd8b3fc 100644 --- a/src/grpc/tests/function/echo_async_client.cpp +++ b/src/grpc/tests/function/echo_async_client.cpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #include #include #include diff --git a/src/grpc/tests/function/echo_server.cpp b/src/grpc/tests/function/echo_server.cpp index 4e0726f2..f2a8290c 100644 --- a/src/grpc/tests/function/echo_server.cpp +++ b/src/grpc/tests/function/echo_server.cpp @@ -1,9 +1,17 @@ -/* - * echo_server.cpp +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. * - * Created on: Sep 22, 2018 - */ - + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #include #include #include diff --git a/src/grpc/tests/function/echo_sync_client.cpp b/src/grpc/tests/function/echo_sync_client.cpp index 3d904eb4..8ccc9e86 100644 --- a/src/grpc/tests/function/echo_sync_client.cpp +++ b/src/grpc/tests/function/echo_sync_client.cpp @@ -1,9 +1,17 @@ -/* - * echo_sync_client.cpp +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. * - * Created on: Sep 22, 2018 - */ - + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #include #include #include diff --git a/src/grpc/tests/proto/grpc_helper_test.proto b/src/grpc/tests/proto/grpc_helper_test.proto index 500816ae..d5844389 100644 --- a/src/grpc/tests/proto/grpc_helper_test.proto +++ b/src/grpc/tests/proto/grpc_helper_test.proto @@ -1,4 +1,17 @@ - +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ syntax = "proto3"; package grpc_helper_test; diff --git a/src/grpc/tests/unit/auth_test.cpp b/src/grpc/tests/unit/auth_test.cpp index 756c8d37..dc146c22 100644 --- a/src/grpc/tests/unit/auth_test.cpp +++ b/src/grpc/tests/unit/auth_test.cpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #include #include #include diff --git a/src/grpc/tests/unit/basic_http_server.hpp b/src/grpc/tests/unit/basic_http_server.hpp index 9087a5b9..f01038a1 100644 --- a/src/grpc/tests/unit/basic_http_server.hpp +++ b/src/grpc/tests/unit/basic_http_server.hpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #include #include #include diff --git a/src/grpc/tests/unit/test_token.hpp b/src/grpc/tests/unit/test_token.hpp index 629e2530..a50bcdad 100644 --- a/src/grpc/tests/unit/test_token.hpp +++ b/src/grpc/tests/unit/test_token.hpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #pragma once namespace sisl::grpc::testing { diff --git a/src/grpc/utils.hpp b/src/grpc/utils.hpp index cbefdae0..1f4b4bc7 100644 --- a/src/grpc/utils.hpp +++ b/src/grpc/utils.hpp @@ -1,3 +1,17 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ #pragma once #include From 9bc6dbefab8a1144e8473454c539515d1c206305 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 22 Feb 2023 09:15:02 -0700 Subject: [PATCH 240/385] Remove standard spec in gRPC module. --- src/grpc/CMakeLists.txt | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/grpc/CMakeLists.txt b/src/grpc/CMakeLists.txt index 888f786b..e59bf9ef 100644 --- a/src/grpc/CMakeLists.txt +++ b/src/grpc/CMakeLists.txt @@ -3,8 +3,6 @@ cmake_minimum_required (VERSION 3.11) find_package(flatbuffers REQUIRED) find_package(gRPC REQUIRED) -set(CMAKE_CXX_STANDARD 17) - include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/../auth_manager) add_library(sisl_grpc OBJECT) From 36f55da01d48cf9b446917a664c185c323e556c0 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 22 Feb 2023 09:34:01 -0700 Subject: [PATCH 241/385] Revert "Remove standard spec in gRPC module." This reverts commit 3565cee16cadcc68d606727b015e5c279840428a. --- src/grpc/CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/grpc/CMakeLists.txt b/src/grpc/CMakeLists.txt index e59bf9ef..888f786b 100644 --- a/src/grpc/CMakeLists.txt +++ b/src/grpc/CMakeLists.txt @@ -3,6 +3,8 @@ cmake_minimum_required (VERSION 3.11) find_package(flatbuffers REQUIRED) find_package(gRPC REQUIRED) +set(CMAKE_CXX_STANDARD 17) + include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/../auth_manager) add_library(sisl_grpc OBJECT) From 6c7fdc75da2af8b808d2ff1ac9185a1f96d94ec5 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 2 May 2023 10:57:40 -0700 Subject: [PATCH 242/385] Enforce conan 1.x series. --- .github/workflows/build_with_conan.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_with_conan.yml b/.github/workflows/build_with_conan.yml index c6561f9a..b08fdec0 100644 --- a/.github/workflows/build_with_conan.yml +++ b/.github/workflows/build_with_conan.yml @@ -35,7 +35,7 @@ jobs: - name: Install Conan run: | python -m pip install --upgrade pip - python -m pip install conan + python -m pip install conan==1.59.0 - name: Configure Conan # Configure conan profiles for build runner From 21c6968ce9e7f7e7232aff905ae989d92aec53f6 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 20 Dec 2022 14:08:17 -0700 Subject: [PATCH 243/385] Remove unused vars --- include/sisl/utility/obj_life_counter.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/sisl/utility/obj_life_counter.hpp b/include/sisl/utility/obj_life_counter.hpp index 640309ae..a37c2891 100644 --- a/include/sisl/utility/obj_life_counter.hpp +++ b/include/sisl/utility/obj_life_counter.hpp @@ -173,9 +173,9 @@ class ObjCounterRegistry { return instance; } - static void register_obj(const char* name, pair_of_atomic_ptrs ptrs) {} + static void register_obj(const char*, pair_of_atomic_ptrs) {} - static void foreach (const std::function< void(const std::string&, int64_t, int64_t) >& closure) {} + static void foreach (const std::function< void(const std::string&, int64_t, int64_t) >&) {} static inline void enable_metrics_reporting() {} }; #endif // _PRERELEASE From 5900f03a7c984cb697dd69458378e3c92f902ad0 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 2 May 2023 11:38:34 -0700 Subject: [PATCH 244/385] Name misspelling. --- README.md | 2 +- include/sisl/logging/logging.h | 2 +- include/sisl/options/options.h | 2 +- src/logging/logging.cpp | 2 +- src/logging/test/example.cpp | 2 +- src/options/options.cpp | 2 +- src/options/tests/basic.cpp | 2 +- src/version/version.cpp | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index cf6f02f6..70dfa6a6 100644 --- a/README.md +++ b/README.md @@ -94,7 +94,7 @@ Harihara Kadayam hkadayam@ebay.com Copyright 2021 eBay Inc. Primary Author: Harihara Kadayam -Primary Developers: Harihara Kadayam, Rishabh Mittal, Bryan Zimmerman, Brian Szymd +Primary Developers: Harihara Kadayam, Rishabh Mittal, Bryan Zimmerman, Brian Szmyd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0. diff --git a/include/sisl/logging/logging.h b/include/sisl/logging/logging.h index 802f414b..fa34b6c2 100644 --- a/include/sisl/logging/logging.h +++ b/include/sisl/logging/logging.h @@ -1,7 +1,7 @@ /********************************************************************************* * Modifications Copyright 2017-2019 eBay Inc. * - * Author/Developer(s): Brian Szymd, Harihara Kadayam + * Author/Developer(s): Brian Szmyd, Harihara Kadayam * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/include/sisl/options/options.h b/include/sisl/options/options.h index 14078512..687fb24d 100644 --- a/include/sisl/options/options.h +++ b/include/sisl/options/options.h @@ -1,7 +1,7 @@ /********************************************************************************* * Modifications Copyright 2017-2019 eBay Inc. * - * Author/Developer(s): Brian Szymd + * Author/Developer(s): Brian Szmyd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src/logging/logging.cpp b/src/logging/logging.cpp index fd4cdc0c..2ef4028d 100644 --- a/src/logging/logging.cpp +++ b/src/logging/logging.cpp @@ -1,7 +1,7 @@ /********************************************************************************* * Modifications Copyright 2017-2019 eBay Inc. * - * Author/Developer(s): Brian Szymd + * Author/Developer(s): Brian Szmyd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src/logging/test/example.cpp b/src/logging/test/example.cpp index 907dd6f2..d6df91b4 100644 --- a/src/logging/test/example.cpp +++ b/src/logging/test/example.cpp @@ -1,7 +1,7 @@ /********************************************************************************* * Modifications Copyright 2017-2019 eBay Inc. * - * Author/Developer(s): Brian Szymd + * Author/Developer(s): Brian Szmyd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src/options/options.cpp b/src/options/options.cpp index 33346a9f..545c21d1 100644 --- a/src/options/options.cpp +++ b/src/options/options.cpp @@ -1,7 +1,7 @@ /********************************************************************************* * Modifications Copyright 2017-2019 eBay Inc. * - * Author/Developer(s): Brian Szymd + * Author/Developer(s): Brian Szmyd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src/options/tests/basic.cpp b/src/options/tests/basic.cpp index 7102121c..5284ef49 100644 --- a/src/options/tests/basic.cpp +++ b/src/options/tests/basic.cpp @@ -1,7 +1,7 @@ /********************************************************************************* * Modifications Copyright 2017-2019 eBay Inc. * - * Author/Developer(s): Brian Szymd + * Author/Developer(s): Brian Szmyd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src/version/version.cpp b/src/version/version.cpp index faf1abd7..90f74605 100644 --- a/src/version/version.cpp +++ b/src/version/version.cpp @@ -1,7 +1,7 @@ /********************************************************************************* * Modifications Copyright 2017-2019 eBay Inc. * - * Author/Developer(s): Brian Szymd + * Author/Developer(s): Brian Szmyd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. From 7d679e3b851d7566c5944a1be2e0aba5459a3798 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 4 May 2023 10:06:07 -0700 Subject: [PATCH 245/385] GCC-11 should be default. --- .jenkins/Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index adaa623b..894495b6 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -1,5 +1,5 @@ pipeline { - agent any + agent { label 'sds-builder-2204' } environment { ARTIFACTORY_PASS = credentials('ARTIFACTORY_PASS') @@ -58,7 +58,7 @@ pipeline { stage('Build') { failFast true matrix { - agent { label 'sds-builder' } + agent { label 'sds-builder-2204' } axes { axis { name 'BUILD_TYPE' From 3380888ee54a02a3b8d911e6c772646be12d2c3e Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 4 May 2023 09:19:44 -0700 Subject: [PATCH 246/385] v8.4.1 - Added support for jThread, default build on gcc11 --- conanfile.py | 2 +- include/sisl/utility/thread_factory.hpp | 18 ++++++++- test_package/CMakeLists.txt | 2 +- test_package/test_package.cpp | 50 ++++++++++++++++--------- 4 files changed, 51 insertions(+), 21 deletions(-) diff --git a/conanfile.py b/conanfile.py index 72dc1252..a3b1eb7c 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.3.1" + version = "8.4.1" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/include/sisl/utility/thread_factory.hpp b/include/sisl/utility/thread_factory.hpp index 480921ba..13b29edf 100644 --- a/include/sisl/utility/thread_factory.hpp +++ b/include/sisl/utility/thread_factory.hpp @@ -16,9 +16,9 @@ *********************************************************************************/ #pragma once +#include #include #include -#include #include #ifdef _POSIX_THREADS @@ -58,7 +58,7 @@ std::unique_ptr< std::thread > make_unique_thread(const std::string name, F&& f, } template < class... Args > -std::thread named_thread(const std::string name, Args&&... args) { +auto named_thread(const std::string name, Args&&... args) { auto t = std::thread(std::forward< Args >(args)...); #ifdef _POSIX_THREADS #ifndef __APPLE__ @@ -71,4 +71,18 @@ std::thread named_thread(const std::string name, Args&&... args) { return t; } +template < class... Args > +auto named_jthread(const std::string name, Args&&... args) { + auto j = std::jthread(std::forward< Args >(args)...); +#ifdef _POSIX_THREADS +#ifndef __APPLE__ + auto tname = name.substr(0, 15); + auto ret = pthread_setname_np(j.native_handle(), tname.c_str()); + if (ret != 0) { LOGERROR("Set name of thread to {} failed ret={}", tname, ret); } +#endif /* __APPLE__ */ +#endif /* _POSIX_THREADS */ + + return j; +} + } // namespace sisl diff --git a/test_package/CMakeLists.txt b/test_package/CMakeLists.txt index cad2f97c..e581ed2a 100644 --- a/test_package/CMakeLists.txt +++ b/test_package/CMakeLists.txt @@ -7,5 +7,5 @@ conan_basic_setup(TARGETS) find_package(sisl CONFIG REQUIRED) add_executable(${PROJECT_NAME} test_package.cpp example_decl.cpp) -target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_17) +target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_20) target_link_libraries(${PROJECT_NAME} sisl::sisl) diff --git a/test_package/test_package.cpp b/test_package/test_package.cpp index c82da06f..a282b7c2 100644 --- a/test_package/test_package.cpp +++ b/test_package/test_package.cpp @@ -1,5 +1,6 @@ #include #include +#include SISL_LOGGING_INIT(my_module) @@ -7,22 +8,37 @@ SISL_OPTIONS_ENABLE(logging) extern void example_decl(); +using namespace std::chrono_literals; + int main(int argc, char** argv) { - SISL_OPTIONS_LOAD(argc, argv, logging) - sisl::logging::SetLogger(std::string(argv[0])); - spdlog::set_pattern("[%D %T%z] [%^%l%$] [%n] [%t] %v"); - - LOGTRACE("Trace"); - LOGDEBUG("Debug"); - LOGINFO("Info"); - LOGWARN("Warning"); - LOGERROR("Error"); - LOGCRITICAL("Critical"); - - example_decl(); - - auto custom_logger = sisl::logging::CreateCustomLogger("test_package", "_custom", false /*stdout*/, true /*stderr*/); - LOGINFOMOD_USING_LOGGER(my_module, custom_logger, "hello world"); - DEBUG_ASSERT(true, "Always True"); - return 0; + SISL_OPTIONS_LOAD(argc, argv, logging) + sisl::logging::SetLogger(std::string(argv[0])); + spdlog::set_pattern("[%D %T%z] [%^%l%$] [%n] [%t] %v"); + + LOGTRACE("Trace"); + LOGDEBUG("Debug"); + LOGINFO("Info"); + LOGWARN("Warning"); + LOGERROR("Error"); + LOGCRITICAL("Critical"); + + auto thread = sisl::named_jthread("example_decl", [](std::stop_token stoken) { + example_decl(); + while (!stoken.stop_requested()) { + LOGWARNMOD(my_module, "Sleeping..."); + std::this_thread::sleep_for(1500ms); + } + LOGINFOMOD(my_module, "Waking..."); + std::this_thread::sleep_for(1500ms); + }); + std::this_thread::sleep_for(300ms); + auto stop_source = thread.get_stop_source(); + + auto custom_logger = + sisl::logging::CreateCustomLogger("test_package", "_custom", false /*stdout*/, true /*stderr*/); + LOGINFOMOD_USING_LOGGER(my_module, custom_logger, "hello world"); + DEBUG_ASSERT(true, "Always True"); + RELEASE_ASSERT(stop_source.request_stop(), "Should be!"); + + return 0; } From 422312d3668928a1970b2d5fdf1965d893863150 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 4 May 2023 11:21:10 -0700 Subject: [PATCH 247/385] Reduce duplications. --- include/sisl/utility/thread_factory.hpp | 28 +++++++++++-------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/include/sisl/utility/thread_factory.hpp b/include/sisl/utility/thread_factory.hpp index 13b29edf..66adbc2b 100644 --- a/include/sisl/utility/thread_factory.hpp +++ b/include/sisl/utility/thread_factory.hpp @@ -57,31 +57,27 @@ std::unique_ptr< std::thread > make_unique_thread(const std::string name, F&& f, }); } +template < class T > +void name_thread([[maybe_unused]] T& t, std::string const& name) { +#if defined(_POSIX_THREADS) && !defined(__APPLE__) + auto ret = pthread_setname_np(t.native_handle(), name.substr(0, 15).c_str()); + if (ret != 0) LOGERROR("Set name of thread to {} failed ret={}", name, ret); +#else + LOGINFO("No ability to set thread name: {}", name); +#endif /* _POSIX_THREADS */ +} + template < class... Args > auto named_thread(const std::string name, Args&&... args) { auto t = std::thread(std::forward< Args >(args)...); -#ifdef _POSIX_THREADS -#ifndef __APPLE__ - auto tname = name.substr(0, 15); - auto ret = pthread_setname_np(t.native_handle(), tname.c_str()); - if (ret != 0) { LOGERROR("Set name of thread to {} failed ret={}", tname, ret); } -#endif /* __APPLE__ */ -#endif /* _POSIX_THREADS */ - + name_thread(t, name); return t; } template < class... Args > auto named_jthread(const std::string name, Args&&... args) { auto j = std::jthread(std::forward< Args >(args)...); -#ifdef _POSIX_THREADS -#ifndef __APPLE__ - auto tname = name.substr(0, 15); - auto ret = pthread_setname_np(j.native_handle(), tname.c_str()); - if (ret != 0) { LOGERROR("Set name of thread to {} failed ret={}", tname, ret); } -#endif /* __APPLE__ */ -#endif /* _POSIX_THREADS */ - + name_thread(j, name); return j; } From e60fd00392953f7744d0ec8647443979ef3c2260 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Fri, 5 May 2023 09:06:04 -0700 Subject: [PATCH 248/385] add the option to add metadata for async grpc client (#99) * add the option to add metadata for async grpc client * bump minor version * clear workers map after shutdown --------- Co-authored-by: Ravi Akella email = raakella@ebay.com --- conanfile.py | 2 +- include/sisl/grpc/rpc_client.hpp | 19 ++++++++ src/grpc/rpc_client.cpp | 1 + src/grpc/tests/proto/grpc_helper_test.proto | 2 + src/grpc/tests/unit/auth_test.cpp | 49 ++++++++++++++++++++- 5 files changed, 71 insertions(+), 2 deletions(-) diff --git a/conanfile.py b/conanfile.py index a3b1eb7c..657f7695 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.4.1" + version = "8.5.1" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/include/sisl/grpc/rpc_client.hpp b/include/sisl/grpc/rpc_client.hpp index 4da34005..b4082c7c 100644 --- a/include/sisl/grpc/rpc_client.hpp +++ b/include/sisl/grpc/rpc_client.hpp @@ -220,6 +220,9 @@ class GrpcAsyncClientWorker final { std::vector< std::thread > m_threads; }; +// common request id header +static std::string const request_id_header{"request_id"}; + class GrpcAsyncClient : public GrpcBaseClient { public: template < typename ServiceT > @@ -309,6 +312,22 @@ class GrpcAsyncClient : public GrpcBaseClient { cd->m_resp_reader_ptr->Finish(&cd->reply(), &cd->status(), (void*)cd); } + template < typename ReqT, typename RespT > + void call_unary(const ReqT& request, const unary_call_t< ReqT, RespT >& method, + const unary_callback_t< RespT >& callback, uint32_t deadline, + const std::vector< std::pair< std::string, std::string > >& metadata) { + auto data = new ClientRpcDataInternal< ReqT, RespT >(callback); + data->set_deadline(deadline); + for (auto const& [key, value] : metadata) { + data->add_metadata(key, value); + } + if (m_trf_client) { data->add_metadata("authorization", m_trf_client->get_typed_token()); } + // Note that async unary RPCs don't post a CQ tag in call + data->m_resp_reader_ptr = (m_stub.get()->*method)(&data->context(), request, cq()); + // CQ tag posted here + data->m_resp_reader_ptr->Finish(&data->reply(), &data->status(), (void*)data); + } + StubPtr< ServiceT > m_stub; GrpcAsyncClientWorker* m_worker; std::shared_ptr< sisl::TrfClient > m_trf_client; diff --git a/src/grpc/rpc_client.cpp b/src/grpc/rpc_client.cpp index cc1c0baa..abf4a053 100644 --- a/src/grpc/rpc_client.cpp +++ b/src/grpc/rpc_client.cpp @@ -125,6 +125,7 @@ void GrpcAsyncClientWorker::shutdown_all() { // g_core_codegen_interface it.second.reset(); } + s_workers.clear(); } void GrpcAsyncClient::GenericAsyncStub::call_unary(const grpc::ByteBuffer& request, const std::string& method, diff --git a/src/grpc/tests/proto/grpc_helper_test.proto b/src/grpc/tests/proto/grpc_helper_test.proto index d5844389..8414feab 100644 --- a/src/grpc/tests/proto/grpc_helper_test.proto +++ b/src/grpc/tests/proto/grpc_helper_test.proto @@ -19,6 +19,8 @@ package grpc_helper_test; service EchoService { rpc Echo(EchoRequest) returns (EchoReply) {} + rpc EchoMetadata(EchoRequest) returns (EchoReply) {} + rpc EchoLongReply(EchoRequest) returns (stream EchoReply) {} rpc LongEcho(stream EchoRequest) returns (EchoReply) {} diff --git a/src/grpc/tests/unit/auth_test.cpp b/src/grpc/tests/unit/auth_test.cpp index dc146c22..82960526 100644 --- a/src/grpc/tests/unit/auth_test.cpp +++ b/src/grpc/tests/unit/auth_test.cpp @@ -51,6 +51,9 @@ static void set_token_response(const std::string& raw_token) { } static const std::string GENERIC_METHOD{"generic_method"}; +static const std::vector< std::pair< std::string, std::string > > grpc_metadata{ + {sisl::request_id_header, "req_id1"}, {"key1", "val1"}, {"key2", "val2"}}; + class EchoServiceImpl final { public: ~EchoServiceImpl() = default; @@ -61,6 +64,22 @@ class EchoServiceImpl final { return true; } + bool echo_request_metadata(const AsyncRpcDataPtr< EchoService, EchoRequest, EchoReply >& rpc_data) { + LOGDEBUG("receive echo request {}", rpc_data->request().message()); + auto& client_headers = rpc_data->server_context().client_metadata(); + for (auto const& [key, val] : grpc_metadata) { + LOGINFO("metadata received, key = {}; val = {}", key, val) + auto const& it{client_headers.find(key)}; + if (it == client_headers.end()) { + rpc_data->set_status(::grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, ::grpc::string())); + } else if (it->second != val) { + LOGERROR("wrong value, expected = {}, actual = {}", val, it->second) + rpc_data->set_status(::grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, ::grpc::string())); + } + } + return true; + } + bool register_service(GrpcServer* server) { if (!server->register_async_service< EchoService >()) { LOGERROR("register service failed"); @@ -78,7 +97,12 @@ class EchoServiceImpl final { LOGERROR("register rpc failed"); return false; } - + if (!server->register_rpc< EchoService, EchoRequest, EchoReply, false >( + "EchoMetadata", &EchoService::AsyncService::RequestEchoMetadata, + std::bind(&EchoServiceImpl::echo_request_metadata, this, std::placeholders::_1))) { + LOGERROR("register rpc failed"); + return false; + } return true; } }; @@ -144,6 +168,21 @@ class AuthBaseTest : public ::testing::Test { } } + void call_async_echo_metadata(EchoRequest& req, EchoReply& reply, ::grpc::Status& status) { + m_echo_stub->call_unary< EchoRequest, EchoReply >( + req, &EchoService::StubInterface::AsyncEchoMetadata, + [&reply, &status, this](EchoReply& reply_, ::grpc::Status& status_) { + reply = reply_; + status = status_; + process_echo_reply(); + }, + 1, grpc_metadata); + { + std::unique_lock lk(m_wait_mtx); + m_cv.wait(lk, [this]() { return m_echo_received.load(); }); + } + } + protected: std::shared_ptr< AuthManager > m_auth_mgr; EchoServiceImpl* m_echo_impl = nullptr; @@ -189,6 +228,14 @@ TEST_F(AuthDisableTest, allow_on_disabled_mode) { EXPECT_TRUE(generic_status.ok()); } +TEST_F(AuthDisableTest, metadata) { + EchoRequest req; + EchoReply reply; + ::grpc::Status status; + call_async_echo_metadata(req, reply, status); + EXPECT_TRUE(status.ok()); +} + static auto const grant_path = std::string{"dummy_grant.cg"}; static void load_auth_settings() { From b27fb8a77114396c2027ce688f1a0c7eac2036c5 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 10 May 2023 12:22:04 -0500 Subject: [PATCH 249/385] Remove cxx-20 requirement, but support naming jthreads when available. (#101) --- CMakeLists.txt | 2 +- conanfile.py | 4 ++-- include/sisl/utility/thread_factory.hpp | 7 ------- src/grpc/CMakeLists.txt | 2 -- test_package/test_package.cpp | 5 +++-- 5 files changed, 6 insertions(+), 14 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 216549e8..6e6199d8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -8,7 +8,7 @@ set_property(GLOBAL PROPERTY USE_FOLDERS ON) # turn on folder hierarchies include (cmake/Flags.cmake) -set(CMAKE_CXX_STANDARD 20) +set(CMAKE_CXX_STANDARD 17) enable_testing() if(EXISTS ${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) diff --git a/conanfile.py b/conanfile.py index 657f7695..29f7e275 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.5.1" + version = "8.5.2" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") @@ -75,7 +75,7 @@ def requirements(self): def validate(self): if self.info.settings.compiler.cppstd: - check_min_cppstd(self, 20) + check_min_cppstd(self, 17) def configure(self): if self.options.shared: diff --git a/include/sisl/utility/thread_factory.hpp b/include/sisl/utility/thread_factory.hpp index e3ea3c13..d48e2661 100644 --- a/include/sisl/utility/thread_factory.hpp +++ b/include/sisl/utility/thread_factory.hpp @@ -74,11 +74,4 @@ auto named_thread(const std::string name, Args&&... args) { return t; } -template < class... Args > -auto named_jthread(const std::string name, Args&&... args) { - auto j = std::jthread(std::forward< Args >(args)...); - name_thread(j, name); - return j; -} - } // namespace sisl diff --git a/src/grpc/CMakeLists.txt b/src/grpc/CMakeLists.txt index 888f786b..e59bf9ef 100644 --- a/src/grpc/CMakeLists.txt +++ b/src/grpc/CMakeLists.txt @@ -3,8 +3,6 @@ cmake_minimum_required (VERSION 3.11) find_package(flatbuffers REQUIRED) find_package(gRPC REQUIRED) -set(CMAKE_CXX_STANDARD 17) - include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/../auth_manager) add_library(sisl_grpc OBJECT) diff --git a/test_package/test_package.cpp b/test_package/test_package.cpp index a282b7c2..f1fcb225 100644 --- a/test_package/test_package.cpp +++ b/test_package/test_package.cpp @@ -22,7 +22,7 @@ int main(int argc, char** argv) { LOGERROR("Error"); LOGCRITICAL("Critical"); - auto thread = sisl::named_jthread("example_decl", [](std::stop_token stoken) { + auto j_thread = std::jthread([](std::stop_token stoken) { example_decl(); while (!stoken.stop_requested()) { LOGWARNMOD(my_module, "Sleeping..."); @@ -31,8 +31,9 @@ int main(int argc, char** argv) { LOGINFOMOD(my_module, "Waking..."); std::this_thread::sleep_for(1500ms); }); + sisl::name_thread(j_thread, "example_decl"); std::this_thread::sleep_for(300ms); - auto stop_source = thread.get_stop_source(); + auto stop_source = j_thread.get_stop_source(); auto custom_logger = sisl::logging::CreateCustomLogger("test_package", "_custom", false /*stdout*/, true /*stderr*/); From d9508b6e8e7369944db8304dfd18c1f16192c3fa Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 15 May 2023 09:59:06 -0700 Subject: [PATCH 250/385] Fix fmt error --- include/sisl/grpc/rpc_call.hpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/include/sisl/grpc/rpc_call.hpp b/include/sisl/grpc/rpc_call.hpp index 2e4d5767..5407265e 100644 --- a/include/sisl/grpc/rpc_call.hpp +++ b/include/sisl/grpc/rpc_call.hpp @@ -33,9 +33,11 @@ SISL_LOGGING_DECL(grpc_server) #define RPC_SERVER_LOG(level, msg, ...) \ LOG##level##MOD_FMT(grpc_server, ([&](fmt::memory_buffer& buf, const char* __m, auto&&... args) -> bool { \ - fmt::format_to(fmt::appender(buf), "[{}:{}] [RPC={} id={}] ", file_name(__FILE__), \ - __LINE__, m_rpc_info->m_rpc_name, request_id()); \ - fmt::format_to(fmt::appender(buf), __m, std::forward< decltype(args) >(args)...); \ + fmt::vformat_to(fmt::appender{buf}, std::string_view{"[{}:{}] [RPC={} id={}] "}, \ + fmt::make_format_args(file_name(__FILE__), __LINE__, \ + m_rpc_info->m_rpc_name, request_id())); \ + fmt::vformat_to(fmt::appender{buf}, fmt::string_view{__m}, \ + fmt::make_format_args(std::forward< decltype(args) >(args)...)); \ return true; \ }), \ msg, ##__VA_ARGS__); @@ -424,4 +426,4 @@ class RpcData : public RpcDataAbstract, sisl::ObjLifeCounter< RpcData< ServiceT, RpcTagImpl m_completed_tag{this, &RpcData::on_request_completed}; }; -} // namespace sisl::grpc +} // namespace sisl From 6586dcbc50f699d8c11fde9a6b5b95ab6e6ea9b6 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 15 May 2023 17:04:33 -0500 Subject: [PATCH 251/385] Do not require 20 yet. (#103) --- CMakeLists.txt | 2 +- include/sisl/utility/thread_factory.hpp | 7 ------- test_package/test_package.cpp | 3 ++- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 17c0b472..f5a6c394 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -8,7 +8,7 @@ set_property(GLOBAL PROPERTY USE_FOLDERS ON) # turn on folder hierarchies include (cmake/Flags.cmake) -set(CMAKE_CXX_STANDARD 20) +set(CMAKE_CXX_STANDARD 17) enable_testing() if(EXISTS ${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) diff --git a/include/sisl/utility/thread_factory.hpp b/include/sisl/utility/thread_factory.hpp index e3ea3c13..d48e2661 100644 --- a/include/sisl/utility/thread_factory.hpp +++ b/include/sisl/utility/thread_factory.hpp @@ -74,11 +74,4 @@ auto named_thread(const std::string name, Args&&... args) { return t; } -template < class... Args > -auto named_jthread(const std::string name, Args&&... args) { - auto j = std::jthread(std::forward< Args >(args)...); - name_thread(j, name); - return j; -} - } // namespace sisl diff --git a/test_package/test_package.cpp b/test_package/test_package.cpp index a282b7c2..d02dd7a5 100644 --- a/test_package/test_package.cpp +++ b/test_package/test_package.cpp @@ -22,7 +22,7 @@ int main(int argc, char** argv) { LOGERROR("Error"); LOGCRITICAL("Critical"); - auto thread = sisl::named_jthread("example_decl", [](std::stop_token stoken) { + auto thread = std::jthread([](std::stop_token stoken) { example_decl(); while (!stoken.stop_requested()) { LOGWARNMOD(my_module, "Sleeping..."); @@ -31,6 +31,7 @@ int main(int argc, char** argv) { LOGINFOMOD(my_module, "Waking..."); std::this_thread::sleep_for(1500ms); }); + sisl::name_thread(thread, "example_thread"); std::this_thread::sleep_for(300ms); auto stop_source = thread.get_stop_source(); From ca5062fe8ec0205cced7ad3e5007054b1425f68e Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 25 May 2023 12:49:51 -0600 Subject: [PATCH 252/385] Added testing option. (#105) --- conanfile.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index dc081d9a..bde03d82 100644 --- a/conanfile.py +++ b/conanfile.py @@ -22,6 +22,7 @@ class SISLConan(ConanFile): "shared": ['True', 'False'], "fPIC": ['True', 'False'], "coverage": ['True', 'False'], + 'testing' : ['True', 'False'], "sanitize": ['True', 'False'], 'prerelease' : ['True', 'False'], 'malloc_impl' : ['libc', 'tcmalloc', 'jemalloc'], @@ -30,6 +31,7 @@ class SISLConan(ConanFile): 'shared': False, 'fPIC': True, 'coverage': False, + 'testing': True, 'sanitize': False, 'prerelease': True, 'malloc_impl': 'tcmalloc', @@ -91,6 +93,9 @@ def configure(self): raise ConanInvalidConfiguration("Sanitizer does not work with Code Coverage!") if self.options.coverage or self.options.sanitize: self.options.malloc_impl = 'libc' + if not self.options.testing: + if self.options.coverage or self.options.sanitize: + raise ConanInvalidConfiguration("Coverage/Sanitizer requires Testing!") def build(self): cmake = CMake(self) @@ -112,7 +117,8 @@ def build(self): cmake.configure(defs=definitions) cmake.build() - cmake.test(target=test_target, output_on_failure=True) + if self.options.testing: + cmake.test(target=test_target, output_on_failure=True) def package(self): lib_dir = join(self.package_folder, "lib") From 8a4f56fdf96b97f88ba2994512f2db8788e80e8a Mon Sep 17 00:00:00 2001 From: Sanal P Date: Mon, 15 May 2023 14:30:30 -0700 Subject: [PATCH 253/385] Add sobject manager for object tree view or update. --- conanfile.py | 2 +- include/sisl/sobject/sobject.hpp | 116 +++++++++++++++++++ src/CMakeLists.txt | 4 +- src/sobject/CMakeLists.txt | 12 ++ src/sobject/sobject.cpp | 171 +++++++++++++++++++++++++++++ src/sobject/tests/test_sobject.cpp | 161 +++++++++++++++++++++++++++ 6 files changed, 464 insertions(+), 2 deletions(-) create mode 100644 include/sisl/sobject/sobject.hpp create mode 100644 src/sobject/CMakeLists.txt create mode 100644 src/sobject/sobject.cpp create mode 100644 src/sobject/tests/test_sobject.cpp diff --git a/conanfile.py b/conanfile.py index 29f7e275..dfdbc00e 100644 --- a/conanfile.py +++ b/conanfile.py @@ -57,7 +57,7 @@ def requirements(self): self.requires("folly/2022.01.31.00") self.requires("grpc/1.48.0") self.requires("jwt-cpp/0.4.0") - self.requires("nlohmann_json/3.10.5") + self.requires("nlohmann_json/3.11.2") self.requires("prometheus-cpp/1.0.1") self.requires("spdlog/1.11.0") if self.settings.os in ["Linux"]: diff --git a/include/sisl/sobject/sobject.hpp b/include/sisl/sobject/sobject.hpp new file mode 100644 index 00000000..a72fcfc9 --- /dev/null +++ b/include/sisl/sobject/sobject.hpp @@ -0,0 +1,116 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace sisl { + +struct sobject_id { + std::string type; + std::string name; + bool empty() const { return type.empty() && name.empty(); } +}; + +typedef struct status_request { + nlohmann::json json; + bool do_recurse{false}; + int verbose_level = 0; + std::string obj_type; + std::string obj_name; + std::vector< std::string > obj_path; + int batch_size = 10; + std::string next_cursor; +} status_request; + +typedef struct status_response { + nlohmann::json json; +} status_response; + +using status_callback_type = std::function< status_response(const status_request&) >; +class sobject; +using sobject_ptr = std::shared_ptr< sobject >; + +[[maybe_unused]] static bool operator<(const sobject_id& id1, const sobject_id& id2) { + return id1.type < id2.type || ((id1.type == id2.type) && (id1.name < id2.name)); +} + +// To search using only the type as key. +[[maybe_unused]] static bool operator<(const sobject_id& id, const std::string& key_type) { return id.type < key_type; } + +[[maybe_unused]] static bool operator<(const std::string& key_type, const sobject_id& id) { return key_type < id.type; } + +[[maybe_unused]] static status_response status_error(std::string error_str) { + status_response response; + response.json["error"] = error_str; + return response; +} + +// Keeps a heirarchy of modules/subsystems which register their callbacks to be +// whenever a get status is called from the root or directly. +class sobject { +public: + sobject(const std::string& obj_type, const std::string& obj_name, status_callback_type cb) : + m_id{obj_type, obj_name}, m_status_cb(std::move(cb)) {} + + static sobject_ptr create(const std::string& obj_type, const std::string& obj_name, status_callback_type cb) { + return std::make_shared< sobject >(obj_type, obj_name, std::move(cb)); + } + + // Every subsystem add to the json object using update(). + status_response run_callback(const status_request& request) const; + sobject_ptr get_child(const std::string& name); + void add_child(const sobject_ptr child); + + sobject_id id() const { return m_id; } + std::string name() const { return m_id.name; } + std::string type() const { return m_id.type; } + +private: + sobject_id m_id; + std::shared_mutex m_mtx; + status_callback_type m_status_cb; + // Keep a graph of child nodes. Mapping from name to child status object. + std::map< sobject_id, sobject_ptr > m_children; +}; + +class sobject_manager { +public: + sobject_ptr create_object(const std::string& type, const std::string& name, status_callback_type cb); + status_response get_status(const status_request& request); + + status_response get_object_by_path(const status_request& request); + status_response get_object_types(); + status_response get_object_status(const sobject_id& id, const status_request& request); + status_response get_objects(const status_request& request); + +private: + // Mapping from object name to object metadata. + std::map< sobject_id, sobject_ptr, std::less<> > m_object_store; + std::set< std::string > m_object_types; + std::shared_mutex m_mtx; +}; + +} // namespace sisl diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 9ec2d5c7..15d33f20 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -4,6 +4,7 @@ add_subdirectory (grpc) add_subdirectory (logging) add_subdirectory (options) add_subdirectory (version) +add_subdirectory (sobject) # These sub-libraries currently do not support MacOS due to dependencies # on Folly and pistache. It is unknown if Windows is supported... @@ -32,7 +33,7 @@ if(${CMAKE_HOST_SYSTEM_NAME} STREQUAL Linux) $ $ ) - list(APPEND SISL_DEPS + list(APPEND SISL_DEPS Folly::Folly ) endif() @@ -43,6 +44,7 @@ add_library(sisl $ $ $ + $ ) if (DEFINED MALLOC_IMPL) diff --git a/src/sobject/CMakeLists.txt b/src/sobject/CMakeLists.txt new file mode 100644 index 00000000..3a24caaa --- /dev/null +++ b/src/sobject/CMakeLists.txt @@ -0,0 +1,12 @@ +cmake_minimum_required (VERSION 3.11) + +add_library(sisl_sobject_mgr OBJECT) +target_sources(sisl_sobject_mgr PRIVATE sobject.cpp) +target_link_libraries(sisl_sobject_mgr ${COMMON_DEPS}) + +add_executable(test_sobject) +target_sources(test_sobject PRIVATE + tests/test_sobject.cpp + ) +target_link_libraries(test_sobject sisl ${COMMON_DEPS} GTest::gtest) +add_test(NAME Sobject COMMAND test_sobject) diff --git a/src/sobject/sobject.cpp b/src/sobject/sobject.cpp new file mode 100644 index 00000000..3642d5c6 --- /dev/null +++ b/src/sobject/sobject.cpp @@ -0,0 +1,171 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ +#include +#include +#include "sisl/logging/logging.h" +#include "sisl/sobject/sobject.hpp" + +namespace sisl { + +sobject_ptr sobject::get_child(const std::string& name) { + std::shared_lock lock{m_mtx}; + for (const auto& [id, obj] : m_children) { + if (id.name == name) { return obj; } + } + return nullptr; +} + +// Add a child to current object. +void sobject::add_child(const sobject_ptr child) { + std::unique_lock lock{m_mtx}; + LOGINFO("Parent {}/{} added child {}/{}", type(), name(), child->type(), child->name()); + m_children.emplace(child->id(), child); +} + +status_response sobject::run_callback(const status_request& request) const { + status_response response; + response.json = nlohmann::json::object(); + response.json["type"] = m_id.type; + response.json["name"] = m_id.name; + response.json.update(m_status_cb(request).json); + response.json["children"] = nlohmann::json::object(); + + for (const auto& [id, obj] : m_children) { + if (response.json["children"][id.type] == nullptr) { + if (request.do_recurse) { + response.json["children"][id.type] == nlohmann::json::object(); + } else { + response.json["children"][id.type] == nlohmann::json::array(); + } + } + + if (request.do_recurse) { + // Call recursive. + auto child_json = obj->run_callback(request).json; + response.json["children"][id.type].emplace(id.name, child_json); + } else { + response.json["children"][id.type].push_back(id.name); + } + } + + return response; +} + +sobject_ptr sobject_manager::create_object(const std::string& type, const std::string& name, status_callback_type cb) { + std::unique_lock lock{m_mtx}; + auto obj = sobject::create(type, name, std::move(cb)); + sobject_id id{type, name}; + m_object_store[id] = obj; + m_object_types.insert(type); + LOGINFO("Created status object type={} name={}", type, name); + return obj; +} + +status_response sobject_manager::get_object_types() { + status_response response; + auto types = nlohmann::json::array(); + for (const auto& type : m_object_types) { + types.emplace_back(type); + } + + response.json["types"] = std::move(types); + return response; +} + +status_response sobject_manager::get_objects(const status_request& request) { + status_response response; + + auto iter = m_object_store.begin(); + if (!request.next_cursor.empty()) { + // Extract cursor which is of format "type:name" + auto index = request.next_cursor.find_first_of("^"); + if (index == std::string::npos) return status_error("Invalid cursor"); + auto type = request.next_cursor.substr(0, index); + auto name = request.next_cursor.substr(index + 1); + iter = m_object_store.find(sobject_id{type, name}); + if (iter == m_object_store.end()) return status_error("Cursor not found"); + } else if (request.obj_name.empty() && !request.obj_type.empty()) { + // Get all objects of type requested. + iter = m_object_store.find(request.obj_type); + } + + int batch_size = request.batch_size; + while (iter != m_object_store.end() && batch_size > 0) { + if (request.obj_name.empty() && !request.obj_type.empty() && request.obj_type != iter->first.type) { + // If only one type of objects requested. + return response; + } + + response.json[iter->first.name] = iter->second->run_callback(request).json; + iter++; + batch_size--; + } + + if (iter != m_object_store.end()) { response.json["next_cursor"] = iter->first.type + "^" + iter->first.name; } + + return response; +} + +status_response sobject_manager::get_object_status(const sobject_id& id, const status_request& request) { + auto iter = m_object_store.find(id); + if (iter == m_object_store.end()) { return status_error("Object identifier not found"); } + return iter->second->run_callback(request); +} + +status_response sobject_manager::get_object_by_path(const status_request& request) { + sobject_ptr obj = nullptr; + for (const auto& [id, obj_ptr] : m_object_store) { + if (id.name == request.obj_path[0]) { + obj = obj_ptr; + break; + } + } + + if (obj == nullptr) { return status_error("Object identifier not found"); } + for (uint32_t ii = 1; ii < request.obj_path.size(); ii++) { + obj = obj->get_child(request.obj_path[ii]); + if (obj == nullptr) { return status_error("Object identifier not found"); } + } + return obj->run_callback(request); +} + +status_response sobject_manager::get_status(const status_request& request) { + std::shared_lock lock{m_mtx}; + + if (!request.obj_path.empty()) { + // Return object status by path. + return get_object_by_path(request); + } + + // If both are empty, we return all the types. If both not empty, we return the specific object. + // Its an error to have name non empty and type empty. + if (!request.obj_name.empty() && request.obj_type.empty()) { return status_error("Type details not given"); } + + if (!request.obj_name.empty() && !request.obj_type.empty()) { + // Return specific object. + sobject_id id{request.obj_type, request.obj_name}; + return get_object_status(std::move(id), request); + } + + if (!request.do_recurse && request.obj_name.empty() && request.obj_type.empty()) { + return get_object_types(); + } + + // Dump all objects. + return get_objects(request); +} + +} // namespace sisl diff --git a/src/sobject/tests/test_sobject.cpp b/src/sobject/tests/test_sobject.cpp new file mode 100644 index 00000000..45d54bb9 --- /dev/null +++ b/src/sobject/tests/test_sobject.cpp @@ -0,0 +1,161 @@ +/********************************************************************************* + * Modifications Copyright 2023 eBay Inc. + * + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ +#include +#include +#include +#include +#include + +#include +#include + +#include +#include "sisl/sobject/sobject.hpp" + +using namespace sisl; +using namespace std; + +SISL_LOGGING_INIT(test_sobject) +SISL_OPTIONS_ENABLE(logging) + +namespace { + +class SobjectTest : public testing::Test { +public: + SobjectTest() : testing::Test() {} + + sobject_manager mgr; + +protected: + void SetUp() override {} + void TearDown() override {} +}; + +TEST_F(SobjectTest, BasicTest) { + + auto create_nodes = [this](sobject_ptr parent, string type, string prefix, int count) { + vector< sobject_ptr > res; + for (int i = 1; i <= count; i++) { + auto n = prefix + to_string(i); + auto cb = [n](const status_request&) { + status_response resp; + resp.json[n + "_metric"] = 1; + return resp; + }; + + auto o = mgr.create_object(type, n, cb); + res.push_back(o); + if (parent) { parent->add_child(o); } + } + return res; + }; + + // Create heirarchy of objects. + auto a_vec = create_nodes(nullptr, "A", "A", 2); + auto a_child_vec = create_nodes(a_vec[0], "A_child", "A_child", 2); + auto b_vec = create_nodes(nullptr, "B", "B", 2); + auto b_child_vec = create_nodes(b_vec[0], "B", "BB", 2); + auto c_vec = create_nodes(nullptr, "C", "C", 2); + auto c_child_vec = create_nodes(c_vec[0], "C_child", "C_child", 2); + auto c_child_child_vec = create_nodes(c_child_vec[0], "C_child_child", "C_child_child", 2); + + { + status_request req; + status_response resp; + resp = mgr.get_status(req); + ASSERT_EQ(resp.json["types"].size(), 6) << resp.json.dump(2); + + req.do_recurse = true; + req.batch_size = 100; + resp = mgr.get_status(req); + ASSERT_EQ(resp.json.size(), 14) << resp.json.dump(2); + } + + { + status_request req; + status_response resp; + req.obj_type = "B"; + req.obj_name = "B1"; + req.do_recurse = true; + resp = mgr.get_status(req); + ASSERT_EQ(resp.json["children"]["B"]["BB1"]["name"], "BB1") << resp.json.dump(2); + + req.do_recurse = false; + resp = mgr.get_status(req); + ASSERT_EQ(resp.json["children"]["B"].size(), 2) << resp.json.dump(2); + ASSERT_EQ(resp.json["children"]["B"][0], "BB1") << resp.json.dump(2); + } + { + status_request req; + status_response resp; + req.do_recurse = true; + req.obj_type = "C"; + resp = mgr.get_status(req); + ASSERT_EQ(resp.json.size(), 2) << resp.json.dump(2); + } + + { + status_request req; + status_response resp; + req.obj_type = "C_child_child"; + req.obj_name = "C_child_child2"; + resp = mgr.get_status(req); + LOGINFO("Response {}", resp.json.dump(2)); + ASSERT_EQ(resp.json["name"], "C_child_child2") << resp.json.dump(2); + ASSERT_EQ(resp.json["type"], "C_child_child") << resp.json.dump(2); + } + + { + status_request req; + status_response resp; + req.obj_path = {"C1", "C_child1", "C_child_child1"}; + req.do_recurse = false; + resp = mgr.get_status(req); + LOGINFO("Response {}", resp.json.dump(2)); + ASSERT_EQ(resp.json["name"], "C_child_child1") << resp.json.dump(2); + ASSERT_EQ(resp.json["type"], "C_child_child") << resp.json.dump(2); + } + + { + status_request req; + status_response resp; + auto d_vec = create_nodes(nullptr, "D", "D", 10); + req.do_recurse = true; + req.batch_size = 1; + req.obj_type = "D"; + auto count = 10; + while (true) { + resp = mgr.get_status(req); + count--; + LOGINFO("Response {}", resp.json.dump(2)); + if (!resp.json.contains("next_cursor")) break; + req.next_cursor = resp.json["next_cursor"]; + } + + ASSERT_EQ(count, 0) << resp.json.dump(2); + } +} +} // namespace + +int main(int argc, char* argv[]) { + SISL_OPTIONS_LOAD(argc, argv, logging) + ::testing::InitGoogleTest(&argc, argv); + sisl::logging::SetLogger("test_sobject"); + spdlog::set_pattern("[%D %T%z] [%^%l%$] [%n] [%t] %v"); + + const auto ret{RUN_ALL_TESTS()}; + return ret; +} From 19d55651ad5c5ec51112fc1aee97c8fc32549cd8 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Tue, 30 May 2023 19:42:50 -0700 Subject: [PATCH 254/385] Fixed some compilation issue on downstream --- conanfile.py | 2 +- include/sisl/utility/enum.hpp | 3 +++ include/sisl/utility/thread_factory.hpp | 2 ++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index bde03d82..9aeb3536 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "9.3.0" + version = "9.3.1" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" diff --git a/include/sisl/utility/enum.hpp b/include/sisl/utility/enum.hpp index a8b4c8cf..37b8bbb7 100644 --- a/include/sisl/utility/enum.hpp +++ b/include/sisl/utility/enum.hpp @@ -165,6 +165,9 @@ class EnumSupportBase { } \ [[nodiscard]] inline const std::string& enum_name(const FQEnumName##Support::enum_type es) { \ return FQEnumName##Support::instance().get_name(es); \ + } \ + [[nodiscard]] inline FQEnumName##Support::underlying_type enum_value(const FQEnumName##Support::enum_type es) { \ + return static_cast< FQEnumName##Support::underlying_type >(es); \ } #endif // SISL_ENUM_HPP diff --git a/include/sisl/utility/thread_factory.hpp b/include/sisl/utility/thread_factory.hpp index d48e2661..16e5323f 100644 --- a/include/sisl/utility/thread_factory.hpp +++ b/include/sisl/utility/thread_factory.hpp @@ -20,6 +20,8 @@ #include #include #include +#include +#include #ifdef _POSIX_THREADS #include From 459bc529dd9fb91c2d092f1d9187e53da0673f3f Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Wed, 31 May 2023 09:41:53 -0700 Subject: [PATCH 255/385] Comp cb (#106) * Add callback to be called after rpc completion * add completion cb for generic rpc. Add user context field to generic rpc. Make sanitize default for debug builds. * remove sanitize as default option. TODO: use a dummy package dependency like prerelease to cascade the sanitize option downstream revert asserting in completion cb: * wait in the unit test before comparing completion count --------- Co-authored-by: Ravi Akella email = raakella@ebay.com --- conanfile.py | 2 +- include/sisl/grpc/generic_service.hpp | 21 +++- include/sisl/grpc/rpc_common.hpp | 6 ++ include/sisl/grpc/rpc_server.hpp | 8 +- src/grpc/rpc_server.cpp | 25 ++++- src/grpc/tests/function/echo_async_client.cpp | 102 +++++++++--------- 6 files changed, 106 insertions(+), 58 deletions(-) diff --git a/conanfile.py b/conanfile.py index 9aeb3536..32df93c6 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "9.3.1" + version = "9.4.1" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" diff --git a/include/sisl/grpc/generic_service.hpp b/include/sisl/grpc/generic_service.hpp index a530fc49..2362f770 100644 --- a/include/sisl/grpc/generic_service.hpp +++ b/include/sisl/grpc/generic_service.hpp @@ -19,8 +19,6 @@ namespace sisl { -using generic_rpc_handler_cb_t = std::function< bool(boost::intrusive_ptr< GenericRpcData >&) >; - /** * Callbacks are registered by a name. The client generic stub uses the method name to call the RPC * We assume the Request and Response types are grpc::ByteBuffer @@ -36,6 +34,12 @@ class GenericRpcStaticInfo : public RpcStaticInfoBase { grpc::AsyncGenericService* m_generic_service; }; +class GenericRpcContextBase { +public: + virtual ~GenericRpcContextBase() = default; +}; +using generic_rpc_ctx_ptr = std::unique_ptr< GenericRpcContextBase >; + class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcData > { public: static RpcDataAbstract* make(GenericRpcStaticInfo* rpc_info, size_t queue_idx) { @@ -60,6 +64,9 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD void send_response() { m_stream.Write(m_response, static_cast< void* >(m_buf_write_tag.ref())); } + void set_context(generic_rpc_ctx_ptr ctx) { m_rpc_context = std::move(ctx); } + GenericRpcContextBase* get_context() { return m_rpc_context.get(); } + GenericRpcData(GenericRpcStaticInfo* rpc_info, size_t queue_idx) : RpcDataAbstract{queue_idx}, m_rpc_info{rpc_info}, m_stream(&m_ctx) {} @@ -70,6 +77,8 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD grpc::ByteBuffer m_request; grpc::ByteBuffer m_response; grpc::Status m_retstatus{grpc::Status::OK}; + // user can set and retrieve the context. Its life cycle is tied to that of rpc data. + generic_rpc_ctx_ptr m_rpc_context; private: bool do_authorization() { @@ -107,7 +116,13 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD return nullptr; } - RpcDataAbstract* on_request_completed(bool) { return nullptr; } + RpcDataAbstract* on_request_completed(bool) { + auto this_rpc_data = boost::intrusive_ptr< GenericRpcData >{this}; + if (m_retstatus.error_code() != grpc::StatusCode::UNIMPLEMENTED) { + RPCHelper::run_generic_completion_cb(m_rpc_info->m_server, m_ctx.method(), this_rpc_data); + } + return nullptr; + } struct RpcTagImpl : public RpcTag { using callback_type = RpcDataAbstract* (GenericRpcData::*)(bool); diff --git a/include/sisl/grpc/rpc_common.hpp b/include/sisl/grpc/rpc_common.hpp index 64be224e..2c99c79c 100644 --- a/include/sisl/grpc/rpc_common.hpp +++ b/include/sisl/grpc/rpc_common.hpp @@ -18,10 +18,16 @@ namespace sisl { class GrpcServer; class GenericRpcData; enum class AuthVerifyStatus : uint8_t; + +using generic_rpc_handler_cb_t = std::function< bool(boost::intrusive_ptr< GenericRpcData >&) >; +using generic_rpc_completed_cb_t = std::function< void(boost::intrusive_ptr< GenericRpcData >&) >; + struct RPCHelper { static bool has_server_shutdown(const GrpcServer* server); static bool run_generic_handler_cb(GrpcServer* server, const std::string& method, boost::intrusive_ptr< GenericRpcData >& rpc_data); + static void run_generic_completion_cb(GrpcServer* server, const std::string& method, + boost::intrusive_ptr< GenericRpcData >& rpc_data); static grpc::Status do_authorization(const GrpcServer* server, const grpc::ServerContext* srv_ctx); static grpc::StatusCode to_grpc_statuscode(const sisl::AuthVerifyStatus status); }; diff --git a/include/sisl/grpc/rpc_server.hpp b/include/sisl/grpc/rpc_server.hpp index a813b717..4a6c20f9 100644 --- a/include/sisl/grpc/rpc_server.hpp +++ b/include/sisl/grpc/rpc_server.hpp @@ -28,7 +28,6 @@ namespace sisl { class GenericRpcData; class GenericRpcStaticInfo; -using generic_rpc_handler_cb_t = std::function< bool(boost::intrusive_ptr< GenericRpcData >&) >; using rpc_thread_start_cb_t = std::function< void(uint32_t) >; @@ -118,8 +117,10 @@ class GrpcServer : private boost::noncopyable { // generic service methods bool run_generic_handler_cb(const std::string& rpc_name, boost::intrusive_ptr< GenericRpcData >& rpc_data); + void run_generic_completion_cb(const std::string& rpc_name, boost::intrusive_ptr< GenericRpcData >& rpc_data); bool register_async_generic_service(); - bool register_generic_rpc(const std::string& name, const generic_rpc_handler_cb_t& rpc_handler); + bool register_generic_rpc(const std::string& name, const generic_rpc_handler_cb_t& rpc_handler, + const generic_rpc_completed_cb_t& done_handler = nullptr); private: void handle_rpcs(uint32_t thread_num, const rpc_thread_start_cb_t& thread_start_cb); @@ -140,7 +141,8 @@ class GrpcServer : private boost::noncopyable { std::unique_ptr< grpc::AsyncGenericService > m_generic_service; std::unique_ptr< GenericRpcStaticInfo > m_generic_rpc_static_info; bool m_generic_service_registered{false}; - std::unordered_map< std::string, generic_rpc_handler_cb_t > m_generic_rpc_registry; + std::unordered_map< std::string, std::pair< generic_rpc_handler_cb_t, generic_rpc_completed_cb_t > > + m_generic_rpc_registry; std::shared_mutex m_generic_rpc_registry_mtx; }; } // namespace sisl::grpc diff --git a/src/grpc/rpc_server.cpp b/src/grpc/rpc_server.cpp index e23af456..824a3899 100644 --- a/src/grpc/rpc_server.cpp +++ b/src/grpc/rpc_server.cpp @@ -155,11 +155,23 @@ bool GrpcServer::run_generic_handler_cb(const std::string& rpc_name, boost::intr // respond immediately return true; } - cb = it->second; + cb = it->second.first; } return cb(rpc_data); } +void GrpcServer::run_generic_completion_cb(const std::string& rpc_name, + boost::intrusive_ptr< GenericRpcData >& rpc_data) { + generic_rpc_completed_cb_t cb; + { + std::shared_lock< std::shared_mutex > lock(m_generic_rpc_registry_mtx); + auto it = m_generic_rpc_registry.find(rpc_name); + LOGMSG_ASSERT(it != m_generic_rpc_registry.end(), "completion cb not found for rpc {}", rpc_name); + cb = it->second.second; + } + if (cb) { cb(rpc_data); } +} + bool GrpcServer::register_async_generic_service() { if (m_state.load() != ServerState::INITED) { LOGMSG_ASSERT(false, "register service in non-INITED state"); @@ -177,7 +189,8 @@ bool GrpcServer::register_async_generic_service() { return true; } -bool GrpcServer::register_generic_rpc(const std::string& name, const generic_rpc_handler_cb_t& rpc_handler) { +bool GrpcServer::register_generic_rpc(const std::string& name, const generic_rpc_handler_cb_t& rpc_handler, + const generic_rpc_completed_cb_t& done_handler) { if (m_state.load() != ServerState::RUNNING) { LOGMSG_ASSERT(false, "register service in non-INITED state"); return false; @@ -190,7 +203,8 @@ bool GrpcServer::register_generic_rpc(const std::string& name, const generic_rpc { std::unique_lock< std::shared_mutex > lock(m_generic_rpc_registry_mtx); - if (auto [it, happened]{m_generic_rpc_registry.emplace(name, rpc_handler)}; !happened) { + if (auto [it, happened]{m_generic_rpc_registry.emplace(name, std::make_pair(rpc_handler, done_handler))}; + !happened) { LOGWARN("duplicate generic RPC {} registration attempted", name); return false; } @@ -215,6 +229,11 @@ bool RPCHelper::run_generic_handler_cb(GrpcServer* server, const std::string& me return server->run_generic_handler_cb(method, rpc_data); } +void RPCHelper::run_generic_completion_cb(GrpcServer* server, const std::string& method, + boost::intrusive_ptr< GenericRpcData >& rpc_data) { + server->run_generic_completion_cb(method, rpc_data); +} + grpc::Status RPCHelper::do_authorization(const GrpcServer* server, const grpc::ServerContext* srv_ctx) { if (!server->is_auth_enabled()) { return grpc::Status(); } auto& client_headers = srv_ctx->client_metadata(); diff --git a/src/grpc/tests/function/echo_async_client.cpp b/src/grpc/tests/function/echo_async_client.cpp index dbd8b3fc..6e465b20 100644 --- a/src/grpc/tests/function/echo_async_client.cpp +++ b/src/grpc/tests/function/echo_async_client.cpp @@ -225,21 +225,6 @@ class TestServer { public: ~EchoServiceImpl() = default; - bool echo_request(const AsyncRpcDataPtr< EchoService, EchoRequest, EchoReply >& rpc_data) { - if ((++num_calls % 2) == 0) { - LOGDEBUGMOD(grpc_server, "respond async echo request {}", rpc_data->request().message()); - auto t = std::thread([rpc = rpc_data] { - rpc->response().set_message(rpc->request().message()); - rpc->send_response(); - }); - t.detach(); - return false; - } - LOGDEBUGMOD(grpc_server, "respond sync echo request {}", rpc_data->request().message()); - rpc_data->response().set_message(rpc_data->request().message()); - return true; - } - void register_service(GrpcServer* server) { auto const res = server->register_async_service< EchoService >(); RELEASE_ASSERT(res, "Failed to Register Service"); @@ -248,7 +233,20 @@ class TestServer { void register_rpcs(GrpcServer* server) { LOGINFO("register rpc calls"); auto const res = server->register_rpc< EchoService, EchoRequest, EchoReply, false >( - "Echo", &EchoService::AsyncService::RequestEcho, std::bind(&EchoServiceImpl::echo_request, this, _1)); + "Echo", &EchoService::AsyncService::RequestEcho, + [this](const AsyncRpcDataPtr< EchoService, EchoRequest, EchoReply >& rpc_data) { + if ((++num_calls % 2) == 0) { + LOGDEBUGMOD(grpc_server, "respond async echo request {}", rpc_data->request().message()); + std::thread([rpc = rpc_data] { + rpc->response().set_message(rpc->request().message()); + rpc->send_response(); + }).detach(); + return false; + } + LOGDEBUGMOD(grpc_server, "respond sync echo request {}", rpc_data->request().message()); + rpc_data->response().set_message(rpc_data->request().message()); + return true; + }); RELEASE_ASSERT(res, "register rpc failed"); } }; @@ -259,21 +257,6 @@ class TestServer { public: ~PingServiceImpl() = default; - bool ping_request(const AsyncRpcDataPtr< PingService, PingRequest, PingReply >& rpc_data) { - if ((++num_calls % 2) == 0) { - LOGDEBUGMOD(grpc_server, "respond async ping request {}", rpc_data->request().seqno()); - auto t = std::thread([rpc = rpc_data] { - rpc->response().set_seqno(rpc->request().seqno()); - rpc->send_response(); - }); - t.detach(); - return false; - } - LOGDEBUGMOD(grpc_server, "respond sync ping request {}", rpc_data->request().seqno()); - rpc_data->response().set_seqno(rpc_data->request().seqno()); - return true; - } - void register_service(GrpcServer* server) { auto const res = server->register_async_service< PingService >(); RELEASE_ASSERT(res, "Failed to Register Service"); @@ -282,13 +265,27 @@ class TestServer { void register_rpcs(GrpcServer* server) { LOGINFO("register rpc calls"); auto const res = server->register_rpc< PingService, PingRequest, PingReply, false >( - "Ping", &PingService::AsyncService::RequestPing, std::bind(&PingServiceImpl::ping_request, this, _1)); + "Ping", &PingService::AsyncService::RequestPing, + [this](const AsyncRpcDataPtr< PingService, PingRequest, PingReply >& rpc_data) { + if ((++num_calls % 2) == 0) { + LOGDEBUGMOD(grpc_server, "respond async ping request {}", rpc_data->request().seqno()); + std::thread([rpc = rpc_data] { + rpc->response().set_seqno(rpc->request().seqno()); + rpc->send_response(); + }).detach(); + return false; + } + LOGDEBUGMOD(grpc_server, "respond sync ping request {}", rpc_data->request().seqno()); + rpc_data->response().set_seqno(rpc_data->request().seqno()); + return true; + }); RELEASE_ASSERT(res, "register ping rpc failed"); } }; class GenericServiceImpl final { std::atomic< uint32_t > num_calls = 0ul; + std::atomic< uint32_t > num_completions = 0ul; static void set_response(const grpc::ByteBuffer& req, grpc::ByteBuffer& resp) { DataMessage cli_request; @@ -298,20 +295,6 @@ class TestServer { } public: - bool receive_data(boost::intrusive_ptr< GenericRpcData >& rpc_data) { - if ((++num_calls % 2) == 0) { - LOGDEBUGMOD(grpc_server, "respond async generic request, call_num {}", num_calls); - auto t = std::thread([rpc = rpc_data] { - set_response(rpc->request(), rpc->response()); - rpc->send_response(); - }); - t.detach(); - return false; - } - set_response(rpc_data->request(), rpc_data->response()); - return true; - } - void register_service(GrpcServer* server) { auto const res = server->register_async_generic_service(); RELEASE_ASSERT(res, "Failed to Register Service"); @@ -319,10 +302,31 @@ class TestServer { void register_rpcs(GrpcServer* server) { LOGINFO("register rpc calls"); - auto const res = - server->register_generic_rpc(GENERIC_METHOD, std::bind(&GenericServiceImpl::receive_data, this, _1)); + auto const res = server->register_generic_rpc( + GENERIC_METHOD, + [this](boost::intrusive_ptr< GenericRpcData >& rpc_data) { + if ((++num_calls % 2) == 0) { + LOGDEBUGMOD(grpc_server, "respond async generic request, call_num {}", num_calls); + std::thread([rpc = rpc_data] { + set_response(rpc->request(), rpc->response()); + rpc->send_response(); + }).detach(); + return false; + } + set_response(rpc_data->request(), rpc_data->response()); + return true; + }, + [this](boost::intrusive_ptr< GenericRpcData >&) { num_completions++; }); RELEASE_ASSERT(res, "register generic rpc failed"); } + + bool compare_counters() { + if (num_calls != num_completions) { + LOGERROR("num calls: {}, num_completions = {}", num_calls, num_completions); + return false; + } + return true; + } }; void start(const std::string& server_address) { @@ -346,6 +350,8 @@ class TestServer { } void shutdown() { + std::this_thread::sleep_for(std::chrono::milliseconds(500)); + RELEASE_ASSERT(m_generic_impl->compare_counters(), "num calls and num completions do not match!"); LOGINFO("Shutting down grpc server"); m_grpc_server->shutdown(); delete m_grpc_server; From e409667c9627cf01e8d760dd84a94b2dc86cbc95 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Tue, 6 Jun 2023 11:41:05 -0700 Subject: [PATCH 256/385] make comp cb a part of generic rpc data (#109) Co-authored-by: Ravi Akella email = raakella@ebay.com --- conanfile.py | 2 +- include/sisl/grpc/generic_service.hpp | 8 +++--- include/sisl/grpc/rpc_common.hpp | 2 -- include/sisl/grpc/rpc_server.hpp | 6 ++--- src/grpc/rpc_server.cpp | 25 +++---------------- src/grpc/tests/function/echo_async_client.cpp | 11 ++++---- 6 files changed, 16 insertions(+), 38 deletions(-) diff --git a/conanfile.py b/conanfile.py index 32df93c6..19876e0c 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "9.4.1" + version = "9.4.2" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" diff --git a/include/sisl/grpc/generic_service.hpp b/include/sisl/grpc/generic_service.hpp index 2362f770..9782638a 100644 --- a/include/sisl/grpc/generic_service.hpp +++ b/include/sisl/grpc/generic_service.hpp @@ -67,6 +67,8 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD void set_context(generic_rpc_ctx_ptr ctx) { m_rpc_context = std::move(ctx); } GenericRpcContextBase* get_context() { return m_rpc_context.get(); } + void set_comp_cb(generic_rpc_completed_cb_t const& comp_cb) { m_comp_cb = comp_cb; } + GenericRpcData(GenericRpcStaticInfo* rpc_info, size_t queue_idx) : RpcDataAbstract{queue_idx}, m_rpc_info{rpc_info}, m_stream(&m_ctx) {} @@ -79,6 +81,8 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD grpc::Status m_retstatus{grpc::Status::OK}; // user can set and retrieve the context. Its life cycle is tied to that of rpc data. generic_rpc_ctx_ptr m_rpc_context; + // the handler cb can fill in the completion cb if it needs one + generic_rpc_completed_cb_t m_comp_cb{nullptr}; private: bool do_authorization() { @@ -118,9 +122,7 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD RpcDataAbstract* on_request_completed(bool) { auto this_rpc_data = boost::intrusive_ptr< GenericRpcData >{this}; - if (m_retstatus.error_code() != grpc::StatusCode::UNIMPLEMENTED) { - RPCHelper::run_generic_completion_cb(m_rpc_info->m_server, m_ctx.method(), this_rpc_data); - } + if (m_comp_cb) { m_comp_cb(this_rpc_data); } return nullptr; } diff --git a/include/sisl/grpc/rpc_common.hpp b/include/sisl/grpc/rpc_common.hpp index 2c99c79c..593f31e2 100644 --- a/include/sisl/grpc/rpc_common.hpp +++ b/include/sisl/grpc/rpc_common.hpp @@ -26,8 +26,6 @@ struct RPCHelper { static bool has_server_shutdown(const GrpcServer* server); static bool run_generic_handler_cb(GrpcServer* server, const std::string& method, boost::intrusive_ptr< GenericRpcData >& rpc_data); - static void run_generic_completion_cb(GrpcServer* server, const std::string& method, - boost::intrusive_ptr< GenericRpcData >& rpc_data); static grpc::Status do_authorization(const GrpcServer* server, const grpc::ServerContext* srv_ctx); static grpc::StatusCode to_grpc_statuscode(const sisl::AuthVerifyStatus status); }; diff --git a/include/sisl/grpc/rpc_server.hpp b/include/sisl/grpc/rpc_server.hpp index 4a6c20f9..41defd2c 100644 --- a/include/sisl/grpc/rpc_server.hpp +++ b/include/sisl/grpc/rpc_server.hpp @@ -119,8 +119,7 @@ class GrpcServer : private boost::noncopyable { bool run_generic_handler_cb(const std::string& rpc_name, boost::intrusive_ptr< GenericRpcData >& rpc_data); void run_generic_completion_cb(const std::string& rpc_name, boost::intrusive_ptr< GenericRpcData >& rpc_data); bool register_async_generic_service(); - bool register_generic_rpc(const std::string& name, const generic_rpc_handler_cb_t& rpc_handler, - const generic_rpc_completed_cb_t& done_handler = nullptr); + bool register_generic_rpc(const std::string& name, const generic_rpc_handler_cb_t& rpc_handler); private: void handle_rpcs(uint32_t thread_num, const rpc_thread_start_cb_t& thread_start_cb); @@ -141,8 +140,7 @@ class GrpcServer : private boost::noncopyable { std::unique_ptr< grpc::AsyncGenericService > m_generic_service; std::unique_ptr< GenericRpcStaticInfo > m_generic_rpc_static_info; bool m_generic_service_registered{false}; - std::unordered_map< std::string, std::pair< generic_rpc_handler_cb_t, generic_rpc_completed_cb_t > > - m_generic_rpc_registry; + std::unordered_map< std::string, generic_rpc_handler_cb_t > m_generic_rpc_registry; std::shared_mutex m_generic_rpc_registry_mtx; }; } // namespace sisl::grpc diff --git a/src/grpc/rpc_server.cpp b/src/grpc/rpc_server.cpp index 824a3899..e23af456 100644 --- a/src/grpc/rpc_server.cpp +++ b/src/grpc/rpc_server.cpp @@ -155,23 +155,11 @@ bool GrpcServer::run_generic_handler_cb(const std::string& rpc_name, boost::intr // respond immediately return true; } - cb = it->second.first; + cb = it->second; } return cb(rpc_data); } -void GrpcServer::run_generic_completion_cb(const std::string& rpc_name, - boost::intrusive_ptr< GenericRpcData >& rpc_data) { - generic_rpc_completed_cb_t cb; - { - std::shared_lock< std::shared_mutex > lock(m_generic_rpc_registry_mtx); - auto it = m_generic_rpc_registry.find(rpc_name); - LOGMSG_ASSERT(it != m_generic_rpc_registry.end(), "completion cb not found for rpc {}", rpc_name); - cb = it->second.second; - } - if (cb) { cb(rpc_data); } -} - bool GrpcServer::register_async_generic_service() { if (m_state.load() != ServerState::INITED) { LOGMSG_ASSERT(false, "register service in non-INITED state"); @@ -189,8 +177,7 @@ bool GrpcServer::register_async_generic_service() { return true; } -bool GrpcServer::register_generic_rpc(const std::string& name, const generic_rpc_handler_cb_t& rpc_handler, - const generic_rpc_completed_cb_t& done_handler) { +bool GrpcServer::register_generic_rpc(const std::string& name, const generic_rpc_handler_cb_t& rpc_handler) { if (m_state.load() != ServerState::RUNNING) { LOGMSG_ASSERT(false, "register service in non-INITED state"); return false; @@ -203,8 +190,7 @@ bool GrpcServer::register_generic_rpc(const std::string& name, const generic_rpc { std::unique_lock< std::shared_mutex > lock(m_generic_rpc_registry_mtx); - if (auto [it, happened]{m_generic_rpc_registry.emplace(name, std::make_pair(rpc_handler, done_handler))}; - !happened) { + if (auto [it, happened]{m_generic_rpc_registry.emplace(name, rpc_handler)}; !happened) { LOGWARN("duplicate generic RPC {} registration attempted", name); return false; } @@ -229,11 +215,6 @@ bool RPCHelper::run_generic_handler_cb(GrpcServer* server, const std::string& me return server->run_generic_handler_cb(method, rpc_data); } -void RPCHelper::run_generic_completion_cb(GrpcServer* server, const std::string& method, - boost::intrusive_ptr< GenericRpcData >& rpc_data) { - server->run_generic_completion_cb(method, rpc_data); -} - grpc::Status RPCHelper::do_authorization(const GrpcServer* server, const grpc::ServerContext* srv_ctx) { if (!server->is_auth_enabled()) { return grpc::Status(); } auto& client_headers = srv_ctx->client_metadata(); diff --git a/src/grpc/tests/function/echo_async_client.cpp b/src/grpc/tests/function/echo_async_client.cpp index 6e465b20..de36ff1d 100644 --- a/src/grpc/tests/function/echo_async_client.cpp +++ b/src/grpc/tests/function/echo_async_client.cpp @@ -302,12 +302,12 @@ class TestServer { void register_rpcs(GrpcServer* server) { LOGINFO("register rpc calls"); - auto const res = server->register_generic_rpc( - GENERIC_METHOD, - [this](boost::intrusive_ptr< GenericRpcData >& rpc_data) { + auto const res = + server->register_generic_rpc(GENERIC_METHOD, [this](boost::intrusive_ptr< GenericRpcData >& rpc_data) { + rpc_data->set_comp_cb([this](boost::intrusive_ptr< GenericRpcData >&) { num_completions++; }); if ((++num_calls % 2) == 0) { LOGDEBUGMOD(grpc_server, "respond async generic request, call_num {}", num_calls); - std::thread([rpc = rpc_data] { + std::thread([this, rpc = rpc_data] { set_response(rpc->request(), rpc->response()); rpc->send_response(); }).detach(); @@ -315,8 +315,7 @@ class TestServer { } set_response(rpc_data->request(), rpc_data->response()); return true; - }, - [this](boost::intrusive_ptr< GenericRpcData >&) { num_completions++; }); + }); RELEASE_ASSERT(res, "register generic rpc failed"); } From 5a9e526e0dc8e0bf7187e1d00e7f44de765475c3 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 8 Jun 2023 16:54:25 -0600 Subject: [PATCH 257/385] Improve Actions (#111) * Missing dependencies (gperftools, jemalloc, prerelease, etc.) have recipes located in 3rd_party/ * CI logic to export each required dependency (should be script?) * Added matrix columns for missing variants that work (sanitize no-go for now) * Cache 3rd party libraries that rarely are updated --- .github/workflows/build_with_conan.yml | 59 ++++++- .jenkins/Jenkinsfile | 17 ++- 3rd_party/gperftools/conanfile.py | 49 ++++++ 3rd_party/jemalloc/conanfile.py | 195 ++++++++++++++++++++++++ 3rd_party/prerelease_dummy/conanfile.py | 23 +++ conanfile.py | 16 +- include/sisl/fds/malloc_helper.hpp | 8 +- src/fds/CMakeLists.txt | 2 +- 8 files changed, 351 insertions(+), 18 deletions(-) create mode 100644 3rd_party/gperftools/conanfile.py create mode 100644 3rd_party/jemalloc/conanfile.py create mode 100644 3rd_party/prerelease_dummy/conanfile.py diff --git a/.github/workflows/build_with_conan.yml b/.github/workflows/build_with_conan.yml index 800a96d1..95a6982c 100644 --- a/.github/workflows/build_with_conan.yml +++ b/.github/workflows/build_with_conan.yml @@ -23,15 +23,31 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.8"] + # Currently folly in conan-center has a bug when used with gcc 11.3 and sanitizer + # sanitize: ["True", "False"] build-type: ["Debug", "Release"] + malloc-impl: ["libc", "tcmalloc", "jemalloc"] + prerelease: ["True", "False"] + exclude: + - build-type: Debug + prerelease: "True" + - build-type: Debug + malloc-impl: tcmalloc + - build-type: Debug + malloc-impl: jemalloc + - build-type: Release + malloc-impl: libc + # - build-type: Release + # sanitize: "True" + - prerelease: "True" + malloc-impl: jemalloc steps: - - uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} + - name: Set up Python uses: actions/setup-python@v3 with: - python-version: ${{ matrix.python-version }} + python-version: "3.8" + - name: Install Conan run: | python -m pip install --upgrade pip @@ -41,11 +57,42 @@ jobs: # Configure conan profiles for build runner run: | conan user + conan profile new --detect default + sed -i 's,compiler.libcxx=libstdc++$,compiler.libcxx=libstdc++11,g' ~/.conan/profiles/default + + - name: Checkout Code + uses: actions/checkout@v3 + + - name: Cache 3rdParty + id: cache-3rd-party + uses: actions/cache@v3 + with: + path: | + ~/.conan/data/folly + ~/.conan/data/gperftools + ~/.conan/data/jemalloc + ~/.conan/data/prerelease_dummy + ~/.conan/data/spdlog + key: ${{ matrix.build-type }}-${{ matrix.malloc-impl }} + + - name: Export Dependencies + run: | + conan export 3rd_party/gperftools + conan export 3rd_party/jemalloc + conan export 3rd_party/prerelease_dummy - name: Install dependencies - # Build your program with the given configuration run: | - conan install -o malloc_impl=libc -o prerelease=False -s build_type=${{ matrix.build-type }} --build missing . + conan install \ + -o prerelease=${{ matrix.prerelease }} \ + -o malloc_impl=${{ matrix.malloc-impl }} \ + -s build_type=${{ matrix.build-type }} \ + --build missing . + #-o sanitize=${{ matrix.sanitize }} \ + + - name: Cleanup dep builds + run: | + rm -rf ~/.conan/data/*/*/*/*/build - name: Build # Build your program with the given configuration diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 2bed99e7..ffd2ec2d 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -93,11 +93,22 @@ pipeline { */ stage("Compile") { steps { + # For Sanitized Unit Testing (no publish) sh "conan create ${BUILD_MISSING} -o ${PROJECT}:sanitize=True -pr debug . ${PROJECT}/${TAG}" + sh "conan remove -f ${PROJECT}/${TAG}" + + # Debug/Release with libc for OM (disabled for now) + #sh "conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG}" + #sh "conan create ${BUILD_MISSING} -pr test . ${PROJECT}/${TAG}" + + # Debug w/ libc for downstream Sanitizer builds sh "conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG}" - sh "conan create ${BUILD_MISSING} -pr debug -o sisl:prerelease=False . ${PROJECT}/${TAG}" - sh "conan create ${BUILD_MISSING} -pr test . ${PROJECT}/${TAG}" - sh "conan create ${BUILD_MISSING} -pr test -o sisl:prerelease=False -pr test . ${PROJECT}/${TAG}" + + # Pre-Release for Stability + sh "conan create ${BUILD_MISSING} -o malloc_impl=tcmalloc -o sisl:prerelease=True -pr test . ${PROJECT}/${TAG}" + + # Release for Prod Build + sh "conan create ${BUILD_MISSING} -o malloc_impl=tcmalloc -pr test . ${PROJECT}/${TAG}" } } diff --git a/3rd_party/gperftools/conanfile.py b/3rd_party/gperftools/conanfile.py new file mode 100644 index 00000000..8d114f31 --- /dev/null +++ b/3rd_party/gperftools/conanfile.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from conans import ConanFile, AutoToolsBuildEnvironment, tools + +class GPerfToolsConan(ConanFile): + name = "gperftools" + version = "2.7.0" + release = "2.7" + license = "BSD" + + description = "A portable library to determine the call-chain of a C program" + settings = "os", "arch", "compiler", "build_type" + + options = {"shared": [True, False], "fPIC": [True, False]} + default_options = "shared=False", "fPIC=True" + + requires = (("xz_utils/5.2.4")) + + generators = "compiler_args" + + def source(self): + source_url = "https://github.com/{0}/{0}/releases/download".format(self.name) + tools.get("{0}/{1}-{2}/{1}-{2}.tar.gz".format(source_url, self.name, self.release)) + + def build(self): + env_build = AutoToolsBuildEnvironment(self) + env_build.cxx_flags.append("@conanbuildinfo.args") + if self.settings.build_type != "Debug": + env_build.defines.append('NDEBUG') + configure_args = ['--disable-dependency-tracking', '--enable-libunwind'] + if self.options.shared: + configure_args += ['--enable-shared=yes', '--enable-static=no'] + else: + configure_args += ['--enable-shared=no', '--enable-static=yes'] + env_build.configure(args=configure_args,configure_dir="{0}-{1}".format(self.name, self.release)) + env_build.make(args=["-j1"]) + + def package(self): + headers = ['heap-checker.h', 'heap-profiler.h', 'malloc_extension.h', 'malloc_extension_c.h', + 'malloc_hook.h', 'malloc_hook_c.h', 'profiler.h', 'stacktrace.h', 'tcmalloc.h'] + for header in headers: + self.copy("*{0}".format(header), dst="include/google", src="{0}-{1}/src/google".format(self.name, self.release), keep_path=False) + self.copy("*{0}".format(header), dst="include/gperftools", src="{0}-{1}/src/gperftools".format(self.name, self.release), keep_path=False) + self.copy("*.so*", dst="lib", keep_path=False, symlinks=True) + self.copy("*.a", dst="lib", keep_path=False, symlinks=True) + + def package_info(self): + self.cpp_info.libs = ['tcmalloc_minimal'] diff --git a/3rd_party/jemalloc/conanfile.py b/3rd_party/jemalloc/conanfile.py new file mode 100644 index 00000000..80c50505 --- /dev/null +++ b/3rd_party/jemalloc/conanfile.py @@ -0,0 +1,195 @@ +from conans import AutoToolsBuildEnvironment, ConanFile, MSBuild, tools +from conans.errors import ConanInvalidConfiguration +from conans.client.tools import msvs_toolset +import os +import shutil +import string + + +class JemallocConan(ConanFile): + name = "jemalloc" + description = "jemalloc is a general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support." + url = "https://github.com/conan-io/conan-center-index" + license = "BSD-2-Clause" + homepage = "http://jemalloc.net/" + topics = ("conan", "jemalloc", "malloc", "free") + settings = "os", "arch", "compiler" + version = "5.2.1" + source_url = "https://github.com/jemalloc/jemalloc/releases/download/5.2.1/jemalloc-5.2.1.tar.bz2" + options = { + "shared": [True, False], + "fPIC": [True, False], + "prefix": "ANY", + "enable_cxx": [True, False], + "enable_fill": [True, False], + "enable_xmalloc": [True, False], + "enable_readlinkat": [True, False], + "enable_syscall": [True, False], + "enable_lazy_lock": [True, False], + "enable_debug_logging": [True, False], + "enable_initial_exec_tls": [True, False], + "enable_libdl": [True, False], + } + default_options = { + "shared": False, + "fPIC": True, + "prefix": "", + "enable_cxx": True, + "enable_fill": True, + "enable_xmalloc": False, + "enable_readlinkat": False, + "enable_syscall": True, + "enable_lazy_lock": False, + "enable_debug_logging": False, + "enable_initial_exec_tls": True, + "enable_libdl": True, + } + + _autotools = None + + _source_subfolder = "source_subfolder" + + def config_options(self): + if self.settings.os == "Windows": + del self.options.fPIC + + def configure(self): + if self.settings.compiler.get_safe("libcxx") == "libc++": + raise ConanInvalidConfiguration("libc++ is missing a mutex implementation. Remove this when it is added") + if self.settings.compiler == "Visual Studio" and self.settings.compiler.version != "15": + # https://github.com/jemalloc/jemalloc/issues/1703 + raise ConanInvalidConfiguration("Only Visual Studio 15 2017 is supported. Please fix this if other versions are supported") + if self.options.shared: + del self.options.fPIC + if not self.options.enable_cxx: + del self.settings.compiler.libcxx + del self.settings.compiler.cppstd + if self.settings.compiler == "Visual Studio" and self.settings.arch not in ("x86_64", "x86"): + raise ConanInvalidConfiguration("Unsupported arch") + + def source(self): + tools.get(self.source_url) + os.rename("{}-{}".format(self.name, self.version), self._source_subfolder) + + def build_requirements(self): + if tools.os_info.is_windows and not os.environ.get("CONAN_BASH_PATH", None): + self.build_requires("msys2/20190524") + + @property + def _autotools_args(self): + conf_args = [ + "--with-jemalloc-prefix={}".format(self.options.prefix), + "--disable-debug", + "--enable-cxx" if self.options.enable_cxx else "--disable-cxx", + "--enable-fill" if self.options.enable_fill else "--disable-fill", + "--enable-xmalloc" if self.options.enable_cxx else "--disable-xmalloc", + "--enable-readlinkat" if self.options.enable_readlinkat else "--disable-readlinkat", + "--enable-syscall" if self.options.enable_syscall else "--disable-syscall", + "--enable-lazy-lock" if self.options.enable_lazy_lock else "--disable-lazy-lock", + "--enable-log" if self.options.enable_debug_logging else "--disable-log", + "--enable-initial-exec-tld" if self.options.enable_initial_exec_tls else "--disable-initial-exec-tls", + "--enable-libdl" if self.options.enable_libdl else "--disable-libdl", + ] + if self.options.shared: + conf_args.extend(["--enable-shared", "--disable-static"]) + else: + conf_args.extend(["--disable-shared", "--enable-static"]) + return conf_args + + def _configure_autotools(self): + if self._autotools: + return self._autotools + self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows) + self._autotools.configure(args=self._autotools_args, configure_dir=self._source_subfolder) + return self._autotools + + @property + def _msvc_build_type(self): + build_type = "Release" + if not self.options.shared: + build_type += "-static" + return build_type + + def _patch_sources(self): + if self.settings.os == "Windows": + makefile_in = os.path.join(self._source_subfolder, "Makefile.in") + tools.replace_in_file(makefile_in, + "DSO_LDFLAGS = @DSO_LDFLAGS@", + "DSO_LDFLAGS = @DSO_LDFLAGS@ -Wl,--out-implib,lib/libjemalloc.a") + tools.replace_in_file(makefile_in, + "\t$(INSTALL) -d $(LIBDIR)\n" + "\t$(INSTALL) -m 755 $(objroot)lib/$(LIBJEMALLOC).$(SOREV) $(LIBDIR)", + "\t$(INSTALL) -d $(BINDIR)\n" + "\t$(INSTALL) -d $(LIBDIR)\n" + "\t$(INSTALL) -m 755 $(objroot)lib/$(LIBJEMALLOC).$(SOREV) $(BINDIR)\n" + "\t$(INSTALL) -m 644 $(objroot)lib/libjemalloc.a $(LIBDIR)") + + def build(self): + self._patch_sources() + if self.settings.compiler == "Visual Studio": + with tools.vcvars(self.settings) if self.settings.compiler == "Visual Studio" else tools.no_op(): + with tools.environment_append({"CC": "cl", "CXX": "cl"}) if self.settings.compiler == "Visual Studio" else tools.no_op(): + with tools.chdir(self._source_subfolder): + # Do not use AutoToolsBuildEnvironment because we want to run configure as ./configure + self.run("./configure {}".format(" ".join(self._autotools_args)), win_bash=tools.os_info.is_windows) + msbuild = MSBuild(self) + # Do not use the 2015 solution: unresolved external symbols: test_hooks_libc_hook and test_hooks_arena_new_hook + sln_file = os.path.join(self._source_subfolder, "msvc", "jemalloc_vc2017.sln") + msbuild.build(sln_file, targets=["jemalloc"], build_type=self._msvc_build_type) + else: + autotools = self._configure_autotools() + autotools.make() + + @property + def _library_name(self): + libname = "jemalloc" + if self.settings.compiler == "Visual Studio": + if self.options.shared: + if "Release" == "Debug": + libname += "d" + else: + toolset = msvs_toolset(self.settings) + toolset_number = "".join(c for c in toolset if c in string.digits) + libname += "-vc{}-{}".format(toolset_number, self._msvc_build_type) + else: + if self.settings.os == "Windows": + if not self.options.shared: + libname += "_s" + else: + if not self.options.shared and self.options.fPIC: + libname += "_pic" + return libname + + def package(self): + self.copy(pattern="COPYING", src=self._source_subfolder, dst="licenses") + if self.settings.compiler == "Visual Studio": + arch_subdir = { + "x86_64": "x64", + "x86": "x86", + }[str(self.settings.arch)] + self.copy("*.lib", src=os.path.join(self._source_subfolder, "msvc", arch_subdir, self._msvc_build_type), dst=os.path.join(self.package_folder, "lib")) + self.copy("*.dll", src=os.path.join(self._source_subfolder, "msvc", arch_subdir, self._msvc_build_type), dst=os.path.join(self.package_folder, "bin")) + self.copy("jemalloc.h", src=os.path.join(self._source_subfolder, "include", "jemalloc"), dst=os.path.join(self.package_folder, "include", "jemalloc"), keep_path=True) + shutil.copytree(os.path.join(self._source_subfolder, "include", "msvc_compat"), + os.path.join(self.package_folder, "include", "msvc_compat")) + else: + autotools = self._configure_autotools() + # Use install_lib_XXX and install_include to avoid mixing binaries and dll's + autotools.make(target="install_lib_shared" if self.options.shared else "install_lib_static") + autotools.make(target="install_include") + if self.settings.os == "Windows" and self.settings.compiler == "gcc": + os.rename(os.path.join(self.package_folder, "lib", "{}.lib".format(self._library_name)), + os.path.join(self.package_folder, "lib", "lib{}.a".format(self._library_name))) + if not self.options.shared: + os.unlink(os.path.join(self.package_folder, "lib", "jemalloc.lib")) + + def package_info(self): + self.cpp_info.libs = [self._library_name] + self.cpp_info.includedirs = [os.path.join(self.package_folder, "include"), + os.path.join(self.package_folder, "include", "jemalloc")] + if self.settings.compiler == "Visual Studio": + self.cpp_info.includedirs.append(os.path.join(self.package_folder, "include", "msvc_compat")) + if not self.options.shared: + self.cpp_info.defines = ["JEMALLOC_EXPORT="] + if self.settings.os == "Linux": + self.cpp_info.system_libs.extend(["dl", "pthread"]) diff --git a/3rd_party/prerelease_dummy/conanfile.py b/3rd_party/prerelease_dummy/conanfile.py new file mode 100644 index 00000000..34daaeed --- /dev/null +++ b/3rd_party/prerelease_dummy/conanfile.py @@ -0,0 +1,23 @@ +from conans import ConanFile, CMake, tools + +class PrereleaseConan(ConanFile): + name = "prerelease_dummy" + version = "1.0.1" + homepage = "https://github.corp.ebay.com/SDS/prerelease_dummy" + description = "A dummy package to invoke PRERELEASE option" + topics = ("ebay", "nublox") + url = "https://github.corp.ebay.com/SDS/prerelease_dummy" + license = "Apache-2.0" + + settings = () + + exports_sources = ("LICENSE") + + def build(self): + pass + + def package(self): + pass + + def package_info(self): + self.cpp_info.cxxflags.append("-D_PRERELEASE=1") diff --git a/conanfile.py b/conanfile.py index 19876e0c..28bb8d94 100644 --- a/conanfile.py +++ b/conanfile.py @@ -4,7 +4,7 @@ from conan.tools.build import check_min_cppstd from conans import CMake -required_conan_version = ">=1.50.0" +required_conan_version = ">=1.52.0" class SISLConan(ConanFile): name = "sisl" @@ -33,12 +33,18 @@ class SISLConan(ConanFile): 'coverage': False, 'testing': True, 'sanitize': False, - 'prerelease': True, - 'malloc_impl': 'tcmalloc', + 'prerelease': False, + 'malloc_impl': 'libc', } generators = "cmake", "cmake_find_package" - exports_sources = ("CMakeLists.txt", "cmake/*", "include/*", "src/*", "LICENSE") + exports = ["LICENSE"] + exports_sources = ( + "CMakeLists.txt", + "cmake/*", + "include/*", + "src/*", + ) def build_requirements(self): self.build_requires("benchmark/1.7.1") @@ -86,6 +92,8 @@ def validate(self): check_min_cppstd(self, 20) def configure(self): + if self.settings.compiler in ["gcc"]: + self.options['pistache'].with_ssl: True if self.options.shared: del self.options.fPIC if self.settings.build_type == "Debug": diff --git a/include/sisl/fds/malloc_helper.hpp b/include/sisl/fds/malloc_helper.hpp index 18742629..d5d31967 100644 --- a/include/sisl/fds/malloc_helper.hpp +++ b/include/sisl/fds/malloc_helper.hpp @@ -569,7 +569,7 @@ static void print_my_jemalloc_data(void* const opaque, const char* const buf) { #endif #endif -[[maybe_unused]] static bool set_memory_release_rate(const double level) { +[[maybe_unused]] static bool set_memory_release_rate([[maybe_unused]] const double level) { #if defined(USING_TCMALLOC) MallocExtension::instance()->SetMemoryReleaseRate(level); return true; @@ -599,8 +599,8 @@ static std::atomic< bool > s_is_aggressive_decommit{false}; return true; } -[[maybe_unused]] static bool reset_aggressive_decommit_mem_if_needed(const size_t mem_usage, - const size_t aggressive_threshold) { +[[maybe_unused]] static bool +reset_aggressive_decommit_mem_if_needed([[maybe_unused]] const size_t mem_usage, [[maybe_unused]] const size_t aggressive_threshold) { #if defined(USING_TCMALLOC) if (tcmalloc_helper::s_is_aggressive_decommit.load(std::memory_order_acquire)) { LOGINFO("Total memory alloced={} is restored back to less than aggressive threshold limit {}, " @@ -628,7 +628,7 @@ static std::atomic< bool > s_is_aggressive_decommit{false}; return true; } -[[maybe_unused]] static bool release_mem_if_needed(const size_t soft_threshold, const size_t aggressive_threshold_in) { +[[maybe_unused]] static bool release_mem_if_needed([[maybe_unused]] const size_t soft_threshold, [[maybe_unused]] const size_t aggressive_threshold_in) { bool ret{false}; #if defined(USING_TCMALLOC) || defined(USING_JEMALLOC) || defined(USE_JEMALLOC) size_t mem_usage{0}; diff --git a/src/fds/CMakeLists.txt b/src/fds/CMakeLists.txt index c1468ad3..52d6379d 100644 --- a/src/fds/CMakeLists.txt +++ b/src/fds/CMakeLists.txt @@ -71,7 +71,7 @@ if (DEFINED MALLOC_IMPL) target_sources(test_jemalloc PRIVATE tests/test_jemalloc_helper.cpp ) - target_link_libraries(test_jemalloc sisl ${COMMON_DEPS} jemalloc GTest::gtest) + target_link_libraries(test_jemalloc sisl ${COMMON_DEPS} GTest::gtest) add_test(NAME TestJemalloc COMMAND test_jemalloc) elseif (${MALLOC_IMPL} STREQUAL "tcmalloc") add_executable(test_tcmalloc) From 0f93703a0259b4b5ff9ee659765e0af3f3d9e0fa Mon Sep 17 00:00:00 2001 From: Sanal P Date: Thu, 8 Jun 2023 16:05:04 -0700 Subject: [PATCH 258/385] Fix review comments --- conanfile.py | 2 +- include/sisl/sobject/sobject.hpp | 30 ++++++++++++++++++---------- src/sobject/sobject.cpp | 32 +++++++++++++++++++----------- src/sobject/tests/test_sobject.cpp | 7 +------ 4 files changed, 41 insertions(+), 30 deletions(-) diff --git a/conanfile.py b/conanfile.py index dfdbc00e..85dbabd1 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.5.2" + version = "8.5.3" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/include/sisl/sobject/sobject.hpp b/include/sisl/sobject/sobject.hpp index a72fcfc9..a2da9fca 100644 --- a/include/sisl/sobject/sobject.hpp +++ b/include/sisl/sobject/sobject.hpp @@ -28,10 +28,15 @@ namespace sisl { +// Each object is uniquely identified by its type and name. +// Ex: type=volume and name=volume_1, type=module and name=HomeBlks. struct sobject_id { std::string type; std::string name; bool empty() const { return type.empty() && name.empty(); } + [[maybe_unused]] bool operator<(const sobject_id& id) const { + return type < id.type || ((type == id.type) && (name < id.name)); + } }; typedef struct status_request { @@ -51,12 +56,9 @@ typedef struct status_response { using status_callback_type = std::function< status_response(const status_request&) >; class sobject; +class sobject_manager; using sobject_ptr = std::shared_ptr< sobject >; -[[maybe_unused]] static bool operator<(const sobject_id& id1, const sobject_id& id2) { - return id1.type < id2.type || ((id1.type == id2.type) && (id1.name < id2.name)); -} - // To search using only the type as key. [[maybe_unused]] static bool operator<(const sobject_id& id, const std::string& key_type) { return id.type < key_type; } @@ -68,15 +70,18 @@ using sobject_ptr = std::shared_ptr< sobject >; return response; } -// Keeps a heirarchy of modules/subsystems which register their callbacks to be +// Similar to sysfs kobject, sobject is a lightweight utility to create relationships +// between different classes and modules. This can be used to get or change the state of a class +// and all its children. Modules/subsystems which register their callbacks to be // whenever a get status is called from the root or directly. class sobject { public: - sobject(const std::string& obj_type, const std::string& obj_name, status_callback_type cb) : - m_id{obj_type, obj_name}, m_status_cb(std::move(cb)) {} + sobject(sobject_manager* mgr, const std::string& obj_type, const std::string& obj_name, status_callback_type cb) : + m_mgr(mgr), m_id{obj_type, obj_name}, m_status_cb(std::move(cb)) {} - static sobject_ptr create(const std::string& obj_type, const std::string& obj_name, status_callback_type cb) { - return std::make_shared< sobject >(obj_type, obj_name, std::move(cb)); + static sobject_ptr create(sobject_manager* mgr, const std::string& obj_type, const std::string& obj_name, + status_callback_type cb) { + return std::make_shared< sobject >(mgr, obj_type, obj_name, std::move(cb)); } // Every subsystem add to the json object using update(). @@ -89,6 +94,7 @@ class sobject { std::string type() const { return m_id.type; } private: + sobject_manager* m_mgr; sobject_id m_id; std::shared_mutex m_mtx; status_callback_type m_status_cb; @@ -102,14 +108,16 @@ class sobject_manager { status_response get_status(const status_request& request); status_response get_object_by_path(const status_request& request); - status_response get_object_types(); status_response get_object_status(const sobject_id& id, const status_request& request); status_response get_objects(const status_request& request); + status_response get_object_types(); + void add_object_type(const std::string& parent_type, const std::string& child_type); private: // Mapping from object name to object metadata. std::map< sobject_id, sobject_ptr, std::less<> > m_object_store; - std::set< std::string > m_object_types; + // Mapping from parent type to set of all children type to display the schema. + std::map< std::string, std::set< std::string > > m_object_types; std::shared_mutex m_mtx; }; diff --git a/src/sobject/sobject.cpp b/src/sobject/sobject.cpp index 3642d5c6..a091d337 100644 --- a/src/sobject/sobject.cpp +++ b/src/sobject/sobject.cpp @@ -23,16 +23,19 @@ namespace sisl { sobject_ptr sobject::get_child(const std::string& name) { std::shared_lock lock{m_mtx}; for (const auto& [id, obj] : m_children) { + // Return the first child found. We assume if user asks for a path + // there is a unique child in the parent. if (id.name == name) { return obj; } } return nullptr; } -// Add a child to current object. void sobject::add_child(const sobject_ptr child) { + // Add a child to current object. std::unique_lock lock{m_mtx}; LOGINFO("Parent {}/{} added child {}/{}", type(), name(), child->type(), child->name()); m_children.emplace(child->id(), child); + m_mgr->add_object_type(type(), child->type()); } status_response sobject::run_callback(const status_request& request) const { @@ -40,7 +43,8 @@ status_response sobject::run_callback(const status_request& request) const { response.json = nlohmann::json::object(); response.json["type"] = m_id.type; response.json["name"] = m_id.name; - response.json.update(m_status_cb(request).json); + auto res = m_status_cb(request).json; + if (!res.is_null()) { response.json.update(res); } response.json["children"] = nlohmann::json::object(); for (const auto& [id, obj] : m_children) { @@ -66,22 +70,28 @@ status_response sobject::run_callback(const status_request& request) const { sobject_ptr sobject_manager::create_object(const std::string& type, const std::string& name, status_callback_type cb) { std::unique_lock lock{m_mtx}; - auto obj = sobject::create(type, name, std::move(cb)); + auto obj = sobject::create(this, type, name, std::move(cb)); sobject_id id{type, name}; m_object_store[id] = obj; - m_object_types.insert(type); + if (m_object_types.count(type) == 0) { m_object_types[type] = {}; } LOGINFO("Created status object type={} name={}", type, name); return obj; } +void sobject_manager::add_object_type(const std::string& parent_type, const std::string& child_type) { + std::unique_lock lock{m_mtx}; + m_object_types[parent_type].insert(child_type); +} + status_response sobject_manager::get_object_types() { status_response response; - auto types = nlohmann::json::array(); - for (const auto& type : m_object_types) { - types.emplace_back(type); - } - response.json["types"] = std::move(types); + for (const auto& [type, children] : m_object_types) { + response.json[type] = nlohmann::json::array(); + for (const auto& child_type : children) { + response.json[type].emplace_back(child_type); + } + } return response; } @@ -160,9 +170,7 @@ status_response sobject_manager::get_status(const status_request& request) { return get_object_status(std::move(id), request); } - if (!request.do_recurse && request.obj_name.empty() && request.obj_type.empty()) { - return get_object_types(); - } + if (request.obj_name.empty() && request.obj_type.empty()) { return get_object_types(); } // Dump all objects. return get_objects(request); diff --git a/src/sobject/tests/test_sobject.cpp b/src/sobject/tests/test_sobject.cpp index 45d54bb9..4f4a283f 100644 --- a/src/sobject/tests/test_sobject.cpp +++ b/src/sobject/tests/test_sobject.cpp @@ -76,12 +76,7 @@ TEST_F(SobjectTest, BasicTest) { status_request req; status_response resp; resp = mgr.get_status(req); - ASSERT_EQ(resp.json["types"].size(), 6) << resp.json.dump(2); - - req.do_recurse = true; - req.batch_size = 100; - resp = mgr.get_status(req); - ASSERT_EQ(resp.json.size(), 14) << resp.json.dump(2); + LOGINFO("{}", resp.json.dump(2)); } { From 7a0ab21b444c781b4a708b14caf1dc01d376b5b0 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 9 Jun 2023 12:22:00 -0600 Subject: [PATCH 259/385] Reuse workflows (#113) * Split up cache. * Install again from local src. * Modularize * Add dependency. --- .github/workflows/build_dependencies.yml | 78 ++++++++++++++++++++++++ .github/workflows/build_with_conan.yml | 68 +++++++++------------ 2 files changed, 108 insertions(+), 38 deletions(-) create mode 100644 .github/workflows/build_dependencies.yml diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml new file mode 100644 index 00000000..34de77ed --- /dev/null +++ b/.github/workflows/build_dependencies.yml @@ -0,0 +1,78 @@ +name: Conan Build + +on: + workflow_call: + inputs: + branch: + required: true + type: string + build-type: + required: true + type: string + malloc-impl: + required: true + type: string + +jobs: + build: + runs-on: ubuntu-22.04 + steps: + - name: Setup Python + uses: actions/setup-python@v3 + with: + python-version: "3.8" + + - name: Install Conan + run: | + python -m pip install --upgrade pip + python -m pip install conan~=1.0 + conan user + conan profile new --detect default + # Set std::string to non-CoW C++11 version + sed -i 's,compiler.libcxx=libstdc++$,compiler.libcxx=libstdc++11,g' ~/.conan/profiles/default + + - name: Retrieve Dependencies + uses: actions/checkout@v3 + with: + repository: ebay/sisl + path: deps/sisl + ref: ${{ inputs.branch }} + + - name: Restore Sisl 3rdParty Cache + id: restore-cache-sisl + uses: actions/cache/restore@v3 + with: + path: | + ~/.conan/data/folly + ~/.conan/data/gperftools + ~/.conan/data/jemalloc + ~/.conan/data/prerelease_dummy + ~/.conan/data/spdlog + key: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }} + + - name: Install Dependencies + run: | + conan export deps/sisl/3rd_party/gperftools + conan export deps/sisl/3rd_party/jemalloc + conan export deps/sisl/3rd_party/prerelease_dummy + conan install \ + -o prerelease=True \ + -o malloc_impl=${{ inputs.malloc-impl }} \ + -s build_type=${{ inputs.build-type }} \ + --build missing deps/sisl + + - name: Cleanup dep builds + run: | + rm -rf ~/.conan/data/*/*/*/*/build + + - name: Save Sisl 3rdParty Cache + id: save-cache-sisl + uses: actions/cache/save@v3 + with: + path: | + ~/.conan/data/folly + ~/.conan/data/gperftools + ~/.conan/data/jemalloc + ~/.conan/data/prerelease_dummy + ~/.conan/data/spdlog + key: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }} diff --git a/.github/workflows/build_with_conan.yml b/.github/workflows/build_with_conan.yml index 95a6982c..9dd3a3d0 100644 --- a/.github/workflows/build_with_conan.yml +++ b/.github/workflows/build_with_conan.yml @@ -4,21 +4,32 @@ on: push: branches: - master - - 'stable/v*' pull_request: branches: - master - - 'stable/v*' - -#env: - # Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.) - #BUILD_TYPE: Release jobs: + build_deps: + strategy: + fail-fast: false + matrix: + build-type: ["Debug", "Release"] + malloc-impl: ["libc", "tcmalloc", "jemalloc"] + exclude: + - build-type: Debug + malloc-impl: tcmalloc + - build-type: Debug + malloc-impl: jemalloc + - build-type: Release + malloc-impl: libc + uses: ./.github/workflows/build_dependencies.yml + with: + branch: master + build-type: ${{ matrix.build-type }} + malloc-impl: ${{ matrix.malloc-impl }} + build: - # The CMake configure and build commands are platform agnostic and should work equally well on Windows or Mac. - # You can convert this to a matrix build if you need cross-platform coverage. - # See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix + needs: build_deps runs-on: ubuntu-22.04 strategy: fail-fast: false @@ -37,13 +48,11 @@ jobs: malloc-impl: jemalloc - build-type: Release malloc-impl: libc - # - build-type: Release - # sanitize: "True" - prerelease: "True" malloc-impl: jemalloc steps: - - name: Set up Python + - name: Setup Python uses: actions/setup-python@v3 with: python-version: "3.8" @@ -52,20 +61,14 @@ jobs: run: | python -m pip install --upgrade pip python -m pip install conan~=1.0 - - - name: Configure Conan - # Configure conan profiles for build runner - run: | conan user conan profile new --detect default + # Set std::string to non-CoW C++11 version sed -i 's,compiler.libcxx=libstdc++$,compiler.libcxx=libstdc++11,g' ~/.conan/profiles/default - - name: Checkout Code - uses: actions/checkout@v3 - - - name: Cache 3rdParty - id: cache-3rd-party - uses: actions/cache@v3 + - name: Restore Sisl 3rdParty Cache + id: restore-cache-sisl + uses: actions/cache/restore@v3 with: path: | ~/.conan/data/folly @@ -73,28 +76,17 @@ jobs: ~/.conan/data/jemalloc ~/.conan/data/prerelease_dummy ~/.conan/data/spdlog - key: ${{ matrix.build-type }}-${{ matrix.malloc-impl }} + key: SislDeps-${{ matrix.build-type }}-${{ matrix.malloc-impl }} - - name: Export Dependencies - run: | - conan export 3rd_party/gperftools - conan export 3rd_party/jemalloc - conan export 3rd_party/prerelease_dummy + - name: Checkout Code + uses: actions/checkout@v3 - - name: Install dependencies + - name: Build + # Build your program with the given configuration run: | conan install \ -o prerelease=${{ matrix.prerelease }} \ -o malloc_impl=${{ matrix.malloc-impl }} \ -s build_type=${{ matrix.build-type }} \ --build missing . - #-o sanitize=${{ matrix.sanitize }} \ - - - name: Cleanup dep builds - run: | - rm -rf ~/.conan/data/*/*/*/*/build - - - name: Build - # Build your program with the given configuration - run: | conan build . From b62657ee9a262347276009b702eed47f9fe79afa Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sat, 10 Jun 2023 18:08:31 -0600 Subject: [PATCH 260/385] Rename sisl job. --- .github/workflows/build_dependencies.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 34de77ed..68e99b2a 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -14,7 +14,7 @@ on: type: string jobs: - build: + BuildSislDeps: runs-on: ubuntu-22.04 steps: - name: Setup Python From adc3748245c474758d4d78f05baac61478f7c9d2 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sun, 11 Jun 2023 10:08:50 -0600 Subject: [PATCH 261/385] Split workflows for main and pr builds. --- .github/workflows/build_dependencies.yml | 6 +- ...ld_with_conan.yml => main_conan_build.yml} | 15 +--- .github/workflows/pr_cleanup_caches.yml | 33 +++++++ .github/workflows/pr_conan_build.yml | 85 +++++++++++++++++++ README.md | 2 +- 5 files changed, 126 insertions(+), 15 deletions(-) rename .github/workflows/{build_with_conan.yml => main_conan_build.yml} (88%) create mode 100644 .github/workflows/pr_cleanup_caches.yml create mode 100644 .github/workflows/pr_conan_build.yml diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 68e99b2a..9e2660d4 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -48,7 +48,7 @@ jobs: ~/.conan/data/jemalloc ~/.conan/data/prerelease_dummy ~/.conan/data/spdlog - key: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }} + key: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} - name: Install Dependencies run: | @@ -56,7 +56,7 @@ jobs: conan export deps/sisl/3rd_party/jemalloc conan export deps/sisl/3rd_party/prerelease_dummy conan install \ - -o prerelease=True \ + -o prerelease=${{ inputs.prerelease }} \ -o malloc_impl=${{ inputs.malloc-impl }} \ -s build_type=${{ inputs.build-type }} \ --build missing deps/sisl @@ -75,4 +75,4 @@ jobs: ~/.conan/data/jemalloc ~/.conan/data/prerelease_dummy ~/.conan/data/spdlog - key: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }} + key: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} diff --git a/.github/workflows/build_with_conan.yml b/.github/workflows/main_conan_build.yml similarity index 88% rename from .github/workflows/build_with_conan.yml rename to .github/workflows/main_conan_build.yml index 9dd3a3d0..2fd7cc15 100644 --- a/.github/workflows/build_with_conan.yml +++ b/.github/workflows/main_conan_build.yml @@ -1,12 +1,9 @@ -name: Conan Build +name: Sisl Main Build on: push: branches: - master - pull_request: - branches: - - master jobs: build_deps: @@ -27,6 +24,7 @@ jobs: branch: master build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} + prerelease: True build: needs: build_deps @@ -38,18 +36,14 @@ jobs: # sanitize: ["True", "False"] build-type: ["Debug", "Release"] malloc-impl: ["libc", "tcmalloc", "jemalloc"] - prerelease: ["True", "False"] + prerelease: ["True"] exclude: - - build-type: Debug - prerelease: "True" - build-type: Debug malloc-impl: tcmalloc - build-type: Debug malloc-impl: jemalloc - build-type: Release malloc-impl: libc - - prerelease: "True" - malloc-impl: jemalloc steps: - name: Setup Python @@ -76,13 +70,12 @@ jobs: ~/.conan/data/jemalloc ~/.conan/data/prerelease_dummy ~/.conan/data/spdlog - key: SislDeps-${{ matrix.build-type }}-${{ matrix.malloc-impl }} + key: SislDeps-${{ matrix.build-type }}-${{ matrix.malloc-impl }}-${{ matrix.prerelease }} - name: Checkout Code uses: actions/checkout@v3 - name: Build - # Build your program with the given configuration run: | conan install \ -o prerelease=${{ matrix.prerelease }} \ diff --git a/.github/workflows/pr_cleanup_caches.yml b/.github/workflows/pr_cleanup_caches.yml new file mode 100644 index 00000000..3936ed92 --- /dev/null +++ b/.github/workflows/pr_cleanup_caches.yml @@ -0,0 +1,33 @@ +name: cleanup caches by a branch +on: + pull_request: + types: + - closed + +jobs: + cleanup: + runs-on: ubuntu-latest + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Cleanup + run: | + gh extension install actions/gh-actions-cache + + REPO=${{ github.repository }} + BRANCH="refs/pull/${{ github.event.pull_request.number }}/merge" + + echo "Fetching list of cache key" + cacheKeysForPR=$(gh actions-cache list -R $REPO -B $BRANCH | cut -f 1 ) + + ## Setting this to not fail the workflow while deleting cache keys. + set +e + echo "Deleting caches..." + for cacheKey in $cacheKeysForPR + do + gh actions-cache delete $cacheKey -R $REPO -B $BRANCH --confirm + done + echo "Done" + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/pr_conan_build.yml b/.github/workflows/pr_conan_build.yml new file mode 100644 index 00000000..92c1a642 --- /dev/null +++ b/.github/workflows/pr_conan_build.yml @@ -0,0 +1,85 @@ +name: Sisl PR Build + +on: + pull_request: + branches: + - master + +jobs: + build_deps: + strategy: + fail-fast: true + matrix: + build-type: ["Debug", "Release"] + malloc-impl: ["libc", "tcmalloc", "jemalloc"] + exclude: + - build-type: Debug + malloc-impl: tcmalloc + - build-type: Debug + malloc-impl: jemalloc + - build-type: Release + malloc-impl: libc + uses: ./.github/workflows/build_dependencies.yml + with: + branch: master + build-type: ${{ matrix.build-type }} + malloc-impl: ${{ matrix.malloc-impl }} + prerelease: True + + build: + needs: build_deps + runs-on: ubuntu-22.04 + strategy: + fail-fast: false + matrix: + # Currently folly in conan-center has a bug when used with gcc 11.3 and sanitizer + # sanitize: ["True", "False"] + build-type: ["Debug", "Release"] + malloc-impl: ["libc", "tcmalloc", "jemalloc"] + prerelease: ["True"] + exclude: + - build-type: Debug + malloc-impl: tcmalloc + - build-type: Debug + malloc-impl: jemalloc + - build-type: Release + malloc-impl: libc + + steps: + - name: Setup Python + uses: actions/setup-python@v3 + with: + python-version: "3.8" + + - name: Install Conan + run: | + python -m pip install --upgrade pip + python -m pip install conan~=1.0 + conan user + conan profile new --detect default + # Set std::string to non-CoW C++11 version + sed -i 's,compiler.libcxx=libstdc++$,compiler.libcxx=libstdc++11,g' ~/.conan/profiles/default + + - name: Restore Sisl 3rdParty Cache + id: restore-cache-sisl + uses: actions/cache/restore@v3 + with: + path: | + ~/.conan/data/folly + ~/.conan/data/gperftools + ~/.conan/data/jemalloc + ~/.conan/data/prerelease_dummy + ~/.conan/data/spdlog + key: SislDeps-${{ matrix.build-type }}-${{ matrix.malloc-impl }}-${{ matrix.prerelease }} + + - name: Checkout Code + uses: actions/checkout@v3 + + - name: Build + run: | + conan create \ + -o prerelease=${{ matrix.prerelease }} \ + -o malloc_impl=${{ matrix.malloc-impl }} \ + -s build_type=${{ matrix.build-type }} \ + --build missing \ + . diff --git a/README.md b/README.md index 70dfa6a6..6293b293 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ # SymbiosisLib (sisl) -[![Conan Build](https://github.com/eBay/sisl/actions/workflows/build_with_conan.yml/badge.svg?branch=master)](https://github.com/eBay/sisl/actions/workflows/build_with_conan.yml) +[![Conan Build](https://github.com/eBay/sisl/actions/workflows/main_conan_build.yml/badge.svg?branch=master)](https://github.com/eBay/sisl/actions/workflows/main_conan_build.yml) This repo provides a symbiosis of libraries (thus named sisl - pronounced like sizzle) mostly for very high performance data structures and utilities. This is mostly on top of folly, boost, STL and other good well known libraries. Thus its not trying From a03acaaaf013409e41c7e5936e8ef5d236d9b9a2 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sun, 11 Jun 2023 10:14:58 -0600 Subject: [PATCH 262/385] Missing input. --- .github/workflows/build_dependencies.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 9e2660d4..7c643262 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -12,6 +12,10 @@ on: malloc-impl: required: true type: string + prereleasee: + required: false + type: string + default: 'False' jobs: BuildSislDeps: From 470e52b308fac4b40439989c51d9a6b716040097 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sun, 11 Jun 2023 10:16:24 -0600 Subject: [PATCH 263/385] Typo --- .github/workflows/build_dependencies.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 7c643262..d568a031 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -12,7 +12,7 @@ on: malloc-impl: required: true type: string - prereleasee: + prerelease: required: false type: string default: 'False' From d9680844937d8caef9f9eb912b143c6f6754a5f5 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sun, 11 Jun 2023 10:18:55 -0600 Subject: [PATCH 264/385] Needs to be string. --- .github/workflows/main_conan_build.yml | 2 +- .github/workflows/pr_conan_build.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main_conan_build.yml b/.github/workflows/main_conan_build.yml index 2fd7cc15..da8e8f64 100644 --- a/.github/workflows/main_conan_build.yml +++ b/.github/workflows/main_conan_build.yml @@ -24,7 +24,7 @@ jobs: branch: master build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} - prerelease: True + prerelease: "True" build: needs: build_deps diff --git a/.github/workflows/pr_conan_build.yml b/.github/workflows/pr_conan_build.yml index 92c1a642..98a827d0 100644 --- a/.github/workflows/pr_conan_build.yml +++ b/.github/workflows/pr_conan_build.yml @@ -24,7 +24,7 @@ jobs: branch: master build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} - prerelease: True + prerelease: "True" build: needs: build_deps From d7984939fa11dc1698dce9317ff6caa62ef12593 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sun, 11 Jun 2023 21:13:44 -0600 Subject: [PATCH 265/385] No cache on PR. --- .github/workflows/build_dependencies.yml | 14 ++++---------- .github/workflows/main_conan_build.yml | 6 +----- .github/workflows/pr_conan_build.yml | 6 +----- 3 files changed, 6 insertions(+), 20 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index d568a031..22455300 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -47,11 +47,7 @@ jobs: uses: actions/cache/restore@v3 with: path: | - ~/.conan/data/folly - ~/.conan/data/gperftools - ~/.conan/data/jemalloc - ~/.conan/data/prerelease_dummy - ~/.conan/data/spdlog + ~/.conan/data key: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} - name: Install Dependencies @@ -67,6 +63,7 @@ jobs: - name: Cleanup dep builds run: | + ls -1d ~/.conan/data/* | grep -Ev '(folly, gperftools, jemalloc, prerelease_dummy, spdlog)' | xargs rm -rf rm -rf ~/.conan/data/*/*/*/*/build - name: Save Sisl 3rdParty Cache @@ -74,9 +71,6 @@ jobs: uses: actions/cache/save@v3 with: path: | - ~/.conan/data/folly - ~/.conan/data/gperftools - ~/.conan/data/jemalloc - ~/.conan/data/prerelease_dummy - ~/.conan/data/spdlog + ~/.conan/data key: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} + if: github.event_name != 'pull_request' diff --git a/.github/workflows/main_conan_build.yml b/.github/workflows/main_conan_build.yml index da8e8f64..138e05c8 100644 --- a/.github/workflows/main_conan_build.yml +++ b/.github/workflows/main_conan_build.yml @@ -65,11 +65,7 @@ jobs: uses: actions/cache/restore@v3 with: path: | - ~/.conan/data/folly - ~/.conan/data/gperftools - ~/.conan/data/jemalloc - ~/.conan/data/prerelease_dummy - ~/.conan/data/spdlog + ~/.conan/data key: SislDeps-${{ matrix.build-type }}-${{ matrix.malloc-impl }}-${{ matrix.prerelease }} - name: Checkout Code diff --git a/.github/workflows/pr_conan_build.yml b/.github/workflows/pr_conan_build.yml index 98a827d0..698ed952 100644 --- a/.github/workflows/pr_conan_build.yml +++ b/.github/workflows/pr_conan_build.yml @@ -65,11 +65,7 @@ jobs: uses: actions/cache/restore@v3 with: path: | - ~/.conan/data/folly - ~/.conan/data/gperftools - ~/.conan/data/jemalloc - ~/.conan/data/prerelease_dummy - ~/.conan/data/spdlog + ~/.conan/data key: SislDeps-${{ matrix.build-type }}-${{ matrix.malloc-impl }}-${{ matrix.prerelease }} - name: Checkout Code From 214442b60d3f05f36f43cc36960ea46d857ea524 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sun, 11 Jun 2023 21:16:03 -0600 Subject: [PATCH 266/385] No cache to cleanup --- .github/workflows/main_conan_build.yml | 2 +- .github/workflows/pr_cleanup_caches.yml | 33 ------------------------- 2 files changed, 1 insertion(+), 34 deletions(-) delete mode 100644 .github/workflows/pr_cleanup_caches.yml diff --git a/.github/workflows/main_conan_build.yml b/.github/workflows/main_conan_build.yml index 138e05c8..63f07beb 100644 --- a/.github/workflows/main_conan_build.yml +++ b/.github/workflows/main_conan_build.yml @@ -1,4 +1,4 @@ -name: Sisl Main Build +name: Sisl Conan Build on: push: diff --git a/.github/workflows/pr_cleanup_caches.yml b/.github/workflows/pr_cleanup_caches.yml deleted file mode 100644 index 3936ed92..00000000 --- a/.github/workflows/pr_cleanup_caches.yml +++ /dev/null @@ -1,33 +0,0 @@ -name: cleanup caches by a branch -on: - pull_request: - types: - - closed - -jobs: - cleanup: - runs-on: ubuntu-latest - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Cleanup - run: | - gh extension install actions/gh-actions-cache - - REPO=${{ github.repository }} - BRANCH="refs/pull/${{ github.event.pull_request.number }}/merge" - - echo "Fetching list of cache key" - cacheKeysForPR=$(gh actions-cache list -R $REPO -B $BRANCH | cut -f 1 ) - - ## Setting this to not fail the workflow while deleting cache keys. - set +e - echo "Deleting caches..." - for cacheKey in $cacheKeysForPR - do - gh actions-cache delete $cacheKey -R $REPO -B $BRANCH --confirm - done - echo "Done" - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} From 1f4683ac36b5b1a475b946978437749e94481a5e Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sun, 11 Jun 2023 21:37:10 -0600 Subject: [PATCH 267/385] Better cache intelligence. --- .github/workflows/build_dependencies.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 22455300..2ca7a547 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -60,11 +60,13 @@ jobs: -o malloc_impl=${{ inputs.malloc-impl }} \ -s build_type=${{ inputs.build-type }} \ --build missing deps/sisl + if: ${{ steps.cache-npm.outputs.cache-hit != 'true' }} - name: Cleanup dep builds run: | - ls -1d ~/.conan/data/* | grep -Ev '(folly, gperftools, jemalloc, prerelease_dummy, spdlog)' | xargs rm -rf + ls -1d ~/.conan/data/* | grep -Ev '(folly, gperftools, jemalloc, prerelease_dummy, spdlog)' | xargs rm -rfv rm -rf ~/.conan/data/*/*/*/*/build + if: ${{ steps.cache-npm.outputs.cache-hit != 'true' }} - name: Save Sisl 3rdParty Cache id: save-cache-sisl @@ -73,4 +75,4 @@ jobs: path: | ~/.conan/data key: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} - if: github.event_name != 'pull_request' + if: ${{ github.event_name != 'pull_request' && steps.cache-npm.outputs.cache-hit != 'true' }} From 5b4848ec21fe14d6ff323275fccc470b78e04160 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sun, 11 Jun 2023 21:46:43 -0600 Subject: [PATCH 268/385] Fix cleanup. --- .github/workflows/build_dependencies.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 2ca7a547..b1f09158 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -60,13 +60,13 @@ jobs: -o malloc_impl=${{ inputs.malloc-impl }} \ -s build_type=${{ inputs.build-type }} \ --build missing deps/sisl - if: ${{ steps.cache-npm.outputs.cache-hit != 'true' }} + if: ${{ steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Cleanup dep builds run: | - ls -1d ~/.conan/data/* | grep -Ev '(folly, gperftools, jemalloc, prerelease_dummy, spdlog)' | xargs rm -rfv + while read pkg; do echo "Cleaning ${pkg}"; rm -rf ${pkg}; done < $(ls -1d ~/.conan/data/* | grep -Ev '(folly, gperftools, jemalloc, prerelease_dummy, spdlog)') rm -rf ~/.conan/data/*/*/*/*/build - if: ${{ steps.cache-npm.outputs.cache-hit != 'true' }} + if: ${{ steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Save Sisl 3rdParty Cache id: save-cache-sisl @@ -75,4 +75,4 @@ jobs: path: | ~/.conan/data key: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} - if: ${{ github.event_name != 'pull_request' && steps.cache-npm.outputs.cache-hit != 'true' }} + if: ${{ github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} From a9aa61b36a9574f182ad0681c63a0528bf9a76e5 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sun, 11 Jun 2023 21:59:46 -0600 Subject: [PATCH 269/385] Fix regex. --- .github/workflows/build_dependencies.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index b1f09158..8e714ccb 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -64,7 +64,7 @@ jobs: - name: Cleanup dep builds run: | - while read pkg; do echo "Cleaning ${pkg}"; rm -rf ${pkg}; done < $(ls -1d ~/.conan/data/* | grep -Ev '(folly, gperftools, jemalloc, prerelease_dummy, spdlog)') + ls -1d ~/.conan/data/* | grep -Ev '(folly|gperftools|jemalloc|prerelease_dummy|spdlog)' | xargs rm -rf rm -rf ~/.conan/data/*/*/*/*/build if: ${{ steps.restore-cache-sisl.outputs.cache-hit != 'true' }} From b9645ab8a04bb28f036a0074198af192e2b77e12 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 12 Jun 2023 08:17:26 -0700 Subject: [PATCH 270/385] Test fresh cache fixes. --- .github/workflows/build_dependencies.yml | 11 +++++------ .github/workflows/main_conan_build.yml | 5 +++-- .github/workflows/pr_conan_build.yml | 5 +++-- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 8e714ccb..1569ebcb 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -1,4 +1,4 @@ -name: Conan Build +name: BuildSislDeps on: workflow_call: @@ -48,7 +48,7 @@ jobs: with: path: | ~/.conan/data - key: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} + restore-keys: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} - name: Install Dependencies run: | @@ -60,13 +60,12 @@ jobs: -o malloc_impl=${{ inputs.malloc-impl }} \ -s build_type=${{ inputs.build-type }} \ --build missing deps/sisl - if: ${{ steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + conan info 2>/dev/null deps/sisl | grep ' ID:' | sort | uniq > pkg_id - name: Cleanup dep builds run: | ls -1d ~/.conan/data/* | grep -Ev '(folly|gperftools|jemalloc|prerelease_dummy|spdlog)' | xargs rm -rf rm -rf ~/.conan/data/*/*/*/*/build - if: ${{ steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Save Sisl 3rdParty Cache id: save-cache-sisl @@ -74,5 +73,5 @@ jobs: with: path: | ~/.conan/data - key: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} - if: ${{ github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + key: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('pkg_id') }} + if: ${{ github.event_name != 'pull_request' }} diff --git a/.github/workflows/main_conan_build.yml b/.github/workflows/main_conan_build.yml index 63f07beb..7e9a8f01 100644 --- a/.github/workflows/main_conan_build.yml +++ b/.github/workflows/main_conan_build.yml @@ -12,6 +12,7 @@ jobs: matrix: build-type: ["Debug", "Release"] malloc-impl: ["libc", "tcmalloc", "jemalloc"] + prerelease: ["True"] exclude: - build-type: Debug malloc-impl: tcmalloc @@ -24,7 +25,7 @@ jobs: branch: master build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} - prerelease: "True" + prerelease: ${{ matrix.prerelease }} build: needs: build_deps @@ -66,7 +67,7 @@ jobs: with: path: | ~/.conan/data - key: SislDeps-${{ matrix.build-type }}-${{ matrix.malloc-impl }}-${{ matrix.prerelease }} + restore-keys: SislDeps-${{ matrix.build-type }}-${{ matrix.malloc-impl }}-${{ matrix.prerelease }} - name: Checkout Code uses: actions/checkout@v3 diff --git a/.github/workflows/pr_conan_build.yml b/.github/workflows/pr_conan_build.yml index 698ed952..3845dbfd 100644 --- a/.github/workflows/pr_conan_build.yml +++ b/.github/workflows/pr_conan_build.yml @@ -12,6 +12,7 @@ jobs: matrix: build-type: ["Debug", "Release"] malloc-impl: ["libc", "tcmalloc", "jemalloc"] + prerelease: ["True"] exclude: - build-type: Debug malloc-impl: tcmalloc @@ -24,7 +25,7 @@ jobs: branch: master build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} - prerelease: "True" + prerelease: ${{ matrix.prerelease }} build: needs: build_deps @@ -66,7 +67,7 @@ jobs: with: path: | ~/.conan/data - key: SislDeps-${{ matrix.build-type }}-${{ matrix.malloc-impl }}-${{ matrix.prerelease }} + restore-keys: SislDeps-${{ matrix.build-type }}-${{ matrix.malloc-impl }}-${{ matrix.prerelease }} - name: Checkout Code uses: actions/checkout@v3 From 6d43c15953e54e8fe8a6662ccdc55a13923618e3 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 12 Jun 2023 08:19:18 -0700 Subject: [PATCH 271/385] Revert "Test fresh cache fixes." This reverts commit b9645ab8a04bb28f036a0074198af192e2b77e12. --- .github/workflows/build_dependencies.yml | 11 ++++++----- .github/workflows/main_conan_build.yml | 5 ++--- .github/workflows/pr_conan_build.yml | 5 ++--- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 1569ebcb..8e714ccb 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -1,4 +1,4 @@ -name: BuildSislDeps +name: Conan Build on: workflow_call: @@ -48,7 +48,7 @@ jobs: with: path: | ~/.conan/data - restore-keys: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} + key: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} - name: Install Dependencies run: | @@ -60,12 +60,13 @@ jobs: -o malloc_impl=${{ inputs.malloc-impl }} \ -s build_type=${{ inputs.build-type }} \ --build missing deps/sisl - conan info 2>/dev/null deps/sisl | grep ' ID:' | sort | uniq > pkg_id + if: ${{ steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Cleanup dep builds run: | ls -1d ~/.conan/data/* | grep -Ev '(folly|gperftools|jemalloc|prerelease_dummy|spdlog)' | xargs rm -rf rm -rf ~/.conan/data/*/*/*/*/build + if: ${{ steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Save Sisl 3rdParty Cache id: save-cache-sisl @@ -73,5 +74,5 @@ jobs: with: path: | ~/.conan/data - key: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('pkg_id') }} - if: ${{ github.event_name != 'pull_request' }} + key: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} + if: ${{ github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} diff --git a/.github/workflows/main_conan_build.yml b/.github/workflows/main_conan_build.yml index 7e9a8f01..63f07beb 100644 --- a/.github/workflows/main_conan_build.yml +++ b/.github/workflows/main_conan_build.yml @@ -12,7 +12,6 @@ jobs: matrix: build-type: ["Debug", "Release"] malloc-impl: ["libc", "tcmalloc", "jemalloc"] - prerelease: ["True"] exclude: - build-type: Debug malloc-impl: tcmalloc @@ -25,7 +24,7 @@ jobs: branch: master build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} - prerelease: ${{ matrix.prerelease }} + prerelease: "True" build: needs: build_deps @@ -67,7 +66,7 @@ jobs: with: path: | ~/.conan/data - restore-keys: SislDeps-${{ matrix.build-type }}-${{ matrix.malloc-impl }}-${{ matrix.prerelease }} + key: SislDeps-${{ matrix.build-type }}-${{ matrix.malloc-impl }}-${{ matrix.prerelease }} - name: Checkout Code uses: actions/checkout@v3 diff --git a/.github/workflows/pr_conan_build.yml b/.github/workflows/pr_conan_build.yml index 3845dbfd..698ed952 100644 --- a/.github/workflows/pr_conan_build.yml +++ b/.github/workflows/pr_conan_build.yml @@ -12,7 +12,6 @@ jobs: matrix: build-type: ["Debug", "Release"] malloc-impl: ["libc", "tcmalloc", "jemalloc"] - prerelease: ["True"] exclude: - build-type: Debug malloc-impl: tcmalloc @@ -25,7 +24,7 @@ jobs: branch: master build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} - prerelease: ${{ matrix.prerelease }} + prerelease: "True" build: needs: build_deps @@ -67,7 +66,7 @@ jobs: with: path: | ~/.conan/data - restore-keys: SislDeps-${{ matrix.build-type }}-${{ matrix.malloc-impl }}-${{ matrix.prerelease }} + key: SislDeps-${{ matrix.build-type }}-${{ matrix.malloc-impl }}-${{ matrix.prerelease }} - name: Checkout Code uses: actions/checkout@v3 From 37f8e51690ff5aa9139e0848fed5c815867cd708 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 12 Jun 2023 14:27:56 -0600 Subject: [PATCH 272/385] Reduce build redundancy. (#115) --- .github/workflows/build_dependencies.yml | 34 +++++++++++--- .github/workflows/main_conan_build.yml | 57 +----------------------- .github/workflows/pr_conan_build.yml | 57 +----------------------- conanfile.py | 1 + 4 files changed, 32 insertions(+), 117 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 8e714ccb..a9781485 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -16,6 +16,10 @@ on: required: false type: string default: 'False' + native: + required: false + default: 'false' + type: string jobs: BuildSislDeps: @@ -35,13 +39,6 @@ jobs: # Set std::string to non-CoW C++11 version sed -i 's,compiler.libcxx=libstdc++$,compiler.libcxx=libstdc++11,g' ~/.conan/profiles/default - - name: Retrieve Dependencies - uses: actions/checkout@v3 - with: - repository: ebay/sisl - path: deps/sisl - ref: ${{ inputs.branch }} - - name: Restore Sisl 3rdParty Cache id: restore-cache-sisl uses: actions/cache/restore@v3 @@ -50,6 +47,18 @@ jobs: ~/.conan/data key: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} + - name: Retrieve Code + uses: actions/checkout@v3 + if: ${{ inputs.native == 'true' }} + + - name: Retrieve Dependencies + uses: actions/checkout@v3 + with: + repository: ebay/sisl + path: deps/sisl + ref: ${{ inputs.branch }} + if: ${{ steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + - name: Install Dependencies run: | conan export deps/sisl/3rd_party/gperftools @@ -62,6 +71,17 @@ jobs: --build missing deps/sisl if: ${{ steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + - name: Build + run: | + conan create \ + -o sisl:prerelease=${{ inputs.prerelease }} \ + -o sisl:malloc_impl=${{ inputs.malloc-impl }} \ + -s build_type=${{ inputs.build-type }} \ + --build missing \ + . \ + oss/master + if: ${{ inputs.native == 'true' }} + - name: Cleanup dep builds run: | ls -1d ~/.conan/data/* | grep -Ev '(folly|gperftools|jemalloc|prerelease_dummy|spdlog)' | xargs rm -rf diff --git a/.github/workflows/main_conan_build.yml b/.github/workflows/main_conan_build.yml index 63f07beb..4010fc85 100644 --- a/.github/workflows/main_conan_build.yml +++ b/.github/workflows/main_conan_build.yml @@ -6,7 +6,7 @@ on: - master jobs: - build_deps: + Build: strategy: fail-fast: false matrix: @@ -25,57 +25,4 @@ jobs: build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} prerelease: "True" - - build: - needs: build_deps - runs-on: ubuntu-22.04 - strategy: - fail-fast: false - matrix: - # Currently folly in conan-center has a bug when used with gcc 11.3 and sanitizer - # sanitize: ["True", "False"] - build-type: ["Debug", "Release"] - malloc-impl: ["libc", "tcmalloc", "jemalloc"] - prerelease: ["True"] - exclude: - - build-type: Debug - malloc-impl: tcmalloc - - build-type: Debug - malloc-impl: jemalloc - - build-type: Release - malloc-impl: libc - - steps: - - name: Setup Python - uses: actions/setup-python@v3 - with: - python-version: "3.8" - - - name: Install Conan - run: | - python -m pip install --upgrade pip - python -m pip install conan~=1.0 - conan user - conan profile new --detect default - # Set std::string to non-CoW C++11 version - sed -i 's,compiler.libcxx=libstdc++$,compiler.libcxx=libstdc++11,g' ~/.conan/profiles/default - - - name: Restore Sisl 3rdParty Cache - id: restore-cache-sisl - uses: actions/cache/restore@v3 - with: - path: | - ~/.conan/data - key: SislDeps-${{ matrix.build-type }}-${{ matrix.malloc-impl }}-${{ matrix.prerelease }} - - - name: Checkout Code - uses: actions/checkout@v3 - - - name: Build - run: | - conan install \ - -o prerelease=${{ matrix.prerelease }} \ - -o malloc_impl=${{ matrix.malloc-impl }} \ - -s build_type=${{ matrix.build-type }} \ - --build missing . - conan build . + native: 'true' diff --git a/.github/workflows/pr_conan_build.yml b/.github/workflows/pr_conan_build.yml index 698ed952..64d00bf5 100644 --- a/.github/workflows/pr_conan_build.yml +++ b/.github/workflows/pr_conan_build.yml @@ -6,7 +6,7 @@ on: - master jobs: - build_deps: + Build: strategy: fail-fast: true matrix: @@ -25,57 +25,4 @@ jobs: build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} prerelease: "True" - - build: - needs: build_deps - runs-on: ubuntu-22.04 - strategy: - fail-fast: false - matrix: - # Currently folly in conan-center has a bug when used with gcc 11.3 and sanitizer - # sanitize: ["True", "False"] - build-type: ["Debug", "Release"] - malloc-impl: ["libc", "tcmalloc", "jemalloc"] - prerelease: ["True"] - exclude: - - build-type: Debug - malloc-impl: tcmalloc - - build-type: Debug - malloc-impl: jemalloc - - build-type: Release - malloc-impl: libc - - steps: - - name: Setup Python - uses: actions/setup-python@v3 - with: - python-version: "3.8" - - - name: Install Conan - run: | - python -m pip install --upgrade pip - python -m pip install conan~=1.0 - conan user - conan profile new --detect default - # Set std::string to non-CoW C++11 version - sed -i 's,compiler.libcxx=libstdc++$,compiler.libcxx=libstdc++11,g' ~/.conan/profiles/default - - - name: Restore Sisl 3rdParty Cache - id: restore-cache-sisl - uses: actions/cache/restore@v3 - with: - path: | - ~/.conan/data - key: SislDeps-${{ matrix.build-type }}-${{ matrix.malloc-impl }}-${{ matrix.prerelease }} - - - name: Checkout Code - uses: actions/checkout@v3 - - - name: Build - run: | - conan create \ - -o prerelease=${{ matrix.prerelease }} \ - -o malloc_impl=${{ matrix.malloc-impl }} \ - -s build_type=${{ matrix.build-type }} \ - --build missing \ - . + native: 'true' diff --git a/conanfile.py b/conanfile.py index 28bb8d94..a2d10546 100644 --- a/conanfile.py +++ b/conanfile.py @@ -110,6 +110,7 @@ def build(self): definitions = {'CONAN_BUILD_COVERAGE': 'OFF', 'CMAKE_EXPORT_COMPILE_COMMANDS': 'ON', + 'CONAN_CMAKE_SILENT_OUTPUT': 'ON', 'MEMORY_SANITIZER_ON': 'OFF', 'MALLOC_IMPL': self.options.malloc_impl} test_target = None From 60860be85baa33ce790d8d45e42ce0eb14d66b52 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 13 Jun 2023 11:39:34 -0600 Subject: [PATCH 273/385] Update CI (#116) --- .github/workflows/build_dependencies.yml | 130 ++++++++++++ .github/workflows/build_with_conan.yml | 53 ----- .github/workflows/merge_conan_build.yml | 26 +++ .github/workflows/pr_conan_build.yml | 28 +++ 3rd_party/gperftools/conanfile.py | 49 +++++ 3rd_party/jemalloc/conanfile.py | 195 ++++++++++++++++++ 3rd_party/pistache/CMakeLists.txt | 8 + 3rd_party/pistache/conandata.yml | 10 + 3rd_party/pistache/conanfile.py | 118 +++++++++++ .../pistache/patches/0001-remove-fpic.patch | 12 ++ .../patches/0002-include-stddef.patch | 12 ++ 3rd_party/prerelease_dummy/conanfile.py | 23 +++ README.md | 4 +- conanfile.py | 1 + 14 files changed, 614 insertions(+), 55 deletions(-) create mode 100644 .github/workflows/build_dependencies.yml delete mode 100644 .github/workflows/build_with_conan.yml create mode 100644 .github/workflows/merge_conan_build.yml create mode 100644 .github/workflows/pr_conan_build.yml create mode 100644 3rd_party/gperftools/conanfile.py create mode 100644 3rd_party/jemalloc/conanfile.py create mode 100644 3rd_party/pistache/CMakeLists.txt create mode 100644 3rd_party/pistache/conandata.yml create mode 100644 3rd_party/pistache/conanfile.py create mode 100644 3rd_party/pistache/patches/0001-remove-fpic.patch create mode 100644 3rd_party/pistache/patches/0002-include-stddef.patch create mode 100644 3rd_party/prerelease_dummy/conanfile.py diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml new file mode 100644 index 00000000..1d6092c2 --- /dev/null +++ b/.github/workflows/build_dependencies.yml @@ -0,0 +1,130 @@ +name: Conan Build + +on: + workflow_call: + inputs: + branch: + required: true + type: string + build-type: + required: true + type: string + malloc-impl: + required: true + type: string + prerelease: + required: false + type: string + default: false + testing: + required: false + type: boolean + default: false + workflow_dispatch: + inputs: + branch: + required: true + type: string + build-type: + required: true + type: choice + options: + - Debug + - Release + - RelWithDebInfo + malloc-impl: + description: 'Allocation Library' + required: true + type: choice + options: + - libc + - tcmalloc + - jemalloc + prerelease: + description: 'Fault Instrumentation' + required: false + type: choice + options: + - True + - False + default: false + testing: + description: 'Run Tests' + required: false + type: boolean + default: false + +jobs: + BuildSislDeps: + runs-on: ubuntu-22.04 + steps: + - name: Restore Sisl Cache + id: restore-cache-sisl + uses: actions/cache/restore@v3 + with: + path: | + ~/.conan/data + key: Sisl8Deps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} + + - name: Retrieve Code + uses: actions/checkout@v3 + with: + repository: ebay/sisl + path: deps/sisl + ref: ${{ inputs.branch }} + if: ${{ inputs.testing == 'true' || steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + + - name: Setup Python + uses: actions/setup-python@v3 + with: + python-version: "3.8" + if: ${{ inputs.testing == 'true' || steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + + - name: Setup Conan and Export Recipes + run: | + python -m pip install --upgrade pip + python -m pip install conan~=1.0 + conan user + conan profile new --detect default + # Set std::string to non-CoW C++11 version + sed -i 's,compiler.libcxx=libstdc++$,compiler.libcxx=libstdc++11,g' ~/.conan/profiles/default + conan export deps/sisl/3rd_party/gperftools + conan export deps/sisl/3rd_party/jemalloc + conan export deps/sisl/3rd_party/prerelease_dummy + conan export deps/sisl/3rd_party/pistache pistache/cci.20201127@ + if: ${{ inputs.testing == 'true' || steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + + - name: Build Cache + run: | + conan install \ + -o prerelease=${{ inputs.prerelease }} \ + -o malloc_impl=${{ inputs.malloc-impl }} \ + -s build_type=${{ inputs.build-type }} \ + --build missing \ + deps/sisl + if: ${{ inputs.testing != 'true' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + + - name: Test Package + run: | + conan create \ + -o sisl:prerelease=${{ inputs.prerelease }} \ + -o sisl:malloc_impl=${{ inputs.malloc-impl }} \ + -s build_type=${{ inputs.build-type }} \ + --build missing \ + deps/sisl + if: ${{ inputs.testing == 'true' }} + + - name: Cleanup + run: | + ls -1d ~/.conan/data/* | grep -Ev '(folly|gperftools|jemalloc|pistache|prerelease_dummy|spdlog)' | xargs rm -rf + rm -rf ~/.conan/data/*/*/*/*/build + if: ${{ steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + + - name: Save Sisl Cache + id: save-cache-sisl + uses: actions/cache/save@v3 + with: + path: | + ~/.conan/data + key: Sisl8Deps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} + if: ${{ github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} diff --git a/.github/workflows/build_with_conan.yml b/.github/workflows/build_with_conan.yml deleted file mode 100644 index b08fdec0..00000000 --- a/.github/workflows/build_with_conan.yml +++ /dev/null @@ -1,53 +0,0 @@ -name: Conan Build - -on: - push: - branches: - - master - - 'stable/v*' - pull_request: - branches: - - master - - 'stable/v*' - -#env: - # Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.) - #BUILD_TYPE: Release - -jobs: - build: - # The CMake configure and build commands are platform agnostic and should work equally well on Windows or Mac. - # You can convert this to a matrix build if you need cross-platform coverage. - # See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - python-version: ["3.8"] - build-type: ["Debug", "Release"] - - steps: - - uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 - with: - python-version: ${{ matrix.python-version }} - - name: Install Conan - run: | - python -m pip install --upgrade pip - python -m pip install conan==1.59.0 - - - name: Configure Conan - # Configure conan profiles for build runner - run: | - conan user - - - name: Install dependencies - # Build your program with the given configuration - run: | - conan install -o malloc_impl=libc -o prerelease=False -s build_type=${{ matrix.build-type }} --build missing . - - - name: Build - # Build your program with the given configuration - run: | - conan build . diff --git a/.github/workflows/merge_conan_build.yml b/.github/workflows/merge_conan_build.yml new file mode 100644 index 00000000..aa344e44 --- /dev/null +++ b/.github/workflows/merge_conan_build.yml @@ -0,0 +1,26 @@ +name: Sisl Merge Build + +on: + push: + branches: + - stable/v8.x + - master + +jobs: + Build: + strategy: + fail-fast: false + matrix: + build-type: ["Debug", "Release"] + malloc-impl: ["libc", "tcmalloc"] + prerelease: ["True", "False"] + exclude: + - build-type: Release + malloc-impl: libc + uses: ./.github/workflows/build_dependencies.yml + with: + branch: ${{ github.ref }} + build-type: ${{ matrix.build-type }} + malloc-impl: ${{ matrix.malloc-impl }} + prerelease: ${{ matrix.prerelease }} + testing: true diff --git a/.github/workflows/pr_conan_build.yml b/.github/workflows/pr_conan_build.yml new file mode 100644 index 00000000..daba7ed3 --- /dev/null +++ b/.github/workflows/pr_conan_build.yml @@ -0,0 +1,28 @@ +name: Sisl PR Build + +on: + pull_request: + branches: + - stable/v8.x + - master + +jobs: + Build: + strategy: + fail-fast: false + matrix: + build-type: ["Debug", "Release"] + malloc-impl: ["libc", "tcmalloc"] + prerelease: ["True", "False"] + exclude: + - build-type: Debug + prerelease: "False" + - build-type: Release + malloc-impl: libc + uses: ./.github/workflows/build_dependencies.yml + with: + branch: ${{ github.ref }} + build-type: ${{ matrix.build-type }} + malloc-impl: ${{ matrix.malloc-impl }} + prerelease: ${{ matrix.prerelease }} + testing: true diff --git a/3rd_party/gperftools/conanfile.py b/3rd_party/gperftools/conanfile.py new file mode 100644 index 00000000..8d114f31 --- /dev/null +++ b/3rd_party/gperftools/conanfile.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from conans import ConanFile, AutoToolsBuildEnvironment, tools + +class GPerfToolsConan(ConanFile): + name = "gperftools" + version = "2.7.0" + release = "2.7" + license = "BSD" + + description = "A portable library to determine the call-chain of a C program" + settings = "os", "arch", "compiler", "build_type" + + options = {"shared": [True, False], "fPIC": [True, False]} + default_options = "shared=False", "fPIC=True" + + requires = (("xz_utils/5.2.4")) + + generators = "compiler_args" + + def source(self): + source_url = "https://github.com/{0}/{0}/releases/download".format(self.name) + tools.get("{0}/{1}-{2}/{1}-{2}.tar.gz".format(source_url, self.name, self.release)) + + def build(self): + env_build = AutoToolsBuildEnvironment(self) + env_build.cxx_flags.append("@conanbuildinfo.args") + if self.settings.build_type != "Debug": + env_build.defines.append('NDEBUG') + configure_args = ['--disable-dependency-tracking', '--enable-libunwind'] + if self.options.shared: + configure_args += ['--enable-shared=yes', '--enable-static=no'] + else: + configure_args += ['--enable-shared=no', '--enable-static=yes'] + env_build.configure(args=configure_args,configure_dir="{0}-{1}".format(self.name, self.release)) + env_build.make(args=["-j1"]) + + def package(self): + headers = ['heap-checker.h', 'heap-profiler.h', 'malloc_extension.h', 'malloc_extension_c.h', + 'malloc_hook.h', 'malloc_hook_c.h', 'profiler.h', 'stacktrace.h', 'tcmalloc.h'] + for header in headers: + self.copy("*{0}".format(header), dst="include/google", src="{0}-{1}/src/google".format(self.name, self.release), keep_path=False) + self.copy("*{0}".format(header), dst="include/gperftools", src="{0}-{1}/src/gperftools".format(self.name, self.release), keep_path=False) + self.copy("*.so*", dst="lib", keep_path=False, symlinks=True) + self.copy("*.a", dst="lib", keep_path=False, symlinks=True) + + def package_info(self): + self.cpp_info.libs = ['tcmalloc_minimal'] diff --git a/3rd_party/jemalloc/conanfile.py b/3rd_party/jemalloc/conanfile.py new file mode 100644 index 00000000..80c50505 --- /dev/null +++ b/3rd_party/jemalloc/conanfile.py @@ -0,0 +1,195 @@ +from conans import AutoToolsBuildEnvironment, ConanFile, MSBuild, tools +from conans.errors import ConanInvalidConfiguration +from conans.client.tools import msvs_toolset +import os +import shutil +import string + + +class JemallocConan(ConanFile): + name = "jemalloc" + description = "jemalloc is a general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support." + url = "https://github.com/conan-io/conan-center-index" + license = "BSD-2-Clause" + homepage = "http://jemalloc.net/" + topics = ("conan", "jemalloc", "malloc", "free") + settings = "os", "arch", "compiler" + version = "5.2.1" + source_url = "https://github.com/jemalloc/jemalloc/releases/download/5.2.1/jemalloc-5.2.1.tar.bz2" + options = { + "shared": [True, False], + "fPIC": [True, False], + "prefix": "ANY", + "enable_cxx": [True, False], + "enable_fill": [True, False], + "enable_xmalloc": [True, False], + "enable_readlinkat": [True, False], + "enable_syscall": [True, False], + "enable_lazy_lock": [True, False], + "enable_debug_logging": [True, False], + "enable_initial_exec_tls": [True, False], + "enable_libdl": [True, False], + } + default_options = { + "shared": False, + "fPIC": True, + "prefix": "", + "enable_cxx": True, + "enable_fill": True, + "enable_xmalloc": False, + "enable_readlinkat": False, + "enable_syscall": True, + "enable_lazy_lock": False, + "enable_debug_logging": False, + "enable_initial_exec_tls": True, + "enable_libdl": True, + } + + _autotools = None + + _source_subfolder = "source_subfolder" + + def config_options(self): + if self.settings.os == "Windows": + del self.options.fPIC + + def configure(self): + if self.settings.compiler.get_safe("libcxx") == "libc++": + raise ConanInvalidConfiguration("libc++ is missing a mutex implementation. Remove this when it is added") + if self.settings.compiler == "Visual Studio" and self.settings.compiler.version != "15": + # https://github.com/jemalloc/jemalloc/issues/1703 + raise ConanInvalidConfiguration("Only Visual Studio 15 2017 is supported. Please fix this if other versions are supported") + if self.options.shared: + del self.options.fPIC + if not self.options.enable_cxx: + del self.settings.compiler.libcxx + del self.settings.compiler.cppstd + if self.settings.compiler == "Visual Studio" and self.settings.arch not in ("x86_64", "x86"): + raise ConanInvalidConfiguration("Unsupported arch") + + def source(self): + tools.get(self.source_url) + os.rename("{}-{}".format(self.name, self.version), self._source_subfolder) + + def build_requirements(self): + if tools.os_info.is_windows and not os.environ.get("CONAN_BASH_PATH", None): + self.build_requires("msys2/20190524") + + @property + def _autotools_args(self): + conf_args = [ + "--with-jemalloc-prefix={}".format(self.options.prefix), + "--disable-debug", + "--enable-cxx" if self.options.enable_cxx else "--disable-cxx", + "--enable-fill" if self.options.enable_fill else "--disable-fill", + "--enable-xmalloc" if self.options.enable_cxx else "--disable-xmalloc", + "--enable-readlinkat" if self.options.enable_readlinkat else "--disable-readlinkat", + "--enable-syscall" if self.options.enable_syscall else "--disable-syscall", + "--enable-lazy-lock" if self.options.enable_lazy_lock else "--disable-lazy-lock", + "--enable-log" if self.options.enable_debug_logging else "--disable-log", + "--enable-initial-exec-tld" if self.options.enable_initial_exec_tls else "--disable-initial-exec-tls", + "--enable-libdl" if self.options.enable_libdl else "--disable-libdl", + ] + if self.options.shared: + conf_args.extend(["--enable-shared", "--disable-static"]) + else: + conf_args.extend(["--disable-shared", "--enable-static"]) + return conf_args + + def _configure_autotools(self): + if self._autotools: + return self._autotools + self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows) + self._autotools.configure(args=self._autotools_args, configure_dir=self._source_subfolder) + return self._autotools + + @property + def _msvc_build_type(self): + build_type = "Release" + if not self.options.shared: + build_type += "-static" + return build_type + + def _patch_sources(self): + if self.settings.os == "Windows": + makefile_in = os.path.join(self._source_subfolder, "Makefile.in") + tools.replace_in_file(makefile_in, + "DSO_LDFLAGS = @DSO_LDFLAGS@", + "DSO_LDFLAGS = @DSO_LDFLAGS@ -Wl,--out-implib,lib/libjemalloc.a") + tools.replace_in_file(makefile_in, + "\t$(INSTALL) -d $(LIBDIR)\n" + "\t$(INSTALL) -m 755 $(objroot)lib/$(LIBJEMALLOC).$(SOREV) $(LIBDIR)", + "\t$(INSTALL) -d $(BINDIR)\n" + "\t$(INSTALL) -d $(LIBDIR)\n" + "\t$(INSTALL) -m 755 $(objroot)lib/$(LIBJEMALLOC).$(SOREV) $(BINDIR)\n" + "\t$(INSTALL) -m 644 $(objroot)lib/libjemalloc.a $(LIBDIR)") + + def build(self): + self._patch_sources() + if self.settings.compiler == "Visual Studio": + with tools.vcvars(self.settings) if self.settings.compiler == "Visual Studio" else tools.no_op(): + with tools.environment_append({"CC": "cl", "CXX": "cl"}) if self.settings.compiler == "Visual Studio" else tools.no_op(): + with tools.chdir(self._source_subfolder): + # Do not use AutoToolsBuildEnvironment because we want to run configure as ./configure + self.run("./configure {}".format(" ".join(self._autotools_args)), win_bash=tools.os_info.is_windows) + msbuild = MSBuild(self) + # Do not use the 2015 solution: unresolved external symbols: test_hooks_libc_hook and test_hooks_arena_new_hook + sln_file = os.path.join(self._source_subfolder, "msvc", "jemalloc_vc2017.sln") + msbuild.build(sln_file, targets=["jemalloc"], build_type=self._msvc_build_type) + else: + autotools = self._configure_autotools() + autotools.make() + + @property + def _library_name(self): + libname = "jemalloc" + if self.settings.compiler == "Visual Studio": + if self.options.shared: + if "Release" == "Debug": + libname += "d" + else: + toolset = msvs_toolset(self.settings) + toolset_number = "".join(c for c in toolset if c in string.digits) + libname += "-vc{}-{}".format(toolset_number, self._msvc_build_type) + else: + if self.settings.os == "Windows": + if not self.options.shared: + libname += "_s" + else: + if not self.options.shared and self.options.fPIC: + libname += "_pic" + return libname + + def package(self): + self.copy(pattern="COPYING", src=self._source_subfolder, dst="licenses") + if self.settings.compiler == "Visual Studio": + arch_subdir = { + "x86_64": "x64", + "x86": "x86", + }[str(self.settings.arch)] + self.copy("*.lib", src=os.path.join(self._source_subfolder, "msvc", arch_subdir, self._msvc_build_type), dst=os.path.join(self.package_folder, "lib")) + self.copy("*.dll", src=os.path.join(self._source_subfolder, "msvc", arch_subdir, self._msvc_build_type), dst=os.path.join(self.package_folder, "bin")) + self.copy("jemalloc.h", src=os.path.join(self._source_subfolder, "include", "jemalloc"), dst=os.path.join(self.package_folder, "include", "jemalloc"), keep_path=True) + shutil.copytree(os.path.join(self._source_subfolder, "include", "msvc_compat"), + os.path.join(self.package_folder, "include", "msvc_compat")) + else: + autotools = self._configure_autotools() + # Use install_lib_XXX and install_include to avoid mixing binaries and dll's + autotools.make(target="install_lib_shared" if self.options.shared else "install_lib_static") + autotools.make(target="install_include") + if self.settings.os == "Windows" and self.settings.compiler == "gcc": + os.rename(os.path.join(self.package_folder, "lib", "{}.lib".format(self._library_name)), + os.path.join(self.package_folder, "lib", "lib{}.a".format(self._library_name))) + if not self.options.shared: + os.unlink(os.path.join(self.package_folder, "lib", "jemalloc.lib")) + + def package_info(self): + self.cpp_info.libs = [self._library_name] + self.cpp_info.includedirs = [os.path.join(self.package_folder, "include"), + os.path.join(self.package_folder, "include", "jemalloc")] + if self.settings.compiler == "Visual Studio": + self.cpp_info.includedirs.append(os.path.join(self.package_folder, "include", "msvc_compat")) + if not self.options.shared: + self.cpp_info.defines = ["JEMALLOC_EXPORT="] + if self.settings.os == "Linux": + self.cpp_info.system_libs.extend(["dl", "pthread"]) diff --git a/3rd_party/pistache/CMakeLists.txt b/3rd_party/pistache/CMakeLists.txt new file mode 100644 index 00000000..8fbc72a2 --- /dev/null +++ b/3rd_party/pistache/CMakeLists.txt @@ -0,0 +1,8 @@ +cmake_minimum_required(VERSION 3.1) +project(cmake_wrapper) + +include("conanbuildinfo.cmake") +conan_basic_setup() + + +add_subdirectory(source_subfolder) diff --git a/3rd_party/pistache/conandata.yml b/3rd_party/pistache/conandata.yml new file mode 100644 index 00000000..819faa99 --- /dev/null +++ b/3rd_party/pistache/conandata.yml @@ -0,0 +1,10 @@ +patches: + cci.20201127: + - base_path: source_subfolder + patch_file: patches/0001-remove-fpic.patch + - base_path: source_subfolder + patch_file: patches/0002-include-stddef.patch +sources: + cci.20201127: + sha256: f1abb9e43ff847ebff8edb72623c9942162df134bccfb571af9c7817d3261fae + url: https://github.com/pistacheio/pistache/archive/a3c5c68e0f08e19331d53d12846079ad761fe974.tar.gz diff --git a/3rd_party/pistache/conanfile.py b/3rd_party/pistache/conanfile.py new file mode 100644 index 00000000..9152b1d4 --- /dev/null +++ b/3rd_party/pistache/conanfile.py @@ -0,0 +1,118 @@ +from conans import ConanFile, CMake, tools +from conans.errors import ConanInvalidConfiguration +import os + +required_conan_version = ">=1.33.0" + + +class PistacheConan(ConanFile): + name = "pistache" + license = "Apache-2.0" + homepage = "https://github.com/pistacheio/pistache" + url = "https://github.com/conan-io/conan-center-index" + topics = ("http", "rest", "framework", "networking") + description = "Pistache is a modern and elegant HTTP and REST framework for C++" + + settings = "os", "arch", "compiler", "build_type" + options = { + "shared": [True, False], + "fPIC": [True, False], + "with_ssl": [True, False], + } + default_options = { + "shared": False, + "fPIC": True, + "with_ssl": False, + } + + generators = "cmake", "cmake_find_package" + _cmake = None + + @property + def _source_subfolder(self): + return "source_subfolder" + + @property + def _build_subfolder(self): + return "build_subfolder" + + def export_sources(self): + self.copy("CMakeLists.txt") + for patch in self.conan_data.get("patches", {}).get(self.version, []): + self.copy(patch["patch_file"]) + + def configure(self): + if self.options.shared: + del self.options.fPIC + + def requirements(self): + self.requires("rapidjson/1.1.0") + if self.options.with_ssl: + self.requires("openssl/1.1.1q") + + def validate(self): + compilers = { + "gcc": "7", + "clang": "6", + } + if self.settings.os != "Linux": + raise ConanInvalidConfiguration("Pistache is only support by Linux.") + + if self.settings.compiler == "clang": + raise ConanInvalidConfiguration("Clang support is broken. See pistacheio/pistache#835.") + + if self.settings.compiler.cppstd: + tools.check_min_cppstd(self, 17) + minimum_compiler = compilers.get(str(self.settings.compiler)) + if minimum_compiler: + if tools.Version(self.settings.compiler.version) < minimum_compiler: + raise ConanInvalidConfiguration("Pistache requires c++17, which your compiler does not support.") + else: + self.output.warn("Pistache requires c++17, but this compiler is unknown to this recipe. Assuming your compiler supports c++17.") + + def source(self): + tools.get(**self.conan_data["sources"][self.version], + destination=self._source_subfolder, strip_root=True) + + def _configure_cmake(self): + if self._cmake: + return self._cmake + self._cmake = CMake(self) + self._cmake.definitions["PISTACHE_ENABLE_NETWORK_TESTS"] = False + self._cmake.definitions["PISTACHE_USE_SSL"] = self.options.with_ssl + # pistache requires explicit value for fPIC + self._cmake.definitions["CMAKE_POSITION_INDEPENDENT_CODE"] = self.options.get_safe("fPIC", True) + self._cmake.configure(build_folder=self._build_subfolder) + return self._cmake + + def build(self): + for patch in self.conan_data.get("patches", {}).get(self.version, []): + tools.patch(**patch) + cmake = self._configure_cmake() + cmake.build() + + def package(self): + self.copy("LICENSE", src=self._source_subfolder, dst="licenses") + cmake = self._configure_cmake() + cmake.install() + tools.rmdir(os.path.join(self.package_folder, "lib", "cmake")) + tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig")) + if self.options.shared: + tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "*.a") + + def package_info(self): + # TODO: Pistache does not use namespace + # TODO: Pistache variables are CamelCase e.g Pistache_BUILD_DIRS + self.cpp_info.filenames["cmake_find_package"] = "Pistache" + self.cpp_info.filenames["cmake_find_package_multi"] = "Pistache" + self.cpp_info.names["pkg_config"] = "libpistache" + suffix = "_{}".format("shared" if self.options.shared else "static") + self.cpp_info.components["libpistache"].names["cmake_find_package"] = "pistache" + suffix + self.cpp_info.components["libpistache"].names["cmake_find_package_multi"] = "pistache" + suffix + self.cpp_info.components["libpistache"].libs = tools.collect_libs(self) + self.cpp_info.components["libpistache"].requires = ["rapidjson::rapidjson"] + if self.options.with_ssl: + self.cpp_info.components["libpistache"].requires.append("openssl::openssl") + self.cpp_info.components["libpistache"].defines = ["PISTACHE_USE_SSL=1"] + if self.settings.os == "Linux": + self.cpp_info.components["libpistache"].system_libs = ["pthread"] diff --git a/3rd_party/pistache/patches/0001-remove-fpic.patch b/3rd_party/pistache/patches/0001-remove-fpic.patch new file mode 100644 index 00000000..fa8d26ee --- /dev/null +++ b/3rd_party/pistache/patches/0001-remove-fpic.patch @@ -0,0 +1,12 @@ +diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt +index 9cdac6b..b2d13b4 100644 +--- a/src/CMakeLists.txt ++++ b/src/CMakeLists.txt +@@ -12,7 +12,6 @@ set(SOURCE_FILES + ) + + add_library(pistache OBJECT ${SOURCE_FILES}) +-set_target_properties(pistache PROPERTIES POSITION_INDEPENDENT_CODE 1) + add_definitions(-DONLY_C_LOCALE=1) + + set(PISTACHE_INCLUDE diff --git a/3rd_party/pistache/patches/0002-include-stddef.patch b/3rd_party/pistache/patches/0002-include-stddef.patch new file mode 100644 index 00000000..1f4d2425 --- /dev/null +++ b/3rd_party/pistache/patches/0002-include-stddef.patch @@ -0,0 +1,12 @@ +fixed upstream https://github.com/pistacheio/pistache/pull/965 + +--- a/include/pistache/typeid.h ++++ b/include/pistache/typeid.h +@@ -11,6 +11,7 @@ + + #pragma once + ++#include + #include + + namespace Pistache { diff --git a/3rd_party/prerelease_dummy/conanfile.py b/3rd_party/prerelease_dummy/conanfile.py new file mode 100644 index 00000000..34daaeed --- /dev/null +++ b/3rd_party/prerelease_dummy/conanfile.py @@ -0,0 +1,23 @@ +from conans import ConanFile, CMake, tools + +class PrereleaseConan(ConanFile): + name = "prerelease_dummy" + version = "1.0.1" + homepage = "https://github.corp.ebay.com/SDS/prerelease_dummy" + description = "A dummy package to invoke PRERELEASE option" + topics = ("ebay", "nublox") + url = "https://github.corp.ebay.com/SDS/prerelease_dummy" + license = "Apache-2.0" + + settings = () + + exports_sources = ("LICENSE") + + def build(self): + pass + + def package(self): + pass + + def package_info(self): + self.cpp_info.cxxflags.append("-D_PRERELEASE=1") diff --git a/README.md b/README.md index cf6f02f6..d0daa907 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ # SymbiosisLib (sisl) -[![Conan Build](https://github.com/eBay/sisl/actions/workflows/build_with_conan.yml/badge.svg?branch=master)](https://github.com/eBay/sisl/actions/workflows/build_with_conan.yml) +[![Conan Build](https://github.com/eBay/sisl/actions/workflows/merge_conan_build.yml/badge.svg?branch=stable/v8.x)](https://github.com/eBay/sisl/actions/workflows/merge_conan_build.yml) This repo provides a symbiosis of libraries (thus named sisl - pronounced like sizzle) mostly for very high performance data structures and utilities. This is mostly on top of folly, boost, STL and other good well known libraries. Thus its not trying @@ -94,7 +94,7 @@ Harihara Kadayam hkadayam@ebay.com Copyright 2021 eBay Inc. Primary Author: Harihara Kadayam -Primary Developers: Harihara Kadayam, Rishabh Mittal, Bryan Zimmerman, Brian Szymd +Primary Developers: Harihara Kadayam, Rishabh Mittal, Bryan Zimmerman, Brian Szmyd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0. diff --git a/conanfile.py b/conanfile.py index 85dbabd1..7a701e1a 100644 --- a/conanfile.py +++ b/conanfile.py @@ -57,6 +57,7 @@ def requirements(self): self.requires("folly/2022.01.31.00") self.requires("grpc/1.48.0") self.requires("jwt-cpp/0.4.0") + self.requires("libcurl/7.86.0") self.requires("nlohmann_json/3.11.2") self.requires("prometheus-cpp/1.0.1") self.requires("spdlog/1.11.0") From 919ed63a97a1a423351521a573ac83725e7fef4b Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 13 Jun 2023 11:55:54 -0600 Subject: [PATCH 274/385] Fix testing param (#117) --- .github/workflows/build_dependencies.yml | 22 ++++++++++++---------- .github/workflows/merge_conan_build.yml | 2 +- .github/workflows/pr_conan_build.yml | 2 +- 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 1d6092c2..5748f7f1 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -18,8 +18,8 @@ on: default: false testing: required: false - type: boolean - default: false + type: string + default: 'False' workflow_dispatch: inputs: branch: @@ -50,9 +50,11 @@ on: default: false testing: description: 'Run Tests' - required: false - type: boolean - default: false + required: true + type: choice + - True + - False + default: 'false' jobs: BuildSislDeps: @@ -72,13 +74,13 @@ jobs: repository: ebay/sisl path: deps/sisl ref: ${{ inputs.branch }} - if: ${{ inputs.testing == 'true' || steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + if: ${{ inputs.testing == 'True' || steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Setup Python uses: actions/setup-python@v3 with: python-version: "3.8" - if: ${{ inputs.testing == 'true' || steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + if: ${{ inputs.testing == 'True' || steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Setup Conan and Export Recipes run: | @@ -92,7 +94,7 @@ jobs: conan export deps/sisl/3rd_party/jemalloc conan export deps/sisl/3rd_party/prerelease_dummy conan export deps/sisl/3rd_party/pistache pistache/cci.20201127@ - if: ${{ inputs.testing == 'true' || steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + if: ${{ inputs.testing == 'True' || steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Build Cache run: | @@ -102,7 +104,7 @@ jobs: -s build_type=${{ inputs.build-type }} \ --build missing \ deps/sisl - if: ${{ inputs.testing != 'true' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + if: ${{ inputs.testing != 'True' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Test Package run: | @@ -112,7 +114,7 @@ jobs: -s build_type=${{ inputs.build-type }} \ --build missing \ deps/sisl - if: ${{ inputs.testing == 'true' }} + if: ${{ inputs.testing == 'True' }} - name: Cleanup run: | diff --git a/.github/workflows/merge_conan_build.yml b/.github/workflows/merge_conan_build.yml index aa344e44..0cf6a75e 100644 --- a/.github/workflows/merge_conan_build.yml +++ b/.github/workflows/merge_conan_build.yml @@ -23,4 +23,4 @@ jobs: build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} prerelease: ${{ matrix.prerelease }} - testing: true + testing: 'True' diff --git a/.github/workflows/pr_conan_build.yml b/.github/workflows/pr_conan_build.yml index daba7ed3..ebdbe6b2 100644 --- a/.github/workflows/pr_conan_build.yml +++ b/.github/workflows/pr_conan_build.yml @@ -25,4 +25,4 @@ jobs: build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} prerelease: ${{ matrix.prerelease }} - testing: true + testing: 'True' From 4e83741b6b63e65d0c0aafd5d58cb9ae770bfca2 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 13 Jun 2023 12:25:03 -0600 Subject: [PATCH 275/385] Fix typo --- .github/workflows/build_dependencies.yml | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index b2de24fd..fff5dad5 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -15,7 +15,7 @@ on: prerelease: required: false type: string - default: false + default: 'False' testing: required: false type: string @@ -47,17 +47,18 @@ on: options: - True - False - default: false + default: 'False' testing: description: 'Run Tests' required: true type: choice - - True - - False - default: 'false' + options: + - 'True' + - 'False' + default: 'False' jobs: -BuildSislDeps: + BuildSislDeps: runs-on: ubuntu-22.04 steps: - name: Restore Sisl Cache From ffa655709c3935e30243310e025d61461f9b774a Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 13 Jun 2023 11:27:10 -0700 Subject: [PATCH 276/385] Remove merge fix. --- conanfile.py | 1 - 1 file changed, 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index a7a3c03d..25b2d563 100644 --- a/conanfile.py +++ b/conanfile.py @@ -73,7 +73,6 @@ def requirements(self): self.requires("flatbuffers/1.12.0") self.requires("grpc/1.48.0") self.requires("jwt-cpp/0.4.0") - self.requires("libcurl/7.86.0") self.requires("nlohmann_json/3.11.2") self.requires("prometheus-cpp/1.1.0") self.requires("spdlog/1.11.0") From 770a41f85097437f311b4f6fbcdc2ae4681df387 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 13 Jun 2023 11:51:57 -0700 Subject: [PATCH 277/385] Fix input value --- .github/workflows/build_dependencies.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index fff5dad5..f038cf2e 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -45,8 +45,8 @@ on: required: false type: choice options: - - True - - False + - 'True' + - 'False' default: 'False' testing: description: 'Run Tests' From 2f8694cc650bd1f395f58b0241a48fbe904f194d Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 13 Jun 2023 14:09:48 -0600 Subject: [PATCH 278/385] Fixes for private workflows. (#121) --- .github/workflows/build_dependencies.yml | 16 ++++++++-------- .github/workflows/pr_conan_build.yml | 2 +- README.md | 1 + 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 5748f7f1..df46d10e 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -15,7 +15,7 @@ on: prerelease: required: false type: string - default: false + default: 'False' testing: required: false type: string @@ -45,16 +45,17 @@ on: required: false type: choice options: - - True - - False - default: false + - 'True' + - 'False' + default: 'False' testing: description: 'Run Tests' required: true type: choice - - True - - False - default: 'false' + options: + - 'True' + - 'False' + default: 'False' jobs: BuildSislDeps: @@ -71,7 +72,6 @@ jobs: - name: Retrieve Code uses: actions/checkout@v3 with: - repository: ebay/sisl path: deps/sisl ref: ${{ inputs.branch }} if: ${{ inputs.testing == 'True' || steps.restore-cache-sisl.outputs.cache-hit != 'true' }} diff --git a/.github/workflows/pr_conan_build.yml b/.github/workflows/pr_conan_build.yml index ebdbe6b2..49ee0395 100644 --- a/.github/workflows/pr_conan_build.yml +++ b/.github/workflows/pr_conan_build.yml @@ -16,7 +16,7 @@ jobs: prerelease: ["True", "False"] exclude: - build-type: Debug - prerelease: "False" + malloc-impl: tcmalloc - build-type: Release malloc-impl: libc uses: ./.github/workflows/build_dependencies.yml diff --git a/README.md b/README.md index d0daa907..121d00cd 100644 --- a/README.md +++ b/README.md @@ -94,6 +94,7 @@ Harihara Kadayam hkadayam@ebay.com Copyright 2021 eBay Inc. Primary Author: Harihara Kadayam + Primary Developers: Harihara Kadayam, Rishabh Mittal, Bryan Zimmerman, Brian Szmyd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0. From 8dfb71fe7978509a6941a9cdbe7771e2ca2e26dc Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 13 Jun 2023 13:15:33 -0700 Subject: [PATCH 279/385] Fix downstream builds. --- .github/workflows/build_dependencies.yml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index df46d10e..997e3eca 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -74,7 +74,15 @@ jobs: with: path: deps/sisl ref: ${{ inputs.branch }} - if: ${{ inputs.testing == 'True' || steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + if: ${{ inputs.testing == 'True' }} + + - name: Retrieve Recipe + uses: actions/checkout@v3 + with: + repository: ebay/sisl + path: deps/sisl + ref: ${{ inputs.branch }} + if: ${{ inputs.testing == 'False' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Setup Python uses: actions/setup-python@v3 From 0c94edd43973c0fac7b22c442a1188b8604ca08c Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 13 Jun 2023 14:17:58 -0600 Subject: [PATCH 280/385] Fix downstream builds. (#122) --- .github/workflows/build_dependencies.yml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index df46d10e..997e3eca 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -74,7 +74,15 @@ jobs: with: path: deps/sisl ref: ${{ inputs.branch }} - if: ${{ inputs.testing == 'True' || steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + if: ${{ inputs.testing == 'True' }} + + - name: Retrieve Recipe + uses: actions/checkout@v3 + with: + repository: ebay/sisl + path: deps/sisl + ref: ${{ inputs.branch }} + if: ${{ inputs.testing == 'False' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Setup Python uses: actions/setup-python@v3 From 090b3d7005beb118656adfc16742c91763f9a145 Mon Sep 17 00:00:00 2001 From: Sanal P Date: Tue, 13 Jun 2023 15:06:57 -0700 Subject: [PATCH 281/385] Fix build for sisl 8.5.3 --- conanfile.py | 2 +- include/sisl/utility/thread_factory.hpp | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index 7a701e1a..5c95be0d 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.5.3" + version = "8.5.4" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/include/sisl/utility/thread_factory.hpp b/include/sisl/utility/thread_factory.hpp index d48e2661..591f4cc8 100644 --- a/include/sisl/utility/thread_factory.hpp +++ b/include/sisl/utility/thread_factory.hpp @@ -20,6 +20,7 @@ #include #include #include +#include #ifdef _POSIX_THREADS #include From 1db50693452ffaba324e793e80e94c437fd97f0e Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 15 Jun 2023 07:57:59 -0700 Subject: [PATCH 282/385] Dispatch for Merge Build --- .github/workflows/merge_conan_build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/merge_conan_build.yml b/.github/workflows/merge_conan_build.yml index 5262623a..43c481ae 100644 --- a/.github/workflows/merge_conan_build.yml +++ b/.github/workflows/merge_conan_build.yml @@ -1,6 +1,7 @@ name: Sisl Merge Build on: + workflow_dispatch: push: branches: - stable/v8.x From 1cf061cea71d824b4b7321a75b1bf9b17a752311 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 15 Jun 2023 13:15:04 -0600 Subject: [PATCH 283/385] Platform support (#127) * Platform Support. * Dispatch for Merge Build * Allow updates of dependencies. --- .github/workflows/build_dependencies.yml | 62 +++++++++++++++++------- .github/workflows/merge_conan_build.yml | 29 +++++++++-- .github/workflows/pr_conan_build.yml | 27 +++++++++-- CMakeLists.txt | 38 +++++++-------- src/CMakeLists.txt | 10 ++-- src/auth_manager/CMakeLists.txt | 4 +- src/flip/CMakeLists.txt | 2 +- src/grpc/CMakeLists.txt | 8 +-- src/grpc/tests/CMakeLists.txt | 4 +- src/settings/CMakeLists.txt | 2 +- test_package/CMakeLists.txt | 4 +- test_package/conanfile.py | 2 +- test_package/test_package.cpp | 12 ++--- 13 files changed, 134 insertions(+), 70 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 2c05711c..c122d7a7 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -3,6 +3,10 @@ name: Conan Build on: workflow_call: inputs: + platform: + required: false + default: 'ubuntu-22.04' + type: string branch: required: true type: string @@ -22,6 +26,15 @@ on: default: 'False' workflow_dispatch: inputs: + platform: + required: true + type: choice + options: + - ubuntu-22.04 + - ubuntu-20.04 + - macos-13 + - macos-12 + default: 'ubuntu-22.04' branch: required: true type: string @@ -49,26 +62,18 @@ on: - 'False' default: 'False' testing: - description: 'Run Tests' + description: 'Build and Run' required: true type: choice options: - 'True' - 'False' - default: 'False' + default: 'True' jobs: BuildSislDeps: - runs-on: ubuntu-22.04 + runs-on: ${{ inputs.platform }} steps: - - name: Restore Sisl Cache - id: restore-cache-sisl - uses: actions/cache/restore@v3 - with: - path: | - ~/.conan/data - key: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} - - name: Retrieve Code uses: actions/checkout@v3 with: @@ -82,7 +87,18 @@ jobs: repository: ebay/sisl path: deps/sisl ref: ${{ inputs.branch }} - if: ${{ inputs.testing == 'False' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + if: ${{ inputs.testing == 'False' }} + + - name: Restore Sisl Cache + id: restore-cache-sisl + uses: actions/cache/restore@v3 + with: + path: | + ~/.conan/data + key: SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} + restore-keys: | + SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}- + SislDeps-${{ inputs.platform }}- - name: Setup Python uses: actions/setup-python@v3 @@ -96,13 +112,19 @@ jobs: python -m pip install conan~=1.0 conan user conan profile new --detect default - # Set std::string to non-CoW C++11 version - sed -i 's,compiler.libcxx=libstdc++$,compiler.libcxx=libstdc++11,g' ~/.conan/profiles/default conan export deps/sisl/3rd_party/gperftools conan export deps/sisl/3rd_party/jemalloc conan export deps/sisl/3rd_party/prerelease_dummy + cached_pkgs=$(ls -1d ~/.conan/data/*/*/*/*/package | sed 's,.*data/,,' | cut -d'/' -f1,2 | paste -sd',' - -) + echo "::info:: Pre-cached: ${cached_pkgs}" if: ${{ inputs.testing == 'True' || steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + - name: Fixup libstdc++ + run: | + # Set std::string to non-CoW C++11 version + sed -i 's,compiler.libcxx=libstdc++$,compiler.libcxx=libstdc++11,g' ~/.conan/profiles/default + if: ${{ inputs.platform == 'ubuntu-22.04' && ( inputs.testing == 'True' || steps.restore-cache-sisl.outputs.cache-hit != 'true' ) }} + - name: Build Cache run: | conan install \ @@ -111,7 +133,7 @@ jobs: -s build_type=${{ inputs.build-type }} \ --build missing \ deps/sisl - if: ${{ inputs.testing != 'True' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + if: ${{ steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Test Package run: | @@ -121,12 +143,18 @@ jobs: -s build_type=${{ inputs.build-type }} \ --build missing \ deps/sisl + conan remove -f sisl if: ${{ inputs.testing == 'True' }} - name: Cleanup run: | - ls -1d ~/.conan/data/* | grep -Ev '(folly|gperftools|jemalloc|prerelease_dummy|spdlog)' | xargs rm -rf + dirty_pkgs=$(ls -1d ~/.conan/data/*/*/*/*/build | sed 's,.*data/,,') + dirty_pkgs_v=$(echo "${dirty_pkgs}" | cut -d'/' -f1,2 | paste -sd',' - -) + echo "::info:: Caching: ${dirty_pkgs_v}" + dirty_pkgs_d=$(echo "${dirty_pkgs}" | cut -d'/' -f1 | paste -sd'|' - -) + ls -1d ~/.conan/data/* | grep -Ev "(${dirty_pkgs_d})" | xargs rm -rf rm -rf ~/.conan/data/*/*/*/*/build + rm -rf ~/.conan/data/*/*/*/*/source if: ${{ steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Save Sisl Cache @@ -135,5 +163,5 @@ jobs: with: path: | ~/.conan/data - key: SislDeps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} + key: SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} if: ${{ github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} diff --git a/.github/workflows/merge_conan_build.yml b/.github/workflows/merge_conan_build.yml index 43c481ae..047cb059 100644 --- a/.github/workflows/merge_conan_build.yml +++ b/.github/workflows/merge_conan_build.yml @@ -1,4 +1,4 @@ -name: Sisl Merge Build +name: Sisl Build on: workflow_dispatch: @@ -12,18 +12,37 @@ jobs: strategy: fail-fast: false matrix: + platform: ["ubuntu-22.04", "ubuntu-20.04", "macos-13"] build-type: ["Debug", "Release"] malloc-impl: ["libc", "tcmalloc", "jemalloc"] prerelease: ["True", "False"] exclude: - build-type: Debug - malloc-impl: tcmalloc + platform: ubuntu-20.04 + - malloc-impl: tcmalloc + platform: ubuntu-20.04 + - malloc-impl: jemalloc + platform: ubuntu-20.04 - build-type: Debug - malloc-impl: jemalloc - - build-type: Release - malloc-impl: libc + platform: macos-13 + - malloc-impl: libc + prerelease: "False" + - malloc-impl: tcmalloc + platform: macos-13 + - malloc-impl: jemalloc + platform: macos-13 + - malloc-impl: jemalloc + build-type: Debug + - malloc-impl: jemalloc + prerelease: "False" + - malloc-impl: libc + build-type: Release + platform: ubuntu-22.04 + - prerelease: "False" + build-type: Debug uses: ./.github/workflows/build_dependencies.yml with: + platform: ${{ matrix.platform }} branch: ${{ github.ref }} build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} diff --git a/.github/workflows/pr_conan_build.yml b/.github/workflows/pr_conan_build.yml index 5ab92423..1f155af5 100644 --- a/.github/workflows/pr_conan_build.yml +++ b/.github/workflows/pr_conan_build.yml @@ -11,18 +11,37 @@ jobs: strategy: fail-fast: false matrix: + platform: ["ubuntu-22.04", "ubuntu-20.04", "macos-13"] build-type: ["Debug", "Release"] malloc-impl: ["libc", "tcmalloc", "jemalloc"] prerelease: ["True", "False"] exclude: - build-type: Debug - malloc-impl: tcmalloc + platform: ubuntu-20.04 + - malloc-impl: tcmalloc + platform: ubuntu-20.04 + - malloc-impl: jemalloc + platform: ubuntu-20.04 - build-type: Debug - malloc-impl: jemalloc - - build-type: Release - malloc-impl: libc + platform: macos-13 + - malloc-impl: libc + prerelease: "False" + - malloc-impl: tcmalloc + platform: macos-13 + - malloc-impl: jemalloc + platform: macos-13 + - malloc-impl: jemalloc + build-type: Debug + - malloc-impl: jemalloc + prerelease: "False" + - malloc-impl: libc + build-type: Release + platform: ubuntu-22.04 + - prerelease: "False" + build-type: Debug uses: ./.github/workflows/build_dependencies.yml with: + platform: ${{ matrix.platform }} branch: ${{ github.ref }} build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} diff --git a/CMakeLists.txt b/CMakeLists.txt index f5a6c394..a8485f46 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -49,32 +49,30 @@ if (NOT ${CMAKE_SYSTEM_NAME} STREQUAL "Windows") set(CMAKE_THREAD_PREFER_PTHREAD TRUE) endif() -find_package(benchmark REQUIRED) -find_package(Boost REQUIRED) -find_package(cpr REQUIRED) -find_package(cxxopts REQUIRED) -find_package(GTest REQUIRED) +find_package(benchmark QUIET REQUIRED) +find_package(Boost QUIET REQUIRED) +find_package(cpr QUIET REQUIRED) +find_package(cxxopts QUIET REQUIRED) +find_package(GTest QUIET REQUIRED) if (${MALLOC_IMPL} STREQUAL "tcmalloc") - find_package(gperftools REQUIRED) + find_package(gperftools QUIET REQUIRED) endif() if (${MALLOC_IMPL} STREQUAL "jemalloc") - find_package(jemalloc REQUIRED) + find_package(jemalloc QUIET REQUIRED) endif() -find_package(jwt-cpp REQUIRED) -find_package(nlohmann_json REQUIRED) +find_package(jwt-cpp QUIET REQUIRED) +find_package(nlohmann_json QUIET REQUIRED) find_package(prerelease_dummy QUIET) -find_package(prometheus-cpp REQUIRED) -find_package(zmarok-semver REQUIRED) -find_package(spdlog REQUIRED) -find_package(Threads REQUIRED) +find_package(prometheus-cpp QUIET REQUIRED) +find_package(zmarok-semver QUIET REQUIRED) +find_package(spdlog QUIET REQUIRED) +find_package(Threads QUIET REQUIRED) # Linux Specific dependencies -if(${CMAKE_HOST_SYSTEM_NAME} STREQUAL Linux) - find_package(folly REQUIRED) - find_package(userspace-rcu REQUIRED) -endif() +find_package(folly QUIET) +find_package(userspace-rcu QUIET) list (APPEND COMMON_DEPS Boost::headers @@ -83,10 +81,8 @@ list (APPEND COMMON_DEPS prometheus-cpp::prometheus-cpp spdlog::spdlog ) -if(${CMAKE_HOST_SYSTEM_NAME} STREQUAL Linux) - list (APPEND COMMON_DEPS - userspace-rcu::userspace-rcu - ) +if(${userspace-rcu_FOUND}) + list (APPEND COMMON_DEPS userspace-rcu::userspace-rcu) endif() if (${prerelease_dummy_FOUND}) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 15d33f20..645a69b5 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,6 +1,5 @@ cmake_minimum_required (VERSION 3.11) -add_subdirectory (grpc) add_subdirectory (logging) add_subdirectory (options) add_subdirectory (version) @@ -16,7 +15,13 @@ if(${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") $ ) endif() -if(${CMAKE_HOST_SYSTEM_NAME} STREQUAL Linux) +if(${userspace-rcu_FOUND}) + add_subdirectory (grpc) + list(APPEND POSIX_LIBRARIES + $ + ) +endif() +if(${folly_FOUND}) add_subdirectory (cache) add_subdirectory (fds) add_subdirectory (file_watcher) @@ -40,7 +45,6 @@ endif() add_library(sisl ${POSIX_LIBRARIES} - $ $ $ $ diff --git a/src/auth_manager/CMakeLists.txt b/src/auth_manager/CMakeLists.txt index ff83dc6d..b1e1b34f 100644 --- a/src/auth_manager/CMakeLists.txt +++ b/src/auth_manager/CMakeLists.txt @@ -1,7 +1,7 @@ cmake_minimum_required (VERSION 3.11) -find_package(flatbuffers REQUIRED) -find_package(Pistache REQUIRED) +find_package(flatbuffers QUIET REQUIRED) +find_package(Pistache QUIET REQUIRED) if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}) diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index 34640c19..dc305577 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -1,6 +1,6 @@ cmake_minimum_required(VERSION 3.11) -find_package(gRPC REQUIRED) +find_package(gRPC QUIET REQUIRED) add_subdirectory (proto) diff --git a/src/grpc/CMakeLists.txt b/src/grpc/CMakeLists.txt index e59bf9ef..ea4c1ee0 100644 --- a/src/grpc/CMakeLists.txt +++ b/src/grpc/CMakeLists.txt @@ -1,7 +1,7 @@ cmake_minimum_required (VERSION 3.11) -find_package(flatbuffers REQUIRED) -find_package(gRPC REQUIRED) +find_package(flatbuffers QUIET REQUIRED) +find_package(gRPC QUIET REQUIRED) include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/../auth_manager) @@ -19,4 +19,6 @@ target_link_libraries(sisl_grpc ${COMMON_DEPS} ) -add_subdirectory(tests) +if(${Pistache_FOUND}) + add_subdirectory(tests) +endif() diff --git a/src/grpc/tests/CMakeLists.txt b/src/grpc/tests/CMakeLists.txt index 995c4076..f951c983 100644 --- a/src/grpc/tests/CMakeLists.txt +++ b/src/grpc/tests/CMakeLists.txt @@ -1,7 +1,7 @@ cmake_minimum_required (VERSION 3.11) -find_package(GTest REQUIRED) -find_package(Pistache REQUIRED) +find_package(GTest QUIET REQUIRED) +find_package(Pistache QUIET REQUIRED) add_subdirectory(proto) diff --git a/src/settings/CMakeLists.txt b/src/settings/CMakeLists.txt index e429df32..1d73c2ad 100644 --- a/src/settings/CMakeLists.txt +++ b/src/settings/CMakeLists.txt @@ -1,6 +1,6 @@ cmake_minimum_required (VERSION 3.11) -find_package(flatbuffers REQUIRED) +find_package(flatbuffers QUIET REQUIRED) add_library(sisl_settings OBJECT) target_sources(sisl_settings PRIVATE diff --git a/test_package/CMakeLists.txt b/test_package/CMakeLists.txt index e581ed2a..489b1a68 100644 --- a/test_package/CMakeLists.txt +++ b/test_package/CMakeLists.txt @@ -1,10 +1,12 @@ cmake_minimum_required(VERSION 3.11) project(test_package) +set(CMAKE_CXX_STANDARD 20) + include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) conan_basic_setup(TARGETS) -find_package(sisl CONFIG REQUIRED) +find_package(sisl CONFIG QUIET REQUIRED) add_executable(${PROJECT_NAME} test_package.cpp example_decl.cpp) target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_20) diff --git a/test_package/conanfile.py b/test_package/conanfile.py index 9ebf2674..3b0b71cb 100644 --- a/test_package/conanfile.py +++ b/test_package/conanfile.py @@ -9,7 +9,7 @@ class TestPackageConan(ConanFile): def build(self): cmake = CMake(self) - cmake.configure() + cmake.configure(defs={'CONAN_CMAKE_SILENT_OUTPUT': 'ON'}) cmake.build() def test(self): diff --git a/test_package/test_package.cpp b/test_package/test_package.cpp index d02dd7a5..bfbb13f2 100644 --- a/test_package/test_package.cpp +++ b/test_package/test_package.cpp @@ -22,24 +22,18 @@ int main(int argc, char** argv) { LOGERROR("Error"); LOGCRITICAL("Critical"); - auto thread = std::jthread([](std::stop_token stoken) { - example_decl(); - while (!stoken.stop_requested()) { - LOGWARNMOD(my_module, "Sleeping..."); - std::this_thread::sleep_for(1500ms); - } - LOGINFOMOD(my_module, "Waking..."); + auto thread = std::thread([]() { + LOGWARNMOD(my_module, "Sleeping..."); std::this_thread::sleep_for(1500ms); }); sisl::name_thread(thread, "example_thread"); std::this_thread::sleep_for(300ms); - auto stop_source = thread.get_stop_source(); auto custom_logger = sisl::logging::CreateCustomLogger("test_package", "_custom", false /*stdout*/, true /*stderr*/); LOGINFOMOD_USING_LOGGER(my_module, custom_logger, "hello world"); DEBUG_ASSERT(true, "Always True"); - RELEASE_ASSERT(stop_source.request_stop(), "Should be!"); + thread.join(); return 0; } From 473c4f8469316f468349088fac9dd6f00fb06906 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 15 Jun 2023 13:12:26 -0700 Subject: [PATCH 284/385] Need all pre-reqs for downstream builds to match. --- .github/workflows/build_dependencies.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index c122d7a7..52bca095 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -97,8 +97,7 @@ jobs: ~/.conan/data key: SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} restore-keys: | - SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}- - SislDeps-${{ inputs.platform }}- + SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} - name: Setup Python uses: actions/setup-python@v3 From fa92e4c10aa6cf53b67b18ee1e437f7441b7e0f2 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 15 Jun 2023 13:24:09 -0700 Subject: [PATCH 285/385] Smarter cache. --- .github/workflows/build_dependencies.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 52bca095..d901d1a1 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -97,7 +97,7 @@ jobs: ~/.conan/data key: SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} restore-keys: | - SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} + SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease == 'True' && 'True-' || '' }} - name: Setup Python uses: actions/setup-python@v3 From 37fa9031d12732f4a92ee49bc12ecb18ab87c8fa Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 15 Jun 2023 18:01:00 -0600 Subject: [PATCH 286/385] Hashd caches. (#128) --- .github/workflows/build_dependencies.yml | 59 +++++++++++++++++------- .github/workflows/merge_conan_build.yml | 16 ++++++- .github/workflows/pr_conan_build.yml | 15 ++++-- test_package/test_package.cpp | 14 ++---- 4 files changed, 73 insertions(+), 31 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 997e3eca..5bb9feac 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -3,6 +3,10 @@ name: Conan Build on: workflow_call: inputs: + platform: + required: false + default: 'ubuntu-22.04' + type: string branch: required: true type: string @@ -22,6 +26,13 @@ on: default: 'False' workflow_dispatch: inputs: + platform: + required: true + type: choice + options: + - ubuntu-22.04 + - ubuntu-20.04 + default: 'ubuntu-22.04' branch: required: true type: string @@ -49,26 +60,18 @@ on: - 'False' default: 'False' testing: - description: 'Run Tests' + description: 'Build and Run' required: true type: choice options: - 'True' - 'False' - default: 'False' + default: 'True' jobs: BuildSislDeps: - runs-on: ubuntu-22.04 + runs-on: ${{ inputs.platform }} steps: - - name: Restore Sisl Cache - id: restore-cache-sisl - uses: actions/cache/restore@v3 - with: - path: | - ~/.conan/data - key: Sisl8Deps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} - - name: Retrieve Code uses: actions/checkout@v3 with: @@ -82,7 +85,17 @@ jobs: repository: ebay/sisl path: deps/sisl ref: ${{ inputs.branch }} - if: ${{ inputs.testing == 'False' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + if: ${{ inputs.testing == 'False' }} + + - name: Restore Sisl Cache + id: restore-cache-sisl + uses: actions/cache/restore@v3 + with: + path: | + ~/.conan/data + key: SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} + restore-keys: | + SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease == 'True' && 'True-' || '' }} - name: Setup Python uses: actions/setup-python@v3 @@ -96,14 +109,20 @@ jobs: python -m pip install conan~=1.0 conan user conan profile new --detect default - # Set std::string to non-CoW C++11 version - sed -i 's,compiler.libcxx=libstdc++$,compiler.libcxx=libstdc++11,g' ~/.conan/profiles/default conan export deps/sisl/3rd_party/gperftools conan export deps/sisl/3rd_party/jemalloc conan export deps/sisl/3rd_party/prerelease_dummy conan export deps/sisl/3rd_party/pistache pistache/cci.20201127@ + cached_pkgs=$(ls -1d ~/.conan/data/*/*/*/*/package | sed 's,.*data/,,' | cut -d'/' -f1,2 | paste -sd',' - -) + echo "::info:: Pre-cached: ${cached_pkgs}" if: ${{ inputs.testing == 'True' || steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + - name: Fixup libstdc++ + run: | + # Set std::string to non-CoW C++11 version + sed -i 's,compiler.libcxx=libstdc++$,compiler.libcxx=libstdc++11,g' ~/.conan/profiles/default + if: ${{ inputs.platform == 'ubuntu-22.04' && ( inputs.testing == 'True' || steps.restore-cache-sisl.outputs.cache-hit != 'true' ) }} + - name: Build Cache run: | conan install \ @@ -112,7 +131,7 @@ jobs: -s build_type=${{ inputs.build-type }} \ --build missing \ deps/sisl - if: ${{ inputs.testing != 'True' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + if: ${{ steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Test Package run: | @@ -122,12 +141,18 @@ jobs: -s build_type=${{ inputs.build-type }} \ --build missing \ deps/sisl + conan remove -f sisl if: ${{ inputs.testing == 'True' }} - name: Cleanup run: | - ls -1d ~/.conan/data/* | grep -Ev '(folly|gperftools|jemalloc|pistache|prerelease_dummy|spdlog)' | xargs rm -rf + dirty_pkgs=$(ls -1d ~/.conan/data/*/*/*/*/build | sed 's,.*data/,,') + dirty_pkgs_v=$(echo "${dirty_pkgs}" | cut -d'/' -f1,2 | paste -sd',' - -) + echo "::info:: Caching: ${dirty_pkgs_v}" + dirty_pkgs_d=$(echo "${dirty_pkgs}" | cut -d'/' -f1 | paste -sd'|' - -) + ls -1d ~/.conan/data/* | grep -Ev "(${dirty_pkgs_d})" | xargs rm -rf rm -rf ~/.conan/data/*/*/*/*/build + rm -rf ~/.conan/data/*/*/*/*/source if: ${{ steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Save Sisl Cache @@ -136,5 +161,5 @@ jobs: with: path: | ~/.conan/data - key: Sisl8Deps-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} + key: SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} if: ${{ github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} diff --git a/.github/workflows/merge_conan_build.yml b/.github/workflows/merge_conan_build.yml index 0cf6a75e..b9891c7a 100644 --- a/.github/workflows/merge_conan_build.yml +++ b/.github/workflows/merge_conan_build.yml @@ -1,6 +1,7 @@ name: Sisl Merge Build on: + workflow_dispatch: push: branches: - stable/v8.x @@ -11,14 +12,25 @@ jobs: strategy: fail-fast: false matrix: + platform: ["ubuntu-22.04", "ubuntu-20.04"] build-type: ["Debug", "Release"] malloc-impl: ["libc", "tcmalloc"] prerelease: ["True", "False"] exclude: - - build-type: Release - malloc-impl: libc + - build-type: Debug + platform: ubuntu-20.04 + - malloc-impl: tcmalloc + platform: ubuntu-20.04 + - malloc-impl: libc + prerelease: "False" + - malloc-impl: libc + build-type: Release + platform: ubuntu-22.04 + - prerelease: "False" + build-type: Debug uses: ./.github/workflows/build_dependencies.yml with: + platform: ${{ matrix.platform }} branch: ${{ github.ref }} build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} diff --git a/.github/workflows/pr_conan_build.yml b/.github/workflows/pr_conan_build.yml index 49ee0395..935f0cc5 100644 --- a/.github/workflows/pr_conan_build.yml +++ b/.github/workflows/pr_conan_build.yml @@ -11,16 +11,25 @@ jobs: strategy: fail-fast: false matrix: + platform: ["ubuntu-22.04", "ubuntu-20.04"] build-type: ["Debug", "Release"] malloc-impl: ["libc", "tcmalloc"] prerelease: ["True", "False"] exclude: - build-type: Debug - malloc-impl: tcmalloc - - build-type: Release - malloc-impl: libc + platform: ubuntu-20.04 + - malloc-impl: tcmalloc + platform: ubuntu-20.04 + - malloc-impl: libc + prerelease: "False" + - malloc-impl: libc + build-type: Release + platform: ubuntu-22.04 + - prerelease: "False" + build-type: Debug uses: ./.github/workflows/build_dependencies.yml with: + platform: ${{ matrix.platform }} branch: ${{ github.ref }} build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} diff --git a/test_package/test_package.cpp b/test_package/test_package.cpp index f1fcb225..dc82c6fe 100644 --- a/test_package/test_package.cpp +++ b/test_package/test_package.cpp @@ -22,24 +22,20 @@ int main(int argc, char** argv) { LOGERROR("Error"); LOGCRITICAL("Critical"); - auto j_thread = std::jthread([](std::stop_token stoken) { - example_decl(); - while (!stoken.stop_requested()) { - LOGWARNMOD(my_module, "Sleeping..."); - std::this_thread::sleep_for(1500ms); - } + auto _thread = std::thread([]() { + LOGWARNMOD(my_module, "Sleeping..."); + std::this_thread::sleep_for(1500ms); LOGINFOMOD(my_module, "Waking..."); std::this_thread::sleep_for(1500ms); }); - sisl::name_thread(j_thread, "example_decl"); + sisl::name_thread(_thread, "example_decl"); std::this_thread::sleep_for(300ms); - auto stop_source = j_thread.get_stop_source(); auto custom_logger = sisl::logging::CreateCustomLogger("test_package", "_custom", false /*stdout*/, true /*stderr*/); LOGINFOMOD_USING_LOGGER(my_module, custom_logger, "hello world"); DEBUG_ASSERT(true, "Always True"); - RELEASE_ASSERT(stop_source.request_stop(), "Should be!"); + _thread.join(); return 0; } From 628fc5140f054a7c964a3b555c32392d977cd9aa Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 15 Jun 2023 19:15:50 -0600 Subject: [PATCH 287/385] Cache only if dirty pkgs. (#129) --- .github/workflows/build_dependencies.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 5bb9feac..bdae86ec 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -145,8 +145,13 @@ jobs: if: ${{ inputs.testing == 'True' }} - name: Cleanup + id: cleanup run: | - dirty_pkgs=$(ls -1d ~/.conan/data/*/*/*/*/build | sed 's,.*data/,,') + dirty_pkgs=$(ls -1d ~/.conan/data/*/*/*/*/build 2>/dev/null | sed 's,.*data/,,') + if [ -z "${dirty_pkgs}" ]; then + dirty_pkgs="nothing" + fi; + echo "dirty_pkgs=${dirty_pkgs}" >> "$GITHUB_OUTPUT" dirty_pkgs_v=$(echo "${dirty_pkgs}" | cut -d'/' -f1,2 | paste -sd',' - -) echo "::info:: Caching: ${dirty_pkgs_v}" dirty_pkgs_d=$(echo "${dirty_pkgs}" | cut -d'/' -f1 | paste -sd'|' - -) @@ -162,4 +167,4 @@ jobs: path: | ~/.conan/data key: SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} - if: ${{ github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + if: ${{ github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' && steps.cleanup.outputs.dirty_pkgs != 'nothing' }} From 7f0734334cf4b2a50fd3c3e95fca4498c1dd18c2 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 15 Jun 2023 20:56:08 -0600 Subject: [PATCH 288/385] Do not use restore keys. --- .github/workflows/build_dependencies.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 1b91acda..327ab273 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -95,9 +95,7 @@ jobs: with: path: | ~/.conan/data - key: SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} - restore-keys: | - SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease == 'True' && 'True-' || '' }} + key: Sisl8Deps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} - name: Setup Python uses: actions/setup-python@v3 @@ -167,5 +165,5 @@ jobs: with: path: | ~/.conan/data - key: SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} + key: Sisl8Deps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} if: ${{ github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' && steps.cleanup.outputs.dirty_pkgs != 'nothing' }} From 4730c8424147bd5e85b302a49446ed3a15abc6c7 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 15 Jun 2023 21:01:08 -0600 Subject: [PATCH 289/385] No restore keys. (#131) --- .github/workflows/build_dependencies.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index bdae86ec..97a893f8 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -94,8 +94,6 @@ jobs: path: | ~/.conan/data key: SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} - restore-keys: | - SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease == 'True' && 'True-' || '' }} - name: Setup Python uses: actions/setup-python@v3 From 6bf92260db535541490ece21dbe6b8e09de65171 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 15 Jun 2023 21:46:14 -0600 Subject: [PATCH 290/385] Fix output --- .github/workflows/build_dependencies.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 97a893f8..2bac6799 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -147,15 +147,15 @@ jobs: run: | dirty_pkgs=$(ls -1d ~/.conan/data/*/*/*/*/build 2>/dev/null | sed 's,.*data/,,') if [ -z "${dirty_pkgs}" ]; then - dirty_pkgs="nothing" + dirty_pkgs="nothing/0" fi; - echo "dirty_pkgs=${dirty_pkgs}" >> "$GITHUB_OUTPUT" dirty_pkgs_v=$(echo "${dirty_pkgs}" | cut -d'/' -f1,2 | paste -sd',' - -) echo "::info:: Caching: ${dirty_pkgs_v}" dirty_pkgs_d=$(echo "${dirty_pkgs}" | cut -d'/' -f1 | paste -sd'|' - -) ls -1d ~/.conan/data/* | grep -Ev "(${dirty_pkgs_d})" | xargs rm -rf rm -rf ~/.conan/data/*/*/*/*/build rm -rf ~/.conan/data/*/*/*/*/source + echo "dirty_pkgs=${dirty_pkgs_d}" >> "$GITHUB_OUTPUT" if: ${{ steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Save Sisl Cache From 429b6c1deeb181606079f9675a07882df1ad28ea Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 15 Jun 2023 22:36:50 -0600 Subject: [PATCH 291/385] Have to use major prefix for cache. --- .github/workflows/build_dependencies.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 2bac6799..06a3fea4 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -93,7 +93,7 @@ jobs: with: path: | ~/.conan/data - key: SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} + key: SislDeps8-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} - name: Setup Python uses: actions/setup-python@v3 @@ -164,5 +164,5 @@ jobs: with: path: | ~/.conan/data - key: SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} + key: SislDeps8-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} if: ${{ github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' && steps.cleanup.outputs.dirty_pkgs != 'nothing' }} From fd8b16b73778ed5aefa933c14709d94acc5794e6 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 16 Jun 2023 09:53:09 -0600 Subject: [PATCH 292/385] Fixup cache from 3rd party. (#132) --- .github/workflows/build_dependencies.yml | 43 ++++++++++++------------ test_package/conanfile.py | 2 +- 2 files changed, 22 insertions(+), 23 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 06a3fea4..9e0a48e7 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -122,6 +122,7 @@ jobs: if: ${{ inputs.platform == 'ubuntu-22.04' && ( inputs.testing == 'True' || steps.restore-cache-sisl.outputs.cache-hit != 'true' ) }} - name: Build Cache + id: build_cache run: | conan install \ -o prerelease=${{ inputs.prerelease }} \ @@ -129,30 +130,17 @@ jobs: -s build_type=${{ inputs.build-type }} \ --build missing \ deps/sisl - if: ${{ steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - - - name: Test Package - run: | - conan create \ - -o sisl:prerelease=${{ inputs.prerelease }} \ - -o sisl:malloc_impl=${{ inputs.malloc-impl }} \ - -s build_type=${{ inputs.build-type }} \ - --build missing \ - deps/sisl - conan remove -f sisl - if: ${{ inputs.testing == 'True' }} - - - name: Cleanup - id: cleanup - run: | + dep_pkgs=$(ls -1d deps/sisl/3rd_party/* 2>/dev/null | cut -d'/' -f4 | paste -sd'|' - -) + if [ -z "${dep_pkgs}" ]; then + dep_pkgs="no_3rd_party" + fi dirty_pkgs=$(ls -1d ~/.conan/data/*/*/*/*/build 2>/dev/null | sed 's,.*data/,,') if [ -z "${dirty_pkgs}" ]; then - dirty_pkgs="nothing/0" - fi; - dirty_pkgs_v=$(echo "${dirty_pkgs}" | cut -d'/' -f1,2 | paste -sd',' - -) - echo "::info:: Caching: ${dirty_pkgs_v}" + dirty_pkgs="no_public/0" + fi dirty_pkgs_d=$(echo "${dirty_pkgs}" | cut -d'/' -f1 | paste -sd'|' - -) - ls -1d ~/.conan/data/* | grep -Ev "(${dirty_pkgs_d})" | xargs rm -rf + echo "::info:: Caching: ${dirty_pkgs_d}|${dep_pkgs}" + ls -1d ~/.conan/data/* | grep -Ev "(${dirty_pkgs_d}|${dep_pkgs})" | xargs rm -rf rm -rf ~/.conan/data/*/*/*/*/build rm -rf ~/.conan/data/*/*/*/*/source echo "dirty_pkgs=${dirty_pkgs_d}" >> "$GITHUB_OUTPUT" @@ -165,4 +153,15 @@ jobs: path: | ~/.conan/data key: SislDeps8-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} - if: ${{ github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' && steps.cleanup.outputs.dirty_pkgs != 'nothing' }} + if: ${{ github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' && steps.build_cache.outputs.dirty_pkgs != 'nothing' }} + + - name: Create and test Package + run: | + conan create \ + -o sisl:prerelease=${{ inputs.prerelease }} \ + -o sisl:malloc_impl=${{ inputs.malloc-impl }} \ + -s build_type=${{ inputs.build-type }} \ + --build missing \ + deps/sisl + conan remove -f sisl + if: ${{ inputs.testing == 'True' }} diff --git a/test_package/conanfile.py b/test_package/conanfile.py index 9ebf2674..64d9250d 100644 --- a/test_package/conanfile.py +++ b/test_package/conanfile.py @@ -9,7 +9,7 @@ class TestPackageConan(ConanFile): def build(self): cmake = CMake(self) - cmake.configure() + cmake.configure(defs={'CMAKE_EXPORT_COMPILE_COMMANDS': 'ON'}) cmake.build() def test(self): From 9fcb61086d7cbfaa4d106f692a6108dcfa71ae27 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 16 Jun 2023 10:53:26 -0600 Subject: [PATCH 293/385] Restore similar cache if testing anyways. (#135) --- .github/workflows/build_dependencies.yml | 19 +++++++++++++++---- conanfile.py | 2 +- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 9e0a48e7..d447ada8 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -95,6 +95,15 @@ jobs: ~/.conan/data key: SislDeps8-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} + - name: Restore Testing Cache + id: restore-cache-testing-sisl + uses: actions/cache/restore@v3 + with: + path: | + ~/.conan/data + key: SislDeps8-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}- + if: ${{ github.event_name == 'pull_request' && inputs.testing == 'True' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + - name: Setup Python uses: actions/setup-python@v3 with: @@ -122,7 +131,6 @@ jobs: if: ${{ inputs.platform == 'ubuntu-22.04' && ( inputs.testing == 'True' || steps.restore-cache-sisl.outputs.cache-hit != 'true' ) }} - name: Build Cache - id: build_cache run: | conan install \ -o prerelease=${{ inputs.prerelease }} \ @@ -130,6 +138,10 @@ jobs: -s build_type=${{ inputs.build-type }} \ --build missing \ deps/sisl + if: ${{ steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + + - name: Clean Package Cache + run: | dep_pkgs=$(ls -1d deps/sisl/3rd_party/* 2>/dev/null | cut -d'/' -f4 | paste -sd'|' - -) if [ -z "${dep_pkgs}" ]; then dep_pkgs="no_3rd_party" @@ -143,8 +155,7 @@ jobs: ls -1d ~/.conan/data/* | grep -Ev "(${dirty_pkgs_d}|${dep_pkgs})" | xargs rm -rf rm -rf ~/.conan/data/*/*/*/*/build rm -rf ~/.conan/data/*/*/*/*/source - echo "dirty_pkgs=${dirty_pkgs_d}" >> "$GITHUB_OUTPUT" - if: ${{ steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + if: ${{ github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Save Sisl Cache id: save-cache-sisl @@ -153,7 +164,7 @@ jobs: path: | ~/.conan/data key: SislDeps8-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} - if: ${{ github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' && steps.build_cache.outputs.dirty_pkgs != 'nothing' }} + if: ${{ github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Create and test Package run: | diff --git a/conanfile.py b/conanfile.py index 5c95be0d..172c655e 100644 --- a/conanfile.py +++ b/conanfile.py @@ -4,7 +4,7 @@ from conan.tools.build import check_min_cppstd from conans import CMake -required_conan_version = ">=1.50.0" +required_conan_version = ">=1.52.0" class SISLConan(ConanFile): name = "sisl" From 86c1d1915d4035d2896eb7842d4a8b91c9be6642 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sat, 17 Jun 2023 09:04:02 -0700 Subject: [PATCH 294/385] Added custom folly. --- 3rd_party/folly/CMakeLists.txt | 7 + 3rd_party/folly/conandata.yml | 58 ++++ 3rd_party/folly/conanfile.py | 307 ++++++++++++++++++ .../folly/patches/0001-find-packages.patch | 93 ++++++ .../folly/patches/0002-compiler-flags.patch | 24 ++ .../folly/patches/0003-boost-shared-ptr.patch | 24 ++ .../patches/0004-disable-posix-names.patch | 27 ++ .../folly/patches/0005-include-atomic.patch | 12 + .../folly/patches/0006-duplicate-hash.patch | 13 + .../folly/patches/0007-allow-builtins.patch | 128 ++++++++ .../folly/patches/0008-find-packages.patch | 73 +++++ .../patches/0009-ill-formed-atomic-copy.patch | 13 + .../folly/patches/0010-duplicate-hash.patch | 13 + .../patches/0011-disable-logger-example.patch | 12 + .../folly/patches/0012-compiler-flags.patch | 24 ++ .../folly/patches/0013-include-bit.patch | 13 + 3rd_party/folly/patches/0014-find-librt.patch | 18 + .../0015-benchmark-format-macros.patch | 15 + .../folly/patches/0016-find-packages.patch | 80 +++++ .../folly/patches/0017-compiler-flags.patch | 24 ++ 3rd_party/folly/patches/0018-find-glog.patch | 16 + .../folly/patches/0019-exclude-example.patch | 12 + .../folly/patches/0020-include-ssizet.patch | 12 + .../folly/patches/0021-typedef-clockid.patch | 12 + .../patches/0022-fix-windows-minmax.patch | 12 + .../0023-fix-safe-check-sanitize.patch | 16 + 3rd_party/folly/test_package/CMakeLists.txt | 16 + 3rd_party/folly/test_package/conanfile.py | 31 ++ 3rd_party/folly/test_package/test_package.cpp | 29 ++ .../folly/test_v1_package/CMakeLists.txt | 17 + 3rd_party/folly/test_v1_package/conanfile.py | 18 + .../folly/test_v1_package/test_package.cpp | 29 ++ 32 files changed, 1198 insertions(+) create mode 100644 3rd_party/folly/CMakeLists.txt create mode 100644 3rd_party/folly/conandata.yml create mode 100755 3rd_party/folly/conanfile.py create mode 100644 3rd_party/folly/patches/0001-find-packages.patch create mode 100644 3rd_party/folly/patches/0002-compiler-flags.patch create mode 100644 3rd_party/folly/patches/0003-boost-shared-ptr.patch create mode 100644 3rd_party/folly/patches/0004-disable-posix-names.patch create mode 100644 3rd_party/folly/patches/0005-include-atomic.patch create mode 100644 3rd_party/folly/patches/0006-duplicate-hash.patch create mode 100644 3rd_party/folly/patches/0007-allow-builtins.patch create mode 100644 3rd_party/folly/patches/0008-find-packages.patch create mode 100644 3rd_party/folly/patches/0009-ill-formed-atomic-copy.patch create mode 100644 3rd_party/folly/patches/0010-duplicate-hash.patch create mode 100644 3rd_party/folly/patches/0011-disable-logger-example.patch create mode 100644 3rd_party/folly/patches/0012-compiler-flags.patch create mode 100644 3rd_party/folly/patches/0013-include-bit.patch create mode 100644 3rd_party/folly/patches/0014-find-librt.patch create mode 100644 3rd_party/folly/patches/0015-benchmark-format-macros.patch create mode 100644 3rd_party/folly/patches/0016-find-packages.patch create mode 100644 3rd_party/folly/patches/0017-compiler-flags.patch create mode 100644 3rd_party/folly/patches/0018-find-glog.patch create mode 100644 3rd_party/folly/patches/0019-exclude-example.patch create mode 100644 3rd_party/folly/patches/0020-include-ssizet.patch create mode 100644 3rd_party/folly/patches/0021-typedef-clockid.patch create mode 100644 3rd_party/folly/patches/0022-fix-windows-minmax.patch create mode 100644 3rd_party/folly/patches/0023-fix-safe-check-sanitize.patch create mode 100644 3rd_party/folly/test_package/CMakeLists.txt create mode 100644 3rd_party/folly/test_package/conanfile.py create mode 100644 3rd_party/folly/test_package/test_package.cpp create mode 100644 3rd_party/folly/test_v1_package/CMakeLists.txt create mode 100644 3rd_party/folly/test_v1_package/conanfile.py create mode 100644 3rd_party/folly/test_v1_package/test_package.cpp diff --git a/3rd_party/folly/CMakeLists.txt b/3rd_party/folly/CMakeLists.txt new file mode 100644 index 00000000..61f3d3b0 --- /dev/null +++ b/3rd_party/folly/CMakeLists.txt @@ -0,0 +1,7 @@ +cmake_minimum_required(VERSION 3.1) +project(cmake_wrapper) + +include(conanbuildinfo.cmake) +conan_basic_setup(KEEP_RPATHS) + +add_subdirectory("source_subfolder") diff --git a/3rd_party/folly/conandata.yml b/3rd_party/folly/conandata.yml new file mode 100644 index 00000000..75a25a20 --- /dev/null +++ b/3rd_party/folly/conandata.yml @@ -0,0 +1,58 @@ +sources: + "2019.10.21.00": + url: "https://github.com/facebook/folly/archive/v2019.10.21.00.tar.gz" + sha256: "6efcc2b2090691a9fe3d339c433d102d6399bbdc6dc4893080d59f15f648f393" + "2020.08.10.00": + url: "https://github.com/facebook/folly/archive/v2020.08.10.00.tar.gz" + sha256: "e81140d04a4e89e3f848e528466a9b3d3ae37d7eeb9e65467fca50d70918eef6" + "2022.01.31.00": + url: "https://github.com/facebook/folly/archive/v2022.01.31.00.tar.gz" + sha256: "d764b9a7832d967bb7cfea4bcda15d650315aa4d559fde1da2a52b015cd88b9c" +patches: + "2019.10.21.00": + - patch_file: "patches/0001-find-packages.patch" + base_path: "source_subfolder" + - patch_file: "patches/0002-compiler-flags.patch" + base_path: "source_subfolder" + - patch_file: "patches/0003-boost-shared-ptr.patch" + base_path: "source_subfolder" + - patch_file: "patches/0004-disable-posix-names.patch" + base_path: "source_subfolder" + - patch_file: "patches/0005-include-atomic.patch" + base_path: "source_subfolder" + - patch_file: "patches/0006-duplicate-hash.patch" + base_path: "source_subfolder" + - patch_file: "patches/0007-allow-builtins.patch" + base_path: "source_subfolder" + - patch_file: "patches/0013-include-bit.patch" + base_path: "source_subfolder" + - patch_file: "patches/0020-include-ssizet.patch" + base_path: "source_subfolder" + "2020.08.10.00": + - patch_file: "patches/0008-find-packages.patch" + base_path: "source_subfolder" + - patch_file: "patches/0009-ill-formed-atomic-copy.patch" + base_path: "source_subfolder" + - patch_file: "patches/0010-duplicate-hash.patch" + base_path: "source_subfolder" + - patch_file: "patches/0011-disable-logger-example.patch" + base_path: "source_subfolder" + - patch_file: "patches/0012-compiler-flags.patch" + base_path: "source_subfolder" + - patch_file: "patches/0014-find-librt.patch" + base_path: "source_subfolder" + - patch_file: "patches/0015-benchmark-format-macros.patch" + base_path: "source_subfolder" + "2022.01.31.00": + - patch_file: "patches/0016-find-packages.patch" + base_path: "source_subfolder" + - patch_file: "patches/0017-compiler-flags.patch" + base_path: "source_subfolder" + - patch_file: "patches/0018-find-glog.patch" + base_path: "source_subfolder" + - patch_file: "patches/0019-exclude-example.patch" + base_path: "source_subfolder" + - patch_file: "patches/0022-fix-windows-minmax.patch" + base_path: "source_subfolder" + - patch_file: "patches/0023-fix-safe-check-sanitize.patch" + base_path: "source_subfolder" diff --git a/3rd_party/folly/conanfile.py b/3rd_party/folly/conanfile.py new file mode 100755 index 00000000..06dc6965 --- /dev/null +++ b/3rd_party/folly/conanfile.py @@ -0,0 +1,307 @@ +from conan.tools.microsoft import is_msvc, msvc_runtime_flag +from conan.tools.build import can_run +from conan.tools.scm import Version +from conan.tools import files +from conan import ConanFile +from conans import CMake, tools +from conan.errors import ConanInvalidConfiguration +import functools +import os + +required_conan_version = ">=1.45.0" + + +class FollyConan(ConanFile): + name = "folly" + description = "An open-source C++ components library developed and used at Facebook" + topics = ("facebook", "components", "core", "efficiency") + url = "https://github.com/conan-io/conan-center-index" + homepage = "https://github.com/facebook/folly" + license = "Apache-2.0" + + settings = "os", "arch", "compiler", "build_type" + options = { + "shared": [True, False], + "fPIC": [True, False], + "use_sse4_2" : [True, False], + } + default_options = { + "shared": False, + "fPIC": True, + "use_sse4_2" : False + } + + generators = "cmake", "cmake_find_package" + + @property + def _source_subfolder(self): + return "source_subfolder" + + @property + def _minimum_cpp_standard(self): + return 17 if Version(self.version) >= "2022.01.31.00" else 14 + + @property + def _minimum_compilers_version(self): + return { + "Visual Studio": "15", + "gcc": "5", + "clang": "6", + "apple-clang": "8", + } if self._minimum_cpp_standard == 14 else { + "gcc": "7", + "Visual Studio": "16", + "clang": "6", + "apple-clang": "10", + } + + def export_sources(self): + self.copy("CMakeLists.txt") + for patch in self.conan_data.get("patches", {}).get(self.version, []): + self.copy(patch["patch_file"]) + + + def config_options(self): + if self.settings.os == "Windows": + del self.options.fPIC + + if str(self.settings.arch) not in ['x86', 'x86_64']: + del self.options.use_sse4_2 + + def configure(self): + if self.options.shared: + del self.options.fPIC + + def requirements(self): + self.requires("boost/1.78.0") + self.requires("bzip2/1.0.8") + self.requires("double-conversion/3.2.0") + self.requires("gflags/2.2.2") + self.requires("glog/0.4.0") + self.requires("libevent/2.1.12") + self.requires("openssl/1.1.1q") + self.requires("lz4/1.9.3") + self.requires("snappy/1.1.9") + self.requires("zlib/1.2.12") + self.requires("zstd/1.5.2") + if not is_msvc(self): + self.requires("libdwarf/20191104") + self.requires("libsodium/1.0.18") + self.requires("xz_utils/5.2.5") + # FIXME: Causing compilation issues on clang: self.requires("jemalloc/5.2.1") + if self.settings.os == "Linux": + self.requires("libiberty/9.1.0") + self.requires("libunwind/1.5.0") + if Version(self.version) >= "2020.08.10.00": + self.requires("fmt/7.1.3") + + @property + def _required_boost_components(self): + return ["context", "filesystem", "program_options", "regex", "system", "thread"] + + def validate(self): + if self.settings.compiler.get_safe("cppstd"): + tools.check_min_cppstd(self, self._minimum_cpp_standard) + min_version = self._minimum_compilers_version.get(str(self.settings.compiler)) + if not min_version: + self.output.warn("{} recipe lacks information about the {} compiler support.".format(self.name, self.settings.compiler)) + else: + if Version(self.settings.compiler.version) < min_version: + raise ConanInvalidConfiguration("{} requires C++{} support. The current compiler {} {} does not support it.".format( + self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version)) + + if Version(self.version) < "2022.01.31.00" and self.settings.os != "Linux": + raise ConanInvalidConfiguration("Conan support for non-Linux platforms starts with Folly version 2022.01.31.00") + + if self.settings.os == "Macos" and self.settings.arch != "x86_64": + raise ConanInvalidConfiguration("Conan currently requires a 64bit target architecture for Folly on Macos") + + if self.settings.os == "Windows" and self.settings.arch != "x86_64": + raise ConanInvalidConfiguration("Folly requires a 64bit target architecture on Windows") + + if self.settings.os in ["Macos", "Windows"] and self.options.shared: + raise ConanInvalidConfiguration("Folly could not be built on {} as shared library".format(self.settings.os)) + + if Version(self.version) == "2020.08.10.00" and self.settings.compiler == "clang" and self.options.shared: + raise ConanInvalidConfiguration("Folly could not be built by clang as a shared library") + + if self.options["boost"].header_only: + raise ConanInvalidConfiguration("Folly could not be built with a header only Boost") + + miss_boost_required_comp = any(getattr(self.options["boost"], "without_{}".format(boost_comp), True) for boost_comp in self._required_boost_components) + if miss_boost_required_comp: + raise ConanInvalidConfiguration("Folly requires these boost components: {}".format(", ".join(self._required_boost_components))) + + min_version = self._minimum_compilers_version.get(str(self.settings.compiler)) + if not min_version: + self.output.warn("{} recipe lacks information about the {} compiler support.".format(self.name, self.settings.compiler)) + else: + if Version(self.settings.compiler.version) < min_version: + raise ConanInvalidConfiguration("{} requires C++{} support. The current compiler {} {} does not support it.".format( + self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version)) + + if self.options.get_safe("use_sse4_2") and str(self.settings.arch) not in ['x86', 'x86_64']: + raise ConanInvalidConfiguration(f"{self.ref} can use the option use_sse4_2 only on x86 and x86_64 archs.") + + # FIXME: Freeze max. CMake version at 3.16.2 to fix the Linux build + def build_requirements(self): + self.build_requires("cmake/3.16.9") + + def source(self): + files.get(self, **self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True) + + @functools.lru_cache(1) + def _configure_cmake(self): + cmake = CMake(self) + if can_run(self): + cmake.definitions["FOLLY_HAVE_UNALIGNED_ACCESS_EXITCODE"] = "0" + cmake.definitions["FOLLY_HAVE_UNALIGNED_ACCESS_EXITCODE__TRYRUN_OUTPUT"] = "" + cmake.definitions["FOLLY_HAVE_LINUX_VDSO_EXITCODE"] = "0" + cmake.definitions["FOLLY_HAVE_LINUX_VDSO_EXITCODE__TRYRUN_OUTPUT"] = "" + cmake.definitions["FOLLY_HAVE_WCHAR_SUPPORT_EXITCODE"] = "0" + cmake.definitions["FOLLY_HAVE_WCHAR_SUPPORT_EXITCODE__TRYRUN_OUTPUT"] = "" + cmake.definitions["HAVE_VSNPRINTF_ERRORS_EXITCODE"] = "0" + cmake.definitions["HAVE_VSNPRINTF_ERRORS_EXITCODE__TRYRUN_OUTPUT"] = "" + + if self.options.get_safe("use_sse4_2") and str(self.settings.arch) in ['x86', 'x86_64']: + # in folly, if simd >=sse4.2, we also needs -mfma flag to avoid compiling error. + if not is_msvc(self): + cmake.definitions["CMAKE_C_FLAGS"] = "-mfma" + cmake.definitions["CMAKE_CXX_FLAGS"] = "-mfma" + else: + cmake.definitions["CMAKE_C_FLAGS"] = "/arch:FMA" + cmake.definitions["CMAKE_CXX_FLAGS"] = "/arch:FMA" + + cmake.definitions["CMAKE_POSITION_INDEPENDENT_CODE"] = self.options.get_safe("fPIC", True) + + cxx_std_flag = tools.cppstd_flag(self.settings) + cxx_std_value = cxx_std_flag.split('=')[1] if cxx_std_flag else "c++{}".format(self._minimum_cpp_standard) + cmake.definitions["CXX_STD"] = cxx_std_value + if is_msvc: + cmake.definitions["MSVC_LANGUAGE_VERSION"] = cxx_std_value + cmake.definitions["MSVC_ENABLE_ALL_WARNINGS"] = False + cmake.definitions["MSVC_USE_STATIC_RUNTIME"] = "MT" in msvc_runtime_flag(self) + cmake.configure() + return cmake + + + def build(self): + for patch in self.conan_data.get("patches", {}).get(self.version, []): + tools.patch(**patch) + cmake = self._configure_cmake() + cmake.build() + + def package(self): + self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder) + cmake = self._configure_cmake() + cmake.install() + files.rmdir(self, os.path.join(self.package_folder, "lib", "cmake")) + files.rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig")) + + def package_info(self): + self.cpp_info.set_property("cmake_file_name", "folly") + self.cpp_info.set_property("cmake_target_name", "Folly::folly") + self.cpp_info.set_property("pkg_config_name", "libfolly") + + # TODO: back to global scope in conan v2 once cmake_find_package_* generators removed + if Version(self.version) == "2019.10.21.00": + self.cpp_info.components["libfolly"].libs = [ + "follybenchmark", + "folly_test_util", + "folly" + ] + elif Version(self.version) >= "2020.08.10.00": + if self.settings.os == "Linux": + self.cpp_info.components["libfolly"].libs = [ + "folly_exception_counter", + "folly_exception_tracer", + "folly_exception_tracer_base", + "folly_test_util", + "follybenchmark", + "folly" + ] + else: + self.cpp_info.components["libfolly"].libs = [ + "folly_test_util", + "follybenchmark", + "folly" + ] + + self.cpp_info.components["libfolly"].requires = [ + "boost::boost", + "bzip2::bzip2", + "double-conversion::double-conversion", + "gflags::gflags", + "glog::glog", + "libevent::libevent", + "lz4::lz4", + "openssl::openssl", + "snappy::snappy", + "zlib::zlib", + "zstd::zstd", + "libsodium::libsodium", + "xz_utils::xz_utils" + ] + if not is_msvc(self): + self.cpp_info.components["libfolly"].requires.append("libdwarf::libdwarf") + if self.settings.os == "Linux": + self.cpp_info.components["libfolly"].requires.extend(["libiberty::libiberty", "libunwind::libunwind"]) + self.cpp_info.components["libfolly"].system_libs.extend(["pthread", "dl", "rt"]) + + if Version(self.version) >= "2020.08.10.00": + self.cpp_info.components["libfolly"].requires.append("fmt::fmt") + if self.settings.os == "Linux": + self.cpp_info.components["libfolly"].defines.extend(["FOLLY_HAVE_ELF", "FOLLY_HAVE_DWARF"]) + + elif self.settings.os == "Windows": + self.cpp_info.components["libfolly"].system_libs.extend(["ws2_32", "iphlpapi", "crypt32"]) + + if (self.settings.os == "Linux" and self.settings.compiler == "clang" and + self.settings.compiler.libcxx == "libstdc++") or \ + (self.settings.os == "Macos" and self.settings.compiler == "apple-clang" and + Version(self.settings.compiler.version.value) == "9.0" and self.settings.compiler.libcxx == "libc++"): + self.cpp_info.components["libfolly"].system_libs.append("atomic") + + if self.settings.os == "Macos" and self.settings.compiler == "apple-clang" and Version(self.settings.compiler.version.value) >= "11.0": + self.cpp_info.components["libfolly"].system_libs.append("c++abi") + + if self.options.get_safe("use_sse4_2") and str(self.settings.arch) in ['x86', 'x86_64']: + self.cpp_info.components["libfolly"].defines = ["FOLLY_SSE=4", "FOLLY_SSE_MINOR=2"] + + # TODO: to remove in conan v2 once cmake_find_package_* & pkg_config generators removed + self.cpp_info.filenames["cmake_find_package"] = "folly" + self.cpp_info.filenames["cmake_find_package_multi"] = "folly" + self.cpp_info.names["cmake_find_package"] = "Folly" + self.cpp_info.names["cmake_find_package_multi"] = "Folly" + self.cpp_info.names["pkg_config"] = "libfolly" + self.cpp_info.components["libfolly"].names["cmake_find_package"] = "folly" + self.cpp_info.components["libfolly"].names["cmake_find_package_multi"] = "folly" + self.cpp_info.components["libfolly"].set_property("cmake_target_name", "Folly::folly") + self.cpp_info.components["libfolly"].set_property("pkg_config_name", "libfolly") + + if Version(self.version) >= "2019.10.21.00": + self.cpp_info.components["follybenchmark"].set_property("cmake_target_name", "Folly::follybenchmark") + self.cpp_info.components["follybenchmark"].set_property("pkg_config_name", "libfollybenchmark") + self.cpp_info.components["follybenchmark"].libs = ["follybenchmark"] + self.cpp_info.components["follybenchmark"].requires = ["libfolly"] + + self.cpp_info.components["folly_test_util"].set_property("cmake_target_name", "Folly::folly_test_util") + self.cpp_info.components["folly_test_util"].set_property("pkg_config_name", "libfolly_test_util") + self.cpp_info.components["folly_test_util"].libs = ["folly_test_util"] + self.cpp_info.components["folly_test_util"].requires = ["libfolly"] + + if Version(self.version) >= "2020.08.10.00" and self.settings.os == "Linux": + self.cpp_info.components["folly_exception_tracer_base"].set_property("cmake_target_name", "Folly::folly_exception_tracer_base") + self.cpp_info.components["folly_exception_tracer_base"].set_property("pkg_config_name", "libfolly_exception_tracer_base") + self.cpp_info.components["folly_exception_tracer_base"].libs = ["folly_exception_tracer_base"] + self.cpp_info.components["folly_exception_tracer_base"].requires = ["libfolly"] + + self.cpp_info.components["folly_exception_tracer"].set_property("cmake_target_name", "Folly::folly_exception_tracer") + self.cpp_info.components["folly_exception_tracer"].set_property("pkg_config_name", "libfolly_exception_tracer") + self.cpp_info.components["folly_exception_tracer"].libs = ["folly_exception_tracer"] + self.cpp_info.components["folly_exception_tracer"].requires = ["folly_exception_tracer_base"] + + self.cpp_info.components["folly_exception_counter"].set_property("cmake_target_name", "Folly::folly_exception_counter") + self.cpp_info.components["folly_exception_counter"].set_property("pkg_config_name", "libfolly_exception_counter") + self.cpp_info.components["folly_exception_counter"].libs = ["folly_exception_counter"] + self.cpp_info.components["folly_exception_counter"].requires = ["folly_exception_tracer"] diff --git a/3rd_party/folly/patches/0001-find-packages.patch b/3rd_party/folly/patches/0001-find-packages.patch new file mode 100644 index 00000000..4cee77cd --- /dev/null +++ b/3rd_party/folly/patches/0001-find-packages.patch @@ -0,0 +1,93 @@ +diff --git a/CMake/FindLibsodium.cmake b/CMake/FindLibsodium.cmake +index 18d4d0c..2b3cd2a 100644 +--- a/CMake/FindLibsodium.cmake ++++ b/CMake/FindLibsodium.cmake +@@ -15,7 +15,7 @@ + find_path(LIBSODIUM_INCLUDE_DIR NAMES sodium.h) + mark_as_advanced(LIBSODIUM_INCLUDE_DIR) + +-find_library(LIBSODIUM_LIBRARY NAMES sodium) ++find_library(LIBSODIUM_LIBRARY NAMES sodium libsodium PATHS ${CONAN_LIBSODIUM_ROOT}) + mark_as_advanced(LIBSODIUM_LIBRARY) + + include(FindPackageHandleStandardArgs) +diff --git a/CMake/folly-deps.cmake b/CMake/folly-deps.cmake +index 048e1cd..da3ab8e 100644 +--- a/CMake/folly-deps.cmake ++++ b/CMake/folly-deps.cmake +@@ -36,19 +36,19 @@ find_package(DoubleConversion MODULE REQUIRED) + list(APPEND FOLLY_LINK_LIBRARIES ${DOUBLE_CONVERSION_LIBRARY}) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${DOUBLE_CONVERSION_INCLUDE_DIR}) + +-find_package(Gflags MODULE) +-set(FOLLY_HAVE_LIBGFLAGS ${LIBGFLAGS_FOUND}) +-list(APPEND FOLLY_LINK_LIBRARIES ${LIBGFLAGS_LIBRARY}) +-list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBGFLAGS_INCLUDE_DIR}) +-list(APPEND CMAKE_REQUIRED_LIBRARIES ${LIBGFLAGS_LIBRARY}) +-list(APPEND CMAKE_REQUIRED_INCLUDES ${LIBGFLAGS_INCLUDE_DIR}) +- +-find_package(Glog MODULE) ++find_package(gflags MODULE REQUIRED) ++set(FOLLY_HAVE_LIBGFLAGS ${GFLAGS_FOUND}) ++list(APPEND FOLLY_LINK_LIBRARIES ${CONAN_LIBS_GFLAGS}) ++list(APPEND FOLLY_INCLUDE_DIRECTORIES ${CONAN_INCLUDE_DIRS_GFLAGS}) ++list(APPEND CMAKE_REQUIRED_LIBRARIES ${gflags_LIBRARY}) ++list(APPEND CMAKE_REQUIRED_INCLUDES ${gflags_INCLUDE_DIR}) ++ ++find_package(glog MODULE) + set(FOLLY_HAVE_LIBGLOG ${GLOG_FOUND}) + list(APPEND FOLLY_LINK_LIBRARIES ${GLOG_LIBRARY}) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${GLOG_INCLUDE_DIR}) + +-find_package(LibEvent MODULE REQUIRED) ++find_package(Libevent MODULE REQUIRED) + list(APPEND FOLLY_LINK_LIBRARIES ${LIBEVENT_LIB}) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBEVENT_INCLUDE_DIR}) + +diff --git a/build/fbcode_builder/CMake/FindGflags.cmake b/build/fbcode_builder/CMake/FindGflags.cmake +index 246ceac..385605e 100644 +--- a/build/fbcode_builder/CMake/FindGflags.cmake ++++ b/build/fbcode_builder/CMake/FindGflags.cmake +@@ -48,8 +48,13 @@ if (gflags_FOUND) + else() + FIND_PATH(LIBGFLAGS_INCLUDE_DIR gflags/gflags.h) + +- FIND_LIBRARY(LIBGFLAGS_LIBRARY_DEBUG NAMES gflagsd gflags_staticd) +- FIND_LIBRARY(LIBGFLAGS_LIBRARY_RELEASE NAMES gflags gflags_static) ++ if(CMAKE_SYSTEM_NAME STREQUAL "Windows") ++ FIND_LIBRARY(LIBGFLAGS_LIBRARY_DEBUG NAMES gflagsd gflags_nothreads_staticd gflags_nothreads_static_debug PATHS ${CONAN_GFLAGS_ROOT}) ++ FIND_LIBRARY(LIBGFLAGS_LIBRARY_RELEASE NAMES gflags gflags_nothreads_static PATHS ${CONAN_GFLAGS_ROOT}) ++ else() ++ FIND_LIBRARY(LIBGFLAGS_LIBRARY_DEBUG NAMES gflags gflags_nothreads_debug PATHS ${CONAN_GFLAGS_ROOT}) ++ FIND_LIBRARY(LIBGFLAGS_LIBRARY_RELEASE NAMES gflags_nothreads gflags PATHS ${CONAN_GFLAGS_ROOT}) ++ endif() + + INCLUDE(SelectLibraryConfigurations) + SELECT_LIBRARY_CONFIGURATIONS(LIBGFLAGS) +diff --git a/build/fbcode_builder/CMake/FindGlog.cmake b/build/fbcode_builder/CMake/FindGlog.cmake +index a589b2e..15aef75 100644 +--- a/build/fbcode_builder/CMake/FindGlog.cmake ++++ b/build/fbcode_builder/CMake/FindGlog.cmake +@@ -8,8 +8,7 @@ + + include(FindPackageHandleStandardArgs) + +-find_library(GLOG_LIBRARY glog +- PATHS ${GLOG_LIBRARYDIR}) ++find_library(GLOG_LIBRARY glog glogd PATHS ${CONAN_GLOG_ROOT}) + + find_path(GLOG_INCLUDE_DIR glog/logging.h + PATHS ${GLOG_INCLUDEDIR}) +diff --git a/build/fbcode_builder/CMake/FindLibEvent.cmake b/build/fbcode_builder/CMake/FindLibEvent.cmake +index dd11ebd..9ef0807 100644 +--- a/build/fbcode_builder/CMake/FindLibEvent.cmake ++++ b/build/fbcode_builder/CMake/FindLibEvent.cmake +@@ -50,7 +50,7 @@ if (TARGET event) + endif() + else() + find_path(LIBEVENT_INCLUDE_DIR event.h PATHS ${LibEvent_INCLUDE_PATHS}) +- find_library(LIBEVENT_LIB NAMES event PATHS ${LibEvent_LIB_PATHS}) ++ find_library(LIBEVENT_LIB NAMES event libevent PATHS ${CONAN_LIBEVENT_ROOT}) + + if (LIBEVENT_LIB AND LIBEVENT_INCLUDE_DIR) + set(LibEvent_FOUND TRUE) diff --git a/3rd_party/folly/patches/0002-compiler-flags.patch b/3rd_party/folly/patches/0002-compiler-flags.patch new file mode 100644 index 00000000..b9213ff6 --- /dev/null +++ b/3rd_party/folly/patches/0002-compiler-flags.patch @@ -0,0 +1,24 @@ +diff --git a/CMake/FollyCompilerUnix.cmake b/CMake/FollyCompilerUnix.cmake +index 7fba75f..019d30f 100644 +--- a/CMake/FollyCompilerUnix.cmake ++++ b/CMake/FollyCompilerUnix.cmake +@@ -28,9 +28,9 @@ set( + ) + mark_as_advanced(CXX_STD) + +-set(CMAKE_CXX_FLAGS_COMMON "-g -Wall -Wextra") ++set(CMAKE_CXX_FLAGS_COMMON "-Wall -Wextra") + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${CMAKE_CXX_FLAGS_COMMON}") +-set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_COMMON} -O3") ++set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_COMMON}") + + # Note that CMAKE_REQUIRED_FLAGS must be a string, not a list + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -std=${CXX_STD}") +@@ -44,7 +44,6 @@ function(apply_folly_compile_options_to_target THETARGET) + ) + target_compile_options(${THETARGET} + PRIVATE +- -g + -std=${CXX_STD} + -finput-charset=UTF-8 + -fsigned-char diff --git a/3rd_party/folly/patches/0003-boost-shared-ptr.patch b/3rd_party/folly/patches/0003-boost-shared-ptr.patch new file mode 100644 index 00000000..7608d6b2 --- /dev/null +++ b/3rd_party/folly/patches/0003-boost-shared-ptr.patch @@ -0,0 +1,24 @@ +diff --git a/folly/portability/PThread.cpp b/folly/portability/PThread.cpp +index f8cd6d4..0908668 100644 +--- a/folly/portability/PThread.cpp ++++ b/folly/portability/PThread.cpp +@@ -18,7 +18,9 @@ + + #if !FOLLY_HAVE_PTHREAD && defined(_WIN32) + #include // @manual +- ++#include ++#include ++#include + #include + + #include +@@ -683,7 +685,7 @@ int pthread_setspecific(pthread_key_t key, const void* value) { + // function, which we don't want to do. + boost::detail::set_tss_data( + realKey, +- boost::shared_ptr(), ++ 0,0, + const_cast(value), + false); + return 0; diff --git a/3rd_party/folly/patches/0004-disable-posix-names.patch b/3rd_party/folly/patches/0004-disable-posix-names.patch new file mode 100644 index 00000000..9efd4e24 --- /dev/null +++ b/3rd_party/folly/patches/0004-disable-posix-names.patch @@ -0,0 +1,27 @@ +diff --git a/folly/portability/Windows.h b/folly/portability/Windows.h +index f7990ca..b22fac5 100644 +--- a/folly/portability/Windows.h ++++ b/folly/portability/Windows.h +@@ -26,16 +26,12 @@ + // These have to be this way because we define our own versions + // of close(), because the normal Windows versions don't handle + // sockets at all. +-#ifndef __STDC__ +-/* nolint */ +-#define __STDC__ 1 +-#include // @manual nolint +-#include // @manual nolint +-#undef __STDC__ +-#else +-#include // @manual nolint +-#include // @manual nolint +-#endif ++#include ++#pragma push_macro("_CRT_INTERNAL_NONSTDC_NAMES") ++#define _CRT_INTERNAL_NONSTDC_NAMES 0 ++#include ++#include ++#pragma pop_macro("_CRT_INTERNAL_NONSTDC_NAMES") + + #if defined(min) || defined(max) + #error Windows.h needs to be included by this header, or else NOMINMAX needs \ diff --git a/3rd_party/folly/patches/0005-include-atomic.patch b/3rd_party/folly/patches/0005-include-atomic.patch new file mode 100644 index 00000000..0eb9382e --- /dev/null +++ b/3rd_party/folly/patches/0005-include-atomic.patch @@ -0,0 +1,12 @@ +diff --git a/folly/portability/PThread.cpp b/folly/portability/PThread.cpp +index 2891c4c..7c98975 100644 +--- a/folly/portability/PThread.cpp ++++ b/folly/portability/PThread.cpp +@@ -30,6 +30,7 @@ + #include + #include + #include ++#include + + #include + #include diff --git a/3rd_party/folly/patches/0006-duplicate-hash.patch b/3rd_party/folly/patches/0006-duplicate-hash.patch new file mode 100644 index 00000000..f8905d00 --- /dev/null +++ b/3rd_party/folly/patches/0006-duplicate-hash.patch @@ -0,0 +1,13 @@ +diff --git a/folly/hash/Hash.h b/folly/hash/Hash.h +index 33fa553..4266cf1 100644 +--- a/folly/hash/Hash.h ++++ b/folly/hash/Hash.h +@@ -730,7 +730,7 @@ struct TupleHasher<0, Ts...> { + + // Custom hash functions. + namespace std { +-#if FOLLY_SUPPLY_MISSING_INT128_TRAITS ++#if 0 + template <> + struct hash<__int128> : folly::detail::integral_hasher<__int128> {}; + diff --git a/3rd_party/folly/patches/0007-allow-builtins.patch b/3rd_party/folly/patches/0007-allow-builtins.patch new file mode 100644 index 00000000..de09722f --- /dev/null +++ b/3rd_party/folly/patches/0007-allow-builtins.patch @@ -0,0 +1,128 @@ +diff --git a/folly/portability/Builtins.h b/folly/portability/Builtins.h +index 971cb8819..e68de4456 100644 +--- a/folly/portability/Builtins.h ++++ b/folly/portability/Builtins.h +@@ -41,7 +41,6 @@ FOLLY_ALWAYS_INLINE void __builtin___clear_cache(char* begin, char* end) { + } + } + +-#if !defined(_MSC_VER) || (_MSC_VER < 1923) + FOLLY_ALWAYS_INLINE int __builtin_clz(unsigned int x) { + unsigned long index; + return int(_BitScanReverse(&index, (unsigned long)x) ? 31 - index : 32); +@@ -93,7 +92,6 @@ FOLLY_ALWAYS_INLINE int __builtin_ctzll(unsigned long long x) { + return int(_BitScanForward64(&index, x) ? index : 64); + } + #endif +-#endif // !defined(_MSC_VER) || (_MSC_VER < 1923) + + FOLLY_ALWAYS_INLINE int __builtin_ffs(int x) { + unsigned long index; +@@ -119,15 +117,12 @@ FOLLY_ALWAYS_INLINE int __builtin_popcount(unsigned int x) { + return int(__popcnt(x)); + } + +-#if !defined(_MSC_VER) || (_MSC_VER < 1923) + FOLLY_ALWAYS_INLINE int __builtin_popcountl(unsigned long x) { + static_assert(sizeof(x) == 4, ""); + return int(__popcnt(x)); + } +-#endif // !defined(_MSC_VER) || (_MSC_VER < 1923) + #endif + +-#if !defined(_MSC_VER) || (_MSC_VER < 1923) + #if defined(_M_IX86) + FOLLY_ALWAYS_INLINE int __builtin_popcountll(unsigned long long x) { + return int(__popcnt((unsigned int)(x >> 32))) + +@@ -138,7 +133,6 @@ FOLLY_ALWAYS_INLINE int __builtin_popcountll(unsigned long long x) { + return int(__popcnt64(x)); + } + #endif +-#endif // !defined(_MSC_VER) || (_MSC_VER < 1923) + + FOLLY_ALWAYS_INLINE void* __builtin_return_address(unsigned int frame) { + // I really hope frame is zero... +-- + +diff --git a/folly/portability/Builtins.h b/folly/portability/Builtins.h +index e68de4456..30caf4003 100644 +--- a/folly/portability/Builtins.h ++++ b/folly/portability/Builtins.h +@@ -16,7 +16,7 @@ + + #pragma once + +-#if defined(_WIN32) && !defined(__clang__) ++#if defined(_WIN32) && !defined(__MINGW32__) && !defined(__clang__) + #include + #include + #include +-- +see https://github.com/facebook/folly/issues/1412 +diff --git a/folly/portability/Builtins.h b/folly/portability/Builtins.h +index 30caf4003..e8ef97266 100644 +--- a/folly/portability/Builtins.h ++++ b/folly/portability/Builtins.h +@@ -22,6 +22,14 @@ + #include + #include + ++// MSVC had added support for __builtin_clz etc. in 16.3 (1923) but it will be ++// removed in 16.8 (1928). ++#if (_MSC_VER >= 1923) && (_MSC_VER < 1928) ++#define FOLLY_DETAILFOLLY_DETAIL_MSC_BUILTIN_SUPPORT 1 ++#else ++#define FOLLY_DETAILFOLLY_DETAIL_MSC_BUILTIN_SUPPORT 0 ++#endif ++ + namespace folly { + namespace portability { + namespace detail { +@@ -41,6 +49,7 @@ FOLLY_ALWAYS_INLINE void __builtin___clear_cache(char* begin, char* end) { + } + } + ++#if !defined(_MSC_VER) || !defined(FOLLY_DETAIL_MSC_BUILTIN_SUPPORT) + FOLLY_ALWAYS_INLINE int __builtin_clz(unsigned int x) { + unsigned long index; + return int(_BitScanReverse(&index, (unsigned long)x) ? 31 - index : 32); +@@ -92,6 +101,7 @@ FOLLY_ALWAYS_INLINE int __builtin_ctzll(unsigned long long x) { + return int(_BitScanForward64(&index, x) ? index : 64); + } + #endif ++#endif // !defined(_MSC_VER) || !defined(FOLLY_DETAIL_MSC_BUILTIN_SUPPORT) + + FOLLY_ALWAYS_INLINE int __builtin_ffs(int x) { + unsigned long index; +@@ -117,12 +127,15 @@ FOLLY_ALWAYS_INLINE int __builtin_popcount(unsigned int x) { + return int(__popcnt(x)); + } + ++#if !defined(_MSC_VER) || !defined(FOLLY_DETAIL_MSC_BUILTIN_SUPPORT) + FOLLY_ALWAYS_INLINE int __builtin_popcountl(unsigned long x) { + static_assert(sizeof(x) == 4, ""); + return int(__popcnt(x)); + } ++#endif // !defined(_MSC_VER) || !defined(FOLLY_DETAIL_MSC_BUILTIN_SUPPORT) + #endif + ++#if !defined(_MSC_VER) || !defined(FOLLY_DETAIL_MSC_BUILTIN_SUPPORT) + #if defined(_M_IX86) + FOLLY_ALWAYS_INLINE int __builtin_popcountll(unsigned long long x) { + return int(__popcnt((unsigned int)(x >> 32))) + +@@ -133,6 +146,7 @@ FOLLY_ALWAYS_INLINE int __builtin_popcountll(unsigned long long x) { + return int(__popcnt64(x)); + } + #endif ++#endif // !defined(_MSC_VER) || !defined(FOLLY_DETAIL_MSC_BUILTIN_SUPPORT) + + FOLLY_ALWAYS_INLINE void* __builtin_return_address(unsigned int frame) { + // I really hope frame is zero... +@@ -141,3 +155,5 @@ FOLLY_ALWAYS_INLINE void* __builtin_return_address(unsigned int frame) { + return _ReturnAddress(); + } + #endif ++ ++#undef FOLLY_DETAIL_MSC_BUILTIN_SUPPORT +-- + diff --git a/3rd_party/folly/patches/0008-find-packages.patch b/3rd_party/folly/patches/0008-find-packages.patch new file mode 100644 index 00000000..3329a684 --- /dev/null +++ b/3rd_party/folly/patches/0008-find-packages.patch @@ -0,0 +1,73 @@ +diff --git a/CMake/folly-deps.cmake b/CMake/folly-deps.cmake +index 3169b972d52..23dc6d509b1 100644 +--- a/CMake/folly-deps.cmake ++++ b/CMake/folly-deps.cmake +@@ -46,11 +46,11 @@ find_package(Boost 1.51.0 MODULE + list(APPEND FOLLY_LINK_LIBRARIES ${Boost_LIBRARIES}) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${Boost_INCLUDE_DIRS}) + +-find_package(DoubleConversion MODULE REQUIRED) ++find_package(double-conversion MODULE REQUIRED) + list(APPEND FOLLY_LINK_LIBRARIES ${DOUBLE_CONVERSION_LIBRARY}) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${DOUBLE_CONVERSION_INCLUDE_DIR}) + +-find_package(Gflags MODULE) ++find_package(gflags MODULE) + set(FOLLY_HAVE_LIBGFLAGS ${LIBGFLAGS_FOUND}) + if(LIBGFLAGS_FOUND) + list(APPEND FOLLY_LINK_LIBRARIES ${LIBGFLAGS_LIBRARY}) +@@ -59,12 +59,12 @@ if(LIBGFLAGS_FOUND) + list(APPEND CMAKE_REQUIRED_INCLUDES ${LIBGFLAGS_INCLUDE_DIR}) + endif() + +-find_package(Glog MODULE) ++find_package(glog MODULE) + set(FOLLY_HAVE_LIBGLOG ${GLOG_FOUND}) + list(APPEND FOLLY_LINK_LIBRARIES ${GLOG_LIBRARY}) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${GLOG_INCLUDE_DIR}) + +-find_package(LibEvent MODULE REQUIRED) ++find_package(Libevent MODULE REQUIRED) + list(APPEND FOLLY_LINK_LIBRARIES ${LIBEVENT_LIB}) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBEVENT_INCLUDE_DIR}) + +@@ -96,14 +96,14 @@ if (LIBLZMA_FOUND) + list(APPEND FOLLY_LINK_LIBRARIES ${LIBLZMA_LIBRARIES}) + endif() + +-find_package(LZ4 MODULE) ++find_package(lz4 MODULE) + set(FOLLY_HAVE_LIBLZ4 ${LZ4_FOUND}) + if (LZ4_FOUND) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LZ4_INCLUDE_DIR}) + list(APPEND FOLLY_LINK_LIBRARIES ${LZ4_LIBRARY}) + endif() + +-find_package(Zstd MODULE) ++find_package(zstd MODULE) + set(FOLLY_HAVE_LIBZSTD ${ZSTD_FOUND}) + if(ZSTD_FOUND) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${ZSTD_INCLUDE_DIR}) +@@ -117,11 +117,11 @@ if (SNAPPY_FOUND) + list(APPEND FOLLY_LINK_LIBRARIES ${SNAPPY_LIBRARY}) + endif() + +-find_package(LibDwarf) ++find_package(libdwarf) + list(APPEND FOLLY_LINK_LIBRARIES ${LIBDWARF_LIBRARIES}) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBDWARF_INCLUDE_DIRS}) + +-find_package(Libiberty) ++find_package(libiberty) + list(APPEND FOLLY_LINK_LIBRARIES ${LIBIBERTY_LIBRARIES}) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBIBERTY_INCLUDE_DIRS}) + +@@ -133,7 +133,7 @@ find_package(LibUring) + list(APPEND FOLLY_LINK_LIBRARIES ${LIBURING_LIBRARIES}) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBURING_INCLUDE_DIRS}) + +-find_package(Libsodium) ++find_package(libsodium) + list(APPEND FOLLY_LINK_LIBRARIES ${LIBSODIUM_LIBRARIES}) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBSODIUM_INCLUDE_DIRS}) + diff --git a/3rd_party/folly/patches/0009-ill-formed-atomic-copy.patch b/3rd_party/folly/patches/0009-ill-formed-atomic-copy.patch new file mode 100644 index 00000000..58f95224 --- /dev/null +++ b/3rd_party/folly/patches/0009-ill-formed-atomic-copy.patch @@ -0,0 +1,13 @@ +diff --git a/folly/fibers/SemaphoreBase.cpp b/folly/fibers/SemaphoreBase.cpp +index 06e9ecc7111..77e2da75c18 100644 +--- a/folly/fibers/SemaphoreBase.cpp ++++ b/folly/fibers/SemaphoreBase.cpp +@@ -170,7 +170,7 @@ namespace { + class FutureWaiter final : public fibers::Baton::Waiter { + public: + explicit FutureWaiter(int64_t tokens) +- : semaphoreWaiter(SemaphoreBase::Waiter(tokens)) { ++ : semaphoreWaiter(tokens) { + semaphoreWaiter.baton.setWaiter(*this); + } + diff --git a/3rd_party/folly/patches/0010-duplicate-hash.patch b/3rd_party/folly/patches/0010-duplicate-hash.patch new file mode 100644 index 00000000..69268c6a --- /dev/null +++ b/3rd_party/folly/patches/0010-duplicate-hash.patch @@ -0,0 +1,13 @@ +diff --git a/folly/hash/Hash.h b/folly/hash/Hash.h +index a8a50e8e8dc..d7a3da8e61f 100644 +--- a/folly/hash/Hash.h ++++ b/folly/hash/Hash.h +@@ -733,7 +733,7 @@ struct TupleHasher<0, Ts...> { + + // Custom hash functions. + namespace std { +-#if FOLLY_SUPPLY_MISSING_INT128_TRAITS ++#if 0 + template <> + struct hash<__int128> : folly::detail::integral_hasher<__int128> {}; + diff --git a/3rd_party/folly/patches/0011-disable-logger-example.patch b/3rd_party/folly/patches/0011-disable-logger-example.patch new file mode 100644 index 00000000..fa209053 --- /dev/null +++ b/3rd_party/folly/patches/0011-disable-logger-example.patch @@ -0,0 +1,12 @@ +diff --git a/folly/CMakeLists.txt b/folly/CMakeLists.txt +index 08de7daf20f..cdc1f03bf46 100644 +--- a/folly/CMakeLists.txt ++++ b/folly/CMakeLists.txt +@@ -27,7 +27,6 @@ install( + ) + + add_subdirectory(experimental/exception_tracer) +-add_subdirectory(logging/example) + + if (PYTHON_EXTENSIONS) + # Create tree of symbolic links in structure required for successful diff --git a/3rd_party/folly/patches/0012-compiler-flags.patch b/3rd_party/folly/patches/0012-compiler-flags.patch new file mode 100644 index 00000000..358500a1 --- /dev/null +++ b/3rd_party/folly/patches/0012-compiler-flags.patch @@ -0,0 +1,24 @@ +diff --git a/CMake/FollyCompilerUnix.cmake b/CMake/FollyCompilerUnix.cmake +index 8dcaf141a3a..200fe8d3798 100644 +--- a/CMake/FollyCompilerUnix.cmake ++++ b/CMake/FollyCompilerUnix.cmake +@@ -28,9 +28,9 @@ set( + ) + mark_as_advanced(CXX_STD) + +-set(CMAKE_CXX_FLAGS_COMMON "-g -Wall -Wextra") ++set(CMAKE_CXX_FLAGS_COMMON "-Wall -Wextra") + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${CMAKE_CXX_FLAGS_COMMON}") +-set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_COMMON} -O3") ++set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_COMMON}") + + # Note that CMAKE_REQUIRED_FLAGS must be a string, not a list + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -std=${CXX_STD}") +@@ -43,7 +43,6 @@ function(apply_folly_compile_options_to_target THETARGET) + ) + target_compile_options(${THETARGET} + PRIVATE +- -g + -std=${CXX_STD} + -finput-charset=UTF-8 + -fsigned-char diff --git a/3rd_party/folly/patches/0013-include-bit.patch b/3rd_party/folly/patches/0013-include-bit.patch new file mode 100644 index 00000000..1a8ac249 --- /dev/null +++ b/3rd_party/folly/patches/0013-include-bit.patch @@ -0,0 +1,13 @@ +--- a/folly/lang/Bits.h ++++ b/folly/lang/Bits.h +@@ -64,6 +64,10 @@ + #include + #include + ++#if __has_include() && __cpp_lib_bit_cast ++#include ++#endif ++ + namespace folly { + + #if __cpp_lib_bit_cast diff --git a/3rd_party/folly/patches/0014-find-librt.patch b/3rd_party/folly/patches/0014-find-librt.patch new file mode 100644 index 00000000..90a1f0f7 --- /dev/null +++ b/3rd_party/folly/patches/0014-find-librt.patch @@ -0,0 +1,18 @@ +diff --git a/CMake/FollyConfigChecks.cmake b/CMake/FollyConfigChecks.cmake +index 6b8b308c7..908d72d51 100644 +--- a/CMake/FollyConfigChecks.cmake ++++ b/CMake/FollyConfigChecks.cmake +@@ -83,6 +83,13 @@ string(REGEX REPLACE + CMAKE_REQUIRED_FLAGS + "${CMAKE_REQUIRED_FLAGS}") + ++if (CMAKE_SYSTEM_NAME STREQUAL "Linux") ++ find_library(LIBRT rt) ++ if (LIBRT) ++ list(APPEND CMAKE_REQUIRED_LIBRARIES "rt") ++ endif() ++endif() ++ + check_symbol_exists(pthread_atfork pthread.h FOLLY_HAVE_PTHREAD_ATFORK) + + # Unfortunately check_symbol_exists() does not work for memrchr(): diff --git a/3rd_party/folly/patches/0015-benchmark-format-macros.patch b/3rd_party/folly/patches/0015-benchmark-format-macros.patch new file mode 100644 index 00000000..14f8b208 --- /dev/null +++ b/3rd_party/folly/patches/0015-benchmark-format-macros.patch @@ -0,0 +1,15 @@ +diff --git a/folly/Benchmark.cpp b/folly/Benchmark.cpp +index 389ee46a1..390b7674b 100644 +--- a/folly/Benchmark.cpp ++++ b/folly/Benchmark.cpp +@@ -16,6 +16,10 @@ + + // @author Andrei Alexandrescu (andrei.alexandrescu@fb.com) + ++#ifndef __STDC_FORMAT_MACROS ++#define __STDC_FORMAT_MACROS 1 ++#endif ++ + #include + + #include diff --git a/3rd_party/folly/patches/0016-find-packages.patch b/3rd_party/folly/patches/0016-find-packages.patch new file mode 100644 index 00000000..c6cd14fa --- /dev/null +++ b/3rd_party/folly/patches/0016-find-packages.patch @@ -0,0 +1,80 @@ +diff --git a/CMake/folly-deps.cmake b/CMake/folly-deps.cmake +index 9c9d9ea60..e78611542 100644 +--- a/CMake/folly-deps.cmake ++++ b/CMake/folly-deps.cmake +@@ -48,25 +48,25 @@ find_package(Boost 1.51.0 MODULE + list(APPEND FOLLY_LINK_LIBRARIES ${Boost_LIBRARIES}) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${Boost_INCLUDE_DIRS}) + +-find_package(DoubleConversion MODULE REQUIRED) ++find_package(double-conversion MODULE REQUIRED) + list(APPEND FOLLY_LINK_LIBRARIES ${DOUBLE_CONVERSION_LIBRARY}) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${DOUBLE_CONVERSION_INCLUDE_DIR}) + +-find_package(Gflags MODULE) +-set(FOLLY_HAVE_LIBGFLAGS ${LIBGFLAGS_FOUND}) +-if(LIBGFLAGS_FOUND) +- list(APPEND FOLLY_LINK_LIBRARIES ${LIBGFLAGS_LIBRARY}) +- list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBGFLAGS_INCLUDE_DIR}) +- set(FOLLY_LIBGFLAGS_LIBRARY ${LIBGFLAGS_LIBRARY}) +- set(FOLLY_LIBGFLAGS_INCLUDE ${LIBGFLAGS_INCLUDE_DIR}) ++find_package(gflags MODULE) ++set(FOLLY_HAVE_LIBGFLAGS ${gflags_FOUND}) ++if(gflags_FOUND) ++ list(APPEND FOLLY_LINK_LIBRARIES ${gflags_LIBRARIES}) ++ list(APPEND FOLLY_INCLUDE_DIRECTORIES ${gflags_INCLUDE_DIRS}) ++ set(FOLLY_LIBGFLAGS_LIBRARY ${gflags_LIBRARIES}) ++ set(FOLLY_LIBGFLAGS_INCLUDE ${gflags_INCLUDE_DIRS}) + endif() + +-find_package(Glog MODULE) ++find_package(glog MODULE) + set(FOLLY_HAVE_LIBGLOG ${GLOG_FOUND}) + list(APPEND FOLLY_LINK_LIBRARIES ${GLOG_LIBRARY}) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${GLOG_INCLUDE_DIR}) + +-find_package(LibEvent MODULE REQUIRED) ++find_package(Libevent MODULE REQUIRED) + list(APPEND FOLLY_LINK_LIBRARIES ${LIBEVENT_LIB}) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBEVENT_INCLUDE_DIR}) + +@@ -104,14 +104,14 @@ if (LIBLZMA_FOUND) + list(APPEND FOLLY_LINK_LIBRARIES ${LIBLZMA_LIBRARIES}) + endif() + +-find_package(LZ4 MODULE) ++find_package(lz4 MODULE) + set(FOLLY_HAVE_LIBLZ4 ${LZ4_FOUND}) + if (LZ4_FOUND) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LZ4_INCLUDE_DIR}) + list(APPEND FOLLY_LINK_LIBRARIES ${LZ4_LIBRARY}) + endif() + +-find_package(Zstd MODULE) ++find_package(zstd MODULE) + set(FOLLY_HAVE_LIBZSTD ${ZSTD_FOUND}) + if(ZSTD_FOUND) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${ZSTD_INCLUDE_DIR}) +@@ -125,11 +125,11 @@ if (SNAPPY_FOUND) + list(APPEND FOLLY_LINK_LIBRARIES ${SNAPPY_LIBRARY}) + endif() + +-find_package(LibDwarf) ++find_package(libdwarf) + list(APPEND FOLLY_LINK_LIBRARIES ${LIBDWARF_LIBRARIES}) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBDWARF_INCLUDE_DIRS}) + +-find_package(Libiberty) ++find_package(libiberty) + list(APPEND FOLLY_LINK_LIBRARIES ${LIBIBERTY_LIBRARIES}) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBIBERTY_INCLUDE_DIRS}) + +@@ -141,7 +141,7 @@ find_package(LibUring) + list(APPEND FOLLY_LINK_LIBRARIES ${LIBURING_LIBRARIES}) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBURING_INCLUDE_DIRS}) + +-find_package(Libsodium) ++find_package(libsodium) + list(APPEND FOLLY_LINK_LIBRARIES ${LIBSODIUM_LIBRARIES}) + list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBSODIUM_INCLUDE_DIRS}) + diff --git a/3rd_party/folly/patches/0017-compiler-flags.patch b/3rd_party/folly/patches/0017-compiler-flags.patch new file mode 100644 index 00000000..1290e801 --- /dev/null +++ b/3rd_party/folly/patches/0017-compiler-flags.patch @@ -0,0 +1,24 @@ +diff --git a/CMake/FollyCompilerUnix.cmake b/CMake/FollyCompilerUnix.cmake +index 8dcaf14..200fe8d 100644 +--- a/CMake/FollyCompilerUnix.cmake ++++ b/CMake/FollyCompilerUnix.cmake +@@ -28,9 +28,9 @@ set( + ) + mark_as_advanced(CXX_STD) + +-set(CMAKE_CXX_FLAGS_COMMON "-g -Wall -Wextra") ++set(CMAKE_CXX_FLAGS_COMMON "-Wall -Wextra") + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${CMAKE_CXX_FLAGS_COMMON}") +-set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_COMMON} -O3") ++set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_COMMON}") + + # Note that CMAKE_REQUIRED_FLAGS must be a string, not a list + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -std=${CXX_STD}") +@@ -43,7 +43,6 @@ function(apply_folly_compile_options_to_target THETARGET) + ) + target_compile_options(${THETARGET} + PRIVATE +- -g + -std=${CXX_STD} + -finput-charset=UTF-8 + -fsigned-char diff --git a/3rd_party/folly/patches/0018-find-glog.patch b/3rd_party/folly/patches/0018-find-glog.patch new file mode 100644 index 00000000..b2d17ad4 --- /dev/null +++ b/3rd_party/folly/patches/0018-find-glog.patch @@ -0,0 +1,16 @@ +diff --git a/build/fbcode_builder/CMake/FindGlog.cmake b/build/fbcode_builder/CMake/FindGlog.cmake +index 752647c..aa2fa1c 100644 +--- a/build/fbcode_builder/CMake/FindGlog.cmake ++++ b/build/fbcode_builder/CMake/FindGlog.cmake +@@ -10,9 +10,9 @@ include(FindPackageHandleStandardArgs) + include(SelectLibraryConfigurations) + + find_library(GLOG_LIBRARY_RELEASE glog +- PATHS ${GLOG_LIBRARYDIR}) ++ PATHS ${CONAN_GLOG_ROOT}) + find_library(GLOG_LIBRARY_DEBUG glogd +- PATHS ${GLOG_LIBRARYDIR}) ++ PATHS ${CONAN_GLOG_ROOT}) + + find_path(GLOG_INCLUDE_DIR glog/logging.h + PATHS ${GLOG_INCLUDEDIR}) diff --git a/3rd_party/folly/patches/0019-exclude-example.patch b/3rd_party/folly/patches/0019-exclude-example.patch new file mode 100644 index 00000000..d2afb310 --- /dev/null +++ b/3rd_party/folly/patches/0019-exclude-example.patch @@ -0,0 +1,12 @@ +diff --git a/folly/CMakeLists.txt b/folly/CMakeLists.txt +index 883f27c..2d2086f 100644 +--- a/folly/CMakeLists.txt ++++ b/folly/CMakeLists.txt +@@ -28,7 +28,6 @@ install( + ) + + add_subdirectory(experimental/exception_tracer) +-add_subdirectory(logging/example) + + if (PYTHON_EXTENSIONS) + # Create tree of symbolic links in structure required for successful diff --git a/3rd_party/folly/patches/0020-include-ssizet.patch b/3rd_party/folly/patches/0020-include-ssizet.patch new file mode 100644 index 00000000..0575fecd --- /dev/null +++ b/3rd_party/folly/patches/0020-include-ssizet.patch @@ -0,0 +1,12 @@ +diff --git a/folly/executors/ExecutorWithPriority.h b/folly/executors/ExecutorWithPriority.h +index b95a6c4..18b8110 100644 +--- a/folly/executors/ExecutorWithPriority.h ++++ b/folly/executors/ExecutorWithPriority.h +@@ -18,6 +18,7 @@ + + #include + #include ++#include + + namespace folly { + diff --git a/3rd_party/folly/patches/0021-typedef-clockid.patch b/3rd_party/folly/patches/0021-typedef-clockid.patch new file mode 100644 index 00000000..fb46c057 --- /dev/null +++ b/3rd_party/folly/patches/0021-typedef-clockid.patch @@ -0,0 +1,12 @@ +diff --git a/folly/portability/Time.h b/folly/portability/Time.h +index 994a09e5d70..e4f0d101ca9 100644 +--- a/folly/portability/Time.h ++++ b/folly/portability/Time.h +@@ -49,7 +49,6 @@ + #define CLOCK_PROCESS_CPUTIME_ID 2 + #define CLOCK_THREAD_CPUTIME_ID 3 + +-typedef uint8_t clockid_t; + extern "C" int clock_gettime(clockid_t clk_id, struct timespec* ts); + extern "C" int clock_getres(clockid_t clk_id, struct timespec* ts); + #endif diff --git a/3rd_party/folly/patches/0022-fix-windows-minmax.patch b/3rd_party/folly/patches/0022-fix-windows-minmax.patch new file mode 100644 index 00000000..1fc69a43 --- /dev/null +++ b/3rd_party/folly/patches/0022-fix-windows-minmax.patch @@ -0,0 +1,12 @@ +diff --git a/CMake/FollyCompilerMSVC.cmake b/CMake/FollyCompilerMSVC.cmake +index ec2ce1a1d..16deda71c 100644 +--- a/CMake/FollyCompilerMSVC.cmake ++++ b/CMake/FollyCompilerMSVC.cmake +@@ -289,6 +289,7 @@ function(apply_folly_compile_options_to_target THETARGET) + # And the extra defines: + target_compile_definitions(${THETARGET} + PUBLIC ++ NOMINMAX + _CRT_NONSTDC_NO_WARNINGS # Don't deprecate posix names of functions. + _CRT_SECURE_NO_WARNINGS # Don't deprecate the non _s versions of various standard library functions, because safety is for chumps. + _SCL_SECURE_NO_WARNINGS # Don't deprecate the non _s versions of various standard library functions, because safety is for chumps. diff --git a/3rd_party/folly/patches/0023-fix-safe-check-sanitize.patch b/3rd_party/folly/patches/0023-fix-safe-check-sanitize.patch new file mode 100644 index 00000000..78ca432f --- /dev/null +++ b/3rd_party/folly/patches/0023-fix-safe-check-sanitize.patch @@ -0,0 +1,16 @@ +diff -Naur a/folly/lang/SafeAssert.h b/folly/lang/SafeAssert.h +--- a/folly/lang/SafeAssert.h 2022-01-29 03:30:47.000000000 -0700 ++++ b/folly/lang/SafeAssert.h 2022-06-28 09:47:46.779345576 -0700 +@@ -24,12 +24,7 @@ + #include + #include + +-#if __GNUC__ && !__clang__ && FOLLY_SANITIZE_ADDRESS +-// gcc+asan has a bug that discards sections when using `static` below +-#define FOLLY_DETAIL_SAFE_CHECK_LINKAGE +-#else + #define FOLLY_DETAIL_SAFE_CHECK_LINKAGE static +-#endif + + #define FOLLY_DETAIL_SAFE_CHECK_IMPL(d, p, expr, expr_s, ...) \ + do { \ diff --git a/3rd_party/folly/test_package/CMakeLists.txt b/3rd_party/folly/test_package/CMakeLists.txt new file mode 100644 index 00000000..6a9df4ea --- /dev/null +++ b/3rd_party/folly/test_package/CMakeLists.txt @@ -0,0 +1,16 @@ +cmake_minimum_required(VERSION 3.15) +project(test_package CXX) + +find_package(folly REQUIRED CONFIG) + +add_executable(${PROJECT_NAME} test_package.cpp) +target_link_libraries(${PROJECT_NAME} + Folly::folly + Folly::follybenchmark) + + +if (${FOLLY_VERSION} VERSION_LESS "2021.07.20.00") + set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 14) +else() + set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 17) +endif() diff --git a/3rd_party/folly/test_package/conanfile.py b/3rd_party/folly/test_package/conanfile.py new file mode 100644 index 00000000..63889d52 --- /dev/null +++ b/3rd_party/folly/test_package/conanfile.py @@ -0,0 +1,31 @@ +import os +from conan import ConanFile +from conan.tools.cmake import CMake, CMakeToolchain +from conan.tools.build import can_run +from conan.tools.cmake import cmake_layout + +required_conan_version = ">=1.43.0" + +class TestPackageConan(ConanFile): + settings = "os", "compiler", "build_type", "arch" + generators = "CMakeDeps", "VirtualRunEnv" + + def requirements(self): + self.requires(self.tested_reference_str) + + def generate(self): + tc = CMakeToolchain(self) + tc.variables["FOLLY_VERSION"] = self.dependencies["folly"].ref.version + tc.generate() + + def layout(self): + cmake_layout(self) + + def build(self): + cmake = CMake(self) + cmake.configure() + cmake.build() + + def test(self): + if can_run(self): + self.run(os.path.join(self.cpp.build.bindirs[0], "test_package"), env="conanrun") diff --git a/3rd_party/folly/test_package/test_package.cpp b/3rd_party/folly/test_package/test_package.cpp new file mode 100644 index 00000000..cc522b8b --- /dev/null +++ b/3rd_party/folly/test_package/test_package.cpp @@ -0,0 +1,29 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#if FOLLY_HAVE_ELF +#include +#endif + +static void print_uri(const folly::fbstring& value) { + const folly::Uri uri(value); + std::cout << "The authority from " << value << " is " << uri.authority() << std::endl; +} + +int main() { + folly::ThreadedExecutor executor; + folly::Promise promise; + folly::Future future = promise.getSemiFuture().via(&executor); + folly::Future unit = std::move(future).thenValue(print_uri); + promise.setValue("https://github.com/bincrafters"); + std::move(unit).get(); +#if FOLLY_HAVE_ELF + folly::symbolizer::ElfFile elffile; +#endif + return EXIT_SUCCESS; +} diff --git a/3rd_party/folly/test_v1_package/CMakeLists.txt b/3rd_party/folly/test_v1_package/CMakeLists.txt new file mode 100644 index 00000000..f8cc697a --- /dev/null +++ b/3rd_party/folly/test_v1_package/CMakeLists.txt @@ -0,0 +1,17 @@ +cmake_minimum_required(VERSION 3.1) +project(test_package CXX) + +include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) +conan_basic_setup(TARGETS) + +find_package(folly CONFIG REQUIRED) + +add_executable(${PROJECT_NAME} test_package.cpp) +target_link_libraries(${PROJECT_NAME} Folly::folly) + + +if (${FOLLY_VERSION} VERSION_LESS "2021.07.20.00") + set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 14) +else() + set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 17) +endif() diff --git a/3rd_party/folly/test_v1_package/conanfile.py b/3rd_party/folly/test_v1_package/conanfile.py new file mode 100644 index 00000000..8b8cfae4 --- /dev/null +++ b/3rd_party/folly/test_v1_package/conanfile.py @@ -0,0 +1,18 @@ +from conans import ConanFile, CMake, tools +import os + + +class TestPackageConan(ConanFile): + settings = "os", "compiler", "build_type", "arch" + generators = "cmake", "cmake_find_package_multi" + + def build(self): + cmake = CMake(self) + cmake.definitions["FOLLY_VERSION"] = self.deps_cpp_info["folly"].version + cmake.configure() + cmake.build() + + def test(self): + if not tools.cross_building(self): + bin_path = os.path.join("bin", "test_package") + self.run(command=bin_path, run_environment=True) diff --git a/3rd_party/folly/test_v1_package/test_package.cpp b/3rd_party/folly/test_v1_package/test_package.cpp new file mode 100644 index 00000000..cc522b8b --- /dev/null +++ b/3rd_party/folly/test_v1_package/test_package.cpp @@ -0,0 +1,29 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#if FOLLY_HAVE_ELF +#include +#endif + +static void print_uri(const folly::fbstring& value) { + const folly::Uri uri(value); + std::cout << "The authority from " << value << " is " << uri.authority() << std::endl; +} + +int main() { + folly::ThreadedExecutor executor; + folly::Promise promise; + folly::Future future = promise.getSemiFuture().via(&executor); + folly::Future unit = std::move(future).thenValue(print_uri); + promise.setValue("https://github.com/bincrafters"); + std::move(unit).get(); +#if FOLLY_HAVE_ELF + folly::symbolizer::ElfFile elffile; +#endif + return EXIT_SUCCESS; +} From f0cb92bab1635ed6b15ffaaf33181eb018d2db5d Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sat, 17 Jun 2023 10:09:03 -0600 Subject: [PATCH 295/385] Added sanitize option --- .github/workflows/build_dependencies.yml | 17 ++++++++++++++++- .github/workflows/merge_conan_build.yml | 5 +++++ .github/workflows/pr_conan_build.yml | 5 +++++ 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index d447ada8..e31245d8 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -20,6 +20,10 @@ on: required: false type: string default: 'False' + sanitize: + required: false + type: string + default: 'False' testing: required: false type: string @@ -59,6 +63,14 @@ on: - 'True' - 'False' default: 'False' + sanitize: + description: 'Memory Sanitize' + required: false + type: choice + options: + - 'True' + - 'False' + default: 'False' testing: description: 'Build and Run' required: true @@ -116,6 +128,7 @@ jobs: python -m pip install conan~=1.0 conan user conan profile new --detect default + conan export deps/sisl/3rd_party/folly folly/2022.01.31.00@ conan export deps/sisl/3rd_party/gperftools conan export deps/sisl/3rd_party/jemalloc conan export deps/sisl/3rd_party/prerelease_dummy @@ -135,6 +148,7 @@ jobs: conan install \ -o prerelease=${{ inputs.prerelease }} \ -o malloc_impl=${{ inputs.malloc-impl }} \ + -o sanitize=${{ inputs.sanitize }} \ -s build_type=${{ inputs.build-type }} \ --build missing \ deps/sisl @@ -164,13 +178,14 @@ jobs: path: | ~/.conan/data key: SislDeps8-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} - if: ${{ github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + if: ${{ inputs.sanitize == 'False' && github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Create and test Package run: | conan create \ -o sisl:prerelease=${{ inputs.prerelease }} \ -o sisl:malloc_impl=${{ inputs.malloc-impl }} \ + -o sisl:sanitize=${{ inputs.sanitize }} \ -s build_type=${{ inputs.build-type }} \ --build missing \ deps/sisl diff --git a/.github/workflows/merge_conan_build.yml b/.github/workflows/merge_conan_build.yml index b9891c7a..74b74a5b 100644 --- a/.github/workflows/merge_conan_build.yml +++ b/.github/workflows/merge_conan_build.yml @@ -16,7 +16,12 @@ jobs: build-type: ["Debug", "Release"] malloc-impl: ["libc", "tcmalloc"] prerelease: ["True", "False"] + sanitize: ["True", "False"] exclude: + - build-type: Release + sanitize: "True" + - malloc-impl: tcmalloc + sanitize: "True" - build-type: Debug platform: ubuntu-20.04 - malloc-impl: tcmalloc diff --git a/.github/workflows/pr_conan_build.yml b/.github/workflows/pr_conan_build.yml index 935f0cc5..2939e5ee 100644 --- a/.github/workflows/pr_conan_build.yml +++ b/.github/workflows/pr_conan_build.yml @@ -15,7 +15,12 @@ jobs: build-type: ["Debug", "Release"] malloc-impl: ["libc", "tcmalloc"] prerelease: ["True", "False"] + sanitize: ["True", "False"] exclude: + - build-type: Release + sanitize: "True" + - malloc-impl: tcmalloc + sanitize: "True" - build-type: Debug platform: ubuntu-20.04 - malloc-impl: tcmalloc From 76ae06d72ef37946f33f86a51da65c3851630bf8 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sat, 17 Jun 2023 10:44:05 -0600 Subject: [PATCH 296/385] Need sanitize input. --- .github/workflows/build_dependencies.yml | 1 - .github/workflows/merge_conan_build.yml | 1 + .github/workflows/pr_conan_build.yml | 1 + 3 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index e31245d8..ec3541c5 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -148,7 +148,6 @@ jobs: conan install \ -o prerelease=${{ inputs.prerelease }} \ -o malloc_impl=${{ inputs.malloc-impl }} \ - -o sanitize=${{ inputs.sanitize }} \ -s build_type=${{ inputs.build-type }} \ --build missing \ deps/sisl diff --git a/.github/workflows/merge_conan_build.yml b/.github/workflows/merge_conan_build.yml index 74b74a5b..3cc22833 100644 --- a/.github/workflows/merge_conan_build.yml +++ b/.github/workflows/merge_conan_build.yml @@ -36,6 +36,7 @@ jobs: uses: ./.github/workflows/build_dependencies.yml with: platform: ${{ matrix.platform }} + sanitize: ${{ matrix.sanitize }} branch: ${{ github.ref }} build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} diff --git a/.github/workflows/pr_conan_build.yml b/.github/workflows/pr_conan_build.yml index 2939e5ee..2b3e65f6 100644 --- a/.github/workflows/pr_conan_build.yml +++ b/.github/workflows/pr_conan_build.yml @@ -39,4 +39,5 @@ jobs: build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} prerelease: ${{ matrix.prerelease }} + sanitize: ${{ matrix.sanitize }} testing: 'True' From 6755af1e50c29d215e027e4eff50db9aed942a32 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sat, 17 Jun 2023 12:44:01 -0700 Subject: [PATCH 297/385] Make implicit --- .github/workflows/build_dependencies.yml | 17 +++-------------- .github/workflows/merge_conan_build.yml | 6 ------ .github/workflows/pr_conan_build.yml | 6 ------ 3 files changed, 3 insertions(+), 26 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index ec3541c5..696dbaed 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -20,10 +20,6 @@ on: required: false type: string default: 'False' - sanitize: - required: false - type: string - default: 'False' testing: required: false type: string @@ -63,14 +59,6 @@ on: - 'True' - 'False' default: 'False' - sanitize: - description: 'Memory Sanitize' - required: false - type: choice - options: - - 'True' - - 'False' - default: 'False' testing: description: 'Build and Run' required: true @@ -177,14 +165,15 @@ jobs: path: | ~/.conan/data key: SislDeps8-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} - if: ${{ inputs.sanitize == 'False' && github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + if: ${{ github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Create and test Package run: | + sanitize=$([[ "${{ inputs.build-type }}" == "Debug" && "${{ inputs.malloc_impl }}" == "libc" ]] && echo "True" || echo "False") conan create \ -o sisl:prerelease=${{ inputs.prerelease }} \ -o sisl:malloc_impl=${{ inputs.malloc-impl }} \ - -o sisl:sanitize=${{ inputs.sanitize }} \ + -o sisl:sanitize=${sanitize} \ -s build_type=${{ inputs.build-type }} \ --build missing \ deps/sisl diff --git a/.github/workflows/merge_conan_build.yml b/.github/workflows/merge_conan_build.yml index 3cc22833..b9891c7a 100644 --- a/.github/workflows/merge_conan_build.yml +++ b/.github/workflows/merge_conan_build.yml @@ -16,12 +16,7 @@ jobs: build-type: ["Debug", "Release"] malloc-impl: ["libc", "tcmalloc"] prerelease: ["True", "False"] - sanitize: ["True", "False"] exclude: - - build-type: Release - sanitize: "True" - - malloc-impl: tcmalloc - sanitize: "True" - build-type: Debug platform: ubuntu-20.04 - malloc-impl: tcmalloc @@ -36,7 +31,6 @@ jobs: uses: ./.github/workflows/build_dependencies.yml with: platform: ${{ matrix.platform }} - sanitize: ${{ matrix.sanitize }} branch: ${{ github.ref }} build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} diff --git a/.github/workflows/pr_conan_build.yml b/.github/workflows/pr_conan_build.yml index 2b3e65f6..935f0cc5 100644 --- a/.github/workflows/pr_conan_build.yml +++ b/.github/workflows/pr_conan_build.yml @@ -15,12 +15,7 @@ jobs: build-type: ["Debug", "Release"] malloc-impl: ["libc", "tcmalloc"] prerelease: ["True", "False"] - sanitize: ["True", "False"] exclude: - - build-type: Release - sanitize: "True" - - malloc-impl: tcmalloc - sanitize: "True" - build-type: Debug platform: ubuntu-20.04 - malloc-impl: tcmalloc @@ -39,5 +34,4 @@ jobs: build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} prerelease: ${{ matrix.prerelease }} - sanitize: ${{ matrix.sanitize }} testing: 'True' From ed1e75d4792683d4d316ad0cd7ff93df3a9fe801 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sat, 17 Jun 2023 15:37:54 -0600 Subject: [PATCH 298/385] Bad variable. --- .github/workflows/build_dependencies.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 696dbaed..e12973ba 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -169,7 +169,7 @@ jobs: - name: Create and test Package run: | - sanitize=$([[ "${{ inputs.build-type }}" == "Debug" && "${{ inputs.malloc_impl }}" == "libc" ]] && echo "True" || echo "False") + sanitize=$([[ "${{ inputs.build-type }}" == "Debug" && "${{ inputs.malloc-impl }}" == "libc" ]] && echo "True" || echo "False") conan create \ -o sisl:prerelease=${{ inputs.prerelease }} \ -o sisl:malloc_impl=${{ inputs.malloc-impl }} \ From 642a354273118d68037dbd41ba019ce87d689952 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sat, 17 Jun 2023 18:57:22 -0600 Subject: [PATCH 299/385] Add code coverage. (#138) --- .github/workflows/build_dependencies.yml | 38 +++++++++++++++++------- .github/workflows/merge_conan_build.yml | 8 ++--- .github/workflows/pr_conan_build.yml | 11 +++---- conanfile.py | 5 ++-- src/options/CMakeLists.txt | 6 ---- test_package/conanfile.py | 2 +- 6 files changed, 37 insertions(+), 33 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index e12973ba..461b5436 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -114,6 +114,7 @@ jobs: run: | python -m pip install --upgrade pip python -m pip install conan~=1.0 + python -m pip install gcovr conan user conan profile new --detect default conan export deps/sisl/3rd_party/folly folly/2022.01.31.00@ @@ -121,7 +122,7 @@ jobs: conan export deps/sisl/3rd_party/jemalloc conan export deps/sisl/3rd_party/prerelease_dummy conan export deps/sisl/3rd_party/pistache pistache/cci.20201127@ - cached_pkgs=$(ls -1d ~/.conan/data/*/*/*/*/package | sed 's,.*data/,,' | cut -d'/' -f1,2 | paste -sd',' - -) + cached_pkgs=$(ls -1d ~/.conan/data/*/*/*/*/export 2>/dev/null | sed 's,.*data/,,' | cut -d'/' -f1,2 | paste -sd',' - -) echo "::info:: Pre-cached: ${cached_pkgs}" if: ${{ inputs.testing == 'True' || steps.restore-cache-sisl.outputs.cache-hit != 'true' }} @@ -169,13 +170,30 @@ jobs: - name: Create and test Package run: | - sanitize=$([[ "${{ inputs.build-type }}" == "Debug" && "${{ inputs.malloc-impl }}" == "libc" ]] && echo "True" || echo "False") - conan create \ - -o sisl:prerelease=${{ inputs.prerelease }} \ - -o sisl:malloc_impl=${{ inputs.malloc-impl }} \ - -o sisl:sanitize=${sanitize} \ - -s build_type=${{ inputs.build-type }} \ - --build missing \ - deps/sisl - conan remove -f sisl + if [[ "${{ inputs.build-type }}" == "Debug" && "${{ inputs.malloc-impl }}" == "libc" && "${{ inputs.prerelease }}" == "False" ]]; then + conan install \ + -o prerelease=${{ inputs.prerelease }} \ + -o malloc_impl=${{ inputs.malloc-impl }} \ + -o coverage=True \ + -s build_type=${{ inputs.build-type }} \ + --build missing \ + deps/sisl + conan build deps/sisl + else + sanitize=$([[ "${{ inputs.build-type }}" == "Debug" && "${{ inputs.malloc-impl }}" == "libc" && "${{ inputs.prerelease }}" == "True" ]] && echo "True" || echo "False") + conan create \ + -o sisl:prerelease=${{ inputs.prerelease }} \ + -o sisl:malloc_impl=${{ inputs.malloc-impl }} \ + -o sisl:sanitize=${sanitize} \ + -s build_type=${{ inputs.build-type }} \ + --build missing \ + deps/sisl + fi if: ${{ inputs.testing == 'True' }} + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v3 + with: + token: ${{ secrets.CODECOV_TOKEN }} + gcov: true + gcov_ignore: src/*,deps/sisl/**/*_test.cpp diff --git a/.github/workflows/merge_conan_build.yml b/.github/workflows/merge_conan_build.yml index b9891c7a..f32b9c8e 100644 --- a/.github/workflows/merge_conan_build.yml +++ b/.github/workflows/merge_conan_build.yml @@ -19,15 +19,11 @@ jobs: exclude: - build-type: Debug platform: ubuntu-20.04 - - malloc-impl: tcmalloc - platform: ubuntu-20.04 - - malloc-impl: libc - prerelease: "False" - malloc-impl: libc build-type: Release platform: ubuntu-22.04 - - prerelease: "False" - build-type: Debug + - prerelease: "True" + platform: ubuntu-20.04 uses: ./.github/workflows/build_dependencies.yml with: platform: ${{ matrix.platform }} diff --git a/.github/workflows/pr_conan_build.yml b/.github/workflows/pr_conan_build.yml index 935f0cc5..7841bc5b 100644 --- a/.github/workflows/pr_conan_build.yml +++ b/.github/workflows/pr_conan_build.yml @@ -18,15 +18,12 @@ jobs: exclude: - build-type: Debug platform: ubuntu-20.04 - - malloc-impl: tcmalloc - platform: ubuntu-20.04 - - malloc-impl: libc - prerelease: "False" + - build-type: Debug + platform: tcmalloc - malloc-impl: libc build-type: Release - platform: ubuntu-22.04 - - prerelease: "False" - build-type: Debug + - prerelease: "True" + platform: ubuntu-20.04 uses: ./.github/workflows/build_dependencies.yml with: platform: ${{ matrix.platform }} diff --git a/conanfile.py b/conanfile.py index 172c655e..aa13b392 100644 --- a/conanfile.py +++ b/conanfile.py @@ -92,22 +92,21 @@ def build(self): definitions = {'CONAN_BUILD_COVERAGE': 'OFF', 'CMAKE_EXPORT_COMPILE_COMMANDS': 'ON', + 'CONAN_CMAKE_SILENT_OUTPUT': 'ON', 'MEMORY_SANITIZER_ON': 'OFF', 'MALLOC_IMPL': self.options.malloc_impl} - test_target = None if self.settings.build_type == "Debug": if self.options.sanitize: definitions['MEMORY_SANITIZER_ON'] = 'ON' elif self.options.coverage: definitions['CONAN_BUILD_COVERAGE'] = 'ON' - test_target = 'coverage' definitions['MALLOC_IMPL'] = self.options.malloc_impl cmake.configure(defs=definitions) cmake.build() - cmake.test(target=test_target, output_on_failure=True) + cmake.test(output_on_failure=True) def package(self): lib_dir = join(self.package_folder, "lib") diff --git a/src/options/CMakeLists.txt b/src/options/CMakeLists.txt index 4fc99004..f80d2a3d 100644 --- a/src/options/CMakeLists.txt +++ b/src/options/CMakeLists.txt @@ -11,10 +11,4 @@ target_sources(basic_test PRIVATE tests/basic.cpp ) target_link_libraries(basic_test sisl ${COMMON_DEPS} GTest::gtest) - -if (DEFINED CONAN_BUILD_COVERAGE) - if (${CONAN_BUILD_COVERAGE}) - list(APPEND extra_args "--gtest_output=xml:/output/test_basic.xml") - endif () -endif () add_test(NAME OptionsBasics COMMAND basic_test ${extra_args}) diff --git a/test_package/conanfile.py b/test_package/conanfile.py index 64d9250d..3b0b71cb 100644 --- a/test_package/conanfile.py +++ b/test_package/conanfile.py @@ -9,7 +9,7 @@ class TestPackageConan(ConanFile): def build(self): cmake = CMake(self) - cmake.configure(defs={'CMAKE_EXPORT_COMPILE_COMMANDS': 'ON'}) + cmake.configure(defs={'CONAN_CMAKE_SILENT_OUTPUT': 'ON'}) cmake.build() def test(self): From 11d0e97b87d81d21baa8a8d567d805cc862cdead Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sat, 17 Jun 2023 19:14:10 -0600 Subject: [PATCH 300/385] Add code coverage badge. (#139) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 121d00cd..37c528e3 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,6 @@ # SymbiosisLib (sisl) [![Conan Build](https://github.com/eBay/sisl/actions/workflows/merge_conan_build.yml/badge.svg?branch=stable/v8.x)](https://github.com/eBay/sisl/actions/workflows/merge_conan_build.yml) +[![CodeCov](https://codecov.io/gh/eBay/sisl/branch/stable/v8.x/graph/badge.svg)](https://codecov.io/gh/eBay/Sisl) This repo provides a symbiosis of libraries (thus named sisl - pronounced like sizzle) mostly for very high performance data structures and utilities. This is mostly on top of folly, boost, STL and other good well known libraries. Thus its not trying From 259543b4ffe5a01ce1383d18a1618f8e0e443d3f Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sun, 18 Jun 2023 11:35:12 -0600 Subject: [PATCH 301/385] Provide codecov settings. (#142) --- .codecov.yml | 15 +++++++++++++++ .github/workflows/build_dependencies.yml | 1 - 2 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 .codecov.yml diff --git a/.codecov.yml b/.codecov.yml new file mode 100644 index 00000000..de66488c --- /dev/null +++ b/.codecov.yml @@ -0,0 +1,15 @@ +codecov: + notify: + require_ci_to_pass: no + +fixes: + - "deps/sisl/::" + +ignore: + - "**/*_test.c*" + - "**/*_test.h*" + - "**/*_generated.h" + - "**/*_pb.cc" + - "**/*_pb.h" + - "**/test*/*.c*" + - "**/test*/*.h*" \ No newline at end of file diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 51e92332..483a5f86 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -200,4 +200,3 @@ jobs: with: token: ${{ secrets.CODECOV_TOKEN }} gcov: true - gcov_ignore: src/*,deps/sisl/**/*_test.cpp From 31c041a4c6c4d8b203707bdb051c0ce1ab3d4f58 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sun, 18 Jun 2023 11:46:21 -0600 Subject: [PATCH 302/385] Adjust coverage (#141) --- .codecov.yml | 16 ++++++++++++++++ .github/workflows/build_dependencies.yml | 1 - 2 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 .codecov.yml diff --git a/.codecov.yml b/.codecov.yml new file mode 100644 index 00000000..696c7182 --- /dev/null +++ b/.codecov.yml @@ -0,0 +1,16 @@ +codecov: + notify: + require_ci_to_pass: no + +fixes: + - "deps/sisl/::" + +ignore: + - "**/*_test.c*" + - "**/*_test.h*" + - "**/*_generated.h" + - "**/*_pb.cc" + - "**/*_pb.h" + - "**/test*/*.c*" + - "**/test*/*.h*" + - "**/*test_flip.*" diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 461b5436..d3863a02 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -196,4 +196,3 @@ jobs: with: token: ${{ secrets.CODECOV_TOKEN }} gcov: true - gcov_ignore: src/*,deps/sisl/**/*_test.cpp From 7ed895a4335153aea30ba3ecc72306679bbd23e0 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 20 Jun 2023 08:30:07 -0700 Subject: [PATCH 303/385] Remove grpc tests from coverage. --- .codecov.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.codecov.yml b/.codecov.yml index 696c7182..a9f92641 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -14,3 +14,4 @@ ignore: - "**/test*/*.c*" - "**/test*/*.h*" - "**/*test_flip.*" + - "**/grpc/tests/**" From 9e9dbdde8123af963890ac98cc9fa876d355084f Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 20 Jun 2023 09:48:18 -0700 Subject: [PATCH 304/385] Use components. --- .codecov.yml | 75 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/.codecov.yml b/.codecov.yml index a9f92641..ee7370c9 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -15,3 +15,78 @@ ignore: - "**/test*/*.h*" - "**/*test_flip.*" - "**/grpc/tests/**" + +comment: + layout: "header, diff, flags, components" + +component_management: + default_rules: + statuses: + - type: project + target: auto + individual_components: + - component_id: modules_auth_manager + name: AuthManager + paths: + - src/auth_manager/** + - include/sisl/auth_manager/** + - component_id: modules_cache + name: Cache + paths: + - src/cache/** + - include/sisl/cache/** + - component_id: modules_fds + name: FDS + paths: + - src/fds/** + - include/sisl/fds/** + - component_id: modules_file_watcher + name: FileWatcher + paths: + - src/file_watcher/** + - include/sisl/file_watcher/** + - component_id: modules_flip + name: Flip + paths: + - src/flip/** + - include/sisl/flip/** + - component_id: modules_grpc + name: gRPC + paths: + - src/grpc/** + - include/sisl/grpc/** + - component_id: modules_logging + name: Logging + paths: + - src/logging/** + - include/sisl/logging/** + - component_id: modules_metrics + name: Metrics + paths: + - src/metrics/** + - include/sisl/metrics/** + - component_id: modules_options + name: Options + paths: + - src/options/** + - include/sisl/options/** + - component_id: modules_settings + name: Setting + paths: + - src/settings/** + - include/sisl/settings/** + - component_id: modules_sobject + name: StatusObject + paths: + - src/sobject/** + - include/sisl/sobject/** + - component_id: modules_version + name: Utility + paths: + - src/utility/** + - include/sisl/utility/** + - component_id: modules_version + name: Version + paths: + - src/version/** + - include/sisl/version.hpp From 5d6510dcee683ddb90df76d13b7d4ff46cad74b6 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 20 Jun 2023 09:48:18 -0700 Subject: [PATCH 305/385] Use components. --- .codecov.yml | 75 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/.codecov.yml b/.codecov.yml index a9f92641..ee7370c9 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -15,3 +15,78 @@ ignore: - "**/test*/*.h*" - "**/*test_flip.*" - "**/grpc/tests/**" + +comment: + layout: "header, diff, flags, components" + +component_management: + default_rules: + statuses: + - type: project + target: auto + individual_components: + - component_id: modules_auth_manager + name: AuthManager + paths: + - src/auth_manager/** + - include/sisl/auth_manager/** + - component_id: modules_cache + name: Cache + paths: + - src/cache/** + - include/sisl/cache/** + - component_id: modules_fds + name: FDS + paths: + - src/fds/** + - include/sisl/fds/** + - component_id: modules_file_watcher + name: FileWatcher + paths: + - src/file_watcher/** + - include/sisl/file_watcher/** + - component_id: modules_flip + name: Flip + paths: + - src/flip/** + - include/sisl/flip/** + - component_id: modules_grpc + name: gRPC + paths: + - src/grpc/** + - include/sisl/grpc/** + - component_id: modules_logging + name: Logging + paths: + - src/logging/** + - include/sisl/logging/** + - component_id: modules_metrics + name: Metrics + paths: + - src/metrics/** + - include/sisl/metrics/** + - component_id: modules_options + name: Options + paths: + - src/options/** + - include/sisl/options/** + - component_id: modules_settings + name: Setting + paths: + - src/settings/** + - include/sisl/settings/** + - component_id: modules_sobject + name: StatusObject + paths: + - src/sobject/** + - include/sisl/sobject/** + - component_id: modules_version + name: Utility + paths: + - src/utility/** + - include/sisl/utility/** + - component_id: modules_version + name: Version + paths: + - src/version/** + - include/sisl/version.hpp From 74f504011f8198fe769b259b29498aaa8783b3d8 Mon Sep 17 00:00:00 2001 From: Sanal P Date: Fri, 16 Jun 2023 09:15:39 -0700 Subject: [PATCH 306/385] Add api for getting child types in object tree view. Make object name unique. Support get state by name, type, path or both. --- conanfile.py | 2 +- include/sisl/sobject/sobject.hpp | 43 ++++----- src/sobject/sobject.cpp | 141 +++++++++++++++++------------ src/sobject/tests/test_sobject.cpp | 59 ++++++------ 4 files changed, 126 insertions(+), 119 deletions(-) diff --git a/conanfile.py b/conanfile.py index 5c95be0d..b5d51ea0 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.5.4" + version = "8.6.0" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/include/sisl/sobject/sobject.hpp b/include/sisl/sobject/sobject.hpp index a2da9fca..c6495390 100644 --- a/include/sisl/sobject/sobject.hpp +++ b/include/sisl/sobject/sobject.hpp @@ -28,17 +28,6 @@ namespace sisl { -// Each object is uniquely identified by its type and name. -// Ex: type=volume and name=volume_1, type=module and name=HomeBlks. -struct sobject_id { - std::string type; - std::string name; - bool empty() const { return type.empty() && name.empty(); } - [[maybe_unused]] bool operator<(const sobject_id& id) const { - return type < id.type || ((type == id.type) && (name < id.name)); - } -}; - typedef struct status_request { nlohmann::json json; bool do_recurse{false}; @@ -59,11 +48,6 @@ class sobject; class sobject_manager; using sobject_ptr = std::shared_ptr< sobject >; -// To search using only the type as key. -[[maybe_unused]] static bool operator<(const sobject_id& id, const std::string& key_type) { return id.type < key_type; } - -[[maybe_unused]] static bool operator<(const std::string& key_type, const sobject_id& id) { return key_type < id.type; } - [[maybe_unused]] static status_response status_error(std::string error_str) { status_response response; response.json["error"] = error_str; @@ -73,11 +57,11 @@ using sobject_ptr = std::shared_ptr< sobject >; // Similar to sysfs kobject, sobject is a lightweight utility to create relationships // between different classes and modules. This can be used to get or change the state of a class // and all its children. Modules/subsystems which register their callbacks to be -// whenever a get status is called from the root or directly. +// whenever a get status is called from the root or directly. Object is uniquely identified by its name. class sobject { public: sobject(sobject_manager* mgr, const std::string& obj_type, const std::string& obj_name, status_callback_type cb) : - m_mgr(mgr), m_id{obj_type, obj_name}, m_status_cb(std::move(cb)) {} + m_mgr(mgr), m_type(obj_type), m_name(obj_name), m_status_cb(std::move(cb)) {} static sobject_ptr create(sobject_manager* mgr, const std::string& obj_type, const std::string& obj_name, status_callback_type cb) { @@ -88,34 +72,39 @@ class sobject { status_response run_callback(const status_request& request) const; sobject_ptr get_child(const std::string& name); void add_child(const sobject_ptr child); + void add_child_type(const std::string& child_type); - sobject_id id() const { return m_id; } - std::string name() const { return m_id.name; } - std::string type() const { return m_id.type; } + std::string name() const { return m_name; } + std::string type() const { return m_type; } private: sobject_manager* m_mgr; - sobject_id m_id; + std::string m_type; + std::string m_name; std::shared_mutex m_mtx; status_callback_type m_status_cb; // Keep a graph of child nodes. Mapping from name to child status object. - std::map< sobject_id, sobject_ptr > m_children; + std::map< std::string, sobject_ptr > m_children; + friend class sobject_manager; }; class sobject_manager { +private: public: sobject_ptr create_object(const std::string& type, const std::string& name, status_callback_type cb); status_response get_status(const status_request& request); + status_response get_child_type_status( const status_request& request); status_response get_object_by_path(const status_request& request); - status_response get_object_status(const sobject_id& id, const status_request& request); + status_response get_object_status(const std::string& name, const status_request& request); status_response get_objects(const status_request& request); - status_response get_object_types(); + status_response get_object_types(const std::string& type); void add_object_type(const std::string& parent_type, const std::string& child_type); private: - // Mapping from object name to object metadata. - std::map< sobject_id, sobject_ptr, std::less<> > m_object_store; + // Mapping from object name to object metadata. Object names are required + // to be unique. + std::map< std::string, sobject_ptr, std::less<> > m_object_store; // Mapping from parent type to set of all children type to display the schema. std::map< std::string, std::set< std::string > > m_object_types; std::shared_mutex m_mtx; diff --git a/src/sobject/sobject.cpp b/src/sobject/sobject.cpp index a091d337..5d892c1b 100644 --- a/src/sobject/sobject.cpp +++ b/src/sobject/sobject.cpp @@ -22,46 +22,48 @@ namespace sisl { sobject_ptr sobject::get_child(const std::string& name) { std::shared_lock lock{m_mtx}; - for (const auto& [id, obj] : m_children) { - // Return the first child found. We assume if user asks for a path - // there is a unique child in the parent. - if (id.name == name) { return obj; } - } - return nullptr; + auto iter = m_children.find(name); + if (iter == m_children.end()) { return nullptr; } + return iter->second; } void sobject::add_child(const sobject_ptr child) { // Add a child to current object. std::unique_lock lock{m_mtx}; LOGINFO("Parent {}/{} added child {}/{}", type(), name(), child->type(), child->name()); - m_children.emplace(child->id(), child); + m_children.emplace(child->name(), child); m_mgr->add_object_type(type(), child->type()); } +void sobject::add_child_type(const std::string& child_type) { + std::unique_lock lock{m_mtx}; + LOGINFO("Added type parent {} child {}", type(), child_type); + m_mgr->add_object_type(type(), child_type); +} + status_response sobject::run_callback(const status_request& request) const { status_response response; response.json = nlohmann::json::object(); - response.json["type"] = m_id.type; - response.json["name"] = m_id.name; + response.json["type"] = m_type; + response.json["name"] = m_name; auto res = m_status_cb(request).json; if (!res.is_null()) { response.json.update(res); } - response.json["children"] = nlohmann::json::object(); - - for (const auto& [id, obj] : m_children) { - if (response.json["children"][id.type] == nullptr) { - if (request.do_recurse) { - response.json["children"][id.type] == nlohmann::json::object(); - } else { - response.json["children"][id.type] == nlohmann::json::array(); - } + + for (const auto& [name, obj] : m_children) { + auto child_type = obj->type(); + auto child_name = obj->name(); + if (response.json["children"] == nullptr) { response.json["children"] = nlohmann::json::object(); } + + if (response.json["children"][child_type] == nullptr) { + response.json["children"][child_type] == nlohmann::json::array(); } if (request.do_recurse) { // Call recursive. auto child_json = obj->run_callback(request).json; - response.json["children"][id.type].emplace(id.name, child_json); + response.json["children"][child_type].emplace_back(child_json); } else { - response.json["children"][id.type].push_back(id.name); + response.json["children"][child_type].emplace_back(child_name); } } @@ -71,8 +73,7 @@ status_response sobject::run_callback(const status_request& request) const { sobject_ptr sobject_manager::create_object(const std::string& type, const std::string& name, status_callback_type cb) { std::unique_lock lock{m_mtx}; auto obj = sobject::create(this, type, name, std::move(cb)); - sobject_id id{type, name}; - m_object_store[id] = obj; + m_object_store[name] = obj; if (m_object_types.count(type) == 0) { m_object_types[type] = {}; } LOGINFO("Created status object type={} name={}", type, name); return obj; @@ -83,68 +84,78 @@ void sobject_manager::add_object_type(const std::string& parent_type, const std: m_object_types[parent_type].insert(child_type); } -status_response sobject_manager::get_object_types() { +status_response sobject_manager::get_object_types(const std::string& type) { status_response response; - - for (const auto& [type, children] : m_object_types) { - response.json[type] = nlohmann::json::array(); - for (const auto& child_type : children) { - response.json[type].emplace_back(child_type); - } + auto children = nlohmann::json::object(); + for (const auto& child : m_object_types[type]) { + children.emplace(child, get_object_types(child).json); } + response.json = children; return response; } status_response sobject_manager::get_objects(const status_request& request) { status_response response; + // We by default start from the 'module' types recursively as they are + // the top of the heirarchy. + std::string obj_type = request.obj_type.empty() ? "module" : request.obj_type; auto iter = m_object_store.begin(); if (!request.next_cursor.empty()) { - // Extract cursor which is of format "type:name" - auto index = request.next_cursor.find_first_of("^"); - if (index == std::string::npos) return status_error("Invalid cursor"); - auto type = request.next_cursor.substr(0, index); - auto name = request.next_cursor.substr(index + 1); - iter = m_object_store.find(sobject_id{type, name}); + // Extract cursor which has name. + iter = m_object_store.find(request.next_cursor); if (iter == m_object_store.end()) return status_error("Cursor not found"); - } else if (request.obj_name.empty() && !request.obj_type.empty()) { - // Get all objects of type requested. - iter = m_object_store.find(request.obj_type); } int batch_size = request.batch_size; while (iter != m_object_store.end() && batch_size > 0) { - if (request.obj_name.empty() && !request.obj_type.empty() && request.obj_type != iter->first.type) { - // If only one type of objects requested. - return response; + if (obj_type != iter->second->type()) { + iter++; + continue; } - response.json[iter->first.name] = iter->second->run_callback(request).json; + response.json[iter->first] = iter->second->run_callback(request).json; iter++; batch_size--; } - if (iter != m_object_store.end()) { response.json["next_cursor"] = iter->first.type + "^" + iter->first.name; } + if (iter != m_object_store.end() && obj_type == iter->second->type()) { + response.json["next_cursor"] = iter->second->name(); + } return response; } -status_response sobject_manager::get_object_status(const sobject_id& id, const status_request& request) { - auto iter = m_object_store.find(id); +status_response sobject_manager::get_object_status(const std::string& name, const status_request& request) { + auto iter = m_object_store.find(name); if (iter == m_object_store.end()) { return status_error("Object identifier not found"); } return iter->second->run_callback(request); } -status_response sobject_manager::get_object_by_path(const status_request& request) { - sobject_ptr obj = nullptr; - for (const auto& [id, obj_ptr] : m_object_store) { - if (id.name == request.obj_path[0]) { - obj = obj_ptr; - break; +status_response sobject_manager::get_child_type_status( const status_request& request) { + status_response response; + auto iter = m_object_store.find(request.obj_name); + if (iter == m_object_store.end()) { return status_error("Object identifier not found"); } + for (const auto& [child_name, child_obj] : iter->second->m_children) { + if (child_obj->type() == request.obj_type) { + response.json[child_name] = child_obj->run_callback(request).json; } } + if (!response.json.empty()) { + // If we found child in object tree return it. + return response; + } + + // Else ask the parent object to do the work. This is used to lazily + // get objects of type which are not created by default. + return iter->second->run_callback(request); +} - if (obj == nullptr) { return status_error("Object identifier not found"); } +status_response sobject_manager::get_object_by_path(const status_request& request) { + sobject_ptr obj = nullptr; + auto iter = m_object_store.find(request.obj_path[0]); + if (iter == m_object_store.end()) { return status_error("Object identifier not found"); } + obj = iter->second; for (uint32_t ii = 1; ii < request.obj_path.size(); ii++) { obj = obj->get_child(request.obj_path[ii]); if (obj == nullptr) { return status_error("Object identifier not found"); } @@ -160,19 +171,29 @@ status_response sobject_manager::get_status(const status_request& request) { return get_object_by_path(request); } - // If both are empty, we return all the types. If both not empty, we return the specific object. - // Its an error to have name non empty and type empty. - if (!request.obj_name.empty() && request.obj_type.empty()) { return status_error("Type details not given"); } + if (!request.obj_type.empty() && !request.obj_name.empty()) { + // Get all children under the parent of type. + return get_child_type_status(request); + } - if (!request.obj_name.empty() && !request.obj_type.empty()) { + if (!request.obj_name.empty()) { // Return specific object. - sobject_id id{request.obj_type, request.obj_name}; - return get_object_status(std::move(id), request); + return get_object_status(request.obj_name, request); } - if (request.obj_name.empty() && request.obj_type.empty()) { return get_object_types(); } + if (!request.obj_type.empty()) { + // Return all objects of this type. + return get_objects(request); + } + + if (!request.do_recurse) { + // If no recurse we only return the types. + status_response response; + response.json["module"] = get_object_types("module").json; + return response; + } - // Dump all objects. + // Dump all objects recursively. return get_objects(request); } diff --git a/src/sobject/tests/test_sobject.cpp b/src/sobject/tests/test_sobject.cpp index 4f4a283f..7fdd0752 100644 --- a/src/sobject/tests/test_sobject.cpp +++ b/src/sobject/tests/test_sobject.cpp @@ -45,11 +45,10 @@ class SobjectTest : public testing::Test { }; TEST_F(SobjectTest, BasicTest) { - auto create_nodes = [this](sobject_ptr parent, string type, string prefix, int count) { vector< sobject_ptr > res; for (int i = 1; i <= count; i++) { - auto n = prefix + to_string(i); + auto n = prefix + "_" + to_string(i); auto cb = [n](const status_request&) { status_response resp; resp.json[n + "_metric"] = 1; @@ -64,64 +63,62 @@ TEST_F(SobjectTest, BasicTest) { }; // Create heirarchy of objects. - auto a_vec = create_nodes(nullptr, "A", "A", 2); - auto a_child_vec = create_nodes(a_vec[0], "A_child", "A_child", 2); - auto b_vec = create_nodes(nullptr, "B", "B", 2); - auto b_child_vec = create_nodes(b_vec[0], "B", "BB", 2); - auto c_vec = create_nodes(nullptr, "C", "C", 2); - auto c_child_vec = create_nodes(c_vec[0], "C_child", "C_child", 2); - auto c_child_child_vec = create_nodes(c_child_vec[0], "C_child_child", "C_child_child", 2); + auto module_vec = create_nodes(nullptr, "module", "module", 3); + auto a_vec = create_nodes(module_vec[0], "A", "A", 2); + auto b_vec = create_nodes(module_vec[1], "B", "B", 2); + auto c_vec = create_nodes(module_vec[0], "C", "C", 2); + + auto a_sub_vec = create_nodes(a_vec[0], "A_sub", "A_sub", 2); + auto b_sub_vec = create_nodes(b_vec[0], "B_sub", "B_sub", 2); + auto c_sub_vec = create_nodes(c_vec[0], "C_sub", "C_sub", 2); + + auto c_child_child_vec = create_nodes(c_sub_vec[0], "C_sub_sub", "C_sub_sub", 2); + { + // Get all objects. status_request req; status_response resp; resp = mgr.get_status(req); - LOGINFO("{}", resp.json.dump(2)); + LOGINFO("{}", resp.json.dump()); + ASSERT_EQ(resp.json.dump(), R"({"module":{"A":{"A_sub":{}},"B":{"B_sub":{}},"C":{"C_sub":{"C_sub_sub":{}}}}})"); } { + // Get object by name recursive and non recursive. status_request req; status_response resp; - req.obj_type = "B"; - req.obj_name = "B1"; + req.obj_name = "module_1"; req.do_recurse = true; resp = mgr.get_status(req); - ASSERT_EQ(resp.json["children"]["B"]["BB1"]["name"], "BB1") << resp.json.dump(2); + LOGINFO("{}", resp.json.dump()); + // TODO add validation. req.do_recurse = false; resp = mgr.get_status(req); - ASSERT_EQ(resp.json["children"]["B"].size(), 2) << resp.json.dump(2); - ASSERT_EQ(resp.json["children"]["B"][0], "BB1") << resp.json.dump(2); + LOGINFO("{}", resp.json.dump()); + ASSERT_EQ(resp.json.dump(), R"({"children":{"A":["A_1","A_2"],"C":["C_1","C_2"]},"module_1_metric":1,"name":"module_1","type":"module"})"); } + { + // Get object by type recursive and non recursive. status_request req; status_response resp; req.do_recurse = true; req.obj_type = "C"; resp = mgr.get_status(req); - ASSERT_EQ(resp.json.size(), 2) << resp.json.dump(2); - } - - { - status_request req; - status_response resp; - req.obj_type = "C_child_child"; - req.obj_name = "C_child_child2"; - resp = mgr.get_status(req); - LOGINFO("Response {}", resp.json.dump(2)); - ASSERT_EQ(resp.json["name"], "C_child_child2") << resp.json.dump(2); - ASSERT_EQ(resp.json["type"], "C_child_child") << resp.json.dump(2); + LOGINFO("{}", resp.json.dump()); } { status_request req; status_response resp; - req.obj_path = {"C1", "C_child1", "C_child_child1"}; + req.obj_path = {"module_1", "C_1", "C_sub_1", "C_sub_sub_1"}; req.do_recurse = false; resp = mgr.get_status(req); LOGINFO("Response {}", resp.json.dump(2)); - ASSERT_EQ(resp.json["name"], "C_child_child1") << resp.json.dump(2); - ASSERT_EQ(resp.json["type"], "C_child_child") << resp.json.dump(2); + ASSERT_EQ(resp.json["name"], "C_sub_sub_1") << resp.json.dump(); + ASSERT_EQ(resp.json["type"], "C_sub_sub") << resp.json.dump(); } { @@ -135,7 +132,7 @@ TEST_F(SobjectTest, BasicTest) { while (true) { resp = mgr.get_status(req); count--; - LOGINFO("Response {}", resp.json.dump(2)); + LOGINFO("Response {}", resp.json.dump()); if (!resp.json.contains("next_cursor")) break; req.next_cursor = resp.json["next_cursor"]; } From b3ded472c8e239f974199f6aa17ea3952568089c Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 20 Jun 2023 12:41:13 -0700 Subject: [PATCH 307/385] Adding breakpad library. --- .github/workflows/build_dependencies.yml | 1 + 3rd_party/breakpad/conandata.yml | 8 + 3rd_party/breakpad/conanfile.py | 81 +++ .../patches/0001-Use_conans_lss.patch | 228 ++++++ .../patches/0002-Remove-hardcoded-fpic.patch | 29 + CMakeLists.txt | 2 + conanfile.py | 2 + include/sisl/logging/logging.h | 2 +- src/CMakeLists.txt | 1 + src/logging/CMakeLists.txt | 1 - src/logging/backtrace.cpp | 684 ------------------ src/logging/backtrace.h | 134 ---- src/logging/logging.cpp | 2 - src/logging/stacktrace.cpp | 149 +--- test_package/test_package.cpp | 7 +- 15 files changed, 374 insertions(+), 957 deletions(-) create mode 100644 3rd_party/breakpad/conandata.yml create mode 100644 3rd_party/breakpad/conanfile.py create mode 100644 3rd_party/breakpad/patches/0001-Use_conans_lss.patch create mode 100644 3rd_party/breakpad/patches/0002-Remove-hardcoded-fpic.patch delete mode 100644 src/logging/backtrace.cpp delete mode 100644 src/logging/backtrace.h diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index d3863a02..ec49a47b 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -117,6 +117,7 @@ jobs: python -m pip install gcovr conan user conan profile new --detect default + conan export deps/sisl/3rd_party/breakpad breakpad/cci.20230127@ conan export deps/sisl/3rd_party/folly folly/2022.01.31.00@ conan export deps/sisl/3rd_party/gperftools conan export deps/sisl/3rd_party/jemalloc diff --git a/3rd_party/breakpad/conandata.yml b/3rd_party/breakpad/conandata.yml new file mode 100644 index 00000000..ceaf3e10 --- /dev/null +++ b/3rd_party/breakpad/conandata.yml @@ -0,0 +1,8 @@ +sources: + "cci.20230127": + url: "https://github.com/google/breakpad/archive/bae713b.tar.gz" + sha256: "65a0dd6db9065dc539ddf35f969d10b5ad8a7b2c305d2dc5a66a1f8d46f4a904" +patches: + "cci.20230127": + - patch_file: "patches/0001-Use_conans_lss.patch" + - patch_file: "patches/0002-Remove-hardcoded-fpic.patch" diff --git a/3rd_party/breakpad/conanfile.py b/3rd_party/breakpad/conanfile.py new file mode 100644 index 00000000..f4cdf5d9 --- /dev/null +++ b/3rd_party/breakpad/conanfile.py @@ -0,0 +1,81 @@ +from conan import ConanFile +from conan.errors import ConanInvalidConfiguration +from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir +from conan.tools.gnu import Autotools, AutotoolsDeps, AutotoolsToolchain +from conan.tools.layout import basic_layout +import os + +required_conan_version = ">=1.52.0" + + +class BreakpadConan(ConanFile): + name = "breakpad" + description = "A set of client and server components which implement a crash-reporting system" + topics = ["crash", "report", "breakpad"] + license = "BSD-3-Clause" + url = "https://github.com/conan-io/conan-center-index" + homepage = "https://chromium.googlesource.com/breakpad/breakpad/" + + settings = "os", "arch", "compiler", "build_type" + options = { + "fPIC": [True, False], + } + default_options = { + "fPIC": True, + } + + def export_sources(self): + export_conandata_patches(self) + + def layout(self): + basic_layout(self, src_folder="src") + + def requirements(self): + self.requires("linux-syscall-support/cci.20200813") + + def validate(self): + if self.settings.os != "Linux": + raise ConanInvalidConfiguration("Breakpad can only be built on Linux. For other OSs check sentry-breakpad") + + def source(self): + get(self, **self.conan_data["sources"][self.version], strip_root=True) + + def generate(self): + tc = AutotoolsToolchain(self) + # see https://github.com/conan-io/conan/issues/12020 + tc.configure_args.append("--libexecdir=${prefix}/bin") + tc.generate() + deps = AutotoolsDeps(self) + deps.generate() + + def build(self): + apply_conandata_patches(self) + autotools = Autotools(self) + autotools.configure() + autotools.make() + + def package(self): + copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses")) + autotools = Autotools(self) + autotools.install() + rmdir(self, os.path.join(self.package_folder, "share")) + rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig")) + + def package_info(self): + self.cpp_info.components["libbreakpad"].set_property("pkg_config_name", "breakpad") + self.cpp_info.components["libbreakpad"].libs = ["breakpad"] + self.cpp_info.components["libbreakpad"].includedirs.append(os.path.join("include", "breakpad")) + self.cpp_info.components["libbreakpad"].system_libs.append("pthread") + self.cpp_info.components["libbreakpad"].requires.append("linux-syscall-support::linux-syscall-support") + + self.cpp_info.components["client"].set_property("pkg_config_name", "breakpad-client") + self.cpp_info.components["client"].libs = ["breakpad_client"] + self.cpp_info.components["client"].includedirs.append(os.path.join("include", "breakpad")) + self.cpp_info.components["client"].system_libs.append("pthread") + self.cpp_info.components["client"].requires.append("linux-syscall-support::linux-syscall-support") + + # workaround to always produce a global pkgconfig file for PkgConfigDeps + self.cpp_info.set_property("pkg_config_name", "breakpad-do-not-use") + + # TODO: to remove in conan v2 + self.env_info.PATH.append(os.path.join(self.package_folder, "bin")) diff --git a/3rd_party/breakpad/patches/0001-Use_conans_lss.patch b/3rd_party/breakpad/patches/0001-Use_conans_lss.patch new file mode 100644 index 00000000..d2bedfe2 --- /dev/null +++ b/3rd_party/breakpad/patches/0001-Use_conans_lss.patch @@ -0,0 +1,228 @@ +diff -Naur a/src/client/linux/crash_generation/crash_generation_client.cc b/src/client/linux/crash_generation/crash_generation_client.cc +--- a/src/client/linux/crash_generation/crash_generation_client.cc 2023-01-27 13:36:21.000000000 -0700 ++++ b/src/client/linux/crash_generation/crash_generation_client.cc 2023-06-20 10:56:18.746685403 -0700 +@@ -36,7 +36,7 @@ + + #include "common/linux/eintr_wrapper.h" + #include "common/linux/ignore_ret.h" +-#include "third_party/lss/linux_syscall_support.h" ++#include + + namespace google_breakpad { + +diff -Naur a/src/client/linux/handler/exception_handler.cc b/src/client/linux/handler/exception_handler.cc +--- a/src/client/linux/handler/exception_handler.cc 2023-01-27 13:36:21.000000000 -0700 ++++ b/src/client/linux/handler/exception_handler.cc 2023-06-20 10:56:18.750685408 -0700 +@@ -94,7 +94,7 @@ + #include "client/linux/minidump_writer/linux_dumper.h" + #include "client/linux/minidump_writer/minidump_writer.h" + #include "common/linux/eintr_wrapper.h" +-#include "third_party/lss/linux_syscall_support.h" ++#include + + #if defined(__ANDROID__) + #include "linux/sched.h" +diff -Naur a/src/client/linux/handler/exception_handler_unittest.cc b/src/client/linux/handler/exception_handler_unittest.cc +--- a/src/client/linux/handler/exception_handler_unittest.cc 2023-01-27 13:36:21.000000000 -0700 ++++ b/src/client/linux/handler/exception_handler_unittest.cc 2023-06-20 10:56:18.750685408 -0700 +@@ -49,7 +49,7 @@ + #include "common/linux/linux_libc_support.h" + #include "common/tests/auto_tempdir.h" + #include "common/using_std_string.h" +-#include "third_party/lss/linux_syscall_support.h" ++#include + #include "google_breakpad/processor/minidump.h" + + using namespace google_breakpad; +diff -Naur a/src/client/linux/log/log.cc b/src/client/linux/log/log.cc +--- a/src/client/linux/log/log.cc 2023-01-27 13:36:21.000000000 -0700 ++++ b/src/client/linux/log/log.cc 2023-06-20 10:56:18.754685413 -0700 +@@ -32,7 +32,7 @@ + #include + #include + #else +-#include "third_party/lss/linux_syscall_support.h" ++#include + #endif + + namespace logger { +diff -Naur a/src/client/linux/minidump_writer/cpu_set.h b/src/client/linux/minidump_writer/cpu_set.h +--- a/src/client/linux/minidump_writer/cpu_set.h 2023-01-27 13:36:21.000000000 -0700 ++++ b/src/client/linux/minidump_writer/cpu_set.h 2023-06-20 10:56:21.690688837 -0700 +@@ -34,7 +34,7 @@ + #include + + #include "common/linux/linux_libc_support.h" +-#include "third_party/lss/linux_syscall_support.h" ++#include + + namespace google_breakpad { + +diff -Naur a/src/client/linux/minidump_writer/directory_reader.h b/src/client/linux/minidump_writer/directory_reader.h +--- a/src/client/linux/minidump_writer/directory_reader.h 2023-01-27 13:36:21.000000000 -0700 ++++ b/src/client/linux/minidump_writer/directory_reader.h 2023-06-20 10:56:21.694688842 -0700 +@@ -37,7 +37,7 @@ + #include + + #include "common/linux/linux_libc_support.h" +-#include "third_party/lss/linux_syscall_support.h" ++#include + + namespace google_breakpad { + +diff -Naur a/src/client/linux/minidump_writer/line_reader.h b/src/client/linux/minidump_writer/line_reader.h +--- a/src/client/linux/minidump_writer/line_reader.h 2023-01-27 13:36:21.000000000 -0700 ++++ b/src/client/linux/minidump_writer/line_reader.h 2023-06-20 10:56:21.694688842 -0700 +@@ -34,7 +34,7 @@ + #include + + #include "common/linux/linux_libc_support.h" +-#include "third_party/lss/linux_syscall_support.h" ++#include + + namespace google_breakpad { + +diff -Naur a/src/client/linux/minidump_writer/linux_dumper.cc b/src/client/linux/minidump_writer/linux_dumper.cc +--- a/src/client/linux/minidump_writer/linux_dumper.cc 2023-01-27 13:36:21.000000000 -0700 ++++ b/src/client/linux/minidump_writer/linux_dumper.cc 2023-06-20 10:56:18.766685426 -0700 +@@ -50,7 +50,7 @@ + #include "common/linux/memory_mapped_file.h" + #include "common/linux/safe_readlink.h" + #include "google_breakpad/common/minidump_exception_linux.h" +-#include "third_party/lss/linux_syscall_support.h" ++#include + + using google_breakpad::elf::FileID; + +diff -Naur a/src/client/linux/minidump_writer/linux_dumper_unittest_helper.cc b/src/client/linux/minidump_writer/linux_dumper_unittest_helper.cc +--- a/src/client/linux/minidump_writer/linux_dumper_unittest_helper.cc 2023-01-27 13:36:21.000000000 -0700 ++++ b/src/client/linux/minidump_writer/linux_dumper_unittest_helper.cc 2023-06-20 10:56:18.766685426 -0700 +@@ -38,7 +38,7 @@ + #include + + #include "common/scoped_ptr.h" +-#include "third_party/lss/linux_syscall_support.h" ++#include + + #if defined(__ARM_EABI__) + #define TID_PTR_REGISTER "r3" +diff -Naur a/src/client/linux/minidump_writer/linux_ptrace_dumper.cc b/src/client/linux/minidump_writer/linux_ptrace_dumper.cc +--- a/src/client/linux/minidump_writer/linux_ptrace_dumper.cc 2023-01-27 13:36:21.000000000 -0700 ++++ b/src/client/linux/minidump_writer/linux_ptrace_dumper.cc 2023-06-20 10:56:18.766685426 -0700 +@@ -56,7 +56,7 @@ + #include "client/linux/minidump_writer/directory_reader.h" + #include "client/linux/minidump_writer/line_reader.h" + #include "common/linux/linux_libc_support.h" +-#include "third_party/lss/linux_syscall_support.h" ++#include + + // Suspends a thread by attaching to it. + static bool SuspendThread(pid_t pid) { +diff -Naur a/src/client/linux/minidump_writer/minidump_writer.cc b/src/client/linux/minidump_writer/minidump_writer.cc +--- a/src/client/linux/minidump_writer/minidump_writer.cc 2023-01-27 13:36:21.000000000 -0700 ++++ b/src/client/linux/minidump_writer/minidump_writer.cc 2023-06-20 10:56:18.770685431 -0700 +@@ -78,7 +78,7 @@ + #include "common/linux/linux_libc_support.h" + #include "common/minidump_type_helper.h" + #include "google_breakpad/common/minidump_format.h" +-#include "third_party/lss/linux_syscall_support.h" ++#include + + namespace { + +diff -Naur a/src/client/linux/minidump_writer/proc_cpuinfo_reader.h b/src/client/linux/minidump_writer/proc_cpuinfo_reader.h +--- a/src/client/linux/minidump_writer/proc_cpuinfo_reader.h 2023-01-27 13:36:21.000000000 -0700 ++++ b/src/client/linux/minidump_writer/proc_cpuinfo_reader.h 2023-06-20 10:56:21.702688851 -0700 +@@ -35,7 +35,7 @@ + + #include "client/linux/minidump_writer/line_reader.h" + #include "common/linux/linux_libc_support.h" +-#include "third_party/lss/linux_syscall_support.h" ++#include + + namespace google_breakpad { + +diff -Naur a/src/client/minidump_file_writer.cc b/src/client/minidump_file_writer.cc +--- a/src/client/minidump_file_writer.cc 2023-01-27 13:36:21.000000000 -0700 ++++ b/src/client/minidump_file_writer.cc 2023-06-20 10:56:18.794685460 -0700 +@@ -40,7 +40,7 @@ + #include "common/linux/linux_libc_support.h" + #include "common/string_conversion.h" + #if defined(__linux__) && __linux__ +-#include "third_party/lss/linux_syscall_support.h" ++#include + #endif + + #if defined(__ANDROID__) +diff -Naur a/src/common/linux/file_id.cc b/src/common/linux/file_id.cc +--- a/src/common/linux/file_id.cc 2023-01-27 13:36:21.000000000 -0700 ++++ b/src/common/linux/file_id.cc 2023-06-20 10:56:18.846685520 -0700 +@@ -45,7 +45,7 @@ + #include "common/linux/linux_libc_support.h" + #include "common/linux/memory_mapped_file.h" + #include "common/using_std_string.h" +-#include "third_party/lss/linux_syscall_support.h" ++#include + + namespace google_breakpad { + namespace elf { +diff -Naur a/src/common/linux/memory_mapped_file.cc b/src/common/linux/memory_mapped_file.cc +--- a/src/common/linux/memory_mapped_file.cc 2023-01-27 13:36:21.000000000 -0700 ++++ b/src/common/linux/memory_mapped_file.cc 2023-06-20 10:56:18.854685530 -0700 +@@ -39,7 +39,7 @@ + #include + + #include "common/memory_range.h" +-#include "third_party/lss/linux_syscall_support.h" ++#include + + namespace google_breakpad { + +diff -Naur a/src/common/linux/safe_readlink.cc b/src/common/linux/safe_readlink.cc +--- a/src/common/linux/safe_readlink.cc 2023-01-27 13:36:21.000000000 -0700 ++++ b/src/common/linux/safe_readlink.cc 2023-06-20 10:56:18.858685533 -0700 +@@ -31,7 +31,7 @@ + + #include + +-#include "third_party/lss/linux_syscall_support.h" ++#include + + namespace google_breakpad { + +diff -Naur a/src/common/memory_allocator.h b/src/common/memory_allocator.h +--- a/src/common/memory_allocator.h 2023-01-27 13:36:21.000000000 -0700 ++++ b/src/common/memory_allocator.h 2023-06-20 10:56:21.818688987 -0700 +@@ -46,7 +46,7 @@ + #define sys_munmap munmap + #define MAP_ANONYMOUS MAP_ANON + #else +-#include "third_party/lss/linux_syscall_support.h" ++#include + #endif + + namespace google_breakpad { +diff -Naur a/src/processor/testdata/linux_test_app.cc b/src/processor/testdata/linux_test_app.cc +--- a/src/processor/testdata/linux_test_app.cc 2023-01-27 13:36:21.000000000 -0700 ++++ b/src/processor/testdata/linux_test_app.cc 2023-06-20 10:56:18.990685688 -0700 +@@ -45,7 +45,7 @@ + #include + + #include "client/linux/handler/exception_handler.h" +-#include "third_party/lss/linux_syscall_support.h" ++#include + + namespace { + +diff -Naur a/src/tools/linux/md2core/minidump-2-core.cc b/src/tools/linux/md2core/minidump-2-core.cc +--- a/src/tools/linux/md2core/minidump-2-core.cc 2023-01-27 13:36:21.000000000 -0700 ++++ b/src/tools/linux/md2core/minidump-2-core.cc 2023-06-20 10:56:18.994685693 -0700 +@@ -51,7 +51,7 @@ + #include "common/using_std_string.h" + #include "google_breakpad/common/breakpad_types.h" + #include "google_breakpad/common/minidump_format.h" +-#include "third_party/lss/linux_syscall_support.h" ++#include + #include "tools/linux/md2core/minidump_memory_range.h" + + #if ULONG_MAX == 0xffffffffffffffff diff --git a/3rd_party/breakpad/patches/0002-Remove-hardcoded-fpic.patch b/3rd_party/breakpad/patches/0002-Remove-hardcoded-fpic.patch new file mode 100644 index 00000000..bd0eaba8 --- /dev/null +++ b/3rd_party/breakpad/patches/0002-Remove-hardcoded-fpic.patch @@ -0,0 +1,29 @@ +--- a/Makefile.in 2023-01-27 13:36:21.000000000 -0700 ++++ b/Makefile.in 2023-06-20 11:07:14.611452052 -0700 +@@ -129,8 +129,6 @@ + @ANDROID_HOST_TRUE@ -I$(top_srcdir)/src/common/android/testing/include + + # Build as PIC on Linux, for linux_client_unittest_shlib +-@LINUX_HOST_TRUE@am__append_2 = -fPIC +-@LINUX_HOST_TRUE@am__append_3 = -fPIC + libexec_PROGRAMS = $(am__EXEEXT_10) + bin_PROGRAMS = $(am__EXEEXT_2) $(am__EXEEXT_3) $(am__EXEEXT_4) + check_PROGRAMS = src/common/safe_math_unittest$(EXEEXT) \ +@@ -1744,7 +1742,7 @@ + HEADERS = $(includec_HEADERS) $(includecl_HEADERS) \ + $(includeclc_HEADERS) $(includecldwc_HEADERS) \ + $(includeclh_HEADERS) $(includeclm_HEADERS) \ +- $(includegbc_HEADERS) $(includelss_HEADERS) \ ++ $(includegbc_HEADERS) \ + $(includep_HEADERS) + am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) + # Read a list of newline-separated strings from the standard input, +@@ -9650,7 +9648,7 @@ + install-includeclHEADERS install-includeclcHEADERS \ + install-includecldwcHEADERS install-includeclhHEADERS \ + install-includeclmHEADERS install-includegbcHEADERS \ +- install-includelssHEADERS install-includepHEADERS \ ++ install-includepHEADERS \ + install-pkgconfigDATA + + install-dvi: install-dvi-am diff --git a/CMakeLists.txt b/CMakeLists.txt index 6e6199d8..8944503d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -67,8 +67,10 @@ find_package(Threads REQUIRED) # Linux Specific dependencies if(${CMAKE_HOST_SYSTEM_NAME} STREQUAL Linux) + find_package(breakpad REQUIRED) find_package(folly REQUIRED) find_package(userspace-rcu REQUIRED) + list (APPEND COMMON_DEPS breakpad::breakpad) endif() list (APPEND COMMON_DEPS diff --git a/conanfile.py b/conanfile.py index d5a263a3..f77a88d1 100644 --- a/conanfile.py +++ b/conanfile.py @@ -50,6 +50,8 @@ def requirements(self): # Generic packages (conan-center) self.requires("boost/1.79.0") + if self.settings.os in ["Linux"]: + self.requires("breakpad/cci.20230127") self.requires("cpr/1.8.1") self.requires("cxxopts/2.2.1") self.requires("flatbuffers/1.12.0") diff --git a/include/sisl/logging/logging.h b/include/sisl/logging/logging.h index 3b244526..16407415 100644 --- a/include/sisl/logging/logging.h +++ b/include/sisl/logging/logging.h @@ -245,7 +245,7 @@ constexpr const char* file_name(const char* const str) { return str_slant(str) ? #define _ABORT_OR_DUMP(is_log_assert) \ assert(0); \ if (is_log_assert) { \ - if (sisl::logging::is_crash_handler_installed()) { sisl::logging::log_stack_trace(false); } \ + if (sisl::logging::is_crash_handler_installed()) { raise(SIGUSR3); } \ } else { \ abort(); \ } diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 15d33f20..d39a86c1 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -35,6 +35,7 @@ if(${CMAKE_HOST_SYSTEM_NAME} STREQUAL Linux) ) list(APPEND SISL_DEPS Folly::Folly + breakpad::breakpad ) endif() diff --git a/src/logging/CMakeLists.txt b/src/logging/CMakeLists.txt index 97747fc4..83870062 100644 --- a/src/logging/CMakeLists.txt +++ b/src/logging/CMakeLists.txt @@ -2,7 +2,6 @@ cmake_minimum_required (VERSION 3.11) add_library(sisl_logging OBJECT) target_sources(sisl_logging PRIVATE - backtrace.cpp logging.cpp stacktrace.cpp ) diff --git a/src/logging/backtrace.cpp b/src/logging/backtrace.cpp deleted file mode 100644 index 9ad62b47..00000000 --- a/src/logging/backtrace.cpp +++ /dev/null @@ -1,684 +0,0 @@ -/********************************************************************************* - * Modifications Copyright 2017-2019 eBay Inc. - * - * Author/Developer(s): Harihara Kadayam, Bryan Zimmerman - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed - * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - * - *********************************************************************************/ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if defined(__linux__) || defined(__APPLE__) -#include -#include -#endif - -#ifdef __linux__ -#include -#endif - -#ifdef __APPLE__ -#include -#include -#endif - -#include - -#include "backtrace.h" - -namespace { - -#ifdef __APPLE__ -[[maybe_unused]] uint64_t static_base_address(void) { - const struct segment_command_64* const command{::getsegbyname(SEG_TEXT /*"__TEXT"*/)}; - const uint64_t addr{command->vmaddr}; - return addr; -} - -[[maybe_unused]] std::string get_exec_path() { - std::array< char, 1024 > path; - uint32_t size{static_cast< uint32_t >(path.size())}; - if (::_NSGetExecutablePath(path.data(), &size) != 0) return std::string{}; - - return std::string{path.data()}; -} - -[[maybe_unused]] std::string get_file_part(const std::string& full_path) { - const size_t pos{full_path.rfind("/")}; - if (pos == std::string::npos) return full_path; - - return full_path.substr(pos + 1, full_path.size() - pos - 1); -} - -[[maybe_unused]] intptr_t image_slide(void) { - const std::string exec_path{get_exec_path()}; - if (exec_path.empty()) return -1; - - const auto image_count{::_dyld_image_count()}; - for (std::remove_const_t< decltype(image_count) > i{0}; i < image_count; ++i) { - if (std::strcmp(::_dyld_get_image_name(i), exec_path.c_str()) == 0) { - return ::_dyld_get_image_vmaddr_slide(i); - } - } - return -1; -} -#endif // __APPLE__ - -template < typename... Args > -[[maybe_unused]] void t_snprintf(char* const msg, size_t& avail_len, size_t& cur_len, size_t& msg_len, Args&&... args) { - avail_len = (avail_len > cur_len) ? (avail_len - cur_len) : 0; - if (avail_len > 0) { - msg_len = std::snprintf(msg + cur_len, avail_len, std::forward< Args >(args)...); - cur_len += (avail_len > msg_len) ? msg_len : avail_len; - } -} - -[[maybe_unused]] uintptr_t convert_hex_to_integer(const char* const input_str) { - uintptr_t actual_addr{0}; - - // Convert hex string -> integer address. - const char* pos{std::strpbrk(input_str, "xX")}; - if (!pos) return actual_addr; - - while (++pos && (*pos != 0x00)) { - const char c{*pos}; - uint8_t val{0}; - if ((c >= '0') && (c <= '9')) { - val = static_cast< uint8_t >(c - '0'); - } else if ((c >= 'A') && (c <= 'F')) { - val = static_cast< uint8_t >((c - 'A') + 10); - } else if ((c >= 'a') && (c <= 'f')) { - val = static_cast< uint8_t >((c - 'a') + 10); - } else { - break; - } - actual_addr <<= 4; - actual_addr += val; - } - return actual_addr; -} - -// trim whitespace of the given null terminated string of length input_length not including null terminator -[[maybe_unused]] size_t trim_whitespace(char* const input, const size_t input_length) { - size_t length{input_length}; - if (length == 0) return length; - - // trim beginning - size_t trim{0}; - while (trim < length) { - if (std::isspace(input[trim]) != 0) - ++trim; - else - break; - } - if (trim > 0) { - length -= trim; - std::memmove(&input[0], &input[trim], length + 1); // include null terminator - } - - // trim end - while (length > 0) { - if (std::isspace(input[length - 1]) != 0) { - input[length - 1] = 0x00; - --length; - } else - break; - } - return length; -} - -[[maybe_unused]] void skip_whitespace(const std::string& base_str, size_t& cursor) { - while ((cursor < base_str.size()) && (base_str[cursor] == ' ')) - ++cursor; -} - -[[maybe_unused]] void skip_glyph(const std::string& base_str, size_t& cursor) { - while ((cursor < base_str.size()) && (base_str[cursor] != ' ')) - ++cursor; -} - -template < typename... Args > -[[maybe_unused]] void log_message(fmt::format_string< Args... > msg_fmt, Args&&... args) { - auto& logger{sisl::logging::GetLogger()}; - auto& critical_logger{sisl::logging::GetCriticalLogger()}; - - if (logger) { logger->critical(msg_fmt, std::forward< Args >(args)...); } - if (critical_logger) { critical_logger->critical(msg_fmt, std::forward< Args >(args)...); } -} - -#ifdef __linux__ -std::pair< bool, uintptr_t > offset_symbol_address(const char* const file_name, const char* const symbol_str, - const uintptr_t symbol_address) { - bool status{false}; - uintptr_t offset_address{symbol_address}; - Dl_info symbol_info; - - void* addr{nullptr}; - { - const std::unique_ptr< void, std::function< void(void* const) > > obj_file{::dlopen(file_name, RTLD_LAZY), - [](void* const ptr) { - if (ptr) ::dlclose(ptr); - }}; - if (!obj_file) { return {status, offset_address}; } - - addr = ::dlsym(obj_file.get(), symbol_str); - if (!addr) { return {status, offset_address}; } - } - - // extract the symbolic information pointed by address - if (!::dladdr(addr, &symbol_info)) { return {status, offset_address}; } - offset_address += - (reinterpret_cast< uintptr_t >(symbol_info.dli_saddr) - reinterpret_cast< uintptr_t >(symbol_info.dli_fbase)) - - 1; - status = true; - - return {status, offset_address}; -} - -std::pair< const char*, const char* > convert_symbol_line(const char* const file_name, const size_t file_name_length, - const uintptr_t address, const char* const symbol_name) { - static constexpr size_t line_number_length{24}; - static constexpr std::array< char, 10 > s_pipe_unknown{"??\0??:?\0"}; - const char* mangled_name{s_pipe_unknown.data()}; - size_t mangled_name_length{2}; - const char* file_line{s_pipe_unknown.data() + 3}; - size_t file_line_length{4}; - - if (file_name_length == 0) return {mangled_name, file_line}; - - // form the command - static constexpr size_t extra_length{ - 10}; // includes single quotes around process name and " -a 0x" and null terminator - static constexpr std::array< char, 18 > prefix{"addr2line -f -e \'"}; - static std::array< - char, extra_length + prefix.size() + backtrace_detail::file_name_length + backtrace_detail::address_length > - s_command; - size_t command_length{prefix.size() - 1}; - std::memcpy(s_command.data(), prefix.data(), command_length); - std::memcpy(s_command.data() + command_length, file_name, file_name_length); - command_length += file_name_length; - static std::array< char, backtrace_detail::address_length + 1 > s_address; - std::snprintf(s_address.data(), s_address.size(), "%" PRIxPTR, address); - std::snprintf(s_command.data() + command_length, s_command.size() - command_length, "\' -a 0x%s", s_address.data()); - // log_message("SISL Logging - symbol_line with command {}", s_command.data()); - - // execute command and read data from pipe - { - const std::unique_ptr< FILE, std::function< void(FILE* const) > > fp{::popen(s_command.data(), "re"), - [](FILE* const ptr) { - if (ptr) ::pclose(ptr); - }}; - if (fp) { - // wait on pipe - const auto waitOnPipe{[rfd{::fileno(fp.get())}](const uint64_t wait_ms) { - fd_set rfds; - FD_ZERO(&rfds); - FD_SET(rfd, &rfds); - - timespec ts; - ts.tv_sec = static_cast< decltype(ts.tv_sec) >(wait_ms / 1000); - ts.tv_nsec = static_cast< decltype(ts.tv_nsec) >((wait_ms % 1000) * 1000000); - const int result{::pselect(FD_SETSIZE, &rfds, nullptr, nullptr, &ts, nullptr)}; - return (result > 0); - }}; - - // read the pipe - constexpr uint64_t loop_wait_ms{1000}; - constexpr size_t read_tries{static_cast< size_t >(backtrace_detail::pipe_timeout_ms / loop_wait_ms)}; - constexpr size_t newlines_expected{3}; - std::array< const char*, newlines_expected > newline_positions; - size_t total_bytes_read{0}; - size_t total_newlines{0}; - static std::array< - char, backtrace_detail::symbol_name_length + backtrace_detail::file_name_length + line_number_length > - s_pipe_data; - bool address_found{false}; - for (size_t read_try{0}; (read_try < read_tries) && (total_newlines < newlines_expected); ++read_try) { - if (waitOnPipe(loop_wait_ms)) { - size_t bytes{std::fread(s_pipe_data.data() + total_bytes_read, 1, - s_pipe_data.size() - total_bytes_read, fp.get())}; - // count new newlines and null terminate at those positions - for (size_t byte_num{0}; byte_num < bytes; ++byte_num) { - const auto updateNewlines{[&total_newlines, &newline_positions](const size_t offset) { - if (total_newlines < newlines_expected) { - newline_positions[total_newlines] = &s_pipe_data[offset]; - } - ++total_newlines; - }}; - - const size_t offset{byte_num + total_bytes_read}; - if (s_pipe_data[offset] == '\n') { - s_pipe_data[offset] = 0x00; // convert newline to null terminator - if (!address_found) { - // check for address in pipe data - const char* const address_ptr{std::strstr(s_pipe_data.data(), s_address.data())}; - if (address_ptr) { - address_found = true; - updateNewlines(offset); - } else { - // wipe all pipe data up to and including null ptr - if (byte_num < bytes - 1) { - std::memmove(s_pipe_data.data(), s_pipe_data.data() + offset + 1, - bytes + total_bytes_read - offset - 1); - bytes -= byte_num + 1; - } else { - bytes = 0; - } - total_bytes_read = 0; - byte_num = 0; - } - } else { - updateNewlines(offset); - } - } - } - total_bytes_read += bytes; - } - } - s_pipe_data[total_bytes_read] = 0; - - // read the pipe - if (total_newlines > 0) { - if (total_newlines == 3) { - // file and name info - file_line = newline_positions[1] + 1; - file_line_length = static_cast< size_t >(newline_positions[2] - file_line); - file_line_length = trim_whitespace(const_cast< char* >(file_line), file_line_length); - mangled_name = newline_positions[0] + 1; - mangled_name_length = static_cast< size_t >(newline_positions[1] - mangled_name); - mangled_name_length = trim_whitespace(const_cast< char* >(mangled_name), mangled_name_length); - } else if (total_newlines == 2) { - log_message("SISL Logging - Pipe did not return expected number of newlines {}", total_newlines); - mangled_name = newline_positions[0] + 1; - mangled_name_length = static_cast< size_t >(newline_positions[1] - mangled_name); - mangled_name_length = trim_whitespace(const_cast< char* >(mangled_name), mangled_name_length); - } else { - log_message("SISL Logging - Pipe did not return expected number of newlines {}", total_newlines); - } - } else { - // no pipe data just continue - log_message("SISL Logging - No pipe data"); - } - } else { - // no pipe just continue - log_message("SISL Logging - Could not open pipe to resolve symbol_line with command {}", s_command.data()); - } - if (std::strstr(mangled_name, "??")) { - log_message("SISL Logging - Could not resolve symbol_line with command {}", s_command.data()); - } - } - - // demangle the name - static std::array< char, backtrace_detail::symbol_name_length > demangled_name; - { - [[maybe_unused]] int status{-3}; // one of the arguments is invalid - const std::unique_ptr< const char, std::function< void(const char* const) > > cxa_demangled_name{ - std::strstr(mangled_name, "??") ? nullptr : abi::__cxa_demangle(mangled_name, 0, 0, &status), - [](const char* const ptr) { - if (ptr) std::free(static_cast< void* >(const_cast< char* >(ptr))); - }}; - if (!cxa_demangled_name) { - if (status != -2) { // check that not a mangled name - log_message("SISL Logging - Could not demangle name {} error {}", mangled_name, status); - } - if (!symbol_name || (symbol_name[0] == '+') || (symbol_name[0] == 0x00)) { - // no symbol name so use mangled name - std::memcpy(demangled_name.data(), mangled_name, mangled_name_length); - demangled_name[mangled_name_length] = 0x00; - } else { - // use the symbol name - std::snprintf(demangled_name.data(), demangled_name.size(), "%s", symbol_name); - } - } else { - // use the demangled name - std::snprintf(demangled_name.data(), demangled_name.size(), "%s", cxa_demangled_name.get()); - } - } - - // resolve file name absolute path - static std::array< char, backtrace_detail::file_name_length + line_number_length > s_absolute_file_path; - static std::array< char, backtrace_detail::file_name_length > s_relative_file_path; - const char* const colon_ptr{std::strrchr(file_line, ':')}; - const size_t relative_file_name_length{colon_ptr ? static_cast< size_t >(colon_ptr - file_line) : file_line_length}; - if (std::strstr(file_line, "??") || (relative_file_name_length == 0)) { - // no resolved file name, use process/lib name - std::memcpy(s_relative_file_path.data(), file_name, file_name_length); - s_relative_file_path[file_name_length] = 0x00; - } else { - // use previoulsy received possibly relative path - std::memcpy(s_relative_file_path.data(), file_line, relative_file_name_length); - s_relative_file_path[relative_file_name_length] = 0x00; - } - if (const char* const path{::realpath(s_relative_file_path.data(), s_absolute_file_path.data())}) { - // absolute path resolved - } else { - // use the relative file name path - std::strcpy(s_absolute_file_path.data(), s_relative_file_path.data()); - } - // append line number - if (colon_ptr) { - std::strcat(s_absolute_file_path.data(), colon_ptr); - } else { - std::strcat(s_absolute_file_path.data(), ":?"); - } - - return {demangled_name.data(), s_absolute_file_path.data()}; -} - -#endif // __linux__ - -} // anonymous namespace - -#ifdef __linux__ - -const char* linux_process_name() { - static std::array< char, backtrace_detail::file_name_length > s_process_name; - const auto length{::readlink("/proc/self/exe", s_process_name.data(), s_process_name.size())}; - if (length == -1) { - s_process_name[0] = 0; - } else if (static_cast< size_t >(length) == s_process_name.size()) { - // truncation occurred so null terminate - s_process_name[s_process_name.size() - 1] = 0; - } else { - // success so null terminate - s_process_name[static_cast< size_t >(length)] = 0; - } - return s_process_name.data(); -} - -size_t stack_interpret_linux_file(const void* const* const stack_ptr, FILE* const stack_file, const size_t stack_size, - char* const output_buf, const size_t output_buflen, const bool trim_internal) { - std::rewind(stack_file); - char c{0x00}; - - /* - while (!feof(stack_file)) std::putc(fgetc(stack_file), stdout); - std::rewind(stack_file); - */ - - // get the current process name - const char* const absolute_process_name{linux_process_name()}; - const char* const slash_pos{std::strrchr(absolute_process_name, '/')}; - const char* const process_name{slash_pos ? slash_pos + 1 : absolute_process_name}; - const size_t process_name_length{std::strlen(process_name)}; - - static std::array< size_t, backtrace_detail::max_backtrace > s_output_line_start; - size_t cur_len{0}; - size_t chars_read{0}; - const auto extractName{[&stack_file, &c, &chars_read](auto& dest, const auto& term_chars) { - size_t len{0}; - const auto nullTerminate{[&len, &dest]() { - if (len < dest.size()) { - dest[len] = 0x00; - } else { - dest[dest.size() - 1] = 0x00; - } - return std::min(len, dest.size() - 1); - }}; - while (!std::feof(stack_file)) { - c = static_cast< char >(std::fgetc(stack_file)); - if (!std::feof(stack_file)) { - ++chars_read; - if (std::find(std::cbegin(term_chars), std::cend(term_chars), c) != std::cend(term_chars)) { - return nullTerminate(); - } else if (len < dest.size()) { - dest[len] = c; - } - ++len; - } - } - return nullTerminate(); - }}; - - // read till end of line - const auto readTillEOL{[&stack_file, &c, &chars_read]() { - while (!std::feof(stack_file)) { - c = static_cast< char >(std::fgetc(stack_file)); - if (!std::feof(stack_file)) { - ++chars_read; - if (c == '\n') return; - } - } - return; - }}; - - size_t trim_line{0}; - size_t line_num{0}; - size_t msg_len{0}; - size_t avail_len{output_buflen}; - // NOTE: starting from 1, skipping this line. - readTillEOL(); - for (size_t i{1}; (i < stack_size) && !std::feof(stack_file); ++i) { - // `stack_msg[x]` format: - // /foo/bar/executable() [0xabcdef] - // /foo/bar/executable()(+0xf0) [0x123456] - // /lib/x86_64-linux-gnu/libc.so.6(__libc_start_main+0xf0) [0x123456] - - // NOTE: with ASLR - // /foo/bar/executable(+0x5996) [0x555555559996] - - static std::array< char, backtrace_detail::file_name_length > s_file_name; - const size_t file_name_length{ - trim_whitespace(s_file_name.data(), extractName(s_file_name, std::array< char, 2 >{'(', '\n'}))}; - - if (file_name_length == 0) { - if (c != '\n') readTillEOL(); - continue; - } - - uintptr_t actual_addr{reinterpret_cast< uintptr_t >(stack_ptr[i])}; - static std::array< char, backtrace_detail::symbol_name_length > s_symbol; - s_symbol[0] = 0x00; - if (c == '(') { - // Extract the symbol if present - const size_t symbol_len{ - trim_whitespace(s_symbol.data(), extractName(s_symbol, std::array< char, 2 >{')', '\n'}))}; - - // Extract the offset - if (symbol_len > 0) { - char* const plus{std::strchr(s_symbol.data(), '+')}; - const uintptr_t symbol_address{plus ? convert_hex_to_integer(plus + 1) : 0}; - - if (plus == s_symbol.data()) { - // ASLR is enabled, get the offset from here. - actual_addr = symbol_address; - } else { - if (plus) { - // truncate symbol at + so just function name - *plus = 0x00; - } - const bool main_program{ - file_name_length < process_name_length - ? false - : std::strcmp(process_name, s_file_name.data() + file_name_length - process_name_length) == - 0}; - const auto [offset_result, offset_addr]{offset_symbol_address( - main_program ? nullptr : s_file_name.data(), s_symbol.data(), symbol_address)}; - if (offset_result) { - actual_addr = offset_addr; - } else { - log_message( - "SISL Logging - Could not resolve offset_symbol_address for symbol {} with address {}", - s_symbol.data(), symbol_address); - } - } - } - } - - const auto [demangled_name, - file_line]{convert_symbol_line(s_file_name.data(), file_name_length, actual_addr, s_symbol.data())}; - if (!demangled_name || !file_line) { - if (c != '\n') readTillEOL(); - continue; - } - - if (trim_internal) { - if (std::strstr(demangled_name, "sisl::logging::bt_dumper") || - std::strstr(demangled_name, "sisl::logging::crash_handler")) { - trim_line = line_num; - } - } - s_output_line_start[line_num] = cur_len; - t_snprintf(output_buf, avail_len, cur_len, msg_len, "#%-3zu 0x%016" PRIxPTR " in %s at %s\n", line_num, - actual_addr, demangled_name, file_line); - ++line_num; - - if (c != '\n') readTillEOL(); - } - - if (trim_line > 0) { - // trim characters and include null character at end - const size_t offset{s_output_line_start[trim_line]}; - cur_len -= offset; - std::memmove(output_buf, output_buf + offset, cur_len + 1); // move terminating null - - // renumber lines - for (size_t current_line{0}; current_line < line_num - trim_line; ++current_line) { - std::array< char, 5 > line_str; - const int length{std::snprintf(line_str.data(), line_str.size(), "#%-3zu", current_line)}; - std::memcpy(output_buf + s_output_line_start[trim_line + current_line] - offset, line_str.data(), length); - } - } - - return cur_len; -} -#endif // __linux__ - -#ifdef __APPLE__ -size_t stack_interpret_apple([[maybe_unused]] const void* const* const stack_ptr, const char* const* const stack_msg, - const size_t stack_size, char* const output_buf, const size_t output_buflen, - [[maybe_unused]] const bool trim_internal) { - size_t cur_len{0}; - - [[maybe_unused]] size_t frame_num{0}; - - const std::string exec_full_path{get_exec_path()}; - const std::string exec_file{get_file_part(exec_full_path)}; - const uint64_t load_base{static_cast< uint64_t >(image_slide()) + static_base_address()}; - - // NOTE: starting from 1, skipping this frame. - for (size_t i{1}; i < stack_size; ++i) { - // `stack_msg[x]` format: - // 8 foobar 0x000000010fd490da main + 1322 - if (!stack_msg[i] || (stack_msg[i][0] == 0x0)) continue; - - const std::string base_str{stack_msg[i]}; - - size_t s_pos{0}; - size_t len{0}; - size_t cursor{0}; - - // Skip frame number part. - skip_glyph(base_str, cursor); - - // Skip whitespace. - skip_whitespace(base_str, cursor); - s_pos = cursor; - // Filename part. - skip_glyph(base_str, cursor); - len = cursor - s_pos; - const std::string filename{base_str.substr(s_pos, len)}; - - // Skip whitespace. - skip_whitespace(base_str, cursor); - s_pos = cursor; - // Address part. - skip_glyph(base_str, cursor); - len = cursor - s_pos; - const std::string address{base_str.substr(s_pos, len)}; - if (!address.empty() && address[0] == '?') continue; - - // Skip whitespace. - skip_whitespace(base_str, cursor); - s_pos = cursor; - // Mangled function name part. - skip_glyph(base_str, cursor); - len = cursor - s_pos; - const std::string func_mangled{base_str.substr(s_pos, len)}; - - size_t msg_len{0}; - size_t avail_len = output_buflen; - - t_snprintf(output_buf, avail_len, cur_len, msg_len, "#%-3zu %s in ", frame_num++, address.c_str()); - - if (filename != exec_file) { - // Dynamic library. - int status; - const std::unique_ptr< const char, std::function< void(const char* const) > > cc{ - abi::__cxa_demangle(func_mangled.c_str(), 0, 0, &status), [](const char* const ptr) { - if (ptr) std::free(static_cast< void* >(const_cast< char* >(ptr))); - }}; - if (cc) { - t_snprintf(output_buf, avail_len, cur_len, msg_len, "%s at %s\n", cc.get(), filename.c_str()); - } else { - t_snprintf(output_buf, avail_len, cur_len, msg_len, "%s() at %s\n", func_mangled.c_str(), - filename.c_str()); - } - } else { - // atos return format: - // bbb(char) (in crash_example) (crash_example.cc:37) - std::ostringstream ss; - ss << "atos -l 0x"; - ss << std::hex << load_base; - ss << " -o " << exec_full_path; - ss << " " << address; - const std::unique_ptr< FILE, std::function< void(FILE* const) > > fp{::popen(ss.str().c_str(), "r"), - [](FILE* const ptr) { - if (ptr) ::pclose(ptr); - }}; - if (!fp) continue; - - std::array< char, 4096 > atos_cstr; - std::fgets(atos_cstr.data(), atos_cstr.size() - 1, fp.get()); - - const std::string atos_str{atos_cstr.data()}; - size_t d_pos{atos_str.find(" (in ")}; - if (d_pos == std::string::npos) continue; - const std::string function_part{atos_str.substr(0, d_pos)}; - - d_pos = atos_str.find(") (", d_pos); - if (d_pos == std::string::npos) continue; - std::string source_part{atos_str.substr(d_pos + 3)}; - source_part = source_part.substr(0, source_part.size() - 2); - - t_snprintf(output_buf, avail_len, cur_len, msg_len, "%s at %s\n", function_part.c_str(), - source_part.c_str()); - } - } - - return cur_len; -} -#endif // __APPLE__ - -size_t stack_interpret_other([[maybe_unused]] const void* const* const stack_ptr, const char* const* const stack_msg, - const size_t stack_size, char* const output_buf, const size_t output_buflen, - [[maybe_unused]] const bool trim_internal) { - size_t cur_len{0}; - [[maybe_unused]] size_t frame_num{0}; - - // NOTE: starting from 1, skipping this frame. - for (size_t i{1}; i < stack_size; ++i) { - // On non-Linux platform, just use the raw symbols. - size_t msg_len{0}; - size_t avail_len{output_buflen}; - t_snprintf(output_buf, avail_len, cur_len, msg_len, "%s\n", stack_msg[i]); - } - return cur_len; -} diff --git a/src/logging/backtrace.h b/src/logging/backtrace.h deleted file mode 100644 index 5e862d89..00000000 --- a/src/logging/backtrace.h +++ /dev/null @@ -1,134 +0,0 @@ -/** - * Copyright (C) 2017-present Jung-Sang Ahn - * All rights reserved. - * - * https://github.com/greensky00 - * - * Stack Backtrace - * Version: 0.3.5 - * - * Permission is hereby granted, free of charge, to any person - * obtaining a copy of this software and associated documentation - * files (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following - * conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * =========================================================== - * - * Enhanced by hkadayam: - * - While dlsym is available, backtrace does not provide symbol name, fixed it - * by calculating the offset through dlsym. - */ - -#pragma once - -// LCOV_EXCL_START - -#include -#include -#include -#include -#include -#include -#include - -#if defined(__linux__) || defined(__APPLE__) -#include -#include -#endif - -#if defined(__linux__) -#include -#endif - -#ifdef __APPLE__ -#include -#endif - -namespace backtrace_detail { -constexpr size_t max_backtrace{256}; -constexpr size_t file_name_length{PATH_MAX}; -constexpr size_t symbol_name_length{1024}; -constexpr size_t address_length{16}; -constexpr uint64_t pipe_timeout_ms{15000}; // 15 seconds. Addr2line can be extremely slow the first time -} // namespace backtrace_detail - - -[[maybe_unused]] static size_t stack_backtrace_impl(void** const stack_ptr, const size_t stack_ptr_capacity) { - return ::backtrace(stack_ptr, static_cast< int >(stack_ptr_capacity)); -} - -#if defined(__linux__) -[[maybe_unused]] extern size_t stack_interpret_linux_file(const void* const* const stack_ptr, FILE* const stack_file, - const size_t stack_size, char* const output_buf, - const size_t output_buflen, const bool trim_internal); -#elif defined(__APPLE__) -[[maybe_unused]] extern size_t stack_interpret_apple(const void* const* const stack_ptr, - const char* const* const stack_msg, const size_t stack_size, - char* const output_buf, const size_t output_buflen, - [[maybe_unused]] const bool trim_internal); -#else -[[maybe_unused]] extern size_t stack_interpret_other(const void* const* const stack_ptr, - const char* const* const stack_msg, const size_t stack_size, - char* const output_buf, const size_t output_buflen, - [[maybe_unused]] const bool trim_internal); -#endif - -[[maybe_unused]] static size_t stack_interpret(void* const* const stack_ptr, const size_t stack_size, - char* const output_buf, const size_t output_buflen, - const bool trim_internal) { -#if defined(__linux__) - std::unique_ptr< FILE, std::function< void(FILE* const) > > stack_file{std::tmpfile(), [](FILE* const fp) { - if (fp) - std::fclose(fp); - }}; - if (!stack_file) - return 0; - - ::backtrace_symbols_fd(stack_ptr, static_cast< int >(stack_size), ::fileno(stack_file.get())); - - const size_t len{ - stack_interpret_linux_file(stack_ptr, stack_file.get(), stack_size, output_buf, output_buflen, trim_internal)}; -#else - const std::unique_ptr< char*, std::function< void(char** const) > > stack_msg{ - ::backtrace_symbols(stack_ptr, static_cast< int >(stack_size)), - [](char** const ptr) { if (ptr) std::free(static_cast< void* >(ptr)); }}; -#if defined(__APPLE__) - const size_t len{ - stack_interpret_apple(stack_ptr, stack_msg.get(), stack_size, output_buf, output_buflen, trim_internal)}; -#else - const size_t len{ - stack_interpret_other(stack_ptr, stack_msg.get(), stack_size, output_buf, output_buflen, trim_internal)}; -#endif -#endif - return len; -} - -[[maybe_unused]] static size_t stack_backtrace(char* const output_buf, const size_t output_buflen, - const bool trim_internal) { - // make this static so no memory allocation needed - static std::mutex s_lock; - static std::array< void*, backtrace_detail::max_backtrace > stack_ptr; - { - std::lock_guard< std::mutex > lock{s_lock}; - const size_t stack_size{stack_backtrace_impl(stack_ptr.data(), stack_ptr.size())}; - return stack_interpret(stack_ptr.data(), stack_size, output_buf, output_buflen, trim_internal); - } -} - -// LCOV_EXCL_STOP diff --git a/src/logging/logging.cpp b/src/logging/logging.cpp index 5b4f3e3f..e8bf2c41 100644 --- a/src/logging/logging.cpp +++ b/src/logging/logging.cpp @@ -43,8 +43,6 @@ #include #include -#include "backtrace.h" - // clang-format off SISL_OPTION_GROUP(logging, (enab_mods, "", "log_mods", "Module loggers to enable", ::cxxopts::value(), "mod[:level][,mod2[:level2],...]"), \ (async_size, "", "log_queue", "Size of async log queue", ::cxxopts::value()->default_value("4096"), "(power of 2)"), \ diff --git a/src/logging/stacktrace.cpp b/src/logging/stacktrace.cpp index f7656c87..8287507b 100644 --- a/src/logging/stacktrace.cpp +++ b/src/logging/stacktrace.cpp @@ -33,22 +33,14 @@ #endif #include -#include "backtrace.h" - -namespace { -constexpr uint64_t backtrace_timeout_ms{4 * backtrace_detail::pipe_timeout_ms}; -} +#include namespace sisl { namespace logging { static bool g_custom_signal_handler_installed{false}; static size_t g_custom_signal_handlers{0}; static bool g_crash_handle_all_threads{true}; -static std::mutex g_mtx_stack_dump_outstanding; -static size_t g_stack_dump_outstanding{0}; -static std::condition_variable g_stack_dump_cv; static std::mutex g_hdlr_mutex; -static std::array< char, max_stacktrace_size() > g_stacktrace_buff; typedef struct SignalHandlerData { SignalHandlerData(std::string name, const sig_handler_t handler) : name{std::move(name)}, handler{handler} {} @@ -127,7 +119,19 @@ static const char* exit_reason_name(const SignalType fatal_id) { } } +static bool dumpCallback(const google_breakpad::MinidumpDescriptor& descriptor, [[maybe_unused]] void*, + bool succeeded) { + std::cerr << std::endl << "Minidump path: " << descriptor.path() << std::endl; + return succeeded; +} + +static void bt_dumper([[maybe_unused]] const SignalType signal_number) { + google_breakpad::ExceptionHandler::WriteMinidump("./", dumpCallback, nullptr); +} + static void crash_handler(const SignalType signal_number) { + LOGCRITICAL("\n * ****Received fatal SIGNAL : {}({})\tPID : {}", exit_reason_name(signal_number), signal_number, + ::getpid()); const auto flush_logs{[]() { // flush all logs spdlog::apply_all([&](std::shared_ptr< spdlog::logger > l) { if (l) l->flush(); @@ -145,17 +149,10 @@ static void crash_handler(const SignalType signal_number) { } else { flush_logs(); } - - // remove all default logging info except for message - GetLogger()->set_pattern("%v"); - log_stack_trace(g_crash_handle_all_threads); - LOGCRITICAL("\n * ****Received fatal SIGNAL : {}({})\tPID : {}", exit_reason_name(signal_number), signal_number, - ::getpid()); - - // flush again and shutdown - flush_logs(); spdlog::shutdown(); + bt_dumper(signal_number); + exit_with_default_sighandler(signal_number); } @@ -170,110 +167,6 @@ static void sigint_handler(const SignalType signal_number) { exit_with_default_sighandler(signal_number); } -static void bt_dumper([[maybe_unused]] const SignalType signal_number) { - g_stacktrace_buff.fill(0); - stack_backtrace(g_stacktrace_buff.data(), g_stacktrace_buff.size(), true); - bool notify{false}; - { - std::unique_lock lock{g_mtx_stack_dump_outstanding}; - if (g_stack_dump_outstanding > 0) { - --g_stack_dump_outstanding; - notify = true; - } - } - if (notify) g_stack_dump_cv.notify_all(); -} - -static void log_stack_trace_all_threads() { - std::unique_lock logger_lock{LoggerThreadContext::s_logger_thread_mutex}; - auto& logger{GetLogger()}; - auto& critical_logger{GetCriticalLogger()}; - size_t thread_count{1}; - - const auto dump_thread{[&logger, &critical_logger, &thread_count](const bool signal_thread, const auto thread_id) { - if (signal_thread) { - const auto log_failure{[&logger, &critical_logger, &thread_count, &thread_id](const char* const msg) { - if (logger) { -#ifndef __APPLE__ - logger->critical("Thread ID: {}, Thread num: {} - {}\n", thread_id, thread_count, msg); -#else - logger->critical("Thread num: {} - {}\n", thread_count, msg); -#endif - logger->flush(); - } - if (critical_logger) { -#ifndef __APPLE__ - critical_logger->critical("Thread ID: {}, Thread num: {} - {}\n", thread_id, thread_count, msg); -#else - critical_logger->critical("Thread num: {} - {}\n", thread_count, msg); -#endif - critical_logger->flush(); - } - }}; - - { - std::unique_lock outstanding_lock{g_mtx_stack_dump_outstanding}; - assert(g_stack_dump_outstanding == 0); - g_stack_dump_outstanding = 1; - } - if (!send_thread_signal(thread_id, SIGUSR3)) { - { - std::unique_lock outstanding_lock{g_mtx_stack_dump_outstanding}; - g_stack_dump_outstanding = 0; - } - log_failure("Invalid/terminated thread"); - return; - } - - { - std::unique_lock outstanding_lock{g_mtx_stack_dump_outstanding}; - const auto result{g_stack_dump_cv.wait_for(outstanding_lock, - std::chrono::milliseconds{backtrace_timeout_ms}, - [] { return (g_stack_dump_outstanding == 0); })}; - if (!result) { - g_stack_dump_outstanding = 0; - outstanding_lock.unlock(); - log_failure("Timeout waiting for stacktrace"); - return; - } - } - } else { - // dump the thread without recursive signal - g_stacktrace_buff.fill(0); - stack_backtrace(g_stacktrace_buff.data(), g_stacktrace_buff.size(), true); - } - - if (logger) { -#ifndef __APPLE__ - logger->critical("Thread ID: {}, Thread num: {}\n{}", thread_id, thread_count, g_stacktrace_buff.data()); -#else - logger->critical("Thread num: {}\n{}", thread_count, g_stacktrace_buff.data()); -#endif - logger->flush(); - } - if (critical_logger) { -#ifndef __APPLE__ - critical_logger->critical("Thread ID: {}, Thread num: {}\n{}", thread_id, thread_count, - g_stacktrace_buff.data()); -#else - critical_logger->critical("Thread num: {}\n{}", thread_count, g_stacktrace_buff.data()); -#endif - critical_logger->flush(); - } - }}; - - // First dump this thread context - dump_thread(false, logger_thread_ctx.m_thread_id); - ++thread_count; - - // dump other threads - for (auto* const ctx : LoggerThreadContext::s_logger_thread_set) { - if (ctx == &logger_thread_ctx) { continue; } - dump_thread(true, ctx->m_thread_id); - ++thread_count; - } -} - /************************************************* Exported APIs **********************************/ static std::map< SignalType, signame_handler_data_t > g_sighandler_map{ {SIGABRT, {"SIGABRT", &crash_handler}}, @@ -431,18 +324,6 @@ void log_custom_signal_handlers() { LOGINFO("Custom Signal handlers: {}", m); } -void log_stack_trace(const bool all_threads) { - if (is_crash_handler_installed() && all_threads) { - log_stack_trace_all_threads(); - } else { - // make this static so that no memory allocation is necessary - static std::array< char, max_stacktrace_size() > buff; - buff.fill(0); - [[maybe_unused]] const size_t s{stack_backtrace(buff.data(), buff.size(), true)}; - LOGCRITICAL("\n\n{}", buff.data()); - } -} - bool send_thread_signal(const pthread_t thr, const SignalType sig_num) { return (::pthread_kill(thr, sig_num) == 0); } bool install_crash_handler(const bool all_threads) { return install_signal_handler(all_threads); } diff --git a/test_package/test_package.cpp b/test_package/test_package.cpp index dc82c6fe..f040d3cf 100644 --- a/test_package/test_package.cpp +++ b/test_package/test_package.cpp @@ -1,6 +1,7 @@ #include #include #include +#include SISL_LOGGING_INIT(my_module) @@ -10,9 +11,13 @@ extern void example_decl(); using namespace std::chrono_literals; +[[ maybe_unused ]] +void crash() { volatile int* a = (int*)(NULL); *a = 1; } + int main(int argc, char** argv) { SISL_OPTIONS_LOAD(argc, argv, logging) sisl::logging::SetLogger(std::string(argv[0])); + sisl::logging::install_crash_handler(); spdlog::set_pattern("[%D %T%z] [%^%l%$] [%n] [%t] %v"); LOGTRACE("Trace"); @@ -36,6 +41,6 @@ int main(int argc, char** argv) { LOGINFOMOD_USING_LOGGER(my_module, custom_logger, "hello world"); DEBUG_ASSERT(true, "Always True"); _thread.join(); - + // crash(); return 0; } From 2404e401ee558b243ec05cea1a15625809128d13 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 20 Jun 2023 14:39:43 -0700 Subject: [PATCH 308/385] Bad testing cache key. --- .github/workflows/build_dependencies.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 70ab5b9b..dda480eb 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -103,7 +103,7 @@ jobs: with: path: | ~/.conan/data - key: SislDeps8-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}- + key: SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}- if: ${{ github.event_name == 'pull_request' && inputs.testing == 'True' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - name: Setup Python From 6304029133ede0c8435fd0d6061c57a002c882f8 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 20 Jun 2023 14:42:17 -0700 Subject: [PATCH 309/385] Extra include. --- test_package/test_package.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/test_package/test_package.cpp b/test_package/test_package.cpp index f040d3cf..d0b62e0f 100644 --- a/test_package/test_package.cpp +++ b/test_package/test_package.cpp @@ -1,7 +1,6 @@ #include #include #include -#include SISL_LOGGING_INIT(my_module) From 78527238bf9eb0aad0908e51b4cf82dee69ba62e Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 21 Jun 2023 08:36:35 -0600 Subject: [PATCH 310/385] Chain builds. --- .github/workflows/merge_conan_build.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/merge_conan_build.yml b/.github/workflows/merge_conan_build.yml index f32b9c8e..0824ec17 100644 --- a/.github/workflows/merge_conan_build.yml +++ b/.github/workflows/merge_conan_build.yml @@ -32,3 +32,9 @@ jobs: malloc-impl: ${{ matrix.malloc-impl }} prerelease: ${{ matrix.prerelease }} testing: 'True' + ChainBuild: + run: | + curl -XPOST -u "eBay:${{secrets.CHAIN_BUILD_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/eBay/iomanager/actions/workflows/merge_conan_build.yml/dispatches --data '{"ref": "stable/v3.x"}' + curl -XPOST -u "eBay:${{secrets.CHAIN_BUILD_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/eBay/nuraft_mesg/actions/workflows/merge_conan_build.yml/dispatches --data '{"ref": "stable/v3.x"}' + curl -XPOST -u "eBay:${{secrets.CHAIN_BUILD_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/eBay/homestore/actions/workflows/merge_conan_build.yml/dispatches --data '{"ref": "stable/v3.x"}' + curl -XPOST -u "eBay:${{secrets.CHAIN_BUILD_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/eBay/homereplication/actions/workflows/merge_conan_build.yml/dispatches --data '{"ref": "stable/v3.x"}' From cc4cc3ea337653e0adab5cef6bdbd58cf7f125a5 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 21 Jun 2023 08:39:42 -0600 Subject: [PATCH 311/385] Fix syntax. --- .github/workflows/merge_conan_build.yml | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/workflows/merge_conan_build.yml b/.github/workflows/merge_conan_build.yml index 0824ec17..6c0c2338 100644 --- a/.github/workflows/merge_conan_build.yml +++ b/.github/workflows/merge_conan_build.yml @@ -33,8 +33,11 @@ jobs: prerelease: ${{ matrix.prerelease }} testing: 'True' ChainBuild: - run: | - curl -XPOST -u "eBay:${{secrets.CHAIN_BUILD_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/eBay/iomanager/actions/workflows/merge_conan_build.yml/dispatches --data '{"ref": "stable/v3.x"}' - curl -XPOST -u "eBay:${{secrets.CHAIN_BUILD_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/eBay/nuraft_mesg/actions/workflows/merge_conan_build.yml/dispatches --data '{"ref": "stable/v3.x"}' - curl -XPOST -u "eBay:${{secrets.CHAIN_BUILD_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/eBay/homestore/actions/workflows/merge_conan_build.yml/dispatches --data '{"ref": "stable/v3.x"}' - curl -XPOST -u "eBay:${{secrets.CHAIN_BUILD_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/eBay/homereplication/actions/workflows/merge_conan_build.yml/dispatches --data '{"ref": "stable/v3.x"}' + runs-on: "ubuntu-22.04" + steps: + - name: Start IOManager Build + run: | + curl -XPOST -u "eBay:${{secrets.CHAIN_BUILD_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/eBay/iomanager/actions/workflows/merge_conan_build.yml/dispatches --data '{"ref": "stable/v8.x"}' + - name: Start IOManager Build + run: | + curl -XPOST -u "eBay:${{secrets.CHAIN_BUILD_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/eBay/homestore/actions/workflows/merge_conan_build.yml/dispatches --data '{"ref": "stable/v3.x"}' From ddf40a510ed2a83a8cca5e15dc665d7522482d00 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 21 Jun 2023 08:44:32 -0600 Subject: [PATCH 312/385] Fix curl requests. --- .github/workflows/merge_conan_build.yml | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/.github/workflows/merge_conan_build.yml b/.github/workflows/merge_conan_build.yml index 6c0c2338..3fc157e2 100644 --- a/.github/workflows/merge_conan_build.yml +++ b/.github/workflows/merge_conan_build.yml @@ -37,7 +37,19 @@ jobs: steps: - name: Start IOManager Build run: | - curl -XPOST -u "eBay:${{secrets.CHAIN_BUILD_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/eBay/iomanager/actions/workflows/merge_conan_build.yml/dispatches --data '{"ref": "stable/v8.x"}' + curl -L \ + -X POST \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${{ secrets.CHAIN_BUILD_TOKEN }}"\ + -H "X-GitHub-Api-Version: 2022-11-28" \ + https://api.github.com/repos/eBay/iomanager/actions/workflows/merge_conan_build.yml/dispatches \ + -d '{"ref":"stable/v8.x","inputs":{}}' - name: Start IOManager Build run: | - curl -XPOST -u "eBay:${{secrets.CHAIN_BUILD_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/eBay/homestore/actions/workflows/merge_conan_build.yml/dispatches --data '{"ref": "stable/v3.x"}' + curl -L \ + -X POST \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${{ secrets.CHAIN_BUILD_TOKEN }}"\ + -H "X-GitHub-Api-Version: 2022-11-28" \ + https://api.github.com/repos/eBay/homestore/actions/workflows/merge_conan_build.yml/dispatches \ + -d '{"ref":"stable/v3.x","inputs":{}}' From 31b694ee50e509280b9b3365a8800a4142724f96 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 21 Jun 2023 08:49:27 -0600 Subject: [PATCH 313/385] Just chain directly. --- .github/workflows/merge_conan_build.yml | 9 --------- 1 file changed, 9 deletions(-) diff --git a/.github/workflows/merge_conan_build.yml b/.github/workflows/merge_conan_build.yml index 3fc157e2..e8b184a4 100644 --- a/.github/workflows/merge_conan_build.yml +++ b/.github/workflows/merge_conan_build.yml @@ -44,12 +44,3 @@ jobs: -H "X-GitHub-Api-Version: 2022-11-28" \ https://api.github.com/repos/eBay/iomanager/actions/workflows/merge_conan_build.yml/dispatches \ -d '{"ref":"stable/v8.x","inputs":{}}' - - name: Start IOManager Build - run: | - curl -L \ - -X POST \ - -H "Accept: application/vnd.github+json" \ - -H "Authorization: Bearer ${{ secrets.CHAIN_BUILD_TOKEN }}"\ - -H "X-GitHub-Api-Version: 2022-11-28" \ - https://api.github.com/repos/eBay/homestore/actions/workflows/merge_conan_build.yml/dispatches \ - -d '{"ref":"stable/v3.x","inputs":{}}' From 3857c06daaeb6f8e3f2ddb620efc7cd5f37abef8 Mon Sep 17 00:00:00 2001 From: Sanal P Date: Wed, 21 Jun 2023 15:09:22 -0700 Subject: [PATCH 314/385] Fix in flip grpc server to use correct service. --- conanfile.py | 2 +- src/flip/lib/flip_rpc_server.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/conanfile.py b/conanfile.py index f77a88d1..107a8bea 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.6.0" + version = "8.6.1" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/src/flip/lib/flip_rpc_server.cpp b/src/flip/lib/flip_rpc_server.cpp index 2a4a2b4e..b555c5b7 100644 --- a/src/flip/lib/flip_rpc_server.cpp +++ b/src/flip/lib/flip_rpc_server.cpp @@ -56,7 +56,7 @@ class FlipRPCServiceWrapper : public FlipRPCServer::Service { void FlipRPCServer::rpc_thread() { std::string server_address("0.0.0.0:50051"); - FlipRPCServiceWrapper service; + FlipRPCServer service; grpc::ServerBuilder builder; builder.AddListeningPort(server_address, grpc::InsecureServerCredentials()); From 6f4e054dfd008ddeb3411bc5acd5e31e72ec4dd5 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 22 Jun 2023 14:28:13 -0600 Subject: [PATCH 315/385] Custom actions. (#148) --- .codecov.yml | 3 - .github/actions/load_conan/action.yml | 48 ++++++++ .github/actions/setup_conan/action.yml | 31 +++++ .github/actions/store_conan/action.yml | 34 ++++++ .github/workflows/build_dependencies.yml | 138 ++++++++--------------- .github/workflows/merge_conan_build.yml | 1 + 6 files changed, 164 insertions(+), 91 deletions(-) create mode 100644 .github/actions/load_conan/action.yml create mode 100644 .github/actions/setup_conan/action.yml create mode 100644 .github/actions/store_conan/action.yml diff --git a/.codecov.yml b/.codecov.yml index ee7370c9..d39809e7 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -2,9 +2,6 @@ codecov: notify: require_ci_to_pass: no -fixes: - - "deps/sisl/::" - ignore: - "**/*_test.c*" - "**/*_test.h*" diff --git a/.github/actions/load_conan/action.yml b/.github/actions/load_conan/action.yml new file mode 100644 index 00000000..579d27e1 --- /dev/null +++ b/.github/actions/load_conan/action.yml @@ -0,0 +1,48 @@ +name: 'Load Conan Cache' +description: 'Loads Local Conan Cache' +inputs: + testing: + description: 'Support building tests' + required: true + key_prefix: + description: 'Cache prefix' + required: true + default: 'Deps' + fail_on_cache_miss: + description: 'Fail if key missing' + required: false + default: false + path: + description: 'Recipe path' + required: false + default: '.' +outputs: + cache-hit: + description: 'Cache match found' + value: ${{ steps.restore-cache.outputs.cache-hit }} +runs: + using: "composite" + steps: + - name: Calc Hash Key + id: hash-key + shell: bash + run: | + echo "hash-key=${{ inputs.path }}/**/conanfile.py" >> $GITHUB_OUTPUT + + - name: Restore Cache + id: restore-cache + uses: actions/cache/restore@v3 + with: + path: | + ~/.conan/data + key: ${{ inputs.key_prefix }}-${{ hashFiles(steps.hash-key.outputs.hash-key ) }} + fail-on-cache-miss: ${{ inputs.fail_on_cache_miss }} + + - name: Restore Testing Cache + uses: actions/cache/restore@v3 + with: + path: | + ~/.conan/data + key: ${{ inputs.key_prefix }}- + if: ${{ github.event_name == 'pull_request' && inputs.testing == 'True' && steps.restore-cache.outputs.cache-hit != 'true' }} + diff --git a/.github/actions/setup_conan/action.yml b/.github/actions/setup_conan/action.yml new file mode 100644 index 00000000..0d3dfe01 --- /dev/null +++ b/.github/actions/setup_conan/action.yml @@ -0,0 +1,31 @@ +name: 'Setup Conan' +description: 'Sets up Conan for Sisl Builds' +inputs: + platform: + description: 'Platform conan will be building on' + required: true + default: 'ubuntu-22.04' +runs: + using: "composite" + steps: + - name: Setup Python + uses: actions/setup-python@v3 + with: + python-version: "3.8" + + - name: Setup Conan and Export Recipes + shell: bash + run: | + python -m pip install --upgrade pip + python -m pip install conan~=1.0 + python -m pip install gcovr + conan user + conan profile new --detect default + + - name: Fixup libstdc++ + shell: bash + run: | + # Set std::string to non-CoW C++11 version + sed -i 's,compiler.libcxx=libstdc++$,compiler.libcxx=libstdc++11,g' ~/.conan/profiles/default + if: ${{ inputs.platform == 'ubuntu-22.04' }} + diff --git a/.github/actions/store_conan/action.yml b/.github/actions/store_conan/action.yml new file mode 100644 index 00000000..cc4bd8cb --- /dev/null +++ b/.github/actions/store_conan/action.yml @@ -0,0 +1,34 @@ +name: 'Store Conan Cache' +description: 'Cleans Local Conan Cache and Persists Dirty Packages' +inputs: + key_prefix: + description: 'Cache prefix' + required: true + default: 'Deps' +runs: + using: "composite" + steps: + - name: Setup Conan and Export Recipes + shell: bash + run: | + dep_pkgs=$(ls -1d 3rd_party/* 2>/dev/null | cut -d'/' -f2 | paste -sd'|' - -) + if [ -z "${dep_pkgs}" ]; then + dep_pkgs="no_3rd_party" + fi + dirty_pkgs=$(ls -1d ~/.conan/data/*/*/*/*/build 2>/dev/null | sed 's,.*data/,,') + if [ -z "${dirty_pkgs}" ]; then + dirty_pkgs="no_public/0" + fi + dirty_pkgs_d=$(echo "${dirty_pkgs}" | cut -d'/' -f1 | paste -sd'|' - -) + echo "::info:: Caching: ${dirty_pkgs_d}|${dep_pkgs}" + ls -1d ~/.conan/data/* | grep -Ev "(${dirty_pkgs_d}|${dep_pkgs})" | xargs rm -rf + rm -rf ~/.conan/data/*/*/*/*/build + rm -rf ~/.conan/data/*/*/*/*/source + + - name: Save Cache + uses: actions/cache/save@v3 + with: + path: | + ~/.conan/data + key: ${{ inputs.key_prefix }}-${{ hashFiles('**/conanfile.py') }} + diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index ec49a47b..c4ea6ccf 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -75,63 +75,40 @@ jobs: - name: Retrieve Code uses: actions/checkout@v3 with: - path: deps/sisl ref: ${{ inputs.branch }} if: ${{ inputs.testing == 'True' }} - name: Retrieve Recipe uses: actions/checkout@v3 with: - repository: ebay/sisl - path: deps/sisl + repository: szmyd/sisl ref: ${{ inputs.branch }} if: ${{ inputs.testing == 'False' }} - - name: Restore Sisl Cache - id: restore-cache-sisl - uses: actions/cache/restore@v3 + - name: Load Conan Cache + id: restore-cache + uses: szmyd/sisl/.github/actions/load_conan@test_custom_action with: - path: | - ~/.conan/data - key: SislDeps8-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} + testing: ${{ inputs.testing }} + key_prefix: SislDeps8-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} - - name: Restore Testing Cache - id: restore-cache-testing-sisl - uses: actions/cache/restore@v3 + - name: Setup Conan + uses: szmyd/sisl/.github/actions/setup_conan@test_custom_action with: - path: | - ~/.conan/data - key: SislDeps8-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}- - if: ${{ github.event_name == 'pull_request' && inputs.testing == 'True' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + platform: ${{ inputs.platform }} + if: ${{ inputs.testing == 'True' || steps.restore-cache.outputs.cache-hit != 'true' }} - - name: Setup Python - uses: actions/setup-python@v3 - with: - python-version: "3.8" - if: ${{ inputs.testing == 'True' || steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - - - name: Setup Conan and Export Recipes + - name: Export Recipes run: | - python -m pip install --upgrade pip - python -m pip install conan~=1.0 - python -m pip install gcovr - conan user - conan profile new --detect default - conan export deps/sisl/3rd_party/breakpad breakpad/cci.20230127@ - conan export deps/sisl/3rd_party/folly folly/2022.01.31.00@ - conan export deps/sisl/3rd_party/gperftools - conan export deps/sisl/3rd_party/jemalloc - conan export deps/sisl/3rd_party/prerelease_dummy - conan export deps/sisl/3rd_party/pistache pistache/cci.20201127@ + conan export 3rd_party/breakpad breakpad/cci.20230127@ + conan export 3rd_party/folly folly/2022.01.31.00@ + conan export 3rd_party/gperftools + conan export 3rd_party/jemalloc + conan export 3rd_party/prerelease_dummy + conan export 3rd_party/pistache pistache/cci.20201127@ cached_pkgs=$(ls -1d ~/.conan/data/*/*/*/*/export 2>/dev/null | sed 's,.*data/,,' | cut -d'/' -f1,2 | paste -sd',' - -) echo "::info:: Pre-cached: ${cached_pkgs}" - if: ${{ inputs.testing == 'True' || steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - - - name: Fixup libstdc++ - run: | - # Set std::string to non-CoW C++11 version - sed -i 's,compiler.libcxx=libstdc++$,compiler.libcxx=libstdc++11,g' ~/.conan/profiles/default - if: ${{ inputs.platform == 'ubuntu-22.04' && ( inputs.testing == 'True' || steps.restore-cache-sisl.outputs.cache-hit != 'true' ) }} + if: ${{ inputs.testing == 'True' || steps.restore-cache.outputs.cache-hit != 'true' }} - name: Build Cache run: | @@ -140,60 +117,45 @@ jobs: -o malloc_impl=${{ inputs.malloc-impl }} \ -s build_type=${{ inputs.build-type }} \ --build missing \ - deps/sisl - if: ${{ steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + . + if: ${{ steps.restore-cache.outputs.cache-hit != 'true' }} - - name: Clean Package Cache - run: | - dep_pkgs=$(ls -1d deps/sisl/3rd_party/* 2>/dev/null | cut -d'/' -f4 | paste -sd'|' - -) - if [ -z "${dep_pkgs}" ]; then - dep_pkgs="no_3rd_party" - fi - dirty_pkgs=$(ls -1d ~/.conan/data/*/*/*/*/build 2>/dev/null | sed 's,.*data/,,') - if [ -z "${dirty_pkgs}" ]; then - dirty_pkgs="no_public/0" - fi - dirty_pkgs_d=$(echo "${dirty_pkgs}" | cut -d'/' -f1 | paste -sd'|' - -) - echo "::info:: Caching: ${dirty_pkgs_d}|${dep_pkgs}" - ls -1d ~/.conan/data/* | grep -Ev "(${dirty_pkgs_d}|${dep_pkgs})" | xargs rm -rf - rm -rf ~/.conan/data/*/*/*/*/build - rm -rf ~/.conan/data/*/*/*/*/source - if: ${{ github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} - - - name: Save Sisl Cache - id: save-cache-sisl - uses: actions/cache/save@v3 + - name: Save Conan Cache + uses: szmyd/sisl/.github/actions/store_conan@test_custom_action with: - path: | - ~/.conan/data - key: SislDeps8-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }}-${{ hashFiles('**/conanfile.py') }} - if: ${{ github.event_name != 'pull_request' && steps.restore-cache-sisl.outputs.cache-hit != 'true' }} + key_prefix: SislDeps8-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} + if: ${{ github.event_name != 'pull_request' && steps.restore-cache.outputs.cache-hit != 'true' }} - - name: Create and test Package + - name: Code Coverage Run run: | - if [[ "${{ inputs.build-type }}" == "Debug" && "${{ inputs.malloc-impl }}" == "libc" && "${{ inputs.prerelease }}" == "False" ]]; then - conan install \ - -o prerelease=${{ inputs.prerelease }} \ - -o malloc_impl=${{ inputs.malloc-impl }} \ - -o coverage=True \ - -s build_type=${{ inputs.build-type }} \ - --build missing \ - deps/sisl - conan build deps/sisl - else - sanitize=$([[ "${{ inputs.build-type }}" == "Debug" && "${{ inputs.malloc-impl }}" == "libc" && "${{ inputs.prerelease }}" == "True" ]] && echo "True" || echo "False") - conan create \ - -o sisl:prerelease=${{ inputs.prerelease }} \ - -o sisl:malloc_impl=${{ inputs.malloc-impl }} \ - -o sisl:sanitize=${sanitize} \ - -s build_type=${{ inputs.build-type }} \ - --build missing \ - deps/sisl - fi - if: ${{ inputs.testing == 'True' }} + conan install \ + -o prerelease=${{ inputs.prerelease }} \ + -o malloc_impl=${{ inputs.malloc-impl }} \ + -o coverage=True \ + -s build_type=${{ inputs.build-type }} \ + --build missing \ + . + conan build . + if: ${{ inputs.testing == 'True' && inputs.platform == 'ubuntu-22.04' && inputs.build-type == 'Debug' && inputs.malloc-impl == 'libc' && inputs.prerelease == 'False' }} - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v3 with: token: ${{ secrets.CODECOV_TOKEN }} gcov: true + if: ${{ inputs.testing == 'True' && inputs.platform == 'ubuntu-22.04' && inputs.build-type == 'Debug' && inputs.malloc-impl == 'libc' && inputs.prerelease == 'False' }} + + - name: Create and Test Package + run: | + sanitize=$([[ "${{ inputs.build-type }}" == "Debug" && \ + "${{ inputs.malloc-impl }}" == "libc" && \ + "${{ inputs.prerelease }}" == "True" ]] && \ + echo "True" || echo "False") + conan create \ + -o sisl:prerelease=${{ inputs.prerelease }} \ + -o sisl:malloc_impl=${{ inputs.malloc-impl }} \ + -o sisl:sanitize=${sanitize} \ + -s build_type=${{ inputs.build-type }} \ + --build missing \ + . + if: ${{ inputs.testing == 'True' && ( inputs.platform != 'ubuntu-22.04' || inputs.build-type != 'Debug' || inputs.malloc-impl != 'libc' || inputs.prerelease != 'False' ) }} diff --git a/.github/workflows/merge_conan_build.yml b/.github/workflows/merge_conan_build.yml index e8b184a4..65391851 100644 --- a/.github/workflows/merge_conan_build.yml +++ b/.github/workflows/merge_conan_build.yml @@ -44,3 +44,4 @@ jobs: -H "X-GitHub-Api-Version: 2022-11-28" \ https://api.github.com/repos/eBay/iomanager/actions/workflows/merge_conan_build.yml/dispatches \ -d '{"ref":"stable/v8.x","inputs":{}}' + if: ${{ github.ref == 'stable/v8.x' }} From 4e680129869049e7def0b304271c60391fd2361f Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 22 Jun 2023 13:36:45 -0700 Subject: [PATCH 316/385] Remove alt repo. --- .github/workflows/build_dependencies.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index c4ea6ccf..93f988f5 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -81,19 +81,19 @@ jobs: - name: Retrieve Recipe uses: actions/checkout@v3 with: - repository: szmyd/sisl + repository: eBay/sisl ref: ${{ inputs.branch }} if: ${{ inputs.testing == 'False' }} - name: Load Conan Cache id: restore-cache - uses: szmyd/sisl/.github/actions/load_conan@test_custom_action + uses: eBay/sisl/.github/actions/load_conan@stable/v8.x with: testing: ${{ inputs.testing }} key_prefix: SislDeps8-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} - name: Setup Conan - uses: szmyd/sisl/.github/actions/setup_conan@test_custom_action + uses: eBay/sisl/.github/actions/setup_conan@stable/v8.x with: platform: ${{ inputs.platform }} if: ${{ inputs.testing == 'True' || steps.restore-cache.outputs.cache-hit != 'true' }} @@ -121,7 +121,7 @@ jobs: if: ${{ steps.restore-cache.outputs.cache-hit != 'true' }} - name: Save Conan Cache - uses: szmyd/sisl/.github/actions/store_conan@test_custom_action + uses: eBay/sisl/.github/actions/store_conan@stable/v8.x with: key_prefix: SislDeps8-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} if: ${{ github.event_name != 'pull_request' && steps.restore-cache.outputs.cache-hit != 'true' }} From 598cca290ce1ad85b7a4ad4cea73e3dd46360d61 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 22 Jun 2023 15:18:12 -0700 Subject: [PATCH 317/385] Use specific hash keys. --- .github/actions/load_conan/action.yml | 5 +++-- .github/actions/store_conan/action.yml | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/actions/load_conan/action.yml b/.github/actions/load_conan/action.yml index 579d27e1..23e06e2d 100644 --- a/.github/actions/load_conan/action.yml +++ b/.github/actions/load_conan/action.yml @@ -27,7 +27,8 @@ runs: id: hash-key shell: bash run: | - echo "hash-key=${{ inputs.path }}/**/conanfile.py" >> $GITHUB_OUTPUT + echo "3rd_party=${{ inputs.path }}/3rd_party/**/conanfile.py" >> $GITHUB_OUTPUT + echo "primary=${{ inputs.path }}/conanfile.py" >> $GITHUB_OUTPUT - name: Restore Cache id: restore-cache @@ -35,7 +36,7 @@ runs: with: path: | ~/.conan/data - key: ${{ inputs.key_prefix }}-${{ hashFiles(steps.hash-key.outputs.hash-key ) }} + key: ${{ inputs.key_prefix }}-${{ hashFiles(steps.hash-key.outputs.primary, steps.hash-key.outputs.3rd_party ) }} fail-on-cache-miss: ${{ inputs.fail_on_cache_miss }} - name: Restore Testing Cache diff --git a/.github/actions/store_conan/action.yml b/.github/actions/store_conan/action.yml index cc4bd8cb..906f3b0d 100644 --- a/.github/actions/store_conan/action.yml +++ b/.github/actions/store_conan/action.yml @@ -30,5 +30,5 @@ runs: with: path: | ~/.conan/data - key: ${{ inputs.key_prefix }}-${{ hashFiles('**/conanfile.py') }} + key: ${{ inputs.key_prefix }}-${{ hashFiles('conanfile.py', '3rd_party/**/conanfile.py') }} From 1c09a0914bd3e0a451fa9d5fbb26f57a305bed16 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 22 Jun 2023 15:21:59 -0700 Subject: [PATCH 318/385] Remove underscore. --- .github/actions/load_conan/action.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/load_conan/action.yml b/.github/actions/load_conan/action.yml index 23e06e2d..1b7a1b2d 100644 --- a/.github/actions/load_conan/action.yml +++ b/.github/actions/load_conan/action.yml @@ -27,7 +27,7 @@ runs: id: hash-key shell: bash run: | - echo "3rd_party=${{ inputs.path }}/3rd_party/**/conanfile.py" >> $GITHUB_OUTPUT + echo "3rdparty=${{ inputs.path }}/3rd_party/**/conanfile.py" >> $GITHUB_OUTPUT echo "primary=${{ inputs.path }}/conanfile.py" >> $GITHUB_OUTPUT - name: Restore Cache @@ -36,7 +36,7 @@ runs: with: path: | ~/.conan/data - key: ${{ inputs.key_prefix }}-${{ hashFiles(steps.hash-key.outputs.primary, steps.hash-key.outputs.3rd_party ) }} + key: ${{ inputs.key_prefix }}-${{ hashFiles(steps.hash-key.outputs.primary, steps.hash-key.outputs.3rdparty ) }} fail-on-cache-miss: ${{ inputs.fail_on_cache_miss }} - name: Restore Testing Cache From 56e102361c8ee9791cc14f96b1ef6c35db94dac2 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 22 Jun 2023 15:29:59 -0700 Subject: [PATCH 319/385] Try compound key. --- .github/actions/load_conan/action.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/actions/load_conan/action.yml b/.github/actions/load_conan/action.yml index 1b7a1b2d..58a5f6f8 100644 --- a/.github/actions/load_conan/action.yml +++ b/.github/actions/load_conan/action.yml @@ -27,8 +27,7 @@ runs: id: hash-key shell: bash run: | - echo "3rdparty=${{ inputs.path }}/3rd_party/**/conanfile.py" >> $GITHUB_OUTPUT - echo "primary=${{ inputs.path }}/conanfile.py" >> $GITHUB_OUTPUT + echo "keys=[${{ inputs.path }}/conanfile.py, ${{ inputs.path }}/3rd_party/**/conanfile.py]" >> $GITHUB_OUTPUT - name: Restore Cache id: restore-cache @@ -36,7 +35,7 @@ runs: with: path: | ~/.conan/data - key: ${{ inputs.key_prefix }}-${{ hashFiles(steps.hash-key.outputs.primary, steps.hash-key.outputs.3rdparty ) }} + key: ${{ inputs.key_prefix }}-${{ hashFiles(steps.hash-key.outputs.keys ) }} fail-on-cache-miss: ${{ inputs.fail_on_cache_miss }} - name: Restore Testing Cache From 05b2daec9c4ec0d335634f7b7a884bf63eb67753 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 22 Jun 2023 15:42:09 -0700 Subject: [PATCH 320/385] Split outputs. --- .github/actions/load_conan/action.yml | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/.github/actions/load_conan/action.yml b/.github/actions/load_conan/action.yml index 58a5f6f8..fa2c0852 100644 --- a/.github/actions/load_conan/action.yml +++ b/.github/actions/load_conan/action.yml @@ -23,11 +23,15 @@ outputs: runs: using: "composite" steps: - - name: Calc Hash Key - id: hash-key + - id: hash-key-primary shell: bash run: | - echo "keys=[${{ inputs.path }}/conanfile.py, ${{ inputs.path }}/3rd_party/**/conanfile.py]" >> $GITHUB_OUTPUT + echo "key=${{ inputs.path }}/conanfile.py" >> $GITHUB_OUTPUT + + - id: hash-key-3rd + shell: bash + run: | + echo "keys=${{ inputs.path }}/3rd_party/**/conanfile.py" >> $GITHUB_OUTPUT - name: Restore Cache id: restore-cache @@ -35,7 +39,7 @@ runs: with: path: | ~/.conan/data - key: ${{ inputs.key_prefix }}-${{ hashFiles(steps.hash-key.outputs.keys ) }} + key: ${{ inputs.key_prefix }}-${{ hashFiles(steps.hash-key-primary.outputs.key, steps.hash-key-3rd.outputs.keys) }} fail-on-cache-miss: ${{ inputs.fail_on_cache_miss }} - name: Restore Testing Cache From c54a32e609f994169296e08e5a1cfb33d940195a Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 22 Jun 2023 17:10:15 -0700 Subject: [PATCH 321/385] Relax test cache. --- .github/actions/load_conan/action.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/actions/load_conan/action.yml b/.github/actions/load_conan/action.yml index fa2c0852..dfa2f0d9 100644 --- a/.github/actions/load_conan/action.yml +++ b/.github/actions/load_conan/action.yml @@ -47,6 +47,7 @@ runs: with: path: | ~/.conan/data - key: ${{ inputs.key_prefix }}- - if: ${{ github.event_name == 'pull_request' && inputs.testing == 'True' && steps.restore-cache.outputs.cache-hit != 'true' }} + key: ${{ inputs.key_prefix }}-${{ hashFiles(steps.hash-key-primary.outputs.key, steps.hash-key-3rd.outputs.keys) }} + restore-keys: ${{ inputs.key_prefix }}- + if: ${{ inputs.testing == 'True' && steps.restore-cache.outputs.cache-hit != 'true' }} From 218e711db2a8218201e3e1b8ace3687c30e29827 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 22 Jun 2023 17:15:41 -0700 Subject: [PATCH 322/385] Load any --- .github/actions/load_conan/action.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/actions/load_conan/action.yml b/.github/actions/load_conan/action.yml index dfa2f0d9..bac4ad5f 100644 --- a/.github/actions/load_conan/action.yml +++ b/.github/actions/load_conan/action.yml @@ -16,6 +16,10 @@ inputs: description: 'Recipe path' required: false default: '.' + load_any: + description: 'Load cache miss' + required: false + default: 'False' outputs: cache-hit: description: 'Cache match found' @@ -49,5 +53,5 @@ runs: ~/.conan/data key: ${{ inputs.key_prefix }}-${{ hashFiles(steps.hash-key-primary.outputs.key, steps.hash-key-3rd.outputs.keys) }} restore-keys: ${{ inputs.key_prefix }}- - if: ${{ inputs.testing == 'True' && steps.restore-cache.outputs.cache-hit != 'true' }} + if: ${{ (github.event_name == 'pull_request' || inputs.load_any == 'True') && inputs.testing == 'True' && steps.restore-cache.outputs.cache-hit != 'true' }} From 0b9fd92ab828de4edd328a50a282e742a62e61df Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 22 Jun 2023 17:22:34 -0700 Subject: [PATCH 323/385] Use load_any excl. --- .github/actions/load_conan/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/load_conan/action.yml b/.github/actions/load_conan/action.yml index bac4ad5f..7cea08ed 100644 --- a/.github/actions/load_conan/action.yml +++ b/.github/actions/load_conan/action.yml @@ -53,5 +53,5 @@ runs: ~/.conan/data key: ${{ inputs.key_prefix }}-${{ hashFiles(steps.hash-key-primary.outputs.key, steps.hash-key-3rd.outputs.keys) }} restore-keys: ${{ inputs.key_prefix }}- - if: ${{ (github.event_name == 'pull_request' || inputs.load_any == 'True') && inputs.testing == 'True' && steps.restore-cache.outputs.cache-hit != 'true' }} + if: ${{ steps.restore-cache.outputs.cache-hit != 'true' && (( github.event_name == 'pull_request' && inputs.testing == 'True' && ) || ( inputs.load_any == 'True' )) }} From 02c69ac971118ee4afd91909a3023e183f57d480 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 22 Jun 2023 17:24:31 -0700 Subject: [PATCH 324/385] Extra && --- .github/actions/load_conan/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/load_conan/action.yml b/.github/actions/load_conan/action.yml index 7cea08ed..22991f83 100644 --- a/.github/actions/load_conan/action.yml +++ b/.github/actions/load_conan/action.yml @@ -53,5 +53,5 @@ runs: ~/.conan/data key: ${{ inputs.key_prefix }}-${{ hashFiles(steps.hash-key-primary.outputs.key, steps.hash-key-3rd.outputs.keys) }} restore-keys: ${{ inputs.key_prefix }}- - if: ${{ steps.restore-cache.outputs.cache-hit != 'true' && (( github.event_name == 'pull_request' && inputs.testing == 'True' && ) || ( inputs.load_any == 'True' )) }} + if: ${{ steps.restore-cache.outputs.cache-hit != 'true' && (( github.event_name == 'pull_request' && inputs.testing == 'True' ) || ( inputs.load_any == 'True' )) }} From d70f0298102f03a0dd97ee37dcc307cf33117482 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 22 Jun 2023 17:35:39 -0700 Subject: [PATCH 325/385] Support no 3rd_party. --- .github/actions/store_conan/action.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/actions/store_conan/action.yml b/.github/actions/store_conan/action.yml index 906f3b0d..065614fa 100644 --- a/.github/actions/store_conan/action.yml +++ b/.github/actions/store_conan/action.yml @@ -11,7 +11,9 @@ runs: - name: Setup Conan and Export Recipes shell: bash run: | - dep_pkgs=$(ls -1d 3rd_party/* 2>/dev/null | cut -d'/' -f2 | paste -sd'|' - -) + if [ -d 3rd_party ]; then + dep_pkgs=$(ls -1d 3rd_party/* 2>/dev/null | cut -d'/' -f2 | paste -sd'|' - -) + fi if [ -z "${dep_pkgs}" ]; then dep_pkgs="no_3rd_party" fi From 210217d4f51bdaf0bfb9def359c6e9516956176a Mon Sep 17 00:00:00 2001 From: Sanal P Date: Thu, 22 Jun 2023 14:26:50 -0700 Subject: [PATCH 326/385] Generate flip python gprc files in package. --- conanfile.py | 3 ++- src/flip/client/python/flip_rpc_client.py | 3 +-- src/flip/proto/CMakeLists.txt | 10 ++++++++++ 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/conanfile.py b/conanfile.py index 107a8bea..6b4dc1ee 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.6.1" + version = "8.6.2" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") @@ -121,6 +121,7 @@ def package(self): copy(self, "*.so*", self.build_folder, lib_dir, keep_path=False) copy(self, "*.proto", join(self.source_folder, "src", "flip", "proto"), join(self.package_folder, "proto", "flip"), keep_path=False) copy(self, "*", join(self.source_folder, "src", "flip", "client", "python"), join(self.package_folder, "bindings", "flip", "python"), keep_path=False) + copy(self, "*.py", join(self.build_folder, "src", "flip", "proto"), join(self.package_folder, "bindings", "flip", "python"), keep_path=False) copy(self, "*.h*", join(self.source_folder, "include"), join(self.package_folder, "include"), keep_path=True) diff --git a/src/flip/client/python/flip_rpc_client.py b/src/flip/client/python/flip_rpc_client.py index 25bb6747..91ba637c 100644 --- a/src/flip/client/python/flip_rpc_client.py +++ b/src/flip/client/python/flip_rpc_client.py @@ -3,7 +3,6 @@ import random import logging import sys -sys.path.append("gen_src") import grpc import flip_spec_pb2 as fspec @@ -15,7 +14,7 @@ class FlipRPCClient: def __init__(self, server_address): self.channel = grpc.insecure_channel(server_address) self.stub = flip_server_pb2_grpc.FlipServerStub(self.channel) - + def inject_fault(self, name, freq, conds, action): self.stub.InjectFault(fspec.FlipSpec(flip_name=name, conditions=conds, flip_action=action, flip_frequency=freq)) diff --git a/src/flip/proto/CMakeLists.txt b/src/flip/proto/CMakeLists.txt index bb08c7f4..7bdb9856 100644 --- a/src/flip/proto/CMakeLists.txt +++ b/src/flip/proto/CMakeLists.txt @@ -16,3 +16,13 @@ target_link_libraries(flip_proto protobuf::libprotobuf gRPC::grpc++ ) + +add_custom_target(flip_py_proto ALL) +protobuf_generate(LANGUAGE python TARGET flip_py_proto PROTOS flip_server.proto) +protobuf_generate(LANGUAGE python TARGET flip_py_proto PROTOS flip_spec.proto) +protobuf_generate( + TARGET flip_py_proto + PROTOS flip_server.proto flip_spec.proto + LANGUAGE grpc + GENERATE_EXTENSIONS _pb2_grpc.py + PLUGIN protoc-gen-grpc=$) From c64b6d17e54609c400db7e74878a50dc776eb5ec Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 23 Jun 2023 14:26:09 -0700 Subject: [PATCH 327/385] Fix chain --- .github/workflows/merge_conan_build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/merge_conan_build.yml b/.github/workflows/merge_conan_build.yml index 65391851..884809b7 100644 --- a/.github/workflows/merge_conan_build.yml +++ b/.github/workflows/merge_conan_build.yml @@ -44,4 +44,4 @@ jobs: -H "X-GitHub-Api-Version: 2022-11-28" \ https://api.github.com/repos/eBay/iomanager/actions/workflows/merge_conan_build.yml/dispatches \ -d '{"ref":"stable/v8.x","inputs":{}}' - if: ${{ github.ref == 'stable/v8.x' }} + if: ${{ github.ref == 'refs/heads/stable/v8.x' }} From 8559f1062345665b059b77c960bd65f296ed70a2 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 23 Jun 2023 14:30:33 -0700 Subject: [PATCH 328/385] Our ref. --- .github/workflows/merge_conan_build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/merge_conan_build.yml b/.github/workflows/merge_conan_build.yml index c1a2681f..8ff164c6 100644 --- a/.github/workflows/merge_conan_build.yml +++ b/.github/workflows/merge_conan_build.yml @@ -68,4 +68,4 @@ jobs: -H "X-GitHub-Api-Version: 2022-11-28" \ https://api.github.com/repos/eBay/nuraft_mesg/actions/workflows/merge_conan_build.yml/dispatches \ -d '{"ref":"main","inputs":{}}' - if: ${{ github.ref == 'refs/heads/main' }} + if: ${{ github.ref == 'refs/heads/master' }} From 0f444e0302bb40f48621630e263879b182c041cd Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 27 Jun 2023 10:35:26 -0700 Subject: [PATCH 329/385] Fix compile script for Jenkins CI. --- .jenkins/Jenkinsfile | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index ffd2ec2d..278351a5 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -93,22 +93,19 @@ pipeline { */ stage("Compile") { steps { - # For Sanitized Unit Testing (no publish) - sh "conan create ${BUILD_MISSING} -o ${PROJECT}:sanitize=True -pr debug . ${PROJECT}/${TAG}" - sh "conan remove -f ${PROJECT}/${TAG}" - - # Debug/Release with libc for OM (disabled for now) - #sh "conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG}" - #sh "conan create ${BUILD_MISSING} -pr test . ${PROJECT}/${TAG}" - - # Debug w/ libc for downstream Sanitizer builds - sh "conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG}" - - # Pre-Release for Stability - sh "conan create ${BUILD_MISSING} -o malloc_impl=tcmalloc -o sisl:prerelease=True -pr test . ${PROJECT}/${TAG}" - - # Release for Prod Build - sh "conan create ${BUILD_MISSING} -o malloc_impl=tcmalloc -pr test . ${PROJECT}/${TAG}" + sh "# For Sanitized Unit Testing (no publish) \ + conan create ${BUILD_MISSING} -o ${PROJECT}:sanitize=True -pr debug . ${PROJECT}/${TAG} ; \ + conan remove -f ${PROJECT}/${TAG} ; \ + # Debug/Release with libc for OM (disabled for now) \ + # conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG}" ; \ + # conan create ${BUILD_MISSING} -pr test . ${PROJECT}/${TAG}" ; \ + # Debug w/ libc for downstream Sanitizer builds \ + conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG}" ; \ + # Pre-Release for Stability \ + conan create ${BUILD_MISSING} -o malloc_impl=tcmalloc -o sisl:prerelease=True -pr test . ${PROJECT}/${TAG}" ; \ + # Release for Prod Build \ + conan create ${BUILD_MISSING} -o malloc_impl=tcmalloc -pr test . ${PROJECT}/${TAG} ; \ + " } } From e1896dde89dfe5963c0cf23b6db448ab97542598 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 27 Jun 2023 10:36:54 -0700 Subject: [PATCH 330/385] Syntax errors --- .jenkins/Jenkinsfile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 278351a5..4cf61442 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -97,12 +97,12 @@ pipeline { conan create ${BUILD_MISSING} -o ${PROJECT}:sanitize=True -pr debug . ${PROJECT}/${TAG} ; \ conan remove -f ${PROJECT}/${TAG} ; \ # Debug/Release with libc for OM (disabled for now) \ - # conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG}" ; \ - # conan create ${BUILD_MISSING} -pr test . ${PROJECT}/${TAG}" ; \ + # conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG} \ + # conan create ${BUILD_MISSING} -pr test . ${PROJECT}/${TAG} \ # Debug w/ libc for downstream Sanitizer builds \ - conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG}" ; \ + conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG} ; \ # Pre-Release for Stability \ - conan create ${BUILD_MISSING} -o malloc_impl=tcmalloc -o sisl:prerelease=True -pr test . ${PROJECT}/${TAG}" ; \ + conan create ${BUILD_MISSING} -o malloc_impl=tcmalloc -o sisl:prerelease=True -pr test . ${PROJECT}/${TAG} ; \ # Release for Prod Build \ conan create ${BUILD_MISSING} -o malloc_impl=tcmalloc -pr test . ${PROJECT}/${TAG} ; \ " From 509fea9460eee077856a3c7999fcca0f30eef979 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 27 Jun 2023 10:39:10 -0700 Subject: [PATCH 331/385] Remove escapes. --- .jenkins/Jenkinsfile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 4cf61442..3a4e0b7e 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -93,17 +93,17 @@ pipeline { */ stage("Compile") { steps { - sh "# For Sanitized Unit Testing (no publish) \ + sh "# For Sanitized Unit Testing (no publish) conan create ${BUILD_MISSING} -o ${PROJECT}:sanitize=True -pr debug . ${PROJECT}/${TAG} ; \ conan remove -f ${PROJECT}/${TAG} ; \ # Debug/Release with libc for OM (disabled for now) \ # conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG} \ # conan create ${BUILD_MISSING} -pr test . ${PROJECT}/${TAG} \ - # Debug w/ libc for downstream Sanitizer builds \ + # Debug w/ libc for downstream Sanitizer builds conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG} ; \ - # Pre-Release for Stability \ + # Pre-Release for Stability conan create ${BUILD_MISSING} -o malloc_impl=tcmalloc -o sisl:prerelease=True -pr test . ${PROJECT}/${TAG} ; \ - # Release for Prod Build \ + # Release for Prod Build conan create ${BUILD_MISSING} -o malloc_impl=tcmalloc -pr test . ${PROJECT}/${TAG} ; \ " } From c090465028c352ec246df0c7fb34e397d2c67fd3 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 27 Jun 2023 10:40:14 -0700 Subject: [PATCH 332/385] Remove comments entirely. --- .jenkins/Jenkinsfile | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 3a4e0b7e..756e201a 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -93,17 +93,10 @@ pipeline { */ stage("Compile") { steps { - sh "# For Sanitized Unit Testing (no publish) - conan create ${BUILD_MISSING} -o ${PROJECT}:sanitize=True -pr debug . ${PROJECT}/${TAG} ; \ + sh "conan create ${BUILD_MISSING} -o ${PROJECT}:sanitize=True -pr debug . ${PROJECT}/${TAG} ; \ conan remove -f ${PROJECT}/${TAG} ; \ - # Debug/Release with libc for OM (disabled for now) \ - # conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG} \ - # conan create ${BUILD_MISSING} -pr test . ${PROJECT}/${TAG} \ - # Debug w/ libc for downstream Sanitizer builds conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG} ; \ - # Pre-Release for Stability conan create ${BUILD_MISSING} -o malloc_impl=tcmalloc -o sisl:prerelease=True -pr test . ${PROJECT}/${TAG} ; \ - # Release for Prod Build conan create ${BUILD_MISSING} -o malloc_impl=tcmalloc -pr test . ${PROJECT}/${TAG} ; \ " } From 30e84cb300115a3a7f92b488e1e697c720390137 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 27 Jun 2023 10:47:12 -0700 Subject: [PATCH 333/385] Adjust options --- .jenkins/Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 756e201a..41b841b3 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -96,8 +96,8 @@ pipeline { sh "conan create ${BUILD_MISSING} -o ${PROJECT}:sanitize=True -pr debug . ${PROJECT}/${TAG} ; \ conan remove -f ${PROJECT}/${TAG} ; \ conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG} ; \ - conan create ${BUILD_MISSING} -o malloc_impl=tcmalloc -o sisl:prerelease=True -pr test . ${PROJECT}/${TAG} ; \ - conan create ${BUILD_MISSING} -o malloc_impl=tcmalloc -pr test . ${PROJECT}/${TAG} ; \ + conan create ${BUILD_MISSING} -o sisl:malloc_impl=tcmalloc -o sisl:prerelease=True -pr test . ${PROJECT}/${TAG} ; \ + conan create ${BUILD_MISSING} -o sisl:malloc_impl=tcmalloc -pr test . ${PROJECT}/${TAG} ; \ " } } From c866ce5ecac6712da4961e5d56ebd585a33da6f5 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 29 Jun 2023 15:45:48 +0000 Subject: [PATCH 334/385] Do not build tests when not being run. --- CMakeLists.txt | 9 ++- conanfile.py | 7 +- src/auth_manager/CMakeLists.txt | 32 ++++---- src/cache/CMakeLists.txt | 46 +++++------ src/fds/CMakeLists.txt | 130 ++++++++++++++++---------------- src/file_watcher/CMakeLists.txt | 16 ++-- src/flip/CMakeLists.txt | 38 +++++----- src/grpc/CMakeLists.txt | 8 +- src/logging/CMakeLists.txt | 14 ++-- src/metrics/CMakeLists.txt | 40 +++++----- src/options/CMakeLists.txt | 16 ++-- src/settings/CMakeLists.txt | 34 +++++---- src/sobject/CMakeLists.txt | 16 ++-- src/utility/CMakeLists.txt | 68 +++++++++-------- src/version/CMakeLists.txt | 16 ++-- src/wisr/CMakeLists.txt | 56 +++++++------- 16 files changed, 305 insertions(+), 241 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index a27ca676..eab16432 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -9,7 +9,6 @@ set_property(GLOBAL PROPERTY USE_FOLDERS ON) # turn on folder hierarchies include (cmake/Flags.cmake) set(CMAKE_CXX_STANDARD 17) -enable_testing() if(EXISTS ${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) @@ -18,6 +17,13 @@ else () message("The file conanbuildinfo.cmake doesn't exist, some properties will be unavailable") endif () +if (DEFINED ENABLE_TESTING) + if (${ENABLE_TESTING}) + enable_testing() + find_package(GTest QUIET REQUIRED) + endif() +endif() + if (${CMAKE_BUILD_TYPE} STREQUAL "Debug") include (cmake/debug_flags.cmake) endif() @@ -53,7 +59,6 @@ find_package(benchmark QUIET REQUIRED) find_package(Boost QUIET REQUIRED) find_package(cpr QUIET REQUIRED) find_package(cxxopts QUIET REQUIRED) -find_package(GTest QUIET REQUIRED) if (${MALLOC_IMPL} STREQUAL "tcmalloc") find_package(gperftools QUIET REQUIRED) endif() diff --git a/conanfile.py b/conanfile.py index c9f4fe6c..e78c9a41 100644 --- a/conanfile.py +++ b/conanfile.py @@ -111,6 +111,7 @@ def build(self): cmake = CMake(self) definitions = {'CONAN_BUILD_COVERAGE': 'OFF', + 'ENABLE_TESTING': 'OFF', 'CMAKE_EXPORT_COMPILE_COMMANDS': 'ON', 'CONAN_CMAKE_SILENT_OUTPUT': 'ON', 'MEMORY_SANITIZER_ON': 'OFF', @@ -122,11 +123,13 @@ def build(self): elif self.options.coverage: definitions['CONAN_BUILD_COVERAGE'] = 'ON' - definitions['MALLOC_IMPL'] = self.options.malloc_impl + if self.options.testing: + definitions['ENABLE_TESTING'] = 'ON' cmake.configure(defs=definitions) cmake.build() - cmake.test(output_on_failure=True) + if self.options.testing: + cmake.test(output_on_failure=True) def package(self): lib_dir = join(self.package_folder, "lib") diff --git a/src/auth_manager/CMakeLists.txt b/src/auth_manager/CMakeLists.txt index b1e1b34f..33142ad8 100644 --- a/src/auth_manager/CMakeLists.txt +++ b/src/auth_manager/CMakeLists.txt @@ -26,17 +26,21 @@ settings_gen_cpp( security_config.fbs ) -add_executable(test_auth_mgr) -target_sources(test_auth_mgr PRIVATE - tests/AuthTest.cpp - ) -target_link_libraries(test_auth_mgr - sisl - ${COMMON_DEPS} - cpr::cpr - Pistache::Pistache - flatbuffers::flatbuffers - jwt-cpp::jwt-cpp - GTest::gmock - ) -add_test(NAME AuthManager COMMAND test_auth_mgr) +if (DEFINED ENABLE_TESTING) + if (${ENABLE_TESTING}) + add_executable(test_auth_mgr) + target_sources(test_auth_mgr PRIVATE + tests/AuthTest.cpp + ) + target_link_libraries(test_auth_mgr + sisl + ${COMMON_DEPS} + cpr::cpr + Pistache::Pistache + flatbuffers::flatbuffers + jwt-cpp::jwt-cpp + GTest::gmock + ) + add_test(NAME AuthManager COMMAND test_auth_mgr) + endif() +endif() diff --git a/src/cache/CMakeLists.txt b/src/cache/CMakeLists.txt index 06b1f035..d33e8db0 100644 --- a/src/cache/CMakeLists.txt +++ b/src/cache/CMakeLists.txt @@ -6,26 +6,30 @@ target_sources(sisl_cache PRIVATE ) target_link_libraries(sisl_cache ${COMMON_DEPS}) -add_executable(test_range_hashmap) -target_sources(test_range_hashmap PRIVATE - tests/test_range_hashmap.cpp - ) -target_include_directories(test_range_hashmap BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) -target_link_libraries(test_range_hashmap sisl ${COMMON_DEPS} Folly::Folly GTest::gtest) -#add_test(NAME RangeHashMap COMMAND test_range_hashmap --num_iters 10000) +if (DEFINED ENABLE_TESTING) + if (${ENABLE_TESTING}) + add_executable(test_range_hashmap) + target_sources(test_range_hashmap PRIVATE + tests/test_range_hashmap.cpp + ) + target_include_directories(test_range_hashmap BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) + target_link_libraries(test_range_hashmap sisl ${COMMON_DEPS} Folly::Folly GTest::gtest) + #add_test(NAME RangeHashMap COMMAND test_range_hashmap --num_iters 10000) -add_executable(test_range_cache) -target_sources(test_range_cache PRIVATE - tests/test_range_cache.cpp - ) -target_include_directories(test_range_cache BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) -target_link_libraries(test_range_cache sisl ${COMMON_DEPS} Folly::Folly GTest::gtest) -#add_test(NAME RangeCache COMMAND test_range_cache --num_iters 1000) + add_executable(test_range_cache) + target_sources(test_range_cache PRIVATE + tests/test_range_cache.cpp + ) + target_include_directories(test_range_cache BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) + target_link_libraries(test_range_cache sisl ${COMMON_DEPS} Folly::Folly GTest::gtest) + #add_test(NAME RangeCache COMMAND test_range_cache --num_iters 1000) -add_executable(test_simple_cache) -target_sources(test_simple_cache PRIVATE - tests/test_simple_cache.cpp - ) -target_include_directories(test_simple_cache BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) -target_link_libraries(test_simple_cache sisl ${COMMON_DEPS} Folly::Folly GTest::gtest) -add_test(NAME SimpleCache COMMAND test_simple_cache --num_iters 1000) + add_executable(test_simple_cache) + target_sources(test_simple_cache PRIVATE + tests/test_simple_cache.cpp + ) + target_include_directories(test_simple_cache BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) + target_link_libraries(test_simple_cache sisl ${COMMON_DEPS} Folly::Folly GTest::gtest) + add_test(NAME SimpleCache COMMAND test_simple_cache --num_iters 1000) + endif() +endif() diff --git a/src/fds/CMakeLists.txt b/src/fds/CMakeLists.txt index 52d6379d..d0841fe5 100644 --- a/src/fds/CMakeLists.txt +++ b/src/fds/CMakeLists.txt @@ -8,77 +8,81 @@ target_sources(sisl_buffer PRIVATE ) target_link_libraries(sisl_buffer Folly::Folly ${COMMON_DEPS}) -add_executable(test_stream_tracker) -target_sources(test_stream_tracker PRIVATE - tests/test_stream_tracker.cpp - ) -target_link_libraries(test_stream_tracker sisl ${COMMON_DEPS} GTest::gtest) -add_test(NAME StreamTracker COMMAND test_stream_tracker) +if (DEFINED ENABLE_TESTING) + if (${ENABLE_TESTING}) + add_executable(test_stream_tracker) + target_sources(test_stream_tracker PRIVATE + tests/test_stream_tracker.cpp + ) + target_link_libraries(test_stream_tracker sisl ${COMMON_DEPS} GTest::gtest) + add_test(NAME StreamTracker COMMAND test_stream_tracker) -add_executable(test_atomic_status_counter) -target_sources(test_atomic_status_counter PRIVATE - tests/test_atomic_status_counter.cpp - ) -target_link_libraries(test_atomic_status_counter sisl ${COMMON_DEPS} GTest::gtest atomic) -add_test(NAME AtomicStatusCounter COMMAND test_atomic_status_counter) + add_executable(test_atomic_status_counter) + target_sources(test_atomic_status_counter PRIVATE + tests/test_atomic_status_counter.cpp + ) + target_link_libraries(test_atomic_status_counter sisl ${COMMON_DEPS} GTest::gtest atomic) + add_test(NAME AtomicStatusCounter COMMAND test_atomic_status_counter) -add_executable(test_bitset) -target_sources(test_bitset PRIVATE - tests/test_bitset.cpp - ) -target_link_libraries(test_bitset sisl ${COMMON_DEPS} GTest::gtest) -add_test(NAME Bitset COMMAND test_bitset) + add_executable(test_bitset) + target_sources(test_bitset PRIVATE + tests/test_bitset.cpp + ) + target_link_libraries(test_bitset sisl ${COMMON_DEPS} GTest::gtest) + add_test(NAME Bitset COMMAND test_bitset) -add_executable(test_bitword) -target_sources(test_bitword PRIVATE - tests/test_bitword.cpp - ) -target_link_libraries(test_bitword sisl ${COMMON_DEPS} GTest::gtest) -add_test(NAME Bitword COMMAND test_bitset) + add_executable(test_bitword) + target_sources(test_bitword PRIVATE + tests/test_bitword.cpp + ) + target_link_libraries(test_bitword sisl ${COMMON_DEPS} GTest::gtest) + add_test(NAME Bitword COMMAND test_bitset) -add_executable(obj_allocator_benchmark) -target_sources(obj_allocator_benchmark PRIVATE - tests/obj_allocator_benchmark.cpp - ) -target_link_libraries(obj_allocator_benchmark sisl ${COMMON_DEPS} benchmark::benchmark) -add_test(NAME ObjAllocatorBenchmark COMMAND obj_allocator_benchmark) + add_executable(obj_allocator_benchmark) + target_sources(obj_allocator_benchmark PRIVATE + tests/obj_allocator_benchmark.cpp + ) + target_link_libraries(obj_allocator_benchmark sisl ${COMMON_DEPS} benchmark::benchmark) + add_test(NAME ObjAllocatorBenchmark COMMAND obj_allocator_benchmark) -add_executable(test_obj_allocator) -target_sources(test_obj_allocator PRIVATE - tests/test_obj_allocator.cpp - ) -target_link_libraries(test_obj_allocator sisl ${COMMON_DEPS}) -add_test(NAME ObjAlloc COMMAND test_obj_allocator) + add_executable(test_obj_allocator) + target_sources(test_obj_allocator PRIVATE + tests/test_obj_allocator.cpp + ) + target_link_libraries(test_obj_allocator sisl ${COMMON_DEPS}) + add_test(NAME ObjAlloc COMMAND test_obj_allocator) -add_executable(test_cb_mutex) -target_sources(test_cb_mutex PRIVATE - tests/test_cb_mutex.cpp - ) -target_link_libraries(test_cb_mutex sisl ${COMMON_DEPS} GTest::gtest) -#add_test(NAME TestCBMutex COMMAND test_cb_mutex) + add_executable(test_cb_mutex) + target_sources(test_cb_mutex PRIVATE + tests/test_cb_mutex.cpp + ) + target_link_libraries(test_cb_mutex sisl ${COMMON_DEPS} GTest::gtest) + #add_test(NAME TestCBMutex COMMAND test_cb_mutex) -add_executable(test_sg_list) -target_sources(test_sg_list PRIVATE - tests/test_sg_list.cpp - ) -target_link_libraries(test_sg_list sisl ${COMMON_DEPS} GTest::gtest) -add_test(NAME SgList COMMAND test_sg_list) + add_executable(test_sg_list) + target_sources(test_sg_list PRIVATE + tests/test_sg_list.cpp + ) + target_link_libraries(test_sg_list sisl ${COMMON_DEPS} GTest::gtest) + add_test(NAME SgList COMMAND test_sg_list) -if (DEFINED MALLOC_IMPL) - if (${MALLOC_IMPL} STREQUAL "jemalloc") - add_executable(test_jemalloc) - target_sources(test_jemalloc PRIVATE - tests/test_jemalloc_helper.cpp - ) - target_link_libraries(test_jemalloc sisl ${COMMON_DEPS} GTest::gtest) - add_test(NAME TestJemalloc COMMAND test_jemalloc) - elseif (${MALLOC_IMPL} STREQUAL "tcmalloc") - add_executable(test_tcmalloc) - target_sources(test_tcmalloc PRIVATE - tests/test_tcmalloc_helper.cpp - ) - target_link_libraries(test_tcmalloc sisl ${COMMON_DEPS} GTest::gtest) - add_test(NAME TestTcmalloc COMMAND test_tcmalloc) + if (DEFINED MALLOC_IMPL) + if (${MALLOC_IMPL} STREQUAL "jemalloc") + add_executable(test_jemalloc) + target_sources(test_jemalloc PRIVATE + tests/test_jemalloc_helper.cpp + ) + target_link_libraries(test_jemalloc sisl ${COMMON_DEPS} GTest::gtest) + add_test(NAME TestJemalloc COMMAND test_jemalloc) + elseif (${MALLOC_IMPL} STREQUAL "tcmalloc") + add_executable(test_tcmalloc) + target_sources(test_tcmalloc PRIVATE + tests/test_tcmalloc_helper.cpp + ) + target_link_libraries(test_tcmalloc sisl ${COMMON_DEPS} GTest::gtest) + add_test(NAME TestTcmalloc COMMAND test_tcmalloc) + endif() endif() + endif() endif() diff --git a/src/file_watcher/CMakeLists.txt b/src/file_watcher/CMakeLists.txt index 36cb0bb9..aeaf9cd8 100644 --- a/src/file_watcher/CMakeLists.txt +++ b/src/file_watcher/CMakeLists.txt @@ -6,9 +6,13 @@ target_sources(sisl_file_watcher PRIVATE ) target_link_libraries(sisl_file_watcher ${COMMON_DEPS}) -add_executable(test_file_watcher) -target_sources(test_file_watcher PRIVATE - file_watcher_test.cpp - ) -target_link_libraries(test_file_watcher sisl ${COMMON_DEPS} GTest::gtest GTest::gmock) -add_test(NAME FileWatcher COMMAND test_file_watcher) +if (DEFINED ENABLE_TESTING) + if (${ENABLE_TESTING}) + add_executable(test_file_watcher) + target_sources(test_file_watcher PRIVATE + file_watcher_test.cpp + ) + target_link_libraries(test_file_watcher sisl ${COMMON_DEPS} GTest::gtest GTest::gmock) + add_test(NAME FileWatcher COMMAND test_file_watcher) + endif() +endif() diff --git a/src/flip/CMakeLists.txt b/src/flip/CMakeLists.txt index dc305577..368cabce 100644 --- a/src/flip/CMakeLists.txt +++ b/src/flip/CMakeLists.txt @@ -21,22 +21,26 @@ target_link_libraries(flip nlohmann_json::nlohmann_json ) -add_executable(test_flip) -target_sources(test_flip PRIVATE - lib/test_flip.cpp - ) -target_link_libraries(test_flip flip cxxopts::cxxopts) -add_test(NAME Flip COMMAND test_flip) +if (DEFINED ENABLE_TESTING) + if (${ENABLE_TESTING}) + add_executable(test_flip) + target_sources(test_flip PRIVATE + lib/test_flip.cpp + ) + target_link_libraries(test_flip flip cxxopts::cxxopts) + add_test(NAME Flip COMMAND test_flip) -add_executable(test_flip_server) -target_sources(test_flip_server PRIVATE - lib/test_flip_server.cpp - ) -target_link_libraries(test_flip_server flip cxxopts::cxxopts) + add_executable(test_flip_server) + target_sources(test_flip_server PRIVATE + lib/test_flip_server.cpp + ) + target_link_libraries(test_flip_server flip cxxopts::cxxopts) -add_executable(test_flip_local_client) -target_sources(test_flip_local_client PRIVATE - client/local/test_flip_local_client.cpp - ) -target_link_libraries(test_flip_local_client flip cxxopts::cxxopts) -add_test(NAME FlipLocalClient COMMAND test_flip_local_client) + add_executable(test_flip_local_client) + target_sources(test_flip_local_client PRIVATE + client/local/test_flip_local_client.cpp + ) + target_link_libraries(test_flip_local_client flip cxxopts::cxxopts) + add_test(NAME FlipLocalClient COMMAND test_flip_local_client) + endif() +endif() diff --git a/src/grpc/CMakeLists.txt b/src/grpc/CMakeLists.txt index ea4c1ee0..37b977c2 100644 --- a/src/grpc/CMakeLists.txt +++ b/src/grpc/CMakeLists.txt @@ -19,6 +19,10 @@ target_link_libraries(sisl_grpc ${COMMON_DEPS} ) -if(${Pistache_FOUND}) - add_subdirectory(tests) +if (DEFINED ENABLE_TESTING) + if (${ENABLE_TESTING}) + if(${Pistache_FOUND}) + add_subdirectory(tests) + endif() + endif() endif() diff --git a/src/logging/CMakeLists.txt b/src/logging/CMakeLists.txt index 83870062..396f8994 100644 --- a/src/logging/CMakeLists.txt +++ b/src/logging/CMakeLists.txt @@ -7,8 +7,12 @@ target_sources(sisl_logging PRIVATE ) target_link_libraries(sisl_logging ${COMMON_DEPS}) -add_executable(logging_example) -target_sources(logging_example PRIVATE - test/example.cpp - ) -target_link_libraries(logging_example sisl ${COMMON_DEPS}) +if (DEFINED ENABLE_TESTING) + if (${ENABLE_TESTING}) + add_executable(logging_example) + target_sources(logging_example PRIVATE + test/example.cpp + ) + target_link_libraries(logging_example sisl ${COMMON_DEPS}) + endif() +endif() diff --git a/src/metrics/CMakeLists.txt b/src/metrics/CMakeLists.txt index f14e62f7..9fb9958c 100644 --- a/src/metrics/CMakeLists.txt +++ b/src/metrics/CMakeLists.txt @@ -15,23 +15,27 @@ target_link_libraries(sisl_metrics Folly::Folly ) -add_executable(metrics_farm_test) -target_sources(metrics_farm_test PRIVATE - tests/farm_test.cpp - ) -target_link_libraries(metrics_farm_test sisl ${COMMON_DEPS} GTest::gtest) -add_test(NAME MetricsFarm COMMAND metrics_farm_test) +if (DEFINED ENABLE_TESTING) + if (${ENABLE_TESTING}) + add_executable(metrics_farm_test) + target_sources(metrics_farm_test PRIVATE + tests/farm_test.cpp + ) + target_link_libraries(metrics_farm_test sisl ${COMMON_DEPS} GTest::gtest) + add_test(NAME MetricsFarm COMMAND metrics_farm_test) -add_executable(metrics_wrapper_test) -target_sources(metrics_wrapper_test PRIVATE - tests/wrapper_test.cpp - ) -target_link_libraries(metrics_wrapper_test sisl ${COMMON_DEPS} GTest::gtest) -add_test(NAME MetricsWrapper COMMAND metrics_wrapper_test) + add_executable(metrics_wrapper_test) + target_sources(metrics_wrapper_test PRIVATE + tests/wrapper_test.cpp + ) + target_link_libraries(metrics_wrapper_test sisl ${COMMON_DEPS} GTest::gtest) + add_test(NAME MetricsWrapper COMMAND metrics_wrapper_test) -add_executable(metrics_benchmark) -target_sources(metrics_benchmark PRIVATE - tests/metrics_benchmark.cpp - ) -target_link_libraries(metrics_benchmark sisl ${COMMON_DEPS} benchmark::benchmark) -add_test(NAME MetricsBenchmark COMMAND metrics_benchmark) + add_executable(metrics_benchmark) + target_sources(metrics_benchmark PRIVATE + tests/metrics_benchmark.cpp + ) + target_link_libraries(metrics_benchmark sisl ${COMMON_DEPS} benchmark::benchmark) + add_test(NAME MetricsBenchmark COMMAND metrics_benchmark) + endif() +endif() diff --git a/src/options/CMakeLists.txt b/src/options/CMakeLists.txt index f80d2a3d..eca57063 100644 --- a/src/options/CMakeLists.txt +++ b/src/options/CMakeLists.txt @@ -6,9 +6,13 @@ target_sources(sisl_options PRIVATE ) target_link_libraries(sisl_options ${COMMON_DEPS}) -add_executable(basic_test) -target_sources(basic_test PRIVATE - tests/basic.cpp - ) -target_link_libraries(basic_test sisl ${COMMON_DEPS} GTest::gtest) -add_test(NAME OptionsBasics COMMAND basic_test ${extra_args}) +if (DEFINED ENABLE_TESTING) + if (${ENABLE_TESTING}) + add_executable(basic_test) + target_sources(basic_test PRIVATE + tests/basic.cpp + ) + target_link_libraries(basic_test sisl ${COMMON_DEPS} GTest::gtest) + add_test(NAME OptionsBasics COMMAND basic_test ${extra_args}) + endif() +endif() diff --git a/src/settings/CMakeLists.txt b/src/settings/CMakeLists.txt index 1d73c2ad..6d240965 100644 --- a/src/settings/CMakeLists.txt +++ b/src/settings/CMakeLists.txt @@ -11,19 +11,23 @@ target_link_libraries(sisl_settings flatbuffers::flatbuffers ) -add_executable(test_settings) -target_sources(test_settings PRIVATE - tests/test_settings.cpp - ) -settings_gen_cpp( - ${FLATBUFFERS_FLATC_EXECUTABLE} - ${CMAKE_CURRENT_BINARY_DIR}/generated/ - test_settings - tests/test_app_schema.fbs - ) -if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) - target_include_directories(test_settings BEFORE PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) +if (DEFINED ENABLE_TESTING) + if (${ENABLE_TESTING}) + add_executable(test_settings) + target_sources(test_settings PRIVATE + tests/test_settings.cpp + ) + settings_gen_cpp( + ${FLATBUFFERS_FLATC_EXECUTABLE} + ${CMAKE_CURRENT_BINARY_DIR}/generated/ + test_settings + tests/test_app_schema.fbs + ) + if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) + target_include_directories(test_settings BEFORE PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) + endif() + target_include_directories(test_settings BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) + target_link_libraries(test_settings sisl ${COMMON_DEPS} flatbuffers::flatbuffers GTest::gtest) + add_test(NAME Settings COMMAND test_settings) + endif() endif() -target_include_directories(test_settings BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) -target_link_libraries(test_settings sisl ${COMMON_DEPS} flatbuffers::flatbuffers GTest::gtest) -add_test(NAME Settings COMMAND test_settings) diff --git a/src/sobject/CMakeLists.txt b/src/sobject/CMakeLists.txt index 3a24caaa..95c60b8f 100644 --- a/src/sobject/CMakeLists.txt +++ b/src/sobject/CMakeLists.txt @@ -4,9 +4,13 @@ add_library(sisl_sobject_mgr OBJECT) target_sources(sisl_sobject_mgr PRIVATE sobject.cpp) target_link_libraries(sisl_sobject_mgr ${COMMON_DEPS}) -add_executable(test_sobject) -target_sources(test_sobject PRIVATE - tests/test_sobject.cpp - ) -target_link_libraries(test_sobject sisl ${COMMON_DEPS} GTest::gtest) -add_test(NAME Sobject COMMAND test_sobject) +if (DEFINED ENABLE_TESTING) + if (${ENABLE_TESTING}) + add_executable(test_sobject) + target_sources(test_sobject PRIVATE + tests/test_sobject.cpp + ) + target_link_libraries(test_sobject sisl ${COMMON_DEPS} GTest::gtest) + add_test(NAME Sobject COMMAND test_sobject) + endif() +endif() diff --git a/src/utility/CMakeLists.txt b/src/utility/CMakeLists.txt index 0b95585d..fc41362d 100644 --- a/src/utility/CMakeLists.txt +++ b/src/utility/CMakeLists.txt @@ -2,39 +2,43 @@ cmake_minimum_required (VERSION 3.11) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}) -add_executable(test_atomic_counter) -target_sources(test_atomic_counter PRIVATE - tests/test_atomic_counter.cpp - ) -target_link_libraries(test_atomic_counter sisl ${COMMON_DEPS} GTest::gtest) -add_test(NAME AtomicCounter COMMAND test_atomic_counter) +if (DEFINED ENABLE_TESTING) + if (${ENABLE_TESTING}) + add_executable(test_atomic_counter) + target_sources(test_atomic_counter PRIVATE + tests/test_atomic_counter.cpp + ) + target_link_libraries(test_atomic_counter sisl ${COMMON_DEPS} GTest::gtest) + add_test(NAME AtomicCounter COMMAND test_atomic_counter) -add_executable(test_thread_buffer) -target_sources(test_thread_buffer PRIVATE - tests/test_thread_buffer.cpp - ) -target_link_libraries(test_thread_buffer ${COMMON_DEPS} GTest::gtest) -add_test(NAME ThreadBuffer COMMAND test_thread_buffer) + add_executable(test_thread_buffer) + target_sources(test_thread_buffer PRIVATE + tests/test_thread_buffer.cpp + ) + target_link_libraries(test_thread_buffer ${COMMON_DEPS} GTest::gtest) + add_test(NAME ThreadBuffer COMMAND test_thread_buffer) -add_executable(test_status_factory) -target_sources(test_status_factory PRIVATE - tests/test_status_factory.cpp - ) -target_link_libraries(test_status_factory ${COMMON_DEPS} benchmark::benchmark) -add_test(NAME StatusFactory COMMAND test_status_factory) + add_executable(test_status_factory) + target_sources(test_status_factory PRIVATE + tests/test_status_factory.cpp + ) + target_link_libraries(test_status_factory ${COMMON_DEPS} benchmark::benchmark) + add_test(NAME StatusFactory COMMAND test_status_factory) -add_executable(test_enum) -target_sources(test_enum PRIVATE - tests/test_enum.cpp - ) -target_link_libraries(test_enum ${COMMON_DEPS} GTest::gtest) -add_test(NAME Enum COMMAND test_enum) + add_executable(test_enum) + target_sources(test_enum PRIVATE + tests/test_enum.cpp + ) + target_link_libraries(test_enum ${COMMON_DEPS} GTest::gtest) + add_test(NAME Enum COMMAND test_enum) -if (${prerelease_dummy_FOUND}) - add_executable(test_objlife) - target_sources(test_objlife PRIVATE - tests/test_objlife_counter.cpp - ) - target_link_libraries(test_objlife sisl ${COMMON_DEPS} GTest::gtest) - add_test(NAME ObjLife COMMAND test_objlife) -endif () + if (${prerelease_dummy_FOUND}) + add_executable(test_objlife) + target_sources(test_objlife PRIVATE + tests/test_objlife_counter.cpp + ) + target_link_libraries(test_objlife sisl ${COMMON_DEPS} GTest::gtest) + add_test(NAME ObjLife COMMAND test_objlife) + endif () + endif() +endif() diff --git a/src/version/CMakeLists.txt b/src/version/CMakeLists.txt index cdf75f4b..1df1dec6 100644 --- a/src/version/CMakeLists.txt +++ b/src/version/CMakeLists.txt @@ -6,9 +6,13 @@ target_sources(sisl_version PRIVATE ) target_link_libraries(sisl_version ${COMMON_DEPS} zmarok-semver::zmarok-semver) -add_executable(test_version) -target_sources(test_version PRIVATE - tests/test_version.cpp - ) -target_link_libraries(test_version sisl ${COMMON_DEPS} zmarok-semver::zmarok-semver GTest::gtest) -add_test(NAME Version COMMAND test_version) +if (DEFINED ENABLE_TESTING) + if (${ENABLE_TESTING}) + add_executable(test_version) + target_sources(test_version PRIVATE + tests/test_version.cpp + ) + target_link_libraries(test_version sisl ${COMMON_DEPS} zmarok-semver::zmarok-semver GTest::gtest) + add_test(NAME Version COMMAND test_version) + endif() +endif() diff --git a/src/wisr/CMakeLists.txt b/src/wisr/CMakeLists.txt index e0121243..bad69615 100644 --- a/src/wisr/CMakeLists.txt +++ b/src/wisr/CMakeLists.txt @@ -2,33 +2,37 @@ cmake_minimum_required (VERSION 3.11) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}) -add_executable(wisr_vector_test) -target_sources(wisr_vector_test PRIVATE - tests/test_wisr_vector.cpp - ) -target_link_libraries(wisr_vector_test ${COMMON_DEPS} benchmark::benchmark GTest::gtest) -add_test(NAME WisrVector COMMAND wisr_vector_test) +if (DEFINED ENABLE_TESTING) + if (${ENABLE_TESTING}) + add_executable(wisr_vector_test) + target_sources(wisr_vector_test PRIVATE + tests/test_wisr_vector.cpp + ) + target_link_libraries(wisr_vector_test ${COMMON_DEPS} benchmark::benchmark GTest::gtest) + add_test(NAME WisrVector COMMAND wisr_vector_test) -add_executable(wisr_vector_benchmark) -target_sources(wisr_vector_benchmark PRIVATE - tests/wisr_vector_benchmark.cpp - ) -target_link_libraries(wisr_vector_benchmark ${COMMON_DEPS} benchmark::benchmark) + add_executable(wisr_vector_benchmark) + target_sources(wisr_vector_benchmark PRIVATE + tests/wisr_vector_benchmark.cpp + ) + target_link_libraries(wisr_vector_benchmark ${COMMON_DEPS} benchmark::benchmark) -add_executable(wisr_list_benchmark) -target_sources(wisr_list_benchmark PRIVATE - tests/wisr_list_benchmark.cpp - ) -target_link_libraries(wisr_list_benchmark ${COMMON_DEPS} benchmark::benchmark) + add_executable(wisr_list_benchmark) + target_sources(wisr_list_benchmark PRIVATE + tests/wisr_list_benchmark.cpp + ) + target_link_libraries(wisr_list_benchmark ${COMMON_DEPS} benchmark::benchmark) -add_executable(wisr_deque_benchmark) -target_sources(wisr_deque_benchmark PRIVATE - tests/wisr_deque_benchmark.cpp - ) -target_link_libraries(wisr_deque_benchmark ${COMMON_DEPS} benchmark::benchmark) + add_executable(wisr_deque_benchmark) + target_sources(wisr_deque_benchmark PRIVATE + tests/wisr_deque_benchmark.cpp + ) + target_link_libraries(wisr_deque_benchmark ${COMMON_DEPS} benchmark::benchmark) -add_executable(wisr_intrusive_slist_benchmark) -target_sources(wisr_intrusive_slist_benchmark PRIVATE - tests/wisr_intrusive_slist_benchmark.cpp - ) -target_link_libraries(wisr_intrusive_slist_benchmark ${COMMON_DEPS} benchmark::benchmark) + add_executable(wisr_intrusive_slist_benchmark) + target_sources(wisr_intrusive_slist_benchmark PRIVATE + tests/wisr_intrusive_slist_benchmark.cpp + ) + target_link_libraries(wisr_intrusive_slist_benchmark ${COMMON_DEPS} benchmark::benchmark) + endif() +endif() From 86b2970fea44435980158170b18bb3aa11046d0d Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Thu, 29 Jun 2023 15:45:22 -0700 Subject: [PATCH 335/385] dump breakpad stacktrace file in the same dir as logfile (#151) Co-authored-by: Ravi Akella email = raakella@ebay.com --- conanfile.py | 2 +- include/sisl/logging/logging.h | 1 + src/logging/logging.cpp | 35 +++++++++++++++++++--------------- src/logging/stacktrace.cpp | 2 +- 4 files changed, 23 insertions(+), 17 deletions(-) diff --git a/conanfile.py b/conanfile.py index 6b4dc1ee..fd643757 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.6.2" + version = "8.6.3" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/include/sisl/logging/logging.h b/include/sisl/logging/logging.h index 16407415..4dc951c9 100644 --- a/include/sisl/logging/logging.h +++ b/include/sisl/logging/logging.h @@ -474,6 +474,7 @@ extern bool is_crash_handler_installed(); extern bool restore_signal_handler(const SignalType sig_num); extern bool restore_signal_handlers(); extern bool send_thread_signal(const pthread_t thr, const SignalType sig_num); +extern std::filesystem::path get_base_dir(); template < typename... Args > std::string format_log_msg(const char* const msg, Args&&... args) { diff --git a/src/logging/logging.cpp b/src/logging/logging.cpp index e8bf2c41..5c294fb5 100644 --- a/src/logging/logging.cpp +++ b/src/logging/logging.cpp @@ -132,19 +132,25 @@ std::shared_ptr< spdlog::logger >& GetCriticalLogger() { return logger_thread_ctx.m_critical_logger; } -static std::filesystem::path get_base_dir() { - const auto cwd{std::filesystem::current_path()}; - auto p{cwd / "logs"}; - // Construct a unique directory path based on the current time - auto const current_time{std::chrono::system_clock::now()}; - auto const current_t{std::chrono::system_clock::to_time_t(current_time)}; - auto const current_tm{std::localtime(¤t_t)}; - std::array< char, PATH_MAX > c_time; - if (std::strftime(c_time.data(), c_time.size(), "%F_%R", current_tm)) { - p /= c_time.data(); - std::filesystem::create_directories(p); - } - return p; +static std::filesystem::path g_base_dir; + +std::filesystem::path get_base_dir() { + static std::once_flag one_base_dir; + std::call_once(one_base_dir, [] { + const auto cwd{std::filesystem::current_path()}; + g_base_dir = cwd / "logs"; + // Construct a unique directory path based on the current time + auto const current_time{std::chrono::system_clock::now()}; + auto const current_t{std::chrono::system_clock::to_time_t(current_time)}; + auto const current_tm{std::localtime(¤t_t)}; + std::array< char, PATH_MAX > c_time; + if (std::strftime(c_time.data(), c_time.size(), "%F_%R", current_tm)) { + g_base_dir /= c_time.data(); + std::filesystem::create_directories(g_base_dir); + } + }); + + return g_base_dir; } static std::filesystem::path log_path(std::string const& name) { @@ -152,8 +158,7 @@ static std::filesystem::path log_path(std::string const& name) { if (0 < SISL_OPTIONS.count("logfile")) { p = std::filesystem::path{SISL_OPTIONS["logfile"].as< std::string >()}; } else { - static std::filesystem::path base_dir{get_base_dir()}; - p = base_dir / std::filesystem::path{name}.filename(); + p = get_base_dir() / std::filesystem::path{name}.filename(); } return p; } diff --git a/src/logging/stacktrace.cpp b/src/logging/stacktrace.cpp index 8287507b..844e5362 100644 --- a/src/logging/stacktrace.cpp +++ b/src/logging/stacktrace.cpp @@ -126,7 +126,7 @@ static bool dumpCallback(const google_breakpad::MinidumpDescriptor& descriptor, } static void bt_dumper([[maybe_unused]] const SignalType signal_number) { - google_breakpad::ExceptionHandler::WriteMinidump("./", dumpCallback, nullptr); + google_breakpad::ExceptionHandler::WriteMinidump(get_base_dir().string(), dumpCallback, nullptr); } static void crash_handler(const SignalType signal_number) { From 6d6efcf1d56425be18aca973cc68306798db3bb2 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Thu, 20 Jul 2023 13:53:47 -0700 Subject: [PATCH 336/385] enable grpc tests (#153) Co-authored-by: Ravi Akella email = raakella@ebay.com --- conanfile.py | 7 ++++--- src/auth_manager/CMakeLists.txt | 2 +- src/grpc/CMakeLists.txt | 5 ++--- src/grpc/tests/CMakeLists.txt | 1 - 4 files changed, 7 insertions(+), 8 deletions(-) diff --git a/conanfile.py b/conanfile.py index e78c9a41..115ea524 100644 --- a/conanfile.py +++ b/conanfile.py @@ -79,9 +79,10 @@ def requirements(self): self.requires("prometheus-cpp/1.1.0") self.requires("spdlog/1.11.0") self.requires("zmarok-semver/1.1.0") - if self.settings.compiler in ["gcc"]: - self.requires("pistache/0.0.5") - + if self.options.testing: + if self.settings.compiler in ["gcc"]: + self.requires("pistache/0.0.5") + self.requires("fmt/8.1.1", override=True) self.requires("libcurl/7.86.0", override=True) self.requires("libevent/2.1.12", override=True) diff --git a/src/auth_manager/CMakeLists.txt b/src/auth_manager/CMakeLists.txt index 33142ad8..3e45f388 100644 --- a/src/auth_manager/CMakeLists.txt +++ b/src/auth_manager/CMakeLists.txt @@ -1,7 +1,6 @@ cmake_minimum_required (VERSION 3.11) find_package(flatbuffers QUIET REQUIRED) -find_package(Pistache QUIET REQUIRED) if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}) @@ -28,6 +27,7 @@ settings_gen_cpp( if (DEFINED ENABLE_TESTING) if (${ENABLE_TESTING}) + find_package(Pistache QUIET REQUIRED) add_executable(test_auth_mgr) target_sources(test_auth_mgr PRIVATE tests/AuthTest.cpp diff --git a/src/grpc/CMakeLists.txt b/src/grpc/CMakeLists.txt index 37b977c2..439ded40 100644 --- a/src/grpc/CMakeLists.txt +++ b/src/grpc/CMakeLists.txt @@ -21,8 +21,7 @@ target_link_libraries(sisl_grpc if (DEFINED ENABLE_TESTING) if (${ENABLE_TESTING}) - if(${Pistache_FOUND}) - add_subdirectory(tests) - endif() + find_package(Pistache QUIET REQUIRED) + add_subdirectory(tests) endif() endif() diff --git a/src/grpc/tests/CMakeLists.txt b/src/grpc/tests/CMakeLists.txt index f951c983..e4090cf8 100644 --- a/src/grpc/tests/CMakeLists.txt +++ b/src/grpc/tests/CMakeLists.txt @@ -1,7 +1,6 @@ cmake_minimum_required (VERSION 3.11) find_package(GTest QUIET REQUIRED) -find_package(Pistache QUIET REQUIRED) add_subdirectory(proto) From 04aad0e39ded14ca347d2fdfab134eac1e63d806 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 2 Aug 2023 12:08:51 -0600 Subject: [PATCH 337/385] Remove libcurl override. (#155) --- .jenkins/Jenkinsfile | 1 + conanfile.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 41b841b3..9ac4c6fd 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -16,6 +16,7 @@ pipeline { steps { script { sh(script: "sed -Ei 's, version = .*\"([[:digit:]]+\\.[[:digit:]]+\\.[[:digit:]]+).*, version = \"\\1-${env.BUILD_NUMBER}\",' conanfile.py") + sh(script: "sed -Ei 's,#LIBCURLFIXTOKEN.*,self.requires(\"libcurl/7.86.0\"\\, override=True),' conanfile.py") BUILD_MISSING = "--build missing" } } diff --git a/conanfile.py b/conanfile.py index 115ea524..b4cfe63d 100644 --- a/conanfile.py +++ b/conanfile.py @@ -84,7 +84,7 @@ def requirements(self): self.requires("pistache/0.0.5") self.requires("fmt/8.1.1", override=True) - self.requires("libcurl/7.86.0", override=True) + #LIBCURLFIXTOKEN self.requires("libevent/2.1.12", override=True) self.requires("openssl/1.1.1q", override=True) self.requires("xz_utils/5.2.5", override=True) From 0426e3d85521278eaba4f693f44f640a83fd946d Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Mon, 7 Aug 2023 10:09:47 -0700 Subject: [PATCH 338/385] Provide generic interfaces for token based authorization (#156) * remove trf client and replace it with a token client interface. * Make auth manager a generic interface * remove auth manager package * enable grpc tests --------- Co-authored-by: Ravi Akella email = raakella@ebay.com --- CMakeLists.txt | 2 - conanfile.py | 8 +- include/sisl/auth_manager/auth_manager.hpp | 37 -- include/sisl/auth_manager/security_config.hpp | 106 ------ include/sisl/auth_manager/token_client.hpp | 27 ++ include/sisl/auth_manager/token_verifier.hpp | 44 +++ include/sisl/auth_manager/trf_client.hpp | 51 --- include/sisl/grpc/rpc_client.hpp | 36 +- include/sisl/grpc/rpc_common.hpp | 2 - include/sisl/grpc/rpc_server.hpp | 10 +- src/CMakeLists.txt | 6 - src/auth_manager/CMakeLists.txt | 46 --- src/auth_manager/auth_manager.cpp | 93 ----- src/auth_manager/security_config.fbs | 51 --- src/auth_manager/tests/AuthTest.cpp | 323 ------------------ src/auth_manager/tests/basic_http_server.hpp | 50 --- src/auth_manager/tests/test_token.hpp | 110 ------ src/auth_manager/trf_client.cpp | 127 ------- src/grpc/CMakeLists.txt | 6 +- src/grpc/rpc_client.cpp | 14 +- src/grpc/rpc_server.cpp | 45 +-- src/grpc/tests/unit/CMakeLists.txt | 1 - src/grpc/tests/unit/auth_test.cpp | 105 ++---- src/grpc/tests/unit/basic_http_server.hpp | 75 ---- src/grpc/tests/unit/test_token.hpp | 86 ----- 25 files changed, 141 insertions(+), 1320 deletions(-) delete mode 100644 include/sisl/auth_manager/auth_manager.hpp delete mode 100644 include/sisl/auth_manager/security_config.hpp create mode 100644 include/sisl/auth_manager/token_client.hpp create mode 100644 include/sisl/auth_manager/token_verifier.hpp delete mode 100644 include/sisl/auth_manager/trf_client.hpp delete mode 100644 src/auth_manager/CMakeLists.txt delete mode 100644 src/auth_manager/auth_manager.cpp delete mode 100644 src/auth_manager/security_config.fbs delete mode 100644 src/auth_manager/tests/AuthTest.cpp delete mode 100644 src/auth_manager/tests/basic_http_server.hpp delete mode 100644 src/auth_manager/tests/test_token.hpp delete mode 100644 src/auth_manager/trf_client.cpp delete mode 100644 src/grpc/tests/unit/basic_http_server.hpp delete mode 100644 src/grpc/tests/unit/test_token.hpp diff --git a/CMakeLists.txt b/CMakeLists.txt index eab16432..ea7728c9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -57,7 +57,6 @@ endif() find_package(benchmark QUIET REQUIRED) find_package(Boost QUIET REQUIRED) -find_package(cpr QUIET REQUIRED) find_package(cxxopts QUIET REQUIRED) if (${MALLOC_IMPL} STREQUAL "tcmalloc") find_package(gperftools QUIET REQUIRED) @@ -67,7 +66,6 @@ if (${MALLOC_IMPL} STREQUAL "jemalloc") find_package(jemalloc QUIET REQUIRED) endif() -find_package(jwt-cpp QUIET REQUIRED) find_package(nlohmann_json QUIET REQUIRED) find_package(prerelease_dummy QUIET) find_package(prometheus-cpp QUIET REQUIRED) diff --git a/conanfile.py b/conanfile.py index b4cfe63d..2a883e80 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "9.4.5" + version = "10.0.1" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" @@ -68,21 +68,15 @@ def requirements(self): # Generic packages (conan-center) self.requires("boost/1.79.0") - self.requires("cpr/1.9.3") if self.settings.os in ["Linux"]: self.requires("breakpad/cci.20230127") self.requires("cxxopts/2.2.1") self.requires("flatbuffers/1.12.0") self.requires("grpc/1.48.0") - self.requires("jwt-cpp/0.4.0") self.requires("nlohmann_json/3.11.2") self.requires("prometheus-cpp/1.1.0") self.requires("spdlog/1.11.0") self.requires("zmarok-semver/1.1.0") - if self.options.testing: - if self.settings.compiler in ["gcc"]: - self.requires("pistache/0.0.5") - self.requires("fmt/8.1.1", override=True) #LIBCURLFIXTOKEN self.requires("libevent/2.1.12", override=True) diff --git a/include/sisl/auth_manager/auth_manager.hpp b/include/sisl/auth_manager/auth_manager.hpp deleted file mode 100644 index bf5ea957..00000000 --- a/include/sisl/auth_manager/auth_manager.hpp +++ /dev/null @@ -1,37 +0,0 @@ -#pragma once - -#include -#include - -#undef HTTP_OK // nameclash with cpr/cpr.h header -#include - -// maybe-uninitialized variable in one of the included headers from jwt.h -#if defined __clang__ or defined __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" -#endif -#include -#if defined __clang__ or defined __GNUC__ -#pragma GCC diagnostic pop -#endif - -#include -#include "security_config.hpp" - -namespace sisl { - -ENUM(AuthVerifyStatus, uint8_t, OK, UNAUTH, FORBIDDEN) - -class AuthManager { -public: - AuthManager() {} - virtual ~AuthManager() = default; - AuthVerifyStatus verify(const std::string& token, std::string& msg) const; - -private: - void verify_decoded(const jwt::decoded_jwt& decoded) const; - virtual std::string download_key(const std::string& key_url) const; - std::string get_app(const jwt::decoded_jwt& decoded) const; -}; -} // namespace sisl diff --git a/include/sisl/auth_manager/security_config.hpp b/include/sisl/auth_manager/security_config.hpp deleted file mode 100644 index fa0f14dc..00000000 --- a/include/sisl/auth_manager/security_config.hpp +++ /dev/null @@ -1,106 +0,0 @@ -#pragma once -#include -#include -#include "generated/security_config_generated.h" - -SETTINGS_INIT(securitycfg::SecuritySettings, security_config) - -#define SECURITY_DYNAMIC_CONFIG_WITH(...) SETTINGS(security_config, __VA_ARGS__) -#define SECURITY_DYNAMIC_CONFIG_THIS(...) SETTINGS_THIS(security_config, __VA_ARGS__) -#define SECURITY_DYNAMIC_CONFIG(...) SETTINGS_VALUE(security_config, __VA_ARGS__) - -#define SECURITY_SETTINGS_FACTORY() SETTINGS_FACTORY(security_config) - -class SecurityDynamicConfig { -public: - static constexpr std::string_view default_auth_allowed_apps = "all"; - - static std::string get_env(const std::string& env_str) { - auto env_var = getenv(env_str.c_str()); - return (env_var != nullptr) ? std::string(env_var) : ""; - } - - inline static const std::string default_app_name{get_env("APP_NAME")}; - inline static const std::string default_app_inst_name{get_env("APP_INST_NAME")}; - inline static const std::string default_pod_name{get_env("POD_NAME")}; - inline static const std::string default_app_env{get_env("APP_ENV")}; - inline static const std::string default_ssl_cert_file{get_env("SSL_CERT")}; - inline static const std::string default_ssl_key_file{get_env("SSL_KEY")}; - inline static const std::string default_ssl_ca_file{get_env("SSL_CA")}; - inline static const std::string default_tf_token_url{get_env("TOKEN_URL")}; - inline static const std::string default_issuer{get_env("TOKEN_ISSUER")}; - inline static const std::string default_server{get_env("TOKEN_SERVER")}; - inline static const std::string default_grant_path{get_env("TOKEN_GRANT")}; - - // This method sets up the default for settings factory when there is no override specified in the json - // file and .fbs cannot specify default because they are not scalar. - static void init_settings_default() { - bool is_modified = false; - SECURITY_SETTINGS_FACTORY().modifiable_settings([&is_modified](auto& s) { - auto& ssl_cert_file = s.ssl_cert_file; - if (ssl_cert_file.empty()) { - ssl_cert_file = default_ssl_cert_file; - is_modified = true; - } - auto& ssl_key_file = s.ssl_key_file; - if (ssl_key_file.empty()) { - ssl_key_file = default_ssl_key_file; - is_modified = true; - } - auto& ssl_ca_file = s.ssl_ca_file; - if (ssl_ca_file.empty()) { - ssl_ca_file = default_ssl_ca_file; - is_modified = true; - } - auto& server = s.trf_client->server; - if (server.empty()) { - server = std::string_view(default_server); - is_modified = true; - } - auto& grant_path = s.trf_client->grant_path; - if (grant_path.empty()) { - grant_path = std::string_view(default_grant_path); - is_modified = true; - } - auto& auth_allowed_apps = s.auth_manager->auth_allowed_apps; - if (auth_allowed_apps.empty()) { - auth_allowed_apps = default_auth_allowed_apps; - is_modified = true; - } - auto& issuer = s.auth_manager->issuer; - if (issuer.empty()) { - issuer = default_issuer; - is_modified = true; - } - auto& tf_token_url = s.auth_manager->tf_token_url; - if (tf_token_url.empty()) { - tf_token_url = default_tf_token_url; - is_modified = true; - } - auto& app_name = s.trf_client->app_name; - if (app_name.empty()) { - app_name = std::string_view(default_app_name); - is_modified = true; - } - auto& app_inst_name = s.trf_client->app_inst_name; - if (app_inst_name.empty()) { - app_inst_name = std::string_view(default_app_inst_name); - is_modified = true; - } - auto& app_env = s.trf_client->app_env; - if (app_env.empty()) { - app_env = std::string_view(default_app_env); - is_modified = true; - } - auto& pod_name = s.trf_client->pod_name; - if (pod_name.empty()) { - pod_name = std::string_view(default_pod_name); - is_modified = true; - } - - // Any more default overrides or set non-scalar entries come here - }); - - if (is_modified) { SECURITY_SETTINGS_FACTORY().save(); } - } -}; diff --git a/include/sisl/auth_manager/token_client.hpp b/include/sisl/auth_manager/token_client.hpp new file mode 100644 index 00000000..427e45f5 --- /dev/null +++ b/include/sisl/auth_manager/token_client.hpp @@ -0,0 +1,27 @@ +#pragma once +#include + +namespace sisl { + +// Interface to get a token for authorization + +class TokenClient { +public: + virtual ~TokenClient() = default; + + virtual std::string get_token() = 0; +}; + +// the key value pairs (m_auth_header_key, get_token()) are sent as metadata in the grpc client context + +class GrpcTokenClient : public TokenClient { +public: + explicit GrpcTokenClient(std::string const& auth_header_key) : m_auth_header_key(auth_header_key) {} + virtual ~GrpcTokenClient() = default; + + std::string get_auth_header_key() const { return m_auth_header_key; } + +private: + std::string m_auth_header_key; +}; +} // namespace sisl \ No newline at end of file diff --git a/include/sisl/auth_manager/token_verifier.hpp b/include/sisl/auth_manager/token_verifier.hpp new file mode 100644 index 00000000..67e0f95c --- /dev/null +++ b/include/sisl/auth_manager/token_verifier.hpp @@ -0,0 +1,44 @@ +#pragma once + +#include +#include + +#include + +namespace grpc { +class Status; +class ServerContext; +} // namespace grpc + +namespace sisl { + +// An interface for verifing a token for authorization. This can be used in conjunction with the TokenClient which is an +// interface to get a token. The implementation is deployment specific, one example is jwt based tokens provided by +// ebay/TrustFabric + +ENUM(VerifyCode, uint8_t, OK, UNAUTH, FORBIDDEN) + +struct TokenVerifyStatus { + VerifyCode code; + std::string msg; +}; + +class TokenVerifier { +public: + virtual ~TokenVerifier() = default; + virtual TokenVerifyStatus verify(std::string const& token) const = 0; +}; + +// extracts the key value pairs (m_auth_header_key, get_token()) from grpc client context and verifies the token +class GrpcTokenVerifier : public TokenVerifier { +public: + explicit GrpcTokenVerifier(std::string const& auth_header_key) : m_auth_header_key(auth_header_key) {} + virtual ~GrpcTokenVerifier() = default; + + virtual grpc::Status verify(grpc::ServerContext const* srv_ctx) const = 0; + +protected: + std::string m_auth_header_key; +}; + +} // namespace sisl diff --git a/include/sisl/auth_manager/trf_client.hpp b/include/sisl/auth_manager/trf_client.hpp deleted file mode 100644 index 9632b7e7..00000000 --- a/include/sisl/auth_manager/trf_client.hpp +++ /dev/null @@ -1,51 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -#undef HTTP_OK // nameclash with cpr/cpr.h header -#include -#include -#include "security_config.hpp" - -namespace sisl { - -class TrfClient { -public: - TrfClient(); - std::string get_token(); - std::string get_token_type(); - std::string get_typed_token() { - // get_token needs to be called first which might potentially set token type - const auto token{get_token()}; - return fmt::format("{} {}", get_token_type(), token); - } - -private: - void validate_grant_path() const; - bool grant_path_exists() const { return std::filesystem::exists(SECURITY_DYNAMIC_CONFIG(trf_client->grant_path)); } - bool access_token_expired() const { - return (std::chrono::system_clock::now() > - m_expiry + std::chrono::seconds(SECURITY_DYNAMIC_CONFIG(auth_manager->leeway))); - } - static bool get_file_contents(const std::string& file_name, std::string& contents); - -private: - std::shared_mutex m_mtx; - -protected: - // acquire unique lock before calling - virtual void request_with_grant_token(); - void parse_response(const std::string& resp); - static std::string get_string(const std::string& resp, const std::string& pattern); - static std::string get_quoted_string(const std::string& resp, const std::string& pattern); - -protected: - std::string m_access_token; - std::string m_token_type; - std::chrono::system_clock::time_point m_expiry; -}; - -} // namespace sisl diff --git a/include/sisl/grpc/rpc_client.hpp b/include/sisl/grpc/rpc_client.hpp index b4082c7c..52d50c15 100644 --- a/include/sisl/grpc/rpc_client.hpp +++ b/include/sisl/grpc/rpc_client.hpp @@ -31,7 +31,7 @@ #include #include #include -#include +#include namespace sisl { @@ -145,12 +145,12 @@ class GrpcBaseClient { const std::string m_ssl_cert; std::shared_ptr< ::grpc::ChannelInterface > m_channel; - std::shared_ptr< sisl::TrfClient > m_trf_client; + std::shared_ptr< sisl::GrpcTokenClient > m_token_client; public: GrpcBaseClient(const std::string& server_addr, const std::string& target_domain = "", const std::string& ssl_cert = ""); - GrpcBaseClient(const std::string& server_addr, const std::shared_ptr< sisl::TrfClient >& trf_client, + GrpcBaseClient(const std::string& server_addr, const std::shared_ptr< sisl::GrpcTokenClient >& token_client, const std::string& target_domain = "", const std::string& ssl_cert = ""); virtual ~GrpcBaseClient() = default; virtual bool is_connection_ready() const; @@ -228,9 +228,9 @@ class GrpcAsyncClient : public GrpcBaseClient { template < typename ServiceT > using StubPtr = std::unique_ptr< typename ServiceT::StubInterface >; - GrpcAsyncClient(const std::string& server_addr, const std::shared_ptr< sisl::TrfClient > trf_client, + GrpcAsyncClient(const std::string& server_addr, const std::shared_ptr< sisl::GrpcTokenClient > token_client, const std::string& target_domain = "", const std::string& ssl_cert = "") : - GrpcBaseClient(server_addr, trf_client, target_domain, ssl_cert) {} + GrpcBaseClient(server_addr, token_client, target_domain, ssl_cert) {} GrpcAsyncClient(const std::string& server_addr, const std::string& target_domain = "", const std::string& ssl_cert = "") : @@ -253,8 +253,8 @@ class GrpcAsyncClient : public GrpcBaseClient { using UPtr = std::unique_ptr< AsyncStub >; AsyncStub(StubPtr< ServiceT > stub, GrpcAsyncClientWorker* worker, - std::shared_ptr< sisl::TrfClient > trf_client) : - m_stub(std::move(stub)), m_worker(worker), m_trf_client(trf_client) {} + std::shared_ptr< sisl::GrpcTokenClient > token_client) : + m_stub(std::move(stub)), m_worker(worker), m_token_client(token_client) {} using stub_t = typename ServiceT::StubInterface; @@ -293,7 +293,9 @@ class GrpcAsyncClient : public GrpcBaseClient { const unary_callback_t< RespT >& callback, uint32_t deadline) { auto data = new ClientRpcDataInternal< ReqT, RespT >(callback); data->set_deadline(deadline); - if (m_trf_client) { data->add_metadata("authorization", m_trf_client->get_typed_token()); } + if (m_token_client) { + data->add_metadata(m_token_client->get_auth_header_key(), m_token_client->get_token()); + } // Note that async unary RPCs don't post a CQ tag in call data->m_resp_reader_ptr = (m_stub.get()->*method)(&data->context(), request, cq()); // CQ tag posted here @@ -307,7 +309,9 @@ class GrpcAsyncClient : public GrpcBaseClient { auto cd = new ClientRpcData< ReqT, RespT >(done_cb); builder_cb(cd->m_req); cd->set_deadline(deadline); - if (m_trf_client) { cd->add_metadata("authorization", m_trf_client->get_typed_token()); } + if (m_token_client) { + cd->add_metadata(m_token_client->get_auth_header_key(), m_token_client->get_token()); + } cd->m_resp_reader_ptr = (m_stub.get()->*method)(&cd->context(), cd->m_req, cq()); cd->m_resp_reader_ptr->Finish(&cd->reply(), &cd->status(), (void*)cd); } @@ -321,7 +325,9 @@ class GrpcAsyncClient : public GrpcBaseClient { for (auto const& [key, value] : metadata) { data->add_metadata(key, value); } - if (m_trf_client) { data->add_metadata("authorization", m_trf_client->get_typed_token()); } + if (m_token_client) { + data->add_metadata(m_token_client->get_auth_header_key(), m_token_client->get_token()); + } // Note that async unary RPCs don't post a CQ tag in call data->m_resp_reader_ptr = (m_stub.get()->*method)(&data->context(), request, cq()); // CQ tag posted here @@ -330,7 +336,7 @@ class GrpcAsyncClient : public GrpcBaseClient { StubPtr< ServiceT > m_stub; GrpcAsyncClientWorker* m_worker; - std::shared_ptr< sisl::TrfClient > m_trf_client; + std::shared_ptr< sisl::GrpcTokenClient > m_token_client; const StubPtr< ServiceT >& stub() { return m_stub; } @@ -347,8 +353,8 @@ class GrpcAsyncClient : public GrpcBaseClient { struct GenericAsyncStub { GenericAsyncStub(std::unique_ptr< grpc::GenericStub > stub, GrpcAsyncClientWorker* worker, - std::shared_ptr< sisl::TrfClient > trf_client) : - m_generic_stub(std::move(stub)), m_worker(worker), m_trf_client(trf_client) {} + std::shared_ptr< sisl::GrpcTokenClient > token_client) : + m_generic_stub(std::move(stub)), m_worker(worker), m_token_client(token_client) {} void call_unary(const grpc::ByteBuffer& request, const std::string& method, const generic_unary_callback_t& callback, uint32_t deadline); @@ -358,7 +364,7 @@ class GrpcAsyncClient : public GrpcBaseClient { std::unique_ptr< grpc::GenericStub > m_generic_stub; GrpcAsyncClientWorker* m_worker; - std::shared_ptr< sisl::TrfClient > m_trf_client; + std::shared_ptr< sisl::GrpcTokenClient > m_token_client; grpc::CompletionQueue* cq() { return &m_worker->cq(); } }; @@ -373,7 +379,7 @@ class GrpcAsyncClient : public GrpcBaseClient { auto w = GrpcAsyncClientWorker::get_worker(worker); if (w == nullptr) { throw std::runtime_error("worker thread not available"); } - return std::make_unique< AsyncStub< ServiceT > >(ServiceT::NewStub(m_channel), w, m_trf_client); + return std::make_unique< AsyncStub< ServiceT > >(ServiceT::NewStub(m_channel), w, m_token_client); } std::unique_ptr< GenericAsyncStub > make_generic_stub(const std::string& worker); diff --git a/include/sisl/grpc/rpc_common.hpp b/include/sisl/grpc/rpc_common.hpp index 593f31e2..c5fc09be 100644 --- a/include/sisl/grpc/rpc_common.hpp +++ b/include/sisl/grpc/rpc_common.hpp @@ -17,7 +17,6 @@ namespace sisl { class GrpcServer; class GenericRpcData; -enum class AuthVerifyStatus : uint8_t; using generic_rpc_handler_cb_t = std::function< bool(boost::intrusive_ptr< GenericRpcData >&) >; using generic_rpc_completed_cb_t = std::function< void(boost::intrusive_ptr< GenericRpcData >&) >; @@ -27,6 +26,5 @@ struct RPCHelper { static bool run_generic_handler_cb(GrpcServer* server, const std::string& method, boost::intrusive_ptr< GenericRpcData >& rpc_data); static grpc::Status do_authorization(const GrpcServer* server, const grpc::ServerContext* srv_ctx); - static grpc::StatusCode to_grpc_statuscode(const sisl::AuthVerifyStatus status); }; } // namespace sisl::grpc diff --git a/include/sisl/grpc/rpc_server.hpp b/include/sisl/grpc/rpc_server.hpp index 41defd2c..e23d1109 100644 --- a/include/sisl/grpc/rpc_server.hpp +++ b/include/sisl/grpc/rpc_server.hpp @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include "rpc_call.hpp" namespace sisl { @@ -40,7 +40,7 @@ class GrpcServer : private boost::noncopyable { GrpcServer(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, const std::string& ssl_cert); GrpcServer(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, - const std::string& ssl_cert, const std::shared_ptr< sisl::AuthManager >& auth_mgr); + const std::string& ssl_cert, const std::shared_ptr< sisl::GrpcTokenVerifier >& auth_mgr); virtual ~GrpcServer(); /** @@ -48,7 +48,7 @@ class GrpcServer : private boost::noncopyable { */ static GrpcServer* make(const std::string& listen_addr, uint32_t threads = 1, const std::string& ssl_key = "", const std::string& ssl_cert = ""); - static GrpcServer* make(const std::string& listen_addr, const std::shared_ptr< sisl::AuthManager >& auth_mgr, + static GrpcServer* make(const std::string& listen_addr, const std::shared_ptr< sisl::GrpcTokenVerifier >& auth_mgr, uint32_t threads = 1, const std::string& ssl_key = "", const std::string& ssl_cert = ""); void run(const rpc_thread_start_cb_t& thread_start_cb = nullptr); @@ -113,7 +113,7 @@ class GrpcServer : private boost::noncopyable { } bool is_auth_enabled() const; - sisl::AuthVerifyStatus auth_verify(const std::string& token, std::string& msg) const; + grpc::Status auth_verify(grpc::ServerContext const* srv_ctx) const; // generic service methods bool run_generic_handler_cb(const std::string& rpc_name, boost::intrusive_ptr< GenericRpcData >& rpc_data); @@ -136,7 +136,7 @@ class GrpcServer : private boost::noncopyable { std::unordered_map< const char*, ::grpc::Service* > m_services; std::mutex m_rpc_registry_mtx; std::vector< std::unique_ptr< RpcStaticInfoBase > > m_rpc_registry; - std::shared_ptr< sisl::AuthManager > m_auth_mgr; + std::shared_ptr< sisl::GrpcTokenVerifier > m_auth_mgr; std::unique_ptr< grpc::AsyncGenericService > m_generic_service; std::unique_ptr< GenericRpcStaticInfo > m_generic_rpc_static_info; bool m_generic_service_registered{false}; diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index cc276866..768116bf 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -9,12 +9,6 @@ add_subdirectory (sobject) # on Folly and pistache. It is unknown if Windows is supported... list(APPEND POSIX_LIBRARIES ) list(APPEND SISL_DEPS ) -if(${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") - add_subdirectory (auth_manager) - list(APPEND POSIX_LIBRARIES - $ - ) -endif() if(${userspace-rcu_FOUND}) add_subdirectory (grpc) list(APPEND POSIX_LIBRARIES diff --git a/src/auth_manager/CMakeLists.txt b/src/auth_manager/CMakeLists.txt deleted file mode 100644 index 3e45f388..00000000 --- a/src/auth_manager/CMakeLists.txt +++ /dev/null @@ -1,46 +0,0 @@ -cmake_minimum_required (VERSION 3.11) - -find_package(flatbuffers QUIET REQUIRED) - -if(NOT ${CMAKE_CURRENT_BINARY_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) - include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}) -endif() -include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}) - -add_library(sisl_auth_manager OBJECT) -target_sources(sisl_auth_manager PRIVATE - auth_manager.cpp - trf_client.cpp - ) -target_link_libraries(sisl_auth_manager - ${COMMON_DEPS} - cpr::cpr - flatbuffers::flatbuffers - jwt-cpp::jwt-cpp - ) -settings_gen_cpp( - ${FLATBUFFERS_FLATC_EXECUTABLE} - ${CMAKE_CURRENT_BINARY_DIR}/generated/ - sisl_auth_manager - security_config.fbs - ) - -if (DEFINED ENABLE_TESTING) - if (${ENABLE_TESTING}) - find_package(Pistache QUIET REQUIRED) - add_executable(test_auth_mgr) - target_sources(test_auth_mgr PRIVATE - tests/AuthTest.cpp - ) - target_link_libraries(test_auth_mgr - sisl - ${COMMON_DEPS} - cpr::cpr - Pistache::Pistache - flatbuffers::flatbuffers - jwt-cpp::jwt-cpp - GTest::gmock - ) - add_test(NAME AuthManager COMMAND test_auth_mgr) - endif() -endif() diff --git a/src/auth_manager/auth_manager.cpp b/src/auth_manager/auth_manager.cpp deleted file mode 100644 index 38396cca..00000000 --- a/src/auth_manager/auth_manager.cpp +++ /dev/null @@ -1,93 +0,0 @@ -#include -#include - -#include - -#include "sisl/auth_manager/auth_manager.hpp" - -namespace sisl { - -AuthVerifyStatus AuthManager::verify(const std::string& token, std::string& msg) const { - std::string app_name; - // TODO: cache tokens for better performance - try { - // this may throw if token is ill formed - const auto decoded{jwt::decode(token)}; - - // for any reason that causes the verification failure, an - // exception is thrown. - verify_decoded(decoded); - app_name = get_app(decoded); - } catch (const std::exception& e) { - msg = e.what(); - return AuthVerifyStatus::UNAUTH; - } - - // check client application - - if (SECURITY_DYNAMIC_CONFIG(auth_manager->auth_allowed_apps) != "all") { - if (SECURITY_DYNAMIC_CONFIG(auth_manager->auth_allowed_apps).find(app_name) == std::string::npos) { - msg = fmt::format("application '{}' is not allowed to perform the request", app_name); - return AuthVerifyStatus::FORBIDDEN; - } - } - - return AuthVerifyStatus::OK; -} -void AuthManager::verify_decoded(const jwt::decoded_jwt& decoded) const { - const auto alg{decoded.get_algorithm()}; - if (alg != "RS256") throw std::runtime_error(fmt::format("unsupported algorithm: {}", alg)); - - if (!decoded.has_header_claim("x5u")) throw std::runtime_error("no indication of verification key"); - - auto key_url = decoded.get_header_claim("x5u").as_string(); - - if (key_url.rfind(SECURITY_DYNAMIC_CONFIG(auth_manager->tf_token_url), 0) != 0) { - throw std::runtime_error(fmt::format("key url {} is not trusted", key_url)); - } - const std::string signing_key{download_key(key_url)}; - const auto verifier{jwt::verify() - .with_issuer(SECURITY_DYNAMIC_CONFIG(auth_manager->issuer)) - .allow_algorithm(jwt::algorithm::rs256(signing_key)) - .expires_at_leeway(SECURITY_DYNAMIC_CONFIG(auth_manager->leeway))}; - - // if verification fails, an instance of std::system_error subclass is thrown. - verifier.verify(decoded); -} - -std::string AuthManager::download_key(const std::string& key_url) const { - cpr::Session session; - session.SetUrl(cpr::Url{key_url}); - if (SECURITY_DYNAMIC_CONFIG(auth_manager->verify)) { - auto ca_file{SECURITY_DYNAMIC_CONFIG(ssl_ca_file)}; - auto cert_file{SECURITY_DYNAMIC_CONFIG(ssl_cert_file)}; - auto key_file{SECURITY_DYNAMIC_CONFIG(ssl_key_file)}; - - // constructor for CaInfo does std::move(filename) - auto sslOpts{cpr::Ssl(cpr::ssl::CaInfo{std::move(ca_file)})}; - sslOpts.SetOption(cpr::ssl::CertFile{std::move(cert_file)}); - sslOpts.SetOption(cpr::ssl::KeyFile{std::move(key_file)}); - session.SetOption(sslOpts); - } - - session.SetTimeout(std::chrono::milliseconds{5000}); - const auto resp{session.Get()}; - - if (resp.error) { throw std::runtime_error(fmt::format("download key failed: {}", resp.error.message)); } - if (resp.status_code != 200) { throw std::runtime_error(fmt::format("download key failed: {}", resp.text)); } - - return resp.text; -} - -std::string AuthManager::get_app(const jwt::decoded_jwt& decoded) const { - // get app name from client_id, which is the "sub" field in the decoded token - // body - // https://pages.github.corp.ebay.com/security-platform/documents/tf-documentation/tessintegration/#environment-variables - if (!decoded.has_payload_claim("sub")) return ""; - - const auto client_id{decoded.get_payload_claim("sub").as_string()}; - const auto start{client_id.find(",o=") + 3}; - const auto end{client_id.find_first_of(",", start)}; - return client_id.substr(start, end - start); -} -} // namespace sisl diff --git a/src/auth_manager/security_config.fbs b/src/auth_manager/security_config.fbs deleted file mode 100644 index e560455b..00000000 --- a/src/auth_manager/security_config.fbs +++ /dev/null @@ -1,51 +0,0 @@ -native_include "sisl/utility/non_null_ptr.hpp"; -namespace securitycfg; - -attribute "hotswap"; -attribute "deprecated"; - -table TrfClient { - // grant token mount directory - grant_path: string; - - // Server addr to download the token (default: https://trustfabric.vip.ebay.com/v2/token) - server: string; - - // Pod env variables - app_name: string; - app_inst_name: string; - app_env: string; - pod_name: string; -} - -table AuthManager { - // , separated allowed applications (default all) - auth_allowed_apps: string; - - // Token issuer (default trustfabric) - issuer: string; - - // signing key domain - tf_token_url: string; - - // leeway to the token expiration - leeway: uint32 = 0; - - // ssl verification for the signing key download url - verify: bool = true; -} - -table SecuritySettings { - // ssl cert related files - ssl_cert_file: string; - ssl_key_file: string; - ssl_ca_file: string; - - // Auth Manager to decode and verify the token - auth_manager: AuthManager; - - // Trustfabric client settings - trf_client: TrfClient; -} - -root_type SecuritySettings; diff --git a/src/auth_manager/tests/AuthTest.cpp b/src/auth_manager/tests/AuthTest.cpp deleted file mode 100644 index 7447a346..00000000 --- a/src/auth_manager/tests/AuthTest.cpp +++ /dev/null @@ -1,323 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "sisl/auth_manager/auth_manager.hpp" -#include "sisl/auth_manager/trf_client.hpp" -#include "test_token.hpp" -#include "basic_http_server.hpp" - -SISL_OPTIONS_ENABLE(logging) - -namespace sisl::testing { -using namespace ::testing; - -static std::string get_cur_file_dir() { - const std::string cur_file_path{__FILE__}; - const auto last_slash_pos{cur_file_path.rfind('/')}; - if (last_slash_pos == std::string::npos) { return ""; } - return std::string{cur_file_path.substr(0, last_slash_pos + 1)}; -} - -static const std::string cur_file_dir{get_cur_file_dir()}; - -static const std::string grant_path = fmt::format("{}/dummy_grant.cg", cur_file_dir); - -class MockAuthManager : public AuthManager { -public: - using AuthManager::AuthManager; - MOCK_METHOD(std::string, download_key, (const std::string&), (const)); - AuthVerifyStatus verify(const std::string& token) { - std::string msg; - return AuthManager::verify(token, msg); - } -}; - -class AuthTest : public ::testing::Test { -public: - virtual void SetUp() override { - load_settings(); - mock_auth_mgr = std::shared_ptr< MockAuthManager >(new MockAuthManager()); - } - - virtual void TearDown() override {} - - void set_allowed_to_all() { - SECURITY_SETTINGS_FACTORY().modifiable_settings([](auto& s) { s.auth_manager->auth_allowed_apps = "all"; }); - SECURITY_SETTINGS_FACTORY().save(); - } - - static void load_settings() { - SECURITY_SETTINGS_FACTORY().modifiable_settings([](auto& s) { - s.auth_manager->auth_allowed_apps = "app1, testapp, app2"; - s.auth_manager->tf_token_url = "http://127.0.0.1"; - s.auth_manager->leeway = 0; - s.auth_manager->issuer = "trustfabric"; - }); - SECURITY_SETTINGS_FACTORY().save(); - } - -protected: - std::shared_ptr< MockAuthManager > mock_auth_mgr; -}; - -// test the TestToken utility, should not raise -TEST(TokenGenerte, sign_and_decode) { - const auto token{TestToken().sign_rs256()}; - const auto verify{jwt::verify().allow_algorithm(jwt::algorithm::rs256(rsa_pub_key)).with_issuer("trustfabric")}; - const auto decoded{jwt::decode(token)}; - verify.verify(decoded); -} - -TEST_F(AuthTest, allow_vaid_token) { - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); - EXPECT_EQ(mock_auth_mgr->verify(TestToken().sign_rs256()), AuthVerifyStatus::OK); -} - -TEST_F(AuthTest, reject_garbage_auth) { - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(0); - EXPECT_EQ(mock_auth_mgr->verify("garbage_token"), AuthVerifyStatus::UNAUTH); -} - -TEST_F(AuthTest, reject_wrong_algorithm) { - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); - EXPECT_EQ(mock_auth_mgr->verify(TestToken().sign_rs512()), AuthVerifyStatus::UNAUTH); -} - -TEST_F(AuthTest, reject_untrusted_issuer) { - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); - // token is issued by an untrusted issuer, we only trust "trustfabric" - auto token{TestToken()}; - token.get_token().set_issuer("do_not_trust_me"); - EXPECT_EQ(mock_auth_mgr->verify(token.sign_rs256()), AuthVerifyStatus::UNAUTH); -} - -TEST_F(AuthTest, reject_untrusted_keyurl) { - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(0); - // the key url is an untrusted address, we only trust "http://127.0.0.1" - auto token{TestToken()}; - token.get_token().set_header_claim("x5u", jwt::claim(std::string{"http://untrusted.addr/keys/abc123"})); - EXPECT_EQ(mock_auth_mgr->verify(token.sign_rs256()), AuthVerifyStatus::UNAUTH); -} - -TEST_F(AuthTest, reject_expired_token) { - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); - // token expired 1 second ago - auto token{TestToken()}; - token.get_token().set_expires_at(std::chrono::system_clock::now() - std::chrono::seconds(1)); - EXPECT_EQ(mock_auth_mgr->verify(token.sign_rs256()), AuthVerifyStatus::UNAUTH); -} - -TEST_F(AuthTest, reject_download_key_fail) { - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Throw(std::runtime_error("download key failed"))); - EXPECT_EQ(mock_auth_mgr->verify(TestToken().sign_rs512()), AuthVerifyStatus::UNAUTH); -} - -TEST_F(AuthTest, reject_wrong_key) { - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub1_key)); - EXPECT_EQ(mock_auth_mgr->verify(TestToken().sign_rs512()), AuthVerifyStatus::UNAUTH); -} - -TEST_F(AuthTest, allow_all_apps) { - set_allowed_to_all(); - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); - auto token{TestToken()}; - token.get_token().set_subject("any-prefix,o=dummy_app,dc=tess,dc=ebay,dc=com"); - EXPECT_EQ(mock_auth_mgr->verify(token.sign_rs256()), AuthVerifyStatus::OK); -} - -TEST_F(AuthTest, reject_unauthorized_app) { - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); - // the client application is "myapp", which is not in the allowed list - auto token{TestToken()}; - token.get_token().set_subject("any-prefix,o=myapp,dc=tess,dc=ebay,dc=com"); - EXPECT_EQ(mock_auth_mgr->verify(token.sign_rs256()), AuthVerifyStatus::FORBIDDEN); -} - -// Testing trf client -class MockTrfClient : public TrfClient { -public: - using TrfClient::TrfClient; - MOCK_METHOD(void, request_with_grant_token, ()); - void set_token(const std::string& raw_token, const std::string token_type) { - m_access_token = raw_token; - m_token_type = token_type; - m_expiry = std::chrono::system_clock::now() + std::chrono::seconds(2000); - } - // deligate to parent class (run the real method) - - void __request_with_grant_token() { TrfClient::request_with_grant_token(); } - - void set_expiry(std::chrono::system_clock::time_point tp) { m_expiry = tp; } - std::string get_access_token() { return m_access_token; } - std::chrono::system_clock::time_point get_expiry() { return m_expiry; } - - void parse_token(const std::string& resp) { TrfClient::parse_response(resp); } -}; - -static void load_trf_settings() { - std::ofstream outfile{grant_path}; - outfile << "dummy cg contents\n"; - outfile.close(); - SECURITY_SETTINGS_FACTORY().modifiable_settings([](auto& s) { - s.trf_client->grant_path = grant_path; - s.trf_client->server = "127.0.0.1:12346/token"; - s.auth_manager->verify = false; - s.auth_manager->leeway = 30; - }); - SECURITY_SETTINGS_FACTORY().save(); -} - -static void remove_grant_path() { std::remove(grant_path.c_str()); } - -// this test will take 10 seconds to run -TEST_F(AuthTest, trf_grant_path_failure) { - load_trf_settings(); - remove_grant_path(); - EXPECT_THROW( - { - try { - TrfClient trf_client; - } catch (const std::runtime_error& e) { - const std::string cmp_string{ - fmt::format("trustfabric client grant path {} does not exist", grant_path)}; - EXPECT_STREQ(e.what(), cmp_string.c_str()); - throw e; - } - }, - std::runtime_error); -} - -TEST_F(AuthTest, trf_allow_valid_token) { - load_trf_settings(); - MockTrfClient mock_trf_client; - const auto raw_token{TestToken().sign_rs256()}; - // mock_trf_client is expected to be called twice - // 1. First time when access_token is empty - // 2. When token is set to be expired - EXPECT_CALL(mock_trf_client, request_with_grant_token()).Times(2); - ON_CALL(mock_trf_client, request_with_grant_token()) - .WillByDefault( - testing::Invoke([&mock_trf_client, &raw_token]() { mock_trf_client.set_token(raw_token, "Bearer"); })); - - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); - EXPECT_EQ(mock_auth_mgr->verify(mock_trf_client.get_token()), AuthVerifyStatus::OK); - - // use the acces_token saved from the previous call - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); - EXPECT_EQ(mock_auth_mgr->verify(mock_trf_client.get_token()), AuthVerifyStatus::OK); - - // set token to be expired invoking request_with_grant_token - mock_trf_client.set_expiry(std::chrono::system_clock::now() - std::chrono::seconds(100)); - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); - EXPECT_EQ(mock_auth_mgr->verify(mock_trf_client.get_token()), AuthVerifyStatus::OK); -} - -static const std::string trf_token_server_ip{"127.0.0.1"}; -static const uint32_t trf_token_server_port{12346}; -static std::string token_response; -static void set_token_response(const std::string& raw_token) { - token_response = "{\"access_token\":\"" + raw_token + - "\",\"token_type\":\"Bearer\",\"expires_in\":2000,\"refresh_token\":\"dummy_refresh_token\"}\n"; -} - -class TokenApiImpl : public TokenApi { -public: - void get_token_impl(Pistache::Http::ResponseWriter& response) { - LOGINFO("Sending token to client"); - response.send(Pistache::Http::Code::Ok, token_response); - } -}; - -// Test request_with_grant_token. Setup http server with path /token to return token json -class TrfClientTest : public ::testing::Test { -public: - TrfClientTest() = default; - TrfClientTest(const TrfClientTest&) = delete; - TrfClientTest& operator=(const TrfClientTest&) = delete; - TrfClientTest(TrfClientTest&&) noexcept = delete; - TrfClientTest& operator=(TrfClientTest&&) noexcept = delete; - virtual ~TrfClientTest() override = default; - - virtual void SetUp() override { - // start token server - APIBase::init(Pistache::Address(fmt::format("{}:{}", trf_token_server_ip, trf_token_server_port)), 1); - m_token_server = std::unique_ptr< TokenApiImpl >(new TokenApiImpl()); - m_token_server->setupRoutes(); - APIBase::start(); - } - - virtual void TearDown() override { - APIBase::stop(); - remove_grant_path(); - } - -private: - std::unique_ptr< TokenApiImpl > m_token_server; -}; - -TEST_F(TrfClientTest, trf_grant_path_load_failure) { - load_trf_settings(); - MockTrfClient mock_trf_client; - EXPECT_CALL(mock_trf_client, request_with_grant_token()).Times(1); - ON_CALL(mock_trf_client, request_with_grant_token()).WillByDefault(testing::Invoke([&mock_trf_client]() { - mock_trf_client.__request_with_grant_token(); - })); - remove_grant_path(); - EXPECT_THROW( - { - try { - mock_trf_client.get_token(); - } catch (const std::runtime_error& e) { - EXPECT_EQ( - e.what(), - fmt::format("could not load grant from path {}", SECURITY_DYNAMIC_CONFIG(trf_client->grant_path))); - throw e; - } - }, - std::runtime_error); -} - -TEST_F(TrfClientTest, request_with_grant_token) { - load_trf_settings(); - MockTrfClient mock_trf_client; - const auto raw_token{TestToken().sign_rs256()}; - set_token_response(raw_token); - EXPECT_CALL(mock_trf_client, request_with_grant_token()).Times(1); - ON_CALL(mock_trf_client, request_with_grant_token()).WillByDefault(testing::Invoke([&mock_trf_client]() { - mock_trf_client.__request_with_grant_token(); - })); - mock_trf_client.get_token(); - EXPECT_EQ(raw_token, mock_trf_client.get_access_token()); - EXPECT_EQ("Bearer", mock_trf_client.get_token_type()); -} - -TEST(TrfClientParseTest, parse_token) { - load_trf_settings(); - MockTrfClient mock_trf_client; - const auto raw_token{TestToken().sign_rs256()}; - set_token_response(raw_token); - EXPECT_TRUE(mock_trf_client.get_access_token().empty()); - EXPECT_TRUE(mock_trf_client.get_token_type().empty()); - mock_trf_client.parse_token(token_response); - EXPECT_EQ(raw_token, mock_trf_client.get_access_token()); - EXPECT_EQ("Bearer", mock_trf_client.get_token_type()); - EXPECT_TRUE(mock_trf_client.get_expiry() > std::chrono::system_clock::now()); - remove_grant_path(); -} -} // namespace sisl::testing - -using namespace sisl; -using namespace sisl::testing; - -int main(int argc, char* argv[]) { - ::testing::InitGoogleMock(&argc, argv); - SISL_OPTIONS_LOAD(argc, argv, logging) - return RUN_ALL_TESTS(); -} diff --git a/src/auth_manager/tests/basic_http_server.hpp b/src/auth_manager/tests/basic_http_server.hpp deleted file mode 100644 index 92aac0c2..00000000 --- a/src/auth_manager/tests/basic_http_server.hpp +++ /dev/null @@ -1,50 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#pragma once - -class APIBase { -public: - static void init(Pistache::Address addr, size_t thr) { - m_http_endpoint = std::make_shared< Pistache::Http::Endpoint >(addr); - auto flags = Pistache::Tcp::Options::ReuseAddr; - auto opts = Pistache::Http::Endpoint::options().threadsName("http_server").threads(thr).flags(flags); - m_http_endpoint->init(opts); - } - - static void start() { - m_http_endpoint->setHandler(m_router.handler()); - m_http_endpoint->serveThreaded(); - } - - static void stop() { m_http_endpoint->shutdown(); } - - virtual ~APIBase() {} - -protected: - static std::shared_ptr< Pistache::Http::Endpoint > m_http_endpoint; - static Pistache::Rest::Router m_router; -}; - -std::shared_ptr< Pistache::Http::Endpoint > APIBase::m_http_endpoint; -Pistache::Rest::Router APIBase::m_router; - -class TokenApi : public APIBase { -public: - void setupRoutes() { - Pistache::Rest::Routes::Post(m_router, "/token", - Pistache::Rest::Routes::bind(&TokenApi::get_token_handler, this)); - } - - void get_token_handler(const Pistache::Rest::Request&, Pistache::Http::ResponseWriter response) { - this->get_token_impl(response); - } - - virtual void get_token_impl(Pistache::Http::ResponseWriter& response) = 0; - - virtual ~TokenApi() { Pistache::Rest::Routes::Remove(m_router, Pistache::Http::Method::Post, "/token"); } -}; diff --git a/src/auth_manager/tests/test_token.hpp b/src/auth_manager/tests/test_token.hpp deleted file mode 100644 index 3d3d198a..00000000 --- a/src/auth_manager/tests/test_token.hpp +++ /dev/null @@ -1,110 +0,0 @@ -#pragma once - -namespace sisl::testing { -// public and private keys for unit test - -static const std::string rsa_pub_key = "-----BEGIN PUBLIC KEY-----\n" - "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuGbXWiK3dQTyCbX5xdE4\n" - "yCuYp0AF2d15Qq1JSXT/lx8CEcXb9RbDddl8jGDv+spi5qPa8qEHiK7FwV2KpRE9\n" - "83wGPnYsAm9BxLFb4YrLYcDFOIGULuk2FtrPS512Qea1bXASuvYXEpQNpGbnTGVs\n" - "WXI9C+yjHztqyL2h8P6mlThPY9E9ue2fCqdgixfTFIF9Dm4SLHbphUS2iw7w1JgT\n" - "69s7of9+I9l5lsJ9cozf1rxrXX4V1u/SotUuNB3Fp8oB4C1fLBEhSlMcUJirz1E8\n" - "AziMCxS+VrRPDM+zfvpIJg3JljAh3PJHDiLu902v9w+Iplu1WyoB2aPfitxEhRN0\n" - "YwIDAQAB\n" - "-----END PUBLIC KEY-----"; - -static const std::string rsa_pub1_key = "-----BEGIN PUBLIC KEY-----\n" - "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuGbXWiK3dQTyCbX5xdE4\n" - "yCuYp0AF2d15Qq1JSXT/lx8CEcXb9RbDddl8jGDv+spi5qPa8qEHiK7FwV2KpRE9\n" - "83wGPnYsAm9BxLFb4YrLYcDFOIGULuk2FtrPS512Qea1bXASuvYXEpQNpGbnTGVs\n" - "WXI9C+yjHztqyL2h8P6mlThPY9E9ue2fCqdgixfTFIF9Dm4SLHbphUS2iw7w1JgT\n" - "69s7of9+I9l5lsJ9cozf1rxrXX4V1u/SptUuNB3Fp8oB4C1fLBEhSlMcUJirz1E8\n" - "AziMCxS+VrRPDM+zfvpIJg3JljAh3PJHDiLu902v9w+Iplu1WyoB2aPfitxEhRN0\n" - "YwIDAQAB\n" - "-----END PUBLIC KEY-----"; - -static const std::string rsa_priv_key = "-----BEGIN PRIVATE KEY-----\n" - "MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC4ZtdaIrd1BPIJ\n" - "tfnF0TjIK5inQAXZ3XlCrUlJdP+XHwIRxdv1FsN12XyMYO/6ymLmo9ryoQeIrsXB\n" - "XYqlET3zfAY+diwCb0HEsVvhisthwMU4gZQu6TYW2s9LnXZB5rVtcBK69hcSlA2k\n" - "ZudMZWxZcj0L7KMfO2rIvaHw/qaVOE9j0T257Z8Kp2CLF9MUgX0ObhIsdumFRLaL\n" - "DvDUmBPr2zuh/34j2XmWwn1yjN/WvGtdfhXW79Ki1S40HcWnygHgLV8sESFKUxxQ\n" - "mKvPUTwDOIwLFL5WtE8Mz7N++kgmDcmWMCHc8kcOIu73Ta/3D4imW7VbKgHZo9+K\n" - "3ESFE3RjAgMBAAECggEBAJTEIyjMqUT24G2FKiS1TiHvShBkTlQdoR5xvpZMlYbN\n" - "tVWxUmrAGqCQ/TIjYnfpnzCDMLhdwT48Ab6mQJw69MfiXwc1PvwX1e9hRscGul36\n" - "ryGPKIVQEBsQG/zc4/L2tZe8ut+qeaK7XuYrPp8bk/X1e9qK5m7j+JpKosNSLgJj\n" - "NIbYsBkG2Mlq671irKYj2hVZeaBQmWmZxK4fw0Istz2WfN5nUKUeJhTwpR+JLUg4\n" - "ELYYoB7EO0Cej9UBG30hbgu4RyXA+VbptJ+H042K5QJROUbtnLWuuWosZ5ATldwO\n" - "u03dIXL0SH0ao5NcWBzxU4F2sBXZRGP2x/jiSLHcqoECgYEA4qD7mXQpu1b8XO8U\n" - "6abpKloJCatSAHzjgdR2eRDRx5PMvloipfwqA77pnbjTUFajqWQgOXsDTCjcdQui\n" - "wf5XAaWu+TeAVTytLQbSiTsBhrnoqVrr3RoyDQmdnwHT8aCMouOgcC5thP9vQ8Us\n" - "rVdjvRRbnJpg3BeSNimH+u9AHgsCgYEA0EzcbOltCWPHRAY7B3Ge/AKBjBQr86Kv\n" - "TdpTlxePBDVIlH+BM6oct2gaSZZoHbqPjbq5v7yf0fKVcXE4bSVgqfDJ/sZQu9Lp\n" - "PTeV7wkk0OsAMKk7QukEpPno5q6tOTNnFecpUhVLLlqbfqkB2baYYwLJR3IRzboJ\n" - "FQbLY93E8gkCgYB+zlC5VlQbbNqcLXJoImqItgQkkuW5PCgYdwcrSov2ve5r/Acz\n" - "FNt1aRdSlx4176R3nXyibQA1Vw+ztiUFowiP9WLoM3PtPZwwe4bGHmwGNHPIfwVG\n" - "m+exf9XgKKespYbLhc45tuC08DATnXoYK7O1EnUINSFJRS8cezSI5eHcbQKBgQDC\n" - "PgqHXZ2aVftqCc1eAaxaIRQhRmY+CgUjumaczRFGwVFveP9I6Gdi+Kca3DE3F9Pq\n" - "PKgejo0SwP5vDT+rOGHN14bmGJUMsX9i4MTmZUZ5s8s3lXh3ysfT+GAhTd6nKrIE\n" - "kM3Nh6HWFhROptfc6BNusRh1kX/cspDplK5x8EpJ0QKBgQDWFg6S2je0KtbV5PYe\n" - "RultUEe2C0jYMDQx+JYxbPmtcopvZQrFEur3WKVuLy5UAy7EBvwMnZwIG7OOohJb\n" - "vkSpADK6VPn9lbqq7O8cTedEHttm6otmLt8ZyEl3hZMaL3hbuRj6ysjmoFKx6CrX\n" - "rK0/Ikt5ybqUzKCMJZg2VKGTxg==\n" - "-----END PRIVATE KEY-----"; - -/** - * This will by default construct a valid jwt token, which contains exactly the - * same attributes in heeader and payload claims. In some test cases if we want - * to build a token with some invalid attributes, we must explicitly set those - * attributes. - * - * A trustfabric token example: - * Header claims - * alg: RS256 - * kid: 779112af - * typ: JWT - * x5u: https://trustfabric.vip.ebay.com/v2/k/779112af - * - * Payload claims - * iss: trustfabric - * aud: [usersessionauthsvc, protegoreg, fountauth, monstor, ...] - * cluster: 92 - * ns: sds-tess92-19 - * iat: 1610081499 - * exp: 1610083393 - * nbf: 1610081499 - * instances: 10.175.165.15 - * sub: - * uid=sdsapp,networkaddress=10.175.165.15,ou=orchmanager+l=production,o=sdstess9219,dc=tess,dc=ebay,dc=com - * ver: 2 - * vpc: production - */ - -struct TestToken { - using token_t = jwt::builder; - - TestToken() : - token{jwt::create() - .set_type("JWT") - .set_algorithm("RS256") - .set_key_id("abc123") - .set_issuer("trustfabric") - .set_header_claim("x5u", jwt::claim(std::string{"http://127.0.0.1:12346/download_key"})) - .set_audience(std::set< std::string >{"test-sisl", "protegoreg"}) - .set_issued_at(std::chrono::system_clock::now() - std::chrono::seconds(180)) - .set_not_before(std::chrono::system_clock::now() - std::chrono::seconds(180)) - .set_expires_at(std::chrono::system_clock::now() + std::chrono::seconds(180)) - .set_subject("uid=sdsapp,networkaddress=dummy_ip,ou=orchmanager+l=" - "production,o=testapp,dc=tess,dc=ebay,dc=com") - .set_payload_claim("ver", jwt::claim(std::string{"2"})) - .set_payload_claim("vpc", jwt::claim(std::string{"production"})) - .set_payload_claim("instances", jwt::claim(std::string{"dummy_ip"}))} {} - - std::string sign_rs256() { return token.sign(jwt::algorithm::rs256(rsa_pub_key, rsa_priv_key, "", "")); } - std::string sign_rs512() { return token.sign(jwt::algorithm::rs512(rsa_pub_key, rsa_priv_key, "", "")); } - token_t& get_token() { return token; } - -private: - token_t token; -}; -} // namespace sisl::testing \ No newline at end of file diff --git a/src/auth_manager/trf_client.cpp b/src/auth_manager/trf_client.cpp deleted file mode 100644 index 92e5f062..00000000 --- a/src/auth_manager/trf_client.cpp +++ /dev/null @@ -1,127 +0,0 @@ -#include -#include -#include -#include -#include - -#include -#include - -#include "sisl/auth_manager/trf_client.hpp" - -namespace sisl { -TrfClient::TrfClient() { validate_grant_path(); } - -void TrfClient::validate_grant_path() const { - uint8_t retry_limit{10}; - // Retry until the grant path is up. Might take few seconds when deployed as tess secret - while (!grant_path_exists() && retry_limit-- > 0) { - std::this_thread::sleep_for(std::chrono::seconds{1}); - } - if (!grant_path_exists()) { - throw std::runtime_error{fmt::format("trustfabric client grant path {} does not exist", - SECURITY_DYNAMIC_CONFIG(trf_client->grant_path))}; - } -} - -bool TrfClient::get_file_contents(const std::string& file_path, std::string& contents) { - try { - std::ifstream f{file_path}; - const std::string buffer{std::istreambuf_iterator< char >{f}, std::istreambuf_iterator< char >{}}; - contents = buffer; - return !contents.empty(); - } catch (...) {} - return false; -} - -void TrfClient::request_with_grant_token() { - std::string grant_token; - if (!get_file_contents(SECURITY_DYNAMIC_CONFIG(trf_client->grant_path), grant_token)) { - throw std::runtime_error( - fmt::format("could not load grant from path {}", SECURITY_DYNAMIC_CONFIG(trf_client->grant_path))); - } - - const auto client_id{ - fmt::format("ou={}+l={},o={},dc=tess,dc=ebay,dc=com", SECURITY_DYNAMIC_CONFIG(trf_client->app_inst_name), - SECURITY_DYNAMIC_CONFIG(trf_client->app_env), SECURITY_DYNAMIC_CONFIG(trf_client->app_name))}; - - cpr::Session session; - if (SECURITY_DYNAMIC_CONFIG(auth_manager->verify)) { - auto ca_file{SECURITY_DYNAMIC_CONFIG(ssl_ca_file)}; - auto cert_file{SECURITY_DYNAMIC_CONFIG(ssl_cert_file)}; - auto key_file{SECURITY_DYNAMIC_CONFIG(ssl_key_file)}; - auto sslOpts{cpr::Ssl(cpr::ssl::CaInfo{std::move(ca_file)})}; - sslOpts.SetOption(cpr::ssl::CertFile{std::move(cert_file)}); - sslOpts.SetOption(cpr::ssl::KeyFile{std::move(key_file)}); - session.SetOption(sslOpts); - } - - session.SetUrl(cpr::Url{SECURITY_DYNAMIC_CONFIG(trf_client->server)}); - std::vector< cpr::Pair > payload_data; - payload_data.emplace_back("grant_type", "authorization_code"); - payload_data.emplace_back("code", grant_token); - payload_data.emplace_back("client_id", client_id); - session.SetPayload(cpr::Payload(payload_data.begin(), payload_data.end())); - session.SetTimeout(std::chrono::milliseconds{5000}); - const auto resp{session.Post()}; - if (resp.error || resp.status_code != 200) { - LOGERROR("request grant token from server failed, error: {}, status code: {}", resp.error.message, - resp.status_code); - return; - } - - try { - const nlohmann::json resp_json = nlohmann::json::parse(resp.text); - m_expiry = std::chrono::system_clock::now() + std::chrono::seconds(resp_json["expires_in"]); - m_access_token = resp_json["access_token"]; - m_token_type = resp_json["token_type"]; - } catch ([[maybe_unused]] const nlohmann::detail::exception& e) { - LOGDEBUG("parsing token response using json failed, what: {}; trying to parse manually", e.what()); - parse_response(resp.text); - } -} - -void TrfClient::parse_response(const std::string& resp) { - try { - static std::string token1{"{\"access_token\":"}; - static std::string token2{"\"token_type\":"}; - static std::string token3{"\"expires_in\":"}; - - if (m_access_token = get_quoted_string(resp, token1); m_access_token.empty()) { return; } - if (m_token_type = get_quoted_string(resp, token2); m_access_token.empty()) { return; } - auto expiry_str = get_string(resp, token3); - if (expiry_str.empty()) { return; } - m_expiry = std::chrono::system_clock::now() + std::chrono::seconds(std::stol(expiry_str)); - } catch (const std::exception& e) { LOGERROR("failed to parse response: {}, what: {}", resp, e.what()); } -} - -std::string TrfClient::get_string(const std::string& resp, const std::string& pattern) { - auto n = resp.find(pattern); - if (n == std::string::npos) { return ""; } - auto n1 = resp.find(',', n); - if (n1 == std::string::npos) { return ""; } - return resp.substr(n + pattern.length(), n1 - n - pattern.length()); -} - -std::string TrfClient::get_quoted_string(const std::string& resp, const std::string& pattern) { - auto quoted_string{get_string(resp, pattern)}; - return quoted_string.substr(1, quoted_string.length() - 2); -} - -std::string TrfClient::get_token() { - { - std::shared_lock< std::shared_mutex > lock(m_mtx); - if (!(m_access_token.empty() || access_token_expired())) { return m_access_token; } - } - - // Not a frequent code path, occurs for the first time or when token expires - std::unique_lock< std::shared_mutex > lock(m_mtx); - request_with_grant_token(); - return m_access_token; -} - -std::string TrfClient::get_token_type() { - std::shared_lock< std::shared_mutex > lock(m_mtx); - return m_token_type; -} -} // namespace sisl diff --git a/src/grpc/CMakeLists.txt b/src/grpc/CMakeLists.txt index 439ded40..c5e31fac 100644 --- a/src/grpc/CMakeLists.txt +++ b/src/grpc/CMakeLists.txt @@ -11,17 +11,13 @@ target_sources(sisl_grpc PRIVATE rpc_client.cpp ) target_link_libraries(sisl_grpc - sisl_auth_manager gRPC::grpc++ - cpr::cpr flatbuffers::flatbuffers - jwt-cpp::jwt-cpp ${COMMON_DEPS} ) if (DEFINED ENABLE_TESTING) if (${ENABLE_TESTING}) - find_package(Pistache QUIET REQUIRED) add_subdirectory(tests) endif() -endif() +endif() \ No newline at end of file diff --git a/src/grpc/rpc_client.cpp b/src/grpc/rpc_client.cpp index abf4a053..6f36c3eb 100644 --- a/src/grpc/rpc_client.cpp +++ b/src/grpc/rpc_client.cpp @@ -21,9 +21,13 @@ GrpcBaseClient::GrpcBaseClient(const std::string& server_addr, const std::string const std::string& ssl_cert) : GrpcBaseClient::GrpcBaseClient(server_addr, nullptr, target_domain, ssl_cert) {} -GrpcBaseClient::GrpcBaseClient(const std::string& server_addr, const std::shared_ptr< sisl::TrfClient >& trf_client, +GrpcBaseClient::GrpcBaseClient(const std::string& server_addr, + const std::shared_ptr< sisl::GrpcTokenClient >& token_client, const std::string& target_domain, const std::string& ssl_cert) : - m_server_addr(server_addr), m_target_domain(target_domain), m_ssl_cert(ssl_cert), m_trf_client(trf_client) {} + m_server_addr(server_addr), + m_target_domain(target_domain), + m_ssl_cert(ssl_cert), + m_token_client(token_client) {} void GrpcBaseClient::init() { ::grpc::SslCredentialsOptions ssl_opts; @@ -132,7 +136,7 @@ void GrpcAsyncClient::GenericAsyncStub::call_unary(const grpc::ByteBuffer& reque const generic_unary_callback_t& callback, uint32_t deadline) { auto data = new GenericClientRpcDataInternal(callback); data->set_deadline(deadline); - if (m_trf_client) { data->add_metadata("authorization", m_trf_client->get_typed_token()); } + if (m_token_client) { data->add_metadata(m_token_client->get_auth_header_key(), m_token_client->get_token()); } // Note that async unary RPCs don't post a CQ tag in call data->m_generic_resp_reader_ptr = m_generic_stub->PrepareUnaryCall(&data->context(), method, request, cq()); data->m_generic_resp_reader_ptr->StartCall(); @@ -146,7 +150,7 @@ void GrpcAsyncClient::GenericAsyncStub::call_rpc(const generic_req_builder_cb_t& auto cd = new GenericClientRpcData(done_cb); builder_cb(cd->m_req); cd->set_deadline(deadline); - if (m_trf_client) { cd->add_metadata("authorization", m_trf_client->get_typed_token()); } + if (m_token_client) { cd->add_metadata(m_token_client->get_auth_header_key(), m_token_client->get_token()); } cd->m_generic_resp_reader_ptr = m_generic_stub->PrepareUnaryCall(&cd->context(), method, cd->m_req, cq()); cd->m_generic_resp_reader_ptr->StartCall(); cd->m_generic_resp_reader_ptr->Finish(&cd->reply(), &cd->status(), (void*)cd); @@ -157,6 +161,6 @@ std::unique_ptr< GrpcAsyncClient::GenericAsyncStub > GrpcAsyncClient::make_gener if (w == nullptr) { throw std::runtime_error("worker thread not available"); } return std::make_unique< GrpcAsyncClient::GenericAsyncStub >(std::make_unique< grpc::GenericStub >(m_channel), w, - m_trf_client); + m_token_client); } } // namespace sisl::grpc diff --git a/src/grpc/rpc_server.cpp b/src/grpc/rpc_server.cpp index e23af456..45ba425e 100644 --- a/src/grpc/rpc_server.cpp +++ b/src/grpc/rpc_server.cpp @@ -32,7 +32,7 @@ GrpcServer::GrpcServer(const std::string& listen_addr, uint32_t threads, const s GrpcServer::GrpcServer(listen_addr, threads, ssl_key, ssl_cert, nullptr) {} GrpcServer::GrpcServer(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, - const std::string& ssl_cert, const std::shared_ptr< sisl::AuthManager >& auth_mgr) : + const std::string& ssl_cert, const std::shared_ptr< sisl::GrpcTokenVerifier >& auth_mgr) : m_num_threads{threads}, m_auth_mgr{auth_mgr} { if (listen_addr.empty() || threads == 0) { throw std::invalid_argument("Invalid parameter to start grpc server"); } @@ -78,7 +78,7 @@ GrpcServer* GrpcServer::make(const std::string& listen_addr, uint32_t threads, c return GrpcServer::make(listen_addr, nullptr, threads, ssl_key, ssl_cert); } -GrpcServer* GrpcServer::make(const std::string& listen_addr, const std::shared_ptr< sisl::AuthManager >& auth_mgr, +GrpcServer* GrpcServer::make(const std::string& listen_addr, const std::shared_ptr< sisl::GrpcTokenVerifier >& auth_mgr, uint32_t threads, const std::string& ssl_key, const std::string& ssl_cert) { return new GrpcServer(listen_addr, threads, ssl_key, ssl_cert, auth_mgr); } @@ -139,9 +139,7 @@ void GrpcServer::shutdown() { bool GrpcServer::is_auth_enabled() const { return m_auth_mgr != nullptr; } -sisl::AuthVerifyStatus GrpcServer::auth_verify(const std::string& token, std::string& msg) const { - return m_auth_mgr->verify(token, msg); -} +grpc::Status GrpcServer::auth_verify(grpc::ServerContext const* srv_ctx) const { return m_auth_mgr->verify(srv_ctx); } bool GrpcServer::run_generic_handler_cb(const std::string& rpc_name, boost::intrusive_ptr< GenericRpcData >& rpc_data) { generic_rpc_handler_cb_t cb; @@ -216,42 +214,7 @@ bool RPCHelper::run_generic_handler_cb(GrpcServer* server, const std::string& me } grpc::Status RPCHelper::do_authorization(const GrpcServer* server, const grpc::ServerContext* srv_ctx) { - if (!server->is_auth_enabled()) { return grpc::Status(); } - auto& client_headers = srv_ctx->client_metadata(); - if (auto it = client_headers.find("authorization"); it != client_headers.end()) { - const std::string bearer{"Bearer "}; - if (it->second.starts_with(bearer)) { - auto token_ref = it->second.substr(bearer.size()); - std::string msg; - return grpc::Status(RPCHelper::to_grpc_statuscode( - server->auth_verify(std::string(token_ref.begin(), token_ref.end()), msg)), - msg); - } else { - return grpc::Status(grpc::StatusCode::UNAUTHENTICATED, - grpc::string("authorization header value does not start with 'Bearer '")); - } - } else { - return grpc::Status(grpc::StatusCode::UNAUTHENTICATED, grpc::string("missing header authorization")); - } -} - -grpc::StatusCode RPCHelper::to_grpc_statuscode(const sisl::AuthVerifyStatus status) { - grpc::StatusCode ret; - switch (status) { - case sisl::AuthVerifyStatus::OK: - ret = grpc::StatusCode::OK; - break; - case sisl::AuthVerifyStatus::UNAUTH: - ret = grpc::StatusCode::UNAUTHENTICATED; - break; - case sisl::AuthVerifyStatus::FORBIDDEN: - ret = grpc::StatusCode::PERMISSION_DENIED; - break; - default: - ret = grpc::StatusCode::UNKNOWN; - break; - } - return ret; + return (server->is_auth_enabled()) ? server->auth_verify(srv_ctx) : grpc::Status(); } } // namespace sisl::grpc diff --git a/src/grpc/tests/unit/CMakeLists.txt b/src/grpc/tests/unit/CMakeLists.txt index bd163d99..b085b2bc 100644 --- a/src/grpc/tests/unit/CMakeLists.txt +++ b/src/grpc/tests/unit/CMakeLists.txt @@ -7,7 +7,6 @@ add_executable(auth_test target_link_libraries(auth_test sisl sisl_grpc - Pistache::Pistache GTest::gmock ${COMMON_DEPS} ) diff --git a/src/grpc/tests/unit/auth_test.cpp b/src/grpc/tests/unit/auth_test.cpp index 82960526..8c34cc60 100644 --- a/src/grpc/tests/unit/auth_test.cpp +++ b/src/grpc/tests/unit/auth_test.cpp @@ -19,13 +19,10 @@ #include #include -#include -#include "basic_http_server.hpp" #include "sisl/grpc/rpc_client.hpp" #include "sisl/grpc/rpc_server.hpp" #include "grpc_helper_test.grpc.pb.h" -#include "test_token.hpp" SISL_LOGGING_INIT(logging, grpc_server) SISL_OPTIONS_ENABLE(logging) @@ -36,19 +33,8 @@ using namespace ::grpc_helper_test; using namespace ::testing; static const std::string grpc_server_addr{"0.0.0.0:12345"}; -static const std::string trf_token_server_ip{"127.0.0.1"}; -static const uint32_t trf_token_server_port{12346}; -static std::string token_response; -static void set_token_response(const std::string& raw_token) { - token_response = "{\n" - " \"access_token\": \"" + - raw_token + - "\",\n" - " \"token_type\": \"Bearer\",\n" - " \"expires_in\": 2000,\n" - " \"refresh_token\": \"dummy_refresh_token\"\n" - "}"; -} +static const std::string g_auth_header{"auth_header"}; +static const std::string g_test_token{"dummy_token"}; static const std::string GENERIC_METHOD{"generic_method"}; static const std::vector< std::pair< std::string, std::string > > grpc_metadata{ @@ -107,6 +93,20 @@ class EchoServiceImpl final { } }; +class MockTokenVerifier : public GrpcTokenVerifier { +public: + using GrpcTokenVerifier::GrpcTokenVerifier; + ::grpc::Status verify(::grpc::ServerContext const* srv_ctx) const override { + auto& client_headers = srv_ctx->client_metadata(); + if (auto it = client_headers.find(g_auth_header); it != client_headers.end()) { + if (it->second == g_test_token) { return ::grpc::Status(); } + } + return ::grpc::Status(::grpc::StatusCode::UNAUTHENTICATED, ::grpc::string("missing header authorization")); + } + + TokenVerifyStatus verify(std::string const&) const override { return TokenVerifyStatus(); } +}; + class AuthBaseTest : public ::testing::Test { public: void SetUp() override {} @@ -119,7 +119,7 @@ class AuthBaseTest : public ::testing::Test { } } - void grpc_server_start(const std::string& server_address, std::shared_ptr< AuthManager > auth_mgr) { + void grpc_server_start(const std::string& server_address, std::shared_ptr< MockTokenVerifier > auth_mgr) { LOGINFO("Start echo and ping server on {}...", server_address); m_grpc_server = GrpcServer::make(server_address, auth_mgr, 4, "", ""); m_echo_impl = new EchoServiceImpl(); @@ -184,7 +184,7 @@ class AuthBaseTest : public ::testing::Test { } protected: - std::shared_ptr< AuthManager > m_auth_mgr; + std::shared_ptr< MockTokenVerifier > m_auth_mgr; EchoServiceImpl* m_echo_impl = nullptr; GrpcServer* m_grpc_server = nullptr; std::unique_ptr< GrpcAsyncClient > m_async_grpc_client; @@ -236,34 +236,11 @@ TEST_F(AuthDisableTest, metadata) { EXPECT_TRUE(status.ok()); } -static auto const grant_path = std::string{"dummy_grant.cg"}; - -static void load_auth_settings() { - std::ofstream outfile{grant_path}; - outfile << "dummy cg contents\n"; - outfile.close(); - SECURITY_SETTINGS_FACTORY().modifiable_settings([](auto& s) { - s.auth_manager->auth_allowed_apps = "app1, testapp, app2"; - s.auth_manager->tf_token_url = "http://127.0.0.1"; - s.auth_manager->leeway = 0; - s.auth_manager->issuer = "trustfabric"; - s.trf_client->grant_path = grant_path; - s.trf_client->server = fmt::format("{}:{}/token", trf_token_server_ip, trf_token_server_port); - }); - SECURITY_SETTINGS_FACTORY().save(); -} - -static void remove_auth_settings() { - auto const grant_fs_path = std::filesystem::path{grant_path}; - EXPECT_TRUE(std::filesystem::remove(grant_fs_path)); -} - class AuthServerOnlyTest : public AuthBaseTest { public: void SetUp() override { // start grpc server with auth - load_auth_settings(); - m_auth_mgr = std::shared_ptr< AuthManager >(new AuthManager()); + m_auth_mgr = std::shared_ptr< MockTokenVerifier >(new MockTokenVerifier(g_auth_header)); grpc_server_start(grpc_server_addr, m_auth_mgr); // Client without auth @@ -274,10 +251,7 @@ class AuthServerOnlyTest : public AuthBaseTest { m_generic_stub = m_async_grpc_client->make_generic_stub("worker-2"); } - void TearDown() override { - AuthBaseTest::TearDown(); - remove_auth_settings(); - } + void TearDown() override { AuthBaseTest::TearDown(); } }; TEST_F(AuthServerOnlyTest, fail_on_no_client_auth) { @@ -296,56 +270,35 @@ TEST_F(AuthServerOnlyTest, fail_on_no_client_auth) { EXPECT_EQ(generic_status.error_code(), ::grpc::UNAUTHENTICATED); } -class TokenApiImpl : public TokenApi { +class MockGrpcTokenClient : public GrpcTokenClient { public: - void get_token_impl(Pistache::Http::ResponseWriter& response) { - LOGINFO("Sending token to client"); - response.send(Pistache::Http::Code::Ok, token_response); - } - - void get_key_impl(Pistache::Http::ResponseWriter& response) { - LOGINFO("Download rsa key"); - response.send(Pistache::Http::Code::Ok, rsa_pub_key); - } + using GrpcTokenClient::GrpcTokenClient; + std::string get_token() override { return g_test_token; } }; class AuthEnableTest : public AuthBaseTest { public: void SetUp() override { // start grpc server with auth - load_auth_settings(); - m_auth_mgr = std::shared_ptr< AuthManager >(new AuthManager()); + m_auth_mgr = std::shared_ptr< MockTokenVerifier >(new MockTokenVerifier(g_auth_header)); grpc_server_start(grpc_server_addr, m_auth_mgr); - // start token server - APIBase::init(Pistache::Address(fmt::format("{}:{}", trf_token_server_ip, trf_token_server_port)), 1); - m_token_server = std::unique_ptr< TokenApiImpl >(new TokenApiImpl()); - m_token_server->setupRoutes(); - APIBase::start(); - // Client with auth - m_trf_client = std::make_shared< TrfClient >(); - m_async_grpc_client = std::make_unique< GrpcAsyncClient >(grpc_server_addr, m_trf_client, "", ""); + m_token_client = std::make_shared< MockGrpcTokenClient >(g_auth_header); + m_async_grpc_client = std::make_unique< GrpcAsyncClient >(grpc_server_addr, m_token_client, "", ""); m_async_grpc_client->init(); GrpcAsyncClientWorker::create_worker("worker-3", 4); m_echo_stub = m_async_grpc_client->make_stub< EchoService >("worker-3"); m_generic_stub = m_async_grpc_client->make_generic_stub("worker-3"); } - void TearDown() override { - AuthBaseTest::TearDown(); - APIBase::stop(); - remove_auth_settings(); - } + void TearDown() override { AuthBaseTest::TearDown(); } protected: - std::unique_ptr< TokenApiImpl > m_token_server; - std::shared_ptr< TrfClient > m_trf_client; + std::shared_ptr< MockGrpcTokenClient > m_token_client; }; TEST_F(AuthEnableTest, allow_with_auth) { - auto raw_token = TestToken().sign_rs256(); - set_token_response(raw_token); EchoRequest req; req.set_message("dummy_msg"); EchoReply reply; @@ -382,7 +335,7 @@ TEST_F(AuthEnableTest, allow_sync_client_with_auth) { EchoReply reply; req.set_message("dummy_sync_msg"); ::grpc::ClientContext context; - context.AddMetadata("authorization", m_trf_client->get_typed_token()); + context.AddMetadata(m_token_client->get_auth_header_key(), m_token_client->get_token()); auto status = sync_client->echo_stub()->Echo(&context, req, &reply); EXPECT_TRUE(status.ok()); EXPECT_EQ(req.message(), reply.message()); diff --git a/src/grpc/tests/unit/basic_http_server.hpp b/src/grpc/tests/unit/basic_http_server.hpp deleted file mode 100644 index f01038a1..00000000 --- a/src/grpc/tests/unit/basic_http_server.hpp +++ /dev/null @@ -1,75 +0,0 @@ -/********************************************************************************* - * Modifications Copyright 2017-2019 eBay Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed - * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - * - *********************************************************************************/ -#include -#include -#include -#include -#include -#include - -#pragma once - -class APIBase { -public: - static void init(Pistache::Address addr, size_t thr) { - m_http_endpoint = std::make_shared< Pistache::Http::Endpoint >(addr); - auto flags = Pistache::Tcp::Options::ReuseAddr; - auto opts = Pistache::Http::Endpoint::options().threadsName("http_server").threads(thr).flags(flags); - m_http_endpoint->init(opts); - } - - static void start() { - m_http_endpoint->setHandler(m_router.handler()); - m_http_endpoint->serveThreaded(); - } - - static void stop() { m_http_endpoint->shutdown(); } - - virtual ~APIBase() {} - -protected: - static std::shared_ptr< Pistache::Http::Endpoint > m_http_endpoint; - static Pistache::Rest::Router m_router; -}; - -std::shared_ptr< Pistache::Http::Endpoint > APIBase::m_http_endpoint; -Pistache::Rest::Router APIBase::m_router; - -class TokenApi : public APIBase { -public: - void setupRoutes() { - Pistache::Rest::Routes::Post(m_router, "/token", - Pistache::Rest::Routes::bind(&TokenApi::get_token_handler, this)); - Pistache::Rest::Routes::Get(m_router, "/download_key", - Pistache::Rest::Routes::bind(&TokenApi::get_key_handler, this)); - } - - void get_token_handler(const Pistache::Rest::Request&, Pistache::Http::ResponseWriter response) { - this->get_token_impl(response); - } - - void get_key_handler(const Pistache::Rest::Request&, Pistache::Http::ResponseWriter response) { - - this->get_key_impl(response); - } - - virtual void get_token_impl(Pistache::Http::ResponseWriter& response) = 0; - virtual void get_key_impl(Pistache::Http::ResponseWriter& response) = 0; - - virtual ~TokenApi() { - Pistache::Rest::Routes::Remove(m_router, Pistache::Http::Method::Post, "/token"); - Pistache::Rest::Routes::Remove(m_router, Pistache::Http::Method::Get, "/download_key"); - } -}; diff --git a/src/grpc/tests/unit/test_token.hpp b/src/grpc/tests/unit/test_token.hpp deleted file mode 100644 index a50bcdad..00000000 --- a/src/grpc/tests/unit/test_token.hpp +++ /dev/null @@ -1,86 +0,0 @@ -/********************************************************************************* - * Modifications Copyright 2017-2019 eBay Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed - * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - * - *********************************************************************************/ -#pragma once - -namespace sisl::grpc::testing { -// public and private keys for unit test - -static const std::string rsa_pub_key = "-----BEGIN PUBLIC KEY-----\n" - "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuGbXWiK3dQTyCbX5xdE4\n" - "yCuYp0AF2d15Qq1JSXT/lx8CEcXb9RbDddl8jGDv+spi5qPa8qEHiK7FwV2KpRE9\n" - "83wGPnYsAm9BxLFb4YrLYcDFOIGULuk2FtrPS512Qea1bXASuvYXEpQNpGbnTGVs\n" - "WXI9C+yjHztqyL2h8P6mlThPY9E9ue2fCqdgixfTFIF9Dm4SLHbphUS2iw7w1JgT\n" - "69s7of9+I9l5lsJ9cozf1rxrXX4V1u/SotUuNB3Fp8oB4C1fLBEhSlMcUJirz1E8\n" - "AziMCxS+VrRPDM+zfvpIJg3JljAh3PJHDiLu902v9w+Iplu1WyoB2aPfitxEhRN0\n" - "YwIDAQAB\n" - "-----END PUBLIC KEY-----"; - -static const std::string rsa_priv_key = "-----BEGIN PRIVATE KEY-----\n" - "MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC4ZtdaIrd1BPIJ\n" - "tfnF0TjIK5inQAXZ3XlCrUlJdP+XHwIRxdv1FsN12XyMYO/6ymLmo9ryoQeIrsXB\n" - "XYqlET3zfAY+diwCb0HEsVvhisthwMU4gZQu6TYW2s9LnXZB5rVtcBK69hcSlA2k\n" - "ZudMZWxZcj0L7KMfO2rIvaHw/qaVOE9j0T257Z8Kp2CLF9MUgX0ObhIsdumFRLaL\n" - "DvDUmBPr2zuh/34j2XmWwn1yjN/WvGtdfhXW79Ki1S40HcWnygHgLV8sESFKUxxQ\n" - "mKvPUTwDOIwLFL5WtE8Mz7N++kgmDcmWMCHc8kcOIu73Ta/3D4imW7VbKgHZo9+K\n" - "3ESFE3RjAgMBAAECggEBAJTEIyjMqUT24G2FKiS1TiHvShBkTlQdoR5xvpZMlYbN\n" - "tVWxUmrAGqCQ/TIjYnfpnzCDMLhdwT48Ab6mQJw69MfiXwc1PvwX1e9hRscGul36\n" - "ryGPKIVQEBsQG/zc4/L2tZe8ut+qeaK7XuYrPp8bk/X1e9qK5m7j+JpKosNSLgJj\n" - "NIbYsBkG2Mlq671irKYj2hVZeaBQmWmZxK4fw0Istz2WfN5nUKUeJhTwpR+JLUg4\n" - "ELYYoB7EO0Cej9UBG30hbgu4RyXA+VbptJ+H042K5QJROUbtnLWuuWosZ5ATldwO\n" - "u03dIXL0SH0ao5NcWBzxU4F2sBXZRGP2x/jiSLHcqoECgYEA4qD7mXQpu1b8XO8U\n" - "6abpKloJCatSAHzjgdR2eRDRx5PMvloipfwqA77pnbjTUFajqWQgOXsDTCjcdQui\n" - "wf5XAaWu+TeAVTytLQbSiTsBhrnoqVrr3RoyDQmdnwHT8aCMouOgcC5thP9vQ8Us\n" - "rVdjvRRbnJpg3BeSNimH+u9AHgsCgYEA0EzcbOltCWPHRAY7B3Ge/AKBjBQr86Kv\n" - "TdpTlxePBDVIlH+BM6oct2gaSZZoHbqPjbq5v7yf0fKVcXE4bSVgqfDJ/sZQu9Lp\n" - "PTeV7wkk0OsAMKk7QukEpPno5q6tOTNnFecpUhVLLlqbfqkB2baYYwLJR3IRzboJ\n" - "FQbLY93E8gkCgYB+zlC5VlQbbNqcLXJoImqItgQkkuW5PCgYdwcrSov2ve5r/Acz\n" - "FNt1aRdSlx4176R3nXyibQA1Vw+ztiUFowiP9WLoM3PtPZwwe4bGHmwGNHPIfwVG\n" - "m+exf9XgKKespYbLhc45tuC08DATnXoYK7O1EnUINSFJRS8cezSI5eHcbQKBgQDC\n" - "PgqHXZ2aVftqCc1eAaxaIRQhRmY+CgUjumaczRFGwVFveP9I6Gdi+Kca3DE3F9Pq\n" - "PKgejo0SwP5vDT+rOGHN14bmGJUMsX9i4MTmZUZ5s8s3lXh3ysfT+GAhTd6nKrIE\n" - "kM3Nh6HWFhROptfc6BNusRh1kX/cspDplK5x8EpJ0QKBgQDWFg6S2je0KtbV5PYe\n" - "RultUEe2C0jYMDQx+JYxbPmtcopvZQrFEur3WKVuLy5UAy7EBvwMnZwIG7OOohJb\n" - "vkSpADK6VPn9lbqq7O8cTedEHttm6otmLt8ZyEl3hZMaL3hbuRj6ysjmoFKx6CrX\n" - "rK0/Ikt5ybqUzKCMJZg2VKGTxg==\n" - "-----END PRIVATE KEY-----"; - -struct TestToken { - using token_t = jwt::builder; - - TestToken() : - token{jwt::create() - .set_type("JWT") - .set_algorithm("RS256") - .set_key_id("abc123") - .set_issuer("trustfabric") - .set_header_claim("x5u", jwt::claim(std::string{"http://127.0.0.1:12346/download_key"})) - .set_audience(std::set< std::string >{"test-sisl", "protegoreg"}) - .set_issued_at(std::chrono::system_clock::now() - std::chrono::seconds(180)) - .set_not_before(std::chrono::system_clock::now() - std::chrono::seconds(180)) - .set_expires_at(std::chrono::system_clock::now() + std::chrono::seconds(180)) - .set_subject("uid=sdsapp,networkaddress=dummy_ip,ou=orchmanager+l=" - "production,o=testapp,dc=tess,dc=ebay,dc=com") - .set_payload_claim("ver", jwt::claim(std::string{"2"})) - .set_payload_claim("vpc", jwt::claim(std::string{"production"})) - .set_payload_claim("instances", jwt::claim(std::string{"dummy_ip"}))} {} - - std::string sign_rs256() { return token.sign(jwt::algorithm::rs256(rsa_pub_key, rsa_priv_key, "", "")); } - std::string sign_rs512() { return token.sign(jwt::algorithm::rs512(rsa_pub_key, rsa_priv_key, "", "")); } - token_t& get_token() { return token; } - -private: - token_t token; -}; -} // namespace sisl::grpc::testing From 94c6f73bd813e93191b06e62cfc435eca7584df8 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 7 Aug 2023 16:31:04 -0600 Subject: [PATCH 339/385] Update dependencies for 10.x (#158) --- .github/workflows/build_dependencies.yml | 2 +- .jenkins/Jenkinsfile | 4 +-- 3rd_party/folly/conandata.yml | 4 +-- 3rd_party/folly/conanfile.py | 3 +-- conanfile.py | 26 +++++++++---------- include/sisl/grpc/rpc_client.hpp | 8 +++++- include/sisl/utility/enum.hpp | 3 +++ src/grpc/tests/function/echo_async_client.cpp | 4 +-- src/grpc/tests/unit/auth_test.cpp | 2 +- src/logging/logging.cpp | 3 ++- src/metrics/metrics_group_impl.cpp | 2 +- src/metrics/tests/wrapper_test.cpp | 2 +- 12 files changed, 36 insertions(+), 27 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 1a7720ba..48b5dd0f 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -103,7 +103,7 @@ jobs: - name: Export Recipes run: | conan export 3rd_party/breakpad breakpad/cci.20230127@ - conan export 3rd_party/folly folly/2022.01.31.00@ + conan export 3rd_party/folly folly/nu2.2022.01.31.00@ conan export 3rd_party/gperftools conan export 3rd_party/jemalloc conan export 3rd_party/prerelease_dummy diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 9ac4c6fd..87c7003b 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -16,7 +16,6 @@ pipeline { steps { script { sh(script: "sed -Ei 's, version = .*\"([[:digit:]]+\\.[[:digit:]]+\\.[[:digit:]]+).*, version = \"\\1-${env.BUILD_NUMBER}\",' conanfile.py") - sh(script: "sed -Ei 's,#LIBCURLFIXTOKEN.*,self.requires(\"libcurl/7.86.0\"\\, override=True),' conanfile.py") BUILD_MISSING = "--build missing" } } @@ -94,7 +93,8 @@ pipeline { */ stage("Compile") { steps { - sh "conan create ${BUILD_MISSING} -o ${PROJECT}:sanitize=True -pr debug . ${PROJECT}/${TAG} ; \ + sh "conan export 3rd_party/folly folly/nu2.2022.01.31.00@ ; \ + conan create ${BUILD_MISSING} -o ${PROJECT}:sanitize=True -pr debug . ${PROJECT}/${TAG} ; \ conan remove -f ${PROJECT}/${TAG} ; \ conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG} ; \ conan create ${BUILD_MISSING} -o sisl:malloc_impl=tcmalloc -o sisl:prerelease=True -pr test . ${PROJECT}/${TAG} ; \ diff --git a/3rd_party/folly/conandata.yml b/3rd_party/folly/conandata.yml index 75a25a20..28a5d757 100644 --- a/3rd_party/folly/conandata.yml +++ b/3rd_party/folly/conandata.yml @@ -5,7 +5,7 @@ sources: "2020.08.10.00": url: "https://github.com/facebook/folly/archive/v2020.08.10.00.tar.gz" sha256: "e81140d04a4e89e3f848e528466a9b3d3ae37d7eeb9e65467fca50d70918eef6" - "2022.01.31.00": + "nu2.2022.01.31.00": url: "https://github.com/facebook/folly/archive/v2022.01.31.00.tar.gz" sha256: "d764b9a7832d967bb7cfea4bcda15d650315aa4d559fde1da2a52b015cd88b9c" patches: @@ -43,7 +43,7 @@ patches: base_path: "source_subfolder" - patch_file: "patches/0015-benchmark-format-macros.patch" base_path: "source_subfolder" - "2022.01.31.00": + "nu2.2022.01.31.00": - patch_file: "patches/0016-find-packages.patch" base_path: "source_subfolder" - patch_file: "patches/0017-compiler-flags.patch" diff --git a/3rd_party/folly/conanfile.py b/3rd_party/folly/conanfile.py index 06dc6965..fa8b46d7 100755 --- a/3rd_party/folly/conanfile.py +++ b/3rd_party/folly/conanfile.py @@ -143,9 +143,8 @@ def validate(self): if self.options.get_safe("use_sse4_2") and str(self.settings.arch) not in ['x86', 'x86_64']: raise ConanInvalidConfiguration(f"{self.ref} can use the option use_sse4_2 only on x86 and x86_64 archs.") - # FIXME: Freeze max. CMake version at 3.16.2 to fix the Linux build def build_requirements(self): - self.build_requires("cmake/3.16.9") + self.build_requires("cmake/3.27.0") def source(self): files.get(self, **self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True) diff --git a/conanfile.py b/conanfile.py index 2a883e80..83db889d 100644 --- a/conanfile.py +++ b/conanfile.py @@ -47,8 +47,9 @@ class SISLConan(ConanFile): ) def build_requirements(self): - self.build_requires("benchmark/1.7.1") - self.build_requires("gtest/1.13.0") + self.build_requires("benchmark/1.8.2") + self.build_requires("cmake/3.27.0") + self.build_requires("gtest/1.14.0") def requirements(self): # Custom packages @@ -63,26 +64,25 @@ def requirements(self): # Linux Specific Support if self.settings.os in ["Linux"]: - self.requires("folly/2022.01.31.00") + self.requires("folly/nu2.2022.01.31.00") self.requires("userspace-rcu/0.11.4") # Generic packages (conan-center) - self.requires("boost/1.79.0") + self.requires("boost/1.82.0") if self.settings.os in ["Linux"]: self.requires("breakpad/cci.20230127") - self.requires("cxxopts/2.2.1") + self.requires("cxxopts/3.1.1") self.requires("flatbuffers/1.12.0") - self.requires("grpc/1.48.0") + self.requires("grpc/1.50.1") self.requires("nlohmann_json/3.11.2") self.requires("prometheus-cpp/1.1.0") - self.requires("spdlog/1.11.0") + self.requires("spdlog/1.12.0") self.requires("zmarok-semver/1.1.0") - self.requires("fmt/8.1.1", override=True) - #LIBCURLFIXTOKEN - self.requires("libevent/2.1.12", override=True) - self.requires("openssl/1.1.1q", override=True) - self.requires("xz_utils/5.2.5", override=True) - self.requires("zlib/1.2.12", override=True) + self.requires("fmt/10.0.0", override=True) + self.requires("libcurl/8.0.1", override=True) + self.requires("openssl/3.1.1", override=True) + self.requires("xz_utils/5.2.5", override=True) + self.requires("zlib/1.2.13", override=True) def validate(self): if self.info.settings.compiler.cppstd: diff --git a/include/sisl/grpc/rpc_client.hpp b/include/sisl/grpc/rpc_client.hpp index 52d50c15..b06c1470 100644 --- a/include/sisl/grpc/rpc_client.hpp +++ b/include/sisl/grpc/rpc_client.hpp @@ -33,6 +33,12 @@ #include #include +#include + +namespace grpc { +inline auto format_as(StatusCode s) { return fmt::underlying(s); } +} // namespace grpc + namespace sisl { /** @@ -385,4 +391,4 @@ class GrpcAsyncClient : public GrpcBaseClient { std::unique_ptr< GenericAsyncStub > make_generic_stub(const std::string& worker); }; -} // namespace sisl::grpc +} // namespace sisl diff --git a/include/sisl/utility/enum.hpp b/include/sisl/utility/enum.hpp index 37b8bbb7..8df4952a 100644 --- a/include/sisl/utility/enum.hpp +++ b/include/sisl/utility/enum.hpp @@ -27,6 +27,8 @@ #include #include +#include + template < typename EnumType > class EnumSupportBase { public: @@ -117,6 +119,7 @@ class EnumSupportBase { #define BASE_ENUM(FQEnumName, EnumName, Underlying, ...) \ enum class FQEnumName : Underlying { __VA_ARGS__ }; \ + inline auto format_as(FQEnumName e) { return fmt::underlying(e); } \ \ struct FQEnumName##Support : EnumSupportBase< EnumName > { \ typedef EnumName enum_type; \ diff --git a/src/grpc/tests/function/echo_async_client.cpp b/src/grpc/tests/function/echo_async_client.cpp index de36ff1d..064b0042 100644 --- a/src/grpc/tests/function/echo_async_client.cpp +++ b/src/grpc/tests/function/echo_async_client.cpp @@ -306,7 +306,7 @@ class TestServer { server->register_generic_rpc(GENERIC_METHOD, [this](boost::intrusive_ptr< GenericRpcData >& rpc_data) { rpc_data->set_comp_cb([this](boost::intrusive_ptr< GenericRpcData >&) { num_completions++; }); if ((++num_calls % 2) == 0) { - LOGDEBUGMOD(grpc_server, "respond async generic request, call_num {}", num_calls); + LOGDEBUGMOD(grpc_server, "respond async generic request, call_num {}", num_calls.load()); std::thread([this, rpc = rpc_data] { set_response(rpc->request(), rpc->response()); rpc->send_response(); @@ -321,7 +321,7 @@ class TestServer { bool compare_counters() { if (num_calls != num_completions) { - LOGERROR("num calls: {}, num_completions = {}", num_calls, num_completions); + LOGERROR("num calls: {}, num_completions = {}", num_calls.load(), num_completions.load()); return false; } return true; diff --git a/src/grpc/tests/unit/auth_test.cpp b/src/grpc/tests/unit/auth_test.cpp index 8c34cc60..82f26354 100644 --- a/src/grpc/tests/unit/auth_test.cpp +++ b/src/grpc/tests/unit/auth_test.cpp @@ -59,7 +59,7 @@ class EchoServiceImpl final { if (it == client_headers.end()) { rpc_data->set_status(::grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, ::grpc::string())); } else if (it->second != val) { - LOGERROR("wrong value, expected = {}, actual = {}", val, it->second) + LOGERROR("wrong value, expected = {}, actual = {}", val, it->second.data()) rpc_data->set_status(::grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, ::grpc::string())); } } diff --git a/src/logging/logging.cpp b/src/logging/logging.cpp index df32d266..d47e5c19 100644 --- a/src/logging/logging.cpp +++ b/src/logging/logging.cpp @@ -154,7 +154,8 @@ static std::filesystem::path get_base_dir() { if (fs::is_symlink(sym_path)) { fs::remove(sym_path); } fs::create_directory_symlink(cur_log_dir, sym_path); } catch (std::exception& e) { - LOGINFO("Unable to create latest symlink={} to log dir={}, ignoring symlink creation\n", sym_path, log_dir); + LOGINFO("Unable to create latest symlink={} to log dir={}, ignoring symlink creation\n", sym_path.string(), + log_dir.string()); } return cur_log_dir; } else { diff --git a/src/metrics/metrics_group_impl.cpp b/src/metrics/metrics_group_impl.cpp index 1a9e64c4..0b709bf4 100644 --- a/src/metrics/metrics_group_impl.cpp +++ b/src/metrics/metrics_group_impl.cpp @@ -240,7 +240,7 @@ nlohmann::json MetricsGroupImpl::get_result_in_json(bool need_latest) { HistogramDynamicInfo& h = hist_dynamic_info(idx); if (h.is_histogram_reporter()) { hist_entries[hist_static_info(idx).desc()] = - fmt::format("{:#} / {:#} / {:#} / {:#}", h.average(result), + fmt::format("{:.1f} / {:.1f} / {:.1f} / {:.1f}", h.average(result), h.percentile(result, hist_static_info(idx).get_boundaries(), 50), h.percentile(result, hist_static_info(idx).get_boundaries(), 95), h.percentile(result, hist_static_info(idx).get_boundaries(), 99)); diff --git a/src/metrics/tests/wrapper_test.cpp b/src/metrics/tests/wrapper_test.cpp index 42f2a298..1a505d11 100644 --- a/src/metrics/tests/wrapper_test.cpp +++ b/src/metrics/tests/wrapper_test.cpp @@ -218,7 +218,7 @@ nlohmann::json expected = { {"Total memory utilization", 980} }}, {"Histograms percentiles (usecs) avg/50/95/99", { - {"Distribution of request per transactions", "18.25 / 15.0 / 31.0 / 31.0"} + {"Distribution of request per transactions", "18.2 / 15.0 / 31.0 / 31.0"} }} }} }} From e8bcd721a46b6b694d74540d8d4b1a7b2e190636 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 7 Aug 2023 16:56:44 -0600 Subject: [PATCH 340/385] Require cmake --- conanfile.py | 1 + 1 file changed, 1 insertion(+) diff --git a/conanfile.py b/conanfile.py index b4cfe63d..5077478e 100644 --- a/conanfile.py +++ b/conanfile.py @@ -48,6 +48,7 @@ class SISLConan(ConanFile): def build_requirements(self): self.build_requires("benchmark/1.7.1") + self.build_requires("cmake/3.27.0") self.build_requires("gtest/1.13.0") def requirements(self): From 60e85b3e85c224e29bbee15e69e4ccfeb37103d9 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 7 Aug 2023 17:11:56 -0600 Subject: [PATCH 341/385] Does nothing --- conanfile.py | 1 - 1 file changed, 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index 5077478e..b4cfe63d 100644 --- a/conanfile.py +++ b/conanfile.py @@ -48,7 +48,6 @@ class SISLConan(ConanFile): def build_requirements(self): self.build_requires("benchmark/1.7.1") - self.build_requires("cmake/3.27.0") self.build_requires("gtest/1.13.0") def requirements(self): From ccd82f07a7d751f52b2331938acc51ebb19ba33b Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 10 Aug 2023 09:35:52 -0600 Subject: [PATCH 342/385] Fix ENUM formatter to use string, not integer value. (#159) --- conanfile.py | 2 +- include/sisl/utility/enum.hpp | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/conanfile.py b/conanfile.py index 83db889d..601e1561 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "10.0.1" + version = "10.0.2" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" diff --git a/include/sisl/utility/enum.hpp b/include/sisl/utility/enum.hpp index 8df4952a..05896b12 100644 --- a/include/sisl/utility/enum.hpp +++ b/include/sisl/utility/enum.hpp @@ -119,7 +119,6 @@ class EnumSupportBase { #define BASE_ENUM(FQEnumName, EnumName, Underlying, ...) \ enum class FQEnumName : Underlying { __VA_ARGS__ }; \ - inline auto format_as(FQEnumName e) { return fmt::underlying(e); } \ \ struct FQEnumName##Support : EnumSupportBase< EnumName > { \ typedef EnumName enum_type; \ @@ -135,6 +134,9 @@ class EnumSupportBase { return s_instance; \ }; \ }; \ + [[nodiscard]] inline auto format_as(FQEnumName##Support::enum_type e) { \ + return FQEnumName##Support::instance().get_name(e); \ + } \ [[nodiscard]] inline FQEnumName##Support::enum_type operator|(const FQEnumName##Support::enum_type a, \ const FQEnumName##Support::enum_type b) { \ return static_cast< FQEnumName##Support::enum_type >(static_cast< FQEnumName##Support::underlying_type >(a) | \ @@ -162,7 +164,7 @@ class EnumSupportBase { const FQEnumName##Support::enum_type es) { \ std::basic_ostringstream< charT, traits > out_stream_copy{}; \ out_stream_copy.copyfmt(out_stream); \ - out_stream_copy << FQEnumName##Support::instance().get_name(es); \ + out_stream_copy << fmt::format("{}", es); \ out_stream << out_stream_copy.str(); \ return out_stream; \ } \ From 6ee970328c0059a6eeb6e558970ed0d8a3ea29b8 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 23 Aug 2023 10:41:37 -0600 Subject: [PATCH 343/385] Change how coverage and sanitize works. (#162) --- .github/workflows/build_dependencies.yml | 31 ++++++------ ...{merge_conan_build.yml => conan_build.yml} | 41 ++++++++-------- .github/workflows/pr_conan_build.yml | 47 ------------------- .jenkins/Jenkinsfile | 45 ++---------------- conanfile.py | 35 +++++++------- 5 files changed, 57 insertions(+), 142 deletions(-) rename .github/workflows/{merge_conan_build.yml => conan_build.yml} (69%) delete mode 100644 .github/workflows/pr_conan_build.yml diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 48b5dd0f..8a78ad74 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -20,6 +20,10 @@ on: required: false type: string default: 'False' + tooling: + required: false + type: string + default: 'None' testing: required: false type: string @@ -61,6 +65,13 @@ on: - 'True' - 'False' default: 'False' + tooling: + required: false + type: choice + - 'Sanitize' + - 'Coverage' + - 'None' + default: 'None' testing: description: 'Build and Run' required: true @@ -113,9 +124,11 @@ jobs: - name: Build Cache run: | + coverage=$([[ "${{ inputs.tooling }}" == "Coverage" ]] && echo "True" || echo "False") conan install \ -o prerelease=${{ inputs.prerelease }} \ -o malloc_impl=${{ inputs.malloc-impl }} \ + -o coverage=True \ -s build_type=${{ inputs.build-type }} \ --build missing \ . @@ -129,29 +142,19 @@ jobs: - name: Code Coverage Run run: | - conan install \ - -o prerelease=${{ inputs.prerelease }} \ - -o malloc_impl=${{ inputs.malloc-impl }} \ - -o coverage=True \ - -s build_type=${{ inputs.build-type }} \ - --build missing \ - . conan build . - if: ${{ inputs.testing == 'True' && inputs.platform == 'ubuntu-22.04' && inputs.build-type == 'Debug' && inputs.malloc-impl == 'libc' && inputs.prerelease == 'False' }} + if: ${{ inputs.testing == 'True' && inputs.tooling == 'Coverage' }} - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v3 with: token: ${{ secrets.CODECOV_TOKEN }} gcov: true - if: ${{ inputs.testing == 'True' && inputs.platform == 'ubuntu-22.04' && inputs.build-type == 'Debug' && inputs.malloc-impl == 'libc' && inputs.prerelease == 'False' }} + if: ${{ inputs.testing == 'True' && inputs.tooling == 'Coverage' }} - name: Create and Test Package run: | - sanitize=$([[ "${{ inputs.build-type }}" == "Debug" && \ - "${{ inputs.malloc-impl }}" == "libc" && \ - "${{ inputs.prerelease }}" == "True" ]] && \ - echo "True" || echo "False") + sanitize=$([[ "${{ inputs.tooling }}" == "Sanitize" ]] && echo "True" || echo "False") conan create \ -o sisl:prerelease=${{ inputs.prerelease }} \ -o sisl:malloc_impl=${{ inputs.malloc-impl }} \ @@ -159,4 +162,4 @@ jobs: -s build_type=${{ inputs.build-type }} \ --build missing \ . - if: ${{ inputs.testing == 'True' && ( inputs.platform != 'ubuntu-22.04' || inputs.build-type != 'Debug' || inputs.malloc-impl != 'libc' || inputs.prerelease != 'False' ) }} + if: ${{ inputs.testing == 'True' && inputs.tooling != 'Coverage' }} diff --git a/.github/workflows/merge_conan_build.yml b/.github/workflows/conan_build.yml similarity index 69% rename from .github/workflows/merge_conan_build.yml rename to .github/workflows/conan_build.yml index 8ff164c6..300eb84a 100644 --- a/.github/workflows/merge_conan_build.yml +++ b/.github/workflows/conan_build.yml @@ -5,6 +5,12 @@ on: push: branches: - stable/v8.x + - stable/v9.x + - master + pull_request: + branches: + - stable/v8.x + - stable/v9.x - master jobs: @@ -12,32 +18,24 @@ jobs: strategy: fail-fast: false matrix: - platform: ["ubuntu-22.04", "ubuntu-20.04", "macos-13"] + platform: ["ubuntu-22.04"] build-type: ["Debug", "Release"] - malloc-impl: ["libc", "tcmalloc", "jemalloc"] + malloc-impl: ["libc", "tcmalloc"] prerelease: ["True", "False"] + tooling: ["Sanitize", "Coverage", "None"] exclude: - build-type: Debug - platform: ubuntu-20.04 - - malloc-impl: tcmalloc - platform: ubuntu-20.04 - - malloc-impl: jemalloc - platform: ubuntu-20.04 - - build-type: Debug - platform: macos-13 - - malloc-impl: tcmalloc - platform: macos-13 - - malloc-impl: jemalloc - platform: macos-13 - - malloc-impl: jemalloc - build-type: Debug - - malloc-impl: jemalloc prerelease: "False" - - malloc-impl: libc - build-type: Release - platform: ubuntu-22.04 - - prerelease: "True" - platform: ubuntu-20.04 + - build-type: Debug + tooling: None + - build-type: Debug + malloc-impl: tcmalloc + - build-type: Release + malloc-impl: libc + - build-type: Release + tooling: Sanitize + - build-type: Release + tooling: Coverage uses: ./.github/workflows/build_dependencies.yml with: platform: ${{ matrix.platform }} @@ -45,6 +43,7 @@ jobs: build-type: ${{ matrix.build-type }} malloc-impl: ${{ matrix.malloc-impl }} prerelease: ${{ matrix.prerelease }} + tooling: ${{ matrix.tooling }} testing: 'True' ChainBuild: runs-on: "ubuntu-22.04" diff --git a/.github/workflows/pr_conan_build.yml b/.github/workflows/pr_conan_build.yml deleted file mode 100644 index 07b61b22..00000000 --- a/.github/workflows/pr_conan_build.yml +++ /dev/null @@ -1,47 +0,0 @@ -name: Sisl PR Build - -on: - pull_request: - branches: - - stable/v8.x - - master - -jobs: - Build: - strategy: - fail-fast: false - matrix: - platform: ["ubuntu-22.04", "ubuntu-20.04", "macos-13"] - build-type: ["Debug", "Release"] - malloc-impl: ["libc", "tcmalloc", "jemalloc"] - prerelease: ["True", "False"] - exclude: - - build-type: Debug - platform: ubuntu-20.04 - - malloc-impl: tcmalloc - platform: ubuntu-20.04 - - malloc-impl: jemalloc - platform: ubuntu-20.04 - - build-type: Debug - platform: macos-13 - - malloc-impl: tcmalloc - platform: macos-13 - - malloc-impl: jemalloc - platform: macos-13 - - malloc-impl: jemalloc - build-type: Debug - - malloc-impl: jemalloc - prerelease: "False" - - malloc-impl: libc - build-type: Release - platform: ubuntu-22.04 - - prerelease: "True" - platform: ubuntu-20.04 - uses: ./.github/workflows/build_dependencies.yml - with: - platform: ${{ matrix.platform }} - branch: ${{ github.ref }} - build-type: ${{ matrix.build-type }} - malloc-impl: ${{ matrix.malloc-impl }} - prerelease: ${{ matrix.prerelease }} - testing: 'True' diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 87c7003b..265fda03 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -52,53 +52,14 @@ pipeline { } } -/* Commented out until unit tests are available - stage('Coverage') { - when { not { - branch "${STABLE_BRANCH}" - } } - - stages { - stage("Adjust Sonar Branch") { - when { - not { - branch "${TARGET_BRANCH}" - } - } - steps { - sh "echo \"sonar.branch.target=${TARGET_BRANCH}\" >> sonar-project.properties" - } - } - stage("Code Coverage") { - steps { - slackSend channel: '#sds-ci', message: "*${PROJECT}:${TAG}* is undergoing Code Coverage." - sh "echo \"sonar.branch.name=${BRANCH_NAME}\" >> sonar-project.properties" - sh "conan install -o sisl:prerelease=True -pr debug ${BUILD_MISSING} -o ${PROJECT}:coverage=True ." - sh "build-wrapper-linux-x86-64 --out-dir /tmp/sonar conan build ." - sh "find . -name \"*.gcno\" -exec gcov {} \\;" - withSonarQubeEnv('sds-sonar') { - sh "sonar-scanner -Dsonar.projectBaseDir=. -Dsonar.projectVersion=\"${VER}\"" - } - } - } - stage("Quality Gate") { - steps { - timeout(time: 5, unit: 'MINUTES') { - waitForQualityGate abortPipeline: false - } - } - } - } - } -*/ stage("Compile") { steps { sh "conan export 3rd_party/folly folly/nu2.2022.01.31.00@ ; \ - conan create ${BUILD_MISSING} -o ${PROJECT}:sanitize=True -pr debug . ${PROJECT}/${TAG} ; \ + conan create ${BUILD_MISSING} -pr debug -o ${PROJECT}:sanitize=True . ${PROJECT}/${TAG} ; \ conan remove -f ${PROJECT}/${TAG} ; \ conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG} ; \ - conan create ${BUILD_MISSING} -o sisl:malloc_impl=tcmalloc -o sisl:prerelease=True -pr test . ${PROJECT}/${TAG} ; \ - conan create ${BUILD_MISSING} -o sisl:malloc_impl=tcmalloc -pr test . ${PROJECT}/${TAG} ; \ + conan create ${BUILD_MISSING} -pr test -o ${PROJECT}:malloc_impl=tcmalloc . ${PROJECT}/${TAG} ; \ + conan create ${BUILD_MISSING} -pr test -o ${PROJECT}:prerelease=True -o ${PROJECT}:malloc_impl=tcmalloc . ${PROJECT}/${TAG} ; \ " } } diff --git a/conanfile.py b/conanfile.py index 601e1561..1720b8af 100644 --- a/conanfile.py +++ b/conanfile.py @@ -46,6 +46,23 @@ class SISLConan(ConanFile): "src/*", ) + def validate(self): + if self.info.settings.compiler.cppstd: + check_min_cppstd(self, 20) + + def configure(self): + if self.settings.compiler in ["gcc"]: + self.options['pistache'].with_ssl: True + if self.options.shared: + del self.options.fPIC + if self.settings.build_type == "Debug": + self.options.prerelease = True + if self.options.coverage and self.options.sanitize: + raise ConanInvalidConfiguration("Sanitizer does not work with Code Coverage!") + if not self.options.testing: + if self.options.coverage or self.options.sanitize: + raise ConanInvalidConfiguration("Coverage/Sanitizer requires Testing!") + def build_requirements(self): self.build_requires("benchmark/1.8.2") self.build_requires("cmake/3.27.0") @@ -84,24 +101,6 @@ def requirements(self): self.requires("xz_utils/5.2.5", override=True) self.requires("zlib/1.2.13", override=True) - def validate(self): - if self.info.settings.compiler.cppstd: - check_min_cppstd(self, 20) - - def configure(self): - if self.settings.compiler in ["gcc"]: - self.options['pistache'].with_ssl: True - if self.options.shared: - del self.options.fPIC - if self.settings.build_type == "Debug": - if self.options.coverage and self.options.sanitize: - raise ConanInvalidConfiguration("Sanitizer does not work with Code Coverage!") - if self.options.coverage or self.options.sanitize: - self.options.malloc_impl = 'libc' - if not self.options.testing: - if self.options.coverage or self.options.sanitize: - raise ConanInvalidConfiguration("Coverage/Sanitizer requires Testing!") - def build(self): cmake = CMake(self) From 58ef390fa89571dccdb12d151f39a8c885f912da Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 23 Aug 2023 09:28:45 -0700 Subject: [PATCH 344/385] Publish sanitized build. --- .github/workflows/build_dependencies.yml | 33 ++++++++++++++---------- .jenkins/Jenkinsfile | 1 - 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 8a78ad74..bfbbfccd 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -124,11 +124,9 @@ jobs: - name: Build Cache run: | - coverage=$([[ "${{ inputs.tooling }}" == "Coverage" ]] && echo "True" || echo "False") conan install \ -o prerelease=${{ inputs.prerelease }} \ -o malloc_impl=${{ inputs.malloc-impl }} \ - -o coverage=True \ -s build_type=${{ inputs.build-type }} \ --build missing \ . @@ -140,18 +138,6 @@ jobs: key_prefix: SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} if: ${{ github.event_name != 'pull_request' && steps.restore-cache.outputs.cache-hit != 'true' }} - - name: Code Coverage Run - run: | - conan build . - if: ${{ inputs.testing == 'True' && inputs.tooling == 'Coverage' }} - - - name: Upload coverage reports to Codecov - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - gcov: true - if: ${{ inputs.testing == 'True' && inputs.tooling == 'Coverage' }} - - name: Create and Test Package run: | sanitize=$([[ "${{ inputs.tooling }}" == "Sanitize" ]] && echo "True" || echo "False") @@ -163,3 +149,22 @@ jobs: --build missing \ . if: ${{ inputs.testing == 'True' && inputs.tooling != 'Coverage' }} + + - name: Code Coverage Run + run: | + conan install \ + -o prerelease=${{ inputs.prerelease }} \ + -o malloc_impl=${{ inputs.malloc-impl }} \ + -o coverage=True \ + -s build_type=${{ inputs.build-type }} \ + --build missing \ + . + conan build . + if: ${{ inputs.testing == 'True' && inputs.tooling == 'Coverage' }} + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v3 + with: + token: ${{ secrets.CODECOV_TOKEN }} + gcov: true + if: ${{ inputs.testing == 'True' && inputs.tooling == 'Coverage' }} diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 265fda03..764f1177 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -56,7 +56,6 @@ pipeline { steps { sh "conan export 3rd_party/folly folly/nu2.2022.01.31.00@ ; \ conan create ${BUILD_MISSING} -pr debug -o ${PROJECT}:sanitize=True . ${PROJECT}/${TAG} ; \ - conan remove -f ${PROJECT}/${TAG} ; \ conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG} ; \ conan create ${BUILD_MISSING} -pr test -o ${PROJECT}:malloc_impl=tcmalloc . ${PROJECT}/${TAG} ; \ conan create ${BUILD_MISSING} -pr test -o ${PROJECT}:prerelease=True -o ${PROJECT}:malloc_impl=tcmalloc . ${PROJECT}/${TAG} ; \ From 7cf8d2a2a293baef47fedf909002dcfff6fb91da Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 23 Aug 2023 10:28:48 -0700 Subject: [PATCH 345/385] Fix readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index c85f97f3..116fa919 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ # SymbiosisLib (sisl) -[![Conan Build](https://github.com/eBay/sisl/actions/workflows/merge_conan_build.yml/badge.svg?branch=master)](https://github.com/eBay/sisl/actions/workflows/merge_conan_build.yml) +[![Conan Build](https://github.com/eBay/sisl/actions/workflows/conan_build.yml/badge.svg?branch=master)](https://github.com/eBay/sisl/actions/workflows/conan_build.yml) [![CodeCov](https://codecov.io/gh/eBay/sisl/branch/master/graph/badge.svg)](https://codecov.io/gh/eBay/Sisl) This repo provides a symbiosis of libraries (thus named sisl - pronounced like sizzle) mostly for very high performance data From a8b0cefe23ee986047051bf80185f82b93feecae Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 23 Aug 2023 11:00:50 -0700 Subject: [PATCH 346/385] Fix chain build. --- .github/workflows/conan_build.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/conan_build.yml b/.github/workflows/conan_build.yml index 300eb84a..57f21948 100644 --- a/.github/workflows/conan_build.yml +++ b/.github/workflows/conan_build.yml @@ -55,7 +55,7 @@ jobs: -H "Accept: application/vnd.github+json" \ -H "Authorization: Bearer ${{ secrets.CHAIN_BUILD_TOKEN }}"\ -H "X-GitHub-Api-Version: 2022-11-28" \ - https://api.github.com/repos/eBay/iomanager/actions/workflows/merge_conan_build.yml/dispatches \ + https://api.github.com/repos/eBay/iomanager/actions/workflows/conan_build.yml/dispatches \ -d '{"ref":"master","inputs":{}}' if: ${{ github.ref == 'refs/heads/master' }} - name: Start NuraftMesg Build @@ -65,6 +65,6 @@ jobs: -H "Accept: application/vnd.github+json" \ -H "Authorization: Bearer ${{ secrets.CHAIN_BUILD_TOKEN }}"\ -H "X-GitHub-Api-Version: 2022-11-28" \ - https://api.github.com/repos/eBay/nuraft_mesg/actions/workflows/merge_conan_build.yml/dispatches \ + https://api.github.com/repos/eBay/nuraft_mesg/actions/workflows/conan_build.yml/dispatches \ -d '{"ref":"main","inputs":{}}' if: ${{ github.ref == 'refs/heads/master' }} From 43d6fbf0b5de9b0bafcb6f30cc65ef11255c38ba Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Sat, 26 Aug 2023 14:55:12 -0600 Subject: [PATCH 347/385] Add simple script to automate exporting custom recipes. (#165) --- .github/workflows/build_dependencies.yml | 8 ++------ 3rd_party/breakpad/conanfile.py | 1 + 3rd_party/gperftools/conanfile.py | 1 + README.md | 1 + prepare.sh | 14 ++++++++++++++ 5 files changed, 19 insertions(+), 6 deletions(-) create mode 100755 prepare.sh diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index bfbbfccd..a365256d 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -111,13 +111,9 @@ jobs: platform: ${{ inputs.platform }} if: ${{ inputs.testing == 'True' || steps.restore-cache.outputs.cache-hit != 'true' }} - - name: Export Recipes + - name: Prepare Recipes run: | - conan export 3rd_party/breakpad breakpad/cci.20230127@ - conan export 3rd_party/folly folly/nu2.2022.01.31.00@ - conan export 3rd_party/gperftools - conan export 3rd_party/jemalloc - conan export 3rd_party/prerelease_dummy + ./prepare.sh cached_pkgs=$(ls -1d ~/.conan/data/*/*/*/*/export 2>/dev/null | sed 's,.*data/,,' | cut -d'/' -f1,2 | paste -sd',' - -) echo "::info:: Pre-cached: ${cached_pkgs}" if: ${{ inputs.testing == 'True' || steps.restore-cache.outputs.cache-hit != 'true' }} diff --git a/3rd_party/breakpad/conanfile.py b/3rd_party/breakpad/conanfile.py index f4cdf5d9..bdd6327e 100644 --- a/3rd_party/breakpad/conanfile.py +++ b/3rd_party/breakpad/conanfile.py @@ -32,6 +32,7 @@ def layout(self): def requirements(self): self.requires("linux-syscall-support/cci.20200813") + self.requires("zlib/1.2.13") def validate(self): if self.settings.os != "Linux": diff --git a/3rd_party/gperftools/conanfile.py b/3rd_party/gperftools/conanfile.py index 8d114f31..f63430b4 100644 --- a/3rd_party/gperftools/conanfile.py +++ b/3rd_party/gperftools/conanfile.py @@ -10,6 +10,7 @@ class GPerfToolsConan(ConanFile): license = "BSD" description = "A portable library to determine the call-chain of a C program" + url = "https://github.com/conan-io/conan-center-index" settings = "os", "arch", "compiler", "build_type" options = {"shared": [True, False], "fPIC": [True, False]} diff --git a/README.md b/README.md index 116fa919..db4f79b7 100644 --- a/README.md +++ b/README.md @@ -72,6 +72,7 @@ to be built. Assuming the conan setup is already done ``` +$ ./prepare.sh # this will export some recipes to the conan cache $ mkdir build $ cd build diff --git a/prepare.sh b/prepare.sh new file mode 100755 index 00000000..a35686bf --- /dev/null +++ b/prepare.sh @@ -0,0 +1,14 @@ +set -eu + +echo -n "Exporting custom recipes..." +echo -n "breakpad." +conan export 3rd_party/breakpad breakpad/cci.20230127@ >/dev/null +echo -n "folly." +conan export 3rd_party/folly folly/nu2.2022.01.31.00@ >/dev/null +echo -n "gperftools." +conan export 3rd_party/gperftools >/dev/null +echo -n "jemalloc." +conan export 3rd_party/jemalloc >/dev/null +echo -n "prerelease_dummy." +conan export 3rd_party/prerelease_dummy >/dev/null +echo "done." From 0084eb081de41585215b3e9fdae47608a2308156 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Wed, 30 Aug 2023 10:17:51 -0700 Subject: [PATCH 348/385] Token Caching (#169) * add caching to auth_manager * fix the auth test after caching * update folly cmake * add build missing for all branch builds --------- Authored-by: Ravi Akella email = raakella@ebay.com --- .jenkins/Jenkinsfile | 9 +- 3rd_party/folly/conanfile.py | 3 +- conanfile.py | 8 +- include/sisl/auth_manager/LRUCache.h | 82 ++++++++++++++++++ include/sisl/auth_manager/auth_manager.hpp | 36 +++++++- src/auth_manager/CMakeLists.txt | 2 +- src/auth_manager/auth_manager.cpp | 97 +++++++++++++++++++--- src/auth_manager/security_config.fbs | 4 + src/auth_manager/tests/AuthTest.cpp | 4 +- src/grpc/tests/unit/CMakeLists.txt | 2 +- 10 files changed, 220 insertions(+), 27 deletions(-) create mode 100644 include/sisl/auth_manager/LRUCache.h diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 894495b6..05d762cf 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -18,18 +18,13 @@ pipeline { steps { script { sh(script: "sed -Ei 's, version = .*\"([[:digit:]]+\\.[[:digit:]]+\\.[[:digit:]]+).*, version = \"\\1-${env.BUILD_NUMBER}\",' conanfile.py") - BUILD_MISSING = "--build missing" } } } - stage('Adjust for Testing/Stable') { - when { anyOf { - branch "${TESTING_BRANCH}" - branch "${STABLE_BRANCH}" - } } + stage('include build missing') { steps { script { - BUILD_MISSING = "" + BUILD_MISSING = "--build missing" } } } diff --git a/3rd_party/folly/conanfile.py b/3rd_party/folly/conanfile.py index 06dc6965..2ed4f619 100755 --- a/3rd_party/folly/conanfile.py +++ b/3rd_party/folly/conanfile.py @@ -145,7 +145,8 @@ def validate(self): # FIXME: Freeze max. CMake version at 3.16.2 to fix the Linux build def build_requirements(self): - self.build_requires("cmake/3.16.9") + pass + # self.build_requires("cmake/3.16.9") def source(self): files.get(self, **self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True) diff --git a/conanfile.py b/conanfile.py index fd643757..1d9e80ad 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.6.3" + version = "8.6.4" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") @@ -41,7 +41,7 @@ def build_requirements(self): self.build_requires("benchmark/1.7.0") self.build_requires("gtest/1.11.0") if self.settings.compiler in ["gcc"]: - self.build_requires("pistache/cci.20201127") + self.build_requires("pistache/0.0.5") def requirements(self): # Custom packages @@ -59,7 +59,6 @@ def requirements(self): self.requires("folly/2022.01.31.00") self.requires("grpc/1.48.0") self.requires("jwt-cpp/0.4.0") - self.requires("libcurl/7.86.0") self.requires("nlohmann_json/3.11.2") self.requires("prometheus-cpp/1.0.1") self.requires("spdlog/1.11.0") @@ -68,7 +67,8 @@ def requirements(self): self.requires("zmarok-semver/1.1.0") self.requires("fmt/8.1.1", override=True) self.requires("libevent/2.1.12", override=True) - self.requires("openssl/1.1.1q", override=True) + self.requires("openssl/1.1.1s", override=True) + self.requires("libcurl/7.86.0") self.requires("xz_utils/5.2.5", override=True) self.requires("zlib/1.2.12", override=True) if self.options.malloc_impl == "jemalloc": diff --git a/include/sisl/auth_manager/LRUCache.h b/include/sisl/auth_manager/LRUCache.h new file mode 100644 index 00000000..f5eb3cdc --- /dev/null +++ b/include/sisl/auth_manager/LRUCache.h @@ -0,0 +1,82 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace sisl { + +/** + * + * written by @jiankun + * + * A high performance LRU cache implementation. + * + * The cache provides two atomic operations: + * put(key, value): put an object into the cache. + * get(key): returns a optional reference to the value found by key in cache + * + * Important notes: + * 1. The get() method returns a const reference, any change to the reference + * needs to be done by a Put call. + * 2. The put/get methods are thread safe. + */ +template < typename key_t, typename value_t > +class LRUCache { +public: + using kv_pair_t = std::pair< key_t, value_t >; + using list_iterator_t = typename std::list< kv_pair_t >::iterator; + + explicit LRUCache(size_t capacity) : capacity_(capacity) {} + + template < typename K, typename V > + void put(K&& key, V&& value) { + std::unique_lock< std::shared_mutex > l{mtx_}; + + auto it = items_map_.find(key); + if (it != items_map_.end()) { + items_list_.erase(it->second); + items_map_.erase(it); + } + + items_list_.emplace_front(std::make_pair(std::forward< K >(key), std::forward< V >(value))); + items_map_[key] = items_list_.begin(); + + if (items_map_.size() > capacity_) { + auto last = items_list_.rbegin(); + items_map_.erase(last->first); + items_list_.pop_back(); + } + } + + [[nodiscard]] const std::optional< std::reference_wrapper< value_t const > > get(const key_t& key) { + std::shared_lock< std::shared_mutex > l{mtx_}; + + auto it = items_map_.find(key); + if (it == items_map_.end()) { return std::nullopt; } + + items_list_.splice(items_list_.begin(), items_list_, it->second); + return std::optional(std::cref(it->second->second)); + } + + bool exists(const key_t& key) const { + std::shared_lock< std::shared_mutex > l{mtx_}; + return items_map_.find(key) != items_map_.end(); + } + + [[nodiscard]] size_t size() const { + std::shared_lock< std::shared_mutex > l{mtx_}; + return items_map_.size(); + } + +private: + std::list< kv_pair_t > items_list_; + std::unordered_map< key_t, list_iterator_t > items_map_; + size_t capacity_; + mutable std::shared_mutex mtx_; +}; + +} // namespace sisl diff --git a/include/sisl/auth_manager/auth_manager.hpp b/include/sisl/auth_manager/auth_manager.hpp index bf5ea957..01885809 100644 --- a/include/sisl/auth_manager/auth_manager.hpp +++ b/include/sisl/auth_manager/auth_manager.hpp @@ -18,14 +18,40 @@ #include #include "security_config.hpp" +#include "LRUCache.h" namespace sisl { ENUM(AuthVerifyStatus, uint8_t, OK, UNAUTH, FORBIDDEN) +template < typename key_t, typename value_t > +class LRUCache; + +/** + * This struct holds information of a token, that can be used as if + * they were extracted from decoded token. + */ +struct CachedToken { + AuthVerifyStatus response_status; + std::string msg; + bool valid; + std::chrono::system_clock::time_point expires_at; + + inline void set_invalid(AuthVerifyStatus code, const std::string& reason) { + valid = false; + response_status = code; + msg = reason; + } + + inline void set_valid() { + valid = true; + response_status = AuthVerifyStatus::OK; + } +}; + class AuthManager { public: - AuthManager() {} + AuthManager(); virtual ~AuthManager() = default; AuthVerifyStatus verify(const std::string& token, std::string& msg) const; @@ -33,5 +59,13 @@ class AuthManager { void verify_decoded(const jwt::decoded_jwt& decoded) const; virtual std::string download_key(const std::string& key_url) const; std::string get_app(const jwt::decoded_jwt& decoded) const; + + // the verify method is declared const. We make this mutable + // as these caches are modified in the verify method. md5_sum(raw_token) -> + // DecodedToken + mutable LRUCache< std::string, CachedToken > m_cached_tokens; + + // key_id -> signing public key + mutable LRUCache< std::string, std::string > m_cached_keys; }; } // namespace sisl diff --git a/src/auth_manager/CMakeLists.txt b/src/auth_manager/CMakeLists.txt index aa554c36..ff83dc6d 100644 --- a/src/auth_manager/CMakeLists.txt +++ b/src/auth_manager/CMakeLists.txt @@ -34,7 +34,7 @@ target_link_libraries(test_auth_mgr sisl ${COMMON_DEPS} cpr::cpr - pistache::pistache + Pistache::Pistache flatbuffers::flatbuffers jwt-cpp::jwt-cpp GTest::gmock diff --git a/src/auth_manager/auth_manager.cpp b/src/auth_manager/auth_manager.cpp index 38396cca..d50b7e7e 100644 --- a/src/auth_manager/auth_manager.cpp +++ b/src/auth_manager/auth_manager.cpp @@ -2,14 +2,59 @@ #include #include +extern "C" { +#include +} #include "sisl/auth_manager/auth_manager.hpp" namespace sisl { +static std::string md5_sum(std::string const& s) { + unsigned char digest[MD5_DIGEST_LENGTH]; + + MD5(reinterpret_cast< unsigned char* >(const_cast< char* >(s.c_str())), s.length(), + reinterpret_cast< unsigned char* >(&digest)); + + std::ostringstream out; + out << std::hex; + for (int i = 0; i < MD5_DIGEST_LENGTH; i++) { + out << std::setfill('0') << std::setw(2) << std::hex << (int)(unsigned char)digest[i]; + } + return out.str(); +} + +struct incomplete_verification_error : std::exception { + explicit incomplete_verification_error(const std::string& error) : error_(error) {} + const char* what() const noexcept { return error_.c_str(); } + +private: + const std::string error_; +}; + +AuthManager::AuthManager() : + m_cached_tokens(SECURITY_DYNAMIC_CONFIG(auth_manager->auth_token_cache_size)), + m_cached_keys(SECURITY_DYNAMIC_CONFIG(auth_manager->auth_key_cache_size)) {} + AuthVerifyStatus AuthManager::verify(const std::string& token, std::string& msg) const { + // if we have it in cache, just use it to make the decision + auto const token_hash = md5_sum(token); + if (auto const ct = m_cached_tokens.get(token_hash); ct) { + auto const& cached_token = ct->get(); + if (cached_token.valid) { + auto now = std::chrono::system_clock::now(); + if (now > cached_token.expires_at + std::chrono::seconds(SECURITY_DYNAMIC_CONFIG(auth_manager->leeway))) { + m_cached_tokens.put( + token_hash, CachedToken{AuthVerifyStatus::UNAUTH, "token expired", false, cached_token.expires_at}); + } + } + msg = cached_token.msg; + return cached_token.response_status; + } + + // not found in cache + CachedToken cached_token; std::string app_name; - // TODO: cache tokens for better performance try { // this may throw if token is ill formed const auto decoded{jwt::decode(token)}; @@ -18,34 +63,66 @@ AuthVerifyStatus AuthManager::verify(const std::string& token, std::string& msg) // exception is thrown. verify_decoded(decoded); app_name = get_app(decoded); - } catch (const std::exception& e) { + cached_token.expires_at = decoded.get_expires_at(); + cached_token.set_valid(); + } catch (const incomplete_verification_error& e) { + // verification incomplete, the token validity is not determined, shouldn't + // cache msg = e.what(); return AuthVerifyStatus::UNAUTH; + } catch (const std::exception& e) { + cached_token.set_invalid(AuthVerifyStatus::UNAUTH, e.what()); + m_cached_tokens.put(token_hash, cached_token); + msg = cached_token.msg; + return cached_token.response_status; } // check client application if (SECURITY_DYNAMIC_CONFIG(auth_manager->auth_allowed_apps) != "all") { if (SECURITY_DYNAMIC_CONFIG(auth_manager->auth_allowed_apps).find(app_name) == std::string::npos) { - msg = fmt::format("application '{}' is not allowed to perform the request", app_name); - return AuthVerifyStatus::FORBIDDEN; + cached_token.set_invalid(AuthVerifyStatus::FORBIDDEN, + fmt::format("application '{}' is not allowed to perform the request", app_name)); } } - return AuthVerifyStatus::OK; + m_cached_tokens.put(token_hash, cached_token); + msg = cached_token.msg; + return cached_token.response_status; } + void AuthManager::verify_decoded(const jwt::decoded_jwt& decoded) const { const auto alg{decoded.get_algorithm()}; if (alg != "RS256") throw std::runtime_error(fmt::format("unsupported algorithm: {}", alg)); - if (!decoded.has_header_claim("x5u")) throw std::runtime_error("no indication of verification key"); + std::string signing_key; + std::string key_id; + auto should_cache_key = true; - auto key_url = decoded.get_header_claim("x5u").as_string(); + if (decoded.has_key_id()) { + key_id = decoded.get_key_id(); + auto cached_key = m_cached_keys.get(key_id); + if (cached_key) { + signing_key = cached_key->get(); + should_cache_key = false; + } + } else { + should_cache_key = false; + } - if (key_url.rfind(SECURITY_DYNAMIC_CONFIG(auth_manager->tf_token_url), 0) != 0) { - throw std::runtime_error(fmt::format("key url {} is not trusted", key_url)); + if (signing_key.empty()) { + if (!decoded.has_header_claim("x5u")) throw std::runtime_error("no indication of verification key"); + + auto key_url = decoded.get_header_claim("x5u").as_string(); + + if (key_url.rfind(SECURITY_DYNAMIC_CONFIG(auth_manager->tf_token_url), 0) != 0) { + throw std::runtime_error(fmt::format("key url {} is not trusted", key_url)); + } + signing_key = download_key(key_url); } - const std::string signing_key{download_key(key_url)}; + + if (should_cache_key) { m_cached_keys.put(key_id, signing_key); } + const auto verifier{jwt::verify() .with_issuer(SECURITY_DYNAMIC_CONFIG(auth_manager->issuer)) .allow_algorithm(jwt::algorithm::rs256(signing_key)) diff --git a/src/auth_manager/security_config.fbs b/src/auth_manager/security_config.fbs index e560455b..20cbec5a 100644 --- a/src/auth_manager/security_config.fbs +++ b/src/auth_manager/security_config.fbs @@ -33,6 +33,10 @@ table AuthManager { // ssl verification for the signing key download url verify: bool = true; + + // LRUCache sizes + auth_token_cache_size: uint32 = 2000; + auth_key_cache_size: uint32 = 100; } table SecuritySettings { diff --git a/src/auth_manager/tests/AuthTest.cpp b/src/auth_manager/tests/AuthTest.cpp index 7447a346..79ba44ac 100644 --- a/src/auth_manager/tests/AuthTest.cpp +++ b/src/auth_manager/tests/AuthTest.cpp @@ -210,12 +210,12 @@ TEST_F(AuthTest, trf_allow_valid_token) { EXPECT_EQ(mock_auth_mgr->verify(mock_trf_client.get_token()), AuthVerifyStatus::OK); // use the acces_token saved from the previous call - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); + EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(0); EXPECT_EQ(mock_auth_mgr->verify(mock_trf_client.get_token()), AuthVerifyStatus::OK); // set token to be expired invoking request_with_grant_token mock_trf_client.set_expiry(std::chrono::system_clock::now() - std::chrono::seconds(100)); - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); + EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(0); EXPECT_EQ(mock_auth_mgr->verify(mock_trf_client.get_token()), AuthVerifyStatus::OK); } diff --git a/src/grpc/tests/unit/CMakeLists.txt b/src/grpc/tests/unit/CMakeLists.txt index 1e82a780..bd163d99 100644 --- a/src/grpc/tests/unit/CMakeLists.txt +++ b/src/grpc/tests/unit/CMakeLists.txt @@ -7,7 +7,7 @@ add_executable(auth_test target_link_libraries(auth_test sisl sisl_grpc - pistache::pistache + Pistache::Pistache GTest::gmock ${COMMON_DEPS} ) From 8cc3704dacf6b1a5521ab80e3f2e3525c3ba3660 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Wed, 30 Aug 2023 11:36:24 -0700 Subject: [PATCH 349/385] add date to 3rd party (#170) Co-authored-by: Ravi Akella email = raakella@ebay.com --- .github/workflows/build_dependencies.yml | 1 + 3rd_party/date/conandata.yml | 29 +++++ 3rd_party/date/conanfile.py | 140 ++++++++++++++++++++++ 3rd_party/date/patches/0001-fix-uwp.patch | 17 +++ 3rd_party/date/patches/cmake-3.0.0.patch | 14 +++ 3rd_party/date/patches/cmake-3.0.1.patch | 14 +++ 3rd_party/date/patches/cmake.patch | 19 +++ 3rd_party/date/patches/string_view.patch | 13 ++ 8 files changed, 247 insertions(+) create mode 100644 3rd_party/date/conandata.yml create mode 100644 3rd_party/date/conanfile.py create mode 100644 3rd_party/date/patches/0001-fix-uwp.patch create mode 100644 3rd_party/date/patches/cmake-3.0.0.patch create mode 100644 3rd_party/date/patches/cmake-3.0.1.patch create mode 100644 3rd_party/date/patches/cmake.patch create mode 100644 3rd_party/date/patches/string_view.patch diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 93f988f5..693d13f7 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -106,6 +106,7 @@ jobs: conan export 3rd_party/jemalloc conan export 3rd_party/prerelease_dummy conan export 3rd_party/pistache pistache/cci.20201127@ + conan export 3rd_party/date date/3.0.1@ cached_pkgs=$(ls -1d ~/.conan/data/*/*/*/*/export 2>/dev/null | sed 's,.*data/,,' | cut -d'/' -f1,2 | paste -sd',' - -) echo "::info:: Pre-cached: ${cached_pkgs}" if: ${{ inputs.testing == 'True' || steps.restore-cache.outputs.cache-hit != 'true' }} diff --git a/3rd_party/date/conandata.yml b/3rd_party/date/conandata.yml new file mode 100644 index 00000000..bed2d768 --- /dev/null +++ b/3rd_party/date/conandata.yml @@ -0,0 +1,29 @@ +sources: + "3.0.1": + url: "https://github.com/HowardHinnant/date/archive/refs/tags/v3.0.1.tar.gz" + sha256: "7a390f200f0ccd207e8cff6757e04817c1a0aec3e327b006b7eb451c57ee3538" + "3.0.0": + url: "https://github.com/HowardHinnant/date/archive/refs/tags/v3.0.0.tar.gz" + sha256: "87bba2eaf0ebc7ec539e5e62fc317cb80671a337c1fb1b84cb9e4d42c6dbebe3" + "2.4.1": + url: "https://github.com/HowardHinnant/date/archive/refs/tags/v2.4.1.tar.gz" + sha256: "98907d243397483bd7ad889bf6c66746db0d7d2a39cc9aacc041834c40b65b98" +patches: + "3.0.1": + - patch_file: "patches/cmake-3.0.1.patch" + patch_description: "Disable string view to workaround clang 5 not having it" + patch_type: "portability" + "3.0.0": + - patch_file: "patches/cmake-3.0.0.patch" + patch_description: "Disable string view to workaround clang 5 not having it" + patch_type: "portability" + "2.4.1": + - patch_file: "patches/0001-fix-uwp.patch" + patch_description: "Fix Universal Windows Platform (UWP) unhandled exception support. See https://github.com/microsoft/vcpkg/pull/8151#issuecomment-531175393." + patch_type: "portability" + - patch_file: "patches/cmake.patch" + patch_description: "Add libcurl target for conan compatibility" + patch_type: "conan" + - patch_file: "patches/string_view.patch" + patch_description: "Disable string view to workaround clang 5 not having it" + patch_type: "portability" diff --git a/3rd_party/date/conanfile.py b/3rd_party/date/conanfile.py new file mode 100644 index 00000000..7c597110 --- /dev/null +++ b/3rd_party/date/conanfile.py @@ -0,0 +1,140 @@ +from conan import ConanFile +from conan.tools.build import check_min_cppstd +from conan.tools.cmake import CMake, CMakeToolchain, CMakeDeps, cmake_layout +from conan.tools.files import get, rmdir, apply_conandata_patches, export_conandata_patches, copy +from conan.tools.scm import Version + +import os + +required_conan_version = ">=1.53.0" + + +class DateConan(ConanFile): + name = "date" + url = "https://github.com/conan-io/conan-center-index" + homepage = "https://github.com/HowardHinnant/date" + description = "A date and time library based on the C++11/14/17 header" + topics = ("datetime", "timezone", "calendar", "time", "iana-database") + license = "MIT" + + settings = "os", "arch", "compiler", "build_type" + options = { + "shared": [True, False], + "fPIC": [True, False], + "header_only": [True, False], + "use_system_tz_db": [True, False], + "use_tz_db_in_dot": [True, False], + } + default_options = { + "shared": False, + "fPIC": True, + "header_only": False, + "use_system_tz_db": False, + "use_tz_db_in_dot": False, + } + + def export_sources(self): + export_conandata_patches(self) + + def config_options(self): + if self.settings.os == "Windows": + del self.options.fPIC + if self.settings.os in ["iOS", "tvOS", "watchOS", "Android"]: + self.options.use_system_tz_db = True + + def configure(self): + if self.options.shared or self.options.header_only: + self.options.rm_safe("fPIC") + if self.options.header_only: + del self.options.shared + + def layout(self): + cmake_layout(self, src_folder="src") + + def requirements(self): + if not self.options.header_only and not self.options.use_system_tz_db: + self.requires("libcurl/7.86.0") + + def package_id(self): + if self.info.options.header_only: + self.info.clear() + + def validate(self): + if self.settings.compiler.get_safe("cppstd"): + check_min_cppstd(self, 11) + + def source(self): + get(self, **self.conan_data["sources"][self.version], strip_root=True) + + def generate(self): + tc = CMakeToolchain(self) + tc.variables["ENABLE_DATE_TESTING"] = False + tc.variables["USE_SYSTEM_TZ_DB"] = self.options.use_system_tz_db + tc.variables["USE_TZ_DB_IN_DOT"] = self.options.use_tz_db_in_dot + tc.variables["BUILD_TZ_LIB"] = not self.options.header_only + # workaround for clang 5 not having string_view + if Version(self.version) >= "3.0.0" and self.settings.compiler == "clang" \ + and Version(self.settings.compiler.version) <= "5.0": + tc.cache_variables["DISABLE_STRING_VIEW"] = True + tc.generate() + + deps = CMakeDeps(self) + deps.generate() + + def build(self): + apply_conandata_patches(self) + if not self.options.header_only: + cmake = CMake(self) + cmake.configure() + cmake.build() + + def package(self): + copy(self, "LICENSE.txt", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder) + if self.options.header_only: + src = os.path.join(self.source_folder, "include", "date") + dst = os.path.join(self.package_folder, "include", "date") + copy(self, "date.h", dst=dst, src=src) + copy(self, "tz.h", dst=dst, src=src) + copy(self, "ptz.h", dst=dst, src=src) + copy(self, "iso_week.h", dst=dst, src=src) + copy(self, "julian.h", dst=dst, src=src) + copy(self, "islamic.h", dst=dst, src=src) + else: + cmake = CMake(self) + cmake.install() + rmdir(self, os.path.join(self.package_folder, "lib", "cmake")) + rmdir(self, os.path.join(self.package_folder, "CMake")) + + def package_info(self): + self.cpp_info.set_property("cmake_target_name", "date::date") + # TODO: Remove legacy .names attribute when conan 2.0 is released + self.cpp_info.names["cmake_find_package"] = "date" + self.cpp_info.names["cmake_find_package_multi"] = "date" + + # date-tz + if not self.options.header_only: + self.cpp_info.components["date-tz"].set_property("cmake_target_name", "date::date-tz") + # TODO: Remove legacy .names attribute when conan 2.0 is released + self.cpp_info.components["date-tz"].names["cmake_find_package"] = "date-tz" + self.cpp_info.components["date-tz"].names["cmake_find_package_multi"] = "date-tz" + lib_name = "{}tz".format("date-" if Version(self.version) >= "3.0.0" else "") + self.cpp_info.components["date-tz"].libs = [lib_name] + if self.settings.os == "Linux": + self.cpp_info.components["date-tz"].system_libs.append("pthread") + self.cpp_info.components["date-tz"].system_libs.append("m") + + if not self.options.use_system_tz_db: + self.cpp_info.components["date-tz"].requires.append("libcurl::libcurl") + + if self.options.use_system_tz_db and not self.settings.os == "Windows": + use_os_tzdb = 1 + else: + use_os_tzdb = 0 + + defines = ["USE_OS_TZDB={}".format(use_os_tzdb)] + if self.settings.os == "Windows" and self.options.shared: + defines.append("DATE_USE_DLL=1") + + self.cpp_info.components["date-tz"].defines.extend(defines) + else: + self.cpp_info.defines.append("DATE_HEADER_ONLY") diff --git a/3rd_party/date/patches/0001-fix-uwp.patch b/3rd_party/date/patches/0001-fix-uwp.patch new file mode 100644 index 00000000..f7b5c246 --- /dev/null +++ b/3rd_party/date/patches/0001-fix-uwp.patch @@ -0,0 +1,17 @@ +diff --git a/include/date/date.h b/include/date/date.h +index cb115a9..66d87c2 100644 +--- a/include/date/date.h ++++ b/include/date/date.h +@@ -76,6 +76,12 @@ + # endif + #endif + ++#ifdef _MSC_VER ++# pragma warning(push) ++// warning C4127: conditional expression is constant ++# pragma warning(disable : 4127 4996) ++#endif ++ + namespace date + { + diff --git a/3rd_party/date/patches/cmake-3.0.0.patch b/3rd_party/date/patches/cmake-3.0.0.patch new file mode 100644 index 00000000..583e86e5 --- /dev/null +++ b/3rd_party/date/patches/cmake-3.0.0.patch @@ -0,0 +1,14 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index ad74900..ac390a9 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -127,6 +127,9 @@ if( BUILD_TZ_LIB ) + target_include_directories( date-tz SYSTEM PRIVATE ${CURL_INCLUDE_DIRS} ) + target_link_libraries( date-tz PRIVATE ${CURL_LIBRARIES} ) + endif( ) ++ if( DISABLE_STRING_VIEW ) ++ target_compile_definitions( date-tz PRIVATE -DHAS_STRING_VIEW=0 -DHAS_DEDUCTION_GUIDES=0 ) ++ endif( ) + endif( ) + + #[===================================================================[ diff --git a/3rd_party/date/patches/cmake-3.0.1.patch b/3rd_party/date/patches/cmake-3.0.1.patch new file mode 100644 index 00000000..8edcb309 --- /dev/null +++ b/3rd_party/date/patches/cmake-3.0.1.patch @@ -0,0 +1,14 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index ad74900..ac390a9 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -156,6 +156,9 @@ if( BUILD_TZ_LIB ) + target_include_directories( date-tz SYSTEM PRIVATE ${CURL_INCLUDE_DIRS} ) + target_link_libraries( date-tz PRIVATE ${CURL_LIBRARIES} ) + endif( ) ++ if( DISABLE_STRING_VIEW ) ++ target_compile_definitions( date-tz PRIVATE -DHAS_STRING_VIEW=0 -DHAS_DEDUCTION_GUIDES=0 ) ++ endif( ) + endif( ) + + #[===================================================================[ diff --git a/3rd_party/date/patches/cmake.patch b/3rd_party/date/patches/cmake.patch new file mode 100644 index 00000000..3f9df797 --- /dev/null +++ b/3rd_party/date/patches/cmake.patch @@ -0,0 +1,19 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index f025a3a..7bc93df 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -57,8 +57,12 @@ else( ) + target_compile_definitions( tz PRIVATE -DHAS_REMOTE_API=1 ) + target_compile_definitions( tz PUBLIC -DUSE_OS_TZDB=0 ) + find_package( CURL REQUIRED ) +- include_directories( SYSTEM ${CURL_INCLUDE_DIRS} ) +- set( OPTIONAL_LIBRARIES ${CURL_LIBRARIES} ) ++ set( OPTIONAL_LIBRARIES CURL::libcurl ) ++endif() ++ ++if( BUILD_SHARED_LIBS ) ++ target_compile_definitions( tz PRIVATE -DDATE_BUILD_DLL=1 ) ++ target_compile_definitions( tz PUBLIC -DDATE_USE_DLL=1 ) + endif( ) + + if( USE_TZ_DB_IN_DOT ) diff --git a/3rd_party/date/patches/string_view.patch b/3rd_party/date/patches/string_view.patch new file mode 100644 index 00000000..008dd04c --- /dev/null +++ b/3rd_party/date/patches/string_view.patch @@ -0,0 +1,13 @@ +diff --git a/include/date/date.h b/include/date/date.h +index cb115a9..23cd05a 100644 +--- a/include/date/date.h ++++ b/include/date/date.h +@@ -31,7 +31,7 @@ + // We did not mean to shout. + + #ifndef HAS_STRING_VIEW +-# if __cplusplus >= 201703 ++# if __cplusplus >= 201703 && __has_include() + # define HAS_STRING_VIEW 1 + # else + # define HAS_STRING_VIEW 0 From 22bbca006541948844ddc97e0fb58f44c46b92c1 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Thu, 7 Sep 2023 17:04:59 -0700 Subject: [PATCH 350/385] make token verify return state derivable (#174) Co-authored-by: Ravi Akella email = raakella@ebay.com --- conanfile.py | 2 +- include/sisl/auth_manager/token_verifier.hpp | 12 ++++++++++-- src/grpc/tests/unit/auth_test.cpp | 4 +++- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/conanfile.py b/conanfile.py index e0ffee93..489f470f 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "10.1.1" + version = "10.1.2" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" diff --git a/include/sisl/auth_manager/token_verifier.hpp b/include/sisl/auth_manager/token_verifier.hpp index 67e0f95c..82ee0a68 100644 --- a/include/sisl/auth_manager/token_verifier.hpp +++ b/include/sisl/auth_manager/token_verifier.hpp @@ -18,15 +18,23 @@ namespace sisl { ENUM(VerifyCode, uint8_t, OK, UNAUTH, FORBIDDEN) -struct TokenVerifyStatus { +// This class represents the return value to the token verify call. +// Derive from this class if the return value needs to contain some information from the decoded token. +class TokenVerifyState { +public: + TokenVerifyState() = default; + TokenVerifyState(VerifyCode const c, std::string const& m) : code(c), msg(m) {} + virtual ~TokenVerifyState() {} VerifyCode code; std::string msg; }; +using token_state_ptr = std::shared_ptr< TokenVerifyState >; + class TokenVerifier { public: virtual ~TokenVerifier() = default; - virtual TokenVerifyStatus verify(std::string const& token) const = 0; + virtual token_state_ptr verify(std::string const& token) const = 0; }; // extracts the key value pairs (m_auth_header_key, get_token()) from grpc client context and verifies the token diff --git a/src/grpc/tests/unit/auth_test.cpp b/src/grpc/tests/unit/auth_test.cpp index 82f26354..87720830 100644 --- a/src/grpc/tests/unit/auth_test.cpp +++ b/src/grpc/tests/unit/auth_test.cpp @@ -104,7 +104,9 @@ class MockTokenVerifier : public GrpcTokenVerifier { return ::grpc::Status(::grpc::StatusCode::UNAUTHENTICATED, ::grpc::string("missing header authorization")); } - TokenVerifyStatus verify(std::string const&) const override { return TokenVerifyStatus(); } + sisl::token_state_ptr verify(std::string const&) const override { + return std::make_shared< sisl::TokenVerifyState >(); + } }; class AuthBaseTest : public ::testing::Test { From 0079b86b5f7a8ec17305c92009861f037ae45626 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Fri, 8 Sep 2023 11:13:14 -0700 Subject: [PATCH 351/385] include filesystem header in logging.h (#175) Co-authored-by: Ravi Akella email = raakella@ebay.com --- conanfile.py | 2 +- include/sisl/logging/logging.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index 1d9e80ad..01a148b2 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.6.4" + version = "8.6.5" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/include/sisl/logging/logging.h b/include/sisl/logging/logging.h index 4dc951c9..66c96bb4 100644 --- a/include/sisl/logging/logging.h +++ b/include/sisl/logging/logging.h @@ -31,6 +31,7 @@ #include #include #include +#include #include #include From 005fc2c9b70319d7fdc1173dddafc118fd1c6a5a Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Fri, 8 Sep 2023 12:01:37 -0700 Subject: [PATCH 352/385] acquire unique lock for LRU cache get operation (#176) Co-authored-by: Ravi Akella email = raakella@ebay.com --- include/sisl/auth_manager/LRUCache.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/sisl/auth_manager/LRUCache.h b/include/sisl/auth_manager/LRUCache.h index f5eb3cdc..b32bce3f 100644 --- a/include/sisl/auth_manager/LRUCache.h +++ b/include/sisl/auth_manager/LRUCache.h @@ -53,7 +53,8 @@ class LRUCache { } [[nodiscard]] const std::optional< std::reference_wrapper< value_t const > > get(const key_t& key) { - std::shared_lock< std::shared_mutex > l{mtx_}; + // we need unique lock for the splice operation + std::unique_lock< std::shared_mutex > l{mtx_}; auto it = items_map_.find(key); if (it == items_map_.end()) { return std::nullopt; } From a424845d219cd83997494f9cc14d36643fa9b5b9 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Mon, 11 Sep 2023 09:29:48 -0700 Subject: [PATCH 353/385] lru get to return a copy rather than ref. Add more lru tests (#177) Co-authored-by: Ravi Akella email = raakella@ebay.com --- include/sisl/auth_manager/LRUCache.h | 4 +- src/auth_manager/CMakeLists.txt | 11 +++ src/auth_manager/auth_manager.cpp | 15 ++- src/auth_manager/tests/LRUCacheTest.cpp | 123 ++++++++++++++++++++++++ 4 files changed, 143 insertions(+), 10 deletions(-) create mode 100644 src/auth_manager/tests/LRUCacheTest.cpp diff --git a/include/sisl/auth_manager/LRUCache.h b/include/sisl/auth_manager/LRUCache.h index b32bce3f..504141d4 100644 --- a/include/sisl/auth_manager/LRUCache.h +++ b/include/sisl/auth_manager/LRUCache.h @@ -52,7 +52,7 @@ class LRUCache { } } - [[nodiscard]] const std::optional< std::reference_wrapper< value_t const > > get(const key_t& key) { + [[nodiscard]] std::optional< value_t > get(const key_t& key) { // we need unique lock for the splice operation std::unique_lock< std::shared_mutex > l{mtx_}; @@ -60,7 +60,7 @@ class LRUCache { if (it == items_map_.end()) { return std::nullopt; } items_list_.splice(items_list_.begin(), items_list_, it->second); - return std::optional(std::cref(it->second->second)); + return std::optional(it->second->second); } bool exists(const key_t& key) const { diff --git a/src/auth_manager/CMakeLists.txt b/src/auth_manager/CMakeLists.txt index ff83dc6d..ee75918d 100644 --- a/src/auth_manager/CMakeLists.txt +++ b/src/auth_manager/CMakeLists.txt @@ -40,3 +40,14 @@ target_link_libraries(test_auth_mgr GTest::gmock ) add_test(NAME AuthManager COMMAND test_auth_mgr) + +add_executable(test_lru_cache) +target_sources(test_lru_cache PRIVATE + tests/LRUCacheTest.cpp + ) +target_link_libraries(test_lru_cache + sisl + ${COMMON_DEPS} + GTest::gmock + ) +add_test(NAME LRUCache COMMAND test_lru_cache) diff --git a/src/auth_manager/auth_manager.cpp b/src/auth_manager/auth_manager.cpp index d50b7e7e..6e083cd4 100644 --- a/src/auth_manager/auth_manager.cpp +++ b/src/auth_manager/auth_manager.cpp @@ -40,16 +40,15 @@ AuthVerifyStatus AuthManager::verify(const std::string& token, std::string& msg) // if we have it in cache, just use it to make the decision auto const token_hash = md5_sum(token); if (auto const ct = m_cached_tokens.get(token_hash); ct) { - auto const& cached_token = ct->get(); - if (cached_token.valid) { + if (ct->valid) { auto now = std::chrono::system_clock::now(); - if (now > cached_token.expires_at + std::chrono::seconds(SECURITY_DYNAMIC_CONFIG(auth_manager->leeway))) { - m_cached_tokens.put( - token_hash, CachedToken{AuthVerifyStatus::UNAUTH, "token expired", false, cached_token.expires_at}); + if (now > ct->expires_at + std::chrono::seconds(SECURITY_DYNAMIC_CONFIG(auth_manager->leeway))) { + m_cached_tokens.put(token_hash, + CachedToken{AuthVerifyStatus::UNAUTH, "token expired", false, ct->expires_at}); } } - msg = cached_token.msg; - return cached_token.response_status; + msg = ct->msg; + return ct->response_status; } // not found in cache @@ -103,7 +102,7 @@ void AuthManager::verify_decoded(const jwt::decoded_jwt& decoded) const { key_id = decoded.get_key_id(); auto cached_key = m_cached_keys.get(key_id); if (cached_key) { - signing_key = cached_key->get(); + signing_key = *cached_key; should_cache_key = false; } } else { diff --git a/src/auth_manager/tests/LRUCacheTest.cpp b/src/auth_manager/tests/LRUCacheTest.cpp new file mode 100644 index 00000000..250b8647 --- /dev/null +++ b/src/auth_manager/tests/LRUCacheTest.cpp @@ -0,0 +1,123 @@ +#include "sisl/auth_manager/LRUCache.h" +#include +#include +#include +#include + +SISL_OPTIONS_ENABLE(logging) + +namespace sisl::testing { + +using namespace ::testing; + +TEST(LRUTest, basic) { + auto lru = LRUCache< int, int >(3); + + EXPECT_EQ(0, lru.size()); + EXPECT_FALSE(lru.exists(1)); + + lru.put(0, 0); + lru.put(1, 1); + EXPECT_EQ(2, lru.size()); + EXPECT_TRUE(lru.exists(0)); + EXPECT_TRUE(lru.exists(1)); + + lru.put(2, 2); + + // this will evict 0 from cache + lru.put(3, 3); + + EXPECT_EQ(3, lru.size()); + + EXPECT_FALSE(lru.exists(0)); + EXPECT_TRUE(lru.exists(1)); + EXPECT_TRUE(lru.exists(2)); + EXPECT_TRUE(lru.exists(3)); + + // current elements in cache are 3, 2, 1 + // let's re-insert 1, this will move 1 to the head of cache + lru.put(1, 1); + + // insert another new key, this will evict 2 + lru.put(4, 4); + + EXPECT_EQ(3, lru.size()); + EXPECT_FALSE(lru.exists(2)); + EXPECT_TRUE(lru.exists(1)); + EXPECT_TRUE(lru.exists(3)); + EXPECT_TRUE(lru.exists(4)); +} + +TEST(LRUTest, get) { + auto lru = LRUCache< std::string, std::string >(3); + + lru.put("key1", "value1"); + EXPECT_EQ("value1", lru.get("key1")); + auto v = lru.get("no-such-key"); + EXPECT_EQ(std::nullopt, v); + + // use variable as key, to test the perfect forwarding + std::string key{"key2"}; + std::string value{"value2"}; + lru.put(key, value); + ASSERT_TRUE(lru.get(key)); + EXPECT_EQ(value, lru.get(key)); +} + +TEST(LRUTest, stress_test) { + struct val { + std::string s; + }; + auto p_get = std::make_shared< std::promise< void > >(); + auto f_get = p_get->get_future(); + auto p_put = std::make_shared< std::promise< void > >(); + auto f_put = p_put->get_future(); + static constexpr size_t iter = 3000; + auto lru = LRUCache< int, val >(2000); + auto putter = [&lru, p_put](int const i) { + lru.put(i, val{std::to_string(i)}); + if (i == iter) { p_put->set_value(); } + }; + auto getter = [&lru, p_get](int const i) { + if (lru.exists(i)) { + auto v = lru.get(i); + EXPECT_EQ(v->s, std::to_string(i)); + } + if (i == iter) { p_get->set_value(); } + }; + for (size_t i = 1; i <= iter; i++) { + std::thread(putter, i).detach(); + } + f_put.get(); + + for (size_t i = 1; i <= iter; i++) { + std::thread(getter, i).detach(); + } + + f_get.get(); + auto p_get1 = std::make_shared< std::promise< void > >(); + auto f_get1 = p_get1->get_future(); + static constexpr int single_key = 10000; + lru.put(single_key, val{std::to_string(single_key)}); + for (size_t i = 1; i <= 5000; i++) { + std::thread( + [&lru, p_get1](int const i) { + if (lru.exists(single_key)) { + auto v = lru.get(single_key); + EXPECT_EQ(v->s, std::to_string(single_key)); + } + if (i == 5000) { p_get1->set_value(); } + }, + i) + .detach(); + } + f_get1.get(); +} + +} // namespace sisl::testing + +int main(int argc, char* argv[]) { + testing::InitGoogleMock(&argc, argv); + SISL_OPTIONS_LOAD(argc, argv, logging) + return RUN_ALL_TESTS(); +} \ No newline at end of file From cbc315f0850009aa6707c82d54bda879fc051834 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Mon, 11 Sep 2023 10:23:23 -0700 Subject: [PATCH 354/385] disable stress test (#178) Co-authored-by: Ravi Akella email = raakella@ebay.com --- src/auth_manager/tests/LRUCacheTest.cpp | 50 ------------------------- 1 file changed, 50 deletions(-) diff --git a/src/auth_manager/tests/LRUCacheTest.cpp b/src/auth_manager/tests/LRUCacheTest.cpp index 250b8647..cdb901f4 100644 --- a/src/auth_manager/tests/LRUCacheTest.cpp +++ b/src/auth_manager/tests/LRUCacheTest.cpp @@ -64,56 +64,6 @@ TEST(LRUTest, get) { EXPECT_EQ(value, lru.get(key)); } -TEST(LRUTest, stress_test) { - struct val { - std::string s; - }; - auto p_get = std::make_shared< std::promise< void > >(); - auto f_get = p_get->get_future(); - auto p_put = std::make_shared< std::promise< void > >(); - auto f_put = p_put->get_future(); - static constexpr size_t iter = 3000; - auto lru = LRUCache< int, val >(2000); - auto putter = [&lru, p_put](int const i) { - lru.put(i, val{std::to_string(i)}); - if (i == iter) { p_put->set_value(); } - }; - auto getter = [&lru, p_get](int const i) { - if (lru.exists(i)) { - auto v = lru.get(i); - EXPECT_EQ(v->s, std::to_string(i)); - } - if (i == iter) { p_get->set_value(); } - }; - for (size_t i = 1; i <= iter; i++) { - std::thread(putter, i).detach(); - } - f_put.get(); - - for (size_t i = 1; i <= iter; i++) { - std::thread(getter, i).detach(); - } - - f_get.get(); - auto p_get1 = std::make_shared< std::promise< void > >(); - auto f_get1 = p_get1->get_future(); - static constexpr int single_key = 10000; - lru.put(single_key, val{std::to_string(single_key)}); - for (size_t i = 1; i <= 5000; i++) { - std::thread( - [&lru, p_get1](int const i) { - if (lru.exists(single_key)) { - auto v = lru.get(single_key); - EXPECT_EQ(v->s, std::to_string(single_key)); - } - if (i == 5000) { p_get1->set_value(); } - }, - i) - .detach(); - } - f_get1.get(); -} - } // namespace sisl::testing int main(int argc, char* argv[]) { From 5ae717672449340d7c54abe83a01716660edecb4 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 15 Sep 2023 15:55:52 -0600 Subject: [PATCH 355/385] sisl::byte_array_impl should be movable. (#179) --- .../{conan_build.yml => merge_build.yml} | 5 -- .github/workflows/pr_build.yml | 42 +++++++++++++++++ README.md | 2 +- conanfile.py | 2 +- include/sisl/fds/buffer.hpp | 47 ++++++++++++++----- 5 files changed, 78 insertions(+), 20 deletions(-) rename .github/workflows/{conan_build.yml => merge_build.yml} (96%) create mode 100644 .github/workflows/pr_build.yml diff --git a/.github/workflows/conan_build.yml b/.github/workflows/merge_build.yml similarity index 96% rename from .github/workflows/conan_build.yml rename to .github/workflows/merge_build.yml index 57f21948..1d49a8fe 100644 --- a/.github/workflows/conan_build.yml +++ b/.github/workflows/merge_build.yml @@ -7,11 +7,6 @@ on: - stable/v8.x - stable/v9.x - master - pull_request: - branches: - - stable/v8.x - - stable/v9.x - - master jobs: Build: diff --git a/.github/workflows/pr_build.yml b/.github/workflows/pr_build.yml new file mode 100644 index 00000000..9cffc3a8 --- /dev/null +++ b/.github/workflows/pr_build.yml @@ -0,0 +1,42 @@ +name: Sisl Build + +on: + workflow_dispatch: + pull_request: + branches: + - stable/v8.x + - stable/v9.x + - master + +jobs: + Build: + strategy: + fail-fast: false + matrix: + platform: ["ubuntu-22.04"] + build-type: ["Debug", "Release"] + malloc-impl: ["libc", "tcmalloc"] + prerelease: ["True", "False"] + tooling: ["Sanitize", "Coverage", "None"] + exclude: + - build-type: Debug + prerelease: "False" + - build-type: Debug + tooling: None + - build-type: Debug + malloc-impl: tcmalloc + - build-type: Release + malloc-impl: libc + - build-type: Release + tooling: Sanitize + - build-type: Release + tooling: Coverage + uses: ./.github/workflows/build_dependencies.yml + with: + platform: ${{ matrix.platform }} + branch: ${{ github.ref }} + build-type: ${{ matrix.build-type }} + malloc-impl: ${{ matrix.malloc-impl }} + prerelease: ${{ matrix.prerelease }} + tooling: ${{ matrix.tooling }} + testing: 'True' diff --git a/README.md b/README.md index db4f79b7..65c8e133 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ # SymbiosisLib (sisl) -[![Conan Build](https://github.com/eBay/sisl/actions/workflows/conan_build.yml/badge.svg?branch=master)](https://github.com/eBay/sisl/actions/workflows/conan_build.yml) +[![Conan Build](https://github.com/eBay/sisl/actions/workflows/merge_build.yml/badge.svg?branch=master)](https://github.com/eBay/sisl/actions/workflows/merge_build.yml) [![CodeCov](https://codecov.io/gh/eBay/sisl/branch/master/graph/badge.svg)](https://codecov.io/gh/eBay/Sisl) This repo provides a symbiosis of libraries (thus named sisl - pronounced like sizzle) mostly for very high performance data diff --git a/conanfile.py b/conanfile.py index 489f470f..ebf03bd1 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "10.1.2" + version = "10.1.3" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" diff --git a/include/sisl/fds/buffer.hpp b/include/sisl/fds/buffer.hpp index 41ebd7ca..972eccb1 100644 --- a/include/sisl/fds/buffer.hpp +++ b/include/sisl/fds/buffer.hpp @@ -248,8 +248,7 @@ struct io_blob : public blob { buf_alloc(sz, align_size, tag); } io_blob(uint8_t* const bytes, const uint32_t size, const bool is_aligned) : - blob(bytes, size), - aligned{is_aligned} {} + blob(bytes, size), aligned{is_aligned} {} ~io_blob() = default; void buf_alloc(const size_t sz, const uint32_t align_size = 512, const buftag tag = buftag::common) { @@ -300,24 +299,46 @@ struct io_blob : public blob { /* An extension to blob where the buffer it holds is allocated by constructor and freed during destruction. The only * reason why we have this instead of using vector< uint8_t > is that this supports allocating in aligned memory */ -struct byte_array_impl : public io_blob { - byte_array_impl(const uint32_t sz, const uint32_t alignment = 0, const buftag tag = buftag::common) : - io_blob(sz, alignment, tag), - m_tag{tag} {} - byte_array_impl(uint8_t* const bytes, const uint32_t size, const bool is_aligned) : - io_blob(bytes, size, is_aligned) {} - ~byte_array_impl() { io_blob::buf_free(m_tag); } - +struct io_blob_safe final : public io_blob { +public: buftag m_tag; + +public: + io_blob_safe(uint32_t sz, uint32_t alignment = 0, buftag tag = buftag::common) : + io_blob(sz, alignment, tag), m_tag{tag} {} + io_blob_safe(uint8_t* bytes, uint32_t size, bool is_aligned) : io_blob(bytes, size, is_aligned) {} + ~io_blob_safe() { + if (bytes != nullptr) { io_blob::buf_free(m_tag); } + } + + io_blob_safe(io_blob_safe const& other) = delete; + io_blob_safe(io_blob_safe&& other) : io_blob(std::move(other)), m_tag(other.m_tag) { + other.bytes = nullptr; + other.size = 0; + } + + io_blob_safe& operator=(io_blob_safe const& other) = delete; // Delete copy constructor + io_blob_safe& operator=(io_blob_safe&& other) { + if (bytes != nullptr) { this->buf_free(m_tag); } + + *((io_blob*)this) = std::move(*((io_blob*)&other)); + m_tag = other.m_tag; + + other.bytes = nullptr; + other.size = 0; + return *this; + } }; -using byte_array = std::shared_ptr< byte_array_impl >; +using byte_array_impl = io_blob_safe; + +using byte_array = std::shared_ptr< io_blob_safe >; inline byte_array make_byte_array(const uint32_t sz, const uint32_t alignment = 0, const buftag tag = buftag::common) { - return std::make_shared< byte_array_impl >(sz, alignment, tag); + return std::make_shared< io_blob_safe >(sz, alignment, tag); } inline byte_array to_byte_array(const sisl::io_blob& blob) { - return std::make_shared< byte_array_impl >(blob.bytes, blob.size, blob.aligned); + return std::make_shared< io_blob_safe >(blob.bytes, blob.size, blob.aligned); } struct byte_view { From c8a8f1b37fb1c05314e767410ca15992b44758b1 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Thu, 14 Sep 2023 09:46:44 -0700 Subject: [PATCH 356/385] byte_array_impl is io_blob_safe with RAII and movable --- conanfile.py | 2 +- include/sisl/cache/simple_hashmap.hpp | 54 +++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index ebf03bd1..c569af07 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "10.1.3" + version = "10.1.4" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" diff --git a/include/sisl/cache/simple_hashmap.hpp b/include/sisl/cache/simple_hashmap.hpp index 8d3bd099..ebfbc446 100644 --- a/include/sisl/cache/simple_hashmap.hpp +++ b/include/sisl/cache/simple_hashmap.hpp @@ -205,6 +205,40 @@ class SimpleHashBucket { return false; } + bool mutate(const K& input_key, const V& input_value, auto&& update_or_delete_cb) { +#ifndef GLOBAL_HASHSET_LOCK + folly::SharedMutexWritePriority::WriteHolder holder(m_lock); +#endif + SingleEntryHashNode< V >* n = nullptr; + + auto it = m_list.begin(); + for (auto itend{m_list.end()}; it != itend; ++it) { + const K k = SimpleHashMap< K, V >::extractor_cb()(it->m_value); + if (input_key > k) { + break; + } else if (input_key == k) { + n = &*it; + break; + } + } + + if (n == nullptr) { + n = new SingleEntryHashNode< V >(input_value); + m_list.insert(it, *n); + access_cb(*n, input_key, hash_op_t::CREATE); + return true; + } else { + if (update_or_delete_cb(n->m_value)) { + access_cb(*n, input_key, hash_op_t::DELETE); + m_list.erase(it); + delete n; + } else { + access_cb(*n, input_key, hash_op_t::ACCESS); + } + } + return false; + } + private: static void access_cb(const SingleEntryHashNode< V >& node, const K& key, hash_op_t op) { SimpleHashMap< K, V >::call_access_cb((const ValueEntryBase&)node, key, op); @@ -260,6 +294,26 @@ bool SimpleHashMap< K, V >::erase(const K& key, V& out_val) { return get_bucket(key).erase(key, out_val); } +/// This is a special atomic operation where user can insert_or_update_or_erase based on condition atomically. It +/// performs differently based on certain conditions. +/// +/// * If the key does not exist, it will insert the value (works exactlylike insert operation) and the mutate_cb is +/// not called +/// * If the key exist, then insert_val is ignored and mutate_cb is called with current value +/// Callback should so one of the following 2 operation +/// a) The current value can be updated and return false from callback - it works like an update operation +/// b) Return true from callback - in that case it will behave like erase operation of the KV +/// +/// Returns true if the value was inserted +template < typename K, typename V > +bool SimpleHashMap< K, V >::mutate(const K& key, const V& insert_val, auto&& update_or_delete_cb) { +#ifdef GLOBAL_HASHSET_LOCK + std::lock_guard< std::mutex > lk(m); +#endif + set_current_instance(this); + return get_bucket(key).mutate(key, insert_val, std::move(update_or_delete_cb)); +} + template < typename K, typename V > SimpleHashBucket< K, V >& SimpleHashMap< K, V >::get_bucket(const K& key) const { return (m_buckets[compute_hash(key) % m_nbuckets]); From bffef235a6cf124ed364ba86da80e9bd5d8dadc5 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Mon, 25 Sep 2023 08:12:48 -0700 Subject: [PATCH 357/385] SimpleHashmap to support atomic upsert or delete based on user callback --- conanfile.py | 2 +- include/sisl/cache/simple_hashmap.hpp | 67 ++++++++++++++++++++------- 2 files changed, 51 insertions(+), 18 deletions(-) diff --git a/conanfile.py b/conanfile.py index c569af07..f524b663 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "10.1.4" + version = "10.1.5" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" diff --git a/include/sisl/cache/simple_hashmap.hpp b/include/sisl/cache/simple_hashmap.hpp index ebfbc446..3316ba1b 100644 --- a/include/sisl/cache/simple_hashmap.hpp +++ b/include/sisl/cache/simple_hashmap.hpp @@ -73,6 +73,8 @@ class SimpleHashMap { bool upsert(const K& key, const V& value); bool get(const K& input_key, V& out_val); bool erase(const K& key, V& out_val); + bool update(const K& key, auto&& update_cb); + bool upsert_or_delete(const K& key, auto&& update_or_delete_cb); static void set_current_instance(SimpleHashMap< K, V >* hmap) { s_cur_hash_map = hmap; } static SimpleHashMap< K, V >* get_current_instance() { return s_cur_hash_map; } @@ -205,7 +207,7 @@ class SimpleHashBucket { return false; } - bool mutate(const K& input_key, const V& input_value, auto&& update_or_delete_cb) { + bool upsert_or_delete(const K& input_key, auto&& update_or_delete_cb) { #ifndef GLOBAL_HASHSET_LOCK folly::SharedMutexWritePriority::WriteHolder holder(m_lock); #endif @@ -222,23 +224,44 @@ class SimpleHashBucket { } } + bool found{true}; if (n == nullptr) { - n = new SingleEntryHashNode< V >(input_value); + n = new SingleEntryHashNode< V >(V{}); m_list.insert(it, *n); access_cb(*n, input_key, hash_op_t::CREATE); - return true; + found = false; + } + + if (update_or_delete_cb(n->m_value, found)) { + access_cb(*n, input_key, hash_op_t::DELETE); + m_list.erase(it); + delete n; } else { - if (update_or_delete_cb(n->m_value)) { - access_cb(*n, input_key, hash_op_t::DELETE); - m_list.erase(it); - delete n; - } else { - access_cb(*n, input_key, hash_op_t::ACCESS); - } + access_cb(*n, input_key, hash_op_t::ACCESS); } + return false; } + bool update(const K& input_key, auto&& update_cb) { +#ifndef GLOBAL_HASHSET_LOCK + folly::SharedMutexWritePriority::ReadHolder holder(m_lock); +#endif + bool found{false}; + for (auto& n : m_list) { + const K k = SimpleHashMap< K, V >::extractor_cb()(n.m_value); + if (input_key > k) { + break; + } else if (input_key == k) { + found = true; + access_cb(n, input_key, hash_op_t::ACCESS); + update_cb(n.m_value); + break; + } + } + return found; + } + private: static void access_cb(const SingleEntryHashNode< V >& node, const K& key, hash_op_t op) { SimpleHashMap< K, V >::call_access_cb((const ValueEntryBase&)node, key, op); @@ -297,21 +320,31 @@ bool SimpleHashMap< K, V >::erase(const K& key, V& out_val) { /// This is a special atomic operation where user can insert_or_update_or_erase based on condition atomically. It /// performs differently based on certain conditions. /// -/// * If the key does not exist, it will insert the value (works exactlylike insert operation) and the mutate_cb is -/// not called -/// * If the key exist, then insert_val is ignored and mutate_cb is called with current value -/// Callback should so one of the following 2 operation -/// a) The current value can be updated and return false from callback - it works like an update operation +/// NOTE: This method works only if the Value is default constructible +/// +/// * If the key does not exist, it will insert a default value and does the callback +/// +/// * Callback should so one of the following 2 operation +/// a) The current value can be updated and return false from callback - it works like an upsert operation /// b) Return true from callback - in that case it will behave like erase operation of the KV /// /// Returns true if the value was inserted template < typename K, typename V > -bool SimpleHashMap< K, V >::mutate(const K& key, const V& insert_val, auto&& update_or_delete_cb) { +bool SimpleHashMap< K, V >::upsert_or_delete(const K& key, auto&& update_or_delete_cb) { +#ifdef GLOBAL_HASHSET_LOCK + std::lock_guard< std::mutex > lk(m); +#endif + set_current_instance(this); + return get_bucket(key).upsert_or_delete(key, std::move(update_or_delete_cb)); +} + +template < typename K, typename V > +bool SimpleHashMap< K, V >::update(const K& key, auto&& update_cb) { #ifdef GLOBAL_HASHSET_LOCK std::lock_guard< std::mutex > lk(m); #endif set_current_instance(this); - return get_bucket(key).mutate(key, insert_val, std::move(update_or_delete_cb)); + return get_bucket(key).update(key, std::move(update_cb)); } template < typename K, typename V > From bd6b8c166654075f2a536c5c55e0ffd81b266cc9 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Mon, 25 Sep 2023 08:14:10 -0700 Subject: [PATCH 358/385] Updated conanfile --- conanfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index f524b663..c569af07 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "10.1.5" + version = "10.1.4" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" From d586f9224b3ad9c93cb6591b9edc17dedfddb35d Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Mon, 25 Sep 2023 09:50:56 -0700 Subject: [PATCH 359/385] upsert_on_delete to return true on insertion --- include/sisl/cache/simple_hashmap.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/sisl/cache/simple_hashmap.hpp b/include/sisl/cache/simple_hashmap.hpp index 3316ba1b..bd5b389f 100644 --- a/include/sisl/cache/simple_hashmap.hpp +++ b/include/sisl/cache/simple_hashmap.hpp @@ -240,7 +240,7 @@ class SimpleHashBucket { access_cb(*n, input_key, hash_op_t::ACCESS); } - return false; + return !found; } bool update(const K& input_key, auto&& update_cb) { From bd9b008866796bed78a21f896a215d516adbe11f Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 28 Sep 2023 07:22:14 -0700 Subject: [PATCH 360/385] Fix chain build. --- .github/workflows/merge_build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/merge_build.yml b/.github/workflows/merge_build.yml index 1d49a8fe..23c53fed 100644 --- a/.github/workflows/merge_build.yml +++ b/.github/workflows/merge_build.yml @@ -50,7 +50,7 @@ jobs: -H "Accept: application/vnd.github+json" \ -H "Authorization: Bearer ${{ secrets.CHAIN_BUILD_TOKEN }}"\ -H "X-GitHub-Api-Version: 2022-11-28" \ - https://api.github.com/repos/eBay/iomanager/actions/workflows/conan_build.yml/dispatches \ + https://api.github.com/repos/eBay/iomanager/actions/workflows/merge_build.yml/dispatches \ -d '{"ref":"master","inputs":{}}' if: ${{ github.ref == 'refs/heads/master' }} - name: Start NuraftMesg Build From 4ce39a92ab8b3e78cfe7e2bf4f895f15f09b10b9 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Tue, 17 Oct 2023 14:33:23 -0700 Subject: [PATCH 361/385] implement futures based grpc client API (#185) * implement futures based grpc client API * review_comments: reuse common logic in the call_unary method --------- Co-authored-by: Ravi Akella email = raakella@ebay.com --- conanfile.py | 2 +- include/sisl/grpc/rpc_client.hpp | 152 ++++++++++++------ src/CMakeLists.txt | 13 +- src/grpc/CMakeLists.txt | 1 + src/grpc/rpc_client.cpp | 31 ++-- src/grpc/tests/function/echo_async_client.cpp | 58 +++++-- 6 files changed, 183 insertions(+), 74 deletions(-) diff --git a/conanfile.py b/conanfile.py index c569af07..321ea4cb 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "10.1.4" + version = "10.2.1" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" diff --git a/include/sisl/grpc/rpc_client.hpp b/include/sisl/grpc/rpc_client.hpp index b06c1470..5b54190d 100644 --- a/include/sisl/grpc/rpc_client.hpp +++ b/include/sisl/grpc/rpc_client.hpp @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -63,23 +64,35 @@ template < typename RespT > using unary_callback_t = std::function< void(RespT&, ::grpc::Status& status) >; template < typename ReqT, typename RespT > -class ClientRpcDataInternal; +class ClientRpcDataCallback; + +template < typename ReqT, typename RespT > +class ClientRpcDataFuture; + +template < typename T > +using Result = folly::Expected< T, ::grpc::Status >; + +template < typename T > +using AsyncResult = folly::SemiFuture< Result< T > >; using GenericClientRpcData = ClientRpcData< grpc::ByteBuffer, grpc::ByteBuffer >; using generic_rpc_comp_cb_t = rpc_comp_cb_t< grpc::ByteBuffer, grpc::ByteBuffer >; using generic_req_builder_cb_t = req_builder_cb_t< grpc::ByteBuffer >; using generic_unary_callback_t = unary_callback_t< grpc::ByteBuffer >; -using GenericClientRpcDataInternal = ClientRpcDataInternal< grpc::ByteBuffer, grpc::ByteBuffer >; +using GenericClientRpcDataCallback = ClientRpcDataCallback< grpc::ByteBuffer, grpc::ByteBuffer >; +using GenericClientRpcDataFuture = ClientRpcDataFuture< grpc::ByteBuffer, grpc::ByteBuffer >; +using generic_result_t = Result< grpc::ByteBuffer >; +using generic_async_result_t = AsyncResult< grpc::ByteBuffer >; /** - * The specialized 'ClientRpcDataInternal' per gRPC call, it stores - * the response handler function - * + * The specialized 'ClientRpcDataInternal' per gRPC call, + * Derive from this class to create Rpc Data that can hold + * the response handler function or a promise */ template < typename ReqT, typename RespT > class ClientRpcDataInternal : public ClientRpcDataAbstract { public: - using ResponseReaderPtr = std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< RespT > >; + using ResponseReaderPtr = std::unique_ptr<::grpc::ClientAsyncResponseReaderInterface< RespT > >; using GenericResponseReaderPtr = std::unique_ptr< grpc::GenericClientAsyncResponseReader >; /* Allow GrpcAsyncClient and its inner classes to use @@ -88,7 +101,6 @@ class ClientRpcDataInternal : public ClientRpcDataAbstract { friend class GrpcAsyncClient; ClientRpcDataInternal() = default; - ClientRpcDataInternal(const unary_callback_t< RespT >& cb) : m_cb{cb} {} virtual ~ClientRpcDataInternal() = default; // TODO: support time in any time unit -- lhuang8 @@ -103,16 +115,12 @@ class ClientRpcDataInternal : public ClientRpcDataAbstract { RespT& reply() { return m_reply; } ::grpc::ClientContext& context() { return m_context; } - virtual void handle_response([[maybe_unused]] bool ok = true) override { - // For unary call, ok is always true, `status_` will indicate error if there are any. - m_cb(m_reply, m_status); - } + virtual void handle_response(bool ok = true) = 0; void add_metadata(const std::string& meta_key, const std::string& meta_value) { m_context.AddMetadata(meta_key, meta_value); } - unary_callback_t< RespT > m_cb; RespT m_reply; ::grpc::ClientContext m_context; ::grpc::Status m_status; @@ -120,6 +128,42 @@ class ClientRpcDataInternal : public ClientRpcDataAbstract { GenericResponseReaderPtr m_generic_resp_reader_ptr; }; +/** + * callback version of ClientRpcDataInternal + */ +template < typename ReqT, typename RespT > +class ClientRpcDataCallback : public ClientRpcDataInternal< ReqT, RespT > { +public: + ClientRpcDataCallback(const unary_callback_t< RespT >& cb) : m_cb{cb} {} + + virtual void handle_response([[maybe_unused]] bool ok = true) override { + // For unary call, ok is always true, `status_` will indicate error if there are any. + if (m_cb) { m_cb(this->m_reply, this->m_status); } + } + + unary_callback_t< RespT > m_cb; +}; + +/** + * futures version of ClientRpcDataInternal + */ +template < typename ReqT, typename RespT > +class ClientRpcDataFuture : public ClientRpcDataInternal< ReqT, RespT > { +public: + ClientRpcDataFuture(folly::Promise< Result< RespT > >&& promise) : m_promise{std::move(promise)} {} + + virtual void handle_response([[maybe_unused]] bool ok = true) override { + // For unary call, ok is always true, `status_` will indicate error if there are any. + if (this->m_status.ok()) { + m_promise.setValue(this->m_reply); + } else { + m_promise.setValue(folly::makeUnexpected(this->m_status)); + } + } + + folly::Promise< Result< RespT > > m_promise; +}; + template < typename ReqT, typename RespT > class ClientRpcData : public ClientRpcDataInternal< ReqT, RespT > { public: @@ -150,7 +194,7 @@ class GrpcBaseClient { const std::string m_target_domain; const std::string m_ssl_cert; - std::shared_ptr< ::grpc::ChannelInterface > m_channel; + std::shared_ptr<::grpc::ChannelInterface > m_channel; std::shared_ptr< sisl::GrpcTokenClient > m_token_client; public: @@ -266,12 +310,29 @@ class GrpcAsyncClient : public GrpcBaseClient { /* unary call helper */ template < typename RespT > - using unary_call_return_t = std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< RespT > >; + using unary_call_return_t = std::unique_ptr<::grpc::ClientAsyncResponseReaderInterface< RespT > >; template < typename ReqT, typename RespT > using unary_call_t = unary_call_return_t< RespT > (stub_t::*)(::grpc::ClientContext*, const ReqT&, ::grpc::CompletionQueue*); + template < typename ReqT, typename RespT > + void prepare_and_send_unary(ClientRpcDataInternal< ReqT, RespT >* data, const ReqT& request, + const unary_call_t< ReqT, RespT >& method, uint32_t deadline, + const std::vector< std::pair< std::string, std::string > >& metadata) { + data->set_deadline(deadline); + for (auto const& [key, value] : metadata) { + data->add_metadata(key, value); + } + if (m_token_client) { + data->add_metadata(m_token_client->get_auth_header_key(), m_token_client->get_token()); + } + // Note that async unary RPCs don't post a CQ tag in call + data->m_resp_reader_ptr = (m_stub.get()->*method)(&data->m_context, request, cq()); + // CQ tag posted here + data->m_resp_reader_ptr->Finish(&data->reply(), &data->status(), (void*)data); + } + // using unary_callback_t = std::function< void(RespT&, ::grpc::Status& status) >; /** @@ -292,21 +353,21 @@ class GrpcAsyncClient : public GrpcBaseClient { * OK before handling the response. If call failed, `::grpc::Status` * indicates the error code and error message. * @param deadline - deadline in seconds + * @param metadata - key value pair of the metadata to be sent with the request * */ + template < typename ReqT, typename RespT > + void call_unary(const ReqT& request, const unary_call_t< ReqT, RespT >& method, + const unary_callback_t< RespT >& callback, uint32_t deadline, + const std::vector< std::pair< std::string, std::string > >& metadata) { + auto data = new ClientRpcDataCallback< ReqT, RespT >(callback); + prepare_and_send_unary(data, request, method, deadline, metadata); + } + template < typename ReqT, typename RespT > void call_unary(const ReqT& request, const unary_call_t< ReqT, RespT >& method, const unary_callback_t< RespT >& callback, uint32_t deadline) { - auto data = new ClientRpcDataInternal< ReqT, RespT >(callback); - data->set_deadline(deadline); - if (m_token_client) { - data->add_metadata(m_token_client->get_auth_header_key(), m_token_client->get_token()); - } - // Note that async unary RPCs don't post a CQ tag in call - data->m_resp_reader_ptr = (m_stub.get()->*method)(&data->context(), request, cq()); - // CQ tag posted here - data->m_resp_reader_ptr->Finish(&data->reply(), &data->status(), (void*)data); - return; + call_unary(request, method, callback, deadline, {}); } template < typename ReqT, typename RespT > @@ -314,30 +375,24 @@ class GrpcAsyncClient : public GrpcBaseClient { const rpc_comp_cb_t< ReqT, RespT >& done_cb, uint32_t deadline) { auto cd = new ClientRpcData< ReqT, RespT >(done_cb); builder_cb(cd->m_req); - cd->set_deadline(deadline); - if (m_token_client) { - cd->add_metadata(m_token_client->get_auth_header_key(), m_token_client->get_token()); - } - cd->m_resp_reader_ptr = (m_stub.get()->*method)(&cd->context(), cd->m_req, cq()); - cd->m_resp_reader_ptr->Finish(&cd->reply(), &cd->status(), (void*)cd); + prepare_and_send_unary(cd, cd->m_req, method, deadline, {}); } + // Futures version of call_unary template < typename ReqT, typename RespT > - void call_unary(const ReqT& request, const unary_call_t< ReqT, RespT >& method, - const unary_callback_t< RespT >& callback, uint32_t deadline, - const std::vector< std::pair< std::string, std::string > >& metadata) { - auto data = new ClientRpcDataInternal< ReqT, RespT >(callback); - data->set_deadline(deadline); - for (auto const& [key, value] : metadata) { - data->add_metadata(key, value); - } - if (m_token_client) { - data->add_metadata(m_token_client->get_auth_header_key(), m_token_client->get_token()); - } - // Note that async unary RPCs don't post a CQ tag in call - data->m_resp_reader_ptr = (m_stub.get()->*method)(&data->context(), request, cq()); - // CQ tag posted here - data->m_resp_reader_ptr->Finish(&data->reply(), &data->status(), (void*)data); + AsyncResult< RespT > call_unary(const ReqT& request, const unary_call_t< ReqT, RespT >& method, + uint32_t deadline, + const std::vector< std::pair< std::string, std::string > >& metadata) { + auto [p, sf] = folly::makePromiseContract< Result< RespT > >(); + auto data = new ClientRpcDataFuture< ReqT, RespT >(std::move(p)); + prepare_and_send_unary(data, request, method, deadline, metadata); + return std::move(sf); + } + + template < typename ReqT, typename RespT > + AsyncResult< RespT > call_unary(const ReqT& request, const unary_call_t< ReqT, RespT >& method, + uint32_t deadline) { + return call_unary(request, method, deadline, {}); } StubPtr< ServiceT > m_stub; @@ -362,12 +417,19 @@ class GrpcAsyncClient : public GrpcBaseClient { std::shared_ptr< sisl::GrpcTokenClient > token_client) : m_generic_stub(std::move(stub)), m_worker(worker), m_token_client(token_client) {} + void prepare_and_send_unary_generic(ClientRpcDataInternal< grpc::ByteBuffer, grpc::ByteBuffer >* data, + const grpc::ByteBuffer& request, const std::string& method, uint32_t deadline); + void call_unary(const grpc::ByteBuffer& request, const std::string& method, const generic_unary_callback_t& callback, uint32_t deadline); void call_rpc(const generic_req_builder_cb_t& builder_cb, const std::string& method, const generic_rpc_comp_cb_t& done_cb, uint32_t deadline); + // futures version of call_unary + generic_async_result_t call_unary(const grpc::ByteBuffer& request, const std::string& method, + uint32_t deadline); + std::unique_ptr< grpc::GenericStub > m_generic_stub; GrpcAsyncClientWorker* m_worker; std::shared_ptr< sisl::GrpcTokenClient > m_token_client; diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 768116bf..ec2bd709 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -9,13 +9,14 @@ add_subdirectory (sobject) # on Folly and pistache. It is unknown if Windows is supported... list(APPEND POSIX_LIBRARIES ) list(APPEND SISL_DEPS ) -if(${userspace-rcu_FOUND}) - add_subdirectory (grpc) - list(APPEND POSIX_LIBRARIES - $ - ) -endif() + if(${folly_FOUND}) + if(${userspace-rcu_FOUND}) + add_subdirectory (grpc) + list(APPEND POSIX_LIBRARIES + $ + ) + endif() add_subdirectory (cache) add_subdirectory (fds) add_subdirectory (file_watcher) diff --git a/src/grpc/CMakeLists.txt b/src/grpc/CMakeLists.txt index c5e31fac..7ca0105b 100644 --- a/src/grpc/CMakeLists.txt +++ b/src/grpc/CMakeLists.txt @@ -13,6 +13,7 @@ target_sources(sisl_grpc PRIVATE target_link_libraries(sisl_grpc gRPC::grpc++ flatbuffers::flatbuffers + Folly::Folly ${COMMON_DEPS} ) diff --git a/src/grpc/rpc_client.cpp b/src/grpc/rpc_client.cpp index 6f36c3eb..c6f85439 100644 --- a/src/grpc/rpc_client.cpp +++ b/src/grpc/rpc_client.cpp @@ -132,28 +132,37 @@ void GrpcAsyncClientWorker::shutdown_all() { s_workers.clear(); } -void GrpcAsyncClient::GenericAsyncStub::call_unary(const grpc::ByteBuffer& request, const std::string& method, - const generic_unary_callback_t& callback, uint32_t deadline) { - auto data = new GenericClientRpcDataInternal(callback); +void GrpcAsyncClient::GenericAsyncStub::prepare_and_send_unary_generic( + ClientRpcDataInternal< grpc::ByteBuffer, grpc::ByteBuffer >* data, const grpc::ByteBuffer& request, + const std::string& method, uint32_t deadline) { data->set_deadline(deadline); if (m_token_client) { data->add_metadata(m_token_client->get_auth_header_key(), m_token_client->get_token()); } // Note that async unary RPCs don't post a CQ tag in call - data->m_generic_resp_reader_ptr = m_generic_stub->PrepareUnaryCall(&data->context(), method, request, cq()); + data->m_generic_resp_reader_ptr = m_generic_stub->PrepareUnaryCall(&data->m_context, method, request, cq()); data->m_generic_resp_reader_ptr->StartCall(); // CQ tag posted here data->m_generic_resp_reader_ptr->Finish(&data->reply(), &data->status(), (void*)data); - return; +} + +void GrpcAsyncClient::GenericAsyncStub::call_unary(const grpc::ByteBuffer& request, const std::string& method, + const generic_unary_callback_t& callback, uint32_t deadline) { + auto data = new GenericClientRpcDataCallback(callback); + prepare_and_send_unary_generic(data, request, method, deadline); } void GrpcAsyncClient::GenericAsyncStub::call_rpc(const generic_req_builder_cb_t& builder_cb, const std::string& method, const generic_rpc_comp_cb_t& done_cb, uint32_t deadline) { auto cd = new GenericClientRpcData(done_cb); builder_cb(cd->m_req); - cd->set_deadline(deadline); - if (m_token_client) { cd->add_metadata(m_token_client->get_auth_header_key(), m_token_client->get_token()); } - cd->m_generic_resp_reader_ptr = m_generic_stub->PrepareUnaryCall(&cd->context(), method, cd->m_req, cq()); - cd->m_generic_resp_reader_ptr->StartCall(); - cd->m_generic_resp_reader_ptr->Finish(&cd->reply(), &cd->status(), (void*)cd); + prepare_and_send_unary_generic(cd, cd->m_req, method, deadline); +} + +generic_async_result_t GrpcAsyncClient::GenericAsyncStub::call_unary(const grpc::ByteBuffer& request, + const std::string& method, uint32_t deadline) { + auto [p, sf] = folly::makePromiseContract< generic_result_t >(); + auto data = new GenericClientRpcDataFuture(std::move(p)); + prepare_and_send_unary_generic(data, request, method, deadline); + return std::move(sf); } std::unique_ptr< GrpcAsyncClient::GenericAsyncStub > GrpcAsyncClient::make_generic_stub(const std::string& worker) { @@ -163,4 +172,4 @@ std::unique_ptr< GrpcAsyncClient::GenericAsyncStub > GrpcAsyncClient::make_gener return std::make_unique< GrpcAsyncClient::GenericAsyncStub >(std::make_unique< grpc::GenericStub >(m_channel), w, m_token_client); } -} // namespace sisl::grpc +} // namespace sisl diff --git a/src/grpc/tests/function/echo_async_client.cpp b/src/grpc/tests/function/echo_async_client.cpp index 064b0042..1a92b9a2 100644 --- a/src/grpc/tests/function/echo_async_client.cpp +++ b/src/grpc/tests/function/echo_async_client.cpp @@ -89,7 +89,7 @@ class TestClient { static constexpr int GRPC_CALL_COUNT = 400; const std::string WORKER_NAME{"Worker-1"}; - void validate_echo_reply(const EchoRequest& req, EchoReply& reply, ::grpc::Status& status) { + void validate_echo_reply(const EchoRequest& req, EchoReply& reply, ::grpc::Status const& status) { RELEASE_ASSERT_EQ(status.ok(), true, "echo request {} failed, status {}: {}", req.message(), status.error_code(), status.error_message()); LOGDEBUGMOD(grpc_server, "echo request {} reply {}", req.message(), reply.message()); @@ -100,7 +100,7 @@ class TestClient { } } - void validate_ping_reply(const PingRequest& req, PingReply& reply, ::grpc::Status& status) { + void validate_ping_reply(const PingRequest& req, PingReply& reply, ::grpc::Status const& status) { RELEASE_ASSERT_EQ(status.ok(), true, "ping request {} failed, status {}: {}", req.seqno(), status.error_code(), status.error_message()); LOGDEBUGMOD(grpc_server, "ping request {} reply {}", req.seqno(), reply.seqno()); @@ -111,7 +111,7 @@ class TestClient { } } - void validate_generic_reply(const DataMessage& req, grpc::ByteBuffer& reply, ::grpc::Status& status) { + void validate_generic_reply(const DataMessage& req, grpc::ByteBuffer& reply, ::grpc::Status const& status) { RELEASE_ASSERT_EQ(status.ok(), true, "generic request {} failed, status {}: {}", req.m_seqno, status.error_code(), status.error_message()); DataMessage svr_msg; @@ -140,7 +140,7 @@ class TestClient { for (int i = 1; i <= GRPC_CALL_COUNT; ++i) { if ((i % 2) == 0) { - if ((i % 4) == 0) { + if ((i % 3) == 0) { EchoRequest req; req.set_message(std::to_string(i)); echo_stub->call_unary< EchoRequest, EchoReply >( @@ -149,7 +149,7 @@ class TestClient { validate_echo_reply(req, reply, status); }, 1); - } else { + } else if (i % 3 == 1) { echo_stub->call_rpc< EchoRequest, EchoReply >( [i](EchoRequest& req) { req.set_message(std::to_string(i)); }, &EchoService::StubInterface::AsyncEcho, @@ -157,10 +157,22 @@ class TestClient { validate_echo_reply(cd.req(), cd.reply(), cd.status()); }, 1); + } else { + EchoRequest req; + req.set_message(std::to_string(i)); + echo_stub->call_unary< EchoRequest, EchoReply >(req, &EchoService::StubInterface::AsyncEcho, 1) + .deferValue([req, this](auto e) { + RELEASE_ASSERT(e.hasValue(), "echo request {} failed, status {}: {}", req.message(), + e.error().error_code(), e.error().error_message()); + validate_echo_reply(req, e.value(), grpc::Status::OK); + return folly::Unit(); + }) + .get(); } } else if ((i % 3) == 0) { - // divide all numbers divisible by 3 and not by 2 into two equal buckets - if ((((i + 3) / 6) % 2) == 0) { + // divide all numbers divisible by 3 and not by 2 into three equal buckets + auto const j = (i + 3) / 6; + if (j % 3 == 0) { PingRequest req; req.set_seqno(i); ping_stub->call_unary< PingRequest, PingReply >( @@ -169,17 +181,29 @@ class TestClient { validate_ping_reply(req, reply, status); }, 1); - } else { + } else if (j % 3 == 1) { ping_stub->call_rpc< PingRequest, PingReply >( [i](PingRequest& req) { req.set_seqno(i); }, &PingService::StubInterface::AsyncPing, [this](ClientRpcData< PingRequest, PingReply >& cd) { validate_ping_reply(cd.req(), cd.reply(), cd.status()); }, 1); + } else { + PingRequest req; + req.set_seqno(i); + ping_stub->call_unary< PingRequest, PingReply >(req, &PingService::StubInterface::AsyncPing, 1) + .deferValue([req, this](auto e) { + RELEASE_ASSERT(e.hasValue(), "ping request {} failed, status {}: {}", req.seqno(), + e.error().error_code(), e.error().error_message()); + validate_ping_reply(req, e.value(), grpc::Status::OK); + return folly::Unit(); + }) + .get(); } } else { - // divide all numbers not divisible by 2 and 3 into two equal buckets - if (((i + 1) % 6) == 0) { + // divide all numbers not divisible by 2 and 3 into three equal buckets + static uint32_t j = 0u; + if ((j++ % 3) == 0) { DataMessage req(i, GENERIC_CLIENT_MESSAGE); grpc::ByteBuffer cli_buf; SerializeToByteBuffer(cli_buf, req); @@ -189,7 +213,7 @@ class TestClient { validate_generic_reply(req, reply, status); }, 1); - } else { + } else if (((j++ % 3) == 1)) { DataMessage data_msg(i, GENERIC_CLIENT_MESSAGE); generic_stub->call_rpc([data_msg](grpc::ByteBuffer& req) { SerializeToByteBuffer(req, data_msg); }, GENERIC_METHOD, @@ -197,6 +221,18 @@ class TestClient { validate_generic_reply(data_msg, cd.reply(), cd.status()); }, 1); + } else { + DataMessage req(i, GENERIC_CLIENT_MESSAGE); + grpc::ByteBuffer cli_buf; + SerializeToByteBuffer(cli_buf, req); + generic_stub->call_unary(cli_buf, GENERIC_METHOD, 1) + .deferValue([req, this](auto e) { + RELEASE_ASSERT(e.hasValue(), "generic request {} failed, status {}: {}", req.m_seqno, + e.error().error_code(), e.error().error_message()); + validate_generic_reply(req, e.value(), grpc::Status::OK); + return folly::Unit(); + }) + .get(); } } } From 0de227c076b9f24a9ec862238bb8b95115f0feba Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 17 Oct 2023 16:36:33 -0700 Subject: [PATCH 362/385] Remove old debug options from FDS days with gcc 4.x (#186) --- CMakeLists.txt | 8 +++-- cmake/debug_flags.cmake | 72 ----------------------------------------- 2 files changed, 6 insertions(+), 74 deletions(-) delete mode 100644 cmake/debug_flags.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index ea7728c9..e03743ab 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -24,8 +24,12 @@ if (DEFINED ENABLE_TESTING) endif() endif() -if (${CMAKE_BUILD_TYPE} STREQUAL "Debug") - include (cmake/debug_flags.cmake) +if (DEFINED CONAN_BUILD_COVERAGE) + if (${CONAN_BUILD_COVERAGE}) + include (cmake/CodeCoverage.cmake) + APPEND_COVERAGE_COMPILER_FLAGS() + SETUP_TARGET_FOR_COVERAGE_GCOVR_XML(NAME coverage EXECUTABLE ctest DEPENDENCIES ) + endif() endif() if (DEFINED MALLOC_IMPL) diff --git a/cmake/debug_flags.cmake b/cmake/debug_flags.cmake deleted file mode 100644 index 6dab2ec2..00000000 --- a/cmake/debug_flags.cmake +++ /dev/null @@ -1,72 +0,0 @@ -# This list is generated from the output of: -# -# gcc -Q --help=optimizers -O0 -# -# with GCC 4.8.4 (Ubuntu 4.8.4-2ubuntu1-14.04.3). Yes, every one of these flags -# is on even with -O0 specified, and nothing changes when you add debugging -# options (-g/-g3/-gdwarf-4/etc.) in there. This should be updated every time -# the version of GCC used to compile changes. -# -# If you add an option here, it is your responsibility to comment it, with the -# following convention (feel free to add your own if there's not one suitable). -# DO YOUR RESEARCH. -# -# CBWITPOB: Can be wrong in the presence of bugs. When are you usually -# debugging? When there's a bug. Optimizations that can be wrong -# in the presence of bugs mean that, for example, you won't see -# a variable be modified when it actually happens--if it's -# modified due to the bug, as far as the debugger is concerned, -# it wasn't modified by the program, and things like conditional -# breakpoints won't work right, unless maybe it's a volatile -# variable. -# Inlining: Although GDB claims to track this correctly with -g3 and inject -# the code while you're stepping, it does not. You'll either be -# missing stack frames, or unable to view locals when you step -# to that frame--even if those locals exist nowhere else (i.e. -# not a function argument or tail return value). -# Eliding: Behavior may not change, but who knows where the values come -# from. -# Hoisting: Your program is not running instructions in the order of the -# code. Again, GDB claims to handle this, but it does not, or at -# least not well. -# Vectorizing: Great optimization, but the simulation of going through for -# loops is far from perfect, especially when you're dealing -# with bugs. -# -# And yes, these optimizations severely effect the quality of the debugging -# experience. Without these, you're lucky to be able to step into 80% of the -# stack, and of that 80%, you'll see anywhere from 50% to 100% of locals -# missing values. With these, I've never seen a stack frame I couldn't step -# into, and never seen when I look at a local. -# -set (REALLY_NO_OPTIMIZATION_FLAGS ) -if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU")) - set (REALLY_NO_OPTIMIZATION_FLAGS "-fno-short-enums" )# Binary-incompatible with code compiled otherwise. - set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-aggressive-loop-optimizations" ) # Changes behavior on overflow. - set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-branch-count-reg" )# Changes CPU instructions used. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-dce )# Can be wrong in the presence of bugs (CBWITPOB). set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-delete-null-pointer-checks )# CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-dse )# CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-early-inlining )# NO INLINING! Because... set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-gcse-lm )# Changes CPU instructions used. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-inline )# ...inlining also does things like elide locals. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-ira-hoist-pressure )# Might be irrelevant, but NO HOISTING! set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-ivopts )# Elides and changes instructions. CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-jump-tables )# Changes CPU instructions for switch statements. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-move-loop-invariants )# NO HOISTING! set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-peephole )# Exploiting CPU quirks. CBWITPOB. set (REALLY_NO_OPTIMIZATION_FLAGS ="${REALLY_NO_OPTIMIZATION_FLAGS}+= -fno-prefetch-loop-arrays )# Changes CPU instructions, even GCC manual is ambivalent. - set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-rename-registers" )# Maybe wrong in the presence of bugs? - set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-toplevel-reorder" )# Elides unused static variable, reorders globals. - set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-coalesce-vars" )# Elides temporaries. CBWITPOB. - set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-cselim" )# Reorders, violates C++ mem model, CBWITPOB. - set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-forwprop" )# Reorders and changes instructions. CBWITPOB. - set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-loop-if-convert" )# Reorders and changes instructions. CBWITPOB. - set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-loop-im" )# Reorders and changes instructions. CBWITPOB. - set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-loop-optimize" )# Reorders and changes instructions. CBWITPOB. - set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-phiprop" )# NO HOISTING! Reorders and changes. CBWITPOB. - set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-pta" )# Less analysis means maybe less interference. - set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-reassoc" )# Elides and vectories. - set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-scev-cprop" )# Elides and changes instructions. - set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-vect-loop-version" )# E&C. - set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-web" )# E&C. - set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fno-tree-slp-vectorize" )# E&C. - set (REALLY_NO_OPTIMIZATION_FLAGS "${REALLY_NO_OPTIMIZATION_FLAGS} -fthreadsafe-statics" )# Slightly smaller in code that doesn't need to be TS. -endif() - -if (DEFINED CONAN_BUILD_COVERAGE) - if (${CONAN_BUILD_COVERAGE}) - include (cmake/CodeCoverage.cmake) - APPEND_COVERAGE_COMPILER_FLAGS() - SETUP_TARGET_FOR_COVERAGE_GCOVR_XML(NAME coverage EXECUTABLE ctest DEPENDENCIES ) - endif() -endif() -set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${REALLY_NO_OPTIMIZATION_FLAGS}") From 91a68352d228fb53750d7dc98ae8662b5ccd4ad3 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Tue, 24 Oct 2023 07:59:13 -0700 Subject: [PATCH 363/385] CompactBitset utility to turn bunch of bits to a bitset without additional serialization requirements (#184) This PR introduces the CompactBitset, which does support set/reset bits and find next available set / reset bit. The caller is expected to provide the buffer and its size as part of its constructor (though it does have a buildable constructor as well). --- CMakeLists.txt | 2 +- conanfile.py | 2 +- include/sisl/fds/bitword.hpp | 17 ++- include/sisl/fds/buffer.hpp | 2 +- include/sisl/fds/compact_bitset.hpp | 175 ++++++++++++++++++++++++++ include/sisl/fds/utils.hpp | 1 + src/fds/CMakeLists.txt | 7 ++ src/fds/tests/test_compact_bitset.cpp | 158 +++++++++++++++++++++++ 8 files changed, 357 insertions(+), 7 deletions(-) create mode 100644 include/sisl/fds/compact_bitset.hpp create mode 100644 src/fds/tests/test_compact_bitset.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index e03743ab..58929c8b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -8,7 +8,7 @@ set_property(GLOBAL PROPERTY USE_FOLDERS ON) # turn on folder hierarchies include (cmake/Flags.cmake) -set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD 20) if(EXISTS ${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) diff --git a/conanfile.py b/conanfile.py index 321ea4cb..74b61bc2 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "10.2.1" + version = "10.2.2" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" diff --git a/include/sisl/fds/bitword.hpp b/include/sisl/fds/bitword.hpp index d65d021b..69c910eb 100644 --- a/include/sisl/fds/bitword.hpp +++ b/include/sisl/fds/bitword.hpp @@ -398,6 +398,16 @@ class Bitword { } } + bool get_prev_set_bit(uint8_t start, uint8_t* p_set_bit) const { + const word_t e{extract(0, start + 1)}; + if (e) { + *p_set_bit = logBase2(e); + return true; + } else { + return false; + } + } + uint8_t get_next_reset_bits(const uint8_t start, uint8_t* const pcount) const { assert(start < bits()); assert(pcount); @@ -536,10 +546,9 @@ class Bitword { std::string to_string() const { std::ostringstream oSS{}; - const word_t e{m_bits.get()}; - word_t mask{static_cast< word_t >(bit_mask[bits() - 1])}; - for (uint8_t bit{0}; bit < bits(); ++bit, mask >>= 1) { - oSS << (((e & mask) == mask) ? '1' : '0'); + const word_t e = m_bits.get(); + for (uint8_t bit{0}; bit < bits(); ++bit) { + oSS << (((e & bit_mask[bit]) == bit_mask[bit]) ? '1' : '0'); } return oSS.str(); } diff --git a/include/sisl/fds/buffer.hpp b/include/sisl/fds/buffer.hpp index 972eccb1..505fc603 100644 --- a/include/sisl/fds/buffer.hpp +++ b/include/sisl/fds/buffer.hpp @@ -37,7 +37,7 @@ struct blob { uint32_t size; blob() : blob{nullptr, 0} {} - blob(uint8_t* const b, const uint32_t s) : bytes{b}, size{s} {} + blob(uint8_t* b, uint32_t s) : bytes{b}, size{s} {} }; using sg_iovs_t = folly::small_vector< iovec, 4 >; diff --git a/include/sisl/fds/compact_bitset.hpp b/include/sisl/fds/compact_bitset.hpp new file mode 100644 index 00000000..53c9df02 --- /dev/null +++ b/include/sisl/fds/compact_bitset.hpp @@ -0,0 +1,175 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ +#include +#include +#include +#include + +namespace sisl { +class CompactBitSet { +public: + using bit_count_t = uint32_t; + +private: + using bitword_type = Bitword< unsafe_bits< uint64_t > >; + + struct serialized { + bitword_type words[1]{bitword_type{}}; + }; + + bit_count_t nbits_{0}; + bool allocated_{false}; + serialized* s_{nullptr}; + +private: + static constexpr size_t word_size_bytes() { return sizeof(unsafe_bits< uint64_t >); } + static constexpr size_t word_size_bits() { return word_size_bytes() * 8; } + static constexpr uint64_t word_mask() { return bitword_type::bits() - 1; } + +public: + static constexpr bit_count_t inval_bit = std::numeric_limits< bit_count_t >::max(); + static constexpr uint8_t size_multiples() { return word_size_bytes(); } + + explicit CompactBitSet(bit_count_t nbits) { + DEBUG_ASSERT_GT(nbits, 0, "compact bitset should have nbits > 0"); + nbits_ = s_cast< bit_count_t >(sisl::round_up(nbits, word_size_bits())); + size_t const buf_size = nbits_ / 8; + + uint8_t* buf = new uint8_t[buf_size]; + std::memset(buf, 0, buf_size); + s_ = r_cast< serialized* >(buf); + allocated_ = true; + } + + CompactBitSet(sisl::blob const& buf, bool init_bits) : s_{r_cast< serialized* >(buf.bytes)} { + DEBUG_ASSERT_GT(buf.size, 0, "compact bitset initialized with empty buffer"); + DEBUG_ASSERT_EQ(buf.size % word_size_bytes(), 0, "compact bitset buffer size must be multiple of word size"); + nbits_ = buf.size * 8; + if (init_bits) { std::memset(buf.bytes, 0, buf.size); } + } + + ~CompactBitSet() { + if (allocated_) { delete[] uintptr_cast(s_); } + } + + bit_count_t size() const { return nbits_; } + void set_bit(bit_count_t start) { set_reset_bit(start, true); } + void reset_bit(bit_count_t start) { set_reset_bit(start, false); } + + bool is_bit_set(bit_count_t bit) const { + bitword_type const* word_ptr = get_word_const(bit); + if (!word_ptr) { return false; } + uint8_t const offset = get_word_offset(bit); + return word_ptr->is_bit_set_reset(offset, true); + } + + bit_count_t get_next_set_bit(bit_count_t start_bit) const { return get_next_set_or_reset_bit(start_bit, true); } + bit_count_t get_next_reset_bit(bit_count_t start_bit) const { return get_next_set_or_reset_bit(start_bit, false); } + + /// @brief This method gets the previous set bit from starting bit (including the start bit). So if start bit + /// is 1, it will return the start bit. + /// @param start_bit: Start bit should be > 0 and <= size() + /// @return Returns the previous set bit or inval_bit if nothing is set + bit_count_t get_prev_set_bit(bit_count_t start_bit) const { + // check first word which may be partial + uint8_t offset = get_word_offset(start_bit); + bit_count_t word_idx = get_word_index(start_bit); + + do { + bitword_type const* word_ptr = &s_->words[word_idx]; + if (!word_ptr) { return inval_bit; } + + uint8_t nbit{0}; + if (word_ptr->get_prev_set_bit(offset, &nbit)) { return start_bit - (offset - nbit); } + + start_bit -= offset; + offset = bitword_type::bits(); + } while (word_idx-- != 0); + + return inval_bit; + } + + void set_reset_bit(bit_count_t bit, bool value) { + bitword_type* word_ptr = get_word(bit); + if (!word_ptr) { return; } + uint8_t const offset = get_word_offset(bit); + word_ptr->set_reset_bits(offset, 1, value); + } + + bit_count_t get_next_set_or_reset_bit(bit_count_t start_bit, bool search_for_set_bit) const { + bit_count_t ret{inval_bit}; + + // check first word which may be partial + uint8_t const offset = get_word_offset(start_bit); + bitword_type const* word_ptr = get_word_const(start_bit); + if (!word_ptr) { return ret; } + + uint8_t nbit{0}; + bool found = search_for_set_bit ? word_ptr->get_next_set_bit(offset, &nbit) + : word_ptr->get_next_reset_bit(offset, &nbit); + if (found) { ret = start_bit + nbit - offset; } + + if (ret == inval_bit) { + // test rest of whole words + bit_count_t current_bit = start_bit + (bitword_type::bits() - offset); + bit_count_t bits_remaining = (current_bit > size()) ? 0 : size() - current_bit; + while (bits_remaining > 0) { + ++word_ptr; + found = + search_for_set_bit ? word_ptr->get_next_set_bit(0, &nbit) : word_ptr->get_next_reset_bit(0, &nbit); + if (found) { + ret = current_bit + nbit; + break; + } + current_bit += bitword_type::bits(); + bits_remaining -= std::min< bit_count_t >(bits_remaining, bitword_type::bits()); + } + } + + if (ret >= size()) { ret = inval_bit; } + return ret; + } + + std::string to_string() const { + std::string str; + auto const num_words = size() / word_size_bits(); + for (uint32_t i{0}; i < num_words; ++i) { + fmt::format_to(std::back_inserter(str), "{}", s_->words[i].to_string()); + } + return str; + } + +private: + bitword_type* get_word(bit_count_t bit) { + return (sisl_unlikely(bit >= nbits_)) ? nullptr : &s_->words[bit / word_size_bits()]; + } + + bitword_type const* get_word_const(bit_count_t bit) const { + return (sisl_unlikely(bit >= nbits_)) ? nullptr : &s_->words[bit / word_size_bits()]; + } + + bit_count_t get_word_index(bit_count_t bit) const { + DEBUG_ASSERT(s_, "compact bitset not initialized"); + return bit / word_size_bits(); + } + + uint8_t get_word_offset(bit_count_t bit) const { + assert(s_); + return static_cast< uint8_t >(bit & word_mask()); + } +}; +} // namespace sisl diff --git a/include/sisl/fds/utils.hpp b/include/sisl/fds/utils.hpp index 0832a76e..6615103e 100644 --- a/include/sisl/fds/utils.hpp +++ b/include/sisl/fds/utils.hpp @@ -224,6 +224,7 @@ static int spaceship_oper(const T& left, const T& right) { #define uintptr_cast reinterpret_cast< uint8_t* > #define voidptr_cast reinterpret_cast< void* > +#define c_voidptr_cast reinterpret_cast< const void* > #define charptr_cast reinterpret_cast< char* > #define c_charptr_cast reinterpret_cast< const char* > #define int_cast static_cast< int > diff --git a/src/fds/CMakeLists.txt b/src/fds/CMakeLists.txt index d0841fe5..b53fe5cd 100644 --- a/src/fds/CMakeLists.txt +++ b/src/fds/CMakeLists.txt @@ -38,6 +38,13 @@ if (DEFINED ENABLE_TESTING) target_link_libraries(test_bitword sisl ${COMMON_DEPS} GTest::gtest) add_test(NAME Bitword COMMAND test_bitset) + add_executable(test_compact_bitset) + target_sources(test_compact_bitset PRIVATE + tests/test_compact_bitset.cpp + ) + target_link_libraries(test_compact_bitset sisl ${COMMON_DEPS} GTest::gtest) + add_test(NAME CompactBitset COMMAND test_compact_bitset) + add_executable(obj_allocator_benchmark) target_sources(obj_allocator_benchmark PRIVATE tests/obj_allocator_benchmark.cpp diff --git a/src/fds/tests/test_compact_bitset.cpp b/src/fds/tests/test_compact_bitset.cpp new file mode 100644 index 00000000..7ce9cc95 --- /dev/null +++ b/src/fds/tests/test_compact_bitset.cpp @@ -0,0 +1,158 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ +#include +#include +#include +#include + +#include +#include + +#include + +#include + +using namespace sisl; + +SISL_OPTIONS_ENABLE(logging, test_compact_bitset) + +class CompactBitsetTest : public testing::Test { +protected: + sisl::io_blob_safe m_buf; + std::unique_ptr< CompactBitSet > m_bset; + +public: + CompactBitsetTest() : + testing::Test(), + m_buf{uint32_cast( + sisl::round_up(SISL_OPTIONS["buf_size"].as< uint32_t >(), CompactBitSet::size_multiples()))} {} + CompactBitsetTest(const CompactBitsetTest&) = delete; + CompactBitsetTest(CompactBitsetTest&&) noexcept = delete; + CompactBitsetTest& operator=(const CompactBitsetTest&) = delete; + CompactBitsetTest& operator=(CompactBitsetTest&&) noexcept = delete; + virtual ~CompactBitsetTest() override = default; + +protected: + void SetUp() override { m_bset = std::make_unique< CompactBitSet >(m_buf, true); } + void TearDown() override {} +}; + +TEST_F(CompactBitsetTest, AlternateBits) { + ASSERT_EQ(m_bset->size(), m_buf.size * 8); + + for (CompactBitSet::bit_count_t i{0}; i < m_bset->size(); ++i) { + ASSERT_EQ(m_bset->is_bit_set(i), false); + } + + // Set alternate bits + for (CompactBitSet::bit_count_t i{0}; i < m_bset->size(); i += 2) { + m_bset->set_bit(i); + } + + for (CompactBitSet::bit_count_t i{0}; i < m_bset->size(); ++i) { + ASSERT_EQ(m_bset->is_bit_set(i), (i % 2 == 0)); + } + + // Validate if next set or reset bit starting from itself returns itself back + for (CompactBitSet::bit_count_t i{0}; i < m_bset->size(); ++i) { + ASSERT_EQ(m_bset->get_next_set_or_reset_bit(i, ((i % 2) == 0)), i); + } + + // Validate if next set or reset bit starting from previous returns next bit + for (CompactBitSet::bit_count_t i{1}; i < m_bset->size(); ++i) { + ASSERT_EQ(m_bset->get_next_set_or_reset_bit(i - 1, ((i % 2) == 0)), i); + } +} + +TEST_F(CompactBitsetTest, AllBits) { + // Set all bits + for (CompactBitSet::bit_count_t i{0}; i < m_bset->size(); ++i) { + m_bset->set_bit(i); + } + + for (CompactBitSet::bit_count_t i{0}; i < m_bset->size(); ++i) { + ASSERT_EQ(m_bset->is_bit_set(i), true); + } + + for (CompactBitSet::bit_count_t i{0}; i < m_bset->size(); ++i) { + ASSERT_EQ(m_bset->get_next_set_bit(i), i); + ASSERT_EQ(m_bset->get_next_reset_bit(i), CompactBitSet::inval_bit); + } +} + +TEST_F(CompactBitsetTest, RandomBitsWithReload) { + auto const num_bits = m_bset->size(); + boost::dynamic_bitset<> shadow_bset{num_bits}; + + std::random_device rd; + std::mt19937 re(rd()); + std::uniform_int_distribution< CompactBitSet::bit_count_t > bit_gen(0, num_bits - 1); + for (uint64_t i{0}; i < num_bits / 2; ++i) { + auto bit = bit_gen(re); + shadow_bset.set(bit); + m_bset->set_bit(s_cast< CompactBitSet::bit_count_t >(bit)); + } + + auto validate = [this, &shadow_bset]() { + CompactBitSet::bit_count_t prev_set_bit{CompactBitSet::inval_bit}; + for (uint64_t i{0}; i < m_bset->size(); ++i) { + auto next_shadow_set_bit = (i == 0) ? shadow_bset.find_first() : shadow_bset.find_next(i - 1); + CompactBitSet::bit_count_t next_set_bit = m_bset->get_next_set_bit(i); + if (next_shadow_set_bit == boost::dynamic_bitset<>::npos) { + ASSERT_EQ(next_set_bit, CompactBitSet::inval_bit); + } else { + ASSERT_EQ(next_set_bit, next_shadow_set_bit); + if (next_set_bit == i) { prev_set_bit = i; } + ASSERT_EQ(m_bset->get_prev_set_bit(i), prev_set_bit); + } + } + + // Flip it back so we can look for reset bits + shadow_bset = shadow_bset.flip(); + for (uint64_t i{0}; i < m_bset->size(); ++i) { + auto next_shadow_reset_bit = (i == 0) ? shadow_bset.find_first() : shadow_bset.find_next(i - 1); + CompactBitSet::bit_count_t next_reset_bit = m_bset->get_next_reset_bit(i); + if (next_shadow_reset_bit == boost::dynamic_bitset<>::npos) { + ASSERT_EQ(next_reset_bit, CompactBitSet::inval_bit); + } else { + ASSERT_EQ(next_reset_bit, next_shadow_reset_bit); + } + } + + // Flip it back to original + shadow_bset = shadow_bset.flip(); + }; + + validate(); + m_bset = std::make_unique< CompactBitSet >(m_buf, false); // Reload + validate(); +} + +SISL_OPTION_GROUP(test_compact_bitset, + (buf_size, "", "buf_size", "buf_size that contains the bits", + ::cxxopts::value< uint32_t >()->default_value("1024"), "number")) + +int main(int argc, char* argv[]) { + int parsed_argc{argc}; + ::testing::InitGoogleTest(&parsed_argc, argv); + SISL_OPTIONS_LOAD(parsed_argc, argv, logging, test_compact_bitset); + + sisl::logging::SetLogger("test_compact_bitset"); + spdlog::set_pattern("[%D %T%z] [%^%l%$] [%n] [%t] %v"); + + return RUN_ALL_TESTS(); +} From b45862d826b7781b965e77b1a760747b58e25984 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Mon, 30 Oct 2023 16:11:55 -0700 Subject: [PATCH 364/385] New concurrent insertable vector (#188) --- conanfile.py | 2 +- include/sisl/fds/concurrent_insert_vector.hpp | 119 ++++++++++++++++++ src/fds/CMakeLists.txt | 14 +++ .../tests/concurrent_insert_vector_bench.cpp | 67 ++++++++++ .../tests/test_concurrent_insert_vector.cpp | 115 +++++++++++++++++ 5 files changed, 316 insertions(+), 1 deletion(-) create mode 100644 include/sisl/fds/concurrent_insert_vector.hpp create mode 100644 src/fds/tests/concurrent_insert_vector_bench.cpp create mode 100644 src/fds/tests/test_concurrent_insert_vector.cpp diff --git a/conanfile.py b/conanfile.py index 74b61bc2..aa778835 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "10.2.2" + version = "10.2.3" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" diff --git a/include/sisl/fds/concurrent_insert_vector.hpp b/include/sisl/fds/concurrent_insert_vector.hpp new file mode 100644 index 00000000..a9c22d9f --- /dev/null +++ b/include/sisl/fds/concurrent_insert_vector.hpp @@ -0,0 +1,119 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ +#pragma once + +#include +#include + +#include + +namespace sisl { + +// +// This data structure provides a vector where concurrent threads can safely emplace or push back the data into. +// However, it does not guarantee any access or iterations happen during the insertion. It is the responsibility of the +// user to synchornoize this behavior. This data structure is useful when the user wants to insert data into a vector +// concurrently in a fast manner and then iterate over the data later. If the user wants a vector implementation which +// reads concurrently with writer, they can use sisl::ThreadVector. This data structure is provided as a replacement for +// simplistic cases where insertion and iteration never happen concurrently. As a result it provides better performance +// than even sisl::ThreadVector and better debuggability. +// +// Benchmark shows atleast 10x better performance on more than 4 threads concurrently inserting with mutex. +// +template < typename T > +class ConcurrentInsertVector { +private: + ExitSafeThreadBuffer< std::vector< T >, size_t > tvector_; + std::vector< std::vector< T > const* > per_thread_vec_ptrs_; + +public: + struct iterator { + size_t next_thread{0}; + size_t next_id_in_thread{0}; + ConcurrentInsertVector const* vec{nullptr}; + + iterator(ConcurrentInsertVector const& v) : vec{&v} {} + iterator(ConcurrentInsertVector const& v, bool end_iterator) : vec{&v} { + if (end_iterator) { next_thread = vec->per_thread_vec_ptrs_.size(); } + } + + void operator++() { + ++next_id_in_thread; + if (next_id_in_thread >= vec->per_thread_vec_ptrs_[next_thread]->size()) { + ++next_thread; + next_id_in_thread = 0; + } + } + + bool operator==(iterator const& other) const = default; + bool operator!=(iterator const& other) const = default; + + T const& operator*() const { return vec->per_thread_vec_ptrs_[next_thread]->at(next_id_in_thread); } + T const* operator->() const { return &(vec->per_thread_vec_ptrs_[next_thread]->at(next_id_in_thread)); } + }; + + ConcurrentInsertVector() = default; + ConcurrentInsertVector(size_t size) : tvector_{size} {} + ConcurrentInsertVector(const ConcurrentInsertVector&) = delete; + ConcurrentInsertVector(ConcurrentInsertVector&&) noexcept = delete; + ConcurrentInsertVector& operator=(const ConcurrentInsertVector&) = delete; + ConcurrentInsertVector& operator=(ConcurrentInsertVector&&) noexcept = delete; + ~ConcurrentInsertVector() = default; + + template < typename InputType, + typename = typename std::enable_if< + std::is_convertible< typename std::decay< InputType >::type, T >::value >::type > + void push_back(InputType&& ele) { + tvector_->push_back(std::forward< InputType >(ele)); + } + + template < class... Args > + void emplace_back(Args&&... args) { + tvector_->emplace_back(std::forward< Args >(args)...); + } + + iterator begin() { + tvector_.access_all_threads([this](std::vector< T > const* tvec, bool, bool) { + if (tvec) { per_thread_vec_ptrs_.push_back(tvec); } + return false; + }); + return iterator{*this}; + } + + iterator end() { return iterator{*this, true /* end_iterator */}; } + + void foreach_entry(auto&& cb) { + tvector_.access_all_threads([this, &cb](std::vector< T > const* tvec, bool, bool) { + if (tvec) { + for (auto const& e : *tvec) { + cb(e); + } + } + return false; + }); + } + + size_t size() { + size_t sz{0}; + tvector_.access_all_threads([this, &sz](std::vector< T > const* tvec, bool, bool) { + if (tvec) { sz += tvec->size; } + }); + return sz; + } +}; + +} // namespace sisl diff --git a/src/fds/CMakeLists.txt b/src/fds/CMakeLists.txt index b53fe5cd..f500ece9 100644 --- a/src/fds/CMakeLists.txt +++ b/src/fds/CMakeLists.txt @@ -45,6 +45,20 @@ if (DEFINED ENABLE_TESTING) target_link_libraries(test_compact_bitset sisl ${COMMON_DEPS} GTest::gtest) add_test(NAME CompactBitset COMMAND test_compact_bitset) + add_executable(test_concurrent_insert_vector) + target_sources(test_concurrent_insert_vector PRIVATE + tests/test_concurrent_insert_vector.cpp + ) + target_link_libraries(test_concurrent_insert_vector sisl ${COMMON_DEPS} GTest::gtest) + add_test(NAME ConcurrentInsertVector COMMAND test_concurrent_insert_vector) + + add_executable(concurrent_insert_vector_bench) + target_sources(concurrent_insert_vector_bench PRIVATE + tests/concurrent_insert_vector_bench.cpp + ) + target_link_libraries(concurrent_insert_vector_bench sisl ${COMMON_DEPS} benchmark::benchmark) + add_test(NAME ConcurrentVectorBench COMMAND concurrent_insert_vector_bench) + add_executable(obj_allocator_benchmark) target_sources(obj_allocator_benchmark PRIVATE tests/obj_allocator_benchmark.cpp diff --git a/src/fds/tests/concurrent_insert_vector_bench.cpp b/src/fds/tests/concurrent_insert_vector_bench.cpp new file mode 100644 index 00000000..7edc1e67 --- /dev/null +++ b/src/fds/tests/concurrent_insert_vector_bench.cpp @@ -0,0 +1,67 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ +#include +#include +#include +#include + +#include +#include +#include + +using namespace sisl; + +static constexpr uint32_t NUM_THREADS = 1; +std::unique_ptr< std::vector< uint64_t > > glob_lock_vector; +std::mutex glob_vector_mutex; + +std::unique_ptr< sisl::ConcurrentInsertVector< uint64_t > > glob_cvec; + +void test_locked_vector_insert(benchmark::State& state) { + // auto const per_thread_count = nentries / state.threads(); + + LOGINFO("Running on {} iterations in {} threads", state.iterations(), state.threads()); + std::cout << "Running on iterations=" << state.iterations() << " in threads=" << state.threads() << "\n"; + glob_lock_vector = std::make_unique< std::vector< uint64_t > >(); + + uint64_t i{0}; + for (auto _ : state) { // Loops upto iteration count + std::lock_guard< std::mutex > lg(glob_vector_mutex); + glob_lock_vector->emplace_back(++i); + } +} + +void test_concurrent_vector_insert(benchmark::State& state) { + std::cout << "Running on iterations=" << state.iterations() << " in threads=" << state.threads() << "\n"; + glob_cvec = std::make_unique< sisl::ConcurrentInsertVector< uint64_t > >(); + + uint64_t i{0}; + for (auto _ : state) { // Loops upto iteration count + glob_cvec->emplace_back(++i); + } +} + +BENCHMARK(test_locked_vector_insert)->Threads(NUM_THREADS); +BENCHMARK(test_concurrent_vector_insert)->Threads(NUM_THREADS); + +int main(int argc, char** argv) { + int parsed_argc{argc}; + ::benchmark::Initialize(&parsed_argc, argv); + + // setup(); + ::benchmark::RunSpecifiedBenchmarks(); +} diff --git a/src/fds/tests/test_concurrent_insert_vector.cpp b/src/fds/tests/test_concurrent_insert_vector.cpp new file mode 100644 index 00000000..7a58fb44 --- /dev/null +++ b/src/fds/tests/test_concurrent_insert_vector.cpp @@ -0,0 +1,115 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Author/Developer(s): Harihara Kadayam + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include + +using namespace sisl; + +SISL_OPTIONS_ENABLE(logging, test_concurrent_insert_vector) + +class ConcurrentInsertVectorTest : public testing::Test { +protected: + ConcurrentInsertVector< uint32_t > m_cvec; + std::vector< std::thread > m_threads; + +public: + ConcurrentInsertVectorTest() : + testing::Test(), m_cvec{s_cast< size_t >(SISL_OPTIONS["num_entries"].as< uint32_t >())} {} + ConcurrentInsertVectorTest(const ConcurrentInsertVectorTest&) = delete; + ConcurrentInsertVectorTest(ConcurrentInsertVectorTest&&) noexcept = delete; + ConcurrentInsertVectorTest& operator=(const ConcurrentInsertVectorTest&) = delete; + ConcurrentInsertVectorTest& operator=(ConcurrentInsertVectorTest&&) noexcept = delete; + virtual ~ConcurrentInsertVectorTest() override = default; + +protected: + void insert_and_wait() { + auto const nthreads = SISL_OPTIONS["num_threads"].as< uint32_t >(); + auto const per_thread_count = SISL_OPTIONS["num_entries"].as< uint32_t >() / nthreads; + for (size_t i{0}; i < nthreads; ++i) { + m_threads.emplace_back( + [this](uint32_t start, uint32_t count) { + for (uint32_t i{0}; i < count; ++i) { + m_cvec.push_back(start + i); + } + }, + i * per_thread_count, per_thread_count); + } + + for (auto& thr : m_threads) { + thr.join(); + } + } + + void validate_all() { + sisl::Bitset bset{SISL_OPTIONS["num_entries"].as< uint32_t >()}; + m_cvec.foreach_entry([&bset](uint32_t const& e) { bset.set_bit(e); }); + ASSERT_EQ(bset.get_next_reset_bit(0), sisl::Bitset::npos) << "Access didn't receive all entries"; + } + + void validate_all_by_iteration() { + sisl::Bitset bset{SISL_OPTIONS["num_entries"].as< uint32_t >()}; + for (const auto& e : m_cvec) { + bset.set_bit(e); + } + ASSERT_EQ(bset.get_next_reset_bit(0), sisl::Bitset::npos) << "Access didn't receive all entries"; + } +}; + +TEST_F(ConcurrentInsertVectorTest, concurrent_insertion) { + LOGINFO("Step1: Inserting {} entries in parallel in {} threads and wait", + SISL_OPTIONS["num_entries"].as< uint32_t >(), SISL_OPTIONS["num_threads"].as< uint32_t >()); + insert_and_wait(); + + LOGINFO("Step2: Validating all entries are inserted"); + validate_all(); + + LOGINFO("Step3: Validating all entries again to ensure it is readable multipled times"); + validate_all(); + + LOGINFO("Step4: Validating all entries by iterator"); + validate_all_by_iteration(); + + LOGINFO("Step5: Validating all entries again by iterator to ensure it is readable multipled times"); + validate_all_by_iteration(); +} + +SISL_OPTION_GROUP(test_concurrent_insert_vector, + (num_entries, "", "num_entries", "num_entries", + ::cxxopts::value< uint32_t >()->default_value("10000"), "number"), + (num_threads, "", "num_threads", "num_threads", ::cxxopts::value< uint32_t >()->default_value("8"), + "number")) + +int main(int argc, char* argv[]) { + int parsed_argc{argc}; + ::testing::InitGoogleTest(&parsed_argc, argv); + SISL_OPTIONS_LOAD(parsed_argc, argv, logging, test_concurrent_insert_vector); + + sisl::logging::SetLogger("test_concurrent_insert_vector"); + spdlog::set_pattern("[%D %T%z] [%^%l%$] [%n] [%t] %v"); + + return RUN_ALL_TESTS(); +} From f9f162d66e37db34aaea61c4fc45002d3a39f996 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Tue, 31 Oct 2023 15:17:34 -0700 Subject: [PATCH 365/385] Allowed size() of concurrent vector to be const (#189) --- conanfile.py | 2 +- include/sisl/fds/concurrent_insert_vector.hpp | 13 ++++++++----- src/fds/tests/test_concurrent_insert_vector.cpp | 5 +++-- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/conanfile.py b/conanfile.py index aa778835..412a3f2f 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "10.2.3" + version = "10.2.4" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" diff --git a/include/sisl/fds/concurrent_insert_vector.hpp b/include/sisl/fds/concurrent_insert_vector.hpp index a9c22d9f..f9d4fd79 100644 --- a/include/sisl/fds/concurrent_insert_vector.hpp +++ b/include/sisl/fds/concurrent_insert_vector.hpp @@ -46,6 +46,7 @@ class ConcurrentInsertVector { size_t next_id_in_thread{0}; ConcurrentInsertVector const* vec{nullptr}; + iterator() = default; iterator(ConcurrentInsertVector const& v) : vec{&v} {} iterator(ConcurrentInsertVector const& v, bool end_iterator) : vec{&v} { if (end_iterator) { next_thread = vec->per_thread_vec_ptrs_.size(); } @@ -88,7 +89,7 @@ class ConcurrentInsertVector { iterator begin() { tvector_.access_all_threads([this](std::vector< T > const* tvec, bool, bool) { - if (tvec) { per_thread_vec_ptrs_.push_back(tvec); } + if (tvec && tvec->size()) { per_thread_vec_ptrs_.push_back(tvec); } return false; }); return iterator{*this}; @@ -107,11 +108,13 @@ class ConcurrentInsertVector { }); } - size_t size() { + size_t size() const { size_t sz{0}; - tvector_.access_all_threads([this, &sz](std::vector< T > const* tvec, bool, bool) { - if (tvec) { sz += tvec->size; } - }); + const_cast< ExitSafeThreadBuffer< std::vector< T >, size_t >& >(tvector_).access_all_threads( + [this, &sz](std::vector< T > const* tvec, bool, bool) { + if (tvec) { sz += tvec->size(); } + return false; + }); return sz; } }; diff --git a/src/fds/tests/test_concurrent_insert_vector.cpp b/src/fds/tests/test_concurrent_insert_vector.cpp index 7a58fb44..38c4488e 100644 --- a/src/fds/tests/test_concurrent_insert_vector.cpp +++ b/src/fds/tests/test_concurrent_insert_vector.cpp @@ -37,8 +37,7 @@ class ConcurrentInsertVectorTest : public testing::Test { std::vector< std::thread > m_threads; public: - ConcurrentInsertVectorTest() : - testing::Test(), m_cvec{s_cast< size_t >(SISL_OPTIONS["num_entries"].as< uint32_t >())} {} + ConcurrentInsertVectorTest() : testing::Test() {} ConcurrentInsertVectorTest(const ConcurrentInsertVectorTest&) = delete; ConcurrentInsertVectorTest(ConcurrentInsertVectorTest&&) noexcept = delete; ConcurrentInsertVectorTest& operator=(const ConcurrentInsertVectorTest&) = delete; @@ -68,6 +67,7 @@ class ConcurrentInsertVectorTest : public testing::Test { sisl::Bitset bset{SISL_OPTIONS["num_entries"].as< uint32_t >()}; m_cvec.foreach_entry([&bset](uint32_t const& e) { bset.set_bit(e); }); ASSERT_EQ(bset.get_next_reset_bit(0), sisl::Bitset::npos) << "Access didn't receive all entries"; + ASSERT_EQ(m_cvec.size(), bset.get_set_count(0)) << "Size doesn't match with number of entries"; } void validate_all_by_iteration() { @@ -76,6 +76,7 @@ class ConcurrentInsertVectorTest : public testing::Test { bset.set_bit(e); } ASSERT_EQ(bset.get_next_reset_bit(0), sisl::Bitset::npos) << "Access didn't receive all entries"; + ASSERT_EQ(m_cvec.size(), bset.get_set_count(0)) << "Size doesn't match with number of entries"; } }; From daf049a3be689e88249b582f1a3bbe0416c17771 Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Wed, 8 Nov 2023 14:16:14 -0800 Subject: [PATCH 366/385] Update flatbuffers version and fixed incompatibilities (#191) --- cmake/settings_gen.cmake | 2 +- conanfile.py | 4 ++-- include/sisl/settings/settings.hpp | 20 +++++++++++++------- include/sisl/utility/non_null_ptr.hpp | 2 ++ 4 files changed, 18 insertions(+), 10 deletions(-) diff --git a/cmake/settings_gen.cmake b/cmake/settings_gen.cmake index 8db3cc24..acac0a62 100644 --- a/cmake/settings_gen.cmake +++ b/cmake/settings_gen.cmake @@ -58,7 +58,7 @@ macro(settings_gen_cpp flatbuffer_bin_path gen_out_path _target) add_custom_command( OUTPUT ${_GEN_HEADERS} COMMAND ${flatbuffer_bin_path} - ARGS -c -s -o ${gen_out_path} + ARGS -c -o ${gen_out_path} --no-prefix --scoped-enums --gen-mutable diff --git a/conanfile.py b/conanfile.py index 412a3f2f..27b283c0 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "10.2.4" + version = "10.2.5" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" @@ -89,7 +89,7 @@ def requirements(self): if self.settings.os in ["Linux"]: self.requires("breakpad/cci.20230127") self.requires("cxxopts/3.1.1") - self.requires("flatbuffers/1.12.0") + self.requires("flatbuffers/23.5.26") self.requires("grpc/1.50.1") self.requires("nlohmann_json/3.11.2") self.requires("prometheus-cpp/1.1.0") diff --git a/include/sisl/settings/settings.hpp b/include/sisl/settings/settings.hpp index 512025da..01be4257 100644 --- a/include/sisl/settings/settings.hpp +++ b/include/sisl/settings/settings.hpp @@ -301,14 +301,17 @@ class SettingsFactory : public sisl::SettingsFactoryBase { parser.opts.strict_json = true; parser.opts.output_default_scalars_in_json = true; - if (!parser.Parse(m_raw_schema.c_str())) { return; } + if (!parser.Parse(m_raw_schema.c_str())) { + LOGERROR("Error in parsing schema file to save"); + return; + } parser.builder_.Finish( SettingsT::TableType::Pack(parser.builder_, m_rcu_data.get_node()->get().get(), nullptr)); std::string fname = filepath; boost::replace_all(fname, ".json", ""); - if (!GenerateTextFile(parser, "", fname)) { return; } + if (GenTextFile(parser, "", fname) == nullptr) { LOGERROR("Error in Saving json to file"); } } const std::string& get_current_settings() const { return m_current_settings; } @@ -321,12 +324,15 @@ class SettingsFactory : public sisl::SettingsFactoryBase { parser.opts.strict_json = true; parser.opts.output_default_scalars_in_json = true; - if (!parser.Parse(m_raw_schema.c_str())) { return "Error parsing flatbuffer settings schema"; } + if (!parser.Parse(m_raw_schema.c_str())) { + LOGERROR("Error parsing flatbuffer settings schema"); + return json; + } parser.builder_.Finish( SettingsT::TableType::Pack(parser.builder_, m_rcu_data.get_node()->get().get(), nullptr)); - if (!GenerateText(parser, parser.builder_.GetBufferPointer(), &json)) { - return "Error generating json from flatbuffer"; + if (GenText(parser, parser.builder_.GetBufferPointer(), &json) == nullptr) { + LOGERROR("Error generating json from flatbuffer"); } return json; } @@ -464,7 +470,7 @@ class SettingsFactory : public sisl::SettingsFactoryBase { #define WITH_SETTINGS_THIS_CAP1(var, cap1, ...) with_settings([ this, cap1 ](auto& var) __VA_ARGS__) #define WITH_SETTINGS_THIS_CAP2(var, cap1, cap2, ...) with_settings([ this, cap1, cap2 ](auto& var) __VA_ARGS__) -//#define SETTINGS_FACTORY(SType) ::sisl::SettingsFactory< SType##T >::instance() +// #define SETTINGS_FACTORY(SType) ::sisl::SettingsFactory< SType##T >::instance() /* * SETTINGS(var) invokes user supplied lamdba passing it a safe pointer to an instance of settings object @@ -484,5 +490,5 @@ class SettingsFactory : public sisl::SettingsFactoryBase { #define SETTINGS_THIS_CAP2(sname, var, cap1, cap2, ...) \ SETTINGS_FACTORY(sname).WITH_SETTINGS_THIS_CAP2(var, cap1, cap2, __VA_ARGS__) -//#define SETTINGS_VALUE(SType, path_expr) SETTINGS_FACTORY(SType).WITH_SETTINGS_VALUE(path_expr) +// #define SETTINGS_VALUE(SType, path_expr) SETTINGS_FACTORY(SType).WITH_SETTINGS_VALUE(path_expr) #define SETTINGS_VALUE(sname, path_expr) SETTINGS_FACTORY(sname).WITH_SETTINGS_VALUE(path_expr) diff --git a/include/sisl/utility/non_null_ptr.hpp b/include/sisl/utility/non_null_ptr.hpp index 95c57198..80bab1a9 100644 --- a/include/sisl/utility/non_null_ptr.hpp +++ b/include/sisl/utility/non_null_ptr.hpp @@ -98,6 +98,8 @@ struct embedded_t : public T { *ret = *this; return static_cast< T* >(ret); } + + void reset() noexcept { *this = embedded_t{nullptr}; } }; template < class T > From bee5f32b518568cbb7cff196250e9852ec9b2820 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Fri, 1 Dec 2023 15:00:21 -0800 Subject: [PATCH 367/385] Add the option to get the request as sisl::io_blob from the generic service (#194) Co-authored-by: Ravi Nagarjun Akella --- conanfile.py | 2 +- include/sisl/grpc/generic_service.hpp | 32 +++++++++++++++++-- src/grpc/tests/function/echo_async_client.cpp | 19 ++++++++--- 3 files changed, 44 insertions(+), 9 deletions(-) diff --git a/conanfile.py b/conanfile.py index 27b283c0..e782c20c 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "10.2.5" + version = "10.3.1" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" diff --git a/include/sisl/grpc/generic_service.hpp b/include/sisl/grpc/generic_service.hpp index 9782638a..cdd3ad89 100644 --- a/include/sisl/grpc/generic_service.hpp +++ b/include/sisl/grpc/generic_service.hpp @@ -15,6 +15,7 @@ #pragma once #include +#include "sisl/fds/buffer.hpp" #include "rpc_call.hpp" namespace sisl { @@ -49,12 +50,35 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD RpcDataAbstract* create_new() override { return new GenericRpcData(m_rpc_info, m_queue_idx); } void set_status(grpc::Status& status) { m_retstatus = status; } - ~GenericRpcData() override = default; + ~GenericRpcData() override { + if (m_request_blob_allocated) { m_request_blob.buf_free(); } + } // There is only one generic static rpc data for all rpcs. size_t get_rpc_idx() const override { return 0; } const grpc::ByteBuffer& request() const { return m_request; } + sisl::io_blob& request_blob() { + if (!m_request_blob.bytes) { + grpc::Slice slice; + auto status = m_request.TrySingleSlice(&slice); + if (status.ok()) { + m_request_blob.bytes = const_cast< uint8_t* >(slice.begin()); + m_request_blob.size = slice.size(); + } else if (status.error_code() == grpc::StatusCode::FAILED_PRECONDITION) { + // If the ByteBuffer is not made up of single slice, TrySingleSlice() will fail. + // DumpSingleSlice() should work in those cases but will incur a copy. + if (status = m_request.DumpToSingleSlice(&slice); status.ok()) { + m_request_blob.buf_alloc(slice.size()); + m_request_blob_allocated = true; + std::memcpy(static_cast< void* >(m_request_blob.bytes), static_cast< const void* >(slice.begin()), + slice.size()); + } + } + } + return m_request_blob; + } + grpc::ByteBuffer& response() { return m_response; } void enqueue_call_request(::grpc::ServerCompletionQueue& cq) override { @@ -78,6 +102,8 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD grpc::GenericServerContext m_ctx; grpc::ByteBuffer m_request; grpc::ByteBuffer m_response; + sisl::io_blob m_request_blob; + bool m_request_blob_allocated{false}; grpc::Status m_retstatus{grpc::Status::OK}; // user can set and retrieve the context. Its life cycle is tied to that of rpc data. generic_rpc_ctx_ptr m_rpc_context; @@ -104,7 +130,7 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD return in_shutdown ? nullptr : create_new(); } - RpcDataAbstract* on_buf_read(bool ) { + RpcDataAbstract* on_buf_read(bool) { auto this_rpc_data = boost::intrusive_ptr< GenericRpcData >{this}; // take a ref before the handler cb is called. // unref is called in send_response which is handled by us (in case of sync calls) @@ -114,7 +140,7 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD return nullptr; } - RpcDataAbstract* on_buf_write(bool ) { + RpcDataAbstract* on_buf_write(bool) { m_stream.Finish(m_retstatus, static_cast< void* >(m_request_completed_tag.ref())); unref(); return nullptr; diff --git a/src/grpc/tests/function/echo_async_client.cpp b/src/grpc/tests/function/echo_async_client.cpp index 1a92b9a2..293354dd 100644 --- a/src/grpc/tests/function/echo_async_client.cpp +++ b/src/grpc/tests/function/echo_async_client.cpp @@ -62,7 +62,7 @@ struct DataMessage { } }; -static void DeserializeFromByteBuffer(const grpc::ByteBuffer& buffer, DataMessage& msg) { +static void DeserializeFromBuffer(const grpc::ByteBuffer& buffer, DataMessage& msg) { std::vector< grpc::Slice > slices; (void)buffer.Dump(&slices); std::string buf; @@ -72,6 +72,14 @@ static void DeserializeFromByteBuffer(const grpc::ByteBuffer& buffer, DataMessag } msg.DeserializeFromString(buf); } + +static void DeserializeFromBuffer(sisl::io_blob const& buffer, DataMessage& msg) { + std::string buf; + buf.reserve(buffer.size); + buf.append(reinterpret_cast< const char* >(buffer.bytes), buffer.size); + msg.DeserializeFromString(buf); +} + static void SerializeToByteBuffer(grpc::ByteBuffer& buffer, const DataMessage& msg) { std::string buf; msg.SerializeToString(buf); @@ -115,7 +123,7 @@ class TestClient { RELEASE_ASSERT_EQ(status.ok(), true, "generic request {} failed, status {}: {}", req.m_seqno, status.error_code(), status.error_message()); DataMessage svr_msg; - DeserializeFromByteBuffer(reply, svr_msg); + DeserializeFromBuffer(reply, svr_msg); RELEASE_ASSERT_EQ(req.m_seqno, svr_msg.m_seqno); RELEASE_ASSERT_EQ(req.m_buf, svr_msg.m_buf); { @@ -323,9 +331,10 @@ class TestServer { std::atomic< uint32_t > num_calls = 0ul; std::atomic< uint32_t > num_completions = 0ul; - static void set_response(const grpc::ByteBuffer& req, grpc::ByteBuffer& resp) { + template < typename BufT > + static void set_response(BufT const& req, grpc::ByteBuffer& resp) { DataMessage cli_request; - DeserializeFromByteBuffer(req, cli_request); + DeserializeFromBuffer(req, cli_request); RELEASE_ASSERT((cli_request.m_buf == GENERIC_CLIENT_MESSAGE), "Could not parse response buffer"); SerializeToByteBuffer(resp, cli_request); } @@ -344,7 +353,7 @@ class TestServer { if ((++num_calls % 2) == 0) { LOGDEBUGMOD(grpc_server, "respond async generic request, call_num {}", num_calls.load()); std::thread([this, rpc = rpc_data] { - set_response(rpc->request(), rpc->response()); + set_response(rpc->request_blob(), rpc->response()); rpc->send_response(); }).detach(); return false; From 23af8bd19bbdac480908eca9cb3864ae9780518d Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Mon, 11 Dec 2023 19:08:45 -0700 Subject: [PATCH 368/385] Update folly library. (#196) --- .jenkins/Jenkinsfile | 2 +- 3rd_party/folly/conandata.yml | 14 +++++++++++ 3rd_party/folly/conanfile.py | 8 +++---- .../folly/patches/0024-compiler-flags.patch | 23 +++++++++++++++++++ 3rd_party/folly/test_package/CMakeLists.txt | 6 +---- 3rd_party/folly/test_package/test_package.cpp | 13 ++++------- conanfile.py | 2 +- include/sisl/cache/range_hashmap.hpp | 2 +- prepare.sh | 2 +- 9 files changed, 51 insertions(+), 21 deletions(-) create mode 100644 3rd_party/folly/patches/0024-compiler-flags.patch diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 764f1177..6826e394 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -54,7 +54,7 @@ pipeline { stage("Compile") { steps { - sh "conan export 3rd_party/folly folly/nu2.2022.01.31.00@ ; \ + sh "conan export 3rd_party/folly folly/nu2.2023.12.11.00@ ; \ conan create ${BUILD_MISSING} -pr debug -o ${PROJECT}:sanitize=True . ${PROJECT}/${TAG} ; \ conan create ${BUILD_MISSING} -pr debug . ${PROJECT}/${TAG} ; \ conan create ${BUILD_MISSING} -pr test -o ${PROJECT}:malloc_impl=tcmalloc . ${PROJECT}/${TAG} ; \ diff --git a/3rd_party/folly/conandata.yml b/3rd_party/folly/conandata.yml index 28a5d757..9b806191 100644 --- a/3rd_party/folly/conandata.yml +++ b/3rd_party/folly/conandata.yml @@ -8,6 +8,9 @@ sources: "nu2.2022.01.31.00": url: "https://github.com/facebook/folly/archive/v2022.01.31.00.tar.gz" sha256: "d764b9a7832d967bb7cfea4bcda15d650315aa4d559fde1da2a52b015cd88b9c" + "nu2.2023.12.11.00": + url: "https://github.com/facebook/folly/archive/v2023.12.11.00.tar.gz" + sha256: "1ff0c0258f8322a818a6e0cd27c0fc965360dc04af308e59349e1c79966190a1" patches: "2019.10.21.00": - patch_file: "patches/0001-find-packages.patch" @@ -56,3 +59,14 @@ patches: base_path: "source_subfolder" - patch_file: "patches/0023-fix-safe-check-sanitize.patch" base_path: "source_subfolder" + "nu2.2023.12.11.00": + - patch_file: "patches/0016-find-packages.patch" + base_path: "source_subfolder" + - patch_file: "patches/0018-find-glog.patch" + base_path: "source_subfolder" + - patch_file: "patches/0019-exclude-example.patch" + base_path: "source_subfolder" + - patch_file: "patches/0022-fix-windows-minmax.patch" + base_path: "source_subfolder" + - patch_file: "patches/0024-compiler-flags.patch" + base_path: "source_subfolder" diff --git a/3rd_party/folly/conanfile.py b/3rd_party/folly/conanfile.py index fa8b46d7..8ac1f46f 100755 --- a/3rd_party/folly/conanfile.py +++ b/3rd_party/folly/conanfile.py @@ -73,16 +73,16 @@ def configure(self): del self.options.fPIC def requirements(self): - self.requires("boost/1.78.0") + self.requires("boost/1.82.0") self.requires("bzip2/1.0.8") self.requires("double-conversion/3.2.0") self.requires("gflags/2.2.2") self.requires("glog/0.4.0") self.requires("libevent/2.1.12") - self.requires("openssl/1.1.1q") + self.requires("openssl/3.1.1") self.requires("lz4/1.9.3") self.requires("snappy/1.1.9") - self.requires("zlib/1.2.12") + self.requires("zlib/1.2.13") self.requires("zstd/1.5.2") if not is_msvc(self): self.requires("libdwarf/20191104") @@ -93,7 +93,7 @@ def requirements(self): self.requires("libiberty/9.1.0") self.requires("libunwind/1.5.0") if Version(self.version) >= "2020.08.10.00": - self.requires("fmt/7.1.3") + self.requires("fmt/[>=10]") @property def _required_boost_components(self): diff --git a/3rd_party/folly/patches/0024-compiler-flags.patch b/3rd_party/folly/patches/0024-compiler-flags.patch new file mode 100644 index 00000000..adee0d6e --- /dev/null +++ b/3rd_party/folly/patches/0024-compiler-flags.patch @@ -0,0 +1,23 @@ +diff -Naur a/CMake/FollyCompilerUnix.cmake b/CMake/FollyCompilerUnix.cmake +--- a/CMake/FollyCompilerUnix.cmake 2023-12-08 20:38:13.000000000 -0700 ++++ b/CMake/FollyCompilerUnix.cmake 2023-12-11 12:34:46.769353376 -0700 +@@ -12,9 +12,9 @@ + # See the License for the specific language governing permissions and + # limitations under the License. + +-set(CMAKE_CXX_FLAGS_COMMON "-g -Wall -Wextra") ++set(CMAKE_CXX_FLAGS_COMMON "-Wall -Wextra") + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${CMAKE_CXX_FLAGS_COMMON}") +-set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_COMMON} -O3") ++set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_COMMON}") + + list(APPEND CMAKE_REQUIRED_DEFINITIONS "-D_GNU_SOURCE") + function(apply_folly_compile_options_to_target THETARGET) +@@ -25,7 +25,6 @@ + ) + target_compile_options(${THETARGET} + PRIVATE +- -g + -finput-charset=UTF-8 + -fsigned-char + -Wall diff --git a/3rd_party/folly/test_package/CMakeLists.txt b/3rd_party/folly/test_package/CMakeLists.txt index 6a9df4ea..cd964d40 100644 --- a/3rd_party/folly/test_package/CMakeLists.txt +++ b/3rd_party/folly/test_package/CMakeLists.txt @@ -9,8 +9,4 @@ target_link_libraries(${PROJECT_NAME} Folly::follybenchmark) -if (${FOLLY_VERSION} VERSION_LESS "2021.07.20.00") - set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 14) -else() - set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 17) -endif() +set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 20) diff --git a/3rd_party/folly/test_package/test_package.cpp b/3rd_party/folly/test_package/test_package.cpp index cc522b8b..ad935988 100644 --- a/3rd_party/folly/test_package/test_package.cpp +++ b/3rd_party/folly/test_package/test_package.cpp @@ -10,16 +10,13 @@ #include #endif -static void print_uri(const folly::fbstring& value) { - const folly::Uri uri(value); - std::cout << "The authority from " << value << " is " << uri.authority() << std::endl; -} - int main() { folly::ThreadedExecutor executor; - folly::Promise promise; - folly::Future future = promise.getSemiFuture().via(&executor); - folly::Future unit = std::move(future).thenValue(print_uri); + auto [promise, future] = folly::makePromiseContract< folly::fbstring >(&executor); + auto unit = std::move(future).thenValue([](auto const value) { + const folly::Uri uri(value); + std::cout << "The authority from " << value << " is " << uri.authority() << std::endl; + }); promise.setValue("https://github.com/bincrafters"); std::move(unit).get(); #if FOLLY_HAVE_ELF diff --git a/conanfile.py b/conanfile.py index e782c20c..f3a0db21 100644 --- a/conanfile.py +++ b/conanfile.py @@ -81,7 +81,7 @@ def requirements(self): # Linux Specific Support if self.settings.os in ["Linux"]: - self.requires("folly/nu2.2022.01.31.00") + self.requires("folly/nu2.2023.12.11.00") self.requires("userspace-rcu/0.11.4") # Generic packages (conan-center) diff --git a/include/sisl/cache/range_hashmap.hpp b/include/sisl/cache/range_hashmap.hpp index 654fccc7..0c2c3d5b 100644 --- a/include/sisl/cache/range_hashmap.hpp +++ b/include/sisl/cache/range_hashmap.hpp @@ -250,7 +250,7 @@ class MultiEntryHashNode : public boost::intrusive::slist_base_hook<> { K m_base_key; big_offset_t m_base_nth; - folly::small_vector< ValueEntryRange, 8, small_count_t > m_values; + folly::small_vector< ValueEntryRange, 8, folly::small_vector_policy::policy_size_type > m_values; public: MultiEntryHashNode(const K& base_key, big_offset_t nth) : m_base_key{base_key}, m_base_nth{nth} {} diff --git a/prepare.sh b/prepare.sh index a35686bf..ef8116a1 100755 --- a/prepare.sh +++ b/prepare.sh @@ -4,7 +4,7 @@ echo -n "Exporting custom recipes..." echo -n "breakpad." conan export 3rd_party/breakpad breakpad/cci.20230127@ >/dev/null echo -n "folly." -conan export 3rd_party/folly folly/nu2.2022.01.31.00@ >/dev/null +conan export 3rd_party/folly folly/nu2.2023.12.11.00@ >/dev/null echo -n "gperftools." conan export 3rd_party/gperftools >/dev/null echo -n "jemalloc." From 3db78a4d5c2760b15786a7b0ade3f38578f4c819 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 12 Dec 2023 10:35:04 -0700 Subject: [PATCH 369/385] Folly needs to depend on liburing directly. (#197) * Folly needs to depend on liburing directly. * Remove inclusion from header causing conflicts. --- 3rd_party/folly/conandata.yml | 2 ++ 3rd_party/folly/conanfile.py | 1 + 3rd_party/folly/patches/0025-timespec.patch | 38 +++++++++++++++++++++ conanfile.py | 2 +- 4 files changed, 42 insertions(+), 1 deletion(-) create mode 100644 3rd_party/folly/patches/0025-timespec.patch diff --git a/3rd_party/folly/conandata.yml b/3rd_party/folly/conandata.yml index 9b806191..41532ccf 100644 --- a/3rd_party/folly/conandata.yml +++ b/3rd_party/folly/conandata.yml @@ -70,3 +70,5 @@ patches: base_path: "source_subfolder" - patch_file: "patches/0024-compiler-flags.patch" base_path: "source_subfolder" + - patch_file: "patches/0025-timespec.patch" + base_path: "source_subfolder" diff --git a/3rd_party/folly/conanfile.py b/3rd_party/folly/conanfile.py index 8ac1f46f..7ce3707c 100755 --- a/3rd_party/folly/conanfile.py +++ b/3rd_party/folly/conanfile.py @@ -84,6 +84,7 @@ def requirements(self): self.requires("snappy/1.1.9") self.requires("zlib/1.2.13") self.requires("zstd/1.5.2") + self.requires("liburing/2.4") if not is_msvc(self): self.requires("libdwarf/20191104") self.requires("libsodium/1.0.18") diff --git a/3rd_party/folly/patches/0025-timespec.patch b/3rd_party/folly/patches/0025-timespec.patch new file mode 100644 index 00000000..974a120d --- /dev/null +++ b/3rd_party/folly/patches/0025-timespec.patch @@ -0,0 +1,38 @@ +diff -Naur a/folly/io/async/AsyncSocket.cpp b/folly/io/async/AsyncSocket.cpp +--- a/folly/io/async/AsyncSocket.cpp 2023-12-08 20:38:13.000000000 -0700 ++++ b/folly/io/async/AsyncSocket.cpp 2023-12-12 10:15:06.023030521 -0700 +@@ -18,6 +18,9 @@ + + #include + ++/* for struct sock_extended_err*/ ++#include ++ + #include + #include + #include +diff -Naur a/folly/io/async/AsyncUDPSocket.cpp b/folly/io/async/AsyncUDPSocket.cpp +--- a/folly/io/async/AsyncUDPSocket.cpp 2023-12-08 20:38:13.000000000 -0700 ++++ b/folly/io/async/AsyncUDPSocket.cpp 2023-12-12 10:19:06.419424565 -0700 +@@ -17,6 +17,9 @@ + #include + #include + ++/* for struct sock_extended_err*/ ++#include ++ + #include + + #include +diff -Naur a/folly/net/NetOps.h b/folly/net/NetOps.h +--- a/folly/net/NetOps.h 2023-12-12 10:16:10.675139766 -0700 ++++ b/folly/net/NetOps.h 2023-12-12 10:15:55.087113425 -0700 +@@ -114,7 +114,7 @@ + #endif + #endif + /* for struct sock_extended_err*/ +-#include ++#include + #endif + #endif + diff --git a/conanfile.py b/conanfile.py index f3a0db21..85f0b76d 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "10.3.1" + version = "10.3.2" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" From 03282536e309252d85319bebf4b227be0be6f9fe Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Tue, 12 Dec 2023 21:22:45 -0800 Subject: [PATCH 370/385] sisl::blob const correctness (#192) * Update flatbuffers version and fixed incompatibilities * sisl::blob const correctness * Create "latest" folder for last logdirectory --- CMakeLists.txt | 5 + conanfile.py | 6 +- include/sisl/fds/bitset.hpp | 38 ++-- include/sisl/fds/buffer.hpp | 162 +++++++++++------- include/sisl/fds/compact_bitset.hpp | 10 +- include/sisl/grpc/generic_service.hpp | 8 +- src/cache/tests/test_range_cache.cpp | 12 +- src/cache/tests/test_range_hashmap.cpp | 6 +- src/fds/tests/test_compact_bitset.cpp | 2 +- src/fds/tests/test_sg_list.cpp | 4 +- src/grpc/tests/function/echo_async_client.cpp | 4 +- src/logging/logging.cpp | 39 +++-- 12 files changed, 174 insertions(+), 122 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 58929c8b..3cb1667f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -32,6 +32,11 @@ if (DEFINED CONAN_BUILD_COVERAGE) endif() endif() +if(${CMAKE_BUILD_TYPE} STREQUAL "Debug") + message(STATUS "Debug build") + add_flags("-D_DEBUG") +endif() + if (DEFINED MALLOC_IMPL) if (${MALLOC_IMPL} STREQUAL "jemalloc") add_flags("-DUSE_JEMALLOC=1") diff --git a/conanfile.py b/conanfile.py index 85f0b76d..3c86519c 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "10.3.2" + version = "11.0.2" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" @@ -96,8 +96,8 @@ def requirements(self): self.requires("spdlog/1.12.0") self.requires("zmarok-semver/1.1.0") self.requires("fmt/10.0.0", override=True) - self.requires("libcurl/8.0.1", override=True) - self.requires("openssl/3.1.1", override=True) + self.requires("libcurl/8.2.1", override=True) + self.requires("openssl/3.1.3", override=True) self.requires("xz_utils/5.2.5", override=True) self.requires("zlib/1.2.13", override=True) diff --git a/include/sisl/fds/bitset.hpp b/include/sisl/fds/bitset.hpp index 1141615c..616fb0e2 100644 --- a/include/sisl/fds/bitset.hpp +++ b/include/sisl/fds/bitset.hpp @@ -208,7 +208,7 @@ class BitsetImpl { const uint64_t size{(alignment_size > 0) ? round_up(bitset_serialized::nbytes(nbits), alignment_size) : bitset_serialized::nbytes(nbits)}; m_buf = make_byte_array_with_deleter(static_cast< uint32_t >(size), alignment_size); - m_s = new (m_buf->bytes) bitset_serialized{m_id, nbits, 0, alignment_size}; + m_s = new (m_buf->bytes()) bitset_serialized{m_id, nbits, 0, alignment_size}; } // this makes a shared copy of the rhs so that modifications of the shared version @@ -224,18 +224,18 @@ class BitsetImpl { // NOTE: This assumes that the passed byte_array already has an initialized bitset_serialized structure // Also assume that the words byte array contains packed word_t data since packed Word data is illegal // with any class besides POD - assert(b->size >= sizeof(bitset_serialized)); + assert(b->size() >= sizeof(bitset_serialized)); // get the header info - const bitset_serialized* const ptr{reinterpret_cast< const bitset_serialized* >(b->bytes)}; + const bitset_serialized* const ptr{reinterpret_cast< const bitset_serialized* >(b->cbytes())}; assert(ptr->m_word_bits == bitword_type::bits()); const uint64_t nbits{ptr->m_nbits}; const uint64_t total_bytes{bitset_serialized::nbytes(nbits)}; const uint32_t alignment_size{opt_alignment_size ? (*opt_alignment_size) : ptr->m_alignment_size}; const uint64_t size{(alignment_size > 0) ? round_up(total_bytes, alignment_size) : total_bytes}; - assert(b->size >= total_bytes); + assert(b->size() >= total_bytes); m_buf = make_byte_array_with_deleter(static_cast< uint32_t >(size), alignment_size); - m_s = new (m_buf->bytes) bitset_serialized{ptr->m_id, nbits, ptr->m_skip_bits, alignment_size, false}; - const word_t* b_words{reinterpret_cast< const word_t* >(b->bytes + sizeof(bitset_serialized))}; + m_s = new (m_buf->bytes()) bitset_serialized{ptr->m_id, nbits, ptr->m_skip_bits, alignment_size, false}; + const word_t* b_words{reinterpret_cast< const word_t* >(b->cbytes() + sizeof(bitset_serialized))}; // copy the data std::uninitialized_copy(b_words, std::next(b_words, m_s->m_words_cap), m_s->get_words()); } @@ -248,7 +248,7 @@ class BitsetImpl { const uint64_t size{(alignment_size > 0) ? round_up(bitset_serialized::nbytes(nbits), alignment_size) : bitset_serialized::nbytes(nbits)}; m_buf = make_byte_array_with_deleter(static_cast< uint32_t >(size), alignment_size); - m_s = new (m_buf->bytes) bitset_serialized{id, nbits, 0, alignment_size, false}; + m_s = new (m_buf->bytes()) bitset_serialized{id, nbits, 0, alignment_size, false}; // copy the data into the uninitialized bitset std::uninitialized_copy(start_ptr, end_ptr, m_s->get_words()); @@ -264,7 +264,7 @@ class BitsetImpl { const uint64_t size{(alignment_size > 0) ? round_up(bitset_serialized::nbytes(nbits), alignment_size) : bitset_serialized::nbytes(nbits)}; m_buf = make_byte_array_with_deleter(static_cast< uint32_t >(size), alignment_size); - m_s = new (m_buf->bytes) bitset_serialized{id, nbits, 0, alignment_size, false}; + m_s = new (m_buf->bytes()) bitset_serialized{id, nbits, 0, alignment_size, false}; // copy the data into the unitialized bitset std::uninitialized_copy(start_itr, end_itr, m_s->get_words()); @@ -485,9 +485,9 @@ class BitsetImpl { { ReadLockGuard other_lock{&other}; // ensure distinct buffers - if ((m_buf->size != other.m_buf->size) || (m_buf == other.m_buf)) { - m_buf = make_byte_array_with_deleter(other.m_buf->size, other.m_s->m_alignment_size); - m_s = new (m_buf->bytes) + if ((m_buf->size() != other.m_buf->size()) || (m_buf == other.m_buf)) { + m_buf = make_byte_array_with_deleter(other.m_buf->size(), other.m_s->m_alignment_size); + m_s = new (m_buf->bytes()) bitset_serialized{other.m_s->m_id, other.m_s->m_nbits, other.m_s->m_skip_bits, other.m_s->m_alignment_size, false}; std::uninitialized_copy(other.m_s->get_words_const(), other.m_s->end_words_const(), @@ -496,7 +496,7 @@ class BitsetImpl { // Word array is initialized here so std::copy suffices for some or all const auto old_words_cap{m_s->m_words_cap}; if (other.m_s->m_words_cap > old_words_cap) { - m_s = new (m_buf->bytes) + m_s = new (m_buf->bytes()) bitset_serialized{other.m_s->m_id, other.m_s->m_nbits, other.m_s->m_skip_bits, other.m_s->m_alignment_size, false}; // copy into previously initialized spaces @@ -513,7 +513,7 @@ class BitsetImpl { std::next(m_s->get_words(), old_words_cap)); } - m_s = new (m_buf->bytes) + m_s = new (m_buf->bytes()) bitset_serialized{other.m_s->m_id, other.m_s->m_nbits, other.m_s->m_skip_bits, other.m_s->m_alignment_size, false}; std::copy(other.m_s->get_words_const(), other.m_s->end_words_const(), m_s->get_words()); @@ -536,11 +536,11 @@ class BitsetImpl { // ensure distinct buffers bool uninitialized{false}; const auto old_words_cap{m_s->m_words_cap}; - if ((m_buf->size != size) || (m_buf == other.m_buf)) { + if ((m_buf->size() != size) || (m_buf == other.m_buf)) { m_buf = make_byte_array_with_deleter(size, alignment_size); uninitialized = true; } - m_s = new (m_buf->bytes) bitset_serialized{other.m_s->m_id, nbits, 0, alignment_size, false}; + m_s = new (m_buf->bytes()) bitset_serialized{other.m_s->m_id, nbits, 0, alignment_size, false}; const auto new_words_cap{m_s->m_words_cap}; bitword_type* word_ptr{m_s->get_words()}; const uint8_t rhs_offset{other.get_word_offset(0)}; @@ -667,17 +667,17 @@ class BitsetImpl { delete ptr; } }}}; - word_t* word_ptr{reinterpret_cast< word_t* >(buf->bytes + sizeof(bitset_serialized))}; + word_t* word_ptr{reinterpret_cast< word_t* >(buf->bytes() + sizeof(bitset_serialized))}; if (std::is_standard_layout_v< bitword_type > && std::is_trivial_v< value_type > && (sizeof(value_type) == sizeof(bitword_type))) { const size_t num_words{static_cast< size_t >(m_s->end_words_const() - get_word_const(0))}; const uint64_t skip_bits{get_word_offset(0)}; - new (buf->bytes) bitset_serialized{m_s->m_id, num_bits + skip_bits, skip_bits, alignment_size, false}; + new (buf->bytes()) bitset_serialized{m_s->m_id, num_bits + skip_bits, skip_bits, alignment_size, false}; std::memcpy(static_cast< void* >(word_ptr), static_cast< const void* >(get_word_const(0)), num_words * sizeof(word_t)); } else { // non trivial, word by word copy the unshifted data words - new (buf->bytes) bitset_serialized{m_s->m_id, num_bits, 0, alignment_size, false}; + new (buf->bytes()) bitset_serialized{m_s->m_id, num_bits, 0, alignment_size, false}; uint64_t current_bit{0}; for (uint64_t word_num{0}; word_num < total_words; ++word_num, ++word_ptr, current_bit += word_size()) { new (word_ptr) word_t{get_word_value(current_bit)}; @@ -1146,7 +1146,7 @@ class BitsetImpl { const uint64_t new_nbits{nbits + new_skip_bits}; auto new_buf{make_byte_array_with_deleter(bitset_serialized::nbytes(new_nbits), m_s->m_alignment_size)}; - auto new_s{new (new_buf->bytes) + auto new_s{new (new_buf->bytes()) bitset_serialized{m_s->m_id, new_nbits, new_skip_bits, m_s->m_alignment_size, false}}; const auto new_cap{new_s->m_words_cap}; diff --git a/include/sisl/fds/buffer.hpp b/include/sisl/fds/buffer.hpp index 505fc603..8f514d50 100644 --- a/include/sisl/fds/buffer.hpp +++ b/include/sisl/fds/buffer.hpp @@ -31,13 +31,49 @@ #include #include "utils.hpp" +#ifndef NDEBUG +#ifndef _DEBUG +#define _DEBUG +#endif +#endif + namespace sisl { struct blob { - uint8_t* bytes; - uint32_t size; +protected: + uint8_t* bytes_{nullptr}; + uint32_t size_{0}; +#ifdef _DEBUG + bool is_const_{false}; +#endif + +public: + blob() = default; + blob(uint8_t* b, uint32_t s) : bytes_{b}, size_{s} {} + blob(uint8_t const* b, uint32_t s) : bytes_{const_cast< uint8_t* >(b)}, size_{s} { +#ifdef _DEBUG + is_const_ = true; +#endif + } + + uint8_t* bytes() { + DEBUG_ASSERT_EQ(is_const_, false, "Trying to access writeable bytes with const declaration"); + return bytes_; + } + uint32_t size() const { return size_; } + uint8_t const* cbytes() const { return bytes_; } - blob() : blob{nullptr, 0} {} - blob(uint8_t* b, uint32_t s) : bytes{b}, size{s} {} + void set_bytes(uint8_t* b) { + DEBUG_ASSERT_EQ(is_const_, false, "Trying to access writeable bytes with const declaration"); + bytes_ = b; + } + + void set_bytes(uint8_t const* b) { +#ifdef _DEBUG + is_const_ = false; +#endif + bytes_ = const_cast< uint8_t* >(b); + } + void set_size(uint32_t s) { size_ = s; } }; using sg_iovs_t = folly::small_vector< iovec, 4 >; @@ -241,49 +277,50 @@ struct io_blob; using io_blob_list_t = folly::small_vector< sisl::io_blob, 4 >; struct io_blob : public blob { - bool aligned{false}; +protected: + bool aligned_{false}; +public: io_blob() = default; - io_blob(const size_t sz, const uint32_t align_size = 512, const buftag tag = buftag::common) { - buf_alloc(sz, align_size, tag); - } - io_blob(uint8_t* const bytes, const uint32_t size, const bool is_aligned) : - blob(bytes, size), aligned{is_aligned} {} + io_blob(size_t sz, uint32_t align_size = 512, buftag tag = buftag::common) { buf_alloc(sz, align_size, tag); } + io_blob(uint8_t* bytes, uint32_t size, bool is_aligned) : blob(bytes, size), aligned_{is_aligned} {} + io_blob(uint8_t const* bytes, uint32_t size, bool is_aligned) : blob(bytes, size), aligned_{is_aligned} {} ~io_blob() = default; - void buf_alloc(const size_t sz, const uint32_t align_size = 512, const buftag tag = buftag::common) { - aligned = (align_size != 0); - blob::size = sz; - blob::bytes = aligned ? sisl_aligned_alloc(align_size, sz, tag) : (uint8_t*)malloc(sz); + void buf_alloc(size_t sz, uint32_t align_size = 512, buftag tag = buftag::common) { + aligned_ = (align_size != 0); + blob::size_ = sz; + blob::bytes_ = aligned_ ? sisl_aligned_alloc(align_size, sz, tag) : (uint8_t*)malloc(sz); } - void buf_free(const buftag tag = buftag::common) const { - aligned ? sisl_aligned_free(blob::bytes, tag) : std::free(blob::bytes); + void buf_free(buftag tag = buftag::common) const { + aligned_ ? sisl_aligned_free(blob::bytes_, tag) : std::free(blob::bytes_); } - void buf_realloc(const size_t new_size, const uint32_t align_size = 512, - [[maybe_unused]] const buftag tag = buftag::common) { + void buf_realloc(size_t new_size, uint32_t align_size = 512, [[maybe_unused]] buftag tag = buftag::common) { uint8_t* new_buf{nullptr}; - if (aligned) { + if (aligned_) { // aligned before, so do not need check for new align size, once aligned will be aligned on realloc also - new_buf = sisl_aligned_realloc(blob::bytes, align_size, new_size, blob::size); + new_buf = sisl_aligned_realloc(blob::bytes_, align_size, new_size, blob::size_); } else if (align_size != 0) { // Not aligned before, but need aligned now uint8_t* const new_buf{sisl_aligned_alloc(align_size, new_size, buftag::common)}; - std::memcpy(static_cast< void* >(new_buf), static_cast< const void* >(blob::bytes), - std::min(new_size, static_cast< size_t >(blob::size))); - std::free(blob::bytes); + std::memcpy(static_cast< void* >(new_buf), static_cast< const void* >(blob::bytes_), + std::min(new_size, static_cast< size_t >(blob::size_))); + std::free(blob::bytes_); } else { // don't bother about alignment, just do standard realloc - new_buf = (uint8_t*)std::realloc(blob::bytes, new_size); + new_buf = (uint8_t*)std::realloc(blob::bytes_, new_size); } - blob::size = new_size; - blob::bytes = new_buf; + blob::size_ = new_size; + blob::bytes_ = new_buf; } + bool is_aligned() const { return aligned_; } + static io_blob from_string(const std::string& s) { - return io_blob{r_cast< uint8_t* >(const_cast< char* >(s.data())), uint32_cast(s.size()), false}; + return io_blob{r_cast< const uint8_t* >(s.data()), uint32_cast(s.size()), false}; } static io_blob_list_t sg_list_to_ioblob_list(const sg_list& sglist) { @@ -304,28 +341,30 @@ struct io_blob_safe final : public io_blob { buftag m_tag; public: + io_blob_safe() = default; io_blob_safe(uint32_t sz, uint32_t alignment = 0, buftag tag = buftag::common) : io_blob(sz, alignment, tag), m_tag{tag} {} io_blob_safe(uint8_t* bytes, uint32_t size, bool is_aligned) : io_blob(bytes, size, is_aligned) {} + io_blob_safe(uint8_t const* bytes, uint32_t size, bool is_aligned) : io_blob(bytes, size, is_aligned) {} ~io_blob_safe() { - if (bytes != nullptr) { io_blob::buf_free(m_tag); } + if (blob::bytes_ != nullptr) { io_blob::buf_free(m_tag); } } io_blob_safe(io_blob_safe const& other) = delete; io_blob_safe(io_blob_safe&& other) : io_blob(std::move(other)), m_tag(other.m_tag) { - other.bytes = nullptr; - other.size = 0; + other.bytes_ = nullptr; + other.size_ = 0; } io_blob_safe& operator=(io_blob_safe const& other) = delete; // Delete copy constructor io_blob_safe& operator=(io_blob_safe&& other) { - if (bytes != nullptr) { this->buf_free(m_tag); } + if (blob::bytes_ != nullptr) { this->buf_free(m_tag); } *((io_blob*)this) = std::move(*((io_blob*)&other)); m_tag = other.m_tag; - other.bytes = nullptr; - other.size = 0; + other.bytes_ = nullptr; + other.size_ = 0; return *this; } }; @@ -333,36 +372,32 @@ struct io_blob_safe final : public io_blob { using byte_array_impl = io_blob_safe; using byte_array = std::shared_ptr< io_blob_safe >; -inline byte_array make_byte_array(const uint32_t sz, const uint32_t alignment = 0, const buftag tag = buftag::common) { +inline byte_array make_byte_array(uint32_t sz, uint32_t alignment = 0, buftag tag = buftag::common) { return std::make_shared< io_blob_safe >(sz, alignment, tag); } -inline byte_array to_byte_array(const sisl::io_blob& blob) { - return std::make_shared< io_blob_safe >(blob.bytes, blob.size, blob.aligned); -} - struct byte_view { public: byte_view() = default; - byte_view(const uint32_t sz, const uint32_t alignment = 0, const buftag tag = buftag::common) { + byte_view(uint32_t sz, uint32_t alignment = 0, buftag tag = buftag::common) { m_base_buf = make_byte_array(sz, alignment, tag); - m_view = *m_base_buf; + m_view.set_bytes(m_base_buf->cbytes()); + m_view.set_size(m_base_buf->size()); } - byte_view(byte_array buf) : byte_view(std::move(buf), 0, buf->size) {} - byte_view(byte_array buf, const uint32_t offset, const uint32_t sz) { + byte_view(byte_array buf) : byte_view(std::move(buf), 0u, buf->size()) {} + byte_view(byte_array buf, uint32_t offset, uint32_t sz) { m_base_buf = std::move(buf); - m_view.bytes = m_base_buf->bytes + offset; - m_view.size = sz; + m_view.set_bytes(m_base_buf->cbytes() + offset); + m_view.set_size(sz); } - byte_view(const byte_view& v, const uint32_t offset, const uint32_t sz) { - DEBUG_ASSERT_GE(v.m_view.size, sz + offset); + byte_view(const byte_view& v, uint32_t offset, uint32_t sz) { + DEBUG_ASSERT_GE(v.m_view.size(), sz + offset); m_base_buf = v.m_base_buf; - m_view.bytes = v.m_view.bytes + offset; - m_view.size = sz; + m_view.set_bytes(v.m_view.cbytes() + offset); + m_view.set_size(sz); } - byte_view(const sisl::io_blob& blob) : - byte_view(std::make_shared< byte_array_impl >(blob.bytes, blob.size, blob.aligned)) {} + byte_view(const sisl::io_blob& b) : byte_view(b.size(), b.is_aligned()) {} ~byte_view() = default; byte_view(const byte_view& other) = default; @@ -380,33 +415,36 @@ struct byte_view { } blob get_blob() const { return m_view; } - uint8_t* bytes() const { return m_view.bytes; } - uint32_t size() const { return m_view.size; } - void move_forward(const uint32_t by) { - assert(m_view.size >= by); - m_view.bytes += by; - m_view.size -= by; + uint8_t const* bytes() const { return m_view.cbytes(); } + uint32_t size() const { return m_view.size(); } + void move_forward(uint32_t by) { + DEBUG_ASSERT_GE(m_view.size(), by, "Size greater than move forward request by"); + m_view.set_bytes(m_view.cbytes() + by); + m_view.set_size(m_view.size() - by); validate(); } // Extract the byte_array so that caller can safely use the underlying byte_array. If the view represents the // entire array, it will not do any copy. If view represents only portion of array, create a copy of the byte array // and returns that value - byte_array extract(const uint32_t alignment = 0) const { + byte_array extract(uint32_t alignment = 0) const { if (can_do_shallow_copy()) { return m_base_buf; } else { - auto base_buf = make_byte_array(m_view.size, alignment, m_base_buf->m_tag); - std::memcpy(base_buf->bytes, m_view.bytes, m_view.size); + auto base_buf = make_byte_array(m_view.size(), alignment, m_base_buf->m_tag); + std::memcpy(base_buf->bytes(), m_view.cbytes(), m_view.size()); return base_buf; } } bool can_do_shallow_copy() const { - return (m_view.bytes == m_base_buf->bytes) && (m_view.size == m_base_buf->size); + return (m_view.cbytes() == m_base_buf->cbytes()) && (m_view.size() == m_base_buf->size()); + } + void set_size(uint32_t sz) { m_view.set_size(sz); } + void validate() const { + DEBUG_ASSERT_LE((void*)(m_base_buf->cbytes() + m_base_buf->size()), (void*)(m_view.cbytes() + m_view.size()), + "Invalid byte_view"); } - void set_size(const uint32_t sz) { m_view.size = sz; } - void validate() { assert((m_base_buf->bytes + m_base_buf->size) >= (m_view.bytes + m_view.size)); } std::string get_string() const { return std::string(r_cast< const char* >(bytes()), uint64_cast(size())); } diff --git a/include/sisl/fds/compact_bitset.hpp b/include/sisl/fds/compact_bitset.hpp index 53c9df02..2c8f5db6 100644 --- a/include/sisl/fds/compact_bitset.hpp +++ b/include/sisl/fds/compact_bitset.hpp @@ -55,11 +55,11 @@ class CompactBitSet { allocated_ = true; } - CompactBitSet(sisl::blob const& buf, bool init_bits) : s_{r_cast< serialized* >(buf.bytes)} { - DEBUG_ASSERT_GT(buf.size, 0, "compact bitset initialized with empty buffer"); - DEBUG_ASSERT_EQ(buf.size % word_size_bytes(), 0, "compact bitset buffer size must be multiple of word size"); - nbits_ = buf.size * 8; - if (init_bits) { std::memset(buf.bytes, 0, buf.size); } + CompactBitSet(sisl::blob buf, bool init_bits) : s_{r_cast< serialized* >(buf.bytes())} { + DEBUG_ASSERT_GT(buf.size(), 0, "compact bitset initialized with empty buffer"); + DEBUG_ASSERT_EQ(buf.size() % word_size_bytes(), 0, "compact bitset buffer size must be multiple of word size"); + nbits_ = buf.size() * 8; + if (init_bits) { std::memset(buf.bytes(), 0, buf.size()); } } ~CompactBitSet() { diff --git a/include/sisl/grpc/generic_service.hpp b/include/sisl/grpc/generic_service.hpp index cdd3ad89..a401feea 100644 --- a/include/sisl/grpc/generic_service.hpp +++ b/include/sisl/grpc/generic_service.hpp @@ -59,19 +59,19 @@ class GenericRpcData : public RpcDataAbstract, sisl::ObjLifeCounter< GenericRpcD const grpc::ByteBuffer& request() const { return m_request; } sisl::io_blob& request_blob() { - if (!m_request_blob.bytes) { + if (m_request_blob.cbytes() == nullptr) { grpc::Slice slice; auto status = m_request.TrySingleSlice(&slice); if (status.ok()) { - m_request_blob.bytes = const_cast< uint8_t* >(slice.begin()); - m_request_blob.size = slice.size(); + m_request_blob.set_bytes(slice.begin()); + m_request_blob.set_size(slice.size()); } else if (status.error_code() == grpc::StatusCode::FAILED_PRECONDITION) { // If the ByteBuffer is not made up of single slice, TrySingleSlice() will fail. // DumpSingleSlice() should work in those cases but will incur a copy. if (status = m_request.DumpToSingleSlice(&slice); status.ok()) { m_request_blob.buf_alloc(slice.size()); m_request_blob_allocated = true; - std::memcpy(static_cast< void* >(m_request_blob.bytes), static_cast< const void* >(slice.begin()), + std::memcpy(voidptr_cast(m_request_blob.bytes()), c_voidptr_cast(slice.begin()), slice.size()); } } diff --git a/src/cache/tests/test_range_cache.cpp b/src/cache/tests/test_range_cache.cpp index cd68a6a5..843b321a 100644 --- a/src/cache/tests/test_range_cache.cpp +++ b/src/cache/tests/test_range_cache.cpp @@ -152,15 +152,15 @@ struct RangeCacheTest : public testing::Test { } } - void file_write(const uint32_t chunk_num, const uint32_t start_blk, const sisl::io_blob& b) { - const auto written = ::pwrite(m_fds[chunk_num], voidptr_cast(b.bytes), b.size, (start_blk * g_blk_size)); - RELEASE_ASSERT_EQ(written, b.size, "Not entire data is written to file"); + void file_write(const uint32_t chunk_num, const uint32_t start_blk, sisl::io_blob& b) { + const auto written = ::pwrite(m_fds[chunk_num], voidptr_cast(b.bytes()), b.size(), (start_blk * g_blk_size)); + RELEASE_ASSERT_EQ(written, b.size(), "Not entire data is written to file"); } sisl::io_blob file_read(const uint32_t chunk_num, const uint32_t blk, const uint32_t nblks) { sisl::io_blob b{nblks * g_blk_size, 0}; - const auto read_size = ::pread(m_fds[chunk_num], voidptr_cast(b.bytes), b.size, (blk * g_blk_size)); - RELEASE_ASSERT_EQ(uint32_cast(read_size), b.size, "Not entire data is read from file"); + const auto read_size = ::pread(m_fds[chunk_num], voidptr_cast(b.bytes()), b.size(), (blk * g_blk_size)); + RELEASE_ASSERT_EQ(uint32_cast(read_size), b.size(), "Not entire data is read from file"); return b; } @@ -168,7 +168,7 @@ struct RangeCacheTest : public testing::Test { auto b = file_read(chunk_num, data.first.m_nth, data.first.m_count); ASSERT_EQ(data.second.size(), data.first.m_count * g_blk_size) << "Mismatch of size between byte_view and RangeKey"; - auto ret = ::memcmp(data.second.bytes(), b.bytes, b.size); + auto ret = ::memcmp(data.second.bytes(), b.bytes(), b.size()); ASSERT_EQ(ret, 0) << "Data validation failed for Blk [" << data.first.m_nth << "-" << data.first.end_nth() << "]"; b.buf_free(); diff --git a/src/cache/tests/test_range_hashmap.cpp b/src/cache/tests/test_range_hashmap.cpp index 9231b1c3..d6de8557 100644 --- a/src/cache/tests/test_range_hashmap.cpp +++ b/src/cache/tests/test_range_hashmap.cpp @@ -60,11 +60,11 @@ struct RangeHashMapTest : public testing::Test { for (const auto& [key, val] : entries) { ASSERT_EQ(key.m_base_key, 1u) << "Expected base key is standard value 1"; - uint8_t* got_bytes = val.bytes(); + uint8_t const* got_bytes = val.bytes(); for (auto o{key.m_nth}; o < key.m_nth + key.m_count; ++o) { auto it = m_shadow_map.find(o); ASSERT_EQ(m_inserted_slots.is_bits_set(o, 1), true) << "Found a key " << o << " which was not inserted"; - compare_data(o, got_bytes, it->second.bytes); + compare_data(o, got_bytes, it->second.cbytes()); got_bytes += per_val_size; } } @@ -87,7 +87,7 @@ struct RangeHashMapTest : public testing::Test { sisl::io_blob create_data(const uint32_t start, const uint32_t end) { auto blob = sisl::io_blob{per_val_size * (end - start + 1), 0}; - uint8_t* bytes = blob.bytes; + uint8_t* bytes = blob.bytes(); for (auto i = start; i <= end; ++i) { auto arr = (std::array< uint32_t, per_val_size / sizeof(uint32_t) >*)bytes; diff --git a/src/fds/tests/test_compact_bitset.cpp b/src/fds/tests/test_compact_bitset.cpp index 7ce9cc95..adc0ff64 100644 --- a/src/fds/tests/test_compact_bitset.cpp +++ b/src/fds/tests/test_compact_bitset.cpp @@ -52,7 +52,7 @@ class CompactBitsetTest : public testing::Test { }; TEST_F(CompactBitsetTest, AlternateBits) { - ASSERT_EQ(m_bset->size(), m_buf.size * 8); + ASSERT_EQ(m_bset->size(), m_buf.size() * 8); for (CompactBitSet::bit_count_t i{0}; i < m_bset->size(); ++i) { ASSERT_EQ(m_bset->is_bit_set(i), false); diff --git a/src/fds/tests/test_sg_list.cpp b/src/fds/tests/test_sg_list.cpp index e94e89da..c62f6789 100644 --- a/src/fds/tests/test_sg_list.cpp +++ b/src/fds/tests/test_sg_list.cpp @@ -140,9 +140,9 @@ TEST_F(SgListTestOffset, TestMoveOffsetAligned) { auto rand_num = r_cast< uint32_t* >(iovs[0].iov_base); EXPECT_EQ(*rand_num, data_vec[i]); - rand_num = r_cast< uint32_t* >(ioblob_list[i].bytes); + rand_num = r_cast< uint32_t* >(ioblob_list[i].bytes()); EXPECT_EQ(*rand_num, data_vec[i]); - EXPECT_EQ(ioblob_list[i].size, SZ); + EXPECT_EQ(ioblob_list[i].size(), SZ); } sisl::sg_iterator sgitr1{sgl.iovs}; diff --git a/src/grpc/tests/function/echo_async_client.cpp b/src/grpc/tests/function/echo_async_client.cpp index 293354dd..57dba05b 100644 --- a/src/grpc/tests/function/echo_async_client.cpp +++ b/src/grpc/tests/function/echo_async_client.cpp @@ -75,8 +75,8 @@ static void DeserializeFromBuffer(const grpc::ByteBuffer& buffer, DataMessage& m static void DeserializeFromBuffer(sisl::io_blob const& buffer, DataMessage& msg) { std::string buf; - buf.reserve(buffer.size); - buf.append(reinterpret_cast< const char* >(buffer.bytes), buffer.size); + buf.reserve(buffer.size()); + buf.append(reinterpret_cast< const char* >(buffer.cbytes()), buffer.size()); msg.DeserializeFromString(buf); } diff --git a/src/logging/logging.cpp b/src/logging/logging.cpp index 41fe1f9a..8c51e27b 100644 --- a/src/logging/logging.cpp +++ b/src/logging/logging.cpp @@ -138,24 +138,33 @@ std::shared_ptr< spdlog::logger >& GetCriticalLogger() { static std::filesystem::path g_base_dir; std::filesystem::path get_base_dir() { - static std::once_flag one_base_dir; - std::call_once(one_base_dir, [] { - const auto cwd{std::filesystem::current_path()}; - g_base_dir = cwd / "logs"; - // Construct a unique directory path based on the current time - auto const current_time{std::chrono::system_clock::now()}; - auto const current_t{std::chrono::system_clock::to_time_t(current_time)}; - auto const current_tm{std::localtime(¤t_t)}; - std::array< char, PATH_MAX > c_time; - if (std::strftime(c_time.data(), c_time.size(), "%F_%R", current_tm)) { - g_base_dir /= c_time.data(); - std::filesystem::create_directories(g_base_dir); + namespace fs = std::filesystem; + const auto cwd = fs::current_path(); + const auto log_dir{cwd / "logs"}; + + // Construct a unique directory path based on the current time + auto const current_time{std::chrono::system_clock::now()}; + auto const current_t{std::chrono::system_clock::to_time_t(current_time)}; + auto const current_tm{std::localtime(¤t_t)}; + std::array< char, PATH_MAX > c_time; + if (std::strftime(c_time.data(), c_time.size(), "%F_%R", current_tm)) { + const fs::path cur_log_dir = log_dir / c_time.data(); + fs::create_directories(cur_log_dir); + + const fs::path sym_path = log_dir / "latest"; + try { + if (fs::is_symlink(sym_path)) { fs::remove(sym_path); } + fs::create_directory_symlink(cur_log_dir, sym_path); + } catch (std::exception& e) { + LOGINFO("Unable to create latest symlink 'latest' to logdir, ignoring symlink creation\n"); } - }); - - return g_base_dir; + return cur_log_dir; + } else { + return log_dir; + } } + static std::filesystem::path log_path(std::string const& name) { std::filesystem::path p; if (0 < SISL_OPTIONS.count("logfile")) { From 2db98d136394bfd1c23c9a8f3eb4e69571dbc9c4 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 13 Dec 2023 10:16:06 -0700 Subject: [PATCH 371/385] Logging improvements. (#198) Logging modules are locally defined by SISL_LOGGING_DEF(...) now to allow forward compat. --- .clang-format | 1 - conanfile.py | 5 +-- include/sisl/logging/logging.h | 12 +++---- src/flip/lib/flip_rpc_server.cpp | 2 ++ src/grpc/rpc_server.cpp | 2 ++ src/logging/logging.cpp | 60 +++++++++++++++----------------- src/logging/test/example.cpp | 2 ++ test_package/example_decl.cpp | 2 +- test_package/test_package.cpp | 1 + 9 files changed, 43 insertions(+), 44 deletions(-) diff --git a/.clang-format b/.clang-format index 2f771200..fdfa11f5 100644 --- a/.clang-format +++ b/.clang-format @@ -18,7 +18,6 @@ AlignOperands: false AlignTrailingComments: true AllowShortBlocksOnASingleLine: true AllowShortIfStatementsOnASingleLine: true -AllowShortBlocksOnASingleLine: true AllowShortCaseLabelsOnASingleLine: false # AllowShortFunctionsOnASingleLine: InlineOnly # AllowShortLoopsOnASingleLine: false diff --git a/conanfile.py b/conanfile.py index 3c86519c..6285d60b 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "11.0.2" + version = "11.0.3" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" @@ -148,9 +148,6 @@ def package(self): def package_info(self): self.cpp_info.libs = ["sisl"] - if self.settings.compiler == "gcc": - self.cpp_info.cppflags.extend(["-fconcepts"]) - if self.settings.os == "Linux": self.cpp_info.libs.append("flip") self.cpp_info.cppflags.append("-D_POSIX_C_SOURCE=200809L") diff --git a/include/sisl/logging/logging.h b/include/sisl/logging/logging.h index 4d643ab4..3738ac97 100644 --- a/include/sisl/logging/logging.h +++ b/include/sisl/logging/logging.h @@ -36,10 +36,7 @@ #include #include #include -#include -#include #include -#include #include #include // NOTE: There is an ordering dependecy on this header and fmt headers below #include @@ -245,7 +242,7 @@ constexpr const char* file_name(const char* const str) { return str_slant(str) ? #define _ABORT_OR_DUMP(is_log_assert) \ assert(0); \ if (is_log_assert) { \ - if (sisl::logging::is_crash_handler_installed()) { raise(SIGUSR3); } \ + if (sisl::logging::is_crash_handler_installed()) { raise(SIGUSR3); } \ } else { \ abort(); \ } @@ -266,7 +263,7 @@ constexpr const char* file_name(const char* const str) { return str_slant(str) ? * LOGMSG_ASSERT: If condition is not met: Logs the message with stack trace, aborts in debug build only. * DEBUG_ASSERT: No-op in release build, for debug build, if condition is not met, logs the message and aborts */ -//#if __cplusplus > 201703L +// #if __cplusplus > 201703L #if 0 #define _GENERIC_ASSERT(is_log_assert, cond, formatter, msg, ...) \ [[unlikely]] if (!(cond)) { _LOG_AND_ASSERT_FMT(is_log_assert, formatter, msg, ##__VA_ARGS__); } @@ -448,9 +445,10 @@ MODLEVELDEC(_, _, base) #define SISL_LOGGING_DECL(...) \ BOOST_PP_SEQ_FOR_EACH(MODLEVELDEC, spdlog::level::level_enum::off, BOOST_PP_VARIADIC_TO_SEQ(__VA_ARGS__)) +#define SISL_LOGGING_DEF(...) \ + BOOST_PP_SEQ_FOR_EACH(MODLEVELDEF, spdlog::level::level_enum::err, BOOST_PP_VARIADIC_TO_SEQ(__VA_ARGS__)) + #define SISL_LOGGING_INIT(...) \ - BOOST_PP_SEQ_FOR_EACH(MODLEVELDEF, spdlog::level::level_enum::info, \ - BOOST_PP_TUPLE_TO_SEQ(BOOST_PP_VARIADIC_TO_TUPLE(__VA_ARGS__))) \ sisl::logging::InitModules s_init_enabled_mods{ \ BOOST_PP_SEQ_FOR_EACH(MOD_LEVEL_STRING, , BOOST_PP_VARIADIC_TO_SEQ(__VA_ARGS__))}; diff --git a/src/flip/lib/flip_rpc_server.cpp b/src/flip/lib/flip_rpc_server.cpp index d60b01e6..07cc20bc 100644 --- a/src/flip/lib/flip_rpc_server.cpp +++ b/src/flip/lib/flip_rpc_server.cpp @@ -25,6 +25,8 @@ #include "sisl/flip/flip_rpc_server.hpp" #include "sisl/flip/flip.hpp" +SISL_LOGGING_DEF(flip) + namespace flip { grpc::Status FlipRPCServer::InjectFault(grpc::ServerContext*, const FlipSpec* request, FlipResponse* response) { LOGTRACEMOD(flip, "InjectFault request = {}", request->DebugString()); diff --git a/src/grpc/rpc_server.cpp b/src/grpc/rpc_server.cpp index 45ba425e..f23afc88 100644 --- a/src/grpc/rpc_server.cpp +++ b/src/grpc/rpc_server.cpp @@ -26,6 +26,8 @@ extern "C" { #include +SISL_LOGGING_DEF(grpc_server) + namespace sisl { GrpcServer::GrpcServer(const std::string& listen_addr, uint32_t threads, const std::string& ssl_key, const std::string& ssl_cert) : diff --git a/src/logging/logging.cpp b/src/logging/logging.cpp index 8c51e27b..f2bd808d 100644 --- a/src/logging/logging.cpp +++ b/src/logging/logging.cpp @@ -57,10 +57,7 @@ SISL_OPTION_GROUP(logging, (enab_mods, "", "log_mods", "Module loggers to enabl (version, "V", "version", "Print the version and exist", ::cxxopts::value(), "")) // clang-format on -// logger required define if not inited -extern "C" { -spdlog::level::level_enum module_level_base{spdlog::level::level_enum::info}; -} +SISL_LOGGING_DEF(base) namespace sisl { namespace logging { @@ -263,37 +260,38 @@ static std::string setup_modules() { fmt::vformat_to(std::back_inserter(out_str), fmt::string_view{"{}={}, "}, fmt::make_format_args(mod_name, lvl_str)); } - } else { - if (SISL_OPTIONS.count("log_mods")) { - std::regex re{"[\\s,]+"}; - const auto s{SISL_OPTIONS["log_mods"].as< std::string >()}; - std::sregex_token_iterator it{std::cbegin(s), std::cend(s), re, -1}; - std::sregex_token_iterator reg_end; - for (; it != reg_end; ++it) { - auto mod_stream{std::istringstream(it->str())}; - std::string module_name, module_level; - std::getline(mod_stream, module_name, ':'); - const auto sym{std::string{"module_level_"} + module_name}; - if (auto* const mod_level{ - static_cast< spdlog::level::level_enum* >(::dlsym(RTLD_DEFAULT, sym.c_str()))}; - nullptr != mod_level) { - if (std::getline(mod_stream, module_level, ':')) { - *mod_level = (1 == module_level.size()) - ? static_cast< spdlog::level::level_enum >(std::strtol(module_level.data(), nullptr, 0)) - : spdlog::level::from_str(module_level.data()); - } - } else { - LOGWARN("Could not load module logger: {}\n{}", module_name, dlerror()); + } else + set_module_log_level("base", spdlog::level::level_enum::info); + + if (SISL_OPTIONS.count("log_mods")) { + std::regex re{"[\\s,]+"}; + const auto s{SISL_OPTIONS["log_mods"].as< std::string >()}; + std::sregex_token_iterator it{std::cbegin(s), std::cend(s), re, -1}; + std::sregex_token_iterator reg_end; + for (; it != reg_end; ++it) { + auto mod_stream{std::istringstream(it->str())}; + std::string module_name, module_level; + std::getline(mod_stream, module_name, ':'); + const auto sym{std::string{"module_level_"} + module_name}; + if (auto* const mod_level{static_cast< spdlog::level::level_enum* >(::dlsym(RTLD_DEFAULT, sym.c_str()))}; + nullptr != mod_level) { + if (std::getline(mod_stream, module_level, ':')) { + *mod_level = (1 == module_level.size()) + ? static_cast< spdlog::level::level_enum >(std::strtol(module_level.data(), nullptr, 0)) + : spdlog::level::from_str(module_level.data()); } + } else { + std::cout << fmt::format("Unable to locate the module {} in registered modules, error: {}\n", + module_name, dlerror()); } } + } - for (size_t mod_num{0}; mod_num < glob_num_mods; ++mod_num) { - const std::string& mod_name{glob_enabled_mods[mod_num]}; - fmt::vformat_to( - std::back_inserter(out_str), fmt::string_view{"{}={}, "}, - fmt::make_format_args(mod_name, spdlog::level::to_string_view(GetModuleLogLevel(mod_name)).data())); - } + for (size_t mod_num{0}; mod_num < glob_num_mods; ++mod_num) { + const std::string& mod_name{glob_enabled_mods[mod_num]}; + fmt::vformat_to( + std::back_inserter(out_str), fmt::string_view{"{}={}, "}, + fmt::make_format_args(mod_name, spdlog::level::to_string_view(GetModuleLogLevel(mod_name)).data())); } return out_str; diff --git a/src/logging/test/example.cpp b/src/logging/test/example.cpp index d6df91b4..adc4a675 100644 --- a/src/logging/test/example.cpp +++ b/src/logging/test/example.cpp @@ -24,6 +24,8 @@ #include +SISL_LOGGING_DECL(my_module) +SISL_LOGGING_DEF(my_module) SISL_LOGGING_INIT(my_module) void func() { diff --git a/test_package/example_decl.cpp b/test_package/example_decl.cpp index e6580b2b..2b560a84 100644 --- a/test_package/example_decl.cpp +++ b/test_package/example_decl.cpp @@ -1,6 +1,6 @@ #include -SISL_LOGGING_DECL(my_module) +SISL_LOGGING_DEF(my_module) void example_decl() { LOGINFOMOD(my_module, "Example def!"); diff --git a/test_package/test_package.cpp b/test_package/test_package.cpp index d0b62e0f..1cc93839 100644 --- a/test_package/test_package.cpp +++ b/test_package/test_package.cpp @@ -2,6 +2,7 @@ #include #include +SISL_LOGGING_DECL(my_module) SISL_LOGGING_INIT(my_module) SISL_OPTIONS_ENABLE(logging) From 452d5dabd991e8aa98c90aadc8895834326203ad Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 15 Dec 2023 09:16:57 -0700 Subject: [PATCH 372/385] Add version to stable cache. --- .github/workflows/build_dependencies.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index a365256d..15b39422 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -103,7 +103,7 @@ jobs: uses: eBay/sisl/.github/actions/load_conan@stable/v8.x with: testing: ${{ inputs.testing }} - key_prefix: SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} + key_prefix: Sisl10Deps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} - name: Setup Conan uses: eBay/sisl/.github/actions/setup_conan@stable/v8.x @@ -131,7 +131,7 @@ jobs: - name: Save Conan Cache uses: eBay/sisl/.github/actions/store_conan@stable/v8.x with: - key_prefix: SislDeps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} + key_prefix: Sisl10Deps-${{ inputs.platform }}-${{ inputs.build-type }}-${{ inputs.malloc-impl }}-${{ inputs.prerelease }} if: ${{ github.event_name != 'pull_request' && steps.restore-cache.outputs.cache-hit != 'true' }} - name: Create and Test Package From 525d1d54e6944a00d0bd3d633b58e996a80ba0f9 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 15 Dec 2023 09:18:47 -0700 Subject: [PATCH 373/385] Merge builds on 10. --- .github/workflows/merge_build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/merge_build.yml b/.github/workflows/merge_build.yml index 23c53fed..8b45619c 100644 --- a/.github/workflows/merge_build.yml +++ b/.github/workflows/merge_build.yml @@ -6,6 +6,7 @@ on: branches: - stable/v8.x - stable/v9.x + - stable/v10.x - master jobs: From c1782fb5431693624c6c422512e3414e6fe5abf8 Mon Sep 17 00:00:00 2001 From: Jie Yao Date: Fri, 29 Dec 2023 00:12:54 +0800 Subject: [PATCH 374/385] bump libcurl and openssl version (#200) --- conanfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index 6285d60b..52305f93 100644 --- a/conanfile.py +++ b/conanfile.py @@ -96,7 +96,7 @@ def requirements(self): self.requires("spdlog/1.12.0") self.requires("zmarok-semver/1.1.0") self.requires("fmt/10.0.0", override=True) - self.requires("libcurl/8.2.1", override=True) + self.requires("libcurl/8.4.0", override=True) self.requires("openssl/3.1.3", override=True) self.requires("xz_utils/5.2.5", override=True) self.requires("zlib/1.2.13", override=True) From 801b1163fac3c5f93ecbfea057185c115f496f9c Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Thu, 4 Jan 2024 10:11:17 -0800 Subject: [PATCH 375/385] remove file from the map regardless of the inotify result during remove watch (#201) Co-authored-by: Ravi Nagarjun Akella --- conanfile.py | 2 +- src/file_watcher/file_watcher.cpp | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/conanfile.py b/conanfile.py index 01a148b2..9a19b678 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.6.5" + version = "8.6.6" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") diff --git a/src/file_watcher/file_watcher.cpp b/src/file_watcher/file_watcher.cpp index 05145ba1..94989052 100644 --- a/src/file_watcher/file_watcher.cpp +++ b/src/file_watcher/file_watcher.cpp @@ -119,9 +119,11 @@ bool FileWatcher::unregister_listener(const std::string& file_path, const std::s } bool FileWatcher::remove_watcher(FileInfo& file_info) { - if (auto err = inotify_rm_watch(m_inotify_fd, file_info.m_wd); err != 0) { return false; } + bool success = true; + if (auto err = inotify_rm_watch(m_inotify_fd, file_info.m_wd); err != 0) { success = false; } + // remove the file from the map regardless of the inotify_rm_watch result m_files.erase(file_info.m_filepath); - return true; + return success; } bool FileWatcher::stop() { From 537e8deccd85b8be818a986b90023e6a11bef787 Mon Sep 17 00:00:00 2001 From: raakella1 <114193113+raakella1@users.noreply.github.com> Date: Wed, 10 Jan 2024 09:10:50 -0800 Subject: [PATCH 376/385] remove -u option from conan create to fix build conflict in libcurl (#202) Co-authored-by: Ravi Nagarjun Akella --- .jenkins/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 05d762cf..bdb80554 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -129,7 +129,7 @@ pipeline { if (("${env.BRANCH_NAME}" =~ /PR-/) && ("$BUILD_TYPE" == "debug")) { sh "echo Skipping debug build for PR branch" } else { - sh "conan create -u ${BUILD_MISSING} -o ${PROJECT}:malloc_impl=${ALLOC} -o ${PROJECT}:prerelease=${PRERELEASE} -o ${PROJECT}:sanitize=${SANITIZE} -pr ${BUILD_PROFILE} . ${PROJECT}/${TAG}" + sh "conan create ${BUILD_MISSING} -o ${PROJECT}:malloc_impl=${ALLOC} -o ${PROJECT}:prerelease=${PRERELEASE} -o ${PROJECT}:sanitize=${SANITIZE} -pr ${BUILD_PROFILE} . ${PROJECT}/${TAG}" } } } From e2f0ffab293cfef6d1ba9d16c131e59781331234 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 10 Jan 2024 10:46:56 -0700 Subject: [PATCH 377/385] Build on stable/9 --- .github/workflows/merge_conan_build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/merge_conan_build.yml b/.github/workflows/merge_conan_build.yml index 8ff164c6..6a838f68 100644 --- a/.github/workflows/merge_conan_build.yml +++ b/.github/workflows/merge_conan_build.yml @@ -5,6 +5,7 @@ on: push: branches: - stable/v8.x + - stable/v9.x - master jobs: From 252448f20f11f2444aeadbb2a3f62b459a77a5dc Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 10 Jan 2024 11:02:06 -0700 Subject: [PATCH 378/385] Merge 8 to 9 (#203) * dump breakpad stacktrace file in the same dir as logfile (#151) Co-authored-by: Ravi Akella email = raakella@ebay.com * Token Caching (#169) * add caching to auth_manager * fix the auth test after caching * update folly cmake * add build missing for all branch builds --------- Authored-by: Ravi Akella email = raakella@ebay.com * add date to 3rd party (#170) Co-authored-by: Ravi Akella email = raakella@ebay.com * include filesystem header in logging.h (#175) Co-authored-by: Ravi Akella email = raakella@ebay.com * acquire unique lock for LRU cache get operation (#176) Co-authored-by: Ravi Akella email = raakella@ebay.com * lru get to return a copy rather than ref. Add more lru tests (#177) Co-authored-by: Ravi Akella email = raakella@ebay.com * disable stress test (#178) Co-authored-by: Ravi Akella email = raakella@ebay.com * remove file from the map regardless of the inotify result during remove watch (#201) Co-authored-by: Ravi Nagarjun Akella * remove -u option from conan create to fix build conflict in libcurl (#202) Co-authored-by: Ravi Nagarjun Akella --------- Co-authored-by: raakella1 <114193113+raakella1@users.noreply.github.com> Co-authored-by: Ravi Akella email = raakella@ebay.com Co-authored-by: Ravi Nagarjun Akella --- .github/workflows/merge_conan_build.yml | 57 +++---------- .github/workflows/pr_conan_build.yml | 35 ++++---- .jenkins/Jenkinsfile | 11 +-- conanfile.py | 3 +- include/sisl/auth_manager/LRUCache.h | 83 +++++++++++++++++++ include/sisl/auth_manager/auth_manager.hpp | 36 +++++++- include/sisl/logging/logging.h | 2 + src/auth_manager/CMakeLists.txt | 11 +++ src/auth_manager/auth_manager.cpp | 96 +++++++++++++++++++--- src/auth_manager/security_config.fbs | 4 + src/auth_manager/tests/AuthTest.cpp | 4 +- src/auth_manager/tests/LRUCacheTest.cpp | 73 ++++++++++++++++ src/file_watcher/file_watcher.cpp | 6 +- src/logging/logging.cpp | 45 +++++----- src/logging/stacktrace.cpp | 2 +- 15 files changed, 352 insertions(+), 116 deletions(-) create mode 100644 include/sisl/auth_manager/LRUCache.h create mode 100644 src/auth_manager/tests/LRUCacheTest.cpp diff --git a/.github/workflows/merge_conan_build.yml b/.github/workflows/merge_conan_build.yml index 6a838f68..f85f6056 100644 --- a/.github/workflows/merge_conan_build.yml +++ b/.github/workflows/merge_conan_build.yml @@ -13,32 +13,24 @@ jobs: strategy: fail-fast: false matrix: - platform: ["ubuntu-22.04", "ubuntu-20.04", "macos-13"] + platform: ["ubuntu-22.04"] build-type: ["Debug", "Release"] - malloc-impl: ["libc", "tcmalloc", "jemalloc"] + malloc-impl: ["libc", "tcmalloc"] prerelease: ["True", "False"] + tooling: ["Sanitize", "Coverage", "None"] exclude: - build-type: Debug - platform: ubuntu-20.04 - - malloc-impl: tcmalloc - platform: ubuntu-20.04 - - malloc-impl: jemalloc - platform: ubuntu-20.04 - - build-type: Debug - platform: macos-13 - - malloc-impl: tcmalloc - platform: macos-13 - - malloc-impl: jemalloc - platform: macos-13 - - malloc-impl: jemalloc - build-type: Debug - - malloc-impl: jemalloc prerelease: "False" - - malloc-impl: libc - build-type: Release - platform: ubuntu-22.04 - - prerelease: "True" - platform: ubuntu-20.04 + - build-type: Debug + tooling: None + - build-type: Debug + malloc-impl: tcmalloc + - build-type: Release + malloc-impl: libc + - build-type: Release + tooling: Sanitize + - build-type: Release + tooling: Coverage uses: ./.github/workflows/build_dependencies.yml with: platform: ${{ matrix.platform }} @@ -47,26 +39,3 @@ jobs: malloc-impl: ${{ matrix.malloc-impl }} prerelease: ${{ matrix.prerelease }} testing: 'True' - ChainBuild: - runs-on: "ubuntu-22.04" - steps: - - name: Start IOManager Build - run: | - curl -L \ - -X POST \ - -H "Accept: application/vnd.github+json" \ - -H "Authorization: Bearer ${{ secrets.CHAIN_BUILD_TOKEN }}"\ - -H "X-GitHub-Api-Version: 2022-11-28" \ - https://api.github.com/repos/eBay/iomanager/actions/workflows/merge_conan_build.yml/dispatches \ - -d '{"ref":"master","inputs":{}}' - if: ${{ github.ref == 'refs/heads/master' }} - - name: Start NuraftMesg Build - run: | - curl -L \ - -X POST \ - -H "Accept: application/vnd.github+json" \ - -H "Authorization: Bearer ${{ secrets.CHAIN_BUILD_TOKEN }}"\ - -H "X-GitHub-Api-Version: 2022-11-28" \ - https://api.github.com/repos/eBay/nuraft_mesg/actions/workflows/merge_conan_build.yml/dispatches \ - -d '{"ref":"main","inputs":{}}' - if: ${{ github.ref == 'refs/heads/master' }} diff --git a/.github/workflows/pr_conan_build.yml b/.github/workflows/pr_conan_build.yml index 07b61b22..989e6ba6 100644 --- a/.github/workflows/pr_conan_build.yml +++ b/.github/workflows/pr_conan_build.yml @@ -4,6 +4,7 @@ on: pull_request: branches: - stable/v8.x + - stable/v9.x - master jobs: @@ -11,32 +12,24 @@ jobs: strategy: fail-fast: false matrix: - platform: ["ubuntu-22.04", "ubuntu-20.04", "macos-13"] + platform: ["ubuntu-22.04"] build-type: ["Debug", "Release"] - malloc-impl: ["libc", "tcmalloc", "jemalloc"] + malloc-impl: ["libc", "tcmalloc"] prerelease: ["True", "False"] + tooling: ["Sanitize", "Coverage", "None"] exclude: - build-type: Debug - platform: ubuntu-20.04 - - malloc-impl: tcmalloc - platform: ubuntu-20.04 - - malloc-impl: jemalloc - platform: ubuntu-20.04 - - build-type: Debug - platform: macos-13 - - malloc-impl: tcmalloc - platform: macos-13 - - malloc-impl: jemalloc - platform: macos-13 - - malloc-impl: jemalloc - build-type: Debug - - malloc-impl: jemalloc prerelease: "False" - - malloc-impl: libc - build-type: Release - platform: ubuntu-22.04 - - prerelease: "True" - platform: ubuntu-20.04 + - build-type: Debug + tooling: None + - build-type: Debug + malloc-impl: tcmalloc + - build-type: Release + malloc-impl: libc + - build-type: Release + tooling: Sanitize + - build-type: Release + tooling: Coverage uses: ./.github/workflows/build_dependencies.yml with: platform: ${{ matrix.platform }} diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index 9ac4c6fd..45866e0a 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -16,18 +16,13 @@ pipeline { steps { script { sh(script: "sed -Ei 's, version = .*\"([[:digit:]]+\\.[[:digit:]]+\\.[[:digit:]]+).*, version = \"\\1-${env.BUILD_NUMBER}\",' conanfile.py") - sh(script: "sed -Ei 's,#LIBCURLFIXTOKEN.*,self.requires(\"libcurl/7.86.0\"\\, override=True),' conanfile.py") - BUILD_MISSING = "--build missing" } } } - stage('Adjust for Testing/Stable') { - when { - branch "${STABLE_BRANCH}" - } + stage('include build missing') { steps { script { - BUILD_MISSING = "" + BUILD_MISSING = "--build missing" } } } @@ -109,7 +104,7 @@ pipeline { } steps { sh "conan user -r ebay-local -p ${ARTIFACTORY_PASS} _service_sds" - sh "conan upload ${PROJECT}/${TAG} -c --all -r ebay-local" + sh "conan upload ${PROJECT}/${TAG} --parallel -c --all -r ebay-local" } } } diff --git a/conanfile.py b/conanfile.py index b4cfe63d..118c9d8b 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "9.4.5" + version = "9.4.6" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" @@ -87,6 +87,7 @@ def requirements(self): #LIBCURLFIXTOKEN self.requires("libevent/2.1.12", override=True) self.requires("openssl/1.1.1q", override=True) + self.requires("libcurl/7.86.0") self.requires("xz_utils/5.2.5", override=True) self.requires("zlib/1.2.12", override=True) diff --git a/include/sisl/auth_manager/LRUCache.h b/include/sisl/auth_manager/LRUCache.h new file mode 100644 index 00000000..504141d4 --- /dev/null +++ b/include/sisl/auth_manager/LRUCache.h @@ -0,0 +1,83 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace sisl { + +/** + * + * written by @jiankun + * + * A high performance LRU cache implementation. + * + * The cache provides two atomic operations: + * put(key, value): put an object into the cache. + * get(key): returns a optional reference to the value found by key in cache + * + * Important notes: + * 1. The get() method returns a const reference, any change to the reference + * needs to be done by a Put call. + * 2. The put/get methods are thread safe. + */ +template < typename key_t, typename value_t > +class LRUCache { +public: + using kv_pair_t = std::pair< key_t, value_t >; + using list_iterator_t = typename std::list< kv_pair_t >::iterator; + + explicit LRUCache(size_t capacity) : capacity_(capacity) {} + + template < typename K, typename V > + void put(K&& key, V&& value) { + std::unique_lock< std::shared_mutex > l{mtx_}; + + auto it = items_map_.find(key); + if (it != items_map_.end()) { + items_list_.erase(it->second); + items_map_.erase(it); + } + + items_list_.emplace_front(std::make_pair(std::forward< K >(key), std::forward< V >(value))); + items_map_[key] = items_list_.begin(); + + if (items_map_.size() > capacity_) { + auto last = items_list_.rbegin(); + items_map_.erase(last->first); + items_list_.pop_back(); + } + } + + [[nodiscard]] std::optional< value_t > get(const key_t& key) { + // we need unique lock for the splice operation + std::unique_lock< std::shared_mutex > l{mtx_}; + + auto it = items_map_.find(key); + if (it == items_map_.end()) { return std::nullopt; } + + items_list_.splice(items_list_.begin(), items_list_, it->second); + return std::optional(it->second->second); + } + + bool exists(const key_t& key) const { + std::shared_lock< std::shared_mutex > l{mtx_}; + return items_map_.find(key) != items_map_.end(); + } + + [[nodiscard]] size_t size() const { + std::shared_lock< std::shared_mutex > l{mtx_}; + return items_map_.size(); + } + +private: + std::list< kv_pair_t > items_list_; + std::unordered_map< key_t, list_iterator_t > items_map_; + size_t capacity_; + mutable std::shared_mutex mtx_; +}; + +} // namespace sisl diff --git a/include/sisl/auth_manager/auth_manager.hpp b/include/sisl/auth_manager/auth_manager.hpp index bf5ea957..01885809 100644 --- a/include/sisl/auth_manager/auth_manager.hpp +++ b/include/sisl/auth_manager/auth_manager.hpp @@ -18,14 +18,40 @@ #include #include "security_config.hpp" +#include "LRUCache.h" namespace sisl { ENUM(AuthVerifyStatus, uint8_t, OK, UNAUTH, FORBIDDEN) +template < typename key_t, typename value_t > +class LRUCache; + +/** + * This struct holds information of a token, that can be used as if + * they were extracted from decoded token. + */ +struct CachedToken { + AuthVerifyStatus response_status; + std::string msg; + bool valid; + std::chrono::system_clock::time_point expires_at; + + inline void set_invalid(AuthVerifyStatus code, const std::string& reason) { + valid = false; + response_status = code; + msg = reason; + } + + inline void set_valid() { + valid = true; + response_status = AuthVerifyStatus::OK; + } +}; + class AuthManager { public: - AuthManager() {} + AuthManager(); virtual ~AuthManager() = default; AuthVerifyStatus verify(const std::string& token, std::string& msg) const; @@ -33,5 +59,13 @@ class AuthManager { void verify_decoded(const jwt::decoded_jwt& decoded) const; virtual std::string download_key(const std::string& key_url) const; std::string get_app(const jwt::decoded_jwt& decoded) const; + + // the verify method is declared const. We make this mutable + // as these caches are modified in the verify method. md5_sum(raw_token) -> + // DecodedToken + mutable LRUCache< std::string, CachedToken > m_cached_tokens; + + // key_id -> signing public key + mutable LRUCache< std::string, std::string > m_cached_keys; }; } // namespace sisl diff --git a/include/sisl/logging/logging.h b/include/sisl/logging/logging.h index a0b7778c..c87a47d3 100644 --- a/include/sisl/logging/logging.h +++ b/include/sisl/logging/logging.h @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -482,6 +483,7 @@ extern bool is_crash_handler_installed(); extern bool restore_signal_handler(const SignalType sig_num); extern bool restore_signal_handlers(); extern bool send_thread_signal(const pthread_t thr, const SignalType sig_num); +extern std::filesystem::path get_base_dir(); template < typename... Args > std::string format_log_msg(const char* const msg, Args&&... args) { diff --git a/src/auth_manager/CMakeLists.txt b/src/auth_manager/CMakeLists.txt index 3e45f388..9fe278d6 100644 --- a/src/auth_manager/CMakeLists.txt +++ b/src/auth_manager/CMakeLists.txt @@ -42,5 +42,16 @@ if (DEFINED ENABLE_TESTING) GTest::gmock ) add_test(NAME AuthManager COMMAND test_auth_mgr) + + add_executable(test_lru_cache) + target_sources(test_lru_cache PRIVATE + tests/LRUCacheTest.cpp + ) + target_link_libraries(test_lru_cache + sisl + ${COMMON_DEPS} + GTest::gmock + ) + add_test(NAME LRUCache COMMAND test_lru_cache) endif() endif() diff --git a/src/auth_manager/auth_manager.cpp b/src/auth_manager/auth_manager.cpp index 38396cca..6e083cd4 100644 --- a/src/auth_manager/auth_manager.cpp +++ b/src/auth_manager/auth_manager.cpp @@ -2,14 +2,58 @@ #include #include +extern "C" { +#include +} #include "sisl/auth_manager/auth_manager.hpp" namespace sisl { +static std::string md5_sum(std::string const& s) { + unsigned char digest[MD5_DIGEST_LENGTH]; + + MD5(reinterpret_cast< unsigned char* >(const_cast< char* >(s.c_str())), s.length(), + reinterpret_cast< unsigned char* >(&digest)); + + std::ostringstream out; + out << std::hex; + for (int i = 0; i < MD5_DIGEST_LENGTH; i++) { + out << std::setfill('0') << std::setw(2) << std::hex << (int)(unsigned char)digest[i]; + } + return out.str(); +} + +struct incomplete_verification_error : std::exception { + explicit incomplete_verification_error(const std::string& error) : error_(error) {} + const char* what() const noexcept { return error_.c_str(); } + +private: + const std::string error_; +}; + +AuthManager::AuthManager() : + m_cached_tokens(SECURITY_DYNAMIC_CONFIG(auth_manager->auth_token_cache_size)), + m_cached_keys(SECURITY_DYNAMIC_CONFIG(auth_manager->auth_key_cache_size)) {} + AuthVerifyStatus AuthManager::verify(const std::string& token, std::string& msg) const { + // if we have it in cache, just use it to make the decision + auto const token_hash = md5_sum(token); + if (auto const ct = m_cached_tokens.get(token_hash); ct) { + if (ct->valid) { + auto now = std::chrono::system_clock::now(); + if (now > ct->expires_at + std::chrono::seconds(SECURITY_DYNAMIC_CONFIG(auth_manager->leeway))) { + m_cached_tokens.put(token_hash, + CachedToken{AuthVerifyStatus::UNAUTH, "token expired", false, ct->expires_at}); + } + } + msg = ct->msg; + return ct->response_status; + } + + // not found in cache + CachedToken cached_token; std::string app_name; - // TODO: cache tokens for better performance try { // this may throw if token is ill formed const auto decoded{jwt::decode(token)}; @@ -18,34 +62,66 @@ AuthVerifyStatus AuthManager::verify(const std::string& token, std::string& msg) // exception is thrown. verify_decoded(decoded); app_name = get_app(decoded); - } catch (const std::exception& e) { + cached_token.expires_at = decoded.get_expires_at(); + cached_token.set_valid(); + } catch (const incomplete_verification_error& e) { + // verification incomplete, the token validity is not determined, shouldn't + // cache msg = e.what(); return AuthVerifyStatus::UNAUTH; + } catch (const std::exception& e) { + cached_token.set_invalid(AuthVerifyStatus::UNAUTH, e.what()); + m_cached_tokens.put(token_hash, cached_token); + msg = cached_token.msg; + return cached_token.response_status; } // check client application if (SECURITY_DYNAMIC_CONFIG(auth_manager->auth_allowed_apps) != "all") { if (SECURITY_DYNAMIC_CONFIG(auth_manager->auth_allowed_apps).find(app_name) == std::string::npos) { - msg = fmt::format("application '{}' is not allowed to perform the request", app_name); - return AuthVerifyStatus::FORBIDDEN; + cached_token.set_invalid(AuthVerifyStatus::FORBIDDEN, + fmt::format("application '{}' is not allowed to perform the request", app_name)); } } - return AuthVerifyStatus::OK; + m_cached_tokens.put(token_hash, cached_token); + msg = cached_token.msg; + return cached_token.response_status; } + void AuthManager::verify_decoded(const jwt::decoded_jwt& decoded) const { const auto alg{decoded.get_algorithm()}; if (alg != "RS256") throw std::runtime_error(fmt::format("unsupported algorithm: {}", alg)); - if (!decoded.has_header_claim("x5u")) throw std::runtime_error("no indication of verification key"); + std::string signing_key; + std::string key_id; + auto should_cache_key = true; - auto key_url = decoded.get_header_claim("x5u").as_string(); + if (decoded.has_key_id()) { + key_id = decoded.get_key_id(); + auto cached_key = m_cached_keys.get(key_id); + if (cached_key) { + signing_key = *cached_key; + should_cache_key = false; + } + } else { + should_cache_key = false; + } - if (key_url.rfind(SECURITY_DYNAMIC_CONFIG(auth_manager->tf_token_url), 0) != 0) { - throw std::runtime_error(fmt::format("key url {} is not trusted", key_url)); + if (signing_key.empty()) { + if (!decoded.has_header_claim("x5u")) throw std::runtime_error("no indication of verification key"); + + auto key_url = decoded.get_header_claim("x5u").as_string(); + + if (key_url.rfind(SECURITY_DYNAMIC_CONFIG(auth_manager->tf_token_url), 0) != 0) { + throw std::runtime_error(fmt::format("key url {} is not trusted", key_url)); + } + signing_key = download_key(key_url); } - const std::string signing_key{download_key(key_url)}; + + if (should_cache_key) { m_cached_keys.put(key_id, signing_key); } + const auto verifier{jwt::verify() .with_issuer(SECURITY_DYNAMIC_CONFIG(auth_manager->issuer)) .allow_algorithm(jwt::algorithm::rs256(signing_key)) diff --git a/src/auth_manager/security_config.fbs b/src/auth_manager/security_config.fbs index e560455b..20cbec5a 100644 --- a/src/auth_manager/security_config.fbs +++ b/src/auth_manager/security_config.fbs @@ -33,6 +33,10 @@ table AuthManager { // ssl verification for the signing key download url verify: bool = true; + + // LRUCache sizes + auth_token_cache_size: uint32 = 2000; + auth_key_cache_size: uint32 = 100; } table SecuritySettings { diff --git a/src/auth_manager/tests/AuthTest.cpp b/src/auth_manager/tests/AuthTest.cpp index 7447a346..79ba44ac 100644 --- a/src/auth_manager/tests/AuthTest.cpp +++ b/src/auth_manager/tests/AuthTest.cpp @@ -210,12 +210,12 @@ TEST_F(AuthTest, trf_allow_valid_token) { EXPECT_EQ(mock_auth_mgr->verify(mock_trf_client.get_token()), AuthVerifyStatus::OK); // use the acces_token saved from the previous call - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); + EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(0); EXPECT_EQ(mock_auth_mgr->verify(mock_trf_client.get_token()), AuthVerifyStatus::OK); // set token to be expired invoking request_with_grant_token mock_trf_client.set_expiry(std::chrono::system_clock::now() - std::chrono::seconds(100)); - EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(1).WillOnce(Return(rsa_pub_key)); + EXPECT_CALL(*mock_auth_mgr, download_key(_)).Times(0); EXPECT_EQ(mock_auth_mgr->verify(mock_trf_client.get_token()), AuthVerifyStatus::OK); } diff --git a/src/auth_manager/tests/LRUCacheTest.cpp b/src/auth_manager/tests/LRUCacheTest.cpp new file mode 100644 index 00000000..cdb901f4 --- /dev/null +++ b/src/auth_manager/tests/LRUCacheTest.cpp @@ -0,0 +1,73 @@ +#include "sisl/auth_manager/LRUCache.h" +#include +#include +#include +#include + +SISL_OPTIONS_ENABLE(logging) + +namespace sisl::testing { + +using namespace ::testing; + +TEST(LRUTest, basic) { + auto lru = LRUCache< int, int >(3); + + EXPECT_EQ(0, lru.size()); + EXPECT_FALSE(lru.exists(1)); + + lru.put(0, 0); + lru.put(1, 1); + EXPECT_EQ(2, lru.size()); + EXPECT_TRUE(lru.exists(0)); + EXPECT_TRUE(lru.exists(1)); + + lru.put(2, 2); + + // this will evict 0 from cache + lru.put(3, 3); + + EXPECT_EQ(3, lru.size()); + + EXPECT_FALSE(lru.exists(0)); + EXPECT_TRUE(lru.exists(1)); + EXPECT_TRUE(lru.exists(2)); + EXPECT_TRUE(lru.exists(3)); + + // current elements in cache are 3, 2, 1 + // let's re-insert 1, this will move 1 to the head of cache + lru.put(1, 1); + + // insert another new key, this will evict 2 + lru.put(4, 4); + + EXPECT_EQ(3, lru.size()); + EXPECT_FALSE(lru.exists(2)); + EXPECT_TRUE(lru.exists(1)); + EXPECT_TRUE(lru.exists(3)); + EXPECT_TRUE(lru.exists(4)); +} + +TEST(LRUTest, get) { + auto lru = LRUCache< std::string, std::string >(3); + + lru.put("key1", "value1"); + EXPECT_EQ("value1", lru.get("key1")); + auto v = lru.get("no-such-key"); + EXPECT_EQ(std::nullopt, v); + + // use variable as key, to test the perfect forwarding + std::string key{"key2"}; + std::string value{"value2"}; + lru.put(key, value); + ASSERT_TRUE(lru.get(key)); + EXPECT_EQ(value, lru.get(key)); +} + +} // namespace sisl::testing + +int main(int argc, char* argv[]) { + testing::InitGoogleMock(&argc, argv); + SISL_OPTIONS_LOAD(argc, argv, logging) + return RUN_ALL_TESTS(); +} \ No newline at end of file diff --git a/src/file_watcher/file_watcher.cpp b/src/file_watcher/file_watcher.cpp index 05145ba1..94989052 100644 --- a/src/file_watcher/file_watcher.cpp +++ b/src/file_watcher/file_watcher.cpp @@ -119,9 +119,11 @@ bool FileWatcher::unregister_listener(const std::string& file_path, const std::s } bool FileWatcher::remove_watcher(FileInfo& file_info) { - if (auto err = inotify_rm_watch(m_inotify_fd, file_info.m_wd); err != 0) { return false; } + bool success = true; + if (auto err = inotify_rm_watch(m_inotify_fd, file_info.m_wd); err != 0) { success = false; } + // remove the file from the map regardless of the inotify_rm_watch result m_files.erase(file_info.m_filepath); - return true; + return success; } bool FileWatcher::stop() { diff --git a/src/logging/logging.cpp b/src/logging/logging.cpp index df32d266..41fe1f9a 100644 --- a/src/logging/logging.cpp +++ b/src/logging/logging.cpp @@ -135,31 +135,25 @@ std::shared_ptr< spdlog::logger >& GetCriticalLogger() { return logger_thread_ctx.m_critical_logger; } -static std::filesystem::path get_base_dir() { - namespace fs = std::filesystem; - const auto cwd = fs::current_path(); - const auto log_dir{cwd / "logs"}; - - // Construct a unique directory path based on the current time - auto const current_time{std::chrono::system_clock::now()}; - auto const current_t{std::chrono::system_clock::to_time_t(current_time)}; - auto const current_tm{std::localtime(¤t_t)}; - std::array< char, PATH_MAX > c_time; - if (std::strftime(c_time.data(), c_time.size(), "%F_%R", current_tm)) { - const fs::path cur_log_dir = log_dir / c_time.data(); - fs::create_directories(cur_log_dir); - - const fs::path sym_path = log_dir / "latest"; - try { - if (fs::is_symlink(sym_path)) { fs::remove(sym_path); } - fs::create_directory_symlink(cur_log_dir, sym_path); - } catch (std::exception& e) { - LOGINFO("Unable to create latest symlink={} to log dir={}, ignoring symlink creation\n", sym_path, log_dir); +static std::filesystem::path g_base_dir; + +std::filesystem::path get_base_dir() { + static std::once_flag one_base_dir; + std::call_once(one_base_dir, [] { + const auto cwd{std::filesystem::current_path()}; + g_base_dir = cwd / "logs"; + // Construct a unique directory path based on the current time + auto const current_time{std::chrono::system_clock::now()}; + auto const current_t{std::chrono::system_clock::to_time_t(current_time)}; + auto const current_tm{std::localtime(¤t_t)}; + std::array< char, PATH_MAX > c_time; + if (std::strftime(c_time.data(), c_time.size(), "%F_%R", current_tm)) { + g_base_dir /= c_time.data(); + std::filesystem::create_directories(g_base_dir); } - return cur_log_dir; - } else { - return log_dir; - } + }); + + return g_base_dir; } static std::filesystem::path log_path(std::string const& name) { @@ -167,8 +161,7 @@ static std::filesystem::path log_path(std::string const& name) { if (0 < SISL_OPTIONS.count("logfile")) { p = std::filesystem::path{SISL_OPTIONS["logfile"].as< std::string >()}; } else { - static std::filesystem::path base_dir{get_base_dir()}; - p = base_dir / std::filesystem::path{name}.filename(); + p = get_base_dir() / std::filesystem::path{name}.filename(); } return p; } diff --git a/src/logging/stacktrace.cpp b/src/logging/stacktrace.cpp index 1ac07443..ac65d5d1 100644 --- a/src/logging/stacktrace.cpp +++ b/src/logging/stacktrace.cpp @@ -131,7 +131,7 @@ static bool dumpCallback(const google_breakpad::MinidumpDescriptor& descriptor, static void bt_dumper([[maybe_unused]] const SignalType signal_number) { #if defined(__linux__) - google_breakpad::ExceptionHandler::WriteMinidump("./", dumpCallback, nullptr); + google_breakpad::ExceptionHandler::WriteMinidump(get_base_dir().string(), dumpCallback, nullptr); #endif } From 4015bb0c7fbd5fc2eba4409abd1e21eba9e520d4 Mon Sep 17 00:00:00 2001 From: Sanal Date: Tue, 16 Jan 2024 12:57:46 -0800 Subject: [PATCH 379/385] Add shutdown api for flip grpc server. (#207) --- include/sisl/flip/flip.hpp | 10 +++++++++- include/sisl/flip/flip_rpc_server.hpp | 10 +++++++++- src/flip/lib/flip_rpc_server.cpp | 4 ++-- 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/include/sisl/flip/flip.hpp b/include/sisl/flip/flip.hpp index d0caad81..84ef278f 100644 --- a/include/sisl/flip/flip.hpp +++ b/include/sisl/flip/flip.hpp @@ -438,10 +438,17 @@ class Flip { } void start_rpc_server() { - m_flip_server_thread = std::unique_ptr< std::thread >(new std::thread(FlipRPCServer::rpc_thread)); + m_flip_server = std::make_unique< FlipRPCServer >(); + m_flip_server_thread = + std::unique_ptr< std::thread >(new std::thread([this]() { m_flip_server->rpc_thread(); })); m_flip_server_thread->detach(); } + void stop_rpc_server() { + m_flip_server->shutdown(); + m_flip_server.reset(); + } + bool add(const FlipSpec& fspec) { m_flip_enabled = true; auto inst = flip_instance(fspec); @@ -667,6 +674,7 @@ class Flip { bool m_flip_enabled; std::unique_ptr< FlipTimerBase > m_timer; std::unique_ptr< std::thread > m_flip_server_thread; + std::unique_ptr< FlipRPCServer > m_flip_server; }; } // namespace flip diff --git a/include/sisl/flip/flip_rpc_server.hpp b/include/sisl/flip/flip_rpc_server.hpp index ede9cd0a..5a2ed466 100644 --- a/include/sisl/flip/flip_rpc_server.hpp +++ b/include/sisl/flip/flip_rpc_server.hpp @@ -15,6 +15,7 @@ * *********************************************************************************/ #pragma once +#include #include "proto/flip_spec.pb.h" #include "proto/flip_server.grpc.pb.h" @@ -22,11 +23,18 @@ namespace flip { class FlipRPCServer final : public FlipServer::Service { public: + FlipRPCServer() = default; grpc::Status InjectFault(grpc::ServerContext* context, const FlipSpec* request, FlipResponse* response) override; grpc::Status GetFaults(grpc::ServerContext* context, const FlipNameRequest* request, FlipListResponse* response) override; grpc::Status RemoveFault(grpc::ServerContext*, const FlipRemoveRequest* request, FlipRemoveResponse* response) override; - static void rpc_thread(); + void rpc_thread(); + void shutdown() { + if (m_server) { m_server->Shutdown(); } + } + +private: + std::unique_ptr< grpc::Server > m_server; }; } // namespace flip diff --git a/src/flip/lib/flip_rpc_server.cpp b/src/flip/lib/flip_rpc_server.cpp index 07cc20bc..bf131089 100644 --- a/src/flip/lib/flip_rpc_server.cpp +++ b/src/flip/lib/flip_rpc_server.cpp @@ -70,9 +70,9 @@ void FlipRPCServer::rpc_thread() { grpc::ServerBuilder builder; builder.AddListeningPort(server_address, grpc::InsecureServerCredentials()); builder.RegisterService((FlipRPCServer::Service*)&service); - std::unique_ptr< grpc::Server > server(builder.BuildAndStart()); + m_server = builder.BuildAndStart(); LOGINFOMOD(flip, "Flip GRPC Server listening on {}", server_address); - server->Wait(); + m_server->Wait(); } } // namespace flip From 00155a8ab9ec497a9e3d5d8ab22b3c34fbd105d8 Mon Sep 17 00:00:00 2001 From: Sanal Date: Tue, 16 Jan 2024 15:03:21 -0800 Subject: [PATCH 380/385] Bump conan version. (#208) --- conanfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index 0acf2d7a..be0cd4d6 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "11.0.4" + version = "11.0.5" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" From 24e1580fe4484fba054b17c7d582b5498879df8b Mon Sep 17 00:00:00 2001 From: Sanal Date: Thu, 18 Jan 2024 12:09:38 -0800 Subject: [PATCH 381/385] Fix flip grpc server thread terminate. (#209) --- conanfile.py | 2 +- include/sisl/flip/flip.hpp | 26 ++++++++++++++++++++++---- include/sisl/flip/flip_rpc_server.hpp | 7 ------- src/flip/lib/flip_rpc_server.cpp | 12 ------------ 4 files changed, 23 insertions(+), 24 deletions(-) diff --git a/conanfile.py b/conanfile.py index be0cd4d6..24e75459 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "11.0.5" + version = "11.0.6" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" diff --git a/include/sisl/flip/flip.hpp b/include/sisl/flip/flip.hpp index 84ef278f..ce778cd0 100644 --- a/include/sisl/flip/flip.hpp +++ b/include/sisl/flip/flip.hpp @@ -29,6 +29,11 @@ #include #include #include +#include +#include +#include +#include +#include #include "proto/flip_spec.pb.h" #include "flip_rpc_server.hpp" @@ -431,6 +436,9 @@ static constexpr int DELAYED_RETURN = 3; class Flip { public: Flip() : m_flip_enabled(false) {} + ~Flip() { + if (m_flip_server) { stop_rpc_server(); } + } static Flip& instance() { static Flip s_instance; @@ -438,14 +446,23 @@ class Flip { } void start_rpc_server() { + if (m_flip_server) { stop_rpc_server(); } + m_flip_server = std::make_unique< FlipRPCServer >(); - m_flip_server_thread = - std::unique_ptr< std::thread >(new std::thread([this]() { m_flip_server->rpc_thread(); })); - m_flip_server_thread->detach(); + std::string server_address("0.0.0.0:50051"); + grpc::ServerBuilder builder; + builder.AddListeningPort(server_address, grpc::InsecureServerCredentials()); + builder.RegisterService((FlipRPCServer::Service*)m_flip_server.get()); + m_grpc_server = builder.BuildAndStart(); + LOGINFOMOD(flip, "Flip GRPC Server listening on {}", server_address); + m_flip_server_thread = std::unique_ptr< std::thread >( + new std::thread([grpc_server = m_grpc_server.get()]() { grpc_server->Wait(); })); } void stop_rpc_server() { - m_flip_server->shutdown(); + if (m_grpc_server) { m_grpc_server->Shutdown(); } + m_flip_server_thread->join(); + m_flip_server_thread.reset(); m_flip_server.reset(); } @@ -675,6 +692,7 @@ class Flip { std::unique_ptr< FlipTimerBase > m_timer; std::unique_ptr< std::thread > m_flip_server_thread; std::unique_ptr< FlipRPCServer > m_flip_server; + std::unique_ptr< grpc::Server > m_grpc_server; }; } // namespace flip diff --git a/include/sisl/flip/flip_rpc_server.hpp b/include/sisl/flip/flip_rpc_server.hpp index 5a2ed466..d3a42ea1 100644 --- a/include/sisl/flip/flip_rpc_server.hpp +++ b/include/sisl/flip/flip_rpc_server.hpp @@ -29,12 +29,5 @@ class FlipRPCServer final : public FlipServer::Service { FlipListResponse* response) override; grpc::Status RemoveFault(grpc::ServerContext*, const FlipRemoveRequest* request, FlipRemoveResponse* response) override; - void rpc_thread(); - void shutdown() { - if (m_server) { m_server->Shutdown(); } - } - -private: - std::unique_ptr< grpc::Server > m_server; }; } // namespace flip diff --git a/src/flip/lib/flip_rpc_server.cpp b/src/flip/lib/flip_rpc_server.cpp index bf131089..769cb506 100644 --- a/src/flip/lib/flip_rpc_server.cpp +++ b/src/flip/lib/flip_rpc_server.cpp @@ -63,16 +63,4 @@ class FlipRPCServiceWrapper : public FlipRPCServer::Service { } }; -void FlipRPCServer::rpc_thread() { - std::string server_address("0.0.0.0:50051"); - FlipRPCServer service; - - grpc::ServerBuilder builder; - builder.AddListeningPort(server_address, grpc::InsecureServerCredentials()); - builder.RegisterService((FlipRPCServer::Service*)&service); - m_server = builder.BuildAndStart(); - LOGINFOMOD(flip, "Flip GRPC Server listening on {}", server_address); - m_server->Wait(); -} - } // namespace flip From 1e1f648f297af4fe78ec73ee46a1e600417755e4 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Wed, 24 Jan 2024 10:20:04 -0700 Subject: [PATCH 382/385] Remove copy of date, fix libcurl directly. (#212) --- .github/workflows/build_dependencies.yml | 1 - 3rd_party/date/conandata.yml | 29 ----- 3rd_party/date/conanfile.py | 140 ---------------------- 3rd_party/date/patches/0001-fix-uwp.patch | 17 --- 3rd_party/date/patches/cmake-3.0.0.patch | 14 --- 3rd_party/date/patches/cmake-3.0.1.patch | 14 --- 3rd_party/date/patches/cmake.patch | 19 --- 3rd_party/date/patches/string_view.patch | 13 -- conanfile.py | 4 +- 9 files changed, 2 insertions(+), 249 deletions(-) delete mode 100644 3rd_party/date/conandata.yml delete mode 100644 3rd_party/date/conanfile.py delete mode 100644 3rd_party/date/patches/0001-fix-uwp.patch delete mode 100644 3rd_party/date/patches/cmake-3.0.0.patch delete mode 100644 3rd_party/date/patches/cmake-3.0.1.patch delete mode 100644 3rd_party/date/patches/cmake.patch delete mode 100644 3rd_party/date/patches/string_view.patch diff --git a/.github/workflows/build_dependencies.yml b/.github/workflows/build_dependencies.yml index 693d13f7..93f988f5 100644 --- a/.github/workflows/build_dependencies.yml +++ b/.github/workflows/build_dependencies.yml @@ -106,7 +106,6 @@ jobs: conan export 3rd_party/jemalloc conan export 3rd_party/prerelease_dummy conan export 3rd_party/pistache pistache/cci.20201127@ - conan export 3rd_party/date date/3.0.1@ cached_pkgs=$(ls -1d ~/.conan/data/*/*/*/*/export 2>/dev/null | sed 's,.*data/,,' | cut -d'/' -f1,2 | paste -sd',' - -) echo "::info:: Pre-cached: ${cached_pkgs}" if: ${{ inputs.testing == 'True' || steps.restore-cache.outputs.cache-hit != 'true' }} diff --git a/3rd_party/date/conandata.yml b/3rd_party/date/conandata.yml deleted file mode 100644 index bed2d768..00000000 --- a/3rd_party/date/conandata.yml +++ /dev/null @@ -1,29 +0,0 @@ -sources: - "3.0.1": - url: "https://github.com/HowardHinnant/date/archive/refs/tags/v3.0.1.tar.gz" - sha256: "7a390f200f0ccd207e8cff6757e04817c1a0aec3e327b006b7eb451c57ee3538" - "3.0.0": - url: "https://github.com/HowardHinnant/date/archive/refs/tags/v3.0.0.tar.gz" - sha256: "87bba2eaf0ebc7ec539e5e62fc317cb80671a337c1fb1b84cb9e4d42c6dbebe3" - "2.4.1": - url: "https://github.com/HowardHinnant/date/archive/refs/tags/v2.4.1.tar.gz" - sha256: "98907d243397483bd7ad889bf6c66746db0d7d2a39cc9aacc041834c40b65b98" -patches: - "3.0.1": - - patch_file: "patches/cmake-3.0.1.patch" - patch_description: "Disable string view to workaround clang 5 not having it" - patch_type: "portability" - "3.0.0": - - patch_file: "patches/cmake-3.0.0.patch" - patch_description: "Disable string view to workaround clang 5 not having it" - patch_type: "portability" - "2.4.1": - - patch_file: "patches/0001-fix-uwp.patch" - patch_description: "Fix Universal Windows Platform (UWP) unhandled exception support. See https://github.com/microsoft/vcpkg/pull/8151#issuecomment-531175393." - patch_type: "portability" - - patch_file: "patches/cmake.patch" - patch_description: "Add libcurl target for conan compatibility" - patch_type: "conan" - - patch_file: "patches/string_view.patch" - patch_description: "Disable string view to workaround clang 5 not having it" - patch_type: "portability" diff --git a/3rd_party/date/conanfile.py b/3rd_party/date/conanfile.py deleted file mode 100644 index 7c597110..00000000 --- a/3rd_party/date/conanfile.py +++ /dev/null @@ -1,140 +0,0 @@ -from conan import ConanFile -from conan.tools.build import check_min_cppstd -from conan.tools.cmake import CMake, CMakeToolchain, CMakeDeps, cmake_layout -from conan.tools.files import get, rmdir, apply_conandata_patches, export_conandata_patches, copy -from conan.tools.scm import Version - -import os - -required_conan_version = ">=1.53.0" - - -class DateConan(ConanFile): - name = "date" - url = "https://github.com/conan-io/conan-center-index" - homepage = "https://github.com/HowardHinnant/date" - description = "A date and time library based on the C++11/14/17 header" - topics = ("datetime", "timezone", "calendar", "time", "iana-database") - license = "MIT" - - settings = "os", "arch", "compiler", "build_type" - options = { - "shared": [True, False], - "fPIC": [True, False], - "header_only": [True, False], - "use_system_tz_db": [True, False], - "use_tz_db_in_dot": [True, False], - } - default_options = { - "shared": False, - "fPIC": True, - "header_only": False, - "use_system_tz_db": False, - "use_tz_db_in_dot": False, - } - - def export_sources(self): - export_conandata_patches(self) - - def config_options(self): - if self.settings.os == "Windows": - del self.options.fPIC - if self.settings.os in ["iOS", "tvOS", "watchOS", "Android"]: - self.options.use_system_tz_db = True - - def configure(self): - if self.options.shared or self.options.header_only: - self.options.rm_safe("fPIC") - if self.options.header_only: - del self.options.shared - - def layout(self): - cmake_layout(self, src_folder="src") - - def requirements(self): - if not self.options.header_only and not self.options.use_system_tz_db: - self.requires("libcurl/7.86.0") - - def package_id(self): - if self.info.options.header_only: - self.info.clear() - - def validate(self): - if self.settings.compiler.get_safe("cppstd"): - check_min_cppstd(self, 11) - - def source(self): - get(self, **self.conan_data["sources"][self.version], strip_root=True) - - def generate(self): - tc = CMakeToolchain(self) - tc.variables["ENABLE_DATE_TESTING"] = False - tc.variables["USE_SYSTEM_TZ_DB"] = self.options.use_system_tz_db - tc.variables["USE_TZ_DB_IN_DOT"] = self.options.use_tz_db_in_dot - tc.variables["BUILD_TZ_LIB"] = not self.options.header_only - # workaround for clang 5 not having string_view - if Version(self.version) >= "3.0.0" and self.settings.compiler == "clang" \ - and Version(self.settings.compiler.version) <= "5.0": - tc.cache_variables["DISABLE_STRING_VIEW"] = True - tc.generate() - - deps = CMakeDeps(self) - deps.generate() - - def build(self): - apply_conandata_patches(self) - if not self.options.header_only: - cmake = CMake(self) - cmake.configure() - cmake.build() - - def package(self): - copy(self, "LICENSE.txt", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder) - if self.options.header_only: - src = os.path.join(self.source_folder, "include", "date") - dst = os.path.join(self.package_folder, "include", "date") - copy(self, "date.h", dst=dst, src=src) - copy(self, "tz.h", dst=dst, src=src) - copy(self, "ptz.h", dst=dst, src=src) - copy(self, "iso_week.h", dst=dst, src=src) - copy(self, "julian.h", dst=dst, src=src) - copy(self, "islamic.h", dst=dst, src=src) - else: - cmake = CMake(self) - cmake.install() - rmdir(self, os.path.join(self.package_folder, "lib", "cmake")) - rmdir(self, os.path.join(self.package_folder, "CMake")) - - def package_info(self): - self.cpp_info.set_property("cmake_target_name", "date::date") - # TODO: Remove legacy .names attribute when conan 2.0 is released - self.cpp_info.names["cmake_find_package"] = "date" - self.cpp_info.names["cmake_find_package_multi"] = "date" - - # date-tz - if not self.options.header_only: - self.cpp_info.components["date-tz"].set_property("cmake_target_name", "date::date-tz") - # TODO: Remove legacy .names attribute when conan 2.0 is released - self.cpp_info.components["date-tz"].names["cmake_find_package"] = "date-tz" - self.cpp_info.components["date-tz"].names["cmake_find_package_multi"] = "date-tz" - lib_name = "{}tz".format("date-" if Version(self.version) >= "3.0.0" else "") - self.cpp_info.components["date-tz"].libs = [lib_name] - if self.settings.os == "Linux": - self.cpp_info.components["date-tz"].system_libs.append("pthread") - self.cpp_info.components["date-tz"].system_libs.append("m") - - if not self.options.use_system_tz_db: - self.cpp_info.components["date-tz"].requires.append("libcurl::libcurl") - - if self.options.use_system_tz_db and not self.settings.os == "Windows": - use_os_tzdb = 1 - else: - use_os_tzdb = 0 - - defines = ["USE_OS_TZDB={}".format(use_os_tzdb)] - if self.settings.os == "Windows" and self.options.shared: - defines.append("DATE_USE_DLL=1") - - self.cpp_info.components["date-tz"].defines.extend(defines) - else: - self.cpp_info.defines.append("DATE_HEADER_ONLY") diff --git a/3rd_party/date/patches/0001-fix-uwp.patch b/3rd_party/date/patches/0001-fix-uwp.patch deleted file mode 100644 index f7b5c246..00000000 --- a/3rd_party/date/patches/0001-fix-uwp.patch +++ /dev/null @@ -1,17 +0,0 @@ -diff --git a/include/date/date.h b/include/date/date.h -index cb115a9..66d87c2 100644 ---- a/include/date/date.h -+++ b/include/date/date.h -@@ -76,6 +76,12 @@ - # endif - #endif - -+#ifdef _MSC_VER -+# pragma warning(push) -+// warning C4127: conditional expression is constant -+# pragma warning(disable : 4127 4996) -+#endif -+ - namespace date - { - diff --git a/3rd_party/date/patches/cmake-3.0.0.patch b/3rd_party/date/patches/cmake-3.0.0.patch deleted file mode 100644 index 583e86e5..00000000 --- a/3rd_party/date/patches/cmake-3.0.0.patch +++ /dev/null @@ -1,14 +0,0 @@ -diff --git a/CMakeLists.txt b/CMakeLists.txt -index ad74900..ac390a9 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -127,6 +127,9 @@ if( BUILD_TZ_LIB ) - target_include_directories( date-tz SYSTEM PRIVATE ${CURL_INCLUDE_DIRS} ) - target_link_libraries( date-tz PRIVATE ${CURL_LIBRARIES} ) - endif( ) -+ if( DISABLE_STRING_VIEW ) -+ target_compile_definitions( date-tz PRIVATE -DHAS_STRING_VIEW=0 -DHAS_DEDUCTION_GUIDES=0 ) -+ endif( ) - endif( ) - - #[===================================================================[ diff --git a/3rd_party/date/patches/cmake-3.0.1.patch b/3rd_party/date/patches/cmake-3.0.1.patch deleted file mode 100644 index 8edcb309..00000000 --- a/3rd_party/date/patches/cmake-3.0.1.patch +++ /dev/null @@ -1,14 +0,0 @@ -diff --git a/CMakeLists.txt b/CMakeLists.txt -index ad74900..ac390a9 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -156,6 +156,9 @@ if( BUILD_TZ_LIB ) - target_include_directories( date-tz SYSTEM PRIVATE ${CURL_INCLUDE_DIRS} ) - target_link_libraries( date-tz PRIVATE ${CURL_LIBRARIES} ) - endif( ) -+ if( DISABLE_STRING_VIEW ) -+ target_compile_definitions( date-tz PRIVATE -DHAS_STRING_VIEW=0 -DHAS_DEDUCTION_GUIDES=0 ) -+ endif( ) - endif( ) - - #[===================================================================[ diff --git a/3rd_party/date/patches/cmake.patch b/3rd_party/date/patches/cmake.patch deleted file mode 100644 index 3f9df797..00000000 --- a/3rd_party/date/patches/cmake.patch +++ /dev/null @@ -1,19 +0,0 @@ -diff --git a/CMakeLists.txt b/CMakeLists.txt -index f025a3a..7bc93df 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -57,8 +57,12 @@ else( ) - target_compile_definitions( tz PRIVATE -DHAS_REMOTE_API=1 ) - target_compile_definitions( tz PUBLIC -DUSE_OS_TZDB=0 ) - find_package( CURL REQUIRED ) -- include_directories( SYSTEM ${CURL_INCLUDE_DIRS} ) -- set( OPTIONAL_LIBRARIES ${CURL_LIBRARIES} ) -+ set( OPTIONAL_LIBRARIES CURL::libcurl ) -+endif() -+ -+if( BUILD_SHARED_LIBS ) -+ target_compile_definitions( tz PRIVATE -DDATE_BUILD_DLL=1 ) -+ target_compile_definitions( tz PUBLIC -DDATE_USE_DLL=1 ) - endif( ) - - if( USE_TZ_DB_IN_DOT ) diff --git a/3rd_party/date/patches/string_view.patch b/3rd_party/date/patches/string_view.patch deleted file mode 100644 index 008dd04c..00000000 --- a/3rd_party/date/patches/string_view.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/include/date/date.h b/include/date/date.h -index cb115a9..23cd05a 100644 ---- a/include/date/date.h -+++ b/include/date/date.h -@@ -31,7 +31,7 @@ - // We did not mean to shout. - - #ifndef HAS_STRING_VIEW --# if __cplusplus >= 201703 -+# if __cplusplus >= 201703 && __has_include() - # define HAS_STRING_VIEW 1 - # else - # define HAS_STRING_VIEW 0 diff --git a/conanfile.py b/conanfile.py index 9a19b678..356a987f 100644 --- a/conanfile.py +++ b/conanfile.py @@ -8,7 +8,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "8.6.6" + version = "8.6.7" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" topics = ("ebay", "components", "core", "efficiency") @@ -68,7 +68,7 @@ def requirements(self): self.requires("fmt/8.1.1", override=True) self.requires("libevent/2.1.12", override=True) self.requires("openssl/1.1.1s", override=True) - self.requires("libcurl/7.86.0") + self.requires("libcurl/8.4.0", override=True) self.requires("xz_utils/5.2.5", override=True) self.requires("zlib/1.2.12", override=True) if self.options.malloc_impl == "jemalloc": From 05194f51216971e8673440517fa13af424e0d612 Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Tue, 30 Jan 2024 10:25:38 -0700 Subject: [PATCH 383/385] Ready recipe for conan v2. --- .gitignore | 5 + 3rd_party/breakpad/conandata.yml | 8 - 3rd_party/breakpad/conanfile.py | 82 ---- .../patches/0001-Use_conans_lss.patch | 228 ---------- .../patches/0002-Remove-hardcoded-fpic.patch | 29 -- 3rd_party/folly/CMakeLists.txt | 7 - 3rd_party/folly/conan_deps.cmake | 39 ++ 3rd_party/folly/conandata.yml | 86 +--- 3rd_party/folly/conanfile.py | 399 ++++++++++-------- .../folly/patches/0001-find-packages.patch | 93 ---- .../folly/patches/0002-compiler-flags.patch | 24 -- .../folly/patches/0003-boost-shared-ptr.patch | 24 -- .../patches/0004-disable-posix-names.patch | 27 -- .../folly/patches/0005-include-atomic.patch | 12 - .../folly/patches/0006-duplicate-hash.patch | 13 - .../folly/patches/0007-allow-builtins.patch | 128 ------ .../folly/patches/0008-find-packages.patch | 73 ---- .../patches/0009-ill-formed-atomic-copy.patch | 13 - .../folly/patches/0010-duplicate-hash.patch | 13 - .../patches/0011-disable-logger-example.patch | 12 - .../folly/patches/0012-compiler-flags.patch | 24 -- .../folly/patches/0013-include-bit.patch | 13 - 3rd_party/folly/patches/0014-find-librt.patch | 18 - .../0015-benchmark-format-macros.patch | 15 - .../folly/patches/0016-find-packages.patch | 80 ---- 3rd_party/folly/patches/0018-find-glog.patch | 16 - .../folly/patches/0019-exclude-example.patch | 12 - .../folly/patches/0020-include-ssizet.patch | 12 - .../folly/patches/0021-typedef-clockid.patch | 12 - .../patches/0022-fix-windows-minmax.patch | 12 - .../0023-fix-safe-check-sanitize.patch | 16 - .../folly/patches/0024-compiler-flags.patch | 23 - 3rd_party/folly/patches/0025-timespec.patch | 38 -- ...gs.patch => 2022-001-compiler-flags.patch} | 9 +- .../patches/2023-001-compiler-flags.patch | 19 + 3rd_party/folly/test_package/CMakeLists.txt | 12 - 3rd_party/folly/test_package/conanfile.py | 31 -- 3rd_party/folly/test_package/test_package.cpp | 26 -- .../folly/test_v1_package/CMakeLists.txt | 17 - 3rd_party/folly/test_v1_package/conanfile.py | 18 - .../folly/test_v1_package/test_package.cpp | 29 -- 3rd_party/gperftools/conanfile.py | 50 --- 3rd_party/jemalloc/conanfile.py | 195 --------- 3rd_party/prerelease_dummy/conanfile.py | 23 - 3rd_party/userspace-rcu/conandata.yml | 4 + 3rd_party/userspace-rcu/conanfile.py | 87 ++++ CMakeLists.txt | 53 +-- README.md | 12 +- conanfile.py | 166 ++++---- prepare.sh | 12 +- prepare_v2.sh | 8 + src/CMakeLists.txt | 2 +- src/cache/CMakeLists.txt | 6 +- src/fds/CMakeLists.txt | 2 +- src/grpc/CMakeLists.txt | 4 +- src/grpc/tests/CMakeLists.txt | 2 - src/metrics/CMakeLists.txt | 2 +- test_package/CMakeLists.txt | 5 +- test_package/conanfile.py | 24 +- test_package/test_package.cpp | 1 + 60 files changed, 548 insertions(+), 1877 deletions(-) delete mode 100644 3rd_party/breakpad/conandata.yml delete mode 100644 3rd_party/breakpad/conanfile.py delete mode 100644 3rd_party/breakpad/patches/0001-Use_conans_lss.patch delete mode 100644 3rd_party/breakpad/patches/0002-Remove-hardcoded-fpic.patch delete mode 100644 3rd_party/folly/CMakeLists.txt create mode 100644 3rd_party/folly/conan_deps.cmake delete mode 100644 3rd_party/folly/patches/0001-find-packages.patch delete mode 100644 3rd_party/folly/patches/0002-compiler-flags.patch delete mode 100644 3rd_party/folly/patches/0003-boost-shared-ptr.patch delete mode 100644 3rd_party/folly/patches/0004-disable-posix-names.patch delete mode 100644 3rd_party/folly/patches/0005-include-atomic.patch delete mode 100644 3rd_party/folly/patches/0006-duplicate-hash.patch delete mode 100644 3rd_party/folly/patches/0007-allow-builtins.patch delete mode 100644 3rd_party/folly/patches/0008-find-packages.patch delete mode 100644 3rd_party/folly/patches/0009-ill-formed-atomic-copy.patch delete mode 100644 3rd_party/folly/patches/0010-duplicate-hash.patch delete mode 100644 3rd_party/folly/patches/0011-disable-logger-example.patch delete mode 100644 3rd_party/folly/patches/0012-compiler-flags.patch delete mode 100644 3rd_party/folly/patches/0013-include-bit.patch delete mode 100644 3rd_party/folly/patches/0014-find-librt.patch delete mode 100644 3rd_party/folly/patches/0015-benchmark-format-macros.patch delete mode 100644 3rd_party/folly/patches/0016-find-packages.patch delete mode 100644 3rd_party/folly/patches/0018-find-glog.patch delete mode 100644 3rd_party/folly/patches/0019-exclude-example.patch delete mode 100644 3rd_party/folly/patches/0020-include-ssizet.patch delete mode 100644 3rd_party/folly/patches/0021-typedef-clockid.patch delete mode 100644 3rd_party/folly/patches/0022-fix-windows-minmax.patch delete mode 100644 3rd_party/folly/patches/0023-fix-safe-check-sanitize.patch delete mode 100644 3rd_party/folly/patches/0024-compiler-flags.patch delete mode 100644 3rd_party/folly/patches/0025-timespec.patch rename 3rd_party/folly/patches/{0017-compiler-flags.patch => 2022-001-compiler-flags.patch} (64%) create mode 100644 3rd_party/folly/patches/2023-001-compiler-flags.patch delete mode 100644 3rd_party/folly/test_package/CMakeLists.txt delete mode 100644 3rd_party/folly/test_package/conanfile.py delete mode 100644 3rd_party/folly/test_package/test_package.cpp delete mode 100644 3rd_party/folly/test_v1_package/CMakeLists.txt delete mode 100644 3rd_party/folly/test_v1_package/conanfile.py delete mode 100644 3rd_party/folly/test_v1_package/test_package.cpp delete mode 100644 3rd_party/gperftools/conanfile.py delete mode 100644 3rd_party/jemalloc/conanfile.py delete mode 100644 3rd_party/prerelease_dummy/conanfile.py create mode 100644 3rd_party/userspace-rcu/conandata.yml create mode 100644 3rd_party/userspace-rcu/conanfile.py create mode 100755 prepare_v2.sh diff --git a/.gitignore b/.gitignore index 5d114c73..84d06d73 100644 --- a/.gitignore +++ b/.gitignore @@ -107,3 +107,8 @@ CMakeSettings.json # Clangd .cache/clangd + +CMakeUserPresets.json +logs/ +conan.lock +graph_info.json diff --git a/3rd_party/breakpad/conandata.yml b/3rd_party/breakpad/conandata.yml deleted file mode 100644 index ceaf3e10..00000000 --- a/3rd_party/breakpad/conandata.yml +++ /dev/null @@ -1,8 +0,0 @@ -sources: - "cci.20230127": - url: "https://github.com/google/breakpad/archive/bae713b.tar.gz" - sha256: "65a0dd6db9065dc539ddf35f969d10b5ad8a7b2c305d2dc5a66a1f8d46f4a904" -patches: - "cci.20230127": - - patch_file: "patches/0001-Use_conans_lss.patch" - - patch_file: "patches/0002-Remove-hardcoded-fpic.patch" diff --git a/3rd_party/breakpad/conanfile.py b/3rd_party/breakpad/conanfile.py deleted file mode 100644 index bdd6327e..00000000 --- a/3rd_party/breakpad/conanfile.py +++ /dev/null @@ -1,82 +0,0 @@ -from conan import ConanFile -from conan.errors import ConanInvalidConfiguration -from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir -from conan.tools.gnu import Autotools, AutotoolsDeps, AutotoolsToolchain -from conan.tools.layout import basic_layout -import os - -required_conan_version = ">=1.52.0" - - -class BreakpadConan(ConanFile): - name = "breakpad" - description = "A set of client and server components which implement a crash-reporting system" - topics = ["crash", "report", "breakpad"] - license = "BSD-3-Clause" - url = "https://github.com/conan-io/conan-center-index" - homepage = "https://chromium.googlesource.com/breakpad/breakpad/" - - settings = "os", "arch", "compiler", "build_type" - options = { - "fPIC": [True, False], - } - default_options = { - "fPIC": True, - } - - def export_sources(self): - export_conandata_patches(self) - - def layout(self): - basic_layout(self, src_folder="src") - - def requirements(self): - self.requires("linux-syscall-support/cci.20200813") - self.requires("zlib/1.2.13") - - def validate(self): - if self.settings.os != "Linux": - raise ConanInvalidConfiguration("Breakpad can only be built on Linux. For other OSs check sentry-breakpad") - - def source(self): - get(self, **self.conan_data["sources"][self.version], strip_root=True) - - def generate(self): - tc = AutotoolsToolchain(self) - # see https://github.com/conan-io/conan/issues/12020 - tc.configure_args.append("--libexecdir=${prefix}/bin") - tc.generate() - deps = AutotoolsDeps(self) - deps.generate() - - def build(self): - apply_conandata_patches(self) - autotools = Autotools(self) - autotools.configure() - autotools.make() - - def package(self): - copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses")) - autotools = Autotools(self) - autotools.install() - rmdir(self, os.path.join(self.package_folder, "share")) - rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig")) - - def package_info(self): - self.cpp_info.components["libbreakpad"].set_property("pkg_config_name", "breakpad") - self.cpp_info.components["libbreakpad"].libs = ["breakpad"] - self.cpp_info.components["libbreakpad"].includedirs.append(os.path.join("include", "breakpad")) - self.cpp_info.components["libbreakpad"].system_libs.append("pthread") - self.cpp_info.components["libbreakpad"].requires.append("linux-syscall-support::linux-syscall-support") - - self.cpp_info.components["client"].set_property("pkg_config_name", "breakpad-client") - self.cpp_info.components["client"].libs = ["breakpad_client"] - self.cpp_info.components["client"].includedirs.append(os.path.join("include", "breakpad")) - self.cpp_info.components["client"].system_libs.append("pthread") - self.cpp_info.components["client"].requires.append("linux-syscall-support::linux-syscall-support") - - # workaround to always produce a global pkgconfig file for PkgConfigDeps - self.cpp_info.set_property("pkg_config_name", "breakpad-do-not-use") - - # TODO: to remove in conan v2 - self.env_info.PATH.append(os.path.join(self.package_folder, "bin")) diff --git a/3rd_party/breakpad/patches/0001-Use_conans_lss.patch b/3rd_party/breakpad/patches/0001-Use_conans_lss.patch deleted file mode 100644 index d2bedfe2..00000000 --- a/3rd_party/breakpad/patches/0001-Use_conans_lss.patch +++ /dev/null @@ -1,228 +0,0 @@ -diff -Naur a/src/client/linux/crash_generation/crash_generation_client.cc b/src/client/linux/crash_generation/crash_generation_client.cc ---- a/src/client/linux/crash_generation/crash_generation_client.cc 2023-01-27 13:36:21.000000000 -0700 -+++ b/src/client/linux/crash_generation/crash_generation_client.cc 2023-06-20 10:56:18.746685403 -0700 -@@ -36,7 +36,7 @@ - - #include "common/linux/eintr_wrapper.h" - #include "common/linux/ignore_ret.h" --#include "third_party/lss/linux_syscall_support.h" -+#include - - namespace google_breakpad { - -diff -Naur a/src/client/linux/handler/exception_handler.cc b/src/client/linux/handler/exception_handler.cc ---- a/src/client/linux/handler/exception_handler.cc 2023-01-27 13:36:21.000000000 -0700 -+++ b/src/client/linux/handler/exception_handler.cc 2023-06-20 10:56:18.750685408 -0700 -@@ -94,7 +94,7 @@ - #include "client/linux/minidump_writer/linux_dumper.h" - #include "client/linux/minidump_writer/minidump_writer.h" - #include "common/linux/eintr_wrapper.h" --#include "third_party/lss/linux_syscall_support.h" -+#include - - #if defined(__ANDROID__) - #include "linux/sched.h" -diff -Naur a/src/client/linux/handler/exception_handler_unittest.cc b/src/client/linux/handler/exception_handler_unittest.cc ---- a/src/client/linux/handler/exception_handler_unittest.cc 2023-01-27 13:36:21.000000000 -0700 -+++ b/src/client/linux/handler/exception_handler_unittest.cc 2023-06-20 10:56:18.750685408 -0700 -@@ -49,7 +49,7 @@ - #include "common/linux/linux_libc_support.h" - #include "common/tests/auto_tempdir.h" - #include "common/using_std_string.h" --#include "third_party/lss/linux_syscall_support.h" -+#include - #include "google_breakpad/processor/minidump.h" - - using namespace google_breakpad; -diff -Naur a/src/client/linux/log/log.cc b/src/client/linux/log/log.cc ---- a/src/client/linux/log/log.cc 2023-01-27 13:36:21.000000000 -0700 -+++ b/src/client/linux/log/log.cc 2023-06-20 10:56:18.754685413 -0700 -@@ -32,7 +32,7 @@ - #include - #include - #else --#include "third_party/lss/linux_syscall_support.h" -+#include - #endif - - namespace logger { -diff -Naur a/src/client/linux/minidump_writer/cpu_set.h b/src/client/linux/minidump_writer/cpu_set.h ---- a/src/client/linux/minidump_writer/cpu_set.h 2023-01-27 13:36:21.000000000 -0700 -+++ b/src/client/linux/minidump_writer/cpu_set.h 2023-06-20 10:56:21.690688837 -0700 -@@ -34,7 +34,7 @@ - #include - - #include "common/linux/linux_libc_support.h" --#include "third_party/lss/linux_syscall_support.h" -+#include - - namespace google_breakpad { - -diff -Naur a/src/client/linux/minidump_writer/directory_reader.h b/src/client/linux/minidump_writer/directory_reader.h ---- a/src/client/linux/minidump_writer/directory_reader.h 2023-01-27 13:36:21.000000000 -0700 -+++ b/src/client/linux/minidump_writer/directory_reader.h 2023-06-20 10:56:21.694688842 -0700 -@@ -37,7 +37,7 @@ - #include - - #include "common/linux/linux_libc_support.h" --#include "third_party/lss/linux_syscall_support.h" -+#include - - namespace google_breakpad { - -diff -Naur a/src/client/linux/minidump_writer/line_reader.h b/src/client/linux/minidump_writer/line_reader.h ---- a/src/client/linux/minidump_writer/line_reader.h 2023-01-27 13:36:21.000000000 -0700 -+++ b/src/client/linux/minidump_writer/line_reader.h 2023-06-20 10:56:21.694688842 -0700 -@@ -34,7 +34,7 @@ - #include - - #include "common/linux/linux_libc_support.h" --#include "third_party/lss/linux_syscall_support.h" -+#include - - namespace google_breakpad { - -diff -Naur a/src/client/linux/minidump_writer/linux_dumper.cc b/src/client/linux/minidump_writer/linux_dumper.cc ---- a/src/client/linux/minidump_writer/linux_dumper.cc 2023-01-27 13:36:21.000000000 -0700 -+++ b/src/client/linux/minidump_writer/linux_dumper.cc 2023-06-20 10:56:18.766685426 -0700 -@@ -50,7 +50,7 @@ - #include "common/linux/memory_mapped_file.h" - #include "common/linux/safe_readlink.h" - #include "google_breakpad/common/minidump_exception_linux.h" --#include "third_party/lss/linux_syscall_support.h" -+#include - - using google_breakpad::elf::FileID; - -diff -Naur a/src/client/linux/minidump_writer/linux_dumper_unittest_helper.cc b/src/client/linux/minidump_writer/linux_dumper_unittest_helper.cc ---- a/src/client/linux/minidump_writer/linux_dumper_unittest_helper.cc 2023-01-27 13:36:21.000000000 -0700 -+++ b/src/client/linux/minidump_writer/linux_dumper_unittest_helper.cc 2023-06-20 10:56:18.766685426 -0700 -@@ -38,7 +38,7 @@ - #include - - #include "common/scoped_ptr.h" --#include "third_party/lss/linux_syscall_support.h" -+#include - - #if defined(__ARM_EABI__) - #define TID_PTR_REGISTER "r3" -diff -Naur a/src/client/linux/minidump_writer/linux_ptrace_dumper.cc b/src/client/linux/minidump_writer/linux_ptrace_dumper.cc ---- a/src/client/linux/minidump_writer/linux_ptrace_dumper.cc 2023-01-27 13:36:21.000000000 -0700 -+++ b/src/client/linux/minidump_writer/linux_ptrace_dumper.cc 2023-06-20 10:56:18.766685426 -0700 -@@ -56,7 +56,7 @@ - #include "client/linux/minidump_writer/directory_reader.h" - #include "client/linux/minidump_writer/line_reader.h" - #include "common/linux/linux_libc_support.h" --#include "third_party/lss/linux_syscall_support.h" -+#include - - // Suspends a thread by attaching to it. - static bool SuspendThread(pid_t pid) { -diff -Naur a/src/client/linux/minidump_writer/minidump_writer.cc b/src/client/linux/minidump_writer/minidump_writer.cc ---- a/src/client/linux/minidump_writer/minidump_writer.cc 2023-01-27 13:36:21.000000000 -0700 -+++ b/src/client/linux/minidump_writer/minidump_writer.cc 2023-06-20 10:56:18.770685431 -0700 -@@ -78,7 +78,7 @@ - #include "common/linux/linux_libc_support.h" - #include "common/minidump_type_helper.h" - #include "google_breakpad/common/minidump_format.h" --#include "third_party/lss/linux_syscall_support.h" -+#include - - namespace { - -diff -Naur a/src/client/linux/minidump_writer/proc_cpuinfo_reader.h b/src/client/linux/minidump_writer/proc_cpuinfo_reader.h ---- a/src/client/linux/minidump_writer/proc_cpuinfo_reader.h 2023-01-27 13:36:21.000000000 -0700 -+++ b/src/client/linux/minidump_writer/proc_cpuinfo_reader.h 2023-06-20 10:56:21.702688851 -0700 -@@ -35,7 +35,7 @@ - - #include "client/linux/minidump_writer/line_reader.h" - #include "common/linux/linux_libc_support.h" --#include "third_party/lss/linux_syscall_support.h" -+#include - - namespace google_breakpad { - -diff -Naur a/src/client/minidump_file_writer.cc b/src/client/minidump_file_writer.cc ---- a/src/client/minidump_file_writer.cc 2023-01-27 13:36:21.000000000 -0700 -+++ b/src/client/minidump_file_writer.cc 2023-06-20 10:56:18.794685460 -0700 -@@ -40,7 +40,7 @@ - #include "common/linux/linux_libc_support.h" - #include "common/string_conversion.h" - #if defined(__linux__) && __linux__ --#include "third_party/lss/linux_syscall_support.h" -+#include - #endif - - #if defined(__ANDROID__) -diff -Naur a/src/common/linux/file_id.cc b/src/common/linux/file_id.cc ---- a/src/common/linux/file_id.cc 2023-01-27 13:36:21.000000000 -0700 -+++ b/src/common/linux/file_id.cc 2023-06-20 10:56:18.846685520 -0700 -@@ -45,7 +45,7 @@ - #include "common/linux/linux_libc_support.h" - #include "common/linux/memory_mapped_file.h" - #include "common/using_std_string.h" --#include "third_party/lss/linux_syscall_support.h" -+#include - - namespace google_breakpad { - namespace elf { -diff -Naur a/src/common/linux/memory_mapped_file.cc b/src/common/linux/memory_mapped_file.cc ---- a/src/common/linux/memory_mapped_file.cc 2023-01-27 13:36:21.000000000 -0700 -+++ b/src/common/linux/memory_mapped_file.cc 2023-06-20 10:56:18.854685530 -0700 -@@ -39,7 +39,7 @@ - #include - - #include "common/memory_range.h" --#include "third_party/lss/linux_syscall_support.h" -+#include - - namespace google_breakpad { - -diff -Naur a/src/common/linux/safe_readlink.cc b/src/common/linux/safe_readlink.cc ---- a/src/common/linux/safe_readlink.cc 2023-01-27 13:36:21.000000000 -0700 -+++ b/src/common/linux/safe_readlink.cc 2023-06-20 10:56:18.858685533 -0700 -@@ -31,7 +31,7 @@ - - #include - --#include "third_party/lss/linux_syscall_support.h" -+#include - - namespace google_breakpad { - -diff -Naur a/src/common/memory_allocator.h b/src/common/memory_allocator.h ---- a/src/common/memory_allocator.h 2023-01-27 13:36:21.000000000 -0700 -+++ b/src/common/memory_allocator.h 2023-06-20 10:56:21.818688987 -0700 -@@ -46,7 +46,7 @@ - #define sys_munmap munmap - #define MAP_ANONYMOUS MAP_ANON - #else --#include "third_party/lss/linux_syscall_support.h" -+#include - #endif - - namespace google_breakpad { -diff -Naur a/src/processor/testdata/linux_test_app.cc b/src/processor/testdata/linux_test_app.cc ---- a/src/processor/testdata/linux_test_app.cc 2023-01-27 13:36:21.000000000 -0700 -+++ b/src/processor/testdata/linux_test_app.cc 2023-06-20 10:56:18.990685688 -0700 -@@ -45,7 +45,7 @@ - #include - - #include "client/linux/handler/exception_handler.h" --#include "third_party/lss/linux_syscall_support.h" -+#include - - namespace { - -diff -Naur a/src/tools/linux/md2core/minidump-2-core.cc b/src/tools/linux/md2core/minidump-2-core.cc ---- a/src/tools/linux/md2core/minidump-2-core.cc 2023-01-27 13:36:21.000000000 -0700 -+++ b/src/tools/linux/md2core/minidump-2-core.cc 2023-06-20 10:56:18.994685693 -0700 -@@ -51,7 +51,7 @@ - #include "common/using_std_string.h" - #include "google_breakpad/common/breakpad_types.h" - #include "google_breakpad/common/minidump_format.h" --#include "third_party/lss/linux_syscall_support.h" -+#include - #include "tools/linux/md2core/minidump_memory_range.h" - - #if ULONG_MAX == 0xffffffffffffffff diff --git a/3rd_party/breakpad/patches/0002-Remove-hardcoded-fpic.patch b/3rd_party/breakpad/patches/0002-Remove-hardcoded-fpic.patch deleted file mode 100644 index bd0eaba8..00000000 --- a/3rd_party/breakpad/patches/0002-Remove-hardcoded-fpic.patch +++ /dev/null @@ -1,29 +0,0 @@ ---- a/Makefile.in 2023-01-27 13:36:21.000000000 -0700 -+++ b/Makefile.in 2023-06-20 11:07:14.611452052 -0700 -@@ -129,8 +129,6 @@ - @ANDROID_HOST_TRUE@ -I$(top_srcdir)/src/common/android/testing/include - - # Build as PIC on Linux, for linux_client_unittest_shlib --@LINUX_HOST_TRUE@am__append_2 = -fPIC --@LINUX_HOST_TRUE@am__append_3 = -fPIC - libexec_PROGRAMS = $(am__EXEEXT_10) - bin_PROGRAMS = $(am__EXEEXT_2) $(am__EXEEXT_3) $(am__EXEEXT_4) - check_PROGRAMS = src/common/safe_math_unittest$(EXEEXT) \ -@@ -1744,7 +1742,7 @@ - HEADERS = $(includec_HEADERS) $(includecl_HEADERS) \ - $(includeclc_HEADERS) $(includecldwc_HEADERS) \ - $(includeclh_HEADERS) $(includeclm_HEADERS) \ -- $(includegbc_HEADERS) $(includelss_HEADERS) \ -+ $(includegbc_HEADERS) \ - $(includep_HEADERS) - am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) - # Read a list of newline-separated strings from the standard input, -@@ -9650,7 +9648,7 @@ - install-includeclHEADERS install-includeclcHEADERS \ - install-includecldwcHEADERS install-includeclhHEADERS \ - install-includeclmHEADERS install-includegbcHEADERS \ -- install-includelssHEADERS install-includepHEADERS \ -+ install-includepHEADERS \ - install-pkgconfigDATA - - install-dvi: install-dvi-am diff --git a/3rd_party/folly/CMakeLists.txt b/3rd_party/folly/CMakeLists.txt deleted file mode 100644 index 61f3d3b0..00000000 --- a/3rd_party/folly/CMakeLists.txt +++ /dev/null @@ -1,7 +0,0 @@ -cmake_minimum_required(VERSION 3.1) -project(cmake_wrapper) - -include(conanbuildinfo.cmake) -conan_basic_setup(KEEP_RPATHS) - -add_subdirectory("source_subfolder") diff --git a/3rd_party/folly/conan_deps.cmake b/3rd_party/folly/conan_deps.cmake new file mode 100644 index 00000000..bdc0907f --- /dev/null +++ b/3rd_party/folly/conan_deps.cmake @@ -0,0 +1,39 @@ +# Set the dependency flags expected by https://github.com/facebook/folly/blob/v2023.12.18.00/CMake/folly-deps.cmake + +macro(custom_find_package name var) + find_package(${name} ${ARGN} + # Allow only Conan packages + NO_DEFAULT_PATH + PATHS ${CMAKE_PREFIX_PATH} + ) + set(${var}_FOUND TRUE) + set(${var}_VERSION ${${name}_VERSION}) + set(${var}_VERSION_STRING ${${name}_VERSION_STRING}) + set(${var}_INCLUDE_DIRS ${${name}_INCLUDE_DIRS}) + set(${var}_INCLUDE_DIR ${${name}_INCLUDE_DIR}) + set(${var}_INCLUDE ${${name}_INCLUDE_DIR}) + set(${var}_LIB ${${name}_LIBRARIES}) + set(${var}_LIBRARY ${${name}_LIBRARIES}) + set(${var}_LIBRARIES ${${name}_LIBRARIES}) + set(${var}_DEFINITIONS ${${name}_DEFINITIONS}) +endmacro() + +custom_find_package(BZip2 BZIP2) +custom_find_package(Backtrace BACKTRACE) +custom_find_package(DoubleConversion DOUBLE_CONVERSION REQUIRED) +custom_find_package(Gflags LIBGFLAGS) +custom_find_package(Glog GLOG) +custom_find_package(LZ4 LZ4) +custom_find_package(LibAIO LIBAIO) +custom_find_package(LibDwarf LIBDWARF) +custom_find_package(LibEvent LIBEVENT REQUIRED) +custom_find_package(LibLZMA LIBLZMA) +custom_find_package(LibUnwind LIBUNWIND) +custom_find_package(LibUring LIBURING) +custom_find_package(Libiberty LIBIBERTY) +custom_find_package(Libsodium LIBSODIUM) +custom_find_package(OpenSSL OPENSSL REQUIRED) +custom_find_package(Snappy SNAPPY) +custom_find_package(ZLIB ZLIB) +custom_find_package(Zstd ZSTD) +custom_find_package(fmt FMT REQUIRED) diff --git a/3rd_party/folly/conandata.yml b/3rd_party/folly/conandata.yml index 41532ccf..5841cab6 100644 --- a/3rd_party/folly/conandata.yml +++ b/3rd_party/folly/conandata.yml @@ -1,74 +1,16 @@ sources: - "2019.10.21.00": - url: "https://github.com/facebook/folly/archive/v2019.10.21.00.tar.gz" - sha256: "6efcc2b2090691a9fe3d339c433d102d6399bbdc6dc4893080d59f15f648f393" - "2020.08.10.00": - url: "https://github.com/facebook/folly/archive/v2020.08.10.00.tar.gz" - sha256: "e81140d04a4e89e3f848e528466a9b3d3ae37d7eeb9e65467fca50d70918eef6" - "nu2.2022.01.31.00": - url: "https://github.com/facebook/folly/archive/v2022.01.31.00.tar.gz" - sha256: "d764b9a7832d967bb7cfea4bcda15d650315aa4d559fde1da2a52b015cd88b9c" - "nu2.2023.12.11.00": - url: "https://github.com/facebook/folly/archive/v2023.12.11.00.tar.gz" - sha256: "1ff0c0258f8322a818a6e0cd27c0fc965360dc04af308e59349e1c79966190a1" + "nu2.2023.12.18.00": + url: "https://github.com/facebook/folly/releases/download/v2023.12.18.00/folly-v2023.12.18.00.tar.gz" + sha256: "57ce880e3ae7b4d4fe0980be64da9e6ca7dd09e2de477670bf984e11cf7739f2" + "2022.10.31.00": + url: "https://github.com/facebook/folly/releases/download/v2022.10.31.00/folly-v2022.10.31.00.tar.gz" + sha256: "d7749f78eee2a327c1fa6b4a290e4bcd7115cdd7f7ef59f9e043ed59e597ab30" patches: - "2019.10.21.00": - - patch_file: "patches/0001-find-packages.patch" - base_path: "source_subfolder" - - patch_file: "patches/0002-compiler-flags.patch" - base_path: "source_subfolder" - - patch_file: "patches/0003-boost-shared-ptr.patch" - base_path: "source_subfolder" - - patch_file: "patches/0004-disable-posix-names.patch" - base_path: "source_subfolder" - - patch_file: "patches/0005-include-atomic.patch" - base_path: "source_subfolder" - - patch_file: "patches/0006-duplicate-hash.patch" - base_path: "source_subfolder" - - patch_file: "patches/0007-allow-builtins.patch" - base_path: "source_subfolder" - - patch_file: "patches/0013-include-bit.patch" - base_path: "source_subfolder" - - patch_file: "patches/0020-include-ssizet.patch" - base_path: "source_subfolder" - "2020.08.10.00": - - patch_file: "patches/0008-find-packages.patch" - base_path: "source_subfolder" - - patch_file: "patches/0009-ill-formed-atomic-copy.patch" - base_path: "source_subfolder" - - patch_file: "patches/0010-duplicate-hash.patch" - base_path: "source_subfolder" - - patch_file: "patches/0011-disable-logger-example.patch" - base_path: "source_subfolder" - - patch_file: "patches/0012-compiler-flags.patch" - base_path: "source_subfolder" - - patch_file: "patches/0014-find-librt.patch" - base_path: "source_subfolder" - - patch_file: "patches/0015-benchmark-format-macros.patch" - base_path: "source_subfolder" - "nu2.2022.01.31.00": - - patch_file: "patches/0016-find-packages.patch" - base_path: "source_subfolder" - - patch_file: "patches/0017-compiler-flags.patch" - base_path: "source_subfolder" - - patch_file: "patches/0018-find-glog.patch" - base_path: "source_subfolder" - - patch_file: "patches/0019-exclude-example.patch" - base_path: "source_subfolder" - - patch_file: "patches/0022-fix-windows-minmax.patch" - base_path: "source_subfolder" - - patch_file: "patches/0023-fix-safe-check-sanitize.patch" - base_path: "source_subfolder" - "nu2.2023.12.11.00": - - patch_file: "patches/0016-find-packages.patch" - base_path: "source_subfolder" - - patch_file: "patches/0018-find-glog.patch" - base_path: "source_subfolder" - - patch_file: "patches/0019-exclude-example.patch" - base_path: "source_subfolder" - - patch_file: "patches/0022-fix-windows-minmax.patch" - base_path: "source_subfolder" - - patch_file: "patches/0024-compiler-flags.patch" - base_path: "source_subfolder" - - patch_file: "patches/0025-timespec.patch" - base_path: "source_subfolder" + "nu2.2023.12.18.00": + - patch_file: "patches/2023-001-compiler-flags.patch" + patch_description: "Do not hard-code debug flag for all build types" + patch_type: "conan" + "2022.10.31.00": + - patch_file: "patches/2022-001-compiler-flags.patch" + patch_description: "Do not hard-code debug flag for all build types" + patch_type: "conan" diff --git a/3rd_party/folly/conanfile.py b/3rd_party/folly/conanfile.py index 7ce3707c..02f9184c 100755 --- a/3rd_party/folly/conanfile.py +++ b/3rd_party/folly/conanfile.py @@ -1,14 +1,14 @@ -from conan.tools.microsoft import is_msvc, msvc_runtime_flag -from conan.tools.build import can_run -from conan.tools.scm import Version -from conan.tools import files from conan import ConanFile -from conans import CMake, tools from conan.errors import ConanInvalidConfiguration -import functools +from conan.tools.apple import is_apple_os +from conan.tools.build import can_run, check_min_cppstd +from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout +from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rmdir, replace_in_file, save +from conan.tools.microsoft import is_msvc, msvc_runtime_flag +from conan.tools.scm import Version import os -required_conan_version = ">=1.45.0" +required_conan_version = ">=1.53.0" class FollyConan(ConanFile): @@ -19,223 +19,254 @@ class FollyConan(ConanFile): homepage = "https://github.com/facebook/folly" license = "Apache-2.0" + package_type = "library" settings = "os", "arch", "compiler", "build_type" options = { "shared": [True, False], "fPIC": [True, False], - "use_sse4_2" : [True, False], + "use_sse4_2": [True, False], } default_options = { "shared": False, "fPIC": True, - "use_sse4_2" : False + "use_sse4_2": False } - generators = "cmake", "cmake_find_package" - - @property - def _source_subfolder(self): - return "source_subfolder" - @property - def _minimum_cpp_standard(self): - return 17 if Version(self.version) >= "2022.01.31.00" else 14 + def _min_cppstd(self): + return 17 @property - def _minimum_compilers_version(self): + def _compilers_minimum_version(self): return { - "Visual Studio": "15", - "gcc": "5", - "clang": "6", - "apple-clang": "8", - } if self._minimum_cpp_standard == 14 else { "gcc": "7", "Visual Studio": "16", + "msvc": "192", "clang": "6", "apple-clang": "10", } def export_sources(self): - self.copy("CMakeLists.txt") - for patch in self.conan_data.get("patches", {}).get(self.version, []): - self.copy(patch["patch_file"]) - + export_conandata_patches(self) + copy(self, "conan_deps.cmake", self.recipe_folder, os.path.join(self.export_sources_folder, "src")) def config_options(self): if self.settings.os == "Windows": del self.options.fPIC - - if str(self.settings.arch) not in ['x86', 'x86_64']: + if str(self.settings.arch) not in ["x86", "x86_64"]: del self.options.use_sse4_2 def configure(self): if self.options.shared: - del self.options.fPIC + self.options.rm_safe("fPIC") + + def layout(self): + cmake_layout(self, src_folder="src") def requirements(self): - self.requires("boost/1.82.0") + self.requires("boost/1.83.0", transitive_headers=True, transitive_libs=True) self.requires("bzip2/1.0.8") - self.requires("double-conversion/3.2.0") + self.requires("double-conversion/3.3.0", transitive_headers=True, transitive_libs=True) self.requires("gflags/2.2.2") - self.requires("glog/0.4.0") - self.requires("libevent/2.1.12") - self.requires("openssl/3.1.1") - self.requires("lz4/1.9.3") - self.requires("snappy/1.1.9") - self.requires("zlib/1.2.13") - self.requires("zstd/1.5.2") - self.requires("liburing/2.4") + self.requires("glog/0.6.0", transitive_headers=True, transitive_libs=True) + self.requires("libevent/2.1.12", transitive_headers=True, transitive_libs=True) + self.requires("openssl/[>=1.1 <4]") + self.requires("lz4/1.9.4", transitive_libs=True) + self.requires("snappy/1.1.10") + self.requires("zlib/[>=1.2.11 <2]") + self.requires("zstd/1.5.5", transitive_libs=True) if not is_msvc(self): self.requires("libdwarf/20191104") - self.requires("libsodium/1.0.18") - self.requires("xz_utils/5.2.5") + self.requires("libsodium/1.0.19") + self.requires("xz_utils/5.4.5") # FIXME: Causing compilation issues on clang: self.requires("jemalloc/5.2.1") - if self.settings.os == "Linux": + if self.settings.os in ["Linux", "FreeBSD"]: self.requires("libiberty/9.1.0") - self.requires("libunwind/1.5.0") - if Version(self.version) >= "2020.08.10.00": - self.requires("fmt/[>=10]") + self.requires("libunwind/1.7.2") + self.requires("fmt/10.2.1", transitive_headers=True, transitive_libs=True) @property def _required_boost_components(self): return ["context", "filesystem", "program_options", "regex", "system", "thread"] + @property + def _required_boost_conan_components(self): + return [f"boost::{comp}" for comp in self._required_boost_components] + + @property + def _required_boost_cmake_targets(self): + return [f"Boost::{comp}" for comp in self._required_boost_components] + def validate(self): - if self.settings.compiler.get_safe("cppstd"): - tools.check_min_cppstd(self, self._minimum_cpp_standard) - min_version = self._minimum_compilers_version.get(str(self.settings.compiler)) - if not min_version: - self.output.warn("{} recipe lacks information about the {} compiler support.".format(self.name, self.settings.compiler)) - else: - if Version(self.settings.compiler.version) < min_version: - raise ConanInvalidConfiguration("{} requires C++{} support. The current compiler {} {} does not support it.".format( - self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version)) - - if Version(self.version) < "2022.01.31.00" and self.settings.os != "Linux": - raise ConanInvalidConfiguration("Conan support for non-Linux platforms starts with Folly version 2022.01.31.00") - - if self.settings.os == "Macos" and self.settings.arch != "x86_64": + if self.settings.compiler.cppstd: + check_min_cppstd(self, self._min_cppstd) + minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False) + if minimum_version and Version(self.settings.compiler.version) < minimum_version: + raise ConanInvalidConfiguration( + f"{self.ref} requires C++{self._min_cppstd}, which your compiler does not support." + ) + + if is_apple_os(self) and self.settings.arch != "x86_64": raise ConanInvalidConfiguration("Conan currently requires a 64bit target architecture for Folly on Macos") + if is_apple_os(self): + raise ConanInvalidConfiguration("Current recipe doesn't support Macos. Contributions are welcome.") + if self.settings.os == "Windows" and self.settings.arch != "x86_64": raise ConanInvalidConfiguration("Folly requires a 64bit target architecture on Windows") - if self.settings.os in ["Macos", "Windows"] and self.options.shared: - raise ConanInvalidConfiguration("Folly could not be built on {} as shared library".format(self.settings.os)) + if (is_apple_os(self) or self.settings.os == "Windows") and self.options.shared: + raise ConanInvalidConfiguration(f"Folly could not be built on {self.settings.os} as shared library") + + if self.settings.os == "Windows": + raise ConanInvalidConfiguration(f"{self.ref} could not be built on {self.settings.os}. PR's are welcome.") - if Version(self.version) == "2020.08.10.00" and self.settings.compiler == "clang" and self.options.shared: - raise ConanInvalidConfiguration("Folly could not be built by clang as a shared library") + if self.settings.compiler == "clang" and self.options.shared: + raise ConanInvalidConfiguration(f"Folly {self.version} could not be built by clang as a shared library") - if self.options["boost"].header_only: + glog = self.dependencies["glog"] + if self.options.shared and not glog.options.shared: + raise ConanInvalidConfiguration(f"If Folly is built as shared lib, glog must be a shared lib too.") + + boost = self.dependencies["boost"] + if boost.options.header_only: raise ConanInvalidConfiguration("Folly could not be built with a header only Boost") - miss_boost_required_comp = any(getattr(self.options["boost"], "without_{}".format(boost_comp), True) for boost_comp in self._required_boost_components) + miss_boost_required_comp = any(getattr(boost.options, f"without_{boost_comp}", True) for boost_comp in self._required_boost_components) if miss_boost_required_comp: - raise ConanInvalidConfiguration("Folly requires these boost components: {}".format(", ".join(self._required_boost_components))) - - min_version = self._minimum_compilers_version.get(str(self.settings.compiler)) - if not min_version: - self.output.warn("{} recipe lacks information about the {} compiler support.".format(self.name, self.settings.compiler)) - else: - if Version(self.settings.compiler.version) < min_version: - raise ConanInvalidConfiguration("{} requires C++{} support. The current compiler {} {} does not support it.".format( - self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version)) + required_components = ", ".join(self._required_boost_components) + raise ConanInvalidConfiguration(f"Folly requires these boost components: {required_components}") if self.options.get_safe("use_sse4_2") and str(self.settings.arch) not in ['x86', 'x86_64']: raise ConanInvalidConfiguration(f"{self.ref} can use the option use_sse4_2 only on x86 and x86_64 archs.") - def build_requirements(self): - self.build_requires("cmake/3.27.0") - def source(self): - files.get(self, **self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True) + get(self, **self.conan_data["sources"][self.version], strip_root=False) + + def _cppstd_flag_value(self, cppstd): + cppstd = str(cppstd) + if cppstd.startswith("gnu"): + prefix = "gnu" + year = cppstd[3:] + else: + prefix = "c" + year = cppstd + if is_msvc(self): + prefix = "" + if year > "17": + year = "latest" + return f"{prefix}++{year}" + + def generate(self): + tc = CMakeToolchain(self) + + tc.cache_variables["CMAKE_PROJECT_folly_INCLUDE"] = os.path.join(self.source_folder, "conan_deps.cmake") - @functools.lru_cache(1) - def _configure_cmake(self): - cmake = CMake(self) if can_run(self): - cmake.definitions["FOLLY_HAVE_UNALIGNED_ACCESS_EXITCODE"] = "0" - cmake.definitions["FOLLY_HAVE_UNALIGNED_ACCESS_EXITCODE__TRYRUN_OUTPUT"] = "" - cmake.definitions["FOLLY_HAVE_LINUX_VDSO_EXITCODE"] = "0" - cmake.definitions["FOLLY_HAVE_LINUX_VDSO_EXITCODE__TRYRUN_OUTPUT"] = "" - cmake.definitions["FOLLY_HAVE_WCHAR_SUPPORT_EXITCODE"] = "0" - cmake.definitions["FOLLY_HAVE_WCHAR_SUPPORT_EXITCODE__TRYRUN_OUTPUT"] = "" - cmake.definitions["HAVE_VSNPRINTF_ERRORS_EXITCODE"] = "0" - cmake.definitions["HAVE_VSNPRINTF_ERRORS_EXITCODE__TRYRUN_OUTPUT"] = "" - - if self.options.get_safe("use_sse4_2") and str(self.settings.arch) in ['x86', 'x86_64']: - # in folly, if simd >=sse4.2, we also needs -mfma flag to avoid compiling error. + for var in ["FOLLY_HAVE_UNALIGNED_ACCESS", "FOLLY_HAVE_LINUX_VDSO", "FOLLY_HAVE_WCHAR_SUPPORT", "HAVE_VSNPRINTF_ERRORS"]: + tc.variables[f"{var}_EXITCODE"] = "0" + tc.variables[f"{var}_EXITCODE__TRYRUN_OUTPUT"] = "" + + if self.options.get_safe("use_sse4_2") and str(self.settings.arch) in ["x86", "x86_64"]: + tc.preprocessor_definitions["FOLLY_SSE"] = "4" + tc.preprocessor_definitions["FOLLY_SSE_MINOR"] = "2" if not is_msvc(self): - cmake.definitions["CMAKE_C_FLAGS"] = "-mfma" - cmake.definitions["CMAKE_CXX_FLAGS"] = "-mfma" + cflags = "-mfma" else: - cmake.definitions["CMAKE_C_FLAGS"] = "/arch:FMA" - cmake.definitions["CMAKE_CXX_FLAGS"] = "/arch:FMA" - - cmake.definitions["CMAKE_POSITION_INDEPENDENT_CODE"] = self.options.get_safe("fPIC", True) - - cxx_std_flag = tools.cppstd_flag(self.settings) - cxx_std_value = cxx_std_flag.split('=')[1] if cxx_std_flag else "c++{}".format(self._minimum_cpp_standard) - cmake.definitions["CXX_STD"] = cxx_std_value - if is_msvc: - cmake.definitions["MSVC_LANGUAGE_VERSION"] = cxx_std_value - cmake.definitions["MSVC_ENABLE_ALL_WARNINGS"] = False - cmake.definitions["MSVC_USE_STATIC_RUNTIME"] = "MT" in msvc_runtime_flag(self) - cmake.configure() - return cmake - + cflags = "/arch:FMA" + tc.blocks["cmake_flags_init"].template += ( + f'string(APPEND CMAKE_CXX_FLAGS_INIT " {cflags}")\n' + f'string(APPEND CMAKE_C_FLAGS_INIT " {cflags}")\n' + ) + + # Folly is not respecting this from the helper https://github.com/conan-io/conan-center-index/pull/15726/files#r1097068754 + tc.variables["CMAKE_POSITION_INDEPENDENT_CODE"] = self.options.get_safe("fPIC", True) + # Relocatable shared lib on Macos + tc.cache_variables["CMAKE_POLICY_DEFAULT_CMP0042"] = "NEW" + # Honor CMAKE_REQUIRED_LIBRARIES in check_include_file_xxx + tc.cache_variables["CMAKE_POLICY_DEFAULT_CMP0075"] = "NEW" + # Honor BUILD_SHARED_LIBS from conan_toolchain (see https://github.com/conan-io/conan/issues/11840) + tc.cache_variables["CMAKE_POLICY_DEFAULT_CMP0077"] = "NEW" + # Honor Boost_ROOT set by boost recipe + tc.cache_variables["CMAKE_POLICY_DEFAULT_CMP0074"] = "NEW" + + cxx_std_value = self._cppstd_flag_value(self.settings.get_safe("compiler.cppstd", self._min_cppstd)) + # 2019.10.21.00 -> either MSVC_ flags or CXX_STD + if is_msvc(self): + tc.variables["MSVC_LANGUAGE_VERSION"] = cxx_std_value + tc.variables["MSVC_ENABLE_ALL_WARNINGS"] = False + tc.variables["MSVC_USE_STATIC_RUNTIME"] = "MT" in msvc_runtime_flag(self) + tc.preprocessor_definitions["NOMINMAX"] = "" + else: + tc.variables["CXX_STD"] = cxx_std_value + + if not self.dependencies["boost"].options.header_only: + tc.cache_variables["BOOST_LINK_STATIC"] = not self.dependencies["boost"].options.shared + + tc.cache_variables["CMAKE_POLICY_DEFAULT_CMP0074"] = "NEW" # Honor Boost_ROOT set by boost recipe + tc.generate() + + deps = CMakeDeps(self) + # deps.set_property("backtrace", "cmake_file_name", "Backtrace") + deps.set_property("boost", "cmake_file_name", "Boost") + deps.set_property("bzip2", "cmake_file_name", "BZip2") + deps.set_property("double-conversion", "cmake_file_name", "DoubleConversion") + deps.set_property("fmt", "cmake_file_name", "fmt") + deps.set_property("gflags", "cmake_file_name", "Gflags") + deps.set_property("glog", "cmake_file_name", "Glog") + # deps.set_property("libaio", "cmake_file_name", "LibAIO") + deps.set_property("libdwarf", "cmake_file_name", "LibDwarf") + deps.set_property("libevent", "cmake_file_name", "LibEvent") + deps.set_property("libiberty", "cmake_file_name", "Libiberty") + deps.set_property("libsodium", "cmake_file_name", "Libsodium") + deps.set_property("libunwind", "cmake_file_name", "LibUnwind") + # deps.set_property("liburing", "cmake_file_name", "LibUring") + deps.set_property("lz4", "cmake_file_name", "LZ4") + deps.set_property("openssl", "cmake_file_name", "OpenSSL") + deps.set_property("snappy", "cmake_file_name", "Snappy") + deps.set_property("xz_utils", "cmake_file_name", "LibLZMA") + deps.set_property("zlib", "cmake_file_name", "ZLIB") + deps.set_property("zstd", "cmake_file_name", "Zstd") + deps.generate() + + def _patch_sources(self): + apply_conandata_patches(self) + folly_deps = os.path.join(self.source_folder, "CMake", "folly-deps.cmake") + replace_in_file(self, folly_deps, " MODULE", " ") + replace_in_file(self, folly_deps, "${Boost_LIBRARIES}", f"{' '.join(self._required_boost_cmake_targets)}") + replace_in_file(self, folly_deps, "OpenSSL 1.1.1", "OpenSSL") + # Disable example + save(self, os.path.join(self.source_folder, "folly", "logging", "example", "CMakeLists.txt"), "") def build(self): - for patch in self.conan_data.get("patches", {}).get(self.version, []): - tools.patch(**patch) - cmake = self._configure_cmake() + self._patch_sources() + cmake = CMake(self) + cmake.configure() cmake.build() def package(self): - self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder) - cmake = self._configure_cmake() + copy(self, pattern="LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder) + cmake = CMake(self) cmake.install() - files.rmdir(self, os.path.join(self.package_folder, "lib", "cmake")) - files.rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig")) + rmdir(self, os.path.join(self.package_folder, "lib", "cmake")) + rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig")) def package_info(self): self.cpp_info.set_property("cmake_file_name", "folly") - self.cpp_info.set_property("cmake_target_name", "Folly::folly") + self.cpp_info.set_property("cmake_target_name", "folly::folly") self.cpp_info.set_property("pkg_config_name", "libfolly") - # TODO: back to global scope in conan v2 once cmake_find_package_* generators removed - if Version(self.version) == "2019.10.21.00": - self.cpp_info.components["libfolly"].libs = [ - "follybenchmark", - "folly_test_util", - "folly" - ] - elif Version(self.version) >= "2020.08.10.00": - if self.settings.os == "Linux": - self.cpp_info.components["libfolly"].libs = [ - "folly_exception_counter", - "folly_exception_tracer", - "folly_exception_tracer_base", - "folly_test_util", - "follybenchmark", - "folly" - ] - else: - self.cpp_info.components["libfolly"].libs = [ - "folly_test_util", - "follybenchmark", - "folly" - ] - - self.cpp_info.components["libfolly"].requires = [ - "boost::boost", - "bzip2::bzip2", + self.cpp_info.components["libfolly"].set_property("cmake_target_name", "Folly::folly") + self.cpp_info.components["libfolly"].set_property("pkg_config_name", "libfolly") + self.cpp_info.components["libfolly"].libs = ["folly"] + self.cpp_info.components["libfolly"].requires = ["fmt::fmt"] + self._required_boost_conan_components + [ "double-conversion::double-conversion", "gflags::gflags", "glog::glog", "libevent::libevent", "lz4::lz4", "openssl::openssl", + "bzip2::bzip2", "snappy::snappy", "zlib::zlib", "zstd::zstd", @@ -244,53 +275,39 @@ def package_info(self): ] if not is_msvc(self): self.cpp_info.components["libfolly"].requires.append("libdwarf::libdwarf") - if self.settings.os == "Linux": + if self.settings.os in ["Linux", "FreeBSD"]: self.cpp_info.components["libfolly"].requires.extend(["libiberty::libiberty", "libunwind::libunwind"]) self.cpp_info.components["libfolly"].system_libs.extend(["pthread", "dl", "rt"]) - - if Version(self.version) >= "2020.08.10.00": - self.cpp_info.components["libfolly"].requires.append("fmt::fmt") - if self.settings.os == "Linux": - self.cpp_info.components["libfolly"].defines.extend(["FOLLY_HAVE_ELF", "FOLLY_HAVE_DWARF"]) - + self.cpp_info.components["libfolly"].defines.extend(["FOLLY_HAVE_ELF", "FOLLY_HAVE_DWARF"]) elif self.settings.os == "Windows": self.cpp_info.components["libfolly"].system_libs.extend(["ws2_32", "iphlpapi", "crypt32"]) - if (self.settings.os == "Linux" and self.settings.compiler == "clang" and - self.settings.compiler.libcxx == "libstdc++") or \ - (self.settings.os == "Macos" and self.settings.compiler == "apple-clang" and - Version(self.settings.compiler.version.value) == "9.0" and self.settings.compiler.libcxx == "libc++"): + if str(self.settings.compiler.libcxx) == "libstdc++" or ( + self.settings.compiler == "apple-clang" and + Version(self.settings.compiler.version.value) == "9.0" and + self.settings.compiler.libcxx == "libc++"): self.cpp_info.components["libfolly"].system_libs.append("atomic") - if self.settings.os == "Macos" and self.settings.compiler == "apple-clang" and Version(self.settings.compiler.version.value) >= "11.0": + if self.settings.compiler == "apple-clang" and Version(self.settings.compiler.version.value) >= "11.0": self.cpp_info.components["libfolly"].system_libs.append("c++abi") - if self.options.get_safe("use_sse4_2") and str(self.settings.arch) in ['x86', 'x86_64']: - self.cpp_info.components["libfolly"].defines = ["FOLLY_SSE=4", "FOLLY_SSE_MINOR=2"] + if self.settings.compiler == "gcc" and Version(self.settings.compiler.version) < "9": + self.cpp_info.components["libfolly"].system_libs.append("stdc++fs") - # TODO: to remove in conan v2 once cmake_find_package_* & pkg_config generators removed - self.cpp_info.filenames["cmake_find_package"] = "folly" - self.cpp_info.filenames["cmake_find_package_multi"] = "folly" - self.cpp_info.names["cmake_find_package"] = "Folly" - self.cpp_info.names["cmake_find_package_multi"] = "Folly" - self.cpp_info.names["pkg_config"] = "libfolly" - self.cpp_info.components["libfolly"].names["cmake_find_package"] = "folly" - self.cpp_info.components["libfolly"].names["cmake_find_package_multi"] = "folly" - self.cpp_info.components["libfolly"].set_property("cmake_target_name", "Folly::folly") - self.cpp_info.components["libfolly"].set_property("pkg_config_name", "libfolly") + if self.settings.compiler == "clang" and Version(self.settings.compiler.version) < "9": + self.cpp_info.components["libfolly"].system_libs.append("stdc++fs" if self.settings.compiler.libcxx in ["libstdc++", "libstdc++11"] else "c++fs") - if Version(self.version) >= "2019.10.21.00": - self.cpp_info.components["follybenchmark"].set_property("cmake_target_name", "Folly::follybenchmark") - self.cpp_info.components["follybenchmark"].set_property("pkg_config_name", "libfollybenchmark") - self.cpp_info.components["follybenchmark"].libs = ["follybenchmark"] - self.cpp_info.components["follybenchmark"].requires = ["libfolly"] + self.cpp_info.components["follybenchmark"].set_property("cmake_target_name", "Folly::follybenchmark") + self.cpp_info.components["follybenchmark"].set_property("pkg_config_name", "libfollybenchmark") + self.cpp_info.components["follybenchmark"].libs = ["follybenchmark"] + self.cpp_info.components["follybenchmark"].requires = ["libfolly"] - self.cpp_info.components["folly_test_util"].set_property("cmake_target_name", "Folly::folly_test_util") - self.cpp_info.components["folly_test_util"].set_property("pkg_config_name", "libfolly_test_util") - self.cpp_info.components["folly_test_util"].libs = ["folly_test_util"] - self.cpp_info.components["folly_test_util"].requires = ["libfolly"] + self.cpp_info.components["folly_test_util"].set_property("cmake_target_name", "Folly::folly_test_util") + self.cpp_info.components["folly_test_util"].set_property("pkg_config_name", "libfolly_test_util") + self.cpp_info.components["folly_test_util"].libs = ["folly_test_util"] + self.cpp_info.components["folly_test_util"].requires = ["libfolly"] - if Version(self.version) >= "2020.08.10.00" and self.settings.os == "Linux": + if self.settings.os in ["Linux", "FreeBSD"]: self.cpp_info.components["folly_exception_tracer_base"].set_property("cmake_target_name", "Folly::folly_exception_tracer_base") self.cpp_info.components["folly_exception_tracer_base"].set_property("pkg_config_name", "libfolly_exception_tracer_base") self.cpp_info.components["folly_exception_tracer_base"].libs = ["folly_exception_tracer_base"] @@ -305,3 +322,27 @@ def package_info(self): self.cpp_info.components["folly_exception_counter"].set_property("pkg_config_name", "libfolly_exception_counter") self.cpp_info.components["folly_exception_counter"].libs = ["folly_exception_counter"] self.cpp_info.components["folly_exception_counter"].requires = ["folly_exception_tracer"] + + # TODO: to remove in conan v2 once cmake_find_package_* & pkg_config generators removed + self.cpp_info.filenames["cmake_find_package"] = "folly" + self.cpp_info.filenames["cmake_find_package_multi"] = "folly" + self.cpp_info.names["cmake_find_package"] = "Folly" + self.cpp_info.names["cmake_find_package_multi"] = "Folly" + self.cpp_info.names["pkg_config"] = "libfolly" + self.cpp_info.components["libfolly"].names["cmake_find_package"] = "folly" + self.cpp_info.components["libfolly"].names["cmake_find_package_multi"] = "folly" + + # TODO: to remove in conan v2 once cmake_find_package_* & pkg_config generators removed + self.cpp_info.components["follybenchmark"].names["cmake_find_package"] = "follybenchmark" + self.cpp_info.components["follybenchmark"].names["cmake_find_package_multi"] = "follybenchmark" + self.cpp_info.components["folly_test_util"].names["cmake_find_package"] = "folly_test_util" + self.cpp_info.components["folly_test_util"].names["cmake_find_package_multi"] = "folly_test_util" + + if self.settings.os in ["Linux", "FreeBSD"]: + # TODO: to remove in conan v2 once cmake_find_package_* & pkg_config generators removed + self.cpp_info.components["folly_exception_tracer_base"].names["cmake_find_package"] = "folly_exception_tracer_base" + self.cpp_info.components["folly_exception_tracer_base"].names["cmake_find_package_multi"] = "folly_exception_tracer_base" + self.cpp_info.components["folly_exception_tracer"].names["cmake_find_package"] = "folly_exception_tracer" + self.cpp_info.components["folly_exception_tracer"].names["cmake_find_package_multi"] = "folly_exception_tracer" + self.cpp_info.components["folly_exception_counter"].names["cmake_find_package"] = "folly_exception_counter" + self.cpp_info.components["folly_exception_counter"].names["cmake_find_package_multi"] = "folly_exception_counter" diff --git a/3rd_party/folly/patches/0001-find-packages.patch b/3rd_party/folly/patches/0001-find-packages.patch deleted file mode 100644 index 4cee77cd..00000000 --- a/3rd_party/folly/patches/0001-find-packages.patch +++ /dev/null @@ -1,93 +0,0 @@ -diff --git a/CMake/FindLibsodium.cmake b/CMake/FindLibsodium.cmake -index 18d4d0c..2b3cd2a 100644 ---- a/CMake/FindLibsodium.cmake -+++ b/CMake/FindLibsodium.cmake -@@ -15,7 +15,7 @@ - find_path(LIBSODIUM_INCLUDE_DIR NAMES sodium.h) - mark_as_advanced(LIBSODIUM_INCLUDE_DIR) - --find_library(LIBSODIUM_LIBRARY NAMES sodium) -+find_library(LIBSODIUM_LIBRARY NAMES sodium libsodium PATHS ${CONAN_LIBSODIUM_ROOT}) - mark_as_advanced(LIBSODIUM_LIBRARY) - - include(FindPackageHandleStandardArgs) -diff --git a/CMake/folly-deps.cmake b/CMake/folly-deps.cmake -index 048e1cd..da3ab8e 100644 ---- a/CMake/folly-deps.cmake -+++ b/CMake/folly-deps.cmake -@@ -36,19 +36,19 @@ find_package(DoubleConversion MODULE REQUIRED) - list(APPEND FOLLY_LINK_LIBRARIES ${DOUBLE_CONVERSION_LIBRARY}) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${DOUBLE_CONVERSION_INCLUDE_DIR}) - --find_package(Gflags MODULE) --set(FOLLY_HAVE_LIBGFLAGS ${LIBGFLAGS_FOUND}) --list(APPEND FOLLY_LINK_LIBRARIES ${LIBGFLAGS_LIBRARY}) --list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBGFLAGS_INCLUDE_DIR}) --list(APPEND CMAKE_REQUIRED_LIBRARIES ${LIBGFLAGS_LIBRARY}) --list(APPEND CMAKE_REQUIRED_INCLUDES ${LIBGFLAGS_INCLUDE_DIR}) -- --find_package(Glog MODULE) -+find_package(gflags MODULE REQUIRED) -+set(FOLLY_HAVE_LIBGFLAGS ${GFLAGS_FOUND}) -+list(APPEND FOLLY_LINK_LIBRARIES ${CONAN_LIBS_GFLAGS}) -+list(APPEND FOLLY_INCLUDE_DIRECTORIES ${CONAN_INCLUDE_DIRS_GFLAGS}) -+list(APPEND CMAKE_REQUIRED_LIBRARIES ${gflags_LIBRARY}) -+list(APPEND CMAKE_REQUIRED_INCLUDES ${gflags_INCLUDE_DIR}) -+ -+find_package(glog MODULE) - set(FOLLY_HAVE_LIBGLOG ${GLOG_FOUND}) - list(APPEND FOLLY_LINK_LIBRARIES ${GLOG_LIBRARY}) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${GLOG_INCLUDE_DIR}) - --find_package(LibEvent MODULE REQUIRED) -+find_package(Libevent MODULE REQUIRED) - list(APPEND FOLLY_LINK_LIBRARIES ${LIBEVENT_LIB}) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBEVENT_INCLUDE_DIR}) - -diff --git a/build/fbcode_builder/CMake/FindGflags.cmake b/build/fbcode_builder/CMake/FindGflags.cmake -index 246ceac..385605e 100644 ---- a/build/fbcode_builder/CMake/FindGflags.cmake -+++ b/build/fbcode_builder/CMake/FindGflags.cmake -@@ -48,8 +48,13 @@ if (gflags_FOUND) - else() - FIND_PATH(LIBGFLAGS_INCLUDE_DIR gflags/gflags.h) - -- FIND_LIBRARY(LIBGFLAGS_LIBRARY_DEBUG NAMES gflagsd gflags_staticd) -- FIND_LIBRARY(LIBGFLAGS_LIBRARY_RELEASE NAMES gflags gflags_static) -+ if(CMAKE_SYSTEM_NAME STREQUAL "Windows") -+ FIND_LIBRARY(LIBGFLAGS_LIBRARY_DEBUG NAMES gflagsd gflags_nothreads_staticd gflags_nothreads_static_debug PATHS ${CONAN_GFLAGS_ROOT}) -+ FIND_LIBRARY(LIBGFLAGS_LIBRARY_RELEASE NAMES gflags gflags_nothreads_static PATHS ${CONAN_GFLAGS_ROOT}) -+ else() -+ FIND_LIBRARY(LIBGFLAGS_LIBRARY_DEBUG NAMES gflags gflags_nothreads_debug PATHS ${CONAN_GFLAGS_ROOT}) -+ FIND_LIBRARY(LIBGFLAGS_LIBRARY_RELEASE NAMES gflags_nothreads gflags PATHS ${CONAN_GFLAGS_ROOT}) -+ endif() - - INCLUDE(SelectLibraryConfigurations) - SELECT_LIBRARY_CONFIGURATIONS(LIBGFLAGS) -diff --git a/build/fbcode_builder/CMake/FindGlog.cmake b/build/fbcode_builder/CMake/FindGlog.cmake -index a589b2e..15aef75 100644 ---- a/build/fbcode_builder/CMake/FindGlog.cmake -+++ b/build/fbcode_builder/CMake/FindGlog.cmake -@@ -8,8 +8,7 @@ - - include(FindPackageHandleStandardArgs) - --find_library(GLOG_LIBRARY glog -- PATHS ${GLOG_LIBRARYDIR}) -+find_library(GLOG_LIBRARY glog glogd PATHS ${CONAN_GLOG_ROOT}) - - find_path(GLOG_INCLUDE_DIR glog/logging.h - PATHS ${GLOG_INCLUDEDIR}) -diff --git a/build/fbcode_builder/CMake/FindLibEvent.cmake b/build/fbcode_builder/CMake/FindLibEvent.cmake -index dd11ebd..9ef0807 100644 ---- a/build/fbcode_builder/CMake/FindLibEvent.cmake -+++ b/build/fbcode_builder/CMake/FindLibEvent.cmake -@@ -50,7 +50,7 @@ if (TARGET event) - endif() - else() - find_path(LIBEVENT_INCLUDE_DIR event.h PATHS ${LibEvent_INCLUDE_PATHS}) -- find_library(LIBEVENT_LIB NAMES event PATHS ${LibEvent_LIB_PATHS}) -+ find_library(LIBEVENT_LIB NAMES event libevent PATHS ${CONAN_LIBEVENT_ROOT}) - - if (LIBEVENT_LIB AND LIBEVENT_INCLUDE_DIR) - set(LibEvent_FOUND TRUE) diff --git a/3rd_party/folly/patches/0002-compiler-flags.patch b/3rd_party/folly/patches/0002-compiler-flags.patch deleted file mode 100644 index b9213ff6..00000000 --- a/3rd_party/folly/patches/0002-compiler-flags.patch +++ /dev/null @@ -1,24 +0,0 @@ -diff --git a/CMake/FollyCompilerUnix.cmake b/CMake/FollyCompilerUnix.cmake -index 7fba75f..019d30f 100644 ---- a/CMake/FollyCompilerUnix.cmake -+++ b/CMake/FollyCompilerUnix.cmake -@@ -28,9 +28,9 @@ set( - ) - mark_as_advanced(CXX_STD) - --set(CMAKE_CXX_FLAGS_COMMON "-g -Wall -Wextra") -+set(CMAKE_CXX_FLAGS_COMMON "-Wall -Wextra") - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${CMAKE_CXX_FLAGS_COMMON}") --set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_COMMON} -O3") -+set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_COMMON}") - - # Note that CMAKE_REQUIRED_FLAGS must be a string, not a list - set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -std=${CXX_STD}") -@@ -44,7 +44,6 @@ function(apply_folly_compile_options_to_target THETARGET) - ) - target_compile_options(${THETARGET} - PRIVATE -- -g - -std=${CXX_STD} - -finput-charset=UTF-8 - -fsigned-char diff --git a/3rd_party/folly/patches/0003-boost-shared-ptr.patch b/3rd_party/folly/patches/0003-boost-shared-ptr.patch deleted file mode 100644 index 7608d6b2..00000000 --- a/3rd_party/folly/patches/0003-boost-shared-ptr.patch +++ /dev/null @@ -1,24 +0,0 @@ -diff --git a/folly/portability/PThread.cpp b/folly/portability/PThread.cpp -index f8cd6d4..0908668 100644 ---- a/folly/portability/PThread.cpp -+++ b/folly/portability/PThread.cpp -@@ -18,7 +18,9 @@ - - #if !FOLLY_HAVE_PTHREAD && defined(_WIN32) - #include // @manual -- -+#include -+#include -+#include - #include - - #include -@@ -683,7 +685,7 @@ int pthread_setspecific(pthread_key_t key, const void* value) { - // function, which we don't want to do. - boost::detail::set_tss_data( - realKey, -- boost::shared_ptr(), -+ 0,0, - const_cast(value), - false); - return 0; diff --git a/3rd_party/folly/patches/0004-disable-posix-names.patch b/3rd_party/folly/patches/0004-disable-posix-names.patch deleted file mode 100644 index 9efd4e24..00000000 --- a/3rd_party/folly/patches/0004-disable-posix-names.patch +++ /dev/null @@ -1,27 +0,0 @@ -diff --git a/folly/portability/Windows.h b/folly/portability/Windows.h -index f7990ca..b22fac5 100644 ---- a/folly/portability/Windows.h -+++ b/folly/portability/Windows.h -@@ -26,16 +26,12 @@ - // These have to be this way because we define our own versions - // of close(), because the normal Windows versions don't handle - // sockets at all. --#ifndef __STDC__ --/* nolint */ --#define __STDC__ 1 --#include // @manual nolint --#include // @manual nolint --#undef __STDC__ --#else --#include // @manual nolint --#include // @manual nolint --#endif -+#include -+#pragma push_macro("_CRT_INTERNAL_NONSTDC_NAMES") -+#define _CRT_INTERNAL_NONSTDC_NAMES 0 -+#include -+#include -+#pragma pop_macro("_CRT_INTERNAL_NONSTDC_NAMES") - - #if defined(min) || defined(max) - #error Windows.h needs to be included by this header, or else NOMINMAX needs \ diff --git a/3rd_party/folly/patches/0005-include-atomic.patch b/3rd_party/folly/patches/0005-include-atomic.patch deleted file mode 100644 index 0eb9382e..00000000 --- a/3rd_party/folly/patches/0005-include-atomic.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff --git a/folly/portability/PThread.cpp b/folly/portability/PThread.cpp -index 2891c4c..7c98975 100644 ---- a/folly/portability/PThread.cpp -+++ b/folly/portability/PThread.cpp -@@ -30,6 +30,7 @@ - #include - #include - #include -+#include - - #include - #include diff --git a/3rd_party/folly/patches/0006-duplicate-hash.patch b/3rd_party/folly/patches/0006-duplicate-hash.patch deleted file mode 100644 index f8905d00..00000000 --- a/3rd_party/folly/patches/0006-duplicate-hash.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/folly/hash/Hash.h b/folly/hash/Hash.h -index 33fa553..4266cf1 100644 ---- a/folly/hash/Hash.h -+++ b/folly/hash/Hash.h -@@ -730,7 +730,7 @@ struct TupleHasher<0, Ts...> { - - // Custom hash functions. - namespace std { --#if FOLLY_SUPPLY_MISSING_INT128_TRAITS -+#if 0 - template <> - struct hash<__int128> : folly::detail::integral_hasher<__int128> {}; - diff --git a/3rd_party/folly/patches/0007-allow-builtins.patch b/3rd_party/folly/patches/0007-allow-builtins.patch deleted file mode 100644 index de09722f..00000000 --- a/3rd_party/folly/patches/0007-allow-builtins.patch +++ /dev/null @@ -1,128 +0,0 @@ -diff --git a/folly/portability/Builtins.h b/folly/portability/Builtins.h -index 971cb8819..e68de4456 100644 ---- a/folly/portability/Builtins.h -+++ b/folly/portability/Builtins.h -@@ -41,7 +41,6 @@ FOLLY_ALWAYS_INLINE void __builtin___clear_cache(char* begin, char* end) { - } - } - --#if !defined(_MSC_VER) || (_MSC_VER < 1923) - FOLLY_ALWAYS_INLINE int __builtin_clz(unsigned int x) { - unsigned long index; - return int(_BitScanReverse(&index, (unsigned long)x) ? 31 - index : 32); -@@ -93,7 +92,6 @@ FOLLY_ALWAYS_INLINE int __builtin_ctzll(unsigned long long x) { - return int(_BitScanForward64(&index, x) ? index : 64); - } - #endif --#endif // !defined(_MSC_VER) || (_MSC_VER < 1923) - - FOLLY_ALWAYS_INLINE int __builtin_ffs(int x) { - unsigned long index; -@@ -119,15 +117,12 @@ FOLLY_ALWAYS_INLINE int __builtin_popcount(unsigned int x) { - return int(__popcnt(x)); - } - --#if !defined(_MSC_VER) || (_MSC_VER < 1923) - FOLLY_ALWAYS_INLINE int __builtin_popcountl(unsigned long x) { - static_assert(sizeof(x) == 4, ""); - return int(__popcnt(x)); - } --#endif // !defined(_MSC_VER) || (_MSC_VER < 1923) - #endif - --#if !defined(_MSC_VER) || (_MSC_VER < 1923) - #if defined(_M_IX86) - FOLLY_ALWAYS_INLINE int __builtin_popcountll(unsigned long long x) { - return int(__popcnt((unsigned int)(x >> 32))) + -@@ -138,7 +133,6 @@ FOLLY_ALWAYS_INLINE int __builtin_popcountll(unsigned long long x) { - return int(__popcnt64(x)); - } - #endif --#endif // !defined(_MSC_VER) || (_MSC_VER < 1923) - - FOLLY_ALWAYS_INLINE void* __builtin_return_address(unsigned int frame) { - // I really hope frame is zero... --- - -diff --git a/folly/portability/Builtins.h b/folly/portability/Builtins.h -index e68de4456..30caf4003 100644 ---- a/folly/portability/Builtins.h -+++ b/folly/portability/Builtins.h -@@ -16,7 +16,7 @@ - - #pragma once - --#if defined(_WIN32) && !defined(__clang__) -+#if defined(_WIN32) && !defined(__MINGW32__) && !defined(__clang__) - #include - #include - #include --- -see https://github.com/facebook/folly/issues/1412 -diff --git a/folly/portability/Builtins.h b/folly/portability/Builtins.h -index 30caf4003..e8ef97266 100644 ---- a/folly/portability/Builtins.h -+++ b/folly/portability/Builtins.h -@@ -22,6 +22,14 @@ - #include - #include - -+// MSVC had added support for __builtin_clz etc. in 16.3 (1923) but it will be -+// removed in 16.8 (1928). -+#if (_MSC_VER >= 1923) && (_MSC_VER < 1928) -+#define FOLLY_DETAILFOLLY_DETAIL_MSC_BUILTIN_SUPPORT 1 -+#else -+#define FOLLY_DETAILFOLLY_DETAIL_MSC_BUILTIN_SUPPORT 0 -+#endif -+ - namespace folly { - namespace portability { - namespace detail { -@@ -41,6 +49,7 @@ FOLLY_ALWAYS_INLINE void __builtin___clear_cache(char* begin, char* end) { - } - } - -+#if !defined(_MSC_VER) || !defined(FOLLY_DETAIL_MSC_BUILTIN_SUPPORT) - FOLLY_ALWAYS_INLINE int __builtin_clz(unsigned int x) { - unsigned long index; - return int(_BitScanReverse(&index, (unsigned long)x) ? 31 - index : 32); -@@ -92,6 +101,7 @@ FOLLY_ALWAYS_INLINE int __builtin_ctzll(unsigned long long x) { - return int(_BitScanForward64(&index, x) ? index : 64); - } - #endif -+#endif // !defined(_MSC_VER) || !defined(FOLLY_DETAIL_MSC_BUILTIN_SUPPORT) - - FOLLY_ALWAYS_INLINE int __builtin_ffs(int x) { - unsigned long index; -@@ -117,12 +127,15 @@ FOLLY_ALWAYS_INLINE int __builtin_popcount(unsigned int x) { - return int(__popcnt(x)); - } - -+#if !defined(_MSC_VER) || !defined(FOLLY_DETAIL_MSC_BUILTIN_SUPPORT) - FOLLY_ALWAYS_INLINE int __builtin_popcountl(unsigned long x) { - static_assert(sizeof(x) == 4, ""); - return int(__popcnt(x)); - } -+#endif // !defined(_MSC_VER) || !defined(FOLLY_DETAIL_MSC_BUILTIN_SUPPORT) - #endif - -+#if !defined(_MSC_VER) || !defined(FOLLY_DETAIL_MSC_BUILTIN_SUPPORT) - #if defined(_M_IX86) - FOLLY_ALWAYS_INLINE int __builtin_popcountll(unsigned long long x) { - return int(__popcnt((unsigned int)(x >> 32))) + -@@ -133,6 +146,7 @@ FOLLY_ALWAYS_INLINE int __builtin_popcountll(unsigned long long x) { - return int(__popcnt64(x)); - } - #endif -+#endif // !defined(_MSC_VER) || !defined(FOLLY_DETAIL_MSC_BUILTIN_SUPPORT) - - FOLLY_ALWAYS_INLINE void* __builtin_return_address(unsigned int frame) { - // I really hope frame is zero... -@@ -141,3 +155,5 @@ FOLLY_ALWAYS_INLINE void* __builtin_return_address(unsigned int frame) { - return _ReturnAddress(); - } - #endif -+ -+#undef FOLLY_DETAIL_MSC_BUILTIN_SUPPORT --- - diff --git a/3rd_party/folly/patches/0008-find-packages.patch b/3rd_party/folly/patches/0008-find-packages.patch deleted file mode 100644 index 3329a684..00000000 --- a/3rd_party/folly/patches/0008-find-packages.patch +++ /dev/null @@ -1,73 +0,0 @@ -diff --git a/CMake/folly-deps.cmake b/CMake/folly-deps.cmake -index 3169b972d52..23dc6d509b1 100644 ---- a/CMake/folly-deps.cmake -+++ b/CMake/folly-deps.cmake -@@ -46,11 +46,11 @@ find_package(Boost 1.51.0 MODULE - list(APPEND FOLLY_LINK_LIBRARIES ${Boost_LIBRARIES}) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${Boost_INCLUDE_DIRS}) - --find_package(DoubleConversion MODULE REQUIRED) -+find_package(double-conversion MODULE REQUIRED) - list(APPEND FOLLY_LINK_LIBRARIES ${DOUBLE_CONVERSION_LIBRARY}) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${DOUBLE_CONVERSION_INCLUDE_DIR}) - --find_package(Gflags MODULE) -+find_package(gflags MODULE) - set(FOLLY_HAVE_LIBGFLAGS ${LIBGFLAGS_FOUND}) - if(LIBGFLAGS_FOUND) - list(APPEND FOLLY_LINK_LIBRARIES ${LIBGFLAGS_LIBRARY}) -@@ -59,12 +59,12 @@ if(LIBGFLAGS_FOUND) - list(APPEND CMAKE_REQUIRED_INCLUDES ${LIBGFLAGS_INCLUDE_DIR}) - endif() - --find_package(Glog MODULE) -+find_package(glog MODULE) - set(FOLLY_HAVE_LIBGLOG ${GLOG_FOUND}) - list(APPEND FOLLY_LINK_LIBRARIES ${GLOG_LIBRARY}) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${GLOG_INCLUDE_DIR}) - --find_package(LibEvent MODULE REQUIRED) -+find_package(Libevent MODULE REQUIRED) - list(APPEND FOLLY_LINK_LIBRARIES ${LIBEVENT_LIB}) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBEVENT_INCLUDE_DIR}) - -@@ -96,14 +96,14 @@ if (LIBLZMA_FOUND) - list(APPEND FOLLY_LINK_LIBRARIES ${LIBLZMA_LIBRARIES}) - endif() - --find_package(LZ4 MODULE) -+find_package(lz4 MODULE) - set(FOLLY_HAVE_LIBLZ4 ${LZ4_FOUND}) - if (LZ4_FOUND) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LZ4_INCLUDE_DIR}) - list(APPEND FOLLY_LINK_LIBRARIES ${LZ4_LIBRARY}) - endif() - --find_package(Zstd MODULE) -+find_package(zstd MODULE) - set(FOLLY_HAVE_LIBZSTD ${ZSTD_FOUND}) - if(ZSTD_FOUND) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${ZSTD_INCLUDE_DIR}) -@@ -117,11 +117,11 @@ if (SNAPPY_FOUND) - list(APPEND FOLLY_LINK_LIBRARIES ${SNAPPY_LIBRARY}) - endif() - --find_package(LibDwarf) -+find_package(libdwarf) - list(APPEND FOLLY_LINK_LIBRARIES ${LIBDWARF_LIBRARIES}) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBDWARF_INCLUDE_DIRS}) - --find_package(Libiberty) -+find_package(libiberty) - list(APPEND FOLLY_LINK_LIBRARIES ${LIBIBERTY_LIBRARIES}) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBIBERTY_INCLUDE_DIRS}) - -@@ -133,7 +133,7 @@ find_package(LibUring) - list(APPEND FOLLY_LINK_LIBRARIES ${LIBURING_LIBRARIES}) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBURING_INCLUDE_DIRS}) - --find_package(Libsodium) -+find_package(libsodium) - list(APPEND FOLLY_LINK_LIBRARIES ${LIBSODIUM_LIBRARIES}) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBSODIUM_INCLUDE_DIRS}) - diff --git a/3rd_party/folly/patches/0009-ill-formed-atomic-copy.patch b/3rd_party/folly/patches/0009-ill-formed-atomic-copy.patch deleted file mode 100644 index 58f95224..00000000 --- a/3rd_party/folly/patches/0009-ill-formed-atomic-copy.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/folly/fibers/SemaphoreBase.cpp b/folly/fibers/SemaphoreBase.cpp -index 06e9ecc7111..77e2da75c18 100644 ---- a/folly/fibers/SemaphoreBase.cpp -+++ b/folly/fibers/SemaphoreBase.cpp -@@ -170,7 +170,7 @@ namespace { - class FutureWaiter final : public fibers::Baton::Waiter { - public: - explicit FutureWaiter(int64_t tokens) -- : semaphoreWaiter(SemaphoreBase::Waiter(tokens)) { -+ : semaphoreWaiter(tokens) { - semaphoreWaiter.baton.setWaiter(*this); - } - diff --git a/3rd_party/folly/patches/0010-duplicate-hash.patch b/3rd_party/folly/patches/0010-duplicate-hash.patch deleted file mode 100644 index 69268c6a..00000000 --- a/3rd_party/folly/patches/0010-duplicate-hash.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/folly/hash/Hash.h b/folly/hash/Hash.h -index a8a50e8e8dc..d7a3da8e61f 100644 ---- a/folly/hash/Hash.h -+++ b/folly/hash/Hash.h -@@ -733,7 +733,7 @@ struct TupleHasher<0, Ts...> { - - // Custom hash functions. - namespace std { --#if FOLLY_SUPPLY_MISSING_INT128_TRAITS -+#if 0 - template <> - struct hash<__int128> : folly::detail::integral_hasher<__int128> {}; - diff --git a/3rd_party/folly/patches/0011-disable-logger-example.patch b/3rd_party/folly/patches/0011-disable-logger-example.patch deleted file mode 100644 index fa209053..00000000 --- a/3rd_party/folly/patches/0011-disable-logger-example.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff --git a/folly/CMakeLists.txt b/folly/CMakeLists.txt -index 08de7daf20f..cdc1f03bf46 100644 ---- a/folly/CMakeLists.txt -+++ b/folly/CMakeLists.txt -@@ -27,7 +27,6 @@ install( - ) - - add_subdirectory(experimental/exception_tracer) --add_subdirectory(logging/example) - - if (PYTHON_EXTENSIONS) - # Create tree of symbolic links in structure required for successful diff --git a/3rd_party/folly/patches/0012-compiler-flags.patch b/3rd_party/folly/patches/0012-compiler-flags.patch deleted file mode 100644 index 358500a1..00000000 --- a/3rd_party/folly/patches/0012-compiler-flags.patch +++ /dev/null @@ -1,24 +0,0 @@ -diff --git a/CMake/FollyCompilerUnix.cmake b/CMake/FollyCompilerUnix.cmake -index 8dcaf141a3a..200fe8d3798 100644 ---- a/CMake/FollyCompilerUnix.cmake -+++ b/CMake/FollyCompilerUnix.cmake -@@ -28,9 +28,9 @@ set( - ) - mark_as_advanced(CXX_STD) - --set(CMAKE_CXX_FLAGS_COMMON "-g -Wall -Wextra") -+set(CMAKE_CXX_FLAGS_COMMON "-Wall -Wextra") - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${CMAKE_CXX_FLAGS_COMMON}") --set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_COMMON} -O3") -+set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_COMMON}") - - # Note that CMAKE_REQUIRED_FLAGS must be a string, not a list - set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -std=${CXX_STD}") -@@ -43,7 +43,6 @@ function(apply_folly_compile_options_to_target THETARGET) - ) - target_compile_options(${THETARGET} - PRIVATE -- -g - -std=${CXX_STD} - -finput-charset=UTF-8 - -fsigned-char diff --git a/3rd_party/folly/patches/0013-include-bit.patch b/3rd_party/folly/patches/0013-include-bit.patch deleted file mode 100644 index 1a8ac249..00000000 --- a/3rd_party/folly/patches/0013-include-bit.patch +++ /dev/null @@ -1,13 +0,0 @@ ---- a/folly/lang/Bits.h -+++ b/folly/lang/Bits.h -@@ -64,6 +64,10 @@ - #include - #include - -+#if __has_include() && __cpp_lib_bit_cast -+#include -+#endif -+ - namespace folly { - - #if __cpp_lib_bit_cast diff --git a/3rd_party/folly/patches/0014-find-librt.patch b/3rd_party/folly/patches/0014-find-librt.patch deleted file mode 100644 index 90a1f0f7..00000000 --- a/3rd_party/folly/patches/0014-find-librt.patch +++ /dev/null @@ -1,18 +0,0 @@ -diff --git a/CMake/FollyConfigChecks.cmake b/CMake/FollyConfigChecks.cmake -index 6b8b308c7..908d72d51 100644 ---- a/CMake/FollyConfigChecks.cmake -+++ b/CMake/FollyConfigChecks.cmake -@@ -83,6 +83,13 @@ string(REGEX REPLACE - CMAKE_REQUIRED_FLAGS - "${CMAKE_REQUIRED_FLAGS}") - -+if (CMAKE_SYSTEM_NAME STREQUAL "Linux") -+ find_library(LIBRT rt) -+ if (LIBRT) -+ list(APPEND CMAKE_REQUIRED_LIBRARIES "rt") -+ endif() -+endif() -+ - check_symbol_exists(pthread_atfork pthread.h FOLLY_HAVE_PTHREAD_ATFORK) - - # Unfortunately check_symbol_exists() does not work for memrchr(): diff --git a/3rd_party/folly/patches/0015-benchmark-format-macros.patch b/3rd_party/folly/patches/0015-benchmark-format-macros.patch deleted file mode 100644 index 14f8b208..00000000 --- a/3rd_party/folly/patches/0015-benchmark-format-macros.patch +++ /dev/null @@ -1,15 +0,0 @@ -diff --git a/folly/Benchmark.cpp b/folly/Benchmark.cpp -index 389ee46a1..390b7674b 100644 ---- a/folly/Benchmark.cpp -+++ b/folly/Benchmark.cpp -@@ -16,6 +16,10 @@ - - // @author Andrei Alexandrescu (andrei.alexandrescu@fb.com) - -+#ifndef __STDC_FORMAT_MACROS -+#define __STDC_FORMAT_MACROS 1 -+#endif -+ - #include - - #include diff --git a/3rd_party/folly/patches/0016-find-packages.patch b/3rd_party/folly/patches/0016-find-packages.patch deleted file mode 100644 index c6cd14fa..00000000 --- a/3rd_party/folly/patches/0016-find-packages.patch +++ /dev/null @@ -1,80 +0,0 @@ -diff --git a/CMake/folly-deps.cmake b/CMake/folly-deps.cmake -index 9c9d9ea60..e78611542 100644 ---- a/CMake/folly-deps.cmake -+++ b/CMake/folly-deps.cmake -@@ -48,25 +48,25 @@ find_package(Boost 1.51.0 MODULE - list(APPEND FOLLY_LINK_LIBRARIES ${Boost_LIBRARIES}) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${Boost_INCLUDE_DIRS}) - --find_package(DoubleConversion MODULE REQUIRED) -+find_package(double-conversion MODULE REQUIRED) - list(APPEND FOLLY_LINK_LIBRARIES ${DOUBLE_CONVERSION_LIBRARY}) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${DOUBLE_CONVERSION_INCLUDE_DIR}) - --find_package(Gflags MODULE) --set(FOLLY_HAVE_LIBGFLAGS ${LIBGFLAGS_FOUND}) --if(LIBGFLAGS_FOUND) -- list(APPEND FOLLY_LINK_LIBRARIES ${LIBGFLAGS_LIBRARY}) -- list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBGFLAGS_INCLUDE_DIR}) -- set(FOLLY_LIBGFLAGS_LIBRARY ${LIBGFLAGS_LIBRARY}) -- set(FOLLY_LIBGFLAGS_INCLUDE ${LIBGFLAGS_INCLUDE_DIR}) -+find_package(gflags MODULE) -+set(FOLLY_HAVE_LIBGFLAGS ${gflags_FOUND}) -+if(gflags_FOUND) -+ list(APPEND FOLLY_LINK_LIBRARIES ${gflags_LIBRARIES}) -+ list(APPEND FOLLY_INCLUDE_DIRECTORIES ${gflags_INCLUDE_DIRS}) -+ set(FOLLY_LIBGFLAGS_LIBRARY ${gflags_LIBRARIES}) -+ set(FOLLY_LIBGFLAGS_INCLUDE ${gflags_INCLUDE_DIRS}) - endif() - --find_package(Glog MODULE) -+find_package(glog MODULE) - set(FOLLY_HAVE_LIBGLOG ${GLOG_FOUND}) - list(APPEND FOLLY_LINK_LIBRARIES ${GLOG_LIBRARY}) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${GLOG_INCLUDE_DIR}) - --find_package(LibEvent MODULE REQUIRED) -+find_package(Libevent MODULE REQUIRED) - list(APPEND FOLLY_LINK_LIBRARIES ${LIBEVENT_LIB}) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBEVENT_INCLUDE_DIR}) - -@@ -104,14 +104,14 @@ if (LIBLZMA_FOUND) - list(APPEND FOLLY_LINK_LIBRARIES ${LIBLZMA_LIBRARIES}) - endif() - --find_package(LZ4 MODULE) -+find_package(lz4 MODULE) - set(FOLLY_HAVE_LIBLZ4 ${LZ4_FOUND}) - if (LZ4_FOUND) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LZ4_INCLUDE_DIR}) - list(APPEND FOLLY_LINK_LIBRARIES ${LZ4_LIBRARY}) - endif() - --find_package(Zstd MODULE) -+find_package(zstd MODULE) - set(FOLLY_HAVE_LIBZSTD ${ZSTD_FOUND}) - if(ZSTD_FOUND) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${ZSTD_INCLUDE_DIR}) -@@ -125,11 +125,11 @@ if (SNAPPY_FOUND) - list(APPEND FOLLY_LINK_LIBRARIES ${SNAPPY_LIBRARY}) - endif() - --find_package(LibDwarf) -+find_package(libdwarf) - list(APPEND FOLLY_LINK_LIBRARIES ${LIBDWARF_LIBRARIES}) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBDWARF_INCLUDE_DIRS}) - --find_package(Libiberty) -+find_package(libiberty) - list(APPEND FOLLY_LINK_LIBRARIES ${LIBIBERTY_LIBRARIES}) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBIBERTY_INCLUDE_DIRS}) - -@@ -141,7 +141,7 @@ find_package(LibUring) - list(APPEND FOLLY_LINK_LIBRARIES ${LIBURING_LIBRARIES}) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBURING_INCLUDE_DIRS}) - --find_package(Libsodium) -+find_package(libsodium) - list(APPEND FOLLY_LINK_LIBRARIES ${LIBSODIUM_LIBRARIES}) - list(APPEND FOLLY_INCLUDE_DIRECTORIES ${LIBSODIUM_INCLUDE_DIRS}) - diff --git a/3rd_party/folly/patches/0018-find-glog.patch b/3rd_party/folly/patches/0018-find-glog.patch deleted file mode 100644 index b2d17ad4..00000000 --- a/3rd_party/folly/patches/0018-find-glog.patch +++ /dev/null @@ -1,16 +0,0 @@ -diff --git a/build/fbcode_builder/CMake/FindGlog.cmake b/build/fbcode_builder/CMake/FindGlog.cmake -index 752647c..aa2fa1c 100644 ---- a/build/fbcode_builder/CMake/FindGlog.cmake -+++ b/build/fbcode_builder/CMake/FindGlog.cmake -@@ -10,9 +10,9 @@ include(FindPackageHandleStandardArgs) - include(SelectLibraryConfigurations) - - find_library(GLOG_LIBRARY_RELEASE glog -- PATHS ${GLOG_LIBRARYDIR}) -+ PATHS ${CONAN_GLOG_ROOT}) - find_library(GLOG_LIBRARY_DEBUG glogd -- PATHS ${GLOG_LIBRARYDIR}) -+ PATHS ${CONAN_GLOG_ROOT}) - - find_path(GLOG_INCLUDE_DIR glog/logging.h - PATHS ${GLOG_INCLUDEDIR}) diff --git a/3rd_party/folly/patches/0019-exclude-example.patch b/3rd_party/folly/patches/0019-exclude-example.patch deleted file mode 100644 index d2afb310..00000000 --- a/3rd_party/folly/patches/0019-exclude-example.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff --git a/folly/CMakeLists.txt b/folly/CMakeLists.txt -index 883f27c..2d2086f 100644 ---- a/folly/CMakeLists.txt -+++ b/folly/CMakeLists.txt -@@ -28,7 +28,6 @@ install( - ) - - add_subdirectory(experimental/exception_tracer) --add_subdirectory(logging/example) - - if (PYTHON_EXTENSIONS) - # Create tree of symbolic links in structure required for successful diff --git a/3rd_party/folly/patches/0020-include-ssizet.patch b/3rd_party/folly/patches/0020-include-ssizet.patch deleted file mode 100644 index 0575fecd..00000000 --- a/3rd_party/folly/patches/0020-include-ssizet.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff --git a/folly/executors/ExecutorWithPriority.h b/folly/executors/ExecutorWithPriority.h -index b95a6c4..18b8110 100644 ---- a/folly/executors/ExecutorWithPriority.h -+++ b/folly/executors/ExecutorWithPriority.h -@@ -18,6 +18,7 @@ - - #include - #include -+#include - - namespace folly { - diff --git a/3rd_party/folly/patches/0021-typedef-clockid.patch b/3rd_party/folly/patches/0021-typedef-clockid.patch deleted file mode 100644 index fb46c057..00000000 --- a/3rd_party/folly/patches/0021-typedef-clockid.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff --git a/folly/portability/Time.h b/folly/portability/Time.h -index 994a09e5d70..e4f0d101ca9 100644 ---- a/folly/portability/Time.h -+++ b/folly/portability/Time.h -@@ -49,7 +49,6 @@ - #define CLOCK_PROCESS_CPUTIME_ID 2 - #define CLOCK_THREAD_CPUTIME_ID 3 - --typedef uint8_t clockid_t; - extern "C" int clock_gettime(clockid_t clk_id, struct timespec* ts); - extern "C" int clock_getres(clockid_t clk_id, struct timespec* ts); - #endif diff --git a/3rd_party/folly/patches/0022-fix-windows-minmax.patch b/3rd_party/folly/patches/0022-fix-windows-minmax.patch deleted file mode 100644 index 1fc69a43..00000000 --- a/3rd_party/folly/patches/0022-fix-windows-minmax.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff --git a/CMake/FollyCompilerMSVC.cmake b/CMake/FollyCompilerMSVC.cmake -index ec2ce1a1d..16deda71c 100644 ---- a/CMake/FollyCompilerMSVC.cmake -+++ b/CMake/FollyCompilerMSVC.cmake -@@ -289,6 +289,7 @@ function(apply_folly_compile_options_to_target THETARGET) - # And the extra defines: - target_compile_definitions(${THETARGET} - PUBLIC -+ NOMINMAX - _CRT_NONSTDC_NO_WARNINGS # Don't deprecate posix names of functions. - _CRT_SECURE_NO_WARNINGS # Don't deprecate the non _s versions of various standard library functions, because safety is for chumps. - _SCL_SECURE_NO_WARNINGS # Don't deprecate the non _s versions of various standard library functions, because safety is for chumps. diff --git a/3rd_party/folly/patches/0023-fix-safe-check-sanitize.patch b/3rd_party/folly/patches/0023-fix-safe-check-sanitize.patch deleted file mode 100644 index 78ca432f..00000000 --- a/3rd_party/folly/patches/0023-fix-safe-check-sanitize.patch +++ /dev/null @@ -1,16 +0,0 @@ -diff -Naur a/folly/lang/SafeAssert.h b/folly/lang/SafeAssert.h ---- a/folly/lang/SafeAssert.h 2022-01-29 03:30:47.000000000 -0700 -+++ b/folly/lang/SafeAssert.h 2022-06-28 09:47:46.779345576 -0700 -@@ -24,12 +24,7 @@ - #include - #include - --#if __GNUC__ && !__clang__ && FOLLY_SANITIZE_ADDRESS --// gcc+asan has a bug that discards sections when using `static` below --#define FOLLY_DETAIL_SAFE_CHECK_LINKAGE --#else - #define FOLLY_DETAIL_SAFE_CHECK_LINKAGE static --#endif - - #define FOLLY_DETAIL_SAFE_CHECK_IMPL(d, p, expr, expr_s, ...) \ - do { \ diff --git a/3rd_party/folly/patches/0024-compiler-flags.patch b/3rd_party/folly/patches/0024-compiler-flags.patch deleted file mode 100644 index adee0d6e..00000000 --- a/3rd_party/folly/patches/0024-compiler-flags.patch +++ /dev/null @@ -1,23 +0,0 @@ -diff -Naur a/CMake/FollyCompilerUnix.cmake b/CMake/FollyCompilerUnix.cmake ---- a/CMake/FollyCompilerUnix.cmake 2023-12-08 20:38:13.000000000 -0700 -+++ b/CMake/FollyCompilerUnix.cmake 2023-12-11 12:34:46.769353376 -0700 -@@ -12,9 +12,9 @@ - # See the License for the specific language governing permissions and - # limitations under the License. - --set(CMAKE_CXX_FLAGS_COMMON "-g -Wall -Wextra") -+set(CMAKE_CXX_FLAGS_COMMON "-Wall -Wextra") - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${CMAKE_CXX_FLAGS_COMMON}") --set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_COMMON} -O3") -+set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_COMMON}") - - list(APPEND CMAKE_REQUIRED_DEFINITIONS "-D_GNU_SOURCE") - function(apply_folly_compile_options_to_target THETARGET) -@@ -25,7 +25,6 @@ - ) - target_compile_options(${THETARGET} - PRIVATE -- -g - -finput-charset=UTF-8 - -fsigned-char - -Wall diff --git a/3rd_party/folly/patches/0025-timespec.patch b/3rd_party/folly/patches/0025-timespec.patch deleted file mode 100644 index 974a120d..00000000 --- a/3rd_party/folly/patches/0025-timespec.patch +++ /dev/null @@ -1,38 +0,0 @@ -diff -Naur a/folly/io/async/AsyncSocket.cpp b/folly/io/async/AsyncSocket.cpp ---- a/folly/io/async/AsyncSocket.cpp 2023-12-08 20:38:13.000000000 -0700 -+++ b/folly/io/async/AsyncSocket.cpp 2023-12-12 10:15:06.023030521 -0700 -@@ -18,6 +18,9 @@ - - #include - -+/* for struct sock_extended_err*/ -+#include -+ - #include - #include - #include -diff -Naur a/folly/io/async/AsyncUDPSocket.cpp b/folly/io/async/AsyncUDPSocket.cpp ---- a/folly/io/async/AsyncUDPSocket.cpp 2023-12-08 20:38:13.000000000 -0700 -+++ b/folly/io/async/AsyncUDPSocket.cpp 2023-12-12 10:19:06.419424565 -0700 -@@ -17,6 +17,9 @@ - #include - #include - -+/* for struct sock_extended_err*/ -+#include -+ - #include - - #include -diff -Naur a/folly/net/NetOps.h b/folly/net/NetOps.h ---- a/folly/net/NetOps.h 2023-12-12 10:16:10.675139766 -0700 -+++ b/folly/net/NetOps.h 2023-12-12 10:15:55.087113425 -0700 -@@ -114,7 +114,7 @@ - #endif - #endif - /* for struct sock_extended_err*/ --#include -+#include - #endif - #endif - diff --git a/3rd_party/folly/patches/0017-compiler-flags.patch b/3rd_party/folly/patches/2022-001-compiler-flags.patch similarity index 64% rename from 3rd_party/folly/patches/0017-compiler-flags.patch rename to 3rd_party/folly/patches/2022-001-compiler-flags.patch index 1290e801..f7894a5b 100644 --- a/3rd_party/folly/patches/0017-compiler-flags.patch +++ b/3rd_party/folly/patches/2022-001-compiler-flags.patch @@ -1,7 +1,5 @@ -diff --git a/CMake/FollyCompilerUnix.cmake b/CMake/FollyCompilerUnix.cmake -index 8dcaf14..200fe8d 100644 ---- a/CMake/FollyCompilerUnix.cmake -+++ b/CMake/FollyCompilerUnix.cmake +--- CMake/FollyCompilerUnix.cmake ++++ CMake/FollyCompilerUnix.cmake @@ -28,9 +28,9 @@ set( ) mark_as_advanced(CXX_STD) @@ -9,8 +7,7 @@ index 8dcaf14..200fe8d 100644 -set(CMAKE_CXX_FLAGS_COMMON "-g -Wall -Wextra") +set(CMAKE_CXX_FLAGS_COMMON "-Wall -Wextra") set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${CMAKE_CXX_FLAGS_COMMON}") --set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_COMMON} -O3") -+set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_COMMON}") + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_COMMON} -O3") # Note that CMAKE_REQUIRED_FLAGS must be a string, not a list set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -std=${CXX_STD}") diff --git a/3rd_party/folly/patches/2023-001-compiler-flags.patch b/3rd_party/folly/patches/2023-001-compiler-flags.patch new file mode 100644 index 00000000..728a3771 --- /dev/null +++ b/3rd_party/folly/patches/2023-001-compiler-flags.patch @@ -0,0 +1,19 @@ +--- CMake/FollyCompilerUnix.cmake ++++ CMake/FollyCompilerUnix.cmake +@@ -12,7 +12,7 @@ + # See the License for the specific language governing permissions and + # limitations under the License. + +-set(CMAKE_CXX_FLAGS_COMMON "-g -Wall -Wextra") ++set(CMAKE_CXX_FLAGS_COMMON "-Wall -Wextra") + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${CMAKE_CXX_FLAGS_COMMON}") + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_COMMON} -O3") + +@@ -25,7 +25,6 @@ + ) + target_compile_options(${THETARGET} + PRIVATE +- -g + -finput-charset=UTF-8 + -fsigned-char + -Wall diff --git a/3rd_party/folly/test_package/CMakeLists.txt b/3rd_party/folly/test_package/CMakeLists.txt deleted file mode 100644 index cd964d40..00000000 --- a/3rd_party/folly/test_package/CMakeLists.txt +++ /dev/null @@ -1,12 +0,0 @@ -cmake_minimum_required(VERSION 3.15) -project(test_package CXX) - -find_package(folly REQUIRED CONFIG) - -add_executable(${PROJECT_NAME} test_package.cpp) -target_link_libraries(${PROJECT_NAME} - Folly::folly - Folly::follybenchmark) - - -set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 20) diff --git a/3rd_party/folly/test_package/conanfile.py b/3rd_party/folly/test_package/conanfile.py deleted file mode 100644 index 63889d52..00000000 --- a/3rd_party/folly/test_package/conanfile.py +++ /dev/null @@ -1,31 +0,0 @@ -import os -from conan import ConanFile -from conan.tools.cmake import CMake, CMakeToolchain -from conan.tools.build import can_run -from conan.tools.cmake import cmake_layout - -required_conan_version = ">=1.43.0" - -class TestPackageConan(ConanFile): - settings = "os", "compiler", "build_type", "arch" - generators = "CMakeDeps", "VirtualRunEnv" - - def requirements(self): - self.requires(self.tested_reference_str) - - def generate(self): - tc = CMakeToolchain(self) - tc.variables["FOLLY_VERSION"] = self.dependencies["folly"].ref.version - tc.generate() - - def layout(self): - cmake_layout(self) - - def build(self): - cmake = CMake(self) - cmake.configure() - cmake.build() - - def test(self): - if can_run(self): - self.run(os.path.join(self.cpp.build.bindirs[0], "test_package"), env="conanrun") diff --git a/3rd_party/folly/test_package/test_package.cpp b/3rd_party/folly/test_package/test_package.cpp deleted file mode 100644 index ad935988..00000000 --- a/3rd_party/folly/test_package/test_package.cpp +++ /dev/null @@ -1,26 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#if FOLLY_HAVE_ELF -#include -#endif - -int main() { - folly::ThreadedExecutor executor; - auto [promise, future] = folly::makePromiseContract< folly::fbstring >(&executor); - auto unit = std::move(future).thenValue([](auto const value) { - const folly::Uri uri(value); - std::cout << "The authority from " << value << " is " << uri.authority() << std::endl; - }); - promise.setValue("https://github.com/bincrafters"); - std::move(unit).get(); -#if FOLLY_HAVE_ELF - folly::symbolizer::ElfFile elffile; -#endif - return EXIT_SUCCESS; -} diff --git a/3rd_party/folly/test_v1_package/CMakeLists.txt b/3rd_party/folly/test_v1_package/CMakeLists.txt deleted file mode 100644 index f8cc697a..00000000 --- a/3rd_party/folly/test_v1_package/CMakeLists.txt +++ /dev/null @@ -1,17 +0,0 @@ -cmake_minimum_required(VERSION 3.1) -project(test_package CXX) - -include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) -conan_basic_setup(TARGETS) - -find_package(folly CONFIG REQUIRED) - -add_executable(${PROJECT_NAME} test_package.cpp) -target_link_libraries(${PROJECT_NAME} Folly::folly) - - -if (${FOLLY_VERSION} VERSION_LESS "2021.07.20.00") - set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 14) -else() - set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 17) -endif() diff --git a/3rd_party/folly/test_v1_package/conanfile.py b/3rd_party/folly/test_v1_package/conanfile.py deleted file mode 100644 index 8b8cfae4..00000000 --- a/3rd_party/folly/test_v1_package/conanfile.py +++ /dev/null @@ -1,18 +0,0 @@ -from conans import ConanFile, CMake, tools -import os - - -class TestPackageConan(ConanFile): - settings = "os", "compiler", "build_type", "arch" - generators = "cmake", "cmake_find_package_multi" - - def build(self): - cmake = CMake(self) - cmake.definitions["FOLLY_VERSION"] = self.deps_cpp_info["folly"].version - cmake.configure() - cmake.build() - - def test(self): - if not tools.cross_building(self): - bin_path = os.path.join("bin", "test_package") - self.run(command=bin_path, run_environment=True) diff --git a/3rd_party/folly/test_v1_package/test_package.cpp b/3rd_party/folly/test_v1_package/test_package.cpp deleted file mode 100644 index cc522b8b..00000000 --- a/3rd_party/folly/test_v1_package/test_package.cpp +++ /dev/null @@ -1,29 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#if FOLLY_HAVE_ELF -#include -#endif - -static void print_uri(const folly::fbstring& value) { - const folly::Uri uri(value); - std::cout << "The authority from " << value << " is " << uri.authority() << std::endl; -} - -int main() { - folly::ThreadedExecutor executor; - folly::Promise promise; - folly::Future future = promise.getSemiFuture().via(&executor); - folly::Future unit = std::move(future).thenValue(print_uri); - promise.setValue("https://github.com/bincrafters"); - std::move(unit).get(); -#if FOLLY_HAVE_ELF - folly::symbolizer::ElfFile elffile; -#endif - return EXIT_SUCCESS; -} diff --git a/3rd_party/gperftools/conanfile.py b/3rd_party/gperftools/conanfile.py deleted file mode 100644 index f63430b4..00000000 --- a/3rd_party/gperftools/conanfile.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -from conans import ConanFile, AutoToolsBuildEnvironment, tools - -class GPerfToolsConan(ConanFile): - name = "gperftools" - version = "2.7.0" - release = "2.7" - license = "BSD" - - description = "A portable library to determine the call-chain of a C program" - url = "https://github.com/conan-io/conan-center-index" - settings = "os", "arch", "compiler", "build_type" - - options = {"shared": [True, False], "fPIC": [True, False]} - default_options = "shared=False", "fPIC=True" - - requires = (("xz_utils/5.2.4")) - - generators = "compiler_args" - - def source(self): - source_url = "https://github.com/{0}/{0}/releases/download".format(self.name) - tools.get("{0}/{1}-{2}/{1}-{2}.tar.gz".format(source_url, self.name, self.release)) - - def build(self): - env_build = AutoToolsBuildEnvironment(self) - env_build.cxx_flags.append("@conanbuildinfo.args") - if self.settings.build_type != "Debug": - env_build.defines.append('NDEBUG') - configure_args = ['--disable-dependency-tracking', '--enable-libunwind'] - if self.options.shared: - configure_args += ['--enable-shared=yes', '--enable-static=no'] - else: - configure_args += ['--enable-shared=no', '--enable-static=yes'] - env_build.configure(args=configure_args,configure_dir="{0}-{1}".format(self.name, self.release)) - env_build.make(args=["-j1"]) - - def package(self): - headers = ['heap-checker.h', 'heap-profiler.h', 'malloc_extension.h', 'malloc_extension_c.h', - 'malloc_hook.h', 'malloc_hook_c.h', 'profiler.h', 'stacktrace.h', 'tcmalloc.h'] - for header in headers: - self.copy("*{0}".format(header), dst="include/google", src="{0}-{1}/src/google".format(self.name, self.release), keep_path=False) - self.copy("*{0}".format(header), dst="include/gperftools", src="{0}-{1}/src/gperftools".format(self.name, self.release), keep_path=False) - self.copy("*.so*", dst="lib", keep_path=False, symlinks=True) - self.copy("*.a", dst="lib", keep_path=False, symlinks=True) - - def package_info(self): - self.cpp_info.libs = ['tcmalloc_minimal'] diff --git a/3rd_party/jemalloc/conanfile.py b/3rd_party/jemalloc/conanfile.py deleted file mode 100644 index 80c50505..00000000 --- a/3rd_party/jemalloc/conanfile.py +++ /dev/null @@ -1,195 +0,0 @@ -from conans import AutoToolsBuildEnvironment, ConanFile, MSBuild, tools -from conans.errors import ConanInvalidConfiguration -from conans.client.tools import msvs_toolset -import os -import shutil -import string - - -class JemallocConan(ConanFile): - name = "jemalloc" - description = "jemalloc is a general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support." - url = "https://github.com/conan-io/conan-center-index" - license = "BSD-2-Clause" - homepage = "http://jemalloc.net/" - topics = ("conan", "jemalloc", "malloc", "free") - settings = "os", "arch", "compiler" - version = "5.2.1" - source_url = "https://github.com/jemalloc/jemalloc/releases/download/5.2.1/jemalloc-5.2.1.tar.bz2" - options = { - "shared": [True, False], - "fPIC": [True, False], - "prefix": "ANY", - "enable_cxx": [True, False], - "enable_fill": [True, False], - "enable_xmalloc": [True, False], - "enable_readlinkat": [True, False], - "enable_syscall": [True, False], - "enable_lazy_lock": [True, False], - "enable_debug_logging": [True, False], - "enable_initial_exec_tls": [True, False], - "enable_libdl": [True, False], - } - default_options = { - "shared": False, - "fPIC": True, - "prefix": "", - "enable_cxx": True, - "enable_fill": True, - "enable_xmalloc": False, - "enable_readlinkat": False, - "enable_syscall": True, - "enable_lazy_lock": False, - "enable_debug_logging": False, - "enable_initial_exec_tls": True, - "enable_libdl": True, - } - - _autotools = None - - _source_subfolder = "source_subfolder" - - def config_options(self): - if self.settings.os == "Windows": - del self.options.fPIC - - def configure(self): - if self.settings.compiler.get_safe("libcxx") == "libc++": - raise ConanInvalidConfiguration("libc++ is missing a mutex implementation. Remove this when it is added") - if self.settings.compiler == "Visual Studio" and self.settings.compiler.version != "15": - # https://github.com/jemalloc/jemalloc/issues/1703 - raise ConanInvalidConfiguration("Only Visual Studio 15 2017 is supported. Please fix this if other versions are supported") - if self.options.shared: - del self.options.fPIC - if not self.options.enable_cxx: - del self.settings.compiler.libcxx - del self.settings.compiler.cppstd - if self.settings.compiler == "Visual Studio" and self.settings.arch not in ("x86_64", "x86"): - raise ConanInvalidConfiguration("Unsupported arch") - - def source(self): - tools.get(self.source_url) - os.rename("{}-{}".format(self.name, self.version), self._source_subfolder) - - def build_requirements(self): - if tools.os_info.is_windows and not os.environ.get("CONAN_BASH_PATH", None): - self.build_requires("msys2/20190524") - - @property - def _autotools_args(self): - conf_args = [ - "--with-jemalloc-prefix={}".format(self.options.prefix), - "--disable-debug", - "--enable-cxx" if self.options.enable_cxx else "--disable-cxx", - "--enable-fill" if self.options.enable_fill else "--disable-fill", - "--enable-xmalloc" if self.options.enable_cxx else "--disable-xmalloc", - "--enable-readlinkat" if self.options.enable_readlinkat else "--disable-readlinkat", - "--enable-syscall" if self.options.enable_syscall else "--disable-syscall", - "--enable-lazy-lock" if self.options.enable_lazy_lock else "--disable-lazy-lock", - "--enable-log" if self.options.enable_debug_logging else "--disable-log", - "--enable-initial-exec-tld" if self.options.enable_initial_exec_tls else "--disable-initial-exec-tls", - "--enable-libdl" if self.options.enable_libdl else "--disable-libdl", - ] - if self.options.shared: - conf_args.extend(["--enable-shared", "--disable-static"]) - else: - conf_args.extend(["--disable-shared", "--enable-static"]) - return conf_args - - def _configure_autotools(self): - if self._autotools: - return self._autotools - self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows) - self._autotools.configure(args=self._autotools_args, configure_dir=self._source_subfolder) - return self._autotools - - @property - def _msvc_build_type(self): - build_type = "Release" - if not self.options.shared: - build_type += "-static" - return build_type - - def _patch_sources(self): - if self.settings.os == "Windows": - makefile_in = os.path.join(self._source_subfolder, "Makefile.in") - tools.replace_in_file(makefile_in, - "DSO_LDFLAGS = @DSO_LDFLAGS@", - "DSO_LDFLAGS = @DSO_LDFLAGS@ -Wl,--out-implib,lib/libjemalloc.a") - tools.replace_in_file(makefile_in, - "\t$(INSTALL) -d $(LIBDIR)\n" - "\t$(INSTALL) -m 755 $(objroot)lib/$(LIBJEMALLOC).$(SOREV) $(LIBDIR)", - "\t$(INSTALL) -d $(BINDIR)\n" - "\t$(INSTALL) -d $(LIBDIR)\n" - "\t$(INSTALL) -m 755 $(objroot)lib/$(LIBJEMALLOC).$(SOREV) $(BINDIR)\n" - "\t$(INSTALL) -m 644 $(objroot)lib/libjemalloc.a $(LIBDIR)") - - def build(self): - self._patch_sources() - if self.settings.compiler == "Visual Studio": - with tools.vcvars(self.settings) if self.settings.compiler == "Visual Studio" else tools.no_op(): - with tools.environment_append({"CC": "cl", "CXX": "cl"}) if self.settings.compiler == "Visual Studio" else tools.no_op(): - with tools.chdir(self._source_subfolder): - # Do not use AutoToolsBuildEnvironment because we want to run configure as ./configure - self.run("./configure {}".format(" ".join(self._autotools_args)), win_bash=tools.os_info.is_windows) - msbuild = MSBuild(self) - # Do not use the 2015 solution: unresolved external symbols: test_hooks_libc_hook and test_hooks_arena_new_hook - sln_file = os.path.join(self._source_subfolder, "msvc", "jemalloc_vc2017.sln") - msbuild.build(sln_file, targets=["jemalloc"], build_type=self._msvc_build_type) - else: - autotools = self._configure_autotools() - autotools.make() - - @property - def _library_name(self): - libname = "jemalloc" - if self.settings.compiler == "Visual Studio": - if self.options.shared: - if "Release" == "Debug": - libname += "d" - else: - toolset = msvs_toolset(self.settings) - toolset_number = "".join(c for c in toolset if c in string.digits) - libname += "-vc{}-{}".format(toolset_number, self._msvc_build_type) - else: - if self.settings.os == "Windows": - if not self.options.shared: - libname += "_s" - else: - if not self.options.shared and self.options.fPIC: - libname += "_pic" - return libname - - def package(self): - self.copy(pattern="COPYING", src=self._source_subfolder, dst="licenses") - if self.settings.compiler == "Visual Studio": - arch_subdir = { - "x86_64": "x64", - "x86": "x86", - }[str(self.settings.arch)] - self.copy("*.lib", src=os.path.join(self._source_subfolder, "msvc", arch_subdir, self._msvc_build_type), dst=os.path.join(self.package_folder, "lib")) - self.copy("*.dll", src=os.path.join(self._source_subfolder, "msvc", arch_subdir, self._msvc_build_type), dst=os.path.join(self.package_folder, "bin")) - self.copy("jemalloc.h", src=os.path.join(self._source_subfolder, "include", "jemalloc"), dst=os.path.join(self.package_folder, "include", "jemalloc"), keep_path=True) - shutil.copytree(os.path.join(self._source_subfolder, "include", "msvc_compat"), - os.path.join(self.package_folder, "include", "msvc_compat")) - else: - autotools = self._configure_autotools() - # Use install_lib_XXX and install_include to avoid mixing binaries and dll's - autotools.make(target="install_lib_shared" if self.options.shared else "install_lib_static") - autotools.make(target="install_include") - if self.settings.os == "Windows" and self.settings.compiler == "gcc": - os.rename(os.path.join(self.package_folder, "lib", "{}.lib".format(self._library_name)), - os.path.join(self.package_folder, "lib", "lib{}.a".format(self._library_name))) - if not self.options.shared: - os.unlink(os.path.join(self.package_folder, "lib", "jemalloc.lib")) - - def package_info(self): - self.cpp_info.libs = [self._library_name] - self.cpp_info.includedirs = [os.path.join(self.package_folder, "include"), - os.path.join(self.package_folder, "include", "jemalloc")] - if self.settings.compiler == "Visual Studio": - self.cpp_info.includedirs.append(os.path.join(self.package_folder, "include", "msvc_compat")) - if not self.options.shared: - self.cpp_info.defines = ["JEMALLOC_EXPORT="] - if self.settings.os == "Linux": - self.cpp_info.system_libs.extend(["dl", "pthread"]) diff --git a/3rd_party/prerelease_dummy/conanfile.py b/3rd_party/prerelease_dummy/conanfile.py deleted file mode 100644 index 34daaeed..00000000 --- a/3rd_party/prerelease_dummy/conanfile.py +++ /dev/null @@ -1,23 +0,0 @@ -from conans import ConanFile, CMake, tools - -class PrereleaseConan(ConanFile): - name = "prerelease_dummy" - version = "1.0.1" - homepage = "https://github.corp.ebay.com/SDS/prerelease_dummy" - description = "A dummy package to invoke PRERELEASE option" - topics = ("ebay", "nublox") - url = "https://github.corp.ebay.com/SDS/prerelease_dummy" - license = "Apache-2.0" - - settings = () - - exports_sources = ("LICENSE") - - def build(self): - pass - - def package(self): - pass - - def package_info(self): - self.cpp_info.cxxflags.append("-D_PRERELEASE=1") diff --git a/3rd_party/userspace-rcu/conandata.yml b/3rd_party/userspace-rcu/conandata.yml new file mode 100644 index 00000000..9243c443 --- /dev/null +++ b/3rd_party/userspace-rcu/conandata.yml @@ -0,0 +1,4 @@ +sources: + "nu2.0.14.0": + url: "https://github.com/urcu/userspace-rcu/archive/refs/tags/v0.14.0.tar.gz" + sha256: "42fb5129a3fffe5a4b790dfe1ea3a734c69ee095fefbf649326269bba94c262d" diff --git a/3rd_party/userspace-rcu/conanfile.py b/3rd_party/userspace-rcu/conanfile.py new file mode 100644 index 00000000..bc82bbc3 --- /dev/null +++ b/3rd_party/userspace-rcu/conanfile.py @@ -0,0 +1,87 @@ +import os + +from conan import ConanFile +from conan.errors import ConanInvalidConfiguration +from conan.tools.env import VirtualBuildEnv +from conan.tools.files import chdir, copy, get, rm, rmdir +from conan.tools.gnu import Autotools, AutotoolsToolchain +from conan.tools.layout import basic_layout + +required_conan_version = ">=1.53.0" + + +class UserspaceRCUConan(ConanFile): + name = "userspace-rcu" + description = "Userspace RCU (read-copy-update) library" + license = "LGPL-2.1" + url = "https://github.com/conan-io/conan-center-index" + homepage = "https://liburcu.org/" + topics = "urcu" + + package_type = "library" + settings = "os", "arch", "compiler", "build_type" + options = { + "shared": [True, False], + "fPIC": [True, False], + } + default_options = { + "shared": False, + "fPIC": True, + } + + def configure(self): + self.settings.rm_safe("compiler.libcxx") + self.settings.rm_safe("compiler.cppstd") + if self.options.shared: + self.options.rm_safe("fPIC") + + def layout(self): + basic_layout(self, src_folder="src") + + def validate(self): + if self.settings.os not in ["Linux", "FreeBSD", "Macos"]: + raise ConanInvalidConfiguration(f"Building for {self.settings.os} unsupported") + if self.version == "0.11.4" and self.settings.compiler == "apple-clang": + # Fails with "cds_hlist_add_head_rcu.c:19:10: fatal error: 'urcu/urcu-memb.h' file not found" + raise ConanInvalidConfiguration(f"{self.ref} is not compatible with apple-clang") + + def build_requirements(self): + self.tool_requires("libtool/2.4.7") + + def source(self): + get(self, **self.conan_data["sources"][self.version], strip_root=True) + + def generate(self): + env = VirtualBuildEnv(self) + env.generate() + tc = AutotoolsToolchain(self) + tc.generate() + + def build(self): + autotools = Autotools(self) + autotools.autoreconf() + autotools.configure() + autotools.make() + + def package(self): + copy(self, "LICENSE*", + src=self.source_folder, + dst=os.path.join(self.package_folder, "licenses")) + autotools = Autotools(self) + autotools.install() + + rm(self, "*.la", self.package_folder, recursive=True) + rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig")) + rmdir(self, os.path.join(self.package_folder, "share")) + + def package_info(self): + for lib_type in ["", "-bp", "-cds", "-mb", "-memb", "-qsbr", "-signal"]: + component_name = f"urcu{lib_type}" + self.cpp_info.components[component_name].libs = ["urcu-common", component_name] + self.cpp_info.components[component_name].set_property("pkg_config_name", component_name) + if self.settings.os in ["Linux", "FreeBSD"]: + self.cpp_info.components[component_name].system_libs = ["pthread"] + + # Some definitions needed for MB and Signal variants + self.cpp_info.components["urcu-mb"].defines = ["RCU_MB"] + self.cpp_info.components["urcu-signal"].defines = ["RCU_SIGNAL"] diff --git a/CMakeLists.txt b/CMakeLists.txt index 3cb1667f..52d0d67c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -10,22 +10,14 @@ include (cmake/Flags.cmake) set(CMAKE_CXX_STANDARD 20) -if(EXISTS ${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) - include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) - conan_basic_setup(TARGETS) -else () - message("The file conanbuildinfo.cmake doesn't exist, some properties will be unavailable") -endif () - -if (DEFINED ENABLE_TESTING) - if (${ENABLE_TESTING}) - enable_testing() - find_package(GTest QUIET REQUIRED) - endif() +if (NOT BUILD_TESTING STREQUAL OFF) + set(ENABLE_TESTING ON) + enable_testing() + find_package(GTest QUIET REQUIRED) endif() -if (DEFINED CONAN_BUILD_COVERAGE) - if (${CONAN_BUILD_COVERAGE}) +if (DEFINED BUILD_COVERAGE) + if (${BUILD_COVERAGE}) include (cmake/CodeCoverage.cmake) APPEND_COVERAGE_COMPILER_FLAGS() SETUP_TARGET_FOR_COVERAGE_GCOVR_XML(NAME coverage EXECUTABLE ctest DEPENDENCIES ) @@ -64,7 +56,7 @@ if (NOT ${CMAKE_SYSTEM_NAME} STREQUAL "Windows") set(CMAKE_THREAD_PREFER_PTHREAD TRUE) endif() -find_package(benchmark QUIET REQUIRED) +find_package(Benchmark QUIET REQUIRED) find_package(Boost QUIET REQUIRED) find_package(cxxopts QUIET REQUIRED) if (${MALLOC_IMPL} STREQUAL "tcmalloc") @@ -75,17 +67,18 @@ if (${MALLOC_IMPL} STREQUAL "jemalloc") find_package(jemalloc QUIET REQUIRED) endif() -find_package(nlohmann_json QUIET REQUIRED) -find_package(prerelease_dummy QUIET) -find_package(prometheus-cpp QUIET REQUIRED) -find_package(zmarok-semver QUIET REQUIRED) -find_package(spdlog QUIET REQUIRED) -find_package(Threads QUIET REQUIRED) - -# Linux Specific dependencies -find_package(folly QUIET) -find_package(userspace-rcu QUIET) -find_package(breakpad QUIET) +find_package(folly) +find_package(Boost) +find_package(breakpad) +find_package(cxxopts) +find_package(flatbuffers) +find_package(gRPC) +find_package(nlohmann_json) +find_package(prometheus-cpp) +find_package(userspace-rcu) +find_package(spdlog) +find_package(zmarok-semver) +find_package(benchmark) list (APPEND COMMON_DEPS Boost::headers @@ -131,12 +124,12 @@ endif() # add conan information add_flags("-DPACKAGE_NAME=sisl") -if (DEFINED CONAN_PACKAGE_VERSION) - message("Package Version: [${CONAN_PACKAGE_VERSION}]") - add_flags("-DPACKAGE_VERSION=\\\"${CONAN_PACKAGE_VERSION}\\\"") +if (DEFINED PACKAGE_VERSION) + message("Package Version: [${PACKAGE_VERSION}]") + add_flags("-DPACKAGE_VERSION=\\\"${PACKAGE_VERSION}\\\"") else () message("Unknown Package Version") - add_flags("-DPACKAGE_VERSION=\\\"${CONAN_PACKAGE_VERSION}\\\"") + add_flags("-DPACKAGE_VERSION=\\\"unknown\\\"") endif () if(UNIX) diff --git a/README.md b/README.md index 65c8e133..95711f83 100644 --- a/README.md +++ b/README.md @@ -73,19 +73,15 @@ Assuming the conan setup is already done ``` $ ./prepare.sh # this will export some recipes to the conan cache -$ mkdir build -$ cd build +$ // ./prepare_v2.sh for conan >= 2.0 # Install all dependencies -$ conan install .. +$ conan install . -# Build the libsisl.a -$ conan build .. +# Build and Run Tests +$ conan build . ``` -### Without conan -To be Added - ## Contributing to This Project We welcome contributions. If you find any bugs, potential flaws and edge cases, improvements, new feature suggestions or discussions, please submit issues or pull requests. diff --git a/conanfile.py b/conanfile.py index 24e75459..e496308a 100644 --- a/conanfile.py +++ b/conanfile.py @@ -1,14 +1,15 @@ -from os.path import join from conan import ConanFile -from conan.tools.files import copy +from conan.errors import ConanInvalidConfiguration from conan.tools.build import check_min_cppstd -from conans import CMake +from conan.tools.cmake import CMakeToolchain, CMakeDeps, CMake, cmake_layout +from conan.tools.files import copy +from os.path import join -required_conan_version = ">=1.52.0" +required_conan_version = ">=1.60.0" class SISLConan(ConanFile): name = "sisl" - version = "11.0.6" + version = "11.1.1" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" @@ -22,7 +23,6 @@ class SISLConan(ConanFile): "shared": ['True', 'False'], "fPIC": ['True', 'False'], "coverage": ['True', 'False'], - 'testing' : ['True', 'False'], "sanitize": ['True', 'False'], 'prerelease' : ['True', 'False'], 'malloc_impl' : ['libc', 'tcmalloc', 'jemalloc'], @@ -31,99 +31,102 @@ class SISLConan(ConanFile): 'shared': False, 'fPIC': True, 'coverage': False, - 'testing': True, 'sanitize': False, 'prerelease': False, 'malloc_impl': 'libc', } - generators = "cmake", "cmake_find_package" - exports = ["LICENSE"] exports_sources = ( + "LICENSE", "CMakeLists.txt", "cmake/*", "include/*", "src/*", ) + def _min_cppstd(self): + return 20 + def validate(self): - if self.info.settings.compiler.cppstd: - check_min_cppstd(self, 20) + if self.settings.compiler.get_safe("cppstd"): + check_min_cppstd(self, self._min_cppstd) def configure(self): if self.settings.compiler in ["gcc"]: self.options['pistache'].with_ssl: True if self.options.shared: - del self.options.fPIC + self.options.rm_safe("fPIC") if self.settings.build_type == "Debug": - self.options.prerelease = True + self.options.rm_safe("prerelease") if self.options.coverage and self.options.sanitize: raise ConanInvalidConfiguration("Sanitizer does not work with Code Coverage!") - if not self.options.testing: + if self.conf.get("tools.build:skip_test", default=False): if self.options.coverage or self.options.sanitize: raise ConanInvalidConfiguration("Coverage/Sanitizer requires Testing!") def build_requirements(self): - self.build_requires("benchmark/1.8.2") - self.build_requires("cmake/3.27.0") - self.build_requires("gtest/1.14.0") + self.test_requires("benchmark/1.8.2") + self.test_requires("gtest/1.14.0") def requirements(self): - # Custom packages - if self.options.prerelease: - self.requires("prerelease_dummy/1.0.1") - # Memory allocation if self.options.malloc_impl == "tcmalloc": - self.requires("gperftools/2.7.0") + self.requires("gperftools/2.15") elif self.options.malloc_impl == "jemalloc": - self.requires("jemalloc/5.2.1") + self.requires("jemalloc/5.3.0") # Linux Specific Support if self.settings.os in ["Linux"]: - self.requires("folly/nu2.2023.12.11.00") - self.requires("userspace-rcu/0.11.4") + self.requires("folly/nu2.2023.12.18.00") + self.requires("userspace-rcu/nu2.0.14.0") # Generic packages (conan-center) - self.requires("boost/1.82.0") + self.requires("boost/1.83.0") if self.settings.os in ["Linux"]: - self.requires("breakpad/cci.20230127") + self.requires("breakpad/cci.20210521") self.requires("cxxopts/3.1.1") self.requires("flatbuffers/23.5.26") - self.requires("grpc/1.50.1") + self.requires("grpc/1.54.3") self.requires("nlohmann_json/3.11.2") self.requires("prometheus-cpp/1.1.0") self.requires("spdlog/1.12.0") self.requires("zmarok-semver/1.1.0") - self.requires("fmt/10.0.0", override=True) + self.requires("fmt/10.0.0", override=True) self.requires("libcurl/8.4.0", override=True) - self.requires("openssl/3.1.3", override=True) - self.requires("xz_utils/5.2.5", override=True) - self.requires("zlib/1.2.13", override=True) - - def build(self): - cmake = CMake(self) - - definitions = {'CONAN_BUILD_COVERAGE': 'OFF', - 'ENABLE_TESTING': 'OFF', - 'CMAKE_EXPORT_COMPILE_COMMANDS': 'ON', - 'CONAN_CMAKE_SILENT_OUTPUT': 'ON', - 'MEMORY_SANITIZER_ON': 'OFF', - 'MALLOC_IMPL': self.options.malloc_impl} - + self.requires("xz_utils/5.4.5", override=True) + + def layout(self): + cmake_layout(self) + + def generate(self): + # This generates "conan_toolchain.cmake" in self.generators_folder + tc = CMakeToolchain(self) + tc.variables["CONAN_CMAKE_SILENT_OUTPUT"] = "ON" + tc.variables["CTEST_OUTPUT_ON_FAILURE"] = "ON" + tc.variables["MEMORY_SANITIZER_ON"] = "OFF" + tc.variables["BUILD_COVERAGE"] = "OFF" + tc.variables['MALLOC_IMPL'] = self.options.malloc_impl + tc.variables["PACKAGE_VERSION"] = self.version + if self.options.get_safe("prerelease"): + tc.preprocessor_definitions["_PRERELEASE"] = "1" if self.settings.build_type == "Debug": - if self.options.sanitize: - definitions['MEMORY_SANITIZER_ON'] = 'ON' - elif self.options.coverage: - definitions['CONAN_BUILD_COVERAGE'] = 'ON' + tc.preprocessor_definitions["_PRERELEASE"] = "1" + if self.options.get_safe("coverage"): + tc.variables['BUILD_COVERAGE'] = 'ON' + elif self.options.get_safe("sanitize"): + tc.variables['MEMORY_SANITIZER_ON'] = 'ON' + tc.generate() - if self.options.testing: - definitions['ENABLE_TESTING'] = 'ON' + # This generates "boost-config.cmake" and "grpc-config.cmake" etc in self.generators_folder + deps = CMakeDeps(self) + deps.generate() - cmake.configure(defs=definitions) + def build(self): + cmake = CMake(self) + cmake.configure() cmake.build() - if self.options.testing: - cmake.test(output_on_failure=True) + if not self.conf.get("tools.build:skip_test", default=False): + cmake.test() def package(self): lib_dir = join(self.package_folder, "lib") @@ -145,25 +148,42 @@ def package(self): copy(self, "*security_config_generated.h", join(self.build_folder, "src"), gen_dir, keep_path=True) copy(self, "settings_gen.cmake", join(self.source_folder, "cmake"), join(self.package_folder, "cmake"), keep_path=False) + def _add_component(self, lib): + self.cpp_info.components[lib].libs = [lib] + self.cpp_info.components[lib].set_property("pkg_config_name", f"lib{lib}") + def package_info(self): - self.cpp_info.libs = ["sisl"] - - if self.settings.os == "Linux": - self.cpp_info.libs.append("flip") - self.cpp_info.cppflags.append("-D_POSIX_C_SOURCE=200809L") - self.cpp_info.cppflags.append("-D_FILE_OFFSET_BITS=64") - self.cpp_info.cppflags.append("-D_LARGEFILE64") - self.cpp_info.system_libs.extend(["dl", "pthread"]) - self.cpp_info.exelinkflags.extend(["-export-dynamic"]) - - if self.options.sanitize: - self.cpp_info.sharedlinkflags.append("-fsanitize=address") - self.cpp_info.exelinkflags.append("-fsanitize=address") - self.cpp_info.sharedlinkflags.append("-fsanitize=undefined") - self.cpp_info.exelinkflags.append("-fsanitize=undefined") - if self.options.malloc_impl == 'jemalloc': - self.cpp_info.cppflags.append("-DUSE_JEMALLOC=1") - elif self.options.malloc_impl == 'tcmalloc': - self.cpp_info.cppflags.append("-DUSING_TCMALLOC=1") - self.cpp_info.libdirs += self.deps_cpp_info["gperftools"].lib_paths - self.cpp_info.libs += ["tcmalloc"] + self._add_component("sisl") + self._add_component("flip") + + for component in self.cpp_info.components.values(): + component.requires.extend([ + "boost::boost", + "breakpad::breakpad", + "cxxopts::cxxopts", + "folly::folly", + "flatbuffers::flatbuffers", + "spdlog::spdlog", + "grpc::grpc++", + "nlohmann_json::nlohmann_json", + "prometheus-cpp::prometheus-cpp", + "userspace-rcu::userspace-rcu", + "zmarok-semver::zmarok-semver", + ]) + if self.settings.os in ["Linux", "FreeBSD"]: + component.defines.append("_POSIX_C_SOURCE=200809L") + component.defines.append("_FILE_OFFSET_BITS=64") + component.defines.append("_LARGEFILE64") + component.system_libs.extend(["dl", "pthread"]) + component.exelinkflags.extend(["-export-dynamic"]) + if self.options.get_safe("prerelease"): + component.defines.append("_PRERELEASE=1") + if self.options.get_safe("sanitize"): + component.sharedlinkflags.append("-fsanitize=address") + component.exelinkflags.append("-fsanitize=address") + component.sharedlinkflags.append("-fsanitize=undefined") + component.exelinkflags.append("-fsanitize=undefined") + if self.options.malloc_impl == 'jemalloc': + self.cpp_info.defines.append("USE_JEMALLOC=1") + elif self.options.malloc_impl == 'tcmalloc': + self.cpp_info.defines.append("USING_TCMALLOC=1") diff --git a/prepare.sh b/prepare.sh index ef8116a1..ec92a7be 100755 --- a/prepare.sh +++ b/prepare.sh @@ -1,14 +1,8 @@ set -eu echo -n "Exporting custom recipes..." -echo -n "breakpad." -conan export 3rd_party/breakpad breakpad/cci.20230127@ >/dev/null echo -n "folly." -conan export 3rd_party/folly folly/nu2.2023.12.11.00@ >/dev/null -echo -n "gperftools." -conan export 3rd_party/gperftools >/dev/null -echo -n "jemalloc." -conan export 3rd_party/jemalloc >/dev/null -echo -n "prerelease_dummy." -conan export 3rd_party/prerelease_dummy >/dev/null +conan export 3rd_party/folly folly/nu2.2023.12.18.00@ >/dev/null +echo -n "userpace rcu." +conan export 3rd_party/userspace-rcu userspace-rcu/nu2.0.14.0@ >/dev/null echo "done." diff --git a/prepare_v2.sh b/prepare_v2.sh new file mode 100755 index 00000000..480965bb --- /dev/null +++ b/prepare_v2.sh @@ -0,0 +1,8 @@ +set -eu + +echo -n "Exporting custom recipes..." +echo -n "folly." +conan export 3rd_party/folly --name folly --version nu2.2023.12.18.00 >/dev/null +echo -n "folly." +conan export 3rd_party/userspace-rcu --name userspace-rcu --version nu2.0.14.0 >/dev/null +echo "done." diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index ec2bd709..40408586 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -34,7 +34,7 @@ if(${folly_FOUND}) $ ) list(APPEND SISL_DEPS - Folly::Folly + folly::folly breakpad::breakpad ) endif() diff --git a/src/cache/CMakeLists.txt b/src/cache/CMakeLists.txt index d33e8db0..592ec8b4 100644 --- a/src/cache/CMakeLists.txt +++ b/src/cache/CMakeLists.txt @@ -13,7 +13,7 @@ if (DEFINED ENABLE_TESTING) tests/test_range_hashmap.cpp ) target_include_directories(test_range_hashmap BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) - target_link_libraries(test_range_hashmap sisl ${COMMON_DEPS} Folly::Folly GTest::gtest) + target_link_libraries(test_range_hashmap sisl ${COMMON_DEPS} folly::folly GTest::gtest) #add_test(NAME RangeHashMap COMMAND test_range_hashmap --num_iters 10000) add_executable(test_range_cache) @@ -21,7 +21,7 @@ if (DEFINED ENABLE_TESTING) tests/test_range_cache.cpp ) target_include_directories(test_range_cache BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) - target_link_libraries(test_range_cache sisl ${COMMON_DEPS} Folly::Folly GTest::gtest) + target_link_libraries(test_range_cache sisl ${COMMON_DEPS} folly::folly GTest::gtest) #add_test(NAME RangeCache COMMAND test_range_cache --num_iters 1000) add_executable(test_simple_cache) @@ -29,7 +29,7 @@ if (DEFINED ENABLE_TESTING) tests/test_simple_cache.cpp ) target_include_directories(test_simple_cache BEFORE PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) - target_link_libraries(test_simple_cache sisl ${COMMON_DEPS} Folly::Folly GTest::gtest) + target_link_libraries(test_simple_cache sisl ${COMMON_DEPS} folly::folly GTest::gtest) add_test(NAME SimpleCache COMMAND test_simple_cache --num_iters 1000) endif() endif() diff --git a/src/fds/CMakeLists.txt b/src/fds/CMakeLists.txt index f500ece9..c00ee154 100644 --- a/src/fds/CMakeLists.txt +++ b/src/fds/CMakeLists.txt @@ -6,7 +6,7 @@ add_library(sisl_buffer OBJECT) target_sources(sisl_buffer PRIVATE buffer.cpp ) -target_link_libraries(sisl_buffer Folly::Folly ${COMMON_DEPS}) +target_link_libraries(sisl_buffer folly::folly ${COMMON_DEPS}) if (DEFINED ENABLE_TESTING) if (${ENABLE_TESTING}) diff --git a/src/grpc/CMakeLists.txt b/src/grpc/CMakeLists.txt index 7ca0105b..b10493dc 100644 --- a/src/grpc/CMakeLists.txt +++ b/src/grpc/CMakeLists.txt @@ -13,7 +13,7 @@ target_sources(sisl_grpc PRIVATE target_link_libraries(sisl_grpc gRPC::grpc++ flatbuffers::flatbuffers - Folly::Folly + folly::folly ${COMMON_DEPS} ) @@ -21,4 +21,4 @@ if (DEFINED ENABLE_TESTING) if (${ENABLE_TESTING}) add_subdirectory(tests) endif() -endif() \ No newline at end of file +endif() diff --git a/src/grpc/tests/CMakeLists.txt b/src/grpc/tests/CMakeLists.txt index e4090cf8..fc1ba245 100644 --- a/src/grpc/tests/CMakeLists.txt +++ b/src/grpc/tests/CMakeLists.txt @@ -1,7 +1,5 @@ cmake_minimum_required (VERSION 3.11) -find_package(GTest QUIET REQUIRED) - add_subdirectory(proto) enable_testing() diff --git a/src/metrics/CMakeLists.txt b/src/metrics/CMakeLists.txt index 9fb9958c..fd5e7c09 100644 --- a/src/metrics/CMakeLists.txt +++ b/src/metrics/CMakeLists.txt @@ -12,7 +12,7 @@ target_sources(sisl_metrics PRIVATE ) target_link_libraries(sisl_metrics ${COMMON_DEPS} - Folly::Folly + folly::folly ) if (DEFINED ENABLE_TESTING) diff --git a/test_package/CMakeLists.txt b/test_package/CMakeLists.txt index 489b1a68..4378dc15 100644 --- a/test_package/CMakeLists.txt +++ b/test_package/CMakeLists.txt @@ -3,10 +3,7 @@ project(test_package) set(CMAKE_CXX_STANDARD 20) -include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) -conan_basic_setup(TARGETS) - -find_package(sisl CONFIG QUIET REQUIRED) +find_package(sisl QUIET REQUIRED) add_executable(${PROJECT_NAME} test_package.cpp example_decl.cpp) target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_20) diff --git a/test_package/conanfile.py b/test_package/conanfile.py index 3b0b71cb..075654ab 100644 --- a/test_package/conanfile.py +++ b/test_package/conanfile.py @@ -1,18 +1,26 @@ -from conans import ConanFile -from conan.tools.build import cross_building -from conans import CMake +from conan import ConanFile +from conan.tools.build import can_run +from conan.tools.cmake import cmake_layout, CMake import os + class TestPackageConan(ConanFile): settings = "os", "compiler", "build_type", "arch" - generators = "cmake", "cmake_find_package_multi" + generators = "CMakeDeps", "CMakeToolchain", "VirtualRunEnv" + test_type = "explicit" + + def requirements(self): + self.requires(self.tested_reference_str) + + def layout(self): + cmake_layout(self) def build(self): cmake = CMake(self) - cmake.configure(defs={'CONAN_CMAKE_SILENT_OUTPUT': 'ON'}) + cmake.configure() cmake.build() def test(self): - if not cross_building(self): - bin_path = os.path.join("bin", "test_package") - self.run(bin_path, run_environment=True) + if can_run(self): + bin_path = os.path.join(self.cpp.build.bindir, "test_package") + self.run(bin_path, env="conanrun") diff --git a/test_package/test_package.cpp b/test_package/test_package.cpp index 1cc93839..de2800cd 100644 --- a/test_package/test_package.cpp +++ b/test_package/test_package.cpp @@ -1,5 +1,6 @@ #include #include +#include #include SISL_LOGGING_DECL(my_module) From ce80be6a1ec85ce3f656e3a266e739d028f430ff Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Thu, 1 Feb 2024 16:51:31 -0700 Subject: [PATCH 384/385] Re-introduce timespec bug. (#219) --- 3rd_party/folly/conandata.yml | 3 ++ 3rd_party/folly/conanfile.py | 3 +- .../folly/patches/2023-002-timespec.patch | 38 +++++++++++++++++++ conanfile.py | 2 +- 4 files changed, 44 insertions(+), 2 deletions(-) create mode 100644 3rd_party/folly/patches/2023-002-timespec.patch diff --git a/3rd_party/folly/conandata.yml b/3rd_party/folly/conandata.yml index 5841cab6..93d9352f 100644 --- a/3rd_party/folly/conandata.yml +++ b/3rd_party/folly/conandata.yml @@ -10,6 +10,9 @@ patches: - patch_file: "patches/2023-001-compiler-flags.patch" patch_description: "Do not hard-code debug flag for all build types" patch_type: "conan" + - patch_file: "patches/2023-002-timespec.patch" + patch_description: "Fix liburing inclusion of timespec" + patch_type: "conan" "2022.10.31.00": - patch_file: "patches/2022-001-compiler-flags.patch" patch_description: "Do not hard-code debug flag for all build types" diff --git a/3rd_party/folly/conanfile.py b/3rd_party/folly/conanfile.py index 02f9184c..8c0c572c 100755 --- a/3rd_party/folly/conanfile.py +++ b/3rd_party/folly/conanfile.py @@ -74,6 +74,7 @@ def requirements(self): self.requires("lz4/1.9.4", transitive_libs=True) self.requires("snappy/1.1.10") self.requires("zlib/[>=1.2.11 <2]") + self.requires("liburing/[>=2.1]") self.requires("zstd/1.5.5", transitive_libs=True) if not is_msvc(self): self.requires("libdwarf/20191104") @@ -220,7 +221,7 @@ def generate(self): deps.set_property("libiberty", "cmake_file_name", "Libiberty") deps.set_property("libsodium", "cmake_file_name", "Libsodium") deps.set_property("libunwind", "cmake_file_name", "LibUnwind") - # deps.set_property("liburing", "cmake_file_name", "LibUring") + deps.set_property("liburing", "cmake_file_name", "LibUring") deps.set_property("lz4", "cmake_file_name", "LZ4") deps.set_property("openssl", "cmake_file_name", "OpenSSL") deps.set_property("snappy", "cmake_file_name", "Snappy") diff --git a/3rd_party/folly/patches/2023-002-timespec.patch b/3rd_party/folly/patches/2023-002-timespec.patch new file mode 100644 index 00000000..974a120d --- /dev/null +++ b/3rd_party/folly/patches/2023-002-timespec.patch @@ -0,0 +1,38 @@ +diff -Naur a/folly/io/async/AsyncSocket.cpp b/folly/io/async/AsyncSocket.cpp +--- a/folly/io/async/AsyncSocket.cpp 2023-12-08 20:38:13.000000000 -0700 ++++ b/folly/io/async/AsyncSocket.cpp 2023-12-12 10:15:06.023030521 -0700 +@@ -18,6 +18,9 @@ + + #include + ++/* for struct sock_extended_err*/ ++#include ++ + #include + #include + #include +diff -Naur a/folly/io/async/AsyncUDPSocket.cpp b/folly/io/async/AsyncUDPSocket.cpp +--- a/folly/io/async/AsyncUDPSocket.cpp 2023-12-08 20:38:13.000000000 -0700 ++++ b/folly/io/async/AsyncUDPSocket.cpp 2023-12-12 10:19:06.419424565 -0700 +@@ -17,6 +17,9 @@ + #include + #include + ++/* for struct sock_extended_err*/ ++#include ++ + #include + + #include +diff -Naur a/folly/net/NetOps.h b/folly/net/NetOps.h +--- a/folly/net/NetOps.h 2023-12-12 10:16:10.675139766 -0700 ++++ b/folly/net/NetOps.h 2023-12-12 10:15:55.087113425 -0700 +@@ -114,7 +114,7 @@ + #endif + #endif + /* for struct sock_extended_err*/ +-#include ++#include + #endif + #endif + diff --git a/conanfile.py b/conanfile.py index e496308a..a847c06d 100644 --- a/conanfile.py +++ b/conanfile.py @@ -9,7 +9,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "11.1.1" + version = "11.1.2" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" From 6dec7cd5174a67e93d56917e4bca1de459e6509c Mon Sep 17 00:00:00 2001 From: Brian Szmyd Date: Fri, 2 Feb 2024 17:47:26 -0700 Subject: [PATCH 385/385] Some v2 fixes to propgate header dependencies. (#221) --- 3rd_party/folly/conanfile.py | 1 + conanfile.py | 31 ++++++++++++++++--------------- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/3rd_party/folly/conanfile.py b/3rd_party/folly/conanfile.py index 8c0c572c..a924d3b7 100755 --- a/3rd_party/folly/conanfile.py +++ b/3rd_party/folly/conanfile.py @@ -269,6 +269,7 @@ def package_info(self): "openssl::openssl", "bzip2::bzip2", "snappy::snappy", + "liburing::liburing", "zlib::zlib", "zstd::zstd", "libsodium::libsodium", diff --git a/conanfile.py b/conanfile.py index a847c06d..17d83487 100644 --- a/conanfile.py +++ b/conanfile.py @@ -9,7 +9,7 @@ class SISLConan(ConanFile): name = "sisl" - version = "11.1.2" + version = "11.1.3" homepage = "https://github.com/eBay/sisl" description = "Library for fast data structures, utilities" @@ -49,7 +49,7 @@ def _min_cppstd(self): def validate(self): if self.settings.compiler.get_safe("cppstd"): - check_min_cppstd(self, self._min_cppstd) + check_min_cppstd(self, self._min_cppstd()) def configure(self): if self.settings.compiler in ["gcc"]: @@ -71,26 +71,27 @@ def build_requirements(self): def requirements(self): # Memory allocation if self.options.malloc_impl == "tcmalloc": - self.requires("gperftools/2.15") + self.requires("gperftools/2.15", transitive_headers=True) elif self.options.malloc_impl == "jemalloc": - self.requires("jemalloc/5.3.0") + self.requires("jemalloc/5.3.0", transitive_headers=True) # Linux Specific Support if self.settings.os in ["Linux"]: - self.requires("folly/nu2.2023.12.18.00") - self.requires("userspace-rcu/nu2.0.14.0") + self.requires("folly/nu2.2023.12.18.00", transitive_headers=True) + self.requires("userspace-rcu/nu2.0.14.0", transitive_headers=True) # Generic packages (conan-center) - self.requires("boost/1.83.0") + self.requires("boost/1.83.0", transitive_headers=True) + self.requires("cxxopts/3.1.1", transitive_headers=True) + self.requires("flatbuffers/23.5.26", transitive_headers=True) + self.requires("grpc/1.54.3", transitive_headers=True) + self.requires("nlohmann_json/3.11.2", transitive_headers=True) + self.requires("prometheus-cpp/1.1.0", transitive_headers=True) + self.requires("spdlog/1.12.0", transitive_headers=True) + self.requires("zmarok-semver/1.1.0", transitive_headers=True) + if self.settings.os in ["Linux"]: self.requires("breakpad/cci.20210521") - self.requires("cxxopts/3.1.1") - self.requires("flatbuffers/23.5.26") - self.requires("grpc/1.54.3") - self.requires("nlohmann_json/3.11.2") - self.requires("prometheus-cpp/1.1.0") - self.requires("spdlog/1.12.0") - self.requires("zmarok-semver/1.1.0") self.requires("fmt/10.0.0", override=True) self.requires("libcurl/8.4.0", override=True) self.requires("xz_utils/5.4.5", override=True) @@ -164,7 +165,7 @@ def package_info(self): "folly::folly", "flatbuffers::flatbuffers", "spdlog::spdlog", - "grpc::grpc++", + "grpc::grpc", "nlohmann_json::nlohmann_json", "prometheus-cpp::prometheus-cpp", "userspace-rcu::userspace-rcu",