Adapt to latest Asio APIs (Asio 1.32 or Boost.Asio 1.87) #477

Draft: wants to merge 1 commit into main
5 changes: 5 additions & 0 deletions .github/workflows/ci-pr-validation.yaml
@@ -115,6 +115,11 @@ jobs:
       - name: Run unit tests
         run: RETRY_FAILED=3 ./run-unit-tests.sh

+      - name: Build with Boost.Asio
+        run: |
+          cmake -B build-boost-asio -DINTEGRATE_VCPKG=ON -DBUILD_TESTS=ON
+          cmake --build build-boost-asio -j8
+
       - name: Build perf tools
         run: |
           cmake . -DINTEGRATE_VCPKG=ON -DBUILD_TESTS=ON -DBUILD_PERF_TOOLS=ON
7 changes: 4 additions & 3 deletions CMakeLists.txt
@@ -19,15 +19,16 @@

 cmake_minimum_required(VERSION 3.13)

-option(USE_ASIO "Use Asio instead of Boost.Asio" OFF)
-
 option(INTEGRATE_VCPKG "Integrate with Vcpkg" OFF)
 if (INTEGRATE_VCPKG)
-    set(USE_ASIO ON)
+    option(USE_ASIO "Use Asio instead of Boost.Asio" ON)
     if (NOT CMAKE_TOOLCHAIN_FILE)
         set(CMAKE_TOOLCHAIN_FILE "${CMAKE_SOURCE_DIR}/vcpkg/scripts/buildsystems/vcpkg.cmake")
     endif ()
+else ()
+    option(USE_ASIO "Use Asio instead of Boost.Asio" OFF)
 endif ()
+message(STATUS "USE_ASIO: ${USE_ASIO}")

 option(BUILD_TESTS "Build tests" ON)
 message(STATUS "BUILD_TESTS: " ${BUILD_TESTS})
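Throughout the diff, the sources refer to a library-neutral ASIO:: namespace alias plus ASIO_ERROR/ASIO_SYSTEM_ERROR macros that the USE_ASIO option toggles between standalone Asio and Boost.Asio. The repository's real alias header is not shown in this PR; the following is a hypothetical sketch of how such a mapping typically looks:

// Hypothetical AsioDefines.h, illustrative only; not part of this PR.
#pragma once

#ifdef USE_ASIO
#include <asio.hpp>
namespace ASIO = asio;
#define ASIO_ERROR asio::error_code
#define ASIO_SYSTEM_ERROR asio::system_error
#else
#include <boost/asio.hpp>
namespace ASIO = boost::asio;
#define ASIO_ERROR boost::system::error_code
#define ASIO_SYSTEM_ERROR boost::system::system_error
#endif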
5 changes: 2 additions & 3 deletions lib/AckGroupingTrackerEnabled.cc
@@ -117,8 +117,7 @@ void AckGroupingTrackerEnabled::close() {
     this->flush();
     std::lock_guard<std::mutex> lock(this->mutexTimer_);
     if (this->timer_) {
-        ASIO_ERROR ec;
-        this->timer_->cancel(ec);
+        this->timer_->cancel();
     }
 }

@@ -168,7 +167,7 @@ void AckGroupingTrackerEnabled::scheduleTimer() {

std::lock_guard<std::mutex> lock(this->mutexTimer_);
this->timer_ = this->executor_->createDeadlineTimer();
this->timer_->expires_from_now(std::chrono::milliseconds(std::max(1L, this->ackGroupingTimeMs_)));
this->timer_->expires_after(std::chrono::milliseconds(std::max(1L, this->ackGroupingTimeMs_)));
auto self = shared_from_this();
this->timer_->async_wait([this, self](const ASIO_ERROR& ec) -> void {
if (!ec) {
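The two changes above are the heart of the migration: Asio 1.32 / Boost.Asio 1.87 removed the deprecated expires_from_now() (replaced by expires_after()) and the cancel(error_code&) overload (plain cancel() now reports failures by throwing). A minimal self-contained sketch of the new calls, using standalone Asio and illustrative names:

#include <asio.hpp>
#include <chrono>
#include <iostream>

int main() {
    asio::io_context io;
    asio::steady_timer timer(io);

    // expires_from_now() is gone; expires_after() takes the same duration.
    timer.expires_after(std::chrono::milliseconds(100));
    timer.async_wait([](const asio::error_code& ec) {
        if (ec == asio::error::operation_aborted) {
            std::cout << "timer cancelled\n";
        }
    });

    // The cancel(error_code&) overload is gone; cancel() throws
    // asio::system_error on failure instead of filling an error code.
    timer.cancel();
    io.run();
    return 0;
}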
305 changes: 137 additions & 168 deletions lib/ClientConnection.cc

Large diffs are not rendered by default.

17 changes: 5 additions & 12 deletions lib/ClientConnection.h
@@ -25,13 +25,13 @@
 #include <atomic>
 #ifdef USE_ASIO
 #include <asio/bind_executor.hpp>
-#include <asio/io_service.hpp>
+#include <asio/io_context.hpp>
 #include <asio/ip/tcp.hpp>
 #include <asio/ssl/stream.hpp>
 #include <asio/strand.hpp>
 #else
 #include <boost/asio/bind_executor.hpp>
-#include <boost/asio/io_service.hpp>
+#include <boost/asio/io_context.hpp>
 #include <boost/asio/ip/tcp.hpp>
 #include <boost/asio/ssl/stream.hpp>
 #include <boost/asio/strand.hpp>
@@ -231,13 +231,8 @@ class PULSAR_PUBLIC ClientConnection : public std::enable_shared_from_this<Clien
         DeadlineTimerPtr timer;
     };

-    /*
-     * handler for connectAsync
-     * creates a ConnectionPtr which has a valid ClientConnection object
-     * although not usable at this point, since this is just tcp connection
-     * Pulsar - Connect/Connected has yet to happen
-     */
-    void handleTcpConnected(const ASIO_ERROR& err, ASIO::ip::tcp::resolver::iterator endpointIterator);
+    void asyncConnect(const std::vector<ASIO::ip::tcp::endpoint>& endpoints, size_t index);
+    void completeConnect(ASIO::ip::tcp::endpoint endpoint);

     void handleHandshake(const ASIO_ERROR& err);

@@ -260,8 +255,6 @@ class PULSAR_PUBLIC ClientConnection : public std::enable_shared_from_this<Clien

     void handlePulsarConnected(const proto::CommandConnected& cmdConnected);

-    void handleResolve(const ASIO_ERROR& err, ASIO::ip::tcp::resolver::iterator endpointIterator);
-
     void handleSend(const ASIO_ERROR& err, const SharedBuffer& cmd);
     void handleSendPair(const ASIO_ERROR& err);
     void sendPendingCommands();
@@ -324,7 +317,7 @@ class PULSAR_PUBLIC ClientConnection : public std::enable_shared_from_this<Clien
      */
     SocketPtr socket_;
     TlsSocketPtr tlsSocket_;
-    ASIO::strand<ASIO::io_service::executor_type> strand_;
+    ASIO::strand<ASIO::io_context::executor_type> strand_;

     const std::string logicalAddress_;
     /*
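Here the iterator-based handleResolve/handleTcpConnected pair gives way to an endpoint-vector walk, matching Asio's removal of the deprecated resolver::iterator results in favor of results_type. A sketch of that index-based pattern under assumed names (the free-standing asyncConnect, localhost, and port 6650 are illustrative, not the PR's actual implementation):

#include <asio.hpp>
#include <iostream>
#include <memory>
#include <vector>

using Endpoints = std::vector<asio::ip::tcp::endpoint>;

// Try endpoints[index]; on failure, fall through to the next endpoint.
void asyncConnect(std::shared_ptr<asio::ip::tcp::socket> socket,
                  std::shared_ptr<Endpoints> endpoints, size_t index) {
    if (index >= endpoints->size()) {
        std::cerr << "all resolved endpoints failed\n";
        return;
    }
    socket->async_connect((*endpoints)[index],
                          [socket, endpoints, index](const asio::error_code& ec) {
                              if (!ec) {
                                  std::cout << "connected to " << (*endpoints)[index] << "\n";
                              } else {
                                  asyncConnect(socket, endpoints, index + 1);
                              }
                          });
}

int main() {
    asio::io_context io;
    auto socket = std::make_shared<asio::ip::tcp::socket>(io);
    asio::ip::tcp::resolver resolver(io);
    // async_resolve now hands the handler a results_type range instead of
    // the removed resolver::iterator.
    resolver.async_resolve("localhost", "6650",
                           [socket](const asio::error_code& ec,
                                    asio::ip::tcp::resolver::results_type results) {
                               if (ec) return;
                               auto endpoints = std::make_shared<Endpoints>(results.begin(),
                                                                            results.end());
                               asyncConnect(socket, endpoints, 0);
                           });
    io.run();
    return 0;
}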
9 changes: 4 additions & 5 deletions lib/ConsumerImpl.cc
@@ -422,7 +422,7 @@ void ConsumerImpl::discardChunkMessages(std::string uuid, MessageId messageId, b
 }

 void ConsumerImpl::triggerCheckExpiredChunkedTimer() {
-    checkExpiredChunkedTimer_->expires_from_now(milliseconds(expireTimeOfIncompleteChunkedMessageMs_));
+    checkExpiredChunkedTimer_->expires_after(milliseconds(expireTimeOfIncompleteChunkedMessageMs_));
     std::weak_ptr<ConsumerImplBase> weakSelf{shared_from_this()};
     checkExpiredChunkedTimer_->async_wait([this, weakSelf](const ASIO_ERROR& ec) -> void {
         auto self = weakSelf.lock();
@@ -1668,7 +1668,7 @@ void ConsumerImpl::internalGetLastMessageIdAsync(const BackoffPtr& backoff, Time
     }
     remainTime -= next;

-    timer->expires_from_now(next);
+    timer->expires_after(next);

     auto self = shared_from_this();
     timer->async_wait([this, backoff, remainTime, timer, next, callback,
@@ -1791,9 +1791,8 @@ std::shared_ptr<ConsumerImpl> ConsumerImpl::get_shared_this_ptr() {
 }

 void ConsumerImpl::cancelTimers() noexcept {
-    ASIO_ERROR ec;
-    batchReceiveTimer_->cancel(ec);
-    checkExpiredChunkedTimer_->cancel(ec);
+    batchReceiveTimer_->cancel();
+    checkExpiredChunkedTimer_->cancel();
     unAckedMessageTrackerPtr_->stop();
     consumerStatsBasePtr_->stop();
 }
2 changes: 1 addition & 1 deletion lib/ConsumerImplBase.cc
@@ -51,7 +51,7 @@ ConsumerImplBase::ConsumerImplBase(ClientImplPtr client, const std::string& topi

 void ConsumerImplBase::triggerBatchReceiveTimerTask(long timeoutMs) {
     if (timeoutMs > 0) {
-        batchReceiveTimer_->expires_from_now(std::chrono::milliseconds(timeoutMs));
+        batchReceiveTimer_->expires_after(std::chrono::milliseconds(timeoutMs));
         std::weak_ptr<ConsumerImplBase> weakSelf{shared_from_this()};
         batchReceiveTimer_->async_wait([weakSelf](const ASIO_ERROR& ec) {
             auto self = weakSelf.lock();
39 changes: 20 additions & 19 deletions lib/ExecutorService.cc
@@ -18,6 +18,12 @@
  */
 #include "ExecutorService.h"

+#ifdef USE_ASIO
+#include <asio/post.hpp>
+#else
+#include <boost/asio/post.hpp>
+#endif
+
 #include "LogUtils.h"
 #include "TimeUtils.h"
 DECLARE_LOG_OBJECT()
@@ -31,18 +37,13 @@ ExecutorService::~ExecutorService() { close(0); }
 void ExecutorService::start() {
     auto self = shared_from_this();
     std::thread t{[this, self] {
-        LOG_DEBUG("Run io_service in a single thread");
-        ASIO_ERROR ec;
+        LOG_DEBUG("Run io_context in a single thread");
         while (!closed_) {
-            io_service_.restart();
-            IOService::work work{getIOService()};
-            io_service_.run(ec);
-        }
-        if (ec) {
-            LOG_ERROR("Failed to run io_service: " << ec.message());
-        } else {
-            LOG_DEBUG("Event loop of ExecutorService exits successfully");
+            io_context_.restart();
+            auto work{ASIO::make_work_guard(io_context_)};
+            io_context_.run();
         }
+        LOG_DEBUG("Event loop of ExecutorService exits successfully");
         {
             std::lock_guard<std::mutex> lock{mutex_};
             ioServiceDone_ = true;
@@ -63,12 +64,12 @@ ExecutorServicePtr ExecutorService::create() {
 }

 /*
- * factory method of ASIO::ip::tcp::socket associated with io_service_ instance
+ * factory method of ASIO::ip::tcp::socket associated with io_context_ instance
  * @ returns shared_ptr to this socket
  */
 SocketPtr ExecutorService::createSocket() {
     try {
-        return SocketPtr(new ASIO::ip::tcp::socket(io_service_));
+        return SocketPtr(new ASIO::ip::tcp::socket(io_context_));
     } catch (const ASIO_SYSTEM_ERROR &e) {
         restart();
         auto error = std::string("Failed to create socket: ") + e.what();
@@ -82,12 +83,12 @@ TlsSocketPtr ExecutorService::createTlsSocket(SocketPtr &socket, ASIO::ssl::cont
 }

 /*
- * factory method of Resolver object associated with io_service_ instance
+ * factory method of Resolver object associated with io_context_ instance
  * @returns shraed_ptr to resolver object
  */
 TcpResolverPtr ExecutorService::createTcpResolver() {
     try {
-        return TcpResolverPtr(new ASIO::ip::tcp::resolver(io_service_));
+        return TcpResolverPtr(new ASIO::ip::tcp::resolver(io_context_));
     } catch (const ASIO_SYSTEM_ERROR &e) {
         restart();
         auto error = std::string("Failed to create resolver: ") + e.what();
@@ -97,36 +98,36 @@ TcpResolverPtr ExecutorService::createTcpResolver() {

 DeadlineTimerPtr ExecutorService::createDeadlineTimer() {
     try {
-        return DeadlineTimerPtr(new ASIO::steady_timer(io_service_));
+        return DeadlineTimerPtr(new ASIO::steady_timer(io_context_));
     } catch (const ASIO_SYSTEM_ERROR &e) {
         restart();
         auto error = std::string("Failed to create steady_timer: ") + e.what();
         throw std::runtime_error(error);
     }
 }

-void ExecutorService::restart() { io_service_.stop(); }
+void ExecutorService::restart() { io_context_.stop(); }

 void ExecutorService::close(long timeoutMs) {
     bool expectedState = false;
     if (!closed_.compare_exchange_strong(expectedState, true)) {
         return;
     }
     if (timeoutMs == 0) { // non-blocking
-        io_service_.stop();
+        io_context_.stop();
         return;
     }

     std::unique_lock<std::mutex> lock{mutex_};
-    io_service_.stop();
+    io_context_.stop();
     if (timeoutMs > 0) {
         cond_.wait_for(lock, std::chrono::milliseconds(timeoutMs), [this] { return ioServiceDone_; });
     } else { // < 0
         cond_.wait(lock, [this] { return ioServiceDone_; });
     }
 }

-void ExecutorService::postWork(std::function<void(void)> task) { io_service_.post(task); }
+void ExecutorService::postWork(std::function<void(void)> task) { ASIO::post(io_context_, task); }

 /////////////////////

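Two library changes drive this file: the nested io_context::work helper was removed in favor of asio::make_work_guard(), and the post() member function was removed in favor of the free function asio::post(). A small sketch of both, with illustrative names:

#include <asio.hpp>
#include <iostream>
#include <thread>

int main() {
    asio::io_context io;
    // The work guard keeps io.run() from returning while no handlers are queued.
    auto work = asio::make_work_guard(io);
    std::thread t([&io] { io.run(); });

    // The free function replaces the removed io_context::post() member.
    asio::post(io, [] { std::cout << "task ran on the event loop\n"; });

    work.reset();  // let run() return once pending handlers finish
    t.join();
    return 0;
}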
12 changes: 6 additions & 6 deletions lib/ExecutorService.h
@@ -23,11 +23,11 @@

 #include <atomic>
 #ifdef USE_ASIO
-#include <asio/io_service.hpp>
+#include <asio/io_context.hpp>
 #include <asio/ip/tcp.hpp>
 #include <asio/ssl.hpp>
 #else
-#include <boost/asio/io_service.hpp>
+#include <boost/asio/io_context.hpp>
 #include <boost/asio/ip/tcp.hpp>
 #include <boost/asio/ssl.hpp>
 #endif
@@ -46,7 +46,7 @@ typedef std::shared_ptr<ASIO::ssl::stream<ASIO::ip::tcp::socket &> > TlsSocketPt
 typedef std::shared_ptr<ASIO::ip::tcp::resolver> TcpResolverPtr;
 class PULSAR_PUBLIC ExecutorService : public std::enable_shared_from_this<ExecutorService> {
    public:
-    using IOService = ASIO::io_service;
+    using IOService = ASIO::io_context;
     using SharedPtr = std::shared_ptr<ExecutorService>;

     static SharedPtr create();
@@ -67,14 +67,14 @@ class PULSAR_PUBLIC ExecutorService : public std::enable_shared_from_this<Execut
     // See TimeoutProcessor for the semantics of the parameter.
     void close(long timeoutMs = 3000);

-    IOService &getIOService() { return io_service_; }
+    IOService &getIOService() { return io_context_; }
     bool isClosed() const noexcept { return closed_; }

    private:
     /*
-     * io_service is our interface to os, io object schedule async ops on this object
+     * io_context is our interface to os, io object schedule async ops on this object
      */
-    IOService io_service_;
+    IOService io_context_;

     std::atomic_bool closed_{false};
     std::mutex mutex_;
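Related to the io_context rename: ClientConnection.h above retypes its strand member as ASIO::strand<ASIO::io_context::executor_type>, since io_service survives only as a removed alias. A short sketch of that strand type serializing handlers (illustrative names):

#include <asio.hpp>
#include <iostream>

int main() {
    asio::io_context io;
    asio::strand<asio::io_context::executor_type> strand{io.get_executor()};

    // Handlers posted through the same strand never run concurrently,
    // even when io.run() is called from multiple threads.
    asio::post(strand, [] { std::cout << "first\n"; });
    asio::post(strand, [] { std::cout << "second\n"; });
    io.run();
    return 0;
}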
15 changes: 6 additions & 9 deletions lib/HandlerBase.cc
@@ -50,9 +50,8 @@ HandlerBase::HandlerBase(const ClientImplPtr& client, const std::string& topic,
       redirectedClusterURI_("") {}

 HandlerBase::~HandlerBase() {
-    ASIO_ERROR ignored;
-    timer_->cancel(ignored);
-    creationTimer_->cancel(ignored);
+    timer_->cancel();
+    creationTimer_->cancel();
 }

 void HandlerBase::start() {
@@ -61,15 +60,14 @@ void HandlerBase::start() {
     if (state_.compare_exchange_strong(state, Pending)) {
         grabCnx();
     }
-    creationTimer_->expires_from_now(operationTimeut_);
+    creationTimer_->expires_after(operationTimeut_);
     std::weak_ptr<HandlerBase> weakSelf{shared_from_this()};
     creationTimer_->async_wait([this, weakSelf](const ASIO_ERROR& error) {
         auto self = weakSelf.lock();
         if (self && !error) {
             LOG_WARN("Cancel the pending reconnection due to the start timeout");
             connectionFailed(ResultTimeout);
-            ASIO_ERROR ignored;
-            timer_->cancel(ignored);
+            timer_->cancel();
         }
     });
 }
@@ -133,8 +131,7 @@ void HandlerBase::grabCnx(const boost::optional<std::string>& assignedBrokerUrl)
             connectionTimeMs_ =
                 duration_cast<milliseconds>(high_resolution_clock::now() - before).count();
             // Prevent the creationTimer_ from cancelling the timer_ in future
-            ASIO_ERROR ignored;
-            creationTimer_->cancel(ignored);
+            creationTimer_->cancel();
             LOG_INFO("Finished connecting to broker after " << connectionTimeMs_ << " ms")
         } else if (isResultRetryable(result)) {
             scheduleReconnection();
@@ -188,7 +185,7 @@ void HandlerBase::scheduleReconnection(const boost::optional<std::string>& assig
     TimeDuration delay = assignedBrokerUrl ? std::chrono::milliseconds(0) : backoff_.next();

     LOG_INFO(getName() << "Schedule reconnection in " << (toMillis(delay) / 1000.0) << " s");
-    timer_->expires_from_now(delay);
+    timer_->expires_after(delay);
     // passing shared_ptr here since time_ will get destroyed, so tasks will be cancelled
     // so we will not run into the case where grabCnx is invoked on out of scope handler
     auto name = getName();
5 changes: 2 additions & 3 deletions lib/MultiTopicsConsumerImpl.cc
@@ -962,7 +962,7 @@ uint64_t MultiTopicsConsumerImpl::getNumberOfConnectedConsumer() {
     return numberOfConnectedConsumer;
 }
 void MultiTopicsConsumerImpl::runPartitionUpdateTask() {
-    partitionsUpdateTimer_->expires_from_now(partitionsUpdateInterval_);
+    partitionsUpdateTimer_->expires_after(partitionsUpdateInterval_);
     auto weakSelf = weak_from_this();
     partitionsUpdateTimer_->async_wait([weakSelf](const ASIO_ERROR& ec) {
         // If two requests call runPartitionUpdateTask at the same time, the timer will fail, and it
@@ -1115,8 +1115,7 @@ void MultiTopicsConsumerImpl::beforeConnectionChange(ClientConnection& cnx) {

 void MultiTopicsConsumerImpl::cancelTimers() noexcept {
     if (partitionsUpdateTimer_) {
-        ASIO_ERROR ec;
-        partitionsUpdateTimer_->cancel(ec);
+        partitionsUpdateTimer_->cancel();
     }
 }

5 changes: 2 additions & 3 deletions lib/NegativeAcksTracker.cc
@@ -50,7 +50,7 @@ void NegativeAcksTracker::scheduleTimer() {
         return;
     }
     std::weak_ptr<NegativeAcksTracker> weakSelf{shared_from_this()};
-    timer_->expires_from_now(timerInterval_);
+    timer_->expires_after(timerInterval_);
     timer_->async_wait([weakSelf](const ASIO_ERROR &ec) {
         if (auto self = weakSelf.lock()) {
             self->handleTimer(ec);
@@ -107,8 +107,7 @@ void NegativeAcksTracker::add(const MessageId &m) {

 void NegativeAcksTracker::close() {
     closed_ = true;
-    ASIO_ERROR ec;
-    timer_->cancel(ec);
+    timer_->cancel();
     std::lock_guard<std::mutex> lock(mutex_);
     nackedMessages_.clear();
 }
5 changes: 2 additions & 3 deletions lib/PartitionedProducerImpl.cc
@@ -421,7 +421,7 @@ void PartitionedProducerImpl::flushAsync(FlushCallback callback) {

 void PartitionedProducerImpl::runPartitionUpdateTask() {
     auto weakSelf = weak_from_this();
-    partitionsUpdateTimer_->expires_from_now(partitionsUpdateInterval_);
+    partitionsUpdateTimer_->expires_after(partitionsUpdateInterval_);
     partitionsUpdateTimer_->async_wait([weakSelf](const ASIO_ERROR& ec) {
         auto self = weakSelf.lock();
         if (self) {
@@ -524,8 +524,7 @@ uint64_t PartitionedProducerImpl::getNumberOfConnectedProducer() {

 void PartitionedProducerImpl::cancelTimers() noexcept {
     if (partitionsUpdateTimer_) {
-        ASIO_ERROR ec;
-        partitionsUpdateTimer_->cancel(ec);
+        partitionsUpdateTimer_->cancel();
     }
 }
