diff --git a/src/svc_rqst.c b/src/svc_rqst.c
index a31fcf61e3..9ba42276f5 100644
--- a/src/svc_rqst.c
+++ b/src/svc_rqst.c
@@ -884,16 +884,15 @@ static void
 svc_rqst_clean_idle(int timeout)
 {
 	struct svc_rqst_clean_arg acc;
-	static mutex_t active_mtx = MUTEX_INITIALIZER;
-	static uint32_t active;
+	static int32_t active;
 
-	if (mutex_trylock(&active_mtx) != 0)
-		return;
-
-	if (active > 0)
-		goto unlock;
-
-	++active;
+	/* Allow only one thread to do this work at any time. active
+	 * starts with 0 and a thread that moves it from 0 to 1 is the
+	 * only one that is allowed to do the work. Others just return
+	 * without doing anything.
+	 */
+	if (atomic_postinc_int32_t(&active) != 0)
+		goto out;
 
 #ifdef _HAVE_GSSAPI
 	/* trim gss context cache */
@@ -901,7 +900,7 @@ svc_rqst_clean_idle(int timeout)
 #endif /* _HAVE_GSSAPI */
 
 	if (timeout <= 0)
-		goto unlock;
+		goto out;
 
 	/* trim xprts (not sorted, not aggressive [but self limiting]) */
 	(void)clock_gettime(CLOCK_MONOTONIC_FAST, &acc.ts);
@@ -910,10 +909,8 @@ svc_rqst_clean_idle(int timeout)
 
 	svc_xprt_foreach(svc_rqst_clean_func, (void *)&acc);
 
- unlock:
-	--active;
-	mutex_unlock(&active_mtx);
-	return;
+out:
+	(void)atomic_postdec_int32_t(&active);
 }
 
 #ifdef TIRPC_EPOLL
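
Note: for readers unfamiliar with the pattern, below is a minimal standalone sketch of the same single-entrant gate, written against C11 <stdatomic.h>. The atomic_postinc_int32_t()/atomic_postdec_int32_t() calls in the patch are ntirpc's own fetch-and-add wrappers; the names and the placeholder body in this sketch are illustrative assumptions, not part of the patch.

#include <stdatomic.h>
#include <stdio.h>

/* Zero-initialized at program start: 0 means nobody is inside the gate.
 * (Hypothetical demo code; only the gate pattern mirrors the patch.)
 */
static atomic_int active;

static void
do_periodic_cleanup(void)
{
	/* atomic_fetch_add() returns the previous value, so only the
	 * caller that moves the counter from 0 to 1 does the work;
	 * every other caller sees a nonzero value and backs off
	 * immediately instead of blocking.
	 */
	if (atomic_fetch_add(&active, 1) != 0)
		goto out;

	printf("cleaning (exactly one thread at a time)\n");

 out:
	/* Losers and the winner alike undo their increment, so the
	 * counter returns to 0 and the gate reopens once everyone
	 * has passed through.
	 */
	(void)atomic_fetch_sub(&active, 1);
}

int
main(void)
{
	do_periodic_cleanup();	/* single-threaded demo call */
	return 0;
}

Compared with the removed mutex_trylock() version, the behavior is the same from the caller's point of view (concurrent callers return instead of waiting); the atomic counter just avoids the lock/unlock pair and the separate flag check.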