
Commit 6a2d7a9

Eric Dumazet authored and Linus Torvalds committed
[PATCH] SLAB: use a multiply instead of a divide in obj_to_index()
When some objects are allocated by one CPU but freed by another CPU we can
consume a lot of cycles doing divides in obj_to_index().

(A typical load on a dual-processor machine where network interrupts are
handled by one particular CPU (allocating skbufs), and the other CPU is
running the application (consuming and freeing skbufs).)

Here on one production server (dual-core AMD Opteron 285), I noticed this
divide took 1.20 % of CPU_CLK_UNHALTED events in the kernel. But Opterons
are quite modern CPUs, and the divide is much more expensive on older
architectures: on a 200 MHz sparcv9 machine, the division takes 64 cycles
instead of 1 cycle for a multiply.

Doing some math, we can use a reciprocal multiplication instead of a divide.

If we want to compute V = (A / B), with A and B being u32 quantities, we can
instead use:

    V = ((u64)A * RECIPROCAL(B)) >> 32;

where RECIPROCAL(B) is precalculated to ((1LL << 32) + (B - 1)) / B.

Note: I wrote pure C code for clarity. gcc output for i386 is not optimal
but acceptable:

    mull   0x14(%ebx)
    mov    %edx,%eax      // part of the >> 32
    xor    %edx,%edx      // useless
    mov    %eax,(%esp)    // could be avoided
    mov    %edx,0x4(%esp) // useless
    mov    (%esp),%ebx

[[email protected]: small cleanups]
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: David Miller <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
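To make the arithmetic concrete, here is a minimal user-space sketch of the
identity above (illustrative only, not part of the commit; A and B are
arbitrary sample values):

    #include <stdint.h>
    #include <stdio.h>

    /* Precompute RECIPROCAL(B) once; B is known, but not at compile time. */
    static uint32_t reciprocal(uint32_t B)
    {
            return (uint32_t)((((uint64_t)1 << 32) + (B - 1)) / B);
    }

    int main(void)
    {
            uint32_t A = 123456789, B = 192;   /* arbitrary sample values */
            uint32_t R = reciprocal(B);        /* the one divide, done up front */
            uint32_t V = (uint32_t)(((uint64_t)A * R) >> 32); /* multiply + shift */

            printf("A / B = %u, reciprocal result = %u\n", A / B, V); /* both 643004 */
            return 0;
    }

Each subsequent divide by B then costs one widening multiply and one shift,
which is the saving measured above.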
1 parent 02a0e53 commit 6a2d7a9

File tree

4 files changed: +57 -4 lines


include/linux/reciprocal_div.h

+32

@@ -0,0 +1,32 @@
+#ifndef _LINUX_RECIPROCAL_DIV_H
+#define _LINUX_RECIPROCAL_DIV_H
+
+#include <linux/types.h>
+
+/*
+ * This file describes reciprocal division.
+ *
+ * This optimizes the (A/B) problem, when A and B are two u32
+ * and B is a known value (but not known at compile time)
+ *
+ * The math principle used is:
+ * Let RECIPROCAL_VALUE(B) be (((1LL << 32) + (B - 1)) / B)
+ * Then A / B = (u32)(((u64)(A) * (R)) >> 32)
+ *
+ * This replaces a divide by a multiply (and a shift), and
+ * is generally less expensive in CPU cycles.
+ */
+
+/*
+ * Computes the reciprocal value (R) for the value B of the divisor.
+ * Should not be called before each reciprocal_divide(),
+ * or else the performance is slower than a normal divide.
+ */
+extern u32 reciprocal_value(u32 B);
+
+
+static inline u32 reciprocal_divide(u32 A, u32 R)
+{
+	return (u32)(((u64)A * R) >> 32);
+}
+#endif
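One subtlety worth noting (my observation, not stated in the commit): with
R = ((1LL << 32) + (B - 1)) / B, reciprocal_divide(A, R) equals A / B exactly
whenever B divides A, which is always the case for a slab offset, but it can
round up by one for some arbitrary A close to 2^32. A small user-space check
of both behaviours (reciprocal_value() reimplemented with a plain 64-bit
divide):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t reciprocal_value(uint32_t k)
    {
            return (uint32_t)((((uint64_t)1 << 32) + (k - 1)) / k);
    }

    static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
    {
            return (uint32_t)(((uint64_t)a * r) >> 32);
    }

    int main(void)
    {
            uint32_t r = reciprocal_value(10);

            /* Exact when the dividend is a multiple of the divisor. */
            assert(reciprocal_divide(4294967290u, r) == 429496729u);

            /* Rounds up here: 4294967289 / 10 is really 429496728. */
            assert(reciprocal_divide(4294967289u, r) == 429496729u);
            return 0;
    }

This is why the slab code below only ever feeds it offsets that are exact
multiples of buffer_size.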

lib/Makefile

+1 -1

@@ -5,7 +5,7 @@
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
	 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
	 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
-	 sha1.o irq_regs.o
+	 sha1.o irq_regs.o reciprocal_div.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o

lib/reciprocal_div.c

+9

@@ -0,0 +1,9 @@
+#include <asm/div64.h>
+#include <linux/reciprocal_div.h>
+
+u32 reciprocal_value(u32 k)
+{
+	u64 val = (1LL << 32) + (k - 1);
+	do_div(val, k);
+	return (u32)val;
+}
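For completeness, the header and this implementation can be exercised
together in user space (a sketch assuming a plain 64-bit division can stand
in for the kernel's do_div(); the loop bounds are arbitrary):

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t u32;
    typedef uint64_t u64;

    /* As in lib/reciprocal_div.c, with val / k standing in for do_div(). */
    static u32 reciprocal_value(u32 k)
    {
            u64 val = (1ULL << 32) + (k - 1);
            return (u32)(val / k);
    }

    static inline u32 reciprocal_divide(u32 A, u32 R)
    {
            return (u32)(((u64)A * R) >> 32);
    }

    int main(void)
    {
            u32 B, idx;

            /* Slab-like pattern: the dividend is always idx * B. */
            for (B = 8; B <= 4096; B += 8) {
                    u32 R = reciprocal_value(B);

                    for (idx = 0; idx < 1000; idx++)
                            assert(reciprocal_divide(idx * B, R) == idx);
            }
            return 0;
    }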

mm/slab.c

+15 -3

@@ -109,6 +109,7 @@
 #include <linux/mutex.h>
 #include <linux/fault-inject.h>
 #include <linux/rtmutex.h>
+#include <linux/reciprocal_div.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -386,6 +387,7 @@ struct kmem_cache {
 	unsigned int shared;
 
 	unsigned int buffer_size;
+	u32 reciprocal_buffer_size;
 	/* 3) touched by every alloc & free from the backend */
 	struct kmem_list3 *nodelists[MAX_NUMNODES];
 
@@ -627,10 +629,17 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
 	return slab->s_mem + cache->buffer_size * idx;
 }
 
-static inline unsigned int obj_to_index(struct kmem_cache *cache,
-					struct slab *slab, void *obj)
+/*
+ * We want to avoid an expensive divide: (offset / cache->buffer_size).
+ * Using the fact that buffer_size is a constant for a particular cache,
+ * we can replace (offset / cache->buffer_size) by
+ * reciprocal_divide(offset, cache->reciprocal_buffer_size)
+ */
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+					const struct slab *slab, void *obj)
 {
-	return (unsigned)(obj - slab->s_mem) / cache->buffer_size;
+	u32 offset = (obj - slab->s_mem);
+	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
 }
 
 /*
@@ -1427,6 +1436,8 @@ void __init kmem_cache_init(void)
 
 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
					cache_line_size());
+	cache_cache.reciprocal_buffer_size =
+		reciprocal_value(cache_cache.buffer_size);
 
 	for (order = 0; order < MAX_ORDER; order++) {
 		cache_estimate(order, cache_cache.buffer_size,
@@ -2313,6 +2324,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if (flags & SLAB_CACHE_DMA)
 		cachep->gfpflags |= GFP_DMA;
 	cachep->buffer_size = size;
+	cachep->reciprocal_buffer_size = reciprocal_value(size);
 
 	if (flags & CFLGS_OFF_SLAB) {
 		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
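To see the new obj_to_index() round-trip against index_to_obj(), here is a
user-space mock (the structs are cut down to only the fields this patch
touches, and the backing buffer and sizes are made-up sample values):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uint32_t u32;
    typedef uint64_t u64;

    /* Cut-down stand-ins for the kernel structures. */
    struct kmem_cache {
            unsigned int buffer_size;
            u32 reciprocal_buffer_size;
    };

    struct slab {
            void *s_mem;            /* address of the first object */
    };

    static u32 reciprocal_value(u32 k)
    {
            return (u32)((((u64)1 << 32) + (k - 1)) / k);
    }

    static inline u32 reciprocal_divide(u32 A, u32 R)
    {
            return (u32)(((u64)A * R) >> 32);
    }

    static void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
                              unsigned int idx)
    {
            return (char *)slab->s_mem + (size_t)cache->buffer_size * idx;
    }

    static unsigned int obj_to_index(const struct kmem_cache *cache,
                                     const struct slab *slab, void *obj)
    {
            u32 offset = (u32)((char *)obj - (char *)slab->s_mem);

            return reciprocal_divide(offset, cache->reciprocal_buffer_size);
    }

    int main(void)
    {
            char backing[192 * 64];
            struct kmem_cache cache = { .buffer_size = 192 };
            struct slab slab = { .s_mem = backing };
            unsigned int idx;

            cache.reciprocal_buffer_size = reciprocal_value(cache.buffer_size);

            for (idx = 0; idx < 64; idx++)
                    assert(obj_to_index(&cache, &slab,
                                        index_to_obj(&cache, &slab, idx)) == idx);
            return 0;
    }

The offset handed to reciprocal_divide() is always idx * buffer_size, an
exact multiple of the divisor, so the reciprocal result is exact.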
