Skip to content

Commit

Permalink
Initial support for valgrind
Browse files Browse the repository at this point in the history
See #3
  • Loading branch information
rtsisyk committed Dec 22, 2016
1 parent d04a5c6 commit c18051d
Show file tree
Hide file tree
Showing 7 changed files with 7,526 additions and 11 deletions.
10 changes: 10 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,9 @@ endif()
# Enable GNU glibc extensions.
add_definitions("-D_GNU_SOURCE")

# Valgrind
include_directories(third_party)

set(lib_headers
small/ibuf.h
small/lf_lifo.h
Expand Down Expand Up @@ -54,6 +57,13 @@ if(DEFINED SMALL_EMBEDDED)
return()
endif()

# Valgrind integration is opt-in; by default the client-request macros
# are compiled out.
option(ENABLE_VALGRIND "Enable integration with valgrind, a memory analyzing tool" OFF)
if (NOT ENABLE_VALGRIND)
# Defining NVALGRIND makes the valgrind/memcheck client-request macros
# expand to no-ops (see valgrind/valgrind.h).
add_definitions(-DNVALGRIND=1)
# The no-op expansions are bare expression statements, which trigger
# -Wunused-value; silence that warning for both C and C++ units.
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-unused-value")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-value")
endif()

add_library(${PROJECT_NAME}_shared SHARED ${lib_sources})
set_target_properties(${PROJECT_NAME}_shared PROPERTIES VERSION 1.0 SOVERSION 1)
set_target_properties(${PROJECT_NAME}_shared PROPERTIES OUTPUT_NAME ${PROJECT_NAME})
Expand Down
10 changes: 9 additions & 1 deletion small/mempool.c
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,9 @@
#include "mempool.h"
#include <stdlib.h>
#include <string.h>
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>

#include "slab_cache.h"

/* slab fragmentation must reach 1/8 before it's recycled */
Expand Down Expand Up @@ -90,6 +93,8 @@ mslab_free(struct mempool *pool, struct mslab *slab, void *ptr)
/* put object to garbage list */
*(void **)ptr = slab->free_list;
slab->free_list = ptr;
VALGRIND_FREELIKE_BLOCK(ptr, 0);
VALGRIND_MAKE_MEM_DEFINED(ptr, sizeof(void *));

slab->nfree++;

Expand Down Expand Up @@ -195,7 +200,10 @@ mempool_alloc(struct mempool *pool)
pool->first_hot_slab = slab;
}
pool->slabs.stats.used += pool->objsize;
return mslab_alloc(pool, slab);
void *ptr = mslab_alloc(pool, slab);
assert(ptr != NULL);
VALGRIND_MALLOCLIKE_BLOCK(ptr, pool->objsize, 0, 0);
return ptr;
}

void
Expand Down
3 changes: 3 additions & 0 deletions small/region.c
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,8 @@
*/
#include "region.h"
#include <sys/types.h> /* ssize_t */
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>

void *
region_reserve_slow(struct region *region, size_t size)
Expand All @@ -48,6 +50,7 @@ region_reserve_slow(struct region *region, size_t size)
* region_truncate() won't work.
*/
slab_list_add(&region->slabs, &slab->slab, next_in_list);
VALGRIND_MALLOCLIKE_BLOCK(rslab_data(slab), rslab_unused(slab), 0, 0);
return rslab_data(slab);
}

Expand Down
24 changes: 19 additions & 5 deletions small/slab_arena.c
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,8 @@
#include <assert.h>
#include <limits.h>
#include "pmatomic.h"
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>

#if !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
Expand Down Expand Up @@ -127,6 +129,8 @@ slab_arena_create(struct slab_arena *arena, struct quota *quota,
{
assert(flags & (MAP_PRIVATE | MAP_SHARED));
lf_lifo_init(&arena->cache);
VALGRIND_MAKE_MEM_DEFINED(&arena->cache, sizeof(struct lf_lifo));

/*
* Round up the user supplied data - it can come in
* directly from the configuration file. Allow
Expand Down Expand Up @@ -178,32 +182,42 @@ void *
slab_map(struct slab_arena *arena)
{
void *ptr;
if ((ptr = lf_lifo_pop(&arena->cache)))
if ((ptr = lf_lifo_pop(&arena->cache))) {
VALGRIND_MAKE_MEM_UNDEFINED(ptr, arena->slab_size);
return ptr;
}

if (quota_use(arena->quota, arena->slab_size) < 0)
return NULL;

/** Need to allocate a new slab. */
size_t used = pm_atomic_fetch_add(&arena->used, arena->slab_size);
used += arena->slab_size;
if (used <= arena->prealloc)
return arena->arena + used - arena->slab_size;
if (used <= arena->prealloc) {
ptr = arena->arena + used - arena->slab_size;
VALGRIND_MAKE_MEM_UNDEFINED(ptr, arena->slab_size);
return ptr;
}

ptr = mmap_checked(arena->slab_size, arena->slab_size,
arena->flags);
if (!ptr) {
__sync_sub_and_fetch(&arena->used, arena->slab_size);
quota_release(arena->quota, arena->slab_size);
}
VALGRIND_MAKE_MEM_UNDEFINED(ptr, arena->slab_size);
return ptr;
}

/**
 * Return a slab to the arena's lock-free cache for later reuse.
 * A NULL ptr is a no-op.
 */
void
slab_unmap(struct slab_arena *arena, void *ptr)
{
	if (ptr == NULL)
		return;

	/*
	 * Annotate BEFORE publishing the slab via lf_lifo_push():
	 * once the push completes another thread may pop and start
	 * using the slab, so marking it NOACCESS afterwards would
	 * race with the legitimate new owner and produce false
	 * positives. The lifo link embedded at the start of the
	 * slab must remain readable for lf_lifo_pop().
	 */
	VALGRIND_MAKE_MEM_NOACCESS(ptr, arena->slab_size);
	VALGRIND_MAKE_MEM_DEFINED(lf_lifo(ptr), sizeof(struct lf_lifo));
	lf_lifo_push(&arena->cache, ptr);
}

void
Expand Down
28 changes: 23 additions & 5 deletions small/slab_cache.c
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,8 @@
#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>

const uint32_t slab_magic = 0xeec0ffee;

Expand Down Expand Up @@ -73,6 +75,7 @@ slab_set_free(struct slab_cache *cache, struct slab *slab)
cache->allocated.stats.used -= slab->size;
cache->orders[slab->order].stats.used -= slab->size;
slab->in_use = 0;
VALGRIND_MEMPOOL_FREE(cache, slab_data(slab));
}

static inline void
Expand All @@ -82,6 +85,7 @@ slab_set_used(struct slab_cache *cache, struct slab *slab)
cache->orders[slab->order].stats.used += slab->size;
/* Not a boolean to have an extra assert. */
slab->in_use = 1 + slab->order;
VALGRIND_MEMPOOL_ALLOC(cache, slab_data(slab), slab_capacity(slab));
}

static inline bool
Expand All @@ -95,10 +99,11 @@ slab_poison(struct slab *slab)
{
(void)slab;
#ifndef NDEBUG
VALGRIND_MAKE_MEM_UNDEFINED(slab_data(slab), slab_capacity(slab));
const char poison_char = 'P';
memset((char *) slab + slab_sizeof(), poison_char,
slab->size - slab_sizeof());
memset(slab_data(slab), poison_char, slab_capacity(slab));
#endif
VALGRIND_MAKE_MEM_NOACCESS(slab_data(slab), slab_capacity(slab));
}

static inline void
Expand Down Expand Up @@ -130,9 +135,12 @@ slab_split(struct slab_cache *cache, struct slab *slab)
size_t new_size = slab_order_size(cache, new_order);

slab_create(slab, new_order, new_size);

struct slab *buddy = slab_buddy(cache, slab);
VALGRIND_MAKE_MEM_UNDEFINED(buddy, sizeof(*buddy));
slab_create(buddy, new_order, new_size);
slab_list_add(&cache->orders[buddy->order], buddy, next_in_list);

return slab;
}

Expand Down Expand Up @@ -171,6 +179,8 @@ slab_cache_create(struct slab_cache *cache, struct slab_arena *arena)
for (i = 0; i <= cache->order_max; i++)
slab_list_create(&cache->orders[i]);
slab_cache_set_thread(cache);
VALGRIND_CREATE_MEMPOOL_EXT(cache, 0, 0, VALGRIND_MEMPOOL_METAPOOL |
VALGRIND_MEMPOOL_AUTO_FREE);
}

void
Expand All @@ -184,11 +194,14 @@ slab_cache_destroy(struct slab_cache *cache)
*/
struct slab *slab, *tmp;
rlist_foreach_entry_safe(slab, slabs, next_in_cache, tmp) {
if (slab->order == cache->order_max + 1)
if (slab->order == cache->order_max + 1) {
VALGRIND_MEMPOOL_FREE(cache, slab_data(slab));
free(slab);
else
} else {
slab_unmap(cache->arena, slab);
}
}
VALGRIND_DESTROY_MEMPOOL(cache);
}

struct slab *
Expand Down Expand Up @@ -260,6 +273,8 @@ slab_get(struct slab_cache *cache, size_t size)
slab_create(slab, order, size);
slab_list_add(&cache->allocated, slab, next_in_cache);
cache->allocated.stats.used += size;
VALGRIND_MEMPOOL_ALLOC(cache, slab_data(slab),
slab_capacity(slab));
return slab;
}
return slab_get_with_order(cache, order);
Expand All @@ -275,8 +290,11 @@ slab_put(struct slab_cache *cache, struct slab *slab)
* Free a huge slab right away, we have no
* further business to do with it.
*/
uint32_t slab_size = slab->size;
slab_list_del(&cache->allocated, slab, next_in_cache);
cache->allocated.stats.used -= slab->size;
cache->allocated.stats.used -= slab_size;
slab_poison(slab);
VALGRIND_MEMPOOL_FREE(cache, slab_data(slab));
free(slab);
return;
}
Expand Down
Loading

0 comments on commit c18051d

Please sign in to comment.