@@ -36,6 +36,8 @@
 #include <stdio.h>
 #include <stdbool.h>
 #include <unistd.h>
+#include <valgrind/valgrind.h>
+#include <valgrind/memcheck.h>
 
 const uint32_t slab_magic = 0xeec0ffee;
 
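valgrind/valgrind.h provides the mempool client requests used below (VALGRIND_CREATE_MEMPOOL_EXT, VALGRIND_MEMPOOL_ALLOC, VALGRIND_MEMPOOL_FREE, VALGRIND_DESTROY_MEMPOOL); valgrind/memcheck.h provides the addressability requests (VALGRIND_MAKE_MEM_UNDEFINED, VALGRIND_MAKE_MEM_NOACCESS). All of these expand to near-zero-cost no-op sequences when the binary is not running under Valgrind.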
@@ -73,6 +75,7 @@ slab_set_free(struct slab_cache *cache, struct slab *slab)
 	cache->allocated.stats.used -= slab->size;
 	cache->orders[slab->order].stats.used -= slab->size;
 	slab->in_use = 0;
+	VALGRIND_MEMPOOL_FREE(cache, slab_data(slab));
 }
 
 static inline void
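slab_set_free() is where a slab's payload leaves circulation, so the chunk is withdrawn from the pool here: after VALGRIND_MEMPOOL_FREE, any touch of slab_data(slab) is reported as a use of freed pool memory.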
@@ -82,6 +85,7 @@ slab_set_used(struct slab_cache *cache, struct slab *slab)
 	cache->orders[slab->order].stats.used += slab->size;
 	/* Not a boolean to have an extra assert. */
 	slab->in_use = 1 + slab->order;
+	VALGRIND_MEMPOOL_ALLOC(cache, slab_data(slab), slab_capacity(slab));
 }
 
 static inline bool
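slab_set_used() is the counterpart: VALGRIND_MEMPOOL_ALLOC marks [slab_data(slab), slab_data(slab) + slab_capacity(slab)) addressable but undefined, so reading slab memory before it has been written is still caught.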
@@ -95,10 +99,11 @@ slab_poison(struct slab *slab)
 {
 	(void)slab;
 #ifndef NDEBUG
+	VALGRIND_MAKE_MEM_UNDEFINED(slab_data(slab), slab_capacity(slab));
 	const char poison_char = 'P';
-	memset((char *) slab + slab_sizeof(), poison_char,
-	       slab->size - slab_sizeof());
+	memset(slab_data(slab), poison_char, slab_capacity(slab));
 #endif
+	VALGRIND_MAKE_MEM_NOACCESS(slab_data(slab), slab_capacity(slab));
 }
 
 static inline void
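Order matters in slab_poison(): the payload may already be noaccess (the slab has typically just been mempool-freed), so VALGRIND_MAKE_MEM_UNDEFINED reopens it before the debug-build memset, and VALGRIND_MAKE_MEM_NOACCESS locks it down again unconditionally, release builds included.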
@@ -130,9 +135,12 @@ slab_split(struct slab_cache *cache, struct slab *slab)
 	size_t new_size = slab_order_size(cache, new_order);
 
 	slab_create(slab, new_order, new_size);
+
 	struct slab *buddy = slab_buddy(cache, slab);
+	VALGRIND_MAKE_MEM_UNDEFINED(buddy, sizeof(*buddy));
 	slab_create(buddy, new_order, new_size);
 	slab_list_add(&cache->orders[buddy->order], buddy, next_in_list);
+
 	return slab;
 }
 
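In slab_split(), the buddy's header lies inside the payload of the slab being split, which is noaccess at this point; VALGRIND_MAKE_MEM_UNDEFINED reopens exactly sizeof(*buddy) bytes so slab_create() can write the header without triggering a false positive.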
@@ -171,6 +179,8 @@ slab_cache_create(struct slab_cache *cache, struct slab_arena *arena)
 	for (i = 0; i <= cache->order_max; i++)
 		slab_list_create(&cache->orders[i]);
 	slab_cache_set_thread(cache);
+	VALGRIND_CREATE_MEMPOOL_EXT(cache, 0, 0, VALGRIND_MEMPOOL_METAPOOL |
+				    VALGRIND_MEMPOOL_AUTO_FREE);
 }
 
 void
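The cache pointer itself anchors the pool (redzone 0, not zeroed). VALGRIND_MEMPOOL_METAPOOL tells Memcheck that the chunks announced via VALGRIND_MEMPOOL_ALLOC are superblocks which a second-level allocator will subdivide with VALGRIND_MALLOCLIKE_BLOCK, and VALGRIND_MEMPOOL_AUTO_FREE makes mempool-freeing a superblock implicitly release any second-level blocks still registered inside it. A sketch of the two-level pattern this enables (illustration only, not part of this commit; carve_object() is a hypothetical helper):

    /* obj lies inside a chunk already announced with
     * VALGRIND_MEMPOOL_ALLOC(cache, ...); METAPOOL permits the overlap. */
    void *obj = carve_object(slab);                  /* hypothetical */
    VALGRIND_MALLOCLIKE_BLOCK(obj, obj_size, 0, 0);  /* second-level alloc */
    /* ... use obj ... */
    VALGRIND_FREELIKE_BLOCK(obj, 0);                 /* second-level free */

If a superblock is mempool-freed while such objects are still registered, AUTO_FREE drops them from Memcheck's bookkeeping instead of leaving them dangling.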
@@ -184,11 +194,14 @@ slab_cache_destroy(struct slab_cache *cache)
 	 */
 	struct slab *slab, *tmp;
 	rlist_foreach_entry_safe(slab, slabs, next_in_cache, tmp) {
-		if (slab->order == cache->order_max + 1)
+		if (slab->order == cache->order_max + 1) {
+			VALGRIND_MEMPOOL_FREE(cache, slab_data(slab));
 			free(slab);
-		else
+		} else {
 			slab_unmap(cache->arena, slab);
+		}
 	}
+	VALGRIND_DESTROY_MEMPOOL(cache);
 }
 
 struct slab *
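Huge slabs (order == order_max + 1) came from malloc() and were registered with the pool in slab_get(), so they are mempool-freed before free(). Arena slabs are simply unmapped; VALGRIND_DESTROY_MEMPOOL then tells Memcheck to forget the pool and anything still associated with it.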
@@ -260,6 +273,8 @@ slab_get(struct slab_cache *cache, size_t size)
 		slab_create(slab, order, size);
 		slab_list_add(&cache->allocated, slab, next_in_cache);
 		cache->allocated.stats.used += size;
+		VALGRIND_MEMPOOL_ALLOC(cache, slab_data(slab),
+				       slab_capacity(slab));
 		return slab;
 	}
 	return slab_get_with_order(cache, order);
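The huge-slab path in slab_get() bypasses the order lists and therefore slab_set_used(), so it announces its payload to the pool by hand.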
@@ -275,8 +290,11 @@ slab_put(struct slab_cache *cache, struct slab *slab)
 	 * Free a huge slab right away, we have no
 	 * further business to do with it.
 	 */
+	uint32_t slab_size = slab->size;
 	slab_list_del(&cache->allocated, slab, next_in_cache);
-	cache->allocated.stats.used -= slab->size;
+	cache->allocated.stats.used -= slab_size;
+	slab_poison(slab);
+	VALGRIND_MEMPOOL_FREE(cache, slab_data(slab));
 	free(slab);
 	return;
 	}
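In slab_put(), the huge-slab path saves slab->size into a local before tearing the slab down, presumably so the header is not re-read once slab_poison() has run; the payload is then poisoned, mempool-freed, and only afterwards handed back to the C allocator. With the pool registered, Memcheck can now pinpoint allocator-level misuse. A minimal sketch of what gets caught (assuming a cache already initialized with slab_cache_create()):

    struct slab *s = slab_get(&cache, 4096);
    char *p = (char *) slab_data(s);
    p[0] = 'x';        /* fine: the chunk is addressable */
    slab_put(&cache, s);
    p[0] = 'y';        /* Memcheck: access to freed mempool memory */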