@@ -226,7 +226,6 @@ typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN]; /* 64B */
226
226
typedef struct _zend_mm_page zend_mm_page ;
227
227
typedef struct _zend_mm_bin zend_mm_bin ;
228
228
typedef struct _zend_mm_free_slot zend_mm_free_slot ;
229
- typedef struct _zend_mm_chunk zend_mm_chunk ;
230
229
typedef struct _zend_mm_huge_list zend_mm_huge_list ;
231
230
232
231
static bool zend_mm_use_huge_pages = false;
@@ -322,6 +321,9 @@ struct _zend_mm_chunk {
322
321
zend_mm_heap heap_slot ; /* used only in main chunk */
323
322
zend_mm_page_map free_map ; /* 512 bits or 64 bytes */
324
323
zend_mm_page_info map [ZEND_MM_PAGES ]; /* 2 KB = 512 * 4 */
324
+ bool preserve ; /* Never free this chunk.
325
+ Ensures the address space can
326
+ not be re-used. */
325
327
};
326
328
327
329
struct _zend_mm_page {
@@ -804,7 +806,7 @@ static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
804
806
}
805
807
}
806
808
807
- static void * zend_mm_chunk_alloc (zend_mm_heap * heap , size_t size , size_t alignment )
809
+ ZEND_API void * zend_mm_chunk_alloc (zend_mm_heap * heap , size_t size , size_t alignment )
808
810
{
809
811
#if ZEND_MM_STORAGE
810
812
if (UNEXPECTED (heap -> storage )) {
@@ -1172,11 +1174,15 @@ static zend_always_inline void zend_mm_delete_chunk(zend_mm_heap *heap, zend_mm_
1172
1174
}
1173
1175
}
1174
1176
if (!heap -> cached_chunks || chunk -> num > heap -> cached_chunks -> num ) {
1175
- zend_mm_chunk_free (heap , chunk , ZEND_MM_CHUNK_SIZE );
1177
+ if (!chunk -> preserve ) {
1178
+ zend_mm_chunk_free (heap , chunk , ZEND_MM_CHUNK_SIZE );
1179
+ }
1176
1180
} else {
1177
1181
//TODO: select the best chunk to delete???
1178
1182
chunk -> next = heap -> cached_chunks -> next ;
1179
- zend_mm_chunk_free (heap , heap -> cached_chunks , ZEND_MM_CHUNK_SIZE );
1183
+ if (!heap -> cached_chunks -> preserve ) {
1184
+ zend_mm_chunk_free (heap , heap -> cached_chunks , ZEND_MM_CHUNK_SIZE );
1185
+ }
1180
1186
heap -> cached_chunks = chunk ;
1181
1187
}
1182
1188
}
@@ -2195,6 +2201,53 @@ ZEND_API size_t zend_mm_gc(zend_mm_heap *heap)
2195
2201
return collected * ZEND_MM_PAGE_SIZE ;
2196
2202
}
2197
2203
2204
/* Returns the head of the heap's chunk list (the main chunk).
 * Walk the remaining chunks with zend_mm_get_next_chunk(). */
ZEND_API zend_mm_chunk *zend_mm_get_chunk_list(zend_mm_heap *heap)
{
	return heap->main_chunk;
}
2208
+
2209
/* Returns the number of chunks currently owned by the heap. */
ZEND_API int zend_mm_get_chunks_count(zend_mm_heap *heap)
{
	return heap->chunks_count;
}
2213
+
2214
/* Returns the heap's huge-allocation list.
 * NOTE(review): exposed as void * — presumably a zend_mm_huge_list *
 * kept opaque so callers need not see the struct definition; confirm
 * against the zend_mm_heap declaration. */
ZEND_API void *zend_mm_get_huge_list(zend_mm_heap *heap)
{
	return heap->huge_list;
}
2218
+
2219
+ ZEND_API zend_mm_chunk * zend_mm_get_next_chunk (zend_mm_heap * heap , zend_mm_chunk * chunk )
2220
+ {
2221
+ ZEND_ASSERT (chunk -> heap == heap );
2222
+ zend_mm_chunk * next = chunk -> next ;
2223
+ if (next == heap -> main_chunk ) {
2224
+ return NULL ;
2225
+ }
2226
+ return next ;
2227
+ }
2228
+
2229
+ /* Adds the given chunk to the heap. The chunk is not initialized, and can have
2230
+ * allocated slots and pages. */
2231
+ ZEND_API void zend_mm_adopt_chunk (zend_mm_heap * heap , zend_mm_chunk * chunk )
2232
+ {
2233
+ /* Do not import free lists, as the chunk may have been created with a
2234
+ * different key. However, free pages can be allocated. */
2235
+ chunk -> heap = heap ;
2236
+ chunk -> next = heap -> main_chunk ;
2237
+ chunk -> prev = heap -> main_chunk -> prev ;
2238
+ chunk -> prev -> next = chunk ;
2239
+ chunk -> next -> prev = chunk ;
2240
+ chunk -> num = chunk -> prev -> num + 1 ;
2241
+ heap -> chunks_count ++ ;
2242
+ heap -> peak_chunks_count ++ ;
2243
+ }
2244
+
2245
/* Marks the chunk so the allocator never unmaps it: every free path
 * checks chunk->preserve before calling zend_mm_chunk_free(), which
 * guarantees the chunk's address space is never re-used. */
ZEND_API void zend_mm_preserve_chunk(zend_mm_heap *heap, zend_mm_chunk *chunk)
{
	ZEND_ASSERT(chunk->heap == heap);
	chunk->preserve = true;
}
2250
+
2198
2251
#if ZEND_DEBUG
2199
2252
/******************/
2200
2253
/* Leak detection */
@@ -2460,23 +2513,44 @@ ZEND_API void zend_mm_shutdown(zend_mm_heap *heap, bool full, bool silent)
2460
2513
while (heap -> cached_chunks ) {
2461
2514
p = heap -> cached_chunks ;
2462
2515
heap -> cached_chunks = p -> next ;
2463
- zend_mm_chunk_free (heap , p , ZEND_MM_CHUNK_SIZE );
2516
+ if (!p -> preserve ) {
2517
+ zend_mm_chunk_free (heap , p , ZEND_MM_CHUNK_SIZE );
2518
+ }
2464
2519
}
2465
2520
/* free the first chunk */
2466
- zend_mm_chunk_free (heap , heap -> main_chunk , ZEND_MM_CHUNK_SIZE );
2521
+ if (!heap -> main_chunk -> preserve ) {
2522
+ zend_mm_chunk_free (heap , heap -> main_chunk , ZEND_MM_CHUNK_SIZE );
2523
+ }
2467
2524
} else {
2525
+ /* unlink preserved chunks from the cache */
2526
+ p = heap -> cached_chunks ;
2527
+ while (p ) {
2528
+ zend_mm_chunk * q = p -> next ;
2529
+ while (q && q -> preserve ) {
2530
+ p -> next = q = q -> next ;
2531
+ heap -> cached_chunks_count -- ;
2532
+ }
2533
+ p = q ;
2534
+ }
2535
+ if (heap -> cached_chunks && heap -> cached_chunks -> preserve ) {
2536
+ heap -> cached_chunks_count -- ;
2537
+ heap -> cached_chunks = heap -> cached_chunks -> next ;
2538
+ }
2539
+
2468
2540
/* free some cached chunks to keep average count */
2469
2541
heap -> avg_chunks_count = (heap -> avg_chunks_count + (double )heap -> peak_chunks_count ) / 2.0 ;
2470
2542
while ((double )heap -> cached_chunks_count + 0.9 > heap -> avg_chunks_count &&
2471
2543
heap -> cached_chunks ) {
2472
2544
p = heap -> cached_chunks ;
2473
2545
heap -> cached_chunks = p -> next ;
2546
+ ZEND_ASSERT (!p -> preserve );
2474
2547
zend_mm_chunk_free (heap , p , ZEND_MM_CHUNK_SIZE );
2475
2548
heap -> cached_chunks_count -- ;
2476
2549
}
2477
2550
/* clear cached chunks */
2478
2551
p = heap -> cached_chunks ;
2479
2552
while (p != NULL ) {
2553
+ ZEND_ASSERT (!p -> preserve );
2480
2554
zend_mm_chunk * q = p -> next ;
2481
2555
memset (p , 0 , sizeof (zend_mm_chunk ));
2482
2556
p -> next = q ;
@@ -2864,7 +2938,9 @@ ZEND_API zend_result zend_set_memory_limit(size_t memory_limit)
2864
2938
do {
2865
2939
zend_mm_chunk * p = heap -> cached_chunks ;
2866
2940
heap -> cached_chunks = p -> next ;
2867
- zend_mm_chunk_free (heap , p , ZEND_MM_CHUNK_SIZE );
2941
+ if (!p -> preserve ) {
2942
+ zend_mm_chunk_free (heap , p , ZEND_MM_CHUNK_SIZE );
2943
+ }
2868
2944
heap -> cached_chunks_count -- ;
2869
2945
heap -> real_size -= ZEND_MM_CHUNK_SIZE ;
2870
2946
} while (memory_limit < heap -> real_size );
@@ -2919,8 +2995,22 @@ ZEND_API void zend_memory_reset_peak_usage(void)
2919
2995
#endif
2920
2996
}
2921
2997
2998
+ static void alloc_globals_ctor (zend_alloc_globals * alloc_globals );
2999
+
2922
3000
ZEND_API void shutdown_memory_manager (bool silent , bool full_shutdown )
2923
3001
{
3002
+ if (!full_shutdown ) {
3003
+ zend_mm_heap * heap = AG (mm_heap );
3004
+ if (heap -> main_chunk -> preserve ) {
3005
+ /* The main chunk is preserved, so we can not re-use it in the next
3006
+ * request: We have to full shutdown and start a new heap.
3007
+ * This happens when snapshot_state() was called during the request.
3008
+ */
3009
+ zend_mm_shutdown (AG (mm_heap ), 1 , silent );
3010
+ alloc_globals_ctor (& alloc_globals );
3011
+ return ;
3012
+ }
3013
+ }
2924
3014
zend_mm_shutdown (AG (mm_heap ), full_shutdown , silent );
2925
3015
}
2926
3016
0 commit comments