@@ -570,7 +570,7 @@ func mallocinit() {
 		// heap reservation.
 
 		const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
-		meta := uintptr(sysReserve(nil, arenaMetaSize))
+		meta := uintptr(sysReserve(nil, arenaMetaSize, "heap reservation"))
 		if meta != 0 {
 			mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
 		}
@@ -607,7 +607,7 @@ func mallocinit() {
 			128 << 20,
 		}
 		for _, arenaSize := range arenaSizes {
-			a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
+			a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes, "heap reservation")
 			if a != nil {
 				mheap_.arena.init(uintptr(a), size, false)
 				p = mheap_.arena.end // For hint below
@@ -657,7 +657,7 @@ func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, arenaList *[]arenaIdx)
 		//
 		// Only do this if we're using the regular heap arena hints.
 		// This behavior is only for the heap.
-		v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased)
+		v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased, "heap")
 		if v != nil {
 			size = n
 			goto mapped
@@ -678,7 +678,7 @@ func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, arenaList *[]arenaIdx)
 				// Outside addressable heap. Can't use.
 				v = nil
 			} else {
-				v = sysReserve(unsafe.Pointer(p), n)
+				v = sysReserve(unsafe.Pointer(p), n, "heap reservation")
 			}
 			if p == uintptr(v) {
 				// Success. Update the hint.
@@ -714,7 +714,7 @@ func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, arenaList *[]arenaIdx)
 		// All of the hints failed, so we'll take any
 		// (sufficiently aligned) address the kernel will give
 		// us.
-		v, size = sysReserveAligned(nil, n, heapArenaBytes)
+		v, size = sysReserveAligned(nil, n, heapArenaBytes, "heap")
 		if v == nil {
 			return nil, 0
 		}
@@ -764,7 +764,7 @@ mapped:
 			// is paged in is too expensive. Trying to account for the whole region means
 			// that it will appear like an enormous memory overhead in statistics, even though
 			// it is not.
-			l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2)))
+			l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2), "heap index"))
 			if l2 == nil {
 				throw("out of memory allocating heap arena map")
 			}
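// Aside (not part of the diff): the l2 block allocated above backs one
// level of the runtime's two-level heap arena index. A hedged sketch of
// the lookup, mirroring arenaIndex in malloc.go on platforms where the
// map is two-level; treat the exact expressions as an approximation:
//
//	ai := (p + arenaBaseOffset) / heapArenaBytes // linear arena number
//	l1 := ai >> arenaL2Bits                      // index into h.arenas
//	l2 := ai & (1<<arenaL2Bits - 1)              // index into the block above
//	arena := h.arenas[l1][l2]                    // nil if not yet mapped
//
// The map is filled in lazily, one l2 block at a time, which is why the
// accounting comment above worries about charging for the whole region
// up front.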
@@ -780,7 +780,7 @@ mapped:
 			throw("arena already initialized")
 		}
 		var r *heapArena
-		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
+		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys, "heap metadata"))
 		if r == nil {
 			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
 			if r == nil {
@@ -827,7 +827,7 @@ mapped:
 // sysReserveAligned is like sysReserve, but the returned pointer is
 // aligned to align bytes. It may reserve either n or n+align bytes,
 // so it returns the size that was reserved.
-func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
+func sysReserveAligned(v unsafe.Pointer, size, align uintptr, vmaName string) (unsafe.Pointer, uintptr) {
 	if isSbrkPlatform {
 		if v != nil {
 			throw("unexpected heap arena hint on sbrk platform")
@@ -839,7 +839,7 @@ func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, u
 	// for a larger region and remove the parts we don't need.
 	retries := 0
 retry:
-	p := uintptr(sysReserve(v, size+align))
+	p := uintptr(sysReserve(v, size+align, vmaName))
 	switch {
 	case p == 0:
 		return nil, 0
@@ -852,7 +852,7 @@ retry:
 		// so we may have to try again.
 		sysFreeOS(unsafe.Pointer(p), size+align)
 		p = alignUp(p, align)
-		p2 := sysReserve(unsafe.Pointer(p), size)
+		p2 := sysReserve(unsafe.Pointer(p), size, vmaName)
 		if p != uintptr(p2) {
 			// Must have raced. Try again.
 			sysFreeOS(p2, size)
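// Aside (not part of the diff): sysReserveAligned gets an aligned region
// by over-reserving. A hedged restatement of the retry loop above:
//
//	p := reserve(v, size+align)  // ask for align extra bytes
//	free(p, size+align)          // return the whole range to the OS
//	p = alignUp(p, align)        // (p + align - 1) &^ (align - 1)
//	p2 := reserve(p, size)       // re-reserve just the aligned part
//	if p2 != p { goto retry }    // lost a race with another mapping
//
// Freeing and re-reserving happen in separate system calls, so another
// thread can grab the address range in between; the retry loop (and its
// retries counter) exists to tolerate exactly that race.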
@@ -1933,7 +1933,7 @@ func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
 	}
 
 	if size >= maxBlock {
-		return (*notInHeap)(sysAlloc(size, sysStat))
+		return (*notInHeap)(sysAlloc(size, sysStat, "immortal metadata"))
 	}
 
 	mp := acquirem()
@@ -1946,7 +1946,7 @@ func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
 	}
 	persistent.off = alignUp(persistent.off, align)
 	if persistent.off+size > persistentChunkSize || persistent.base == nil {
-		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
+		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys, "immortal metadata"))
 		if persistent.base == nil {
 			if persistent == &globalAlloc.persistentAlloc {
 				unlock(&globalAlloc.mutex)
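// Aside (not part of the diff): persistentalloc is a chunked bump
// allocator for memory that is never freed. Requests of maxBlock bytes
// or more go straight to sysAlloc (previous hunk); smaller ones are
// carved out of a shared chunk, roughly:
//
//	off := alignUp(persistent.off, align)
//	if off+size > persistentChunkSize || persistent.base == nil {
//		persistent.base = sysAlloc(persistentChunkSize, stat, "immortal metadata")
//		off = 0 // simplification: the real code reserves a small header word
//	}
//	p := persistent.base + off
//	persistent.off = off + size
//
// The "immortal metadata" name chosen in this change is apt: nothing
// allocated through this path is ever returned to the operating system.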
@@ -2020,7 +2020,7 @@ func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
 	l.mapMemory = mapMemory
 }
 
-func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
+func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat, vmaName string) unsafe.Pointer {
 	p := alignUp(l.next, align)
 	if p+size > l.end {
 		return nil
@@ -2030,7 +2030,7 @@ func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Poi
 	if l.mapMemory {
 		// Transition from Reserved to Prepared to Ready.
 		n := pEnd - l.mapped
-		sysMap(unsafe.Pointer(l.mapped), n, sysStat)
+		sysMap(unsafe.Pointer(l.mapped), n, sysStat, vmaName)
 		sysUsed(unsafe.Pointer(l.mapped), n, n)
 	}
 	l.mapped = pEnd
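// Aside (not part of the diff): the vmaName strings threaded through
// these call sites let the runtime label its anonymous mappings, so
// entries like "heap reservation" or "immortal metadata" can show up in
// /proc/<pid>/maps on Linux kernels with anonymous VMA naming (5.17+).
// A hedged userland sketch of the underlying mechanism, written against
// golang.org/x/sys/unix rather than the runtime's raw syscalls:
//
//	mem, _ := unix.Mmap(-1, 0, 1<<20, unix.PROT_NONE,
//		unix.MAP_ANON|unix.MAP_PRIVATE)
//	name := []byte("heap reservation\x00")
//	_ = unix.Prctl(unix.PR_SET_VMA, unix.PR_SET_VMA_ANON_NAME,
//		uintptr(unsafe.Pointer(&mem[0])), 1<<20,
//		uintptr(unsafe.Pointer(&name[0])))
//
// On kernels built without CONFIG_ANON_VMA_NAME the prctl fails with
// EINVAL and the mapping simply stays unnamed, so the names are
// best-effort metadata, not a functional change.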