@@ -5565,20 +5565,16 @@ static int __perf_read_group_add(struct perf_event *leader,
 }
 
 static int perf_read_group(struct perf_event *event,
-                           u64 read_format, char __user *buf)
+                           u64 read_format, char __user *buf,
+                           u64 *values)
 {
        struct perf_event *leader = event->group_leader, *child;
        struct perf_event_context *ctx = leader->ctx;
        int ret;
-       u64 *values;
 
        lockdep_assert_held(&ctx->mutex);
 
-       values = kzalloc(event->read_size, GFP_KERNEL);
-       if (!values)
-               return -ENOMEM;
-
-       values[0] = 1 + leader->nr_siblings;
+       *values = 1 + leader->nr_siblings;
 
        mutex_lock(&leader->child_mutex);
 
@@ -5592,25 +5588,17 @@ static int perf_read_group(struct perf_event *event,
                goto unlock;
        }
 
-       mutex_unlock(&leader->child_mutex);
-
        ret = event->read_size;
-       if (copy_to_user(buf, values, event->read_size))
-               ret = -EFAULT;
-       goto out;
-
 unlock:
        mutex_unlock(&leader->child_mutex);
-out:
-       kfree(values);
        return ret;
 }
 
 static int perf_read_one(struct perf_event *event,
-                         u64 read_format, char __user *buf)
+                         u64 read_format, char __user *buf,
+                         u64 *values)
 {
        u64 enabled, running;
-       u64 values[5];
        int n = 0;
 
        values[n++] = __perf_event_read_value(event, &enabled, &running);
@@ -5623,9 +5611,6 @@ static int perf_read_one(struct perf_event *event,
        if (read_format & PERF_FORMAT_LOST)
                values[n++] = atomic64_read(&event->lost_samples);
 
-       if (copy_to_user(buf, values, n * sizeof(u64)))
-               return -EFAULT;
-
        return n * sizeof(u64);
 }
 
@@ -5646,7 +5631,8 @@ static bool is_event_hup(struct perf_event *event)
  * Read the performance event - simple non blocking version for now
  */
 static ssize_t
-__perf_read(struct perf_event *event, char __user *buf, size_t count)
+__perf_read(struct perf_event *event, char __user *buf,
+            size_t count, u64 *values)
 {
        u64 read_format = event->attr.read_format;
        int ret;
@@ -5664,9 +5650,9 @@ __perf_read(struct perf_event *event, char __user *buf, size_t count)
 
        WARN_ON_ONCE(event->ctx->parent_ctx);
        if (read_format & PERF_FORMAT_GROUP)
-               ret = perf_read_group(event, read_format, buf);
+               ret = perf_read_group(event, read_format, buf, values);
        else
-               ret = perf_read_one(event, read_format, buf);
+               ret = perf_read_one(event, read_format, buf, values);
 
        return ret;
 }
@@ -5676,16 +5662,31 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
        struct perf_event *event = file->private_data;
        struct perf_event_context *ctx;
+       u64 stack_values[8];
+       u64 *values;
        int ret;
 
        ret = security_perf_event_read(event);
        if (ret)
               return ret;
 
+       if (event->read_size <= sizeof(stack_values))
+               values = memset(stack_values, 0, event->read_size);
+       else
+               values = kzalloc(event->read_size, GFP_KERNEL);
+       if (!values)
+               return -ENOMEM;
+
        ctx = perf_event_ctx_lock(event);
-       ret = __perf_read(event, buf, count);
+       ret = __perf_read(event, buf, count, values);
        perf_event_ctx_unlock(event, ctx);
 
+       if (ret > 0 && copy_to_user(buf, values, ret))
+               ret = -EFAULT;
+
+       if (values != stack_values)
+               kfree(values);
+
        return ret;
 }
 
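Taken together, the patch moves the copy_to_user() out of both helpers and into perf_read(), after perf_event_ctx_unlock(): perf_read_group() and perf_read_one() now only fill a caller-supplied values buffer, so user memory is never faulted while ctx->mutex is held. A non-group read needs at most 5 u64s (value, enabled, running, id, lost), so the 8-entry stack_values covers the common case and kzalloc() is only the fallback for large groups.

For context, here is a minimal userspace sketch (not part of this commit, error handling mostly elided) of the read format these paths produce. It opens a two-counter group with PERF_FORMAT_GROUP | PERF_FORMAT_ID, where a single read() on the leader returns nr followed by a {value, id} pair per counter — 5 u64s, i.e. a read_size of 40 bytes, which stays on the stack path above:

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* No glibc wrapper exists for perf_event_open; go through syscall(2). */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                           int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        uint64_t buf[8];        /* mirrors the kernel's stack_values[8] */
        int lead, sib;
        ssize_t n;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID;
        attr.disabled = 1;      /* group starts stopped; enabled as a whole below */

        lead = perf_event_open(&attr, 0, -1, -1, 0);

        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.disabled = 0;      /* siblings follow the leader's scheduling */
        sib = perf_event_open(&attr, 0, -1, lead, 0);
        if (lead < 0 || sib < 0)
                return 1;

        ioctl(lead, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
        ioctl(lead, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
        /* ... workload under measurement ... */
        ioctl(lead, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);

        /* One read() on the group leader returns the whole group:
         * buf[0] = nr, then one { value, id } pair per counter. */
        n = read(lead, buf, sizeof(buf));
        if (n > 0) {
                for (uint64_t i = 0; i < buf[0]; i++)
                        printf("id %llu: %llu\n",
                               (unsigned long long)buf[1 + 2 * i + 1],
                               (unsigned long long)buf[1 + 2 * i]);
        }
        return 0;
}

Add PERF_FORMAT_TOTAL_TIME_ENABLED/RUNNING or more siblings and read_size grows past sizeof(stack_values); that is exactly when the kzalloc() fallback in perf_read() takes over.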