@@ -5500,20 +5500,16 @@ static int __perf_read_group_add(struct perf_event *leader,
 }
 
 static int perf_read_group(struct perf_event *event,
-			   u64 read_format, char __user *buf)
+			   u64 read_format, char __user *buf,
+			   u64 *values)
 {
 	struct perf_event *leader = event->group_leader, *child;
 	struct perf_event_context *ctx = leader->ctx;
 	int ret;
-	u64 *values;
 
 	lockdep_assert_held(&ctx->mutex);
 
-	values = kzalloc(event->read_size, GFP_KERNEL);
-	if (!values)
-		return -ENOMEM;
-
-	values[0] = 1 + leader->nr_siblings;
+	*values = 1 + leader->nr_siblings;
 
 	/*
 	 * By locking the child_mutex of the leader we effectively
@@ -5531,25 +5527,17 @@ static int perf_read_group(struct perf_event *event,
 		goto unlock;
 	}
 
-	mutex_unlock(&leader->child_mutex);
-
 	ret = event->read_size;
-	if (copy_to_user(buf, values, event->read_size))
-		ret = -EFAULT;
-	goto out;
-
 unlock:
 	mutex_unlock(&leader->child_mutex);
-out:
-	kfree(values);
 	return ret;
 }
 
 static int perf_read_one(struct perf_event *event,
-			 u64 read_format, char __user *buf)
+			 u64 read_format, char __user *buf,
+			 u64 *values)
 {
 	u64 enabled, running;
-	u64 values[5];
 	int n = 0;
 
 	values[n++] = __perf_event_read_value(event, &enabled, &running);
@@ -5562,9 +5550,6 @@ static int perf_read_one(struct perf_event *event,
 	if (read_format & PERF_FORMAT_LOST)
 		values[n++] = atomic64_read(&event->lost_samples);
 
-	if (copy_to_user(buf, values, n * sizeof(u64)))
-		return -EFAULT;
-
 	return n * sizeof(u64);
 }
 
@@ -5585,7 +5570,8 @@ static bool is_event_hup(struct perf_event *event)
  * Read the performance event - simple non blocking version for now
  */
 static ssize_t
-__perf_read(struct perf_event *event, char __user *buf, size_t count)
+__perf_read(struct perf_event *event, char __user *buf,
+	    size_t count, u64 *values)
 {
 	u64 read_format = event->attr.read_format;
 	int ret;
@@ -5603,9 +5589,9 @@ __perf_read(struct perf_event *event, char __user *buf, size_t count)
 
 	WARN_ON_ONCE(event->ctx->parent_ctx);
 	if (read_format & PERF_FORMAT_GROUP)
-		ret = perf_read_group(event, read_format, buf);
+		ret = perf_read_group(event, read_format, buf, values);
 	else
-		ret = perf_read_one(event, read_format, buf);
+		ret = perf_read_one(event, read_format, buf, values);
 
 	return ret;
 }
@@ -5615,16 +5601,31 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
 	struct perf_event *event = file->private_data;
 	struct perf_event_context *ctx;
+	u64 stack_values[8];
+	u64 *values;
 	int ret;
 
 	ret = security_perf_event_read(event);
 	if (ret)
 		return ret;
 
+	if (event->read_size <= sizeof(stack_values))
+		values = memset(stack_values, 0, event->read_size);
+	else
+		values = kzalloc(event->read_size, GFP_KERNEL);
+	if (!values)
+		return -ENOMEM;
+
 	ctx = perf_event_ctx_lock(event);
-	ret = __perf_read(event, buf, count);
+	ret = __perf_read(event, buf, count, values);
 	perf_event_ctx_unlock(event, ctx);
 
+	if (ret > 0 && copy_to_user(buf, values, ret))
+		ret = -EFAULT;
+
+	if (values != stack_values)
+		kfree(values);
+
 	return ret;
 }
 
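Taken together, the hunks above move buffer ownership out of the helpers: perf_read() now provides the output buffer (a small on-stack stack_values[8] array when event->read_size fits, a kzalloc() fallback otherwise), perf_read_group() and perf_read_one() only fill it and return the byte count, and the single copy_to_user() runs after perf_event_ctx_unlock() rather than under the event context lock. Below is a minimal userspace sketch of the same stack-or-heap pattern; the names fill_values and do_read are hypothetical stand-ins, with calloc()/free() in place of kzalloc()/kfree():

/*
 * Standalone sketch of the stack-or-heap buffer pattern that the diff
 * introduces in perf_read(). All names here are illustrative, not
 * kernel API.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for __perf_read(): fill the buffer, return bytes written. */
static long fill_values(uint64_t *values, size_t size)
{
	for (size_t i = 0; i < size / sizeof(*values); i++)
		values[i] = i;
	return (long)size;
}

static long do_read(size_t read_size)
{
	uint64_t stack_values[8];
	uint64_t *values;
	long ret;

	/* Small reads use the on-stack array; large ones go to the heap. */
	if (read_size <= sizeof(stack_values))
		values = memset(stack_values, 0, read_size);
	else
		values = calloc(1, read_size);
	if (!values)
		return -1;

	ret = fill_values(values, read_size);

	/* Only the heap fallback owns memory that must be freed. */
	if (values != stack_values)
		free(values);

	return ret;
}

int main(void)
{
	printf("small read: %ld bytes\n", do_read(4 * sizeof(uint64_t)));
	printf("large read: %ld bytes\n", do_read(64 * sizeof(uint64_t)));
	return 0;
}

The values != stack_values test is what keeps the two paths symmetric: only the heap path allocated, so only it frees, and reads that fit the stack array never touch the allocator at all.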