
Commit 9163b2a

Merge remote-tracking branch 'drm-intel/topic/core-for-CI' into drm-tip

2 parents: 32dd90a + 44909e5

20 files changed: +150, -65 lines

drivers/acpi/sleep.c (+20)

@@ -84,11 +84,16 @@ static int acpi_sleep_prepare(u32 acpi_state)
         return 0;
 }
 
+static u8 max_sleep_state = -1;
+
 bool acpi_sleep_state_supported(u8 sleep_state)
 {
         acpi_status status;
         u8 type_a, type_b;
 
+        if (sleep_state > max_sleep_state)
+                return false;
+
         status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
         return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
                 || (acpi_gbl_FADT.sleep_control.address
@@ -165,6 +170,13 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
         return 0;
 }
 
+static int __init init_nosleep(const struct dmi_system_id *d)
+{
+        pr_info("Disabling ACPI suspend\n");
+        max_sleep_state = 0;
+        return 0;
+}
+
 bool acpi_sleep_default_s3;
 
 static int __init init_default_s3(const struct dmi_system_id *d)
@@ -385,6 +397,14 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
                 DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
                 },
         },
+        {
+        .callback = init_nosleep,
+        .ident = "samus",
+        .matches = {
+                DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+                DMI_MATCH(DMI_PRODUCT_NAME, "Samus"),
+                },
+        },
         {},
 };
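The new quirk only bites once the ACPI sleep code scans acpisleep_dmi_table and runs the matching entry's .callback (init_nosleep above), which caps max_sleep_state at 0 so that acpi_sleep_state_supported() rejects every sleep state on the Samus Chromebook. As a rough, self-contained model of the DMI quirk-table pattern (not the kernel's dmi_check_system() implementation; the helper names and firmware strings here are invented for illustration):

#include <stdio.h>
#include <string.h>

/* Stripped-down model of a DMI quirk table: walk the entries and run an
 * entry's callback when every one of its string matches succeeds. */
struct dmi_match { const char *field, *value; };

struct dmi_quirk {
        int (*callback)(const struct dmi_quirk *q);
        const char *ident;
        struct dmi_match matches[4];
};

/* Pretend firmware identification strings, standing in for real DMI data. */
static const char *dmi_get(const char *field)
{
        if (!strcmp(field, "sys_vendor"))
                return "GOOGLE";
        if (!strcmp(field, "product_name"))
                return "Samus";
        return "";
}

static int disable_sleep(const struct dmi_quirk *q)
{
        printf("Disabling ACPI suspend for %s\n", q->ident);
        return 0;
}

static const struct dmi_quirk quirks[] = {
        {
                .callback = disable_sleep,
                .ident = "samus",
                .matches = {
                        { "sys_vendor",   "GOOGLE" },
                        { "product_name", "Samus"  },
                },
        },
        { 0 }   /* terminator */
};

int main(void)
{
        for (const struct dmi_quirk *q = quirks; q->ident; q++) {
                int hit = 1;

                for (const struct dmi_match *m = q->matches; m->field && hit; m++)
                        hit = !strcmp(dmi_get(m->field), m->value);
                if (hit && q->callback)
                        q->callback(q);
        }
        return 0;
}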

drivers/ata/libata-core.c (+8 -8)

@@ -2207,7 +2207,7 @@ static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
          * for drives which implement this ATA level or above.
          */
         if (ata_id_major_version(dev->id) >= 10)
-                ata_dev_warn(dev,
+                ata_dev_notice(dev,
                         "ATA Identify Device Log not supported\n");
         dev->horkage |= ATA_HORKAGE_NO_ID_DEV_LOG;
         return false;
@@ -2279,7 +2279,7 @@ static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
         unsigned int err_mask;
 
         if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
-                ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
+                ata_dev_notice(dev, "NCQ Send/Recv Log not supported\n");
                 return;
         }
         err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
@@ -2304,8 +2304,8 @@ static void ata_dev_config_ncq_non_data(struct ata_device *dev)
         unsigned int err_mask;
 
         if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
-                ata_dev_warn(dev,
-                        "NCQ Send/Recv Log not supported\n");
+                ata_dev_notice(dev,
+                        "NCQ Send/Recv Log not supported\n");
                 return;
         }
         err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
@@ -2958,14 +2958,14 @@ int ata_dev_configure(struct ata_device *dev)
         if (ata_id_is_cfa(id)) {
                 /* CPRM may make this media unusable */
                 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
-                        ata_dev_warn(dev,
+                        ata_dev_notice(dev,
                                 "supports DRM functions and may not be fully accessible\n");
                 snprintf(revbuf, 7, "CFA");
         } else {
                 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
                 /* Warn the user if the device has TPM extensions */
                 if (ata_id_has_tpm(id))
-                        ata_dev_warn(dev,
+                        ata_dev_notice(dev,
                                 "supports DRM functions and may not be fully accessible\n");
         }
 
@@ -3120,8 +3120,8 @@ int ata_dev_configure(struct ata_device *dev)
         }
 
         if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
-                ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
-                ata_dev_warn(dev, "  contact the vendor or visit http://ata.wiki.kernel.org\n");
+                ata_dev_notice(dev, "WARNING: device requires firmware update to be fully functional\n");
+                ata_dev_notice(dev, "  contact the vendor or visit http://ata.wiki.kernel.org\n");
         }
 
         return 0;
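Each hunk above keeps the message text and only lowers its severity: ata_dev_warn() logs at warning level and ata_dev_notice() at notice level, so a console or CI log filter that only surfaces warnings and above stops flagging these known-benign messages while dmesg still records them. A minimal userspace sketch of that severity cutoff, assuming the kernel's numeric levels (warning = 4, notice = 5) and an example cutoff of 5:

#include <stdio.h>

/* Kernel printk severities: a lower number means more severe. */
enum { LOGLEVEL_WARNING = 4, LOGLEVEL_NOTICE = 5 };

/* A message reaches the console only when its level is numerically below
 * the cutoff, which is how console_loglevel filtering behaves. */
static void console_log(int level, int cutoff, const char *msg)
{
        if (level < cutoff)
                printf("<%d>%s", level, msg);
}

int main(void)
{
        int console_loglevel = 5;   /* example cutoff: warnings pass, notices do not */

        console_log(LOGLEVEL_WARNING, console_loglevel, "shown: NCQ Send/Recv Log not supported\n");
        console_log(LOGLEVEL_NOTICE,  console_loglevel, "suppressed on the console, still in dmesg\n");
        return 0;
}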

drivers/net/phy/phy.c (+2 -5)

@@ -1273,7 +1273,7 @@ static void phy_error_precise(struct phy_device *phydev,
  */
 void phy_error(struct phy_device *phydev)
 {
-        WARN_ON(1);
+        pr_notice_once("%s\n", __func__);
         phy_process_error(phydev);
 }
 EXPORT_SYMBOL(phy_error);
@@ -1499,11 +1499,8 @@ void phy_stop(struct phy_device *phydev)
         enum phy_state old_state;
 
         if (!phy_is_started(phydev) && phydev->state != PHY_DOWN &&
-            phydev->state != PHY_ERROR) {
-                WARN(1, "called from state %s\n",
-                     phy_state_to_str(phydev->state));
+            phydev->state != PHY_ERROR)
                 return;
-        }
 
         mutex_lock(&phydev->lock);
         old_state = phydev->state;
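phy_error() used to emit a full WARN_ON(1) backtrace on every PHY error; it now prints one notice-level line the first time only. The *_once() behaviour boils down to a per-call-site static flag. A loose sketch (not the kernel's DO_ONCE_LITE machinery; the ##__VA_ARGS__ form assumes a GNU-compatible compiler):

#include <stdio.h>

/* Loose model of the *_once() printk helpers: a static flag at the
 * expansion site makes sure the message is emitted only once. */
#define pr_notice_once(fmt, ...)                        \
        do {                                            \
                static int printed;                     \
                if (!printed) {                         \
                        printed = 1;                    \
                        printf(fmt, ##__VA_ARGS__);     \
                }                                       \
        } while (0)

/* Toy stand-in for the kernel's phy_error(). */
static void toy_phy_error(const char *name)
{
        pr_notice_once("%s: PHY error\n", name);        /* first call only */
}

int main(void)
{
        toy_phy_error("eth0");
        toy_phy_error("eth0");  /* prints nothing the second time */
        return 0;
}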

drivers/pci/msi/msi.c (+3 -1)

@@ -421,8 +421,10 @@ int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
         if (maxvec < minvec)
                 return -ERANGE;
 
-        if (WARN_ON_ONCE(dev->msi_enabled))
+        if (dev->msi_enabled) {
+                pci_info(dev, "can't enable MSI, already enabled\n");
                 return -EINVAL;
+        }
 
         nvec = pci_msi_vec_count(dev);
         if (nvec < 0)

drivers/thermal/intel/therm_throt.c (+4 -4)

@@ -345,10 +345,10 @@ static void __maybe_unused throttle_active_work(struct work_struct *work)
         avg /= ARRAY_SIZE(state->temp_samples);
 
         if (state->average > avg) {
-                pr_warn("CPU%d: %s temperature is above threshold, cpu clock is throttled (total events = %lu)\n",
-                        this_cpu,
-                        state->level == CORE_LEVEL ? "Core" : "Package",
-                        state->count);
+                pr_notice("CPU%d: %s temperature is above threshold, cpu clock is throttled (total events = %lu)\n",
+                          this_cpu,
+                          state->level == CORE_LEVEL ? "Core" : "Package",
+                          state->count);
                 state->rate_control_active = true;
         }

drivers/usb/core/usb-acpi.c (+25)

@@ -12,6 +12,7 @@
 #include <linux/acpi.h>
 #include <linux/pci.h>
 #include <linux/usb/hcd.h>
+#include <linux/dmi.h>
 
 #include "hub.h"
 
@@ -142,6 +143,19 @@ int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable)
 }
 EXPORT_SYMBOL_GPL(usb_acpi_set_power_state);
 
+static const struct dmi_system_id intel_icl_broken_acpi[] = {
+        {
+                .ident = "ICL RVP",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "Ice Lake Client Platform"),
+                },
+        },
+
+        { }
+};
+static bool acpi_connection_type_broken;
+
 /*
  * Private to usb-acpi, all the core needs to know is that
  * port_dev->location is non-zero when it has been set by the firmware.
@@ -157,6 +171,12 @@ usb_acpi_get_connect_type(struct usb_port *port_dev, acpi_handle *handle)
         struct acpi_pld_info *pld = NULL;
         acpi_status status;
 
+        /* Work around unknown ACPI instruction error on ICL RVP BIOSes. */
+        if (acpi_connection_type_broken) {
+                port_dev->connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN;
+                return;
+        }
+
         /*
          * According to 9.14 in ACPI Spec 6.2. _PLD indicates whether usb port
          * is user visible and _UPC indicates whether it is connectable. If
@@ -321,6 +341,11 @@ static struct acpi_bus_type usb_acpi_bus = {
 
 int usb_acpi_register(void)
 {
+        if (dmi_check_system(intel_icl_broken_acpi)) {
+                pr_info("USB ACPI connection type broken.\n");
+                acpi_connection_type_broken = true;
+        }
+
         return register_acpi_bus_type(&usb_acpi_bus);
 }

include/linux/lockdep_types.h (+2 -2)

@@ -251,8 +251,8 @@ struct held_lock {
         unsigned int check:1;       /* see lock_acquire() comment */
         unsigned int hardirqs_off:1;
         unsigned int sync:1;
-        unsigned int references:11; /* 32 bits */
-        unsigned int pin_count;
+        unsigned int pin_count:11;  /* 32 bits */
+        unsigned int references;
 };
 
 #else /* !CONFIG_LOCKDEP */

kernel/events/core.c (+25 -24)

@@ -5565,20 +5565,16 @@ static int __perf_read_group_add(struct perf_event *leader,
 }
 
 static int perf_read_group(struct perf_event *event,
-                           u64 read_format, char __user *buf)
+                           u64 read_format, char __user *buf,
+                           u64 *values)
 {
         struct perf_event *leader = event->group_leader, *child;
         struct perf_event_context *ctx = leader->ctx;
         int ret;
-        u64 *values;
 
         lockdep_assert_held(&ctx->mutex);
 
-        values = kzalloc(event->read_size, GFP_KERNEL);
-        if (!values)
-                return -ENOMEM;
-
-        values[0] = 1 + leader->nr_siblings;
+        *values = 1 + leader->nr_siblings;
 
         mutex_lock(&leader->child_mutex);
 
@@ -5592,25 +5588,17 @@ static int perf_read_group(struct perf_event *event,
                 goto unlock;
         }
 
-        mutex_unlock(&leader->child_mutex);
-
         ret = event->read_size;
-        if (copy_to_user(buf, values, event->read_size))
-                ret = -EFAULT;
-        goto out;
-
 unlock:
         mutex_unlock(&leader->child_mutex);
-out:
-        kfree(values);
         return ret;
 }
 
 static int perf_read_one(struct perf_event *event,
-                         u64 read_format, char __user *buf)
+                         u64 read_format, char __user *buf,
+                         u64 *values)
 {
         u64 enabled, running;
-        u64 values[5];
         int n = 0;
 
         values[n++] = __perf_event_read_value(event, &enabled, &running);
@@ -5623,9 +5611,6 @@ static int perf_read_one(struct perf_event *event,
         if (read_format & PERF_FORMAT_LOST)
                 values[n++] = atomic64_read(&event->lost_samples);
 
-        if (copy_to_user(buf, values, n * sizeof(u64)))
-                return -EFAULT;
-
         return n * sizeof(u64);
 }
 
@@ -5646,7 +5631,8 @@ static bool is_event_hup(struct perf_event *event)
  * Read the performance event - simple non blocking version for now
  */
 static ssize_t
-__perf_read(struct perf_event *event, char __user *buf, size_t count)
+__perf_read(struct perf_event *event, char __user *buf,
+            size_t count, u64 *values)
 {
         u64 read_format = event->attr.read_format;
         int ret;
@@ -5664,9 +5650,9 @@ __perf_read(struct perf_event *event, char __user *buf, size_t count)
 
         WARN_ON_ONCE(event->ctx->parent_ctx);
         if (read_format & PERF_FORMAT_GROUP)
-                ret = perf_read_group(event, read_format, buf);
+                ret = perf_read_group(event, read_format, buf, values);
         else
-                ret = perf_read_one(event, read_format, buf);
+                ret = perf_read_one(event, read_format, buf, values);
 
         return ret;
 }
@@ -5676,16 +5662,31 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
         struct perf_event *event = file->private_data;
         struct perf_event_context *ctx;
+        u64 stack_values[8];
+        u64 *values;
         int ret;
 
         ret = security_perf_event_read(event);
         if (ret)
                 return ret;
 
+        if (event->read_size <= sizeof(stack_values))
+                values = memset(stack_values, 0, event->read_size);
+        else
+                values = kzalloc(event->read_size, GFP_KERNEL);
+        if (!values)
+                return -ENOMEM;
+
         ctx = perf_event_ctx_lock(event);
-        ret = __perf_read(event, buf, count);
+        ret = __perf_read(event, buf, count, values);
         perf_event_ctx_unlock(event, ctx);
 
+        if (ret > 0 && copy_to_user(buf, values, ret))
+                ret = -EFAULT;
+
+        if (values != stack_values)
+                kfree(values);
+
         return ret;
 }
kernel/hung_task.c

+2
Original file line numberDiff line numberDiff line change
@@ -121,6 +121,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
121121
console_verbose();
122122
hung_task_show_lock = true;
123123
hung_task_call_panic = true;
124+
} else {
125+
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
124126
}
125127

126128
/*

kernel/locking/lockdep.c (+7 -4)

@@ -5517,11 +5517,14 @@ static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
 
                 if (match_held_lock(hlock, lock)) {
                         /*
-                         * Grab 16bits of randomness; this is sufficient to not
-                         * be guessable and still allows some pin nesting in
-                         * our u32 pin_count.
+                         * Grab 6bits of randomness; this is barely sufficient
+                         * to not be guessable and still allows some 32 levels
+                         * of pin nesting in our u11 pin_count.
                          */
-                        cookie.val = 1 + (sched_clock() & 0xffff);
+                        cookie.val = 1 + (sched_clock() & 0x3f);
+                        if (DEBUG_LOCKS_WARN_ON(hlock->pin_count + cookie.val >= 1 << 11))
+                                return NIL_COOKIE;
+
                         hlock->pin_count += cookie.val;
                         return cookie;
                 }
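The numbers line up as follows (my arithmetic, not part of the patch): cookie.val = 1 + (sched_clock() & 0x3f) falls in [1, 64], and an 11-bit pin_count holds values below 2^11 = 2048, so roughly 2048 / 64 = 32 nested pins fit before the new DEBUG_LOCKS_WARN_ON overflow check fires, which is the '32 levels' mentioned in the rewritten comment. A trivial check of that budget:

#include <assert.h>

int main(void)
{
        unsigned int max_cookie = 1 + 0x3f;     /* sched_clock() & 0x3f gives 0..63  */
        unsigned int pin_limit  = 1u << 11;     /* an 11-bit pin_count wraps at 2048 */

        assert(pin_limit / max_cookie == 32);   /* about 32 nested pins fit          */
        return 0;
}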

kernel/panic.c (+8 -7)

@@ -340,13 +340,6 @@ void panic(const char *fmt, ...)
                 buf[len - 1] = '\0';
 
         pr_emerg("Kernel panic - not syncing: %s\n", buf);
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-        /*
-         * Avoid nested stack-dumping if a panic occurs during oops processing
-         */
-        if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
-                dump_stack();
-#endif
 
         /*
          * If kgdb is enabled, give it a chance to run before we stop all
@@ -376,6 +369,14 @@ void panic(const char *fmt, ...)
 
         panic_print_sys_info(false);
 
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+        /*
+         * Avoid nested stack-dumping if a panic occurs during oops processing
+         */
+        if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
+                dump_stack();
+#endif
+
         kmsg_dump(KMSG_DUMP_PANIC);
 
         /*

kernel/sched/deadline.c (+1 -1)

@@ -866,7 +866,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se)
          * entity.
          */
         if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
-                printk_deferred_once("sched: DL replenish lagged too much\n");
+                printk_deferred_once(KERN_NOTICE "sched: DL replenish lagged too much\n");
                 replenish_dl_new_period(dl_se, rq);
         }
