@@ -52,6 +52,11 @@ LOG_MODULE_REGISTER(bt_conn);
52
52
53
53
K_FIFO_DEFINE (free_tx );
54
54
55
#if defined(CONFIG_BT_CONN_TX_NOTIFY_WQ)
/* Dedicated workqueue (and its stack) for processing TX-complete
 * notifications, used instead of the system workqueue when
 * CONFIG_BT_CONN_TX_NOTIFY_WQ is enabled. Started from
 * bt_conn_tx_workq_init() via SYS_INIT.
 */
static struct k_work_q conn_tx_workq;
static K_KERNEL_STACK_DEFINE(conn_tx_workq_thread_stack, CONFIG_BT_CONN_TX_NOTIFY_WQ_STACK_SIZE);
#endif /* CONFIG_BT_CONN_TX_NOTIFY_WQ */
55
60
static void tx_free (struct bt_conn_tx * tx );
56
61
57
62
static void conn_tx_destroy (struct bt_conn * conn , struct bt_conn_tx * tx )
@@ -254,12 +259,21 @@ static void tx_free(struct bt_conn_tx *tx)
254
259
}
255
260
256
261
#if defined(CONFIG_BT_CONN_TX)
/* Return the workqueue on which TX-complete notifications are processed:
 * the dedicated Bluetooth connection TX workqueue when
 * CONFIG_BT_CONN_TX_NOTIFY_WQ is enabled, the system workqueue otherwise.
 */
static struct k_work_q *tx_notify_workqueue_get(void)
{
#if defined(CONFIG_BT_CONN_TX_NOTIFY_WQ)
	return &conn_tx_workq;
#else
	return &k_sys_work_q;
#endif /* CONFIG_BT_CONN_TX_NOTIFY_WQ */
}
261
270
262
- LOG_DBG ("conn %p" , conn );
271
+ static void tx_notify_process (struct bt_conn * conn )
272
+ {
273
+ /* TX notify processing is done only from a single thread. */
274
+ __ASSERT_NO_MSG (k_current_get () == k_work_queue_thread_get (tx_notify_workqueue_get ()));
275
+
276
+ LOG_DBG ("conn %p" , (void * )conn );
263
277
264
278
while (1 ) {
265
279
struct bt_conn_tx * tx = NULL ;
@@ -300,7 +314,30 @@ static void tx_notify(struct bt_conn *conn)
300
314
bt_tx_irq_raise ();
301
315
}
302
316
}
303
- #endif /* CONFIG_BT_CONN_TX */
317
+ #endif /* CONFIG_BT_CONN_TX */
318
+
319
/* Notify pending TX-complete callbacks for @p conn.
 *
 * Processing is serialized onto a single thread (the TX notify workqueue
 * thread, see tx_notify_workqueue_get()): when called from that thread the
 * notifications are processed inline; otherwise the connection's
 * tx_complete_work item is submitted to that workqueue and, when
 * @p wait_for_completion is true, the caller blocks in k_work_flush()
 * until the work item has run.
 *
 * No-op when CONFIG_BT_CONN_TX is disabled.
 */
void bt_conn_tx_notify(struct bt_conn *conn, bool wait_for_completion)
{
#if defined(CONFIG_BT_CONN_TX)
	/* Ensure that function is called only from a single context. */
	if (k_current_get() == k_work_queue_thread_get(tx_notify_workqueue_get())) {
		tx_notify_process(conn);
	} else {
		struct k_work_sync sync;
		int err;

		err = k_work_submit_to_queue(tx_notify_workqueue_get(), &conn->tx_complete_work);
		__ASSERT(err >= 0, "couldn't submit (err %d)", err);

		if (wait_for_completion) {
			/* Flush return value (true/false: did we wait) is not needed. */
			(void)k_work_flush(&conn->tx_complete_work, &sync);
		}
	}
#else
	ARG_UNUSED(conn);
	ARG_UNUSED(wait_for_completion);
#endif /* CONFIG_BT_CONN_TX */
}
304
341
305
342
struct bt_conn * bt_conn_new (struct bt_conn * conns , size_t size )
306
343
{
@@ -439,38 +476,15 @@ static void bt_acl_recv(struct bt_conn *conn, struct net_buf *buf, uint8_t flags
439
476
bt_l2cap_recv (conn , buf , true);
440
477
}
441
478
442
- static void wait_for_tx_work (struct bt_conn * conn )
443
- {
444
- #if defined(CONFIG_BT_CONN_TX )
445
- LOG_DBG ("conn %p" , conn );
446
-
447
- if (IS_ENABLED (CONFIG_BT_RECV_WORKQ_SYS ) ||
448
- k_current_get () == k_work_queue_thread_get (& k_sys_work_q )) {
449
- tx_notify (conn );
450
- } else {
451
- struct k_work_sync sync ;
452
- int err ;
453
-
454
- err = k_work_submit (& conn -> tx_complete_work );
455
- __ASSERT (err >= 0 , "couldn't submit (err %d)" , err );
456
-
457
- k_work_flush (& conn -> tx_complete_work , & sync );
458
- }
459
- LOG_DBG ("done" );
460
- #else
461
- ARG_UNUSED (conn );
462
- #endif /* CONFIG_BT_CONN_TX */
463
- }
464
-
465
479
void bt_conn_recv (struct bt_conn * conn , struct net_buf * buf , uint8_t flags )
466
480
{
467
481
/* Make sure we notify any pending TX callbacks before processing
468
482
* new data for this connection.
469
483
*
470
484
* Always do so from the same context for sanity. In this case that will
471
- * be the system workqueue.
485
+ * be either a dedicated Bluetooth connection TX workqueue or system workqueue.
472
486
*/
473
- wait_for_tx_work (conn );
487
+ bt_conn_tx_notify (conn , true );
474
488
475
489
LOG_DBG ("handle %u len %u flags %02x" , conn -> handle , buf -> len , flags );
476
490
@@ -1250,7 +1264,7 @@ void bt_conn_set_state(struct bt_conn *conn, bt_conn_state_t state)
1250
1264
*/
1251
1265
switch (old_state ) {
1252
1266
case BT_CONN_DISCONNECT_COMPLETE :
1253
- wait_for_tx_work (conn );
1267
+ bt_conn_tx_notify (conn , true );
1254
1268
1255
1269
bt_conn_reset_rx_state (conn );
1256
1270
@@ -1641,12 +1655,9 @@ struct net_buf *bt_conn_create_pdu_timeout(struct net_buf_pool *pool,
1641
1655
#if defined(CONFIG_BT_CONN_TX )
1642
1656
static void tx_complete_work (struct k_work * work )
1643
1657
{
1644
- struct bt_conn * conn = CONTAINER_OF (work , struct bt_conn ,
1645
- tx_complete_work );
1658
+ struct bt_conn * conn = CONTAINER_OF (work , struct bt_conn , tx_complete_work );
1646
1659
1647
- LOG_DBG ("conn %p" , conn );
1648
-
1649
- tx_notify (conn );
1660
+ tx_notify_process (conn );
1650
1661
}
1651
1662
#endif /* CONFIG_BT_CONN_TX */
1652
1663
@@ -4171,3 +4182,23 @@ void bt_hci_le_df_cte_req_failed(struct net_buf *buf)
4171
4182
#endif /* CONFIG_BT_DF_CONNECTION_CTE_REQ */
4172
4183
4173
4184
#endif /* CONFIG_BT_CONN */
4185
+
4186
#if defined(CONFIG_BT_CONN_TX_NOTIFY_WQ)
/* One-time SYS_INIT hook: initialize and start the dedicated Bluetooth
 * connection TX notification workqueue at POST_KERNEL.
 *
 * Returns 0 (SYS_INIT convention; startup cannot fail here).
 */
static int bt_conn_tx_workq_init(void)
{
	const struct k_work_queue_config cfg = {
		.name = "BT CONN TX WQ",
		.no_yield = false,
		.essential = false,
	};

	k_work_queue_init(&conn_tx_workq);
	/* Stack is defined with K_KERNEL_STACK_DEFINE, so it must be sized
	 * with the matching K_KERNEL_STACK_SIZEOF (not K_THREAD_STACK_SIZEOF,
	 * which belongs to the userspace-capable stack macro family).
	 */
	k_work_queue_start(&conn_tx_workq, conn_tx_workq_thread_stack,
			   K_KERNEL_STACK_SIZEOF(conn_tx_workq_thread_stack),
			   K_PRIO_COOP(CONFIG_BT_CONN_TX_NOTIFY_WQ_PRIO), &cfg);

	return 0;
}

SYS_INIT(bt_conn_tx_workq_init, POST_KERNEL, CONFIG_BT_CONN_TX_NOTIFY_WQ_INIT_PRIORITY);
#endif /* CONFIG_BT_CONN_TX_NOTIFY_WQ */
0 commit comments