#include <errno.h>
#include <ksched.h>
#include <zephyr/sys/printk.h>
+ #include <zephyr/logging/log.h>
+
+ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

static inline void flag_clear(uint32_t *flagp,
                              uint32_t bit)
@@ -599,6 +602,52 @@ bool k_work_cancel_sync(struct k_work *work,
        return pending;
}

+ #if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
+ static void work_timeout_handler(struct _timeout *record)
+ {
+         struct k_work_q *queue = CONTAINER_OF(record, struct k_work_q, work_timeout_record);
+         struct k_work *work;
+         k_work_handler_t handler;
+         const char *name;
+         const char *space = " ";
+ 
+         K_SPINLOCK(&lock) {
+                 work = queue->work;
+                 handler = work->handler;
+         }
+ 
+         name = k_thread_name_get(queue->thread_id);
+         if (name == NULL) {
+                 name = "";
+                 space = "";
+         }
+ 
+         LOG_ERR("queue %p%s%s blocked by work %p with handler %p",
+                 queue, space, name, work, handler);
+ 
+         k_thread_abort(queue->thread_id);
+ }
+ 
+ static void work_timeout_start_locked(struct k_work_q *queue, struct k_work *work)
+ {
+         if (K_TIMEOUT_EQ(queue->work_timeout, K_FOREVER)) {
+                 return;
+         }
+ 
+         queue->work = work;
+         z_add_timeout(&queue->work_timeout_record, work_timeout_handler, queue->work_timeout);
+ }
+ 
+ static void work_timeout_stop_locked(struct k_work_q *queue)
+ {
+         if (K_TIMEOUT_EQ(queue->work_timeout, K_FOREVER)) {
+                 return;
+         }
+ 
+         z_abort_timeout(&queue->work_timeout_record);
+ }
+ #endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
+ 
/* Loop executed by a work queue thread.
 *
 * @param workq_ptr pointer to the work queue structure
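
If a work item's handler runs past the queue's configured timeout, work_timeout_handler() above fires from the timeout subsystem, logs the blocked queue, its thread name, and the offending work item and handler, then aborts the queue thread. As a minimal illustration (the handler below is hypothetical, not part of this change), any handler that blocks past the deadline would trip the watchdog:

/* Hypothetical offender: sleeps far longer than the queue's work_timeout,
 * so work_timeout_handler() logs the addresses and aborts the queue thread.
 */
static void stuck_handler(struct k_work *work)
{
        ARG_UNUSED(work);
        k_sleep(K_SECONDS(60));
}
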
@@ -678,6 +727,10 @@ static void work_queue_main(void *workq_ptr, void *p2, void *p3)
                        continue;
                }

+ #if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
+                 work_timeout_start_locked(queue, work);
+ #endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
+ 
                k_spin_unlock(&lock, key);

                __ASSERT_NO_MSG(handler != NULL);
@@ -690,6 +743,10 @@ static void work_queue_main(void *workq_ptr, void *p2, void *p3)
                 */
                key = k_spin_lock(&lock);

+ #if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
+                 work_timeout_stop_locked(queue);
+ #endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
+ 
                flag_clear(&work->flags, K_WORK_RUNNING_BIT);
                if (flag_test(&work->flags, K_WORK_FLUSHING_BIT)) {
                        finalize_flush_locked(work);
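
Taken together, the two hunks above bracket the handler invocation: the watchdog is armed while the queue lock is still held, left running across the (potentially long) handler call, and disarmed once the lock is retaken. A condensed sketch of the resulting sequence, not the literal work_queue_main() body:

/* Condensed sketch of the sequence in work_queue_main(); not literal code. */
work_timeout_start_locked(queue, work); /* armed under the lock            */
k_spin_unlock(&lock, key);
handler(work);                          /* watchdog fires if this overruns */
key = k_spin_lock(&lock);
work_timeout_stop_locked(queue);        /* disarmed before state cleanup   */
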
@@ -736,6 +793,14 @@ void k_work_queue_run(struct k_work_q *queue, const struct k_work_queue_config *
                k_thread_name_set(_current, cfg->name);
        }

+ #if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
+         if ((cfg != NULL) && (cfg->work_timeout_ms)) {
+                 queue->work_timeout = K_MSEC(cfg->work_timeout_ms);
+         } else {
+                 queue->work_timeout = K_FOREVER;
+         }
+ #endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
+ 
        sys_slist_init(&queue->pending);
        z_waitq_init(&queue->notifyq);
        z_waitq_init(&queue->drainq);
@@ -784,6 +849,14 @@ void k_work_queue_start(struct k_work_q *queue,
                queue->thread.base.user_options |= K_ESSENTIAL;
        }

+ #if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
+         if ((cfg != NULL) && (cfg->work_timeout_ms)) {
+                 queue->work_timeout = K_MSEC(cfg->work_timeout_ms);
+         } else {
+                 queue->work_timeout = K_FOREVER;
+         }
+ #endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
+ 
        k_thread_start(&queue->thread);
        queue->thread_id = &queue->thread;
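
For reference, a minimal usage sketch, assuming CONFIG_WORKQUEUE_WORK_TIMEOUT=y and the work_timeout_ms field this change adds to struct k_work_queue_config; the names my_wq and my_wq_stack and the 2000 ms budget are illustrative:

K_THREAD_STACK_DEFINE(my_wq_stack, 1024);
static struct k_work_q my_wq;

static const struct k_work_queue_config wq_cfg = {
        .name = "my_wq",
        .work_timeout_ms = 2000, /* abort the queue thread if one item runs > 2 s */
};

void start_my_queue(void)
{
        k_work_queue_init(&my_wq);
        k_work_queue_start(&my_wq, my_wq_stack,
                           K_THREAD_STACK_SIZEOF(my_wq_stack),
                           K_PRIO_PREEMPT(8), &wq_cfg);
}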