@@ -335,6 +335,8 @@ void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
 
 	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS)
 		ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+
+	file_priv->aborted = true;
 }
 
 static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
@@ -467,23 +469,23 @@ static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *
 {
 	struct ivpu_job *job;
 
-	xa_lock(&vdev->submitted_jobs_xa);
-	job = __xa_erase(&vdev->submitted_jobs_xa, job_id);
+	lockdep_assert_held(&vdev->submitted_jobs_lock);
 
+	job = xa_erase(&vdev->submitted_jobs_xa, job_id);
 	if (xa_empty(&vdev->submitted_jobs_xa) && job) {
 		vdev->busy_time = ktime_add(ktime_sub(ktime_get(), vdev->busy_start_ts),
 					    vdev->busy_time);
 	}
 
-	xa_unlock(&vdev->submitted_jobs_xa);
-
 	return job;
 }
 
 static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
 {
 	struct ivpu_job *job;
 
+	lockdep_assert_held(&vdev->submitted_jobs_lock);
+
 	job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
 	if (!job)
 		return -ENOENT;
@@ -501,6 +503,10 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
 	ivpu_stop_job_timeout_detection(vdev);
 
 	ivpu_rpm_put(vdev);
+
+	if (!xa_empty(&vdev->submitted_jobs_xa))
+		ivpu_start_job_timeout_detection(vdev);
+
 	return 0;
 }
 
@@ -509,8 +515,12 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
 	struct ivpu_job *job;
 	unsigned long id;
 
+	mutex_lock(&vdev->submitted_jobs_lock);
+
 	xa_for_each(&vdev->submitted_jobs_xa, id, job)
 		ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
+
+	mutex_unlock(&vdev->submitted_jobs_lock);
 }
 
 static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
@@ -535,15 +545,16 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
 		goto err_unlock_file_priv;
 	}
 
-	xa_lock(&vdev->submitted_jobs_xa);
+	mutex_lock(&vdev->submitted_jobs_lock);
+
 	is_first_job = xa_empty(&vdev->submitted_jobs_xa);
-	ret = __xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
-				&file_priv->job_id_next, GFP_KERNEL);
+	ret = xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
+			      &file_priv->job_id_next, GFP_KERNEL);
 	if (ret < 0) {
 		ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
 			 file_priv->ctx.id);
 		ret = -EBUSY;
-		goto err_unlock_submitted_jobs_xa;
+		goto err_unlock_submitted_jobs;
 	}
 
 	ret = ivpu_cmdq_push_job(cmdq, job);
@@ -565,19 +576,21 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
 		 job->job_id, file_priv->ctx.id, job->engine_idx, priority,
 		 job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
 
-	xa_unlock(&vdev->submitted_jobs_xa);
-
+	mutex_unlock(&vdev->submitted_jobs_lock);
 	mutex_unlock(&file_priv->lock);
 
-	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
+	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
+		mutex_lock(&vdev->submitted_jobs_lock);
 		ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+		mutex_unlock(&vdev->submitted_jobs_lock);
+	}
 
 	return 0;
 
 err_erase_xa:
-	__xa_erase(&vdev->submitted_jobs_xa, job->job_id);
-err_unlock_submitted_jobs_xa:
-	xa_unlock(&vdev->submitted_jobs_xa);
+	xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+err_unlock_submitted_jobs:
+	mutex_unlock(&vdev->submitted_jobs_lock);
 err_unlock_file_priv:
 	mutex_unlock(&file_priv->lock);
 	ivpu_rpm_put(vdev);
@@ -748,7 +761,6 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
 		       struct vpu_jsm_msg *jsm_msg)
 {
 	struct vpu_ipc_msg_payload_job_done *payload;
-	int ret;
 
 	if (!jsm_msg) {
 		ivpu_err(vdev, "IPC message has no JSM payload\n");
@@ -761,9 +773,10 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
 	}
 
 	payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
-	ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
-	if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
-		ivpu_start_job_timeout_detection(vdev);
+
+	mutex_lock(&vdev->submitted_jobs_lock);
+	ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
+	mutex_unlock(&vdev->submitted_jobs_lock);
 }
 
 void ivpu_job_done_consumer_init(struct ivpu_device *vdev)
@@ -776,3 +789,36 @@ void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
 {
 	ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
 }
+
+void ivpu_context_abort_thread_handler(struct work_struct *work)
+{
+	struct ivpu_device *vdev = container_of(work, struct ivpu_device, context_abort_work);
+	struct ivpu_file_priv *file_priv;
+	unsigned long ctx_id;
+	struct ivpu_job *job;
+	unsigned long id;
+
+	mutex_lock(&vdev->context_list_lock);
+	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+		if (!file_priv->has_mmu_faults || file_priv->aborted)
+			continue;
+
+		mutex_lock(&file_priv->lock);
+		ivpu_context_abort_locked(file_priv);
+		mutex_unlock(&file_priv->lock);
+	}
+	mutex_unlock(&vdev->context_list_lock);
+
+	if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+		return;
+	/*
+	 * In hardware scheduling mode the NPU has already stopped processing
+	 * jobs and won't send any further notifications, so we have to free
+	 * job-related resources and notify userspace ourselves.
+	 */
+	mutex_lock(&vdev->submitted_jobs_lock);
+	xa_for_each(&vdev->submitted_jobs_xa, id, job)
+		if (job->file_priv->aborted)
+			ivpu_job_signal_and_destroy(vdev, job->job_id, DRM_IVPU_JOB_STATUS_ABORTED);
+	mutex_unlock(&vdev->submitted_jobs_lock);
+}
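
The core of the patch is swapping the xarray's built-in spinlock (xa_lock()/__xa_erase()/__xa_alloc_cyclic()) for a driver-level mutex, vdev->submitted_jobs_lock, so that submission, retirement, and abort are all serialized by one sleepable lock, with lockdep_assert_held() documenting the requirement inside the helpers. Below is a minimal sketch of that pattern; the demo_* names and the ID limit are hypothetical, not the real ivpu types:

/*
 * Sketch only: hypothetical demo_* names and limits, not the ivpu code.
 * One sleepable mutex serializes every writer of the ID->job xarray.
 */
#include <linux/xarray.h>
#include <linux/mutex.h>
#include <linux/lockdep.h>

struct demo_job {
	u32 id;
};

struct demo_dev {
	struct mutex jobs_lock;		/* serializes submit/retire/abort */
	struct xarray jobs_xa;		/* job ID -> struct demo_job * */
	u32 job_id_next;		/* cursor for cyclic ID allocation */
};

static void demo_dev_init(struct demo_dev *dev)
{
	mutex_init(&dev->jobs_lock);
	xa_init_flags(&dev->jobs_xa, XA_FLAGS_ALLOC);
}

/* Caller must hold jobs_lock; lockdep enforces this on debug kernels. */
static struct demo_job *demo_job_remove(struct demo_dev *dev, u32 id)
{
	lockdep_assert_held(&dev->jobs_lock);

	/* Plain xa_erase(): cross-call serialization comes from the mutex. */
	return xa_erase(&dev->jobs_xa, id);
}

static int demo_job_submit(struct demo_dev *dev, struct demo_job *job)
{
	int ret;

	mutex_lock(&dev->jobs_lock);
	ret = xa_alloc_cyclic(&dev->jobs_xa, &job->id, job,
			      XA_LIMIT(1, 1023), &dev->job_id_next,
			      GFP_KERNEL);
	mutex_unlock(&dev->jobs_lock);

	return ret < 0 ? ret : 0;
}

With the mutex held by every caller, the plain xa_erase()/xa_alloc_cyclic() variants suffice: the xarray's internal spinlock still protects its own nodes, while atomicity across several calls (erase, emptiness check, timeout rearm) now comes from the mutex, which may be held across sleeping operations such as ivpu_rpm_put().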
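The new ivpu_context_abort_thread_handler() is a workqueue handler that recovers its device with container_of() on the embedded work_struct. A sketch of that deferral pattern follows, again with hypothetical demo_* names; the queueing site (e.g. schedule_work() from the MMU-fault path) is outside this hunk and is an assumption here:

/*
 * Sketch only: hypothetical demo_* names. Deferred-abort pattern behind
 * ivpu_context_abort_thread_handler(); the queueing site is assumed.
 */
#include <linux/workqueue.h>
#include <linux/container_of.h>

struct demo_dev {
	struct work_struct abort_work;
	/* ... contexts, jobs, locks ... */
};

static void demo_abort_handler(struct work_struct *work)
{
	/* Recover the owning device from the embedded work item. */
	struct demo_dev *dev = container_of(work, struct demo_dev, abort_work);

	/* Safe to sleep here: walk the contexts, then retire aborted jobs,
	 * taking the context-list, per-file, and jobs locks in that order. */
	(void)dev;
}

static void demo_dev_init(struct demo_dev *dev)
{
	INIT_WORK(&dev->abort_work, demo_abort_handler);
}

/* E.g. from the fault-interrupt path: defer the sleeping cleanup work. */
static void demo_on_fault(struct demo_dev *dev)
{
	schedule_work(&dev->abort_work);
}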