@@ -135,8 +135,31 @@ v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
 	struct v3d_stats *global_stats = &v3d->queue[queue].stats;
 	struct v3d_stats *local_stats = &file->stats[queue];
 	u64 now = local_clock();
-
-	preempt_disable();
+	unsigned long flags;
+
+	/*
+	 * We only need to disable local interrupts to appease lockdep who
+	 * otherwise would think v3d_job_start_stats vs v3d_stats_update has an
+	 * unsafe in-irq vs no-irq-off usage problem. This is a false positive
+	 * because all the locks are per queue and stats type, and all jobs are
+	 * completely one at a time serialised. More specifically:
+	 *
+	 * 1. Locks for GPU queues are updated from interrupt handlers under a
+	 *    spin lock and started here with preemption disabled.
+	 *
+	 * 2. Locks for CPU queues are updated from the worker with preemption
+	 *    disabled and equally started here with preemption disabled.
+	 *
+	 * Therefore both are consistent.
+	 *
+	 * 3. Because next job can only be queued after the previous one has
+	 *    been signaled, and locks are per queue, there is also no scope for
+	 *    the start part to race with the update part.
+	 */
+	if (IS_ENABLED(CONFIG_LOCKDEP))
+		local_irq_save(flags);
+	else
+		preempt_disable();
 
 	write_seqcount_begin(&local_stats->lock);
 	local_stats->start_ns = now;
@@ -146,7 +169,10 @@ v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
 	global_stats->start_ns = now;
 	write_seqcount_end(&global_stats->lock);
 
-	preempt_enable();
+	if (IS_ENABLED(CONFIG_LOCKDEP))
+		local_irq_restore(flags);
+	else
+		preempt_enable();
 }
 
 static void
@@ -167,11 +193,21 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
 	struct v3d_stats *global_stats = &v3d->queue[queue].stats;
 	struct v3d_stats *local_stats = &file->stats[queue];
 	u64 now = local_clock();
+	unsigned long flags;
+
+	/* See comment in v3d_job_start_stats() */
+	if (IS_ENABLED(CONFIG_LOCKDEP))
+		local_irq_save(flags);
+	else
+		preempt_disable();
 
-	preempt_disable();
 	v3d_stats_update(local_stats, now);
 	v3d_stats_update(global_stats, now);
-	preempt_enable();
+
+	if (IS_ENABLED(CONFIG_LOCKDEP))
+		local_irq_restore(flags);
+	else
+		preempt_enable();
 }
 
 static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
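
For readers less familiar with seqcounts, the following is a minimal, self-contained sketch of the writer/reader pairing that the comment in the hunk above relies on. It is not driver code: struct example_stats, example_stats_start() and example_stats_read_enabled_ns() are hypothetical names introduced only for illustration, and only the seqcount, preemption and irqflags primitives are real kernel API. It shows why a strictly serialised writer needs nothing stronger than disabled preemption for correctness, and why the irq-save variant exists purely to keep lockdep quiet when CONFIG_LOCKDEP is enabled.

/*
 * Hedged sketch modelled on the pattern in the patch above; names are
 * hypothetical and the struct layout is an assumption, not the v3d one.
 */
#include <linux/irqflags.h>
#include <linux/kconfig.h>
#include <linux/preempt.h>
#include <linux/sched/clock.h>
#include <linux/seqlock.h>
#include <linux/types.h>

struct example_stats {
	u64 start_ns;
	u64 enabled_ns;
	seqcount_t lock;	/* must be set up with seqcount_init() */
};

/*
 * Writer: callers are serialised per queue, so disabling preemption is
 * enough for correctness; interrupts are disabled only so lockdep does
 * not flag a mixed in-irq vs irq-enabled writer on the same seqcount.
 */
static void example_stats_start(struct example_stats *stats)
{
	unsigned long flags;

	if (IS_ENABLED(CONFIG_LOCKDEP))
		local_irq_save(flags);
	else
		preempt_disable();

	write_seqcount_begin(&stats->lock);
	stats->start_ns = local_clock();
	write_seqcount_end(&stats->lock);

	if (IS_ENABLED(CONFIG_LOCKDEP))
		local_irq_restore(flags);
	else
		preempt_enable();
}

/*
 * Reader: lockless; it simply retries if it raced with a writer, which
 * is why the writer never has to block readers with a spinlock.
 */
static u64 example_stats_read_enabled_ns(struct example_stats *stats)
{
	unsigned int seq;
	u64 enabled;

	do {
		seq = read_seqcount_begin(&stats->lock);
		enabled = stats->enabled_ns;
	} while (read_seqcount_retry(&stats->lock, seq));

	return enabled;
}

If this reading of the patch is right, confining local_irq_save() to CONFIG_LOCKDEP builds keeps the annotation satisfied without adding interrupt-off latency on production kernels, where plain preempt_disable() suffices because each queue's writer is strictly serialised.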