
Commit d3564d7

Auto merge of #142247 - zetanumbers:remove-parallel-deadlock-detection, r=<try>

Remove deadlock detection for benchmarking

Authored Jun 9, 2025 · 2 parents 14863ea + 1a40227 · commit d3564d7

9 files changed: +20 -381 lines
 

Cargo.lock

Lines changed: 6 additions & 3 deletions

```diff
@@ -3179,8 +3179,7 @@ dependencies = [
 [[package]]
 name = "rustc-rayon-core"
 version = "0.5.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f42932dcd3bcbe484b38a3ccf79b7906fac41c02d408b5b1bac26da3416efdb"
+source = "git+https://github.com/zetanumbers/rayon?branch=rustc-remove-deadlock-detection#3b8d9c138ab70138c2016d19fbb2801a372614f6"
 dependencies = [
  "crossbeam-deque",
  "crossbeam-utils",
@@ -4344,7 +4343,6 @@ version = "0.0.0"
 dependencies = [
  "hashbrown",
  "parking_lot",
- "rustc-rayon-core",
  "rustc_abi",
  "rustc_ast",
  "rustc_attr_data_structures",
@@ -6599,3 +6597,8 @@ dependencies = [
  "quote",
  "syn 2.0.101",
 ]
+
+[[patch.unused]]
+name = "rustc-rayon"
+version = "0.5.1"
+source = "git+https://github.com/zetanumbers/rayon?branch=rustc-remove-deadlock-detection#3b8d9c138ab70138c2016d19fbb2801a372614f6"
```

Cargo.toml

Lines changed: 4 additions & 0 deletions

```diff
@@ -60,6 +60,10 @@ exclude = [
   "obj",
 ]
 
+[patch.crates-io]
+rustc-rayon = { git = "https://github.com/zetanumbers/rayon", branch = "rustc-remove-deadlock-detection" }
+rustc-rayon-core = { git = "https://github.com/zetanumbers/rayon", branch = "rustc-remove-deadlock-detection" }
+
 [profile.release.package.rustc-rayon-core]
 # The rustc fork of Rayon has deadlock detection code which intermittently
 # causes overflows in the CI (see https://github.com/rust-lang/rust/issues/90227)
```

compiler/rustc_interface/src/util.rs

Lines changed: 1 addition & 48 deletions

```diff
@@ -18,7 +18,7 @@ use rustc_session::{EarlyDiagCtxt, Session, filesearch};
 use rustc_span::edit_distance::find_best_match_for_name;
 use rustc_span::edition::Edition;
 use rustc_span::source_map::SourceMapInputs;
-use rustc_span::{SessionGlobals, Symbol, sym};
+use rustc_span::{Symbol, sym};
 use rustc_target::spec::Target;
 use tracing::info;
 
@@ -174,13 +174,7 @@ pub(crate) fn run_in_thread_pool_with_globals<
     sm_inputs: SourceMapInputs,
     f: F,
 ) -> R {
-    use std::process;
-
-    use rustc_data_structures::defer;
     use rustc_data_structures::sync::FromDyn;
-    use rustc_middle::ty::tls;
-    use rustc_query_impl::QueryCtxt;
-    use rustc_query_system::query::{QueryContext, break_query_cycles};
 
     let thread_stack_size = init_stack_size(thread_builder_diag);
 
@@ -202,7 +196,6 @@
     }
 
     let current_gcx = FromDyn::from(CurrentGcx::new());
-    let current_gcx2 = current_gcx.clone();
 
     let proxy = Proxy::new();
 
@@ -213,46 +206,6 @@
         .acquire_thread_handler(move || proxy_.acquire_thread())
         .release_thread_handler(move || proxy__.release_thread())
         .num_threads(threads)
-        .deadlock_handler(move || {
-            // On deadlock, creates a new thread and forwards information in thread
-            // locals to it. The new thread runs the deadlock handler.
-
-            let current_gcx2 = current_gcx2.clone();
-            let registry = rayon_core::Registry::current();
-            let session_globals = rustc_span::with_session_globals(|session_globals| {
-                session_globals as *const SessionGlobals as usize
-            });
-            thread::Builder::new()
-                .name("rustc query cycle handler".to_string())
-                .spawn(move || {
-                    let on_panic = defer(|| {
-                        eprintln!("internal compiler error: query cycle handler thread panicked, aborting process");
-                        // We need to abort here as we failed to resolve the deadlock,
-                        // otherwise the compiler could just hang,
-                        process::abort();
-                    });
-
-                    // Get a `GlobalCtxt` reference from `CurrentGcx` as we cannot rely on having a
-                    // `TyCtxt` TLS reference here.
-                    current_gcx2.access(|gcx| {
-                        tls::enter_context(&tls::ImplicitCtxt::new(gcx), || {
-                            tls::with(|tcx| {
-                                // Accessing session globals is sound as they outlive `GlobalCtxt`.
-                                // They are needed to hash query keys containing spans or symbols.
-                                let query_map = rustc_span::set_session_globals_then(unsafe { &*(session_globals as *const SessionGlobals) }, || {
-                                    // Ensure there was no errors collecting all active jobs.
-                                    // We need the complete map to ensure we find a cycle to break.
-                                    QueryCtxt::new(tcx).collect_active_jobs().ok().expect("failed to collect active queries in deadlock handler")
-                                });
-                                break_query_cycles(query_map, &registry);
-                            })
-                        })
-                    });
-
-                    on_panic.disable();
-                })
-                .unwrap();
-        })
         .stack_size(thread_stack_size);
 
     // We create the session globals on the main thread, then create the thread
```
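
For context on the hook being deleted: the rustc fork of rayon-core keeps a count of blocked worker threads (`mark_blocked` / `mark_unblocked`, removed from `job.rs` below) and invokes the pool's `deadlock_handler` once every worker is blocked, which is how the cycle-handler thread above ever got spawned. The following is a minimal std-only sketch of that accounting, not rustc's or rayon's code; `Pool`, `block_current`, and `unblock_one` are hypothetical names.

```rust
// Illustrative sketch only: a std-only model of the blocked-thread accounting
// the removed deadlock handler relied on.
use std::sync::Mutex;

struct Pool {
    threads: usize,
    blocked: Mutex<usize>,
    // Invoked when every worker is asleep, i.e. nothing can make progress
    // without outside help (rustc's handler spawned the cycle-breaking thread).
    deadlock_handler: Box<dyn Fn() + Send + Sync>,
}

impl Pool {
    // Analogue of `rayon_core::mark_blocked`: called just before a worker
    // parks on a query latch.
    fn block_current(&self) {
        let mut blocked = self.blocked.lock().unwrap();
        *blocked += 1;
        if *blocked == self.threads {
            (self.deadlock_handler)();
        }
    }

    // Analogue of `rayon_core::mark_unblocked`: called for each waiter we are
    // about to wake, *before* notifying it, so the count never reports a
    // spurious deadlock mid-wakeup.
    fn unblock_one(&self) {
        *self.blocked.lock().unwrap() -= 1;
    }
}

fn main() {
    let pool = Pool {
        threads: 2,
        blocked: Mutex::new(0),
        deadlock_handler: Box::new(|| eprintln!("all workers blocked: break a query cycle")),
    };
    pool.block_current(); // one of two workers parks
    pool.block_current(); // last worker parks: the handler fires
    pool.unblock_one();
    pool.unblock_one();
}
```

The ordering in `unblock_one` mirrors the comment in the removed `break_query_cycles`: every about-to-be-woken waiter is marked unblocked before any is notified, so the counter can never reach the thread count spuriously during a wakeup.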

compiler/rustc_query_system/Cargo.toml

Lines changed: 0 additions & 1 deletion

```diff
@@ -6,7 +6,6 @@ edition = "2024"
 [dependencies]
 # tidy-alphabetical-start
 parking_lot = "0.12"
-rustc-rayon-core = { version = "0.5.0" }
 rustc_abi = { path = "../rustc_abi" }
 rustc_ast = { path = "../rustc_ast" }
 rustc_attr_data_structures = { path = "../rustc_attr_data_structures" }
```

compiler/rustc_query_system/src/query/job.rs

Lines changed: 4 additions & 323 deletions

```diff
@@ -1,16 +1,15 @@
 use std::fmt::Debug;
 use std::hash::Hash;
 use std::io::Write;
-use std::iter;
 use std::num::NonZero;
 use std::sync::Arc;
 
 use parking_lot::{Condvar, Mutex};
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::fx::FxHashMap;
 use rustc_errors::{Diag, DiagCtxtHandle};
 use rustc_hir::def::DefKind;
 use rustc_session::Session;
-use rustc_span::{DUMMY_SP, Span};
+use rustc_span::Span;
 
 use super::QueryStackFrameExtra;
 use crate::dep_graph::DepContext;
@@ -45,18 +44,6 @@ impl QueryJobId {
     fn query<I: Clone>(self, map: &QueryMap<I>) -> QueryStackFrame<I> {
         map.get(&self).unwrap().query.clone()
     }
-
-    fn span<I>(self, map: &QueryMap<I>) -> Span {
-        map.get(&self).unwrap().job.span
-    }
-
-    fn parent<I>(self, map: &QueryMap<I>) -> Option<QueryJobId> {
-        map.get(&self).unwrap().job.parent
-    }
-
-    fn latch<I>(self, map: &QueryMap<I>) -> Option<&QueryLatch<I>> {
-        map.get(&self).unwrap().job.latch.as_ref()
-    }
 }
 
 #[derive(Clone, Debug)]
@@ -173,9 +160,7 @@
 
 #[derive(Debug)]
 struct QueryWaiter<I> {
-    query: Option<QueryJobId>,
     condvar: Condvar,
-    span: Span,
     cycle: Mutex<Option<CycleError<I>>>,
 }
 
@@ -204,14 +189,8 @@
     }
 
     /// Awaits for the query job to complete.
-    pub(super) fn wait_on(
-        &self,
-        qcx: impl QueryContext,
-        query: Option<QueryJobId>,
-        span: Span,
-    ) -> Result<(), CycleError<I>> {
-        let waiter =
-            Arc::new(QueryWaiter { query, span, cycle: Mutex::new(None), condvar: Condvar::new() });
+    pub(super) fn wait_on(&self, qcx: impl QueryContext) -> Result<(), CycleError<I>> {
+        let waiter = Arc::new(QueryWaiter { cycle: Mutex::new(None), condvar: Condvar::new() });
         self.wait_on_inner(qcx, &waiter);
         // FIXME: Get rid of this lock. We have ownership of the QueryWaiter
         // although another thread may still have a Arc reference so we cannot
@@ -233,10 +212,6 @@
         // this thread.
         info.waiters.push(Arc::clone(waiter));
 
-        // If this detects a deadlock and the deadlock handler wants to resume this thread
-        // we have to be in the `wait` call. This is ensured by the deadlock handler
-        // getting the self.info lock.
-        rayon_core::mark_blocked();
         let proxy = qcx.jobserver_proxy();
         proxy.release_thread();
         waiter.condvar.wait(&mut info);
@@ -251,304 +226,10 @@ impl<I> QueryLatch<I> {
         let mut info = self.info.lock();
         debug_assert!(!info.complete);
         info.complete = true;
-        let registry = rayon_core::Registry::current();
         for waiter in info.waiters.drain(..) {
-            rayon_core::mark_unblocked(&registry);
             waiter.condvar.notify_one();
         }
     }
-
-    /// Removes a single waiter from the list of waiters.
-    /// This is used to break query cycles.
-    fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter<I>> {
-        let mut info = self.info.lock();
-        debug_assert!(!info.complete);
-        // Remove the waiter from the list of waiters
-        info.waiters.remove(waiter)
-    }
-}
-
-/// A resumable waiter of a query. The usize is the index into waiters in the query's latch
-type Waiter = (QueryJobId, usize);
-
-/// Visits all the non-resumable and resumable waiters of a query.
-/// Only waiters in a query are visited.
-/// `visit` is called for every waiter and is passed a query waiting on `query_ref`
-/// and a span indicating the reason the query waited on `query_ref`.
-/// If `visit` returns Some, this function returns.
-/// For visits of non-resumable waiters it returns the return value of `visit`.
-/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
-/// required information to resume the waiter.
-/// If all `visit` calls returns None, this function also returns None.
-fn visit_waiters<I, F>(
-    query_map: &QueryMap<I>,
-    query: QueryJobId,
-    mut visit: F,
-) -> Option<Option<Waiter>>
-where
-    F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
-{
-    // Visit the parent query which is a non-resumable waiter since it's on the same stack
-    if let Some(parent) = query.parent(query_map) {
-        if let Some(cycle) = visit(query.span(query_map), parent) {
-            return Some(cycle);
-        }
-    }
-
-    // Visit the explicit waiters which use condvars and are resumable
-    if let Some(latch) = query.latch(query_map) {
-        for (i, waiter) in latch.info.lock().waiters.iter().enumerate() {
-            if let Some(waiter_query) = waiter.query {
-                if visit(waiter.span, waiter_query).is_some() {
-                    // Return a value which indicates that this waiter can be resumed
-                    return Some(Some((query, i)));
-                }
-            }
-        }
-    }
-
-    None
-}
-
-/// Look for query cycles by doing a depth first search starting at `query`.
-/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
-/// If a cycle is detected, this initial value is replaced with the span causing
-/// the cycle.
-fn cycle_check<I>(
-    query_map: &QueryMap<I>,
-    query: QueryJobId,
-    span: Span,
-    stack: &mut Vec<(Span, QueryJobId)>,
-    visited: &mut FxHashSet<QueryJobId>,
-) -> Option<Option<Waiter>> {
-    if !visited.insert(query) {
-        return if let Some(p) = stack.iter().position(|q| q.1 == query) {
-            // We detected a query cycle, fix up the initial span and return Some
-
-            // Remove previous stack entries
-            stack.drain(0..p);
-            // Replace the span for the first query with the cycle cause
-            stack[0].0 = span;
-            Some(None)
-        } else {
-            None
-        };
-    }
-
-    // Query marked as visited is added it to the stack
-    stack.push((span, query));
-
-    // Visit all the waiters
-    let r = visit_waiters(query_map, query, |span, successor| {
-        cycle_check(query_map, successor, span, stack, visited)
-    });
-
-    // Remove the entry in our stack if we didn't find a cycle
-    if r.is_none() {
-        stack.pop();
-    }
-
-    r
-}
-
-/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
-/// from `query` without going through any of the queries in `visited`.
-/// This is achieved with a depth first search.
-fn connected_to_root<I>(
-    query_map: &QueryMap<I>,
-    query: QueryJobId,
-    visited: &mut FxHashSet<QueryJobId>,
-) -> bool {
-    // We already visited this or we're deliberately ignoring it
-    if !visited.insert(query) {
-        return false;
-    }
-
-    // This query is connected to the root (it has no query parent), return true
-    if query.parent(query_map).is_none() {
-        return true;
-    }
-
-    visit_waiters(query_map, query, |_, successor| {
-        connected_to_root(query_map, successor, visited).then_some(None)
-    })
-    .is_some()
-}
-
-// Deterministically pick an query from a list
-fn pick_query<'a, I: Clone, T, F>(query_map: &QueryMap<I>, queries: &'a [T], f: F) -> &'a T
-where
-    F: Fn(&T) -> (Span, QueryJobId),
-{
-    // Deterministically pick an entry point
-    // FIXME: Sort this instead
-    queries
-        .iter()
-        .min_by_key(|v| {
-            let (span, query) = f(v);
-            let hash = query.query(query_map).hash;
-            // Prefer entry points which have valid spans for nicer error messages
-            // We add an integer to the tuple ensuring that entry points
-            // with valid spans are picked first
-            let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
-            (span_cmp, hash)
-        })
-        .unwrap()
-}
-
-/// Looks for query cycles starting from the last query in `jobs`.
-/// If a cycle is found, all queries in the cycle is removed from `jobs` and
-/// the function return true.
-/// If a cycle was not found, the starting query is removed from `jobs` and
-/// the function returns false.
-fn remove_cycle<I: Clone>(
-    query_map: &QueryMap<I>,
-    jobs: &mut Vec<QueryJobId>,
-    wakelist: &mut Vec<Arc<QueryWaiter<I>>>,
-) -> bool {
-    let mut visited = FxHashSet::default();
-    let mut stack = Vec::new();
-    // Look for a cycle starting with the last query in `jobs`
-    if let Some(waiter) =
-        cycle_check(query_map, jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited)
-    {
-        // The stack is a vector of pairs of spans and queries; reverse it so that
-        // the earlier entries require later entries
-        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();
-
-        // Shift the spans so that queries are matched with the span for their waitee
-        spans.rotate_right(1);
-
-        // Zip them back together
-        let mut stack: Vec<_> = iter::zip(spans, queries).collect();
-
-        // Remove the queries in our cycle from the list of jobs to look at
-        for r in &stack {
-            if let Some(pos) = jobs.iter().position(|j| j == &r.1) {
-                jobs.remove(pos);
-            }
-        }
-
-        // Find the queries in the cycle which are
-        // connected to queries outside the cycle
-        let entry_points = stack
-            .iter()
-            .filter_map(|&(span, query)| {
-                if query.parent(query_map).is_none() {
-                    // This query is connected to the root (it has no query parent)
-                    Some((span, query, None))
-                } else {
-                    let mut waiters = Vec::new();
-                    // Find all the direct waiters who lead to the root
-                    visit_waiters(query_map, query, |span, waiter| {
-                        // Mark all the other queries in the cycle as already visited
-                        let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1));
-
-                        if connected_to_root(query_map, waiter, &mut visited) {
-                            waiters.push((span, waiter));
-                        }
-
-                        None
-                    });
-                    if waiters.is_empty() {
-                        None
-                    } else {
-                        // Deterministically pick one of the waiters to show to the user
-                        let waiter = *pick_query(query_map, &waiters, |s| *s);
-                        Some((span, query, Some(waiter)))
-                    }
-                }
-            })
-            .collect::<Vec<(Span, QueryJobId, Option<(Span, QueryJobId)>)>>();
-
-        // Deterministically pick an entry point
-        let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));
-
-        // Shift the stack so that our entry point is first
-        let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
-        if let Some(pos) = entry_point_pos {
-            stack.rotate_left(pos);
-        }
-
-        let usage = usage.as_ref().map(|(span, query)| (*span, query.query(query_map)));
-
-        // Create the cycle error
-        let error = CycleError {
-            usage,
-            cycle: stack
-                .iter()
-                .map(|&(s, ref q)| QueryInfo { span: s, query: q.query(query_map) })
-                .collect(),
-        };
-
-        // We unwrap `waiter` here since there must always be one
-        // edge which is resumable / waited using a query latch
-        let (waitee_query, waiter_idx) = waiter.unwrap();
-
-        // Extract the waiter we want to resume
-        let waiter = waitee_query.latch(query_map).unwrap().extract_waiter(waiter_idx);
-
-        // Set the cycle error so it will be picked up when resumed
-        *waiter.cycle.lock() = Some(error);
-
-        // Put the waiter on the list of things to resume
-        wakelist.push(waiter);
-
-        true
-    } else {
-        false
-    }
-}
-
-/// Detects query cycles by using depth first search over all active query jobs.
-/// If a query cycle is found it will break the cycle by finding an edge which
-/// uses a query latch and then resuming that waiter.
-/// There may be multiple cycles involved in a deadlock, so this searches
-/// all active queries for cycles before finally resuming all the waiters at once.
-pub fn break_query_cycles<I: Clone + Debug>(
-    query_map: QueryMap<I>,
-    registry: &rayon_core::Registry,
-) {
-    let mut wakelist = Vec::new();
-    // It is OK per the comments:
-    // - https://github.com/rust-lang/rust/pull/131200#issuecomment-2798854932
-    // - https://github.com/rust-lang/rust/pull/131200#issuecomment-2798866392
-    #[allow(rustc::potential_query_instability)]
-    let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();
-
-    let mut found_cycle = false;
-
-    while jobs.len() > 0 {
-        if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
-            found_cycle = true;
-        }
-    }
-
-    // Check that a cycle was found. It is possible for a deadlock to occur without
-    // a query cycle if a query which can be waited on uses Rayon to do multithreading
-    // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
-    // wait using Rayon on B. Rayon may then switch to executing another query (Y)
-    // which in turn will wait on X causing a deadlock. We have a false dependency from
-    // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
-    // only considers the true dependency and won't detect a cycle.
-    if !found_cycle {
-        panic!(
-            "deadlock detected as we're unable to find a query cycle to break\n\
-             current query map:\n{:#?}",
-            query_map
-        );
-    }
-
-    // Mark all the thread we're about to wake up as unblocked. This needs to be done before
-    // we wake the threads up as otherwise Rayon could detect a deadlock if a thread we
-    // resumed fell asleep and this thread had yet to mark the remaining threads as unblocked.
-    for _ in 0..wakelist.len() {
-        rayon_core::mark_unblocked(registry);
-    }
-
-    for waiter in wakelist.into_iter() {
-        waiter.condvar.notify_one();
-    }
 }
 
 #[inline(never)]
```
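
Most of the deletion above is the cycle search itself (`visit_waiters`, `cycle_check`, `connected_to_root`, `remove_cycle`). Stripped of spans, deterministic entry-point picking, and waiter resumption, its core is a plain depth-first search over the query wait-for graph. Below is a self-contained sketch of that skeleton, with a `HashMap` adjacency list standing in for `QueryMap` and plain `u32` ids for `QueryJobId`:

```rust
use std::collections::{HashMap, HashSet};

type JobId = u32;

/// Depth-first search over the wait-for graph. Returns the cycle as the
/// portion of the DFS stack from the first repeated job onward, if any.
fn find_cycle(
    waits_on: &HashMap<JobId, Vec<JobId>>,
    job: JobId,
    stack: &mut Vec<JobId>,
    visited: &mut HashSet<JobId>,
) -> Option<Vec<JobId>> {
    if !visited.insert(job) {
        // Already seen: it is a cycle only if the job is still on the stack.
        return stack.iter().position(|&j| j == job).map(|p| stack[p..].to_vec());
    }
    stack.push(job);
    for &succ in waits_on.get(&job).into_iter().flatten() {
        if let Some(cycle) = find_cycle(waits_on, succ, stack, visited) {
            return Some(cycle);
        }
    }
    stack.pop(); // no cycle through this job
    None
}

fn main() {
    // Query 1 waits on 2, 2 waits on 3, and 3 waits back on 1.
    let waits_on = HashMap::from([(1, vec![2]), (2, vec![3]), (3, vec![1])]);
    let cycle = find_cycle(&waits_on, 1, &mut Vec::new(), &mut HashSet::new());
    assert_eq!(cycle, Some(vec![1, 2, 3]));
}
```

The real `cycle_check` additionally carries the span on each edge, rotates the stack so the reported cycle starts at a deterministically chosen entry point, and hands one resumable waiter per cycle back to `remove_cycle`.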

compiler/rustc_query_system/src/query/mod.rs

Lines changed: 1 addition & 2 deletions

```diff
@@ -8,8 +8,7 @@ pub use self::plumbing::*;
 
 mod job;
 pub use self::job::{
-    QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryMap, break_query_cycles, print_query_stack,
-    report_cycle,
+    QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryMap, print_query_stack, report_cycle,
 };
 
 mod caches;
```

compiler/rustc_query_system/src/query/plumbing.rs

Lines changed: 2 additions & 4 deletions

```diff
@@ -281,10 +281,8 @@
 fn wait_for_query<Q, Qcx>(
     query: Q,
     qcx: Qcx,
-    span: Span,
     key: Q::Key,
     latch: QueryLatch<Qcx::QueryInfo>,
-    current: Option<QueryJobId>,
 ) -> (Q::Value, Option<DepNodeIndex>)
 where
     Q: QueryConfig<Qcx>,
@@ -297,7 +295,7 @@
 
     // With parallel queries we might just have to wait on some other
     // thread.
-    let result = latch.wait_on(qcx, current, span);
+    let result = latch.wait_on(qcx);
 
     match result {
         Ok(()) => {
@@ -381,7 +379,7 @@
 
         // Only call `wait_for_query` if we're using a Rayon thread pool
         // as it will attempt to mark the worker thread as blocked.
-        return wait_for_query(query, qcx, span, key, latch, current_job_id);
+        return wait_for_query(query, qcx, key, latch);
     }
 
     let id = job.id;
```
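
With the `span` and `current` job arguments gone, a thread that finds a query already executing simply parks on that query's latch until the owning thread completes it; no waiter is ever woken early with a cycle error. A minimal sketch of the simplified latch shape, using std `Mutex`/`Condvar` where the real code uses `parking_lot` and a per-waiter condvar (the type here is illustrative, not the real `QueryLatch`):

```rust
use std::sync::{Arc, Condvar, Mutex};

struct Latch {
    complete: Mutex<bool>,
    condvar: Condvar,
}

impl Latch {
    /// Park until the executing thread finishes the query.
    fn wait_on(&self) {
        let mut complete = self.complete.lock().unwrap();
        // Loop to guard against spurious wakeups.
        while !*complete {
            complete = self.condvar.wait(complete).unwrap();
        }
    }

    /// Called once by the thread that ran the query to completion.
    fn complete(&self) {
        let mut complete = self.complete.lock().unwrap();
        debug_assert!(!*complete);
        *complete = true;
        // Wake every thread parked in `wait_on`.
        self.condvar.notify_all();
    }
}

fn main() {
    let latch = Arc::new(Latch { complete: Mutex::new(false), condvar: Condvar::new() });
    let waiter = {
        let latch = Arc::clone(&latch);
        std::thread::spawn(move || latch.wait_on())
    };
    latch.complete();
    waiter.join().unwrap();
}
```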

src/tools/tidy/src/extdeps.rs

Lines changed: 1 addition & 0 deletions

```diff
@@ -8,6 +8,7 @@ const ALLOWED_SOURCES: &[&str] = &[
     r#""registry+https://github.com/rust-lang/crates.io-index""#,
     // This is `rust_team_data` used by `site` in src/tools/rustc-perf,
     r#""git+https://github.com/rust-lang/team#a5260e76d3aa894c64c56e6ddc8545b9a98043ec""#,
+    r#""git+https://github.com/zetanumbers/rayon?branch=rustc-remove-deadlock-detection#3b8d9c138ab70138c2016d19fbb2801a372614f6""#,
 ];
 
 /// Checks for external package sources. `root` is the path to the directory that contains the
```
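
This tidy change is what lets the `[patch.crates-io]` git source above pass CI: the `extdeps` check scans `Cargo.lock` for `source = ...` entries and rejects any source missing from `ALLOWED_SOURCES`. A hedged sketch of that check (the real tool reads the lockfile from the workspace root and reports through tidy's diagnostics; its exact parsing may differ):

```rust
// Hedged sketch of the tidy `extdeps` idea: flag any `source = ...` entry in
// Cargo.lock that is not in the allowlist.
const ALLOWED_SOURCES: &[&str] = &[
    r#""registry+https://github.com/rust-lang/crates.io-index""#,
    r#""git+https://github.com/zetanumbers/rayon?branch=rustc-remove-deadlock-detection#3b8d9c138ab70138c2016d19fbb2801a372614f6""#,
];

fn check(cargo_lock: &str, bad: &mut bool) {
    for line in cargo_lock.lines() {
        // Cargo.lock source entries look like: source = "registry+https://..."
        if let Some(source) = line.strip_prefix("source = ") {
            if !ALLOWED_SOURCES.contains(&source) {
                eprintln!("invalid source: {source}");
                *bad = true;
            }
        }
    }
}

fn main() {
    let mut bad = false;
    check(r#"source = "git+https://evil.example/crate""#, &mut bad);
    assert!(bad);
}
```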

tests/ui/parallel-rustc/cycle_crash.rs

Lines changed: 1 addition & 0 deletions

```diff
@@ -1,3 +1,4 @@
+//@ ignore-test
 //@ compile-flags: -Z threads=2
 
 const FOO: usize = FOO; //~ERROR cycle detected when simplifying constant for the type system `FOO`
```
