@@ -107,25 +107,30 @@ where
     }
 }

-pub trait ToAttrTokenStream: sync::DynSend + sync::DynSync {
-    fn to_attr_token_stream(&self) -> AttrTokenStream;
-}
-
-impl ToAttrTokenStream for AttrTokenStream {
-    fn to_attr_token_stream(&self) -> AttrTokenStream {
-        self.clone()
-    }
-}
-
-/// A lazy version of [`TokenStream`], which defers creation
-/// of an actual `TokenStream` until it is needed.
-/// `Box` is here only to reduce the structure size.
+/// A lazy version of [`AttrTokenStream`], which defers creation of an actual
+/// `AttrTokenStream` until it is needed.
 #[derive(Clone)]
-pub struct LazyAttrTokenStream(Lrc<Box<dyn ToAttrTokenStream>>);
+pub struct LazyAttrTokenStream(Lrc<LazyAttrTokenStreamInner>);

 impl LazyAttrTokenStream {
-    pub fn new(inner: impl ToAttrTokenStream + 'static) -> LazyAttrTokenStream {
-        LazyAttrTokenStream(Lrc::new(Box::new(inner)))
+    pub fn new_direct(stream: AttrTokenStream) -> LazyAttrTokenStream {
+        LazyAttrTokenStream(Lrc::new(LazyAttrTokenStreamInner::Direct(stream)))
+    }
+
+    pub fn new_pending(
+        start_token: (Token, Spacing),
+        cursor_snapshot: TokenCursor,
+        num_calls: u32,
+        break_last_token: bool,
+        node_replacements: Box<[NodeReplacement]>,
+    ) -> LazyAttrTokenStream {
+        LazyAttrTokenStream(Lrc::new(LazyAttrTokenStreamInner::Pending {
+            start_token,
+            cursor_snapshot,
+            num_calls,
+            break_last_token,
+            node_replacements,
+        }))
     }

     pub fn to_attr_token_stream(&self) -> AttrTokenStream {
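Note: the shape of this refactor (trait object replaced by a two-variant enum behind a reference-counted pointer) is easy to see in miniature. The standalone sketch below uses made-up stand-in types (`Stream`, `Lazy`, `LazyInner`), not compiler internals:

```rust
use std::sync::Arc;

// Illustrative stand-in for the real token stream type.
#[derive(Clone, Debug, PartialEq)]
struct Stream(Vec<String>);

// Same two-state shape as `LazyAttrTokenStreamInner`: either the stream
// already exists (`Direct`), or we keep just enough state to rebuild it
// on demand (`Pending`).
enum LazyInner {
    Direct(Stream),
    Pending { source: Vec<String>, num_calls: usize },
}

// Like `LazyAttrTokenStream`, the public handle is a cheap-to-clone
// reference-counted pointer to the inner state.
#[derive(Clone)]
struct Lazy(Arc<LazyInner>);

impl Lazy {
    fn new_direct(stream: Stream) -> Lazy {
        Lazy(Arc::new(LazyInner::Direct(stream)))
    }

    fn new_pending(source: Vec<String>, num_calls: usize) -> Lazy {
        Lazy(Arc::new(LazyInner::Pending { source, num_calls }))
    }

    // Forcing a `Direct` is a plain clone; forcing a `Pending` replays
    // the recorded state to build the stream from scratch.
    fn to_stream(&self) -> Stream {
        match &*self.0 {
            LazyInner::Direct(stream) => stream.clone(),
            LazyInner::Pending { source, num_calls } => {
                Stream(source.iter().take(*num_calls).cloned().collect())
            }
        }
    }
}

fn main() {
    let eager = Lazy::new_direct(Stream(vec!["a".into()]));
    let lazy = Lazy::new_pending(vec!["a".into(), "b".into(), "c".into()], 2);
    assert_eq!(eager.to_stream().0, vec!["a".to_string()]);
    assert_eq!(lazy.to_stream().0, vec!["a".to_string(), "b".to_string()]);
}
```

As in the real code, cloning the handle only bumps a reference count; the expensive reconstruction happens only when the stream is actually forced.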
@@ -208,16 +213,6 @@ impl NodeRange {
     }
 }

-// From a value of this type we can reconstruct the `TokenStream` seen by the
-// `f` callback passed to a call to `Parser::collect_tokens`, by
-// replaying the getting of the tokens. This saves us producing a `TokenStream`
-// if it is never needed, e.g. a captured `macro_rules!` argument that is never
-// passed to a proc macro. In practice, token stream creation happens rarely
-// compared to calls to `collect_tokens` (see some statistics in #78736) so we
-// are doing as little up-front work as possible.
-//
-// This also makes `Parser` very cheap to clone, since
-// there is no intermediate collection buffer to clone.
 pub struct LazyAttrTokenStreamImpl {
     pub start_token: (Token, Spacing),
     pub cursor_snapshot: TokenCursor,
@@ -226,60 +221,96 @@ pub struct LazyAttrTokenStreamImpl {
     pub node_replacements: Box<[NodeReplacement]>,
 }

-impl ToAttrTokenStream for LazyAttrTokenStreamImpl {
-    fn to_attr_token_stream(&self) -> AttrTokenStream {
-        // The token produced by the final call to `{,inlined_}next` was not
-        // actually consumed by the callback. The combination of chaining the
-        // initial token and using `take` produces the desired result - we
-        // produce an empty `TokenStream` if no calls were made, and omit the
-        // final token otherwise.
-        let mut cursor_snapshot = self.cursor_snapshot.clone();
-        let tokens = iter::once(FlatToken::Token(self.start_token.clone()))
-            .chain(iter::repeat_with(|| FlatToken::Token(cursor_snapshot.next())))
-            .take(self.num_calls as usize);
-
-        if self.node_replacements.is_empty() {
-            make_attr_token_stream(tokens, self.break_last_token)
-        } else {
-            let mut tokens: Vec<_> = tokens.collect();
-            let mut node_replacements = self.node_replacements.to_vec();
-            node_replacements.sort_by_key(|(range, _)| range.0.start);
-
-            #[cfg(debug_assertions)]
-            for [(node_range, tokens), (next_node_range, next_tokens)] in
-                node_replacements.array_windows()
-            {
-                assert!(
-                    node_range.0.end <= next_node_range.0.start,
-                    "Node ranges should be disjoint: ({:?}, {:?}) ({:?}, {:?})",
-                    node_range,
-                    tokens,
-                    next_node_range,
-                    next_tokens,
-                );
-            }
+enum LazyAttrTokenStreamInner {
+    // The token stream has already been produced.
+    Direct(AttrTokenStream),
+
+    // From a value of this type we can reconstruct the `TokenStream` seen by
+    // the `f` callback passed to a call to `Parser::collect_tokens`, by
+    // replaying the getting of the tokens. This saves us producing a
+    // `TokenStream` if it is never needed, e.g. a captured `macro_rules!`
+    // argument that is never passed to a proc macro. In practice, token stream
+    // creation happens rarely compared to calls to `collect_tokens` (see some
+    // statistics in #78736) so we are doing as little up-front work as
+    // possible.
+    //
+    // This also makes `Parser` very cheap to clone, since there is no
+    // intermediate collection buffer to clone.
+    Pending {
+        start_token: (Token, Spacing),
+        cursor_snapshot: TokenCursor,
+        num_calls: u32,
+        break_last_token: bool,
+        node_replacements: Box<[NodeReplacement]>,
+    },
+}

-            // Process the replace ranges.
-            for (node_range, target) in node_replacements.into_iter() {
-                assert!(
-                    !node_range.0.is_empty(),
-                    "Cannot replace an empty node range: {:?}",
-                    node_range.0
-                );
+impl LazyAttrTokenStreamInner {
+    fn to_attr_token_stream(&self) -> AttrTokenStream {
+        match self {
+            LazyAttrTokenStreamInner::Direct(stream) => stream.clone(),
+            LazyAttrTokenStreamInner::Pending {
+                start_token,
+                cursor_snapshot,
+                num_calls,
+                break_last_token,
+                node_replacements,
+            } => {
+                // The token produced by the final call to `{,inlined_}next` was not
+                // actually consumed by the callback. The combination of chaining the
+                // initial token and using `take` produces the desired result - we
+                // produce an empty `TokenStream` if no calls were made, and omit the
+                // final token otherwise.
+                let mut cursor_snapshot = cursor_snapshot.clone();
+                let tokens = iter::once(FlatToken::Token(start_token.clone()))
+                    .chain(iter::repeat_with(|| FlatToken::Token(cursor_snapshot.next())))
+                    .take(*num_calls as usize);
+
+                if node_replacements.is_empty() {
+                    make_attr_token_stream(tokens, *break_last_token)
+                } else {
+                    let mut tokens: Vec<_> = tokens.collect();
+                    let mut node_replacements = node_replacements.to_vec();
+                    node_replacements.sort_by_key(|(range, _)| range.0.start);
+
+                    #[cfg(debug_assertions)]
+                    for [(node_range, tokens), (next_node_range, next_tokens)] in
+                        node_replacements.array_windows()
+                    {
+                        assert!(
+                            node_range.0.end <= next_node_range.0.start,
+                            "Node ranges should be disjoint: ({:?}, {:?}) ({:?}, {:?})",
+                            node_range,
+                            tokens,
+                            next_node_range,
+                            next_tokens,
+                        );
+                    }

-                // Replace the tokens in range with zero or one `FlatToken::AttrsTarget`s, plus
-                // enough `FlatToken::Empty`s to fill up the rest of the range. This keeps the
-                // total length of `tokens` constant throughout the replacement process, allowing
-                // us to do all replacements without adjusting indices.
-                let target_len = target.is_some() as usize;
-                tokens.splice(
-                    (node_range.0.start as usize)..(node_range.0.end as usize),
-                    target.into_iter().map(|target| FlatToken::AttrsTarget(target)).chain(
-                        iter::repeat(FlatToken::Empty).take(node_range.0.len() - target_len),
-                    ),
-                );
+                    // Process the replace ranges.
+                    for (node_range, target) in node_replacements.into_iter() {
+                        assert!(
+                            !node_range.0.is_empty(),
+                            "Cannot replace an empty node range: {:?}",
+                            node_range.0
+                        );
+
+                        // Replace the tokens in range with zero or one `FlatToken::AttrsTarget`s,
+                        // plus enough `FlatToken::Empty`s to fill up the rest of the range. This
+                        // keeps the total length of `tokens` constant throughout the replacement
+                        // process, allowing us to do all replacements without adjusting indices.
+                        let target_len = target.is_some() as usize;
+                        tokens.splice(
+                            (node_range.0.start as usize)..(node_range.0.end as usize),
+                            target.into_iter().map(|target| FlatToken::AttrsTarget(target)).chain(
+                                iter::repeat(FlatToken::Empty)
+                                    .take(node_range.0.len() - target_len),
+                            ),
+                        );
+                    }
+                    make_attr_token_stream(tokens.into_iter(), *break_last_token)
+                }
             }
-            make_attr_token_stream(tokens.into_iter(), self.break_last_token)
         }
     }
 }
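Note: the replacement loop in the `Pending` arm depends on one invariant worth spelling out: every `splice` keeps `tokens` at its original length, padding with placeholders so that the sorted, disjoint ranges stay valid without any index adjustment. A standalone sketch of that trick, with plain strings standing in for `FlatToken`s and made-up names (`replace_ranges`, `"<target>"`, `"<empty>"`):

```rust
use std::iter;
use std::ops::Range;

// Each replacement swaps a non-empty range for at most one target token
// plus enough "<empty>" placeholders to restore the range's original
// length, so `tokens` never changes length and later ranges need no
// index adjustment.
fn replace_ranges(
    mut tokens: Vec<&'static str>,
    mut replacements: Vec<(Range<usize>, Option<&'static str>)>,
) -> Vec<&'static str> {
    replacements.sort_by_key(|(range, _)| range.start);
    for (range, target) in replacements {
        assert!(!range.is_empty(), "cannot replace an empty range");
        let pad = range.len() - target.is_some() as usize;
        tokens.splice(range, target.into_iter().chain(iter::repeat("<empty>").take(pad)));
    }
    tokens
}

fn main() {
    let tokens = vec!["a", "b", "c", "d", "e"];
    // Replace `b c` with one target token, and erase `e` entirely.
    let out = replace_ranges(tokens, vec![(1..3, Some("<target>")), (4..5, None)]);
    assert_eq!(out, ["a", "<target>", "<empty>", "d", "<empty>"]);
}
```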
@@ -1057,7 +1088,7 @@ mod size_asserts {
     static_assert_size!(AttrTokenStream, 8);
     static_assert_size!(AttrTokenTree, 32);
     static_assert_size!(LazyAttrTokenStream, 8);
-    static_assert_size!(LazyAttrTokenStreamImpl, 96);
+    static_assert_size!(LazyAttrTokenStreamInner, 96);
     static_assert_size!(Option<LazyAttrTokenStream>, 8); // must be small, used in many AST nodes
     static_assert_size!(TokenStream, 8);
     static_assert_size!(TokenTree, 32);
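Note: the asserted size stays at 96 bytes because the new enum's `Pending` variant carries the same fields as the old `LazyAttrTokenStreamImpl`. Outside the compiler, where `static_assert_size!` is not available, the same compile-time guard can be written with a `const` assertion; `MyNode` below is a made-up placeholder type:

```rust
// A dependency-free version of the size-assertion pattern: the check runs
// at compile time, so a layout regression fails the build instead of
// silently bloating a frequently used type.
struct MyNode {
    payload: Option<Box<u64>>, // niche optimization keeps this at 8 bytes
}

const _: () = assert!(std::mem::size_of::<MyNode>() == 8);

fn main() {
    let node = MyNode { payload: None };
    assert!(node.payload.is_none());
    println!("size check already passed at compile time");
}
```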