@@ -107,25 +107,30 @@ where
     }
 }

-pub trait ToAttrTokenStream: sync::DynSend + sync::DynSync {
-    fn to_attr_token_stream(&self) -> AttrTokenStream;
-}
-
-impl ToAttrTokenStream for AttrTokenStream {
-    fn to_attr_token_stream(&self) -> AttrTokenStream {
-        self.clone()
-    }
-}
-
-/// A lazy version of [`TokenStream`], which defers creation
-/// of an actual `TokenStream` until it is needed.
-/// `Box` is here only to reduce the structure size.
+/// A lazy version of [`AttrTokenStream`], which defers creation of an actual
+/// `AttrTokenStream` until it is needed.
 #[derive(Clone)]
-pub struct LazyAttrTokenStream(Arc<Box<dyn ToAttrTokenStream>>);
+pub struct LazyAttrTokenStream(Arc<LazyAttrTokenStreamInner>);

 impl LazyAttrTokenStream {
-    pub fn new(inner: impl ToAttrTokenStream + 'static) -> LazyAttrTokenStream {
-        LazyAttrTokenStream(Arc::new(Box::new(inner)))
+    pub fn new_direct(stream: AttrTokenStream) -> LazyAttrTokenStream {
+        LazyAttrTokenStream(Arc::new(LazyAttrTokenStreamInner::Direct(stream)))
+    }
+
+    pub fn new_pending(
+        start_token: (Token, Spacing),
+        cursor_snapshot: TokenCursor,
+        num_calls: u32,
+        break_last_token: u32,
+        node_replacements: Box<[NodeReplacement]>,
+    ) -> LazyAttrTokenStream {
+        LazyAttrTokenStream(Arc::new(LazyAttrTokenStreamInner::Pending {
+            start_token,
+            cursor_snapshot,
+            num_calls,
+            break_last_token,
+            node_replacements,
+        }))
     }

     pub fn to_attr_token_stream(&self) -> AttrTokenStream {
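
Note: the `Direct`/`Pending` split introduced here is the usual "value or recipe" shape for laziness: store either the finished result or the inputs needed to rebuild it, behind one cheaply clonable `Arc` handle. A self-contained toy version of the pattern follows; all names in it are invented for illustration, none are rustc APIs.

```rust
use std::sync::Arc;

// Toy analogue of `LazyAttrTokenStreamInner`: either a finished value,
// or the inputs needed to replay its construction on demand.
enum LazyVecInner {
    Direct(Vec<u32>),
    Pending { start: u32, num_calls: u32 },
}

#[derive(Clone)]
struct LazyVec(Arc<LazyVecInner>);

impl LazyVec {
    fn new_direct(v: Vec<u32>) -> Self {
        LazyVec(Arc::new(LazyVecInner::Direct(v)))
    }

    fn new_pending(start: u32, num_calls: u32) -> Self {
        LazyVec(Arc::new(LazyVecInner::Pending { start, num_calls }))
    }

    // Analogue of `to_attr_token_stream`: a cheap clone in the `Direct`
    // case, a replay of the recorded inputs in the `Pending` case.
    fn force(&self) -> Vec<u32> {
        match &*self.0 {
            LazyVecInner::Direct(v) => v.clone(),
            LazyVecInner::Pending { start, num_calls } => {
                (*start..).take(*num_calls as usize).collect()
            }
        }
    }
}

fn main() {
    assert_eq!(LazyVec::new_direct(vec![1, 2, 3]).force(), vec![1, 2, 3]);
    assert_eq!(LazyVec::new_pending(10, 3).force(), vec![10, 11, 12]);
}
```

The rustc version replays a `TokenCursor` snapshot rather than a counter, but the control flow of `to_attr_token_stream` in the next hunk has the same two-arm shape.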
@@ -208,91 +213,109 @@ impl NodeRange {
     }
 }

-// From a value of this type we can reconstruct the `TokenStream` seen by the
-// `f` callback passed to a call to `Parser::collect_tokens`, by
-// replaying the getting of the tokens. This saves us producing a `TokenStream`
-// if it is never needed, e.g. a captured `macro_rules!` argument that is never
-// passed to a proc macro. In practice, token stream creation happens rarely
-// compared to calls to `collect_tokens` (see some statistics in #78736) so we
-// are doing as little up-front work as possible.
-//
-// This also makes `Parser` very cheap to clone, since
-// there is no intermediate collection buffer to clone.
-pub struct LazyAttrTokenStreamImpl {
-    pub start_token: (Token, Spacing),
-    pub cursor_snapshot: TokenCursor,
-    pub num_calls: u32,
-    pub break_last_token: u32,
-    pub node_replacements: Box<[NodeReplacement]>,
+enum LazyAttrTokenStreamInner {
+    // The token stream has already been produced.
+    Direct(AttrTokenStream),
+
+    // From a value of this type we can reconstruct the `TokenStream` seen by
+    // the `f` callback passed to a call to `Parser::collect_tokens`, by
+    // replaying the getting of the tokens. This saves us producing a
+    // `TokenStream` if it is never needed, e.g. a captured `macro_rules!`
+    // argument that is never passed to a proc macro. In practice, token stream
+    // creation happens rarely compared to calls to `collect_tokens` (see some
+    // statistics in #78736) so we are doing as little up-front work as
+    // possible.
+    //
+    // This also makes `Parser` very cheap to clone, since there is no
+    // intermediate collection buffer to clone.
+    Pending {
+        start_token: (Token, Spacing),
+        cursor_snapshot: TokenCursor,
+        num_calls: u32,
+        break_last_token: u32,
+        node_replacements: Box<[NodeReplacement]>,
+    },
 }

-impl ToAttrTokenStream for LazyAttrTokenStreamImpl {
+impl LazyAttrTokenStreamInner {
     fn to_attr_token_stream(&self) -> AttrTokenStream {
-        // The token produced by the final call to `{,inlined_}next` was not
-        // actually consumed by the callback. The combination of chaining the
-        // initial token and using `take` produces the desired result - we
-        // produce an empty `TokenStream` if no calls were made, and omit the
-        // final token otherwise.
-        let mut cursor_snapshot = self.cursor_snapshot.clone();
-        let tokens = iter::once(FlatToken::Token(self.start_token))
-            .chain(iter::repeat_with(|| FlatToken::Token(cursor_snapshot.next())))
-            .take(self.num_calls as usize);
-
-        if self.node_replacements.is_empty() {
-            make_attr_token_stream(tokens, self.break_last_token)
-        } else {
-            let mut tokens: Vec<_> = tokens.collect();
-            let mut node_replacements = self.node_replacements.to_vec();
-            node_replacements.sort_by_key(|(range, _)| range.0.start);
+        match self {
+            LazyAttrTokenStreamInner::Direct(stream) => stream.clone(),
+            LazyAttrTokenStreamInner::Pending {
+                start_token,
+                cursor_snapshot,
+                num_calls,
+                break_last_token,
+                node_replacements,
+            } => {
+                // The token produced by the final call to `{,inlined_}next` was not
+                // actually consumed by the callback. The combination of chaining the
+                // initial token and using `take` produces the desired result - we
+                // produce an empty `TokenStream` if no calls were made, and omit the
+                // final token otherwise.
+                let mut cursor_snapshot = cursor_snapshot.clone();
+                let tokens = iter::once(FlatToken::Token(*start_token))
+                    .chain(iter::repeat_with(|| FlatToken::Token(cursor_snapshot.next())))
+                    .take(*num_calls as usize);
+
+                if node_replacements.is_empty() {
+                    make_attr_token_stream(tokens, *break_last_token)
+                } else {
+                    let mut tokens: Vec<_> = tokens.collect();
+                    let mut node_replacements = node_replacements.to_vec();
+                    node_replacements.sort_by_key(|(range, _)| range.0.start);

-            #[cfg(debug_assertions)]
-            for [(node_range, tokens), (next_node_range, next_tokens)] in
-                node_replacements.array_windows()
-            {
-                assert!(
-                    node_range.0.end <= next_node_range.0.start
-                        || node_range.0.end >= next_node_range.0.end,
-                    "Node ranges should be disjoint or nested: ({:?}, {:?}) ({:?}, {:?})",
-                    node_range,
-                    tokens,
-                    next_node_range,
-                    next_tokens,
-                );
-            }
+                    #[cfg(debug_assertions)]
+                    for [(node_range, tokens), (next_node_range, next_tokens)] in
+                        node_replacements.array_windows()
+                    {
+                        assert!(
+                            node_range.0.end <= next_node_range.0.start
+                                || node_range.0.end >= next_node_range.0.end,
+                            "Node ranges should be disjoint or nested: ({:?}, {:?}) ({:?}, {:?})",
+                            node_range,
+                            tokens,
+                            next_node_range,
+                            next_tokens,
+                        );
+                    }

-            // Process the replace ranges, starting from the highest start
-            // position and working our way back. If we have tokens like:
-            //
-            // `#[cfg(FALSE)] struct Foo { #[cfg(FALSE)] field: bool }`
-            //
-            // Then we will generate replace ranges for both
-            // the `#[cfg(FALSE)] field: bool` and the entire
-            // `#[cfg(FALSE)] struct Foo { #[cfg(FALSE)] field: bool }`
-            //
-            // By starting processing from the replace range with the greatest
-            // start position, we ensure that any (outer) replace range which
-            // encloses another (inner) replace range will fully overwrite the
-            // inner range's replacement.
-            for (node_range, target) in node_replacements.into_iter().rev() {
-                assert!(
-                    !node_range.0.is_empty(),
-                    "Cannot replace an empty node range: {:?}",
-                    node_range.0
-                );
-
-                // Replace the tokens in range with zero or one `FlatToken::AttrsTarget`s, plus
-                // enough `FlatToken::Empty`s to fill up the rest of the range. This keeps the
-                // total length of `tokens` constant throughout the replacement process, allowing
-                // us to do all replacements without adjusting indices.
-                let target_len = target.is_some() as usize;
-                tokens.splice(
-                    (node_range.0.start as usize)..(node_range.0.end as usize),
-                    target.into_iter().map(|target| FlatToken::AttrsTarget(target)).chain(
-                        iter::repeat(FlatToken::Empty).take(node_range.0.len() - target_len),
-                    ),
-                );
+                    // Process the replace ranges, starting from the highest start
+                    // position and working our way back. If we have tokens like:
+                    //
+                    // `#[cfg(FALSE)] struct Foo { #[cfg(FALSE)] field: bool }`
+                    //
+                    // Then we will generate replace ranges for both
+                    // the `#[cfg(FALSE)] field: bool` and the entire
+                    // `#[cfg(FALSE)] struct Foo { #[cfg(FALSE)] field: bool }`
+                    //
+                    // By starting processing from the replace range with the greatest
+                    // start position, we ensure that any (outer) replace range which
+                    // encloses another (inner) replace range will fully overwrite the
+                    // inner range's replacement.
+                    for (node_range, target) in node_replacements.into_iter().rev() {
+                        assert!(
+                            !node_range.0.is_empty(),
+                            "Cannot replace an empty node range: {:?}",
+                            node_range.0
+                        );
+
+                        // Replace the tokens in range with zero or one `FlatToken::AttrsTarget`s,
+                        // plus enough `FlatToken::Empty`s to fill up the rest of the range. This
+                        // keeps the total length of `tokens` constant throughout the replacement
+                        // process, allowing us to do all replacements without adjusting indices.
+                        let target_len = target.is_some() as usize;
+                        tokens.splice(
+                            (node_range.0.start as usize)..(node_range.0.end as usize),
+                            target.into_iter().map(|target| FlatToken::AttrsTarget(target)).chain(
+                                iter::repeat(FlatToken::Empty)
+                                    .take(node_range.0.len() - target_len),
+                            ),
+                        );
+                    }
+                    make_attr_token_stream(tokens.into_iter(), *break_last_token)
+                }
             }
-            make_attr_token_stream(tokens.into_iter(), self.break_last_token)
         }
     }
 }
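
Note: the length-preserving `splice` in the `Pending` arm above is the heart of the replacement loop: each range is overwritten with at most one `AttrsTarget` plus `Empty` padding, so `tokens.len()` never changes and the indices of the remaining ranges stay valid without adjustment. A standalone sketch of the technique with toy types follows (nothing in it is a rustc API).

```rust
use std::iter;
use std::ops::Range;

#[derive(Clone, Debug, PartialEq)]
enum Tok {
    Plain(char),
    Target(char), // stands in for `FlatToken::AttrsTarget`
    Empty,        // stands in for `FlatToken::Empty`
}

// Overwrite `range` with zero or one `Target`s plus `Empty` padding. The
// iterator passed to `splice` yields exactly `range.len()` items, so the
// vector's length is unchanged and other ranges' indices remain valid.
fn replace_range(tokens: &mut Vec<Tok>, range: Range<usize>, target: Option<char>) {
    let target_len = target.is_some() as usize; // 0 or 1
    let pad = range.len() - target_len; // assumes a non-empty range, as rustc asserts
    tokens.splice(
        range,
        target.into_iter().map(Tok::Target).chain(iter::repeat(Tok::Empty).take(pad)),
    );
}

fn main() {
    let mut tokens: Vec<Tok> = "abcdef".chars().map(Tok::Plain).collect();
    let before = tokens.len();
    // Highest start position first, like the `.rev()` loop above: the inner
    // range is replaced first, then the enclosing outer range overwrites it.
    replace_range(&mut tokens, 2..4, Some('i')); // inner
    replace_range(&mut tokens, 1..5, Some('o')); // outer, encloses 2..4
    assert_eq!(tokens.len(), before); // total length never changed
    assert_eq!(
        tokens,
        vec![
            Tok::Plain('a'),
            Tok::Target('o'), // only the outer target survives
            Tok::Empty,
            Tok::Empty,
            Tok::Empty,
            Tok::Plain('f'),
        ]
    );
}
```

Because ranges are sorted by start and processed in reverse, an outer range that encloses an inner one is always processed after it and fully overwrites the inner range's replacement, which is exactly the invariant the debug assertion checks.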
@@ -1011,6 +1034,7 @@ mod size_asserts {
     static_assert_size!(AttrTokenStream, 8);
     static_assert_size!(AttrTokenTree, 32);
     static_assert_size!(LazyAttrTokenStream, 8);
+    static_assert_size!(LazyAttrTokenStreamInner, 96);
     static_assert_size!(Option<LazyAttrTokenStream>, 8); // must be small, used in many AST nodes
     static_assert_size!(TokenStream, 8);
     static_assert_size!(TokenTree, 32);
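
Note: the new assert records that the payload enum is 96 bytes while the `Arc`-wrapped `LazyAttrTokenStream` handle stays pointer-sized, which is what keeps `Option<LazyAttrTokenStream>` at 8 bytes inside AST nodes. Outside rustc, where the internal `static_assert_size!` macro is unavailable, the same kind of compile-time size check can be written as a const assertion (a sketch; the sizes assume a 64-bit target):

```rust
use std::mem::size_of;
use std::sync::Arc;

// A const item that fails to compile if either size ever changes.
const _: () = assert!(size_of::<Arc<[u64; 12]>>() == 8); // handle: one pointer
const _: () = assert!(size_of::<[u64; 12]>() == 96); // payload: 12 * 8 bytes

fn main() {}
```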