// tracing_subscriber/registry/sharded.rs

1use sharded_slab::{pool::Ref, Clear, Pool};
2use thread_local::ThreadLocal;
3
4use super::stack::SpanStack;
5use crate::{
6    filter::{FilterId, FilterMap, FilterState},
7    registry::{
8        extensions::{Extensions, ExtensionsInner, ExtensionsMut},
9        LookupSpan, SpanData,
10    },
11    sync::RwLock,
12};
13use core::{
14    cell::{self, Cell, RefCell},
15    sync::atomic::{fence, AtomicUsize, Ordering},
16};
17use std::thread_local;
18use tracing_core::{
19    dispatcher::{self, Dispatch},
20    span::{self, Current, Id},
21    Event, Interest, Metadata, Subscriber,
22};
23
/// A shared, reusable store for spans.
///
/// A `Registry` is a [`Subscriber`] around which multiple [`Layer`]s
/// implementing various behaviors may be [added]. Unlike other types
/// implementing `Subscriber`, `Registry` does not actually record traces itself:
/// instead, it collects and stores span data that is exposed to any [`Layer`]s
/// wrapping it through implementations of the [`LookupSpan`] trait.
/// The `Registry` is responsible for storing span metadata, recording
/// relationships between spans, and tracking which spans are active and which
/// are closed. In addition, it provides a mechanism for [`Layer`]s to store
/// user-defined per-span data, called [extensions], in the registry. This
/// allows [`Layer`]-specific data to benefit from the `Registry`'s
/// high-performance concurrent storage.
///
/// This registry is implemented using a [lock-free sharded slab][slab], and is
/// highly optimized for concurrent access.
///
/// # Span ID Generation
///
/// Span IDs are not globally unique, but the registry ensures that
/// no two currently active spans have the same ID within a process.
///
/// One of the primary responsibilities of the registry is to generate [span
/// IDs]. Therefore, it's important for other code that interacts with the
/// registry, such as [`Layer`]s, to understand the guarantees of the
/// span IDs that are generated.
///
/// The registry's span IDs are guaranteed to be unique **at a given point
/// in time**. This means that an active span will never be assigned the
/// same ID as another **currently active** span. However, the registry
/// **will** eventually reuse the IDs of [closed] spans, although an ID
/// will never be reassigned immediately after a span has closed.
///
/// Spans are not [considered closed] by the `Registry` until *every*
/// [`Span`] reference with that ID has been dropped.
///
/// Thus: span IDs generated by the registry should be considered unique
/// only at a given point in time, and only relative to other spans
/// generated by the same process. Two spans with the same ID will not exist
/// in the same process concurrently. However, if historical span data is
/// being stored, the same ID may occur for multiple spans at different
/// times in that data. If spans must be uniquely identified in historical
/// data, the user code storing this data must assign its own unique
/// identifiers to those spans. A counter is generally sufficient for this.
///
/// Similarly, span IDs generated by the registry are not unique outside of
/// a given process. Distributed tracing systems may require identifiers
/// that are unique across multiple processes on multiple machines (for
/// example, [OpenTelemetry's `SpanId`s and `TraceId`s][ot]). `tracing` span
/// IDs generated by the registry should **not** be used for this purpose.
/// Instead, code which integrates with a distributed tracing system should
/// generate and propagate its own IDs according to the rules specified by
/// the distributed tracing system. These IDs can be associated with
/// `tracing` spans using [fields] and/or [stored span data].
///
/// [span IDs]: tracing_core::span::Id
/// [slab]: sharded_slab
/// [`Layer`]: crate::Layer
/// [added]: crate::layer::Layer#composing-layers
/// [extensions]: super::Extensions
/// [closed]: https://docs.rs/tracing/latest/tracing/span/index.html#closing-spans
/// [considered closed]: tracing_core::subscriber::Subscriber::try_close()
/// [`Span`]: https://docs.rs/tracing/latest/tracing/span/struct.Span.html
/// [ot]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#spancontext
/// [fields]: tracing_core::field
/// [stored span data]: crate::registry::SpanData::extensions_mut
#[cfg(feature = "registry")]
#[cfg_attr(docsrs, doc(cfg(all(feature = "registry", feature = "std"))))]
#[derive(Debug)]
pub struct Registry {
    // Pooled storage for per-span data. A span's slot is cleared in place and
    // reused once its reference count drops to zero (see `try_close` and
    // `CloseGuard`).
    spans: Pool<DataInner>,
    // Per-thread stack of entered span IDs, used to resolve the "current"
    // (contextual) span in `enter`/`exit`/`current_span`.
    current_spans: ThreadLocal<RefCell<SpanStack>>,
    // The ID that will be handed out by the next call to
    // `LookupSpan::register_filter`; nonzero iff per-layer filters are in use.
    next_filter_id: u8,
}
98
/// Span data stored in a [`Registry`].
///
/// The registry stores well-known data defined by tracing: span relationships,
/// metadata and reference counts. Additional user-defined data provided by
/// [`Layer`s], such as formatted fields, metrics, or distributed traces should
/// be stored in the [extensions] typemap.
///
/// [`Layer`s]: crate::layer::Layer
/// [extensions]: Extensions
#[cfg(feature = "registry")]
#[cfg_attr(docsrs, doc(cfg(all(feature = "registry", feature = "std"))))]
#[derive(Debug)]
pub struct Data<'a> {
    /// Immutable reference to the pooled `DataInner` entry for this span,
    /// borrowed from the registry's slab for the lifetime `'a`.
    inner: Ref<'a, DataInner>,
}
115
/// Stored data associated with a span.
///
/// This type is pooled using [`sharded_slab::Pool`]; when a span is
/// dropped, the `DataInner` entry at that span's slab index is cleared
/// in place and reused by a future span. Thus, the `Default` and
/// [`sharded_slab::Clear`] implementations for this type are
/// load-bearing.
#[derive(Debug)]
struct DataInner {
    // Which per-layer filters enabled this span; queried by
    // `SpanData::is_enabled_for` and reset by `Clear`.
    filter_map: FilterMap,
    // The span's static metadata. Initialized to a placeholder "null"
    // metadata by `Default` and overwritten in `new_span`.
    metadata: &'static Metadata<'static>,
    // The parent span's ID, if any. Holding this ID counts as a reference to
    // the parent span, keeping it open until this entry is cleared.
    parent: Option<Id>,
    // Number of outstanding references to this span; the slot becomes
    // eligible for removal when this reaches zero (see `Registry::try_close`).
    ref_count: AtomicUsize,
    // The span's `Extensions` typemap. Allocations for the `HashMap` backing
    // this are pooled and reused in place.
    pub(crate) extensions: RwLock<ExtensionsInner>,
}
133
134// === impl Registry ===
135
136impl Default for Registry {
137    fn default() -> Self {
138        Self {
139            spans: Pool::new(),
140            current_spans: ThreadLocal::new(),
141            next_filter_id: 0,
142        }
143    }
144}
145
146#[inline]
147fn idx_to_id(idx: usize) -> Id {
148    Id::from_u64(idx as u64 + 1)
149}
150
151#[inline]
152fn id_to_idx(id: &Id) -> usize {
153    id.into_u64() as usize - 1
154}
155
/// A guard that tracks how many [`Registry`]-backed `Layer`s have
/// processed an `on_close` event.
///
/// This is needed to enable a [`Registry`]-backed Layer to access span
/// data after the `Layer` has received the `on_close` callback.
///
/// Once all `Layer`s have processed this event, the [`Registry`] knows
/// that it is able to safely remove the span tracked by `id`. `CloseGuard`
/// accomplishes this through a two-step process:
/// 1. Whenever a [`Registry`]-backed `Layer::on_close` method is
///    called, `Registry::start_close` is called.
///    `Registry::start_close` increments a thread-local `CLOSE_COUNT`
///    by 1 and returns a `CloseGuard`.
/// 2. The `CloseGuard` is dropped at the end of `Layer::on_close`. On
///    drop, `CloseGuard` checks thread-local `CLOSE_COUNT`. If
///    `CLOSE_COUNT` is 0, the `CloseGuard` removes the span with the
///    `id` from the registry, as all `Layers` that might have seen the
///    `on_close` notification have processed it. If `CLOSE_COUNT` is
///    greater than 0, `CloseGuard` decrements the counter by one and
///    _does not_ remove the span from the [`Registry`].
///
pub(crate) struct CloseGuard<'a> {
    // The ID of the span being closed.
    id: Id,
    // The registry whose slab slot will be cleared once the last guard drops.
    registry: &'a Registry,
    // Set via `set_closing` when the span is actually closing; the slot is
    // only cleared if this is `true` when the final guard drops.
    is_closing: bool,
}
182
183impl Registry {
184    fn get(&self, id: &Id) -> Option<Ref<'_, DataInner>> {
185        self.spans.get(id_to_idx(id))
186    }
187
188    /// Returns a guard which tracks how many `Layer`s have
189    /// processed an `on_close` notification via the `CLOSE_COUNT` thread-local.
190    /// For additional details, see [`CloseGuard`].
191    ///
192    pub(crate) fn start_close(&self, id: Id) -> CloseGuard<'_> {
193        CLOSE_COUNT.with(|count| {
194            let c = count.get();
195            count.set(c + 1);
196        });
197        CloseGuard {
198            id,
199            registry: self,
200            is_closing: false,
201        }
202    }
203
204    pub(crate) fn has_per_layer_filters(&self) -> bool {
205        self.next_filter_id > 0
206    }
207
208    pub(crate) fn span_stack(&self) -> cell::Ref<'_, SpanStack> {
209        self.current_spans.get_or_default().borrow()
210    }
211}
212
thread_local! {
    /// `CLOSE_COUNT` is the thread-local counter used by `CloseGuard` to
    /// track how many layers have processed the close.
    ///
    /// Concretely, this counts how many `on_close` stack frames are active on
    /// the current thread; only the outermost frame's guard removes the span.
    /// For additional details, see [`CloseGuard`].
    ///
    static CLOSE_COUNT: Cell<usize> = const { Cell::new(0) };
}
220
impl Subscriber for Registry {
    /// If per-layer filters are in use, report the interest they computed
    /// (stashed in the thread-local `FilterState`); otherwise, the registry
    /// itself is always interested.
    fn register_callsite(&self, _: &'static Metadata<'static>) -> Interest {
        if self.has_per_layer_filters() {
            return FilterState::take_interest().unwrap_or_else(Interest::always);
        }

        Interest::always()
    }

    /// Defers to the per-layer filter state when filters exist; otherwise
    /// everything is enabled (the wrapping layers decide what to record).
    fn enabled(&self, _: &Metadata<'_>) -> bool {
        if self.has_per_layer_filters() {
            return FilterState::event_enabled();
        }
        true
    }

    /// Allocates a slot for the new span, records its metadata, parent, and
    /// per-layer filter results, and returns its ID (slab index + 1).
    ///
    /// A contextual or explicit parent is `clone_span`ed so that the child
    /// holds a reference keeping the parent open.
    #[inline]
    fn new_span(&self, attrs: &span::Attributes<'_>) -> span::Id {
        let parent = if attrs.is_root() {
            None
        } else if attrs.is_contextual() {
            self.current_span().id().map(|id| self.clone_span(id))
        } else {
            attrs.parent().map(|id| self.clone_span(id))
        };

        let id = self
            .spans
            // Check out a `DataInner` entry from the pool for the new span. If
            // there are free entries already allocated in the pool, this will
            // preferentially reuse one; otherwise, a new `DataInner` is
            // allocated and added to the pool.
            .create_with(|data| {
                data.metadata = attrs.metadata();
                data.parent = parent;
                data.filter_map = crate::filter::FILTERING.with(|filtering| filtering.filter_map());
                #[cfg(debug_assertions)]
                {
                    // A non-empty filter map implies filters were registered.
                    if data.filter_map != FilterMap::new() {
                        debug_assert!(self.has_per_layer_filters());
                    }
                }

                // The entry was just checked out (or freshly cleared), so no
                // other thread can observe it yet; `get_mut` is fine here.
                let refs = data.ref_count.get_mut();
                debug_assert_eq!(*refs, 0);
                *refs = 1;
            })
            .expect("Unable to allocate another span");
        idx_to_id(id)
    }

    /// This is intentionally not implemented, as recording fields
    /// on a span is the responsibility of layers atop of this registry.
    #[inline]
    fn record(&self, _: &span::Id, _: &span::Record<'_>) {}

    fn record_follows_from(&self, _span: &span::Id, _follows: &span::Id) {}

    /// Like `enabled`, defers to the per-layer filter state when present.
    fn event_enabled(&self, _event: &Event<'_>) -> bool {
        if self.has_per_layer_filters() {
            return FilterState::event_enabled();
        }
        true
    }

    /// This is intentionally not implemented, as recording events
    /// is the responsibility of layers atop of this registry.
    fn event(&self, _: &Event<'_>) {}

    /// Pushes `id` onto this thread's span stack. When `push` reports that a
    /// new entry was added (rather than a duplicate), the stack takes an
    /// additional reference to the span via `clone_span`.
    fn enter(&self, id: &span::Id) {
        if self
            .current_spans
            .get_or_default()
            .borrow_mut()
            .push(id.clone())
        {
            self.clone_span(id);
        }
    }

    /// Pops `id` from this thread's span stack. The reference taken in
    /// `enter` is dropped through the *entire* dispatcher (not just this
    /// registry), so that wrapping layers observe the `try_close`.
    fn exit(&self, id: &span::Id) {
        if let Some(spans) = self.current_spans.get() {
            if spans.borrow_mut().pop(id) {
                dispatcher::get_default(|dispatch| dispatch.try_close(id.clone()));
            }
        }
    }

    fn clone_span(&self, id: &span::Id) -> span::Id {
        let span = self
            .get(id)
            .unwrap_or_else(|| panic!(
                "tried to clone {:?}, but no span exists with that ID\n\
                This may be caused by consuming a parent span (`parent: span`) rather than borrowing it (`parent: &span`).",
                id,
            ));
        // Like `std::sync::Arc`, adds to the ref count (on clone) don't require
        // a strong ordering; if we call `clone_span`, the reference count must
        // always be at least 1. The only synchronization necessary is between
        // calls to `try_close`: we have to ensure that all threads have
        // dropped their refs to the span before the span is closed.
        let refs = span.ref_count.fetch_add(1, Ordering::Relaxed);
        assert_ne!(
            refs, 0,
            "tried to clone a span ({:?}) that already closed",
            id
        );
        id.clone()
    }

    /// Returns the span at the top of this thread's span stack, or
    /// `Current::none()` if no span has been entered on this thread.
    fn current_span(&self) -> Current {
        self.current_spans
            .get()
            .and_then(|spans| {
                let spans = spans.borrow();
                let id = spans.current()?;
                let span = self.get(id)?;
                Some(Current::new(id.clone(), span.metadata))
            })
            .unwrap_or_else(Current::none)
    }

    /// Decrements the reference count of the span with the given `id`, and
    /// removes the span if it is zero.
    ///
    /// The allocated span slot will be reused when a new span is created.
    fn try_close(&self, id: span::Id) -> bool {
        let span = match self.get(&id) {
            Some(span) => span,
            // If we're unwinding, the span may already be gone; don't turn a
            // panic into an abort by panicking again.
            None if std::thread::panicking() => return false,
            None => panic!("tried to drop a ref to {:?}, but no such span exists!", id),
        };

        let refs = span.ref_count.fetch_sub(1, Ordering::Release);
        if !std::thread::panicking() {
            // `refs` is the value *before* the decrement; `usize::MAX` would
            // mean a previous `fetch_sub` already wrapped past zero.
            assert!(refs < usize::MAX, "reference count overflow!");
        }
        if refs > 1 {
            // Other references remain; the span is not yet closed.
            return false;
        }

        // Synchronize if we are actually removing the span (stolen
        // from std::Arc); this ensures that all other `try_close` calls on
        // other threads happen-before we actually remove the span.
        fence(Ordering::Acquire);
        true
    }
}
369
370impl<'a> LookupSpan<'a> for Registry {
371    type Data = Data<'a>;
372
373    fn span_data(&'a self, id: &Id) -> Option<Self::Data> {
374        let inner = self.get(id)?;
375        Some(Data { inner })
376    }
377
378    fn register_filter(&mut self) -> FilterId {
379        let id = FilterId::new(self.next_filter_id);
380        self.next_filter_id += 1;
381        id
382    }
383}
384
385// === impl CloseGuard ===
386
impl CloseGuard<'_> {
    /// Marks the span tracked by this guard as actually closing, so that the
    /// last guard to drop will clear the span's slot from the registry.
    pub(crate) fn set_closing(&mut self) {
        self.is_closing = true;
    }
}
392
impl Drop for CloseGuard<'_> {
    /// Decrements the thread-local `CLOSE_COUNT`; if this was the outermost
    /// `on_close` frame and the span is closing, clears its slab slot.
    fn drop(&mut self) {
        // If this returns with an error, we are already panicking. At
        // this point, there's nothing we can really do to recover
        // except by avoiding a double-panic.
        let _ = CLOSE_COUNT.try_with(|count| {
            let c = count.get();
            // Decrement the count to indicate that _this_ guard's
            // `on_close` callback has completed.
            //
            // Note that we *must* do this before we actually remove the span
            // from the registry, since dropping the `DataInner` may trigger a
            // new close, if this span is the last reference to a parent span.
            count.set(c - 1);

            // If the current close count is 1, this stack frame is the last
            // `on_close` call. If the span is closing, it's okay to remove the
            // span.
            if c == 1 && self.is_closing {
                // `Pool::clear` runs `DataInner::clear` in place, which also
                // drops this span's reference to its parent.
                self.registry.spans.clear(id_to_idx(&self.id));
            }
        });
    }
}
417
418// === impl Data ===
419
420impl<'a> SpanData<'a> for Data<'a> {
421    fn id(&self) -> Id {
422        idx_to_id(self.inner.key())
423    }
424
425    fn metadata(&self) -> &'static Metadata<'static> {
426        self.inner.metadata
427    }
428
429    fn parent(&self) -> Option<&Id> {
430        self.inner.parent.as_ref()
431    }
432
433    fn extensions(&self) -> Extensions<'_> {
434        Extensions::new(self.inner.extensions.read().expect("Mutex poisoned"))
435    }
436
437    fn extensions_mut(&self) -> ExtensionsMut<'_> {
438        ExtensionsMut::new(self.inner.extensions.write().expect("Mutex poisoned"))
439    }
440
441    #[inline]
442    fn is_enabled_for(&self, filter: FilterId) -> bool {
443        self.inner.filter_map.is_enabled(filter)
444    }
445}
446
447// === impl DataInner ===
448
impl Default for DataInner {
    /// Builds an "empty" entry with placeholder metadata, no parent, a zero
    /// reference count, and an empty extensions map; `Pool::create_with`
    /// fills in the real data before the entry is ever read.
    fn default() -> Self {
        // Since `DataInner` owns a `&'static Callsite` pointer, we need
        // something to use as the initial default value for that callsite.
        // Since we can't access a `DataInner` until it has had actual span data
        // inserted into it, the null metadata will never actually be accessed.
        struct NullCallsite;
        impl tracing_core::callsite::Callsite for NullCallsite {
            fn set_interest(&self, _: Interest) {
                unreachable!(
                    "/!\\ Tried to register the null callsite /!\\\n \
                    This should never have happened and is definitely a bug. \
                    A `tracing` bug report would be appreciated."
                )
            }

            fn metadata(&self) -> &Metadata<'_> {
                unreachable!(
                    "/!\\ Tried to access the null callsite's metadata /!\\\n \
                    This should never have happened and is definitely a bug. \
                    A `tracing` bug report would be appreciated."
                )
            }
        }

        static NULL_CALLSITE: NullCallsite = NullCallsite;
        static NULL_METADATA: Metadata<'static> = tracing_core::metadata! {
            name: "",
            target: "",
            level: tracing_core::Level::TRACE,
            fields: &[],
            callsite: &NULL_CALLSITE,
            kind: tracing_core::metadata::Kind::SPAN,
        };

        Self {
            filter_map: FilterMap::new(),
            metadata: &NULL_METADATA,
            parent: None,
            // A fresh (or freshly cleared) entry has no outstanding refs;
            // `new_span` sets this to 1 when the slot is checked out.
            ref_count: AtomicUsize::new(0),
            extensions: RwLock::new(ExtensionsInner::new()),
        }
    }
}
493
impl Clear for DataInner {
    /// Clears the span's data in place, dropping the parent's reference count.
    fn clear(&mut self) {
        // A span is not considered closed until all of its children have closed.
        // Therefore, each span's `DataInner` holds a "reference" to the parent
        // span, keeping the parent span open until all its children have closed.
        // When we close a span, we must then decrement the parent's ref count
        // (potentially, allowing it to close, if this child is the last reference
        // to that span).
        // Checking `is_some()` first lets us skip the thread-local dispatcher
        // access entirely in the common case where this span has no parent.
        if self.parent.is_some() {
            // Note that --- because `Layered::try_close` works by calling
            // `try_close` on the inner subscriber and using the return value to
            // determine whether to call the `Layer`'s `on_close` callback ---
            // we must call `try_close` on the entire subscriber stack, rather
            // than just on the registry. If the registry called `try_close` on
            // itself directly, the layers wouldn't see the close notification.
            let subscriber = dispatcher::get_default(Dispatch::clone);
            if let Some(parent) = self.parent.take() {
                let _ = subscriber.try_close(parent);
            }
        }

        // Clear (but do not deallocate!) the pooled `HashMap` for the span's extensions.
        self.extensions
            .get_mut()
            .unwrap_or_else(|l| {
                // This function can be called in a `Drop` impl, such as while
                // panicking, so ignore lock poisoning.
                l.into_inner()
            })
            .clear();

        // Reset the filter results so the reused slot starts fresh.
        self.filter_map = FilterMap::new();
    }
}
533
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{layer::Context, registry::LookupSpan, Layer};
    use std::{
        collections::HashMap,
        dbg, println,
        sync::{Arc, Mutex, Weak},
        vec::Vec,
    };
    use tracing::{self, subscriber::with_default};
    use tracing_core::{
        dispatcher,
        span::{Attributes, Id},
        Subscriber,
    };

    /// A layer that asserts span data is still accessible from within
    /// `on_close` (i.e. the registry has not yet removed the span).
    struct AssertionLayer;
    impl<S> Layer<S> for AssertionLayer
    where
        S: Subscriber + for<'a> LookupSpan<'a>,
    {
        fn on_close(&self, id: Id, ctx: Context<'_, S>) {
            dbg!(format_args!("closing {:?}", id));
            assert!(&ctx.span(&id).is_some());
        }
    }

    #[test]
    fn single_layer_can_access_closed_span() {
        let subscriber = AssertionLayer.with_subscriber(Registry::default());

        with_default(subscriber, || {
            let span = tracing::debug_span!("span");
            drop(span);
        });
    }

    #[test]
    fn multiple_layers_can_access_closed_span() {
        let subscriber = AssertionLayer
            .and_then(AssertionLayer)
            .with_subscriber(Registry::default());

        with_default(subscriber, || {
            let span = tracing::debug_span!("span");
            drop(span);
        });
    }

    /// A layer that records, by span name, which spans are open, closed,
    /// and removed, via the shared `CloseState`.
    struct CloseLayer {
        inner: Arc<Mutex<CloseState>>,
    }

    /// Test-side handle for asserting on the state recorded by `CloseLayer`.
    struct CloseHandle {
        state: Arc<Mutex<CloseState>>,
    }

    #[derive(Default)]
    struct CloseState {
        // Spans seen in `on_new_span` but not yet in `on_close`. The `Weak`
        // tracks whether the span's extensions (and thus its registry slot)
        // have been dropped.
        open: HashMap<&'static str, Weak<()>>,
        closed: Vec<(&'static str, Weak<()>)>,
    }

    #[allow(dead_code)] // Field is exercised via checking `Arc::downgrade()`
    struct SetRemoved(Arc<()>);

    impl<S> Layer<S> for CloseLayer
    where
        S: Subscriber + for<'a> LookupSpan<'a>,
    {
        fn on_new_span(&self, _: &Attributes<'_>, id: &Id, ctx: Context<'_, S>) {
            let span = ctx.span(id).expect("Missing span; this is a bug");
            let mut lock = self.inner.lock().unwrap();
            let is_removed = Arc::new(());
            assert!(
                lock.open
                    .insert(span.name(), Arc::downgrade(&is_removed))
                    .is_none(),
                "test layer saw multiple spans with the same name, the test is probably messed up"
            );
            // Stash the `Arc` in the span's extensions: the `Weak` upgrades
            // only while the span's slot (and its extensions) still exists.
            let mut extensions = span.extensions_mut();
            extensions.insert(SetRemoved(is_removed));
        }

        fn on_close(&self, id: Id, ctx: Context<'_, S>) {
            let span = if let Some(span) = ctx.span(&id) {
                span
            } else {
                println!(
                    "span {:?} did not exist in `on_close`, are we panicking?",
                    id
                );
                return;
            };
            let name = span.name();
            println!("close {} ({:?})", name, id);
            if let Ok(mut lock) = self.inner.lock() {
                if let Some(is_removed) = lock.open.remove(name) {
                    // The span must still be in the registry while `on_close`
                    // runs; it is only removed after all layers have run.
                    assert!(is_removed.upgrade().is_some());
                    lock.closed.push((name, is_removed));
                }
            }
        }
    }

    impl CloseLayer {
        /// Creates a paired layer and handle sharing one `CloseState`.
        fn new() -> (Self, CloseHandle) {
            let state = Arc::new(Mutex::new(CloseState::default()));
            (
                Self {
                    inner: state.clone(),
                },
                CloseHandle { state },
            )
        }
    }

    impl CloseState {
        fn is_open(&self, span: &str) -> bool {
            self.open.contains_key(span)
        }

        fn is_closed(&self, span: &str) -> bool {
            self.closed.iter().any(|(name, _)| name == &span)
        }
    }

    impl CloseHandle {
        fn assert_closed(&self, span: &str) {
            let lock = self.state.lock().unwrap();
            assert!(
                lock.is_closed(span),
                "expected {} to be closed{}",
                span,
                if lock.is_open(span) {
                    " (it was still open)"
                } else {
                    ", but it never existed (is there a problem with the test?)"
                }
            )
        }

        fn assert_open(&self, span: &str) {
            let lock = self.state.lock().unwrap();
            // NOTE(review): the "(it was still open)" branch below fires when
            // the span is *closed*; the message looks copy-pasted from
            // `assert_closed` — confirm the intended wording.
            assert!(
                lock.is_open(span),
                "expected {} to be open{}",
                span,
                if lock.is_closed(span) {
                    " (it was still open)"
                } else {
                    ", but it never existed (is there a problem with the test?)"
                }
            )
        }

        fn assert_removed(&self, span: &str) {
            let lock = self.state.lock().unwrap();
            // NOTE(review): in the `None` arm, `is_closed(span)` is false by
            // construction (the span is absent from `closed`), so the
            // "(it was still open)" branch is unreachable — the message never
            // reports the still-open case. Confirm whether this is intended.
            let is_removed = match lock.closed.iter().find(|(name, _)| name == &span) {
                Some((_, is_removed)) => is_removed,
                None => panic!(
                    "expected {} to be removed from the registry, but it was not closed {}",
                    span,
                    if lock.is_closed(span) {
                        " (it was still open)"
                    } else {
                        ", but it never existed (is there a problem with the test?)"
                    }
                ),
            };
            assert!(
                is_removed.upgrade().is_none(),
                "expected {} to have been removed from the registry",
                span
            )
        }

        fn assert_not_removed(&self, span: &str) {
            let lock = self.state.lock().unwrap();
            let is_removed = match lock.closed.iter().find(|(name, _)| name == &span) {
                Some((_, is_removed)) => is_removed,
                None if lock.is_open(span) => return,
                None => unreachable!(),
            };
            // NOTE(review): this asserts the span has *not* been removed, but
            // the failure message says "to have been removed" — the message
            // wording appears inverted.
            assert!(
                is_removed.upgrade().is_some(),
                "expected {} to have been removed from the registry",
                span
            )
        }

        #[allow(unused)] // may want this for future tests
        fn assert_last_closed(&self, span: Option<&str>) {
            let lock = self.state.lock().unwrap();
            let last = lock.closed.last().map(|(span, _)| span);
            assert_eq!(
                last,
                span.as_ref(),
                "expected {:?} to have closed last",
                span
            );
        }

        fn assert_closed_in_order(&self, order: impl AsRef<[&'static str]>) {
            let lock = self.state.lock().unwrap();
            let order = order.as_ref();
            for (i, name) in order.iter().enumerate() {
                assert_eq!(
                    lock.closed.get(i).map(|(span, _)| span),
                    Some(name),
                    "expected close order: {:?}, actual: {:?}",
                    order,
                    lock.closed.iter().map(|(name, _)| name).collect::<Vec<_>>()
                );
            }
        }
    }

    #[test]
    fn spans_are_removed_from_registry() {
        let (close_layer, state) = CloseLayer::new();
        let subscriber = AssertionLayer
            .and_then(close_layer)
            .with_subscriber(Registry::default());

        // Create a `Dispatch` (which is internally reference counted) so that
        // the subscriber lives to the end of the test. Otherwise, if we just
        // passed the subscriber itself to `with_default`, we could see the span
        // be dropped when the subscriber itself is dropped, destroying the
        // registry.
        let dispatch = dispatcher::Dispatch::new(subscriber);

        dispatcher::with_default(&dispatch, || {
            let span = tracing::debug_span!("span1");
            drop(span);
            let span = tracing::info_span!("span2");
            drop(span);
        });

        state.assert_removed("span1");
        state.assert_removed("span2");

        // Ensure the registry itself outlives the span.
        drop(dispatch);
    }

    #[test]
    fn spans_are_only_closed_when_the_last_ref_drops() {
        let (close_layer, state) = CloseLayer::new();
        let subscriber = AssertionLayer
            .and_then(close_layer)
            .with_subscriber(Registry::default());

        // Create a `Dispatch` (which is internally reference counted) so that
        // the subscriber lives to the end of the test. Otherwise, if we just
        // passed the subscriber itself to `with_default`, we could see the span
        // be dropped when the subscriber itself is dropped, destroying the
        // registry.
        let dispatch = dispatcher::Dispatch::new(subscriber);

        let span2 = dispatcher::with_default(&dispatch, || {
            let span = tracing::debug_span!("span1");
            drop(span);
            let span2 = tracing::info_span!("span2");
            let span2_clone = span2.clone();
            drop(span2);
            span2_clone
        });

        state.assert_removed("span1");
        state.assert_not_removed("span2");

        drop(span2);
        // NOTE(review): "span1" was already asserted removed above; "span2"
        // (whose last ref was just dropped) looks like the intended subject
        // of this assertion — confirm.
        state.assert_removed("span1");

        // Ensure the registry itself outlives the span.
        drop(dispatch);
    }

    #[test]
    fn span_enter_guards_are_dropped_out_of_order() {
        let (close_layer, state) = CloseLayer::new();
        let subscriber = AssertionLayer
            .and_then(close_layer)
            .with_subscriber(Registry::default());

        // Create a `Dispatch` (which is internally reference counted) so that
        // the subscriber lives to the end of the test. Otherwise, if we just
        // passed the subscriber itself to `with_default`, we could see the span
        // be dropped when the subscriber itself is dropped, destroying the
        // registry.
        let dispatch = dispatcher::Dispatch::new(subscriber);

        dispatcher::with_default(&dispatch, || {
            let span1 = tracing::debug_span!("span1");
            let span2 = tracing::info_span!("span2");

            let enter1 = span1.enter();
            let enter2 = span2.enter();

            drop(enter1);
            drop(span1);

            state.assert_removed("span1");
            state.assert_not_removed("span2");

            drop(enter2);
            state.assert_not_removed("span2");

            drop(span2);
            state.assert_removed("span1");
            state.assert_removed("span2");
        });
    }

    #[test]
    fn child_closes_parent() {
        // This test asserts that if a parent span's handle is dropped before
        // a child span's handle, the parent will remain open until child
        // closes, and will then be closed.

        let (close_layer, state) = CloseLayer::new();
        let subscriber = close_layer.with_subscriber(Registry::default());

        let dispatch = dispatcher::Dispatch::new(subscriber);

        dispatcher::with_default(&dispatch, || {
            let span1 = tracing::info_span!("parent");
            let span2 = tracing::info_span!(parent: &span1, "child");

            state.assert_open("parent");
            state.assert_open("child");

            drop(span1);
            state.assert_open("parent");
            state.assert_open("child");

            drop(span2);
            state.assert_closed("parent");
            state.assert_closed("child");
        });
    }

    #[test]
    fn child_closes_grandparent() {
        // This test asserts that, when a span is kept open by a child which
        // is *itself* kept open by a child, closing the grandchild will close
        // both the parent *and* the grandparent.
        let (close_layer, state) = CloseLayer::new();
        let subscriber = close_layer.with_subscriber(Registry::default());

        let dispatch = dispatcher::Dispatch::new(subscriber);

        dispatcher::with_default(&dispatch, || {
            let span1 = tracing::info_span!("grandparent");
            let span2 = tracing::info_span!(parent: &span1, "parent");
            let span3 = tracing::info_span!(parent: &span2, "child");

            state.assert_open("grandparent");
            state.assert_open("parent");
            state.assert_open("child");

            drop(span1);
            drop(span2);
            state.assert_open("grandparent");
            state.assert_open("parent");
            state.assert_open("child");

            drop(span3);

            state.assert_closed_in_order(["child", "parent", "grandparent"]);
        });
    }
}