Commit 1c033f6

Revert "feat: Leverage Suppression Context in Sdk (open-telemetry#2868)"

This reverts commit 62e43c5.

1 parent 77ae46c · commit 1c033f6

File tree

17 files changed (+113 -267 lines)

docs/design/logs.md (+1 -19)

@@ -345,25 +345,7 @@ only meant for OTel components itself and anyone writing extensions like custom
 Exporters etc.
 
 // TODO: Document the principles followed when selecting severity for internal
-logs
-
-When OpenTelemetry components generate logs that could potentially feed back
-into OpenTelemetry, this can result in what is known as "telemetry-induced
-telemetry." To address this, OpenTelemetry provides a mechanism to suppress such
-telemetry using the `Context`. Components are expected to mark telemetry as
-suppressed within a specific `Context` by invoking
-`Context::enter_telemetry_suppressed_scope()`. The Logs SDK implementation
-checks this flag in the current `Context` and ignores logs if suppression is
-enabled.
-
-This mechanism relies on proper in-process propagation of the `Context`.
-However, external libraries like `hyper` and `tonic`, which are used by
-OpenTelemetry in its OTLP Exporters, do not propagate OpenTelemetry's `Context`.
-As a result, the suppression mechanism does not work out-of-the-box to suppress
-logs originating from these libraries.
-
-// TODO: Document how OTLP can solve this issue without asking external
-crates to respect and propagate OTel Context.
+logs // TODO: Document how this can cause circular loop and plans to address it.
 
 ## Summary
 
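For reference, the suppression mechanism described in the removed text worked roughly as follows. This is a minimal sketch built from the two `Context` APIs named above, not the verbatim SDK code; whether these APIs remain available after this revert depends on the `opentelemetry` crate version in use.

```rust
use opentelemetry::Context;

// Sketch of the fast path the pre-revert SdkLogger::emit performed: if the
// current Context is marked suppressed, drop the record before any
// processor or exporter is invoked.
fn emit_record(process: impl FnOnce()) {
    if Context::is_current_telemetry_suppressed() {
        return; // telemetry-induced telemetry is dropped here
    }
    process();
}

fn main() {
    // Outside a suppressed scope, the record goes through.
    emit_record(|| println!("exported"));

    // Inside a suppressed scope (entered by OTel components around their
    // own work), the same call becomes a no-op. The guard restores the
    // previous Context when dropped.
    let _guard = Context::enter_telemetry_suppressed_scope();
    emit_record(|| println!("this would not print pre-revert"));
}
```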

examples/logs-basic/src/main.rs (+10 -12)

@@ -15,21 +15,19 @@ fn main() {
         .with_simple_exporter(exporter)
         .build();
 
-    // To prevent a telemetry-induced-telemetry loop, OpenTelemetry's own internal
-    // logging is properly suppressed. However, logs emitted by external components
-    // (such as reqwest, tonic, etc.) are not suppressed as they do not propagate
-    // OpenTelemetry context. Until this issue is addressed
-    // (https://github.com/open-telemetry/opentelemetry-rust/issues/2877),
-    // filtering like this is the best way to suppress such logs.
-    //
-    // The filter levels are set as follows:
+    // For the OpenTelemetry layer, add a tracing filter to filter events from
+    // OpenTelemetry and its dependent crates (opentelemetry-otlp uses crates
+    // like reqwest/tonic etc.) from being sent back to OTel itself, thus
+    // preventing infinite telemetry generation. The filter levels are set as
+    // follows:
     // - Allow `info` level and above by default.
-    // - Completely restrict logs from `hyper`, `tonic`, `h2`, and `reqwest`.
-    //
-    // Note: This filtering will also drop logs from these components even when
-    // they are used outside of the OTLP Exporter.
+    // - Restrict `opentelemetry`, `hyper`, `tonic`, and `reqwest` completely.
+    // Note: This will also drop events from crates like `tonic` etc. even when
+    // they are used outside the OTLP Exporter. For more details, see:
+    // https://github.com/open-telemetry/opentelemetry-rust/issues/761
     let filter_otel = EnvFilter::new("info")
         .add_directive("hyper=off".parse().unwrap())
+        .add_directive("opentelemetry=off".parse().unwrap())
         .add_directive("tonic=off".parse().unwrap())
         .add_directive("h2=off".parse().unwrap())
         .add_directive("reqwest=off".parse().unwrap());

opentelemetry-appender-tracing/examples/basic.rs (+9 -12)

@@ -16,19 +16,16 @@ fn main() {
         .with_simple_exporter(exporter)
         .build();
 
-    // To prevent a telemetry-induced-telemetry loop, OpenTelemetry's own internal
-    // logging is properly suppressed. However, logs emitted by external components
-    // (such as reqwest, tonic, etc.) are not suppressed as they do not propagate
-    // OpenTelemetry context. Until this issue is addressed
-    // (https://github.com/open-telemetry/opentelemetry-rust/issues/2877),
-    // filtering like this is the best way to suppress such logs.
-    //
-    // The filter levels are set as follows:
+    // For the OpenTelemetry layer, add a tracing filter to filter events from
+    // OpenTelemetry and its dependent crates (opentelemetry-otlp uses crates
+    // like reqwest/tonic etc.) from being sent back to OTel itself, thus
+    // preventing infinite telemetry generation. The filter levels are set as
+    // follows:
     // - Allow `info` level and above by default.
-    // - Completely restrict logs from `hyper`, `tonic`, `h2`, and `reqwest`.
-    //
-    // Note: This filtering will also drop logs from these components even when
-    // they are used outside of the OTLP Exporter.
+    // - Restrict `opentelemetry`, `hyper`, `tonic`, and `reqwest` completely.
+    // Note: This will also drop events from crates like `tonic` etc. even when
+    // they are used outside the OTLP Exporter. For more details, see:
+    // https://github.com/open-telemetry/opentelemetry-rust/issues/761
     let filter_otel = EnvFilter::new("info")
         .add_directive("hyper=off".parse().unwrap())
         .add_directive("opentelemetry=off".parse().unwrap())

opentelemetry-appender-tracing/src/layer.rs (+67 -2)

@@ -289,11 +289,13 @@ mod tests {
     use opentelemetry::{logs::AnyValue, Key};
     use opentelemetry_sdk::error::{OTelSdkError, OTelSdkResult};
     use opentelemetry_sdk::logs::{InMemoryLogExporter, LogProcessor};
+    use opentelemetry_sdk::logs::{LogBatch, LogExporter};
     use opentelemetry_sdk::logs::{SdkLogRecord, SdkLoggerProvider};
     use opentelemetry_sdk::trace::{Sampler, SdkTracerProvider};
-    use tracing::error;
+    use tracing::{error, warn};
     use tracing_subscriber::prelude::__tracing_subscriber_SubscriberExt;
-    use tracing_subscriber::Layer;
+    use tracing_subscriber::util::SubscriberInitExt;
+    use tracing_subscriber::{EnvFilter, Layer};
 
     pub fn attributes_contains(log_record: &SdkLogRecord, key: &Key, value: &AnyValue) -> bool {
         log_record
@@ -311,6 +313,69 @@ mod tests {
     }
 
     // cargo test --features=testing
+
+    #[derive(Clone, Debug, Default)]
+    struct ReentrantLogExporter;
+
+    impl LogExporter for ReentrantLogExporter {
+        async fn export(&self, _batch: LogBatch<'_>) -> OTelSdkResult {
+            // This will cause a deadlock as the export itself creates a log
+            // while still within the lock of the SimpleLogProcessor.
+            warn!(name: "my-event-name", target: "reentrant", event_id = 20, user_name = "otel", user_email = "[email protected]");
+            Ok(())
+        }
+    }
+
+    #[test]
+    #[ignore = "See issue: https://github.com/open-telemetry/opentelemetry-rust/issues/1745"]
+    fn simple_processor_deadlock() {
+        let exporter: ReentrantLogExporter = ReentrantLogExporter;
+        let logger_provider = SdkLoggerProvider::builder()
+            .with_simple_exporter(exporter.clone())
+            .build();
+
+        let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider);
+
+        // Setting subscriber as global as that is the only way to test this scenario.
+        tracing_subscriber::registry().with(layer).init();
+        warn!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "[email protected]");
+    }
+
+    #[test]
+    #[ignore = "While this test runs fine, this uses global subscriber and does not play well with other tests."]
+    fn simple_processor_no_deadlock() {
+        let exporter: ReentrantLogExporter = ReentrantLogExporter;
+        let logger_provider = SdkLoggerProvider::builder()
+            .with_simple_exporter(exporter.clone())
+            .build();
+
+        let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider);
+
+        // This filter will prevent the deadlock as the reentrant log will be
+        // ignored.
+        let filter = EnvFilter::new("debug").add_directive("reentrant=error".parse().unwrap());
+        // Setting subscriber as global as that is the only way to test this scenario.
+        tracing_subscriber::registry()
+            .with(filter)
+            .with(layer)
+            .init();
+        warn!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "[email protected]");
+    }
+
+    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+    #[ignore = "While this test runs fine, this uses global subscriber and does not play well with other tests."]
+    async fn batch_processor_no_deadlock() {
+        let exporter: ReentrantLogExporter = ReentrantLogExporter;
+        let logger_provider = SdkLoggerProvider::builder()
+            .with_batch_exporter(exporter.clone())
+            .build();
+
+        let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider);
+
+        tracing_subscriber::registry().with(layer).init();
+        warn!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "[email protected]");
+    }
+
     #[test]
     fn tracing_appender_standalone() {
         // Arrange
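The `simple_processor_deadlock` test above encodes a reentrancy hazard: `SimpleLogProcessor` holds a lock across the `export` call, so an exporter that logs through the same pipeline re-enters emit while the lock is already held. Below is a standalone toy illustration of that hazard using a plain `std::sync::Mutex` and a hypothetical `ToyProcessor`; it is not the actual SDK code.

```rust
use std::sync::Mutex;

// Toy stand-in for a processor that locks its exporter for the duration
// of each export, as SimpleLogProcessor does.
struct ToyProcessor {
    exporter_lock: Mutex<()>,
}

impl ToyProcessor {
    fn emit(&self, export: &dyn Fn(&ToyProcessor)) {
        match self.exporter_lock.try_lock() {
            // Lock is held for the whole export call.
            Ok(_guard) => export(self),
            // With a blocking lock() here, a reentrant emit would wait
            // forever on the lock it already holds; try_lock makes the
            // reentrancy observable instead of deadlocking.
            Err(_) => eprintln!("reentrant emit detected: would deadlock"),
        }
    }
}

fn main() {
    let p = ToyProcessor { exporter_lock: Mutex::new(()) };
    // An "exporter" that logs by calling back into the same processor.
    p.emit(&|processor| processor.emit(&|_| {}));
}
```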

opentelemetry-otlp/examples/basic-otlp-http/src/main.rs (+10 -12)

@@ -72,21 +72,19 @@ async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
     // Create a new OpenTelemetryTracingBridge using the above LoggerProvider.
     let otel_layer = OpenTelemetryTracingBridge::new(&logger_provider);
 
-    // To prevent a telemetry-induced-telemetry loop, OpenTelemetry's own internal
-    // logging is properly suppressed. However, logs emitted by external components
-    // (such as reqwest, tonic, etc.) are not suppressed as they do not propagate
-    // OpenTelemetry context. Until this issue is addressed
-    // (https://github.com/open-telemetry/opentelemetry-rust/issues/2877),
-    // filtering like this is the best way to suppress such logs.
-    //
-    // The filter levels are set as follows:
+    // For the OpenTelemetry layer, add a tracing filter to filter events from
+    // OpenTelemetry and its dependent crates (opentelemetry-otlp uses crates
+    // like reqwest/tonic etc.) from being sent back to OTel itself, thus
+    // preventing infinite telemetry generation. The filter levels are set as
+    // follows:
     // - Allow `info` level and above by default.
-    // - Completely restrict logs from `hyper`, `tonic`, `h2`, and `reqwest`.
-    //
-    // Note: This filtering will also drop logs from these components even when
-    // they are used outside of the OTLP Exporter.
+    // - Restrict `opentelemetry`, `hyper`, `tonic`, and `reqwest` completely.
+    // Note: This will also drop events from crates like `tonic` etc. even when
+    // they are used outside the OTLP Exporter. For more details, see:
+    // https://github.com/open-telemetry/opentelemetry-rust/issues/761
     let filter_otel = EnvFilter::new("info")
         .add_directive("hyper=off".parse().unwrap())
+        .add_directive("opentelemetry=off".parse().unwrap())
         .add_directive("tonic=off".parse().unwrap())
         .add_directive("h2=off".parse().unwrap())
         .add_directive("reqwest=off".parse().unwrap());

opentelemetry-otlp/examples/basic-otlp/src/main.rs (+10 -12)

@@ -66,21 +66,19 @@ async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
     // Create a new OpenTelemetryTracingBridge using the above LoggerProvider.
     let otel_layer = OpenTelemetryTracingBridge::new(&logger_provider);
 
-    // To prevent a telemetry-induced-telemetry loop, OpenTelemetry's own internal
-    // logging is properly suppressed. However, logs emitted by external components
-    // (such as reqwest, tonic, etc.) are not suppressed as they do not propagate
-    // OpenTelemetry context. Until this issue is addressed
-    // (https://github.com/open-telemetry/opentelemetry-rust/issues/2877),
-    // filtering like this is the best way to suppress such logs.
-    //
-    // The filter levels are set as follows:
+    // For the OpenTelemetry layer, add a tracing filter to filter events from
+    // OpenTelemetry and its dependent crates (opentelemetry-otlp uses crates
+    // like reqwest/tonic etc.) from being sent back to OTel itself, thus
+    // preventing infinite telemetry generation. The filter levels are set as
+    // follows:
     // - Allow `info` level and above by default.
-    // - Completely restrict logs from `hyper`, `tonic`, `h2`, and `reqwest`.
-    //
-    // Note: This filtering will also drop logs from these components even when
-    // they are used outside of the OTLP Exporter.
+    // - Restrict `opentelemetry`, `hyper`, `tonic`, and `reqwest` completely.
+    // Note: This will also drop events from crates like `tonic` etc. even when
+    // they are used outside the OTLP Exporter. For more details, see:
+    // https://github.com/open-telemetry/opentelemetry-rust/issues/761
     let filter_otel = EnvFilter::new("info")
         .add_directive("hyper=off".parse().unwrap())
+        .add_directive("opentelemetry=off".parse().unwrap())
         .add_directive("tonic=off".parse().unwrap())
         .add_directive("h2=off".parse().unwrap())
         .add_directive("reqwest=off".parse().unwrap());

opentelemetry-sdk/CHANGELOG.md (-10)

@@ -2,16 +2,6 @@
 
 ## vNext
 
-[#2868](https://github.com/open-telemetry/opentelemetry-rust/pull/2868)
-`SdkLogger`, `SdkTracer` modified to respect telemetry suppression based on
-`Context`. In other words, if the current context has telemetry suppression
-enabled, then logs/spans will be ignored. The flag is typically set by OTel
-components to prevent telemetry from itself being fed back into OTel.
-`BatchLogProcessor`, `BatchSpanProcessor`, and `PeriodicReader` modified to set
-the suppression flag in their dedicated thread, so that telemetry generated from
-those threads will not be fed back into OTel. Similarly, `SimpleLogProcessor`
-also modified to suppress telemetry before invoking exporters.
-
 ## 0.29.0
 
 Released 2025-Mar-21

opentelemetry-sdk/benches/log_enabled.rs (+2 -2)

@@ -5,8 +5,8 @@
 Total Number of Cores:   14 (10 performance and 4 efficiency)
 | Test                                        | Average time|
 |---------------------------------------------|-------------|
-| exporter_disabled_concurrent_processor      | 2.5 ns      |
-| exporter_disabled_simple_processor          | 5.3 ns      |
+| exporter_disabled_concurrent_processor      | 1.1 ns      |
+| exporter_disabled_simple_processor          | 4.3 ns      |
 */
 
 // cargo bench --bench log_enabled --features="spec_unstable_logs_enabled,experimental_logs_concurrent_log_processor"

opentelemetry-sdk/src/logs/batch_log_processor.rs (+1 -2)

@@ -23,7 +23,7 @@ use crate::{
 };
 use std::sync::mpsc::{self, RecvTimeoutError, SyncSender};
 
-use opentelemetry::{otel_debug, otel_error, otel_warn, Context, InstrumentationScope};
+use opentelemetry::{otel_debug, otel_error, otel_warn, InstrumentationScope};
 
 use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use std::{cmp::min, env, sync::Mutex};
@@ -342,7 +342,6 @@ impl BatchLogProcessor {
         let handle = thread::Builder::new()
             .name("OpenTelemetry.Logs.BatchProcessor".to_string())
             .spawn(move || {
-                let _suppress_guard = Context::enter_telemetry_suppressed_scope();
                 otel_debug!(
                     name: "BatchLogProcessor.ThreadStarted",
                     interval_in_millisecs = config.scheduled_delay.as_millis(),
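The removed `_suppress_guard` line implemented the thread-side half of the suppression scheme: the batch processor's dedicated thread ran entirely inside a suppressed `Context`, so its own diagnostics could not be re-exported. A minimal standalone sketch of that pattern, using only the APIs named in this diff (the worker body is elided):

```rust
use opentelemetry::Context;
use std::thread;

fn spawn_batch_worker() -> thread::JoinHandle<()> {
    thread::Builder::new()
        .name("OpenTelemetry.Logs.BatchProcessor".to_string())
        .spawn(move || {
            // Pre-revert: everything this thread emits, including its own
            // otel_debug! diagnostics, carries the suppression flag and is
            // dropped by the SDK instead of being fed back into OTel.
            let _suppress_guard = Context::enter_telemetry_suppressed_scope();
            // ... receive batches from the channel and call the exporter ...
        })
        .expect("failed to spawn batch processor thread")
}
```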

opentelemetry-sdk/src/logs/logger.rs (-6)

@@ -29,9 +29,6 @@ impl opentelemetry::logs::Logger for SdkLogger {
 
     /// Emit a `LogRecord`.
     fn emit(&self, mut record: Self::LogRecord) {
-        if Context::is_current_telemetry_suppressed() {
-            return;
-        }
         let provider = &self.provider;
         let processors = provider.log_processors();
 
@@ -55,9 +52,6 @@
     #[cfg(feature = "spec_unstable_logs_enabled")]
     #[inline]
     fn event_enabled(&self, level: Severity, target: &str, name: Option<&str>) -> bool {
-        if Context::is_current_telemetry_suppressed() {
-            return false;
-        }
         self.provider
             .log_processors()
             .iter()