diff --git a/.cursor/rules/pr.mdc b/.cursor/rules/pr.mdc index 08a07511c67..e15c0a0a563 100644 --- a/.cursor/rules/pr.mdc +++ b/.cursor/rules/pr.mdc @@ -258,3 +258,5 @@ git push **Never merge into the collection branch.** Syncing only happens between stack PR branches. The collection branch is untouched until the user merges PRs through GitHub. Prefer merge over rebase — it preserves commit history, doesn't invalidate existing review comments, and avoids the need for force-pushing. Only rebase if explicitly requested. + +**Never amend or force-push stack branches.** Do not use `git commit --amend`, `--force`, or `--force-with-lease` on branches that are part of a stack. Amending a pushed commit requires a force-push, which can cause GitHub to auto-merge or auto-close other PRs in the stack. If a commit needs fixing, add a new fixup commit instead. diff --git a/CHANGELOG.md b/CHANGELOG.md index 6bd3b127948..be35e99df6d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,9 @@ ### Features +- Add `sentry-kafka` module for Kafka queue instrumentation without Spring ([#5288](https://github.com/getsentry/sentry-java/pull/5288)) +- Add Kafka queue tracing for Spring Boot 3 ([#5254](https://github.com/getsentry/sentry-java/pull/5254)), ([#5255](https://github.com/getsentry/sentry-java/pull/5255)), ([#5256](https://github.com/getsentry/sentry-java/pull/5256)) +- Add `enableQueueTracing` option and messaging span data conventions ([#5250](https://github.com/getsentry/sentry-java/pull/5250)) - Prevent cross-organization trace continuation ([#5136](https://github.com/getsentry/sentry-java/pull/5136)) - By default, the SDK now extracts the organization ID from the DSN (e.g. `o123.ingest.sentry.io`) and compares it with the `sentry-org_id` value in incoming baggage headers. When the two differ, the SDK starts a fresh trace instead of continuing the foreign one. This guards against accidentally linking traces across organizations. 
- New option `enableStrictTraceContinuation` (default `false`): when enabled, both the SDK's org ID **and** the incoming baggage org ID must be present and match for a trace to be continued. Traces with a missing org ID on either side are rejected. Configurable via code (`setStrictTraceContinuation(true)`), `sentry.properties` (`enable-strict-trace-continuation=true`), Android manifest (`io.sentry.strict-trace-continuation.enabled`), or Spring Boot (`sentry.strict-trace-continuation=true`). @@ -11,6 +14,11 @@ - Android: Attachments on the scope will now be synced to native ([#5211](https://github.com/getsentry/sentry-java/pull/5211)) - Add THIRD_PARTY_NOTICES.md for vendored third-party code, bundled as SENTRY_THIRD_PARTY_NOTICES.md in the sentry JAR under META-INF ([#5186](https://github.com/getsentry/sentry-java/pull/5186)) +### Fixes + +- Inject Kafka trace headers even without an active span so distributed tracing works for background workers and `@Scheduled` jobs ([#5338](https://github.com/getsentry/sentry-java/pull/5338)) +- Write the `sentry-task-enqueued-time` Kafka header as a plain decimal so cross-SDK consumers (e.g. 
sentry-python) can parse it ([#5328](https://github.com/getsentry/sentry-java/pull/5328)) + ## 8.37.1 ### Fixes diff --git a/README.md b/README.md index 25fedc8217f..72737932c55 100644 --- a/README.md +++ b/README.md @@ -35,6 +35,7 @@ Sentry SDK for Java and Android | sentry | ![Maven Central Version](https://img.shields.io/maven-central/v/io.sentry/sentry?style=for-the-badge&logo=sentry&color=green) | 21 | | sentry-jul | ![Maven Central Version](https://img.shields.io/maven-central/v/io.sentry/sentry-jul?style=for-the-badge&logo=sentry&color=green) | | sentry-jdbc | ![Maven Central Version](https://img.shields.io/maven-central/v/io.sentry/sentry-jdbc?style=for-the-badge&logo=sentry&color=green) | +| sentry-kafka | ![Maven Central Version](https://img.shields.io/maven-central/v/io.sentry/sentry-kafka?style=for-the-badge&logo=sentry&color=green) | | sentry-apollo | ![Maven Central Version](https://img.shields.io/maven-central/v/io.sentry/sentry-apollo?style=for-the-badge&logo=sentry&color=green) | 21 | | sentry-apollo-3 | ![Maven Central Version](https://img.shields.io/maven-central/v/io.sentry/sentry-apollo-3?style=for-the-badge&logo=sentry&color=green) | 21 | | sentry-apollo-4 | ![Maven Central Version](https://img.shields.io/maven-central/v/io.sentry/sentry-apollo-4?style=for-the-badge&logo=sentry&color=green) | 21 | diff --git a/buildSrc/src/main/java/Config.kt b/buildSrc/src/main/java/Config.kt index b5d1dafeb74..0e353f1c5e5 100644 --- a/buildSrc/src/main/java/Config.kt +++ b/buildSrc/src/main/java/Config.kt @@ -80,6 +80,7 @@ object Config { val SENTRY_JCACHE_SDK_NAME = "$SENTRY_JAVA_SDK_NAME.jcache" val SENTRY_QUARTZ_SDK_NAME = "$SENTRY_JAVA_SDK_NAME.quartz" val SENTRY_JDBC_SDK_NAME = "$SENTRY_JAVA_SDK_NAME.jdbc" + val SENTRY_KAFKA_SDK_NAME = "$SENTRY_JAVA_SDK_NAME.kafka" val SENTRY_OPENFEATURE_SDK_NAME = "$SENTRY_JAVA_SDK_NAME.openfeature" val SENTRY_LAUNCHDARKLY_SERVER_SDK_NAME = "$SENTRY_JAVA_SDK_NAME.launchdarkly-server" val 
SENTRY_LAUNCHDARKLY_ANDROID_SDK_NAME = "$SENTRY_ANDROID_SDK_NAME.launchdarkly" diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index eb7ab86e4bd..2238800c53f 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -183,6 +183,8 @@ springboot3-starter-security = { module = "org.springframework.boot:spring-boot- springboot3-starter-jdbc = { module = "org.springframework.boot:spring-boot-starter-jdbc", version.ref = "springboot3" } springboot3-starter-actuator = { module = "org.springframework.boot:spring-boot-starter-actuator", version.ref = "springboot3" } springboot3-starter-cache = { module = "org.springframework.boot:spring-boot-starter-cache", version.ref = "springboot3" } +spring-kafka3 = { module = "org.springframework.kafka:spring-kafka", version = "3.3.5" } +kafka-clients = { module = "org.apache.kafka:kafka-clients", version = "3.8.1" } springboot4-otel = { module = "io.opentelemetry.instrumentation:opentelemetry-spring-boot-starter", version.ref = "otelInstrumentation" } springboot4-resttestclient = { module = "org.springframework.boot:spring-boot-resttestclient", version.ref = "springboot4" } springboot4-starter = { module = "org.springframework.boot:spring-boot-starter", version.ref = "springboot4" } diff --git a/sentry-kafka/README.md b/sentry-kafka/README.md new file mode 100644 index 00000000000..ef4b5319859 --- /dev/null +++ b/sentry-kafka/README.md @@ -0,0 +1,5 @@ +# sentry-kafka + +This module provides Kafka-native queue instrumentation for applications using `kafka-clients` directly. + +Spring users should use `sentry-spring-boot-jakarta` / `sentry-spring-jakarta`, which provide higher-fidelity consumer instrumentation via Spring Kafka hooks. 
diff --git a/sentry-kafka/api/sentry-kafka.api b/sentry-kafka/api/sentry-kafka.api new file mode 100644 index 00000000000..00649245845 --- /dev/null +++ b/sentry-kafka/api/sentry-kafka.api @@ -0,0 +1,19 @@ +public final class io/sentry/kafka/BuildConfig { + public static final field SENTRY_KAFKA_SDK_NAME Ljava/lang/String; + public static final field VERSION_NAME Ljava/lang/String; +} + +public final class io/sentry/kafka/SentryKafkaConsumerTracing { + public static final field TRACE_ORIGIN Ljava/lang/String; + public static fun withTracing (Lorg/apache/kafka/clients/consumer/ConsumerRecord;Ljava/lang/Runnable;)V + public static fun withTracing (Lorg/apache/kafka/clients/consumer/ConsumerRecord;Ljava/util/concurrent/Callable;)Ljava/lang/Object; +} + +public final class io/sentry/kafka/SentryKafkaProducer { + public static final field SENTRY_ENQUEUED_TIME_HEADER Ljava/lang/String; + public static final field TRACE_ORIGIN Ljava/lang/String; + public static fun wrap (Lorg/apache/kafka/clients/producer/Producer;)Lorg/apache/kafka/clients/producer/Producer; + public static fun wrap (Lorg/apache/kafka/clients/producer/Producer;Lio/sentry/IScopes;)Lorg/apache/kafka/clients/producer/Producer; + public static fun wrap (Lorg/apache/kafka/clients/producer/Producer;Lio/sentry/IScopes;Ljava/lang/String;)Lorg/apache/kafka/clients/producer/Producer; +} + diff --git a/sentry-kafka/build.gradle.kts b/sentry-kafka/build.gradle.kts new file mode 100644 index 00000000000..ee3ba0d4a60 --- /dev/null +++ b/sentry-kafka/build.gradle.kts @@ -0,0 +1,83 @@ +import net.ltgt.gradle.errorprone.errorprone +import org.jetbrains.kotlin.gradle.tasks.KotlinCompile + +plugins { + `java-library` + id("io.sentry.javadoc") + alias(libs.plugins.kotlin.jvm) + jacoco + alias(libs.plugins.errorprone) + alias(libs.plugins.gradle.versions) + alias(libs.plugins.buildconfig) +} + +tasks.withType().configureEach { + compilerOptions.jvmTarget = org.jetbrains.kotlin.gradle.dsl.JvmTarget.JVM_1_8 +} + +dependencies 
{ + api(projects.sentry) + compileOnly(libs.kafka.clients) + compileOnly(libs.jetbrains.annotations) + compileOnly(libs.nopen.annotations) + + errorprone(libs.errorprone.core) + errorprone(libs.nopen.checker) + errorprone(libs.nullaway) + + // tests + testImplementation(projects.sentryTestSupport) + testImplementation(kotlin(Config.kotlinStdLib)) + testImplementation(libs.kotlin.test.junit) + testImplementation(libs.mockito.kotlin) + testImplementation(libs.mockito.inline) + testImplementation(libs.kafka.clients) +} + +configure { test { java.srcDir("src/test/java") } } + +jacoco { toolVersion = libs.versions.jacoco.get() } + +tasks.jacocoTestReport { + reports { + xml.required.set(true) + html.required.set(false) + } +} + +tasks { + jacocoTestCoverageVerification { + violationRules { rule { limit { minimum = Config.QualityPlugins.Jacoco.minimumCoverage } } } + } + check { + dependsOn(jacocoTestCoverageVerification) + dependsOn(jacocoTestReport) + } +} + +tasks.withType().configureEach { + options.errorprone { + check("NullAway", net.ltgt.gradle.errorprone.CheckSeverity.ERROR) + option("NullAway:AnnotatedPackages", "io.sentry") + } +} + +buildConfig { + useJavaOutput() + packageName("io.sentry.kafka") + buildConfigField("String", "SENTRY_KAFKA_SDK_NAME", "\"${Config.Sentry.SENTRY_KAFKA_SDK_NAME}\"") + buildConfigField("String", "VERSION_NAME", "\"${project.version}\"") +} + +tasks.jar { + manifest { + attributes( + "Sentry-Version-Name" to project.version, + "Sentry-SDK-Name" to Config.Sentry.SENTRY_KAFKA_SDK_NAME, + "Sentry-SDK-Package-Name" to "maven:io.sentry:sentry-kafka", + "Implementation-Vendor" to "Sentry", + "Implementation-Title" to project.name, + "Implementation-Version" to project.version, + ) + } +} diff --git a/sentry-kafka/src/main/java/io/sentry/kafka/SentryKafkaConsumerTracing.java b/sentry-kafka/src/main/java/io/sentry/kafka/SentryKafkaConsumerTracing.java new file mode 100644 index 00000000000..1231cae15ec --- /dev/null +++ 
b/sentry-kafka/src/main/java/io/sentry/kafka/SentryKafkaConsumerTracing.java @@ -0,0 +1,281 @@ +package io.sentry.kafka; + +import io.sentry.BaggageHeader; +import io.sentry.DateUtils; +import io.sentry.IScopes; +import io.sentry.ISentryLifecycleToken; +import io.sentry.ITransaction; +import io.sentry.ScopesAdapter; +import io.sentry.SentryLevel; +import io.sentry.SentryTraceHeader; +import io.sentry.SpanDataConvention; +import io.sentry.SpanStatus; +import io.sentry.TransactionContext; +import io.sentry.TransactionOptions; +import io.sentry.util.SpanUtils; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.common.header.Header; +import org.jetbrains.annotations.ApiStatus; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +/** Helper methods for instrumenting raw Kafka consumer record processing. */ +@ApiStatus.Experimental +public final class SentryKafkaConsumerTracing { + + public static final @NotNull String TRACE_ORIGIN = "manual.queue.kafka.consumer"; + + private static final @NotNull String CREATOR = "SentryKafkaConsumerTracing"; + private static final @NotNull String DELIVERY_ATTEMPT_HEADER = "kafka_deliveryAttempt"; + private static final @NotNull String MESSAGE_ID_HEADER = "messaging.message.id"; + + private final @NotNull IScopes scopes; + + SentryKafkaConsumerTracing(final @NotNull IScopes scopes) { + this.scopes = scopes; + } + + /** + * Runs the provided {@link Callable} with a Kafka consumer processing transaction for the given + * record. 
+ * + * @param record the Kafka record being processed + * @param callable the processing callback + * @return the return value of the callback + * @param the Kafka record key type + * @param the Kafka record value type + * @param the callback return type + */ + public static U withTracing( + final @NotNull ConsumerRecord record, final @NotNull Callable callable) + throws Exception { + return new SentryKafkaConsumerTracing(ScopesAdapter.getInstance()) + .withTracingImpl(record, callable); + } + + /** + * Runs the provided {@link Runnable} with a Kafka consumer processing transaction for the given + * record. + * + * @param record the Kafka record being processed + * @param runnable the processing callback + * @param the Kafka record key type + * @param the Kafka record value type + */ + public static void withTracing( + final @NotNull ConsumerRecord record, final @NotNull Runnable runnable) { + new SentryKafkaConsumerTracing(ScopesAdapter.getInstance()).withTracingImpl(record, runnable); + } + + U withTracingImpl( + final @NotNull ConsumerRecord record, final @NotNull Callable callable) + throws Exception { + if (!scopes.getOptions().isEnableQueueTracing() || isIgnored()) { + return callable.call(); + } + + final @NotNull IScopes forkedScopes; + final @NotNull ISentryLifecycleToken lifecycleToken; + try { + forkedScopes = scopes.forkedRootScopes(CREATOR); + lifecycleToken = forkedScopes.makeCurrent(); + } catch (Throwable t) { + scopes + .getOptions() + .getLogger() + .log(SentryLevel.ERROR, "Failed to fork scopes for Kafka consumer tracing.", t); + return callable.call(); + } + + try (final @NotNull ISentryLifecycleToken ignored = lifecycleToken) { + final @Nullable ITransaction transaction = startTransaction(forkedScopes, record); + boolean didError = false; + @Nullable Throwable callbackThrowable = null; + + try { + return callable.call(); + } catch (Throwable t) { + didError = true; + callbackThrowable = t; + throw t; + } finally { + finishTransaction( + 
transaction, didError ? SpanStatus.INTERNAL_ERROR : SpanStatus.OK, callbackThrowable); + } + } + } + + void withTracingImpl( + final @NotNull ConsumerRecord record, final @NotNull Runnable runnable) { + try { + withTracingImpl( + record, + () -> { + runnable.run(); + return null; + }); + } catch (Throwable t) { + throwUnchecked(t); + } + } + + @SuppressWarnings("unchecked") + private static void throwUnchecked(final @NotNull Throwable throwable) + throws T { + throw (T) throwable; + } + + private boolean isIgnored() { + return SpanUtils.isIgnored(scopes.getOptions().getIgnoredSpanOrigins(), TRACE_ORIGIN); + } + + private @Nullable ITransaction startTransaction( + final @NotNull IScopes forkedScopes, final @NotNull ConsumerRecord record) { + try { + final @Nullable TransactionContext continued = continueTrace(forkedScopes, record); + if (!forkedScopes.getOptions().isTracingEnabled()) { + return null; + } + + final @NotNull TransactionContext txContext = + continued != null ? continued : new TransactionContext("queue.process", "queue.process"); + txContext.setName("queue.process"); + txContext.setOperation("queue.process"); + + final @NotNull TransactionOptions txOptions = new TransactionOptions(); + txOptions.setOrigin(TRACE_ORIGIN); + txOptions.setBindToScope(true); + + final @NotNull ITransaction transaction = forkedScopes.startTransaction(txContext, txOptions); + if (transaction.isNoOp()) { + return null; + } + + transaction.setData(SpanDataConvention.MESSAGING_SYSTEM, "kafka"); + transaction.setData(SpanDataConvention.MESSAGING_DESTINATION_NAME, record.topic()); + + final @Nullable String messageId = headerValue(record, MESSAGE_ID_HEADER); + if (messageId != null) { + transaction.setData(SpanDataConvention.MESSAGING_MESSAGE_ID, messageId); + } + + final int bodySize = record.serializedValueSize(); + if (bodySize >= 0) { + transaction.setData(SpanDataConvention.MESSAGING_MESSAGE_BODY_SIZE, bodySize); + } + + final @Nullable Integer retryCount = 
retryCount(record); + if (retryCount != null) { + transaction.setData(SpanDataConvention.MESSAGING_MESSAGE_RETRY_COUNT, retryCount); + } + + final @Nullable Long receiveLatency = receiveLatency(record); + if (receiveLatency != null) { + transaction.setData(SpanDataConvention.MESSAGING_MESSAGE_RECEIVE_LATENCY, receiveLatency); + } + + return transaction; + } catch (Throwable t) { + scopes + .getOptions() + .getLogger() + .log(SentryLevel.ERROR, "Failed to start Kafka consumer tracing transaction.", t); + return null; + } + } + + private void finishTransaction( + final @Nullable ITransaction transaction, + final @NotNull SpanStatus status, + final @Nullable Throwable throwable) { + if (transaction == null || transaction.isNoOp()) { + return; + } + + try { + transaction.setStatus(status); + if (throwable != null) { + transaction.setThrowable(throwable); + } + transaction.finish(); + } catch (Throwable t) { + // Instrumentation must never break customer processing. + scopes + .getOptions() + .getLogger() + .log(SentryLevel.ERROR, "Failed to finish Kafka consumer tracing transaction.", t); + } + } + + private @Nullable TransactionContext continueTrace( + final @NotNull IScopes forkedScopes, final @NotNull ConsumerRecord record) { + final @Nullable String sentryTrace = headerValue(record, SentryTraceHeader.SENTRY_TRACE_HEADER); + final @Nullable List baggageHeaders = + headerValues(record, BaggageHeader.BAGGAGE_HEADER); + return forkedScopes.continueTrace(sentryTrace, baggageHeaders); + } + + private @Nullable Integer retryCount(final @NotNull ConsumerRecord record) { + final @Nullable Header header = record.headers().lastHeader(DELIVERY_ATTEMPT_HEADER); + if (header == null) { + return null; + } + + final byte[] value = header.value(); + if (value == null || value.length != Integer.BYTES) { + return null; + } + + final int attempt = ByteBuffer.wrap(value).getInt(); + if (attempt <= 0) { + return null; + } + + return attempt - 1; + } + + private @Nullable Long 
receiveLatency(final @NotNull ConsumerRecord record) { + final @Nullable String enqueuedTimeStr = + headerValue(record, SentryKafkaProducer.SENTRY_ENQUEUED_TIME_HEADER); + if (enqueuedTimeStr == null) { + return null; + } + + try { + final double enqueuedTimeSeconds = Double.parseDouble(enqueuedTimeStr); + final double nowSeconds = DateUtils.millisToSeconds(System.currentTimeMillis()); + final long latencyMs = (long) ((nowSeconds - enqueuedTimeSeconds) * 1000); + return latencyMs >= 0 ? latencyMs : null; + } catch (NumberFormatException ignored) { + return null; + } + } + + private @Nullable String headerValue( + final @NotNull ConsumerRecord record, final @NotNull String headerName) { + final @Nullable Header header = record.headers().lastHeader(headerName); + if (header == null || header.value() == null) { + return null; + } + return new String(header.value(), StandardCharsets.UTF_8); + } + + private @Nullable List headerValues( + final @NotNull ConsumerRecord record, final @NotNull String headerName) { + @Nullable List values = null; + for (final @NotNull Header header : record.headers().headers(headerName)) { + if (header.value() != null) { + if (values == null) { + values = new ArrayList<>(); + } + values.add(new String(header.value(), StandardCharsets.UTF_8)); + } + } + return values; + } +} diff --git a/sentry-kafka/src/main/java/io/sentry/kafka/SentryKafkaProducer.java b/sentry-kafka/src/main/java/io/sentry/kafka/SentryKafkaProducer.java new file mode 100644 index 00000000000..7400e5ba2c8 --- /dev/null +++ b/sentry-kafka/src/main/java/io/sentry/kafka/SentryKafkaProducer.java @@ -0,0 +1,264 @@ +package io.sentry.kafka; + +import io.sentry.BaggageHeader; +import io.sentry.DateUtils; +import io.sentry.IScopes; +import io.sentry.ISpan; +import io.sentry.ScopesAdapter; +import io.sentry.SentryLevel; +import io.sentry.SentryTraceHeader; +import io.sentry.SpanDataConvention; +import io.sentry.SpanOptions; +import io.sentry.SpanStatus; +import 
io.sentry.util.SpanUtils; +import io.sentry.util.TracingUtils; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import org.apache.kafka.clients.producer.Callback; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.header.Header; +import org.apache.kafka.common.header.Headers; +import org.jetbrains.annotations.ApiStatus; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +/** + * Wraps a Kafka {@link Producer} via {@link Proxy} to record a {@code queue.publish} span around + * each {@code send} and to inject Sentry trace propagation headers into the produced record. + * + *

<p>Only the two {@code send} overloads are intercepted; every other {@link Producer} method is
+ * forwarded directly to the delegate. Because the wrapper is a dynamic proxy, it is compatible with
+ * any Kafka client version — new methods added to the {@link Producer} interface in future Kafka
+ * releases are forwarded automatically without recompilation.
+ *

<p>For raw Kafka usage:
+ *

<pre>{@code
+ * Producer<String, String> producer =
+ *     SentryKafkaProducer.wrap(new KafkaProducer<>(props));
+ * }</pre>
+ * + *

For Spring Kafka, the {@code SentryKafkaProducerBeanPostProcessor} in {@code + * sentry-spring-jakarta} installs this wrapper automatically via {@code + * ProducerFactory.addPostProcessor(...)}. + */ +@ApiStatus.Experimental +public final class SentryKafkaProducer { + + public static final @NotNull String TRACE_ORIGIN = "auto.queue.kafka.producer"; + public static final @NotNull String SENTRY_ENQUEUED_TIME_HEADER = "sentry-task-enqueued-time"; + + private SentryKafkaProducer() {} + + /** + * Wraps the given producer with Sentry instrumentation using the global scopes. + * + * @param delegate the Kafka producer to wrap + * @return an instrumented producer that records {@code queue.publish} spans + * @param the Kafka record key type + * @param the Kafka record value type + */ + public static @NotNull Producer wrap(final @NotNull Producer delegate) { + return wrap(delegate, ScopesAdapter.getInstance(), TRACE_ORIGIN); + } + + /** + * Wraps the given producer with Sentry instrumentation using the provided scopes. + * + * @param delegate the Kafka producer to wrap + * @param scopes the Sentry scopes to use for span creation and header injection + * @return an instrumented producer that records {@code queue.publish} spans + * @param the Kafka record key type + * @param the Kafka record value type + */ + public static @NotNull Producer wrap( + final @NotNull Producer delegate, final @NotNull IScopes scopes) { + return wrap(delegate, scopes, TRACE_ORIGIN); + } + + /** + * Wraps the given producer with Sentry instrumentation. 
+ * + * @param delegate the Kafka producer to wrap + * @param scopes the Sentry scopes to use for span creation and header injection + * @param traceOrigin the trace origin to set on created spans + * @return an instrumented producer that records {@code queue.publish} spans + * @param the Kafka record key type + * @param the Kafka record value type + */ + @SuppressWarnings("unchecked") + public static @NotNull Producer wrap( + final @NotNull Producer delegate, + final @NotNull IScopes scopes, + final @NotNull String traceOrigin) { + return (Producer) + Proxy.newProxyInstance( + delegate.getClass().getClassLoader(), + new Class[] {Producer.class}, + new SentryProducerHandler<>(delegate, scopes, traceOrigin)); + } + + static final class SentryProducerHandler implements InvocationHandler { + + final @NotNull Producer delegate; + private final @NotNull IScopes scopes; + private final @NotNull String traceOrigin; + + SentryProducerHandler( + final @NotNull Producer delegate, + final @NotNull IScopes scopes, + final @NotNull String traceOrigin) { + this.delegate = delegate; + this.scopes = scopes; + this.traceOrigin = traceOrigin; + } + + @Override + @SuppressWarnings("unchecked") + public @Nullable Object invoke( + final @NotNull Object proxy, final @NotNull Method method, final @Nullable Object[] args) + throws Throwable { + if ("send".equals(method.getName()) && args != null) { + if (args.length == 1) { + return instrumentedSend((ProducerRecord) args[0], null); + } else if (args.length == 2) { + return instrumentedSend((ProducerRecord) args[0], (Callback) args[1]); + } + } + + if ("toString".equals(method.getName()) && (args == null || args.length == 0)) { + return "SentryKafkaProducer[delegate=" + delegate + "]"; + } + + try { + return method.invoke(delegate, args); + } catch (InvocationTargetException e) { + throw e.getCause(); + } + } + + @SuppressWarnings("unchecked") + private @NotNull Object instrumentedSend( + final @NotNull ProducerRecord record, final 
@Nullable Callback callback) { + if (!scopes.getOptions().isEnableQueueTracing() || isIgnored()) { + return delegate.send(record, callback); + } + + final @Nullable ISpan activeSpan = scopes.getSpan(); + if (activeSpan == null || activeSpan.isNoOp()) { + maybeInjectHeaders(record.headers(), null); + return delegate.send(record, callback); + } + + final @NotNull SpanOptions spanOptions = new SpanOptions(); + spanOptions.setOrigin(traceOrigin); + final @NotNull ISpan span = + activeSpan.startChild("queue.publish", record.topic(), spanOptions); + + span.setData(SpanDataConvention.MESSAGING_SYSTEM, "kafka"); + span.setData(SpanDataConvention.MESSAGING_DESTINATION_NAME, record.topic()); + maybeInjectHeaders(record.headers(), span); + + try { + return delegate.send(record, wrapCallback(callback, span)); + } catch (Throwable t) { + finishWithError(span, t); + throw t; + } + } + + private @NotNull Callback wrapCallback( + final @Nullable Callback userCallback, final @NotNull ISpan span) { + return (metadata, exception) -> { + try { + if (exception != null) { + span.setThrowable(exception); + span.setStatus(SpanStatus.INTERNAL_ERROR); + } else { + span.setStatus(SpanStatus.OK); + } + } catch (Throwable t) { + scopes + .getOptions() + .getLogger() + .log(SentryLevel.ERROR, "Failed to set status on Kafka producer span.", t); + } finally { + try { + span.finish(); + } finally { + if (userCallback != null) { + userCallback.onCompletion(metadata, exception); + } + } + } + }; + } + + private void finishWithError(final @NotNull ISpan span, final @NotNull Throwable t) { + span.setThrowable(t); + span.setStatus(SpanStatus.INTERNAL_ERROR); + span.finish(); + } + + private boolean isIgnored() { + return SpanUtils.isIgnored(scopes.getOptions().getIgnoredSpanOrigins(), traceOrigin); + } + + private void maybeInjectHeaders(final @NotNull Headers headers, final @Nullable ISpan span) { + try { + final @Nullable List existingBaggageHeaders = + readHeaderValues(headers, 
BaggageHeader.BAGGAGE_HEADER); + final @Nullable TracingUtils.TracingHeaders tracingHeaders = + TracingUtils.trace(scopes, existingBaggageHeaders, span); + if (tracingHeaders != null) { + final @NotNull SentryTraceHeader sentryTraceHeader = + tracingHeaders.getSentryTraceHeader(); + headers.remove(sentryTraceHeader.getName()); + headers.add( + sentryTraceHeader.getName(), + sentryTraceHeader.getValue().getBytes(StandardCharsets.UTF_8)); + + final @Nullable BaggageHeader baggageHeader = tracingHeaders.getBaggageHeader(); + if (baggageHeader != null) { + headers.remove(baggageHeader.getName()); + headers.add( + baggageHeader.getName(), baggageHeader.getValue().getBytes(StandardCharsets.UTF_8)); + } + } + + headers.remove(SENTRY_ENQUEUED_TIME_HEADER); + headers.add( + SENTRY_ENQUEUED_TIME_HEADER, + DateUtils.doubleToBigDecimal(DateUtils.millisToSeconds(System.currentTimeMillis())) + .toString() + .getBytes(StandardCharsets.UTF_8)); + } catch (Throwable t) { + scopes + .getOptions() + .getLogger() + .log(SentryLevel.ERROR, "Failed to inject Sentry headers into Kafka record.", t); + } + } + + private static @Nullable List readHeaderValues( + final @NotNull Headers headers, final @NotNull String name) { + @Nullable List values = null; + for (final @NotNull Header header : headers.headers(name)) { + final byte @Nullable [] value = header.value(); + if (value != null) { + if (values == null) { + values = new ArrayList<>(); + } + values.add(new String(value, StandardCharsets.UTF_8)); + } + } + return values; + } + } +} diff --git a/sentry-kafka/src/test/kotlin/io/sentry/kafka/SentryKafkaConsumerTracingTest.kt b/sentry-kafka/src/test/kotlin/io/sentry/kafka/SentryKafkaConsumerTracingTest.kt new file mode 100644 index 00000000000..3bd992e8c8c --- /dev/null +++ b/sentry-kafka/src/test/kotlin/io/sentry/kafka/SentryKafkaConsumerTracingTest.kt @@ -0,0 +1,254 @@ +package io.sentry.kafka + +import io.sentry.BaggageHeader +import io.sentry.IScopes +import 
io.sentry.ISentryLifecycleToken +import io.sentry.ITransaction +import io.sentry.SentryOptions +import io.sentry.SentryTraceHeader +import io.sentry.SpanDataConvention +import io.sentry.SpanStatus +import io.sentry.TransactionContext +import io.sentry.TransactionOptions +import java.io.IOException +import java.nio.ByteBuffer +import java.nio.charset.StandardCharsets +import java.util.Optional +import java.util.concurrent.Callable +import java.util.concurrent.atomic.AtomicBoolean +import kotlin.test.BeforeTest +import kotlin.test.Test +import kotlin.test.assertEquals +import kotlin.test.assertFailsWith +import kotlin.test.assertTrue +import org.apache.kafka.clients.consumer.ConsumerRecord +import org.apache.kafka.common.header.internals.RecordHeaders +import org.apache.kafka.common.record.TimestampType +import org.mockito.kotlin.any +import org.mockito.kotlin.argumentCaptor +import org.mockito.kotlin.check +import org.mockito.kotlin.eq +import org.mockito.kotlin.mock +import org.mockito.kotlin.never +import org.mockito.kotlin.verify +import org.mockito.kotlin.whenever + +class SentryKafkaConsumerTracingTest { + + private lateinit var scopes: IScopes + private lateinit var forkedScopes: IScopes + private lateinit var options: SentryOptions + private lateinit var lifecycleToken: ISentryLifecycleToken + private lateinit var transaction: ITransaction + private lateinit var tracing: SentryKafkaConsumerTracing + + @BeforeTest + fun setup() { + scopes = mock() + forkedScopes = mock() + lifecycleToken = mock() + transaction = mock() + tracing = SentryKafkaConsumerTracing(scopes) + + options = + SentryOptions().apply { + dsn = "https://key@sentry.io/proj" + isEnableQueueTracing = true + tracesSampleRate = 1.0 + } + + whenever(scopes.options).thenReturn(options) + whenever(scopes.forkedRootScopes(any())).thenReturn(forkedScopes) + whenever(forkedScopes.options).thenReturn(options) + whenever(forkedScopes.makeCurrent()).thenReturn(lifecycleToken) + 
whenever(forkedScopes.startTransaction(any(), any())) + .thenReturn(transaction) + whenever(transaction.isNoOp).thenReturn(false) + } + + @Test + fun `withTracing creates queue process transaction with record metadata`() { + val sentryTraceValue = "2722d9f6ec019ade60c776169d9a8904-cedf5b7571cb4972-1" + val baggageValue = "sentry-sample_rate=1" + val record = + createRecord( + sentryTrace = sentryTraceValue, + baggage = baggageValue, + messageId = "message-123", + deliveryAttempt = 3, + enqueuedTime = (System.currentTimeMillis() / 1000.0 - 1.0).toString(), + serializedValueSize = 5, + ) + + val txContextCaptor = argumentCaptor() + val txOptionsCaptor = argumentCaptor() + + val result = tracing.withTracingImpl(record, Callable { "done" }) + + assertEquals("done", result) + verify(scopes).forkedRootScopes("SentryKafkaConsumerTracing") + verify(forkedScopes).makeCurrent() + verify(forkedScopes).continueTrace(eq(sentryTraceValue), eq(listOf(baggageValue))) + verify(forkedScopes).startTransaction(txContextCaptor.capture(), txOptionsCaptor.capture()) + + assertEquals("queue.process", txContextCaptor.firstValue.name) + assertEquals("queue.process", txContextCaptor.firstValue.operation) + assertEquals(SentryKafkaConsumerTracing.TRACE_ORIGIN, txOptionsCaptor.firstValue.origin) + assertTrue(txOptionsCaptor.firstValue.isBindToScope) + + verify(transaction).setData(SpanDataConvention.MESSAGING_SYSTEM, "kafka") + verify(transaction).setData(SpanDataConvention.MESSAGING_DESTINATION_NAME, "my-topic") + verify(transaction).setData(SpanDataConvention.MESSAGING_MESSAGE_ID, "message-123") + verify(transaction).setData(SpanDataConvention.MESSAGING_MESSAGE_BODY_SIZE, 5) + verify(transaction).setData(SpanDataConvention.MESSAGING_MESSAGE_RETRY_COUNT, 2) + verify(transaction) + .setData( + eq(SpanDataConvention.MESSAGING_MESSAGE_RECEIVE_LATENCY), + check { assertTrue(it >= 0) }, + ) + verify(transaction).setStatus(SpanStatus.OK) + verify(transaction).finish() + 
verify(lifecycleToken).close() + } + + @Test + fun `withTracing passes all baggage headers to continueTrace`() { + val sentryTraceValue = "2722d9f6ec019ade60c776169d9a8904-cedf5b7571cb4972-1" + val record = + createRecord( + sentryTrace = sentryTraceValue, + baggageHeaders = listOf("third=party", "sentry-sample_rate=1"), + ) + + tracing.withTracingImpl(record, Callable { "done" }) + + verify(forkedScopes) + .continueTrace(eq(sentryTraceValue), eq(listOf("third=party", "sentry-sample_rate=1"))) + } + + @Test + fun `withTracing skips scope forking when queue tracing is disabled`() { + options.isEnableQueueTracing = false + val record = createRecord() + + val result = tracing.withTracingImpl(record, Callable { "done" }) + + assertEquals("done", result) + verify(scopes, never()).forkedRootScopes(any()) + } + + @Test + fun `withTracing skips scope forking when origin is ignored`() { + options.setIgnoredSpanOrigins(listOf(SentryKafkaConsumerTracing.TRACE_ORIGIN)) + val record = createRecord() + + val result = tracing.withTracingImpl(record, Callable { "done" }) + + assertEquals("done", result) + verify(scopes, never()).forkedRootScopes(any()) + } + + @Test + fun `withTracing marks transaction as error when callback throws`() { + val record = createRecord() + val exception = RuntimeException("boom") + + val thrown = + assertFailsWith { + tracing.withTracingImpl(record, Callable { throw exception }) + } + + assertEquals(exception, thrown) + verify(transaction).setStatus(SpanStatus.INTERNAL_ERROR) + verify(transaction).setThrowable(exception) + verify(transaction).finish() + verify(lifecycleToken).close() + } + + @Test + fun `withTracing falls back to direct callback execution when instrumentation setup fails`() { + whenever(scopes.forkedRootScopes(any())) + .thenThrow(RuntimeException("broken instrumentation")) + val record = createRecord() + + val result = tracing.withTracingImpl(record, Callable { "done" }) + + assertEquals("done", result) + verify(forkedScopes, 
never()).makeCurrent() + verify(transaction, never()).finish() + } + + @Test + fun `withTracing runnable overload executes callback`() { + val record = createRecord() + val didRun = AtomicBoolean(false) + + tracing.withTracingImpl(record, Runnable { didRun.set(true) }) + + assertTrue(didRun.get()) + verify(transaction).setStatus(SpanStatus.OK) + verify(transaction).finish() + } + + @Test + fun `withTracing runnable overload preserves original throwable`() { + val record = createRecord() + val exception = IOException("boom") + + val thrown = + assertFailsWith { tracing.withTracingImpl(record, Runnable { throw exception }) } + + assertEquals(exception, thrown) + verify(transaction).setStatus(SpanStatus.INTERNAL_ERROR) + verify(transaction).setThrowable(exception) + verify(transaction).finish() + } + + private fun createRecord( + topic: String = "my-topic", + sentryTrace: String? = null, + baggage: String? = null, + baggageHeaders: List? = null, + messageId: String? = null, + deliveryAttempt: Int? = null, + enqueuedTime: String? 
= null, + serializedValueSize: Int = -1, + ): ConsumerRecord { + val headers = RecordHeaders() + sentryTrace?.let { + headers.add(SentryTraceHeader.SENTRY_TRACE_HEADER, it.toByteArray(StandardCharsets.UTF_8)) + } + baggage?.let { + headers.add(BaggageHeader.BAGGAGE_HEADER, it.toByteArray(StandardCharsets.UTF_8)) + } + baggageHeaders?.forEach { + headers.add(BaggageHeader.BAGGAGE_HEADER, it.toByteArray(StandardCharsets.UTF_8)) + } + messageId?.let { + headers.add(SpanDataConvention.MESSAGING_MESSAGE_ID, it.toByteArray(StandardCharsets.UTF_8)) + } + deliveryAttempt?.let { + headers.add("kafka_deliveryAttempt", ByteBuffer.allocate(Int.SIZE_BYTES).putInt(it).array()) + } + enqueuedTime?.let { + headers.add( + SentryKafkaProducer.SENTRY_ENQUEUED_TIME_HEADER, + it.toByteArray(StandardCharsets.UTF_8), + ) + } + + return ConsumerRecord( + topic, + 0, + 0L, + System.currentTimeMillis(), + TimestampType.CREATE_TIME, + 3, + serializedValueSize, + "key", + "value", + headers, + Optional.empty(), + ) + } +} diff --git a/sentry-kafka/src/test/kotlin/io/sentry/kafka/SentryKafkaProducerTest.kt b/sentry-kafka/src/test/kotlin/io/sentry/kafka/SentryKafkaProducerTest.kt new file mode 100644 index 00000000000..15ea2d104ea --- /dev/null +++ b/sentry-kafka/src/test/kotlin/io/sentry/kafka/SentryKafkaProducerTest.kt @@ -0,0 +1,361 @@ +package io.sentry.kafka + +import io.sentry.BaggageHeader +import io.sentry.IScopes +import io.sentry.ISentryLifecycleToken +import io.sentry.ISpan +import io.sentry.NoOpSpan +import io.sentry.Scope +import io.sentry.ScopeCallback +import io.sentry.Sentry +import io.sentry.SentryOptions +import io.sentry.SentryTraceHeader +import io.sentry.SentryTracer +import io.sentry.SpanOptions +import io.sentry.SpanStatus +import io.sentry.TransactionContext +import io.sentry.test.initForTest +import java.nio.charset.StandardCharsets +import java.util.concurrent.CompletableFuture +import kotlin.test.AfterTest +import kotlin.test.BeforeTest +import kotlin.test.Test 
+import kotlin.test.assertEquals +import kotlin.test.assertFalse +import kotlin.test.assertNotNull +import kotlin.test.assertSame +import kotlin.test.assertTrue +import org.apache.kafka.clients.producer.Callback +import org.apache.kafka.clients.producer.Producer +import org.apache.kafka.clients.producer.ProducerRecord +import org.apache.kafka.clients.producer.RecordMetadata +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.header.Header +import org.apache.kafka.common.header.Headers +import org.mockito.kotlin.any +import org.mockito.kotlin.argumentCaptor +import org.mockito.kotlin.doAnswer +import org.mockito.kotlin.eq +import org.mockito.kotlin.isNull +import org.mockito.kotlin.mock +import org.mockito.kotlin.verify +import org.mockito.kotlin.whenever + +class SentryKafkaProducerTest { + + private lateinit var scopes: IScopes + private lateinit var options: SentryOptions + private lateinit var delegate: Producer + + @BeforeTest + fun setup() { + initForTest { + it.dsn = "https://key@sentry.io/proj" + it.isEnableQueueTracing = true + it.tracesSampleRate = 1.0 + } + scopes = mock() + options = + SentryOptions().apply { + dsn = "https://key@sentry.io/proj" + isEnableQueueTracing = true + } + whenever(scopes.options).thenReturn(options) + doAnswer { (it.arguments[0] as ScopeCallback).run(Scope(options)) } + .whenever(scopes) + .configureScope(any()) + delegate = mock() + whenever(delegate.send(any(), any())).thenReturn(CompletableFuture.completedFuture(null)) + } + + @AfterTest + fun teardown() { + Sentry.close() + } + + private fun createTransaction(): SentryTracer { + val tx = SentryTracer(TransactionContext("tx", "op"), scopes) + whenever(scopes.span).thenReturn(tx) + return tx + } + + @Test + fun `creates queue publish span and injects headers`() { + val tx = createTransaction() + val producer = SentryKafkaProducer.wrap(delegate, scopes) + val record = ProducerRecord("my-topic", "key", "value") + + producer.send(record) + + 
assertEquals(1, tx.spans.size) + val span = tx.spans.first() + assertEquals("queue.publish", span.operation) + assertEquals("my-topic", span.description) + assertEquals("kafka", span.data["messaging.system"]) + assertEquals("my-topic", span.data["messaging.destination.name"]) + assertEquals(SentryKafkaProducer.TRACE_ORIGIN, span.spanContext.origin) + + val sentryTraceHeader = record.headers().lastHeader(SentryTraceHeader.SENTRY_TRACE_HEADER) + assertNotNull(sentryTraceHeader) + + val enqueuedTimeHeader = + record.headers().lastHeader(SentryKafkaProducer.SENTRY_ENQUEUED_TIME_HEADER) + assertNotNull(enqueuedTimeHeader) + val enqueuedTimeRaw = String(enqueuedTimeHeader.value(), StandardCharsets.UTF_8) + // Cross-SDK consumers (e.g. sentry-python) parse this as a plain decimal — must not use + // scientific notation. + assertFalse(enqueuedTimeRaw.contains('E') || enqueuedTimeRaw.contains('e')) + assertTrue(enqueuedTimeRaw.matches(Regex("""^\d+\.\d{6}$"""))) + } + + @Test + fun `delegates send and does not finish span synchronously`() { + val tx = createTransaction() + val producer = SentryKafkaProducer.wrap(delegate, scopes) + val record = ProducerRecord("my-topic", "key", "value") + + producer.send(record) + + verify(delegate).send(eq(record), any()) + val span = tx.spans.first() + assertFalse(span.isFinished, "span should be open until callback fires") + } + + @Test + fun `finishes span as OK when broker ack callback succeeds`() { + val tx = createTransaction() + val producer = SentryKafkaProducer.wrap(delegate, scopes) + val record = ProducerRecord("my-topic", "key", "value") + + producer.send(record) + + val captor = argumentCaptor() + verify(delegate).send(eq(record), captor.capture()) + val metadata = RecordMetadata(TopicPartition("my-topic", 0), 0L, 0, 0L, 0, 0) + captor.firstValue.onCompletion(metadata, null) + + val span = tx.spans.first() + assertTrue(span.isFinished) + assertEquals(SpanStatus.OK, span.status) + } + + @Test + fun `finishes span as 
INTERNAL_ERROR when broker ack callback fails`() { + val tx = createTransaction() + val producer = SentryKafkaProducer.wrap(delegate, scopes) + val record = ProducerRecord("my-topic", "key", "value") + val exception = RuntimeException("boom") + + producer.send(record) + + val captor = argumentCaptor() + verify(delegate).send(eq(record), captor.capture()) + captor.firstValue.onCompletion(null, exception) + + val span = tx.spans.first() + assertTrue(span.isFinished) + assertEquals(SpanStatus.INTERNAL_ERROR, span.status) + assertSame(exception, span.throwable) + } + + @Test + fun `forwards user callback after finishing span`() { + createTransaction() + val producer = SentryKafkaProducer.wrap(delegate, scopes) + val record = ProducerRecord("my-topic", "key", "value") + val userCallback = mock() + + producer.send(record, userCallback) + + val captor = argumentCaptor() + verify(delegate).send(eq(record), captor.capture()) + val metadata = RecordMetadata(TopicPartition("my-topic", 0), 0L, 0, 0L, 0, 0) + captor.firstValue.onCompletion(metadata, null) + + verify(userCallback).onCompletion(metadata, null) + } + + @Test + fun `finishes span with error when delegate send throws synchronously`() { + val tx = createTransaction() + val exception = RuntimeException("kaboom") + whenever(delegate.send(any(), any())).thenThrow(exception) + val producer = SentryKafkaProducer.wrap(delegate, scopes) + val record = ProducerRecord("my-topic", "key", "value") + + val thrown = runCatching { producer.send(record) }.exceptionOrNull() + + assertSame(exception, thrown) + val span = tx.spans.first() + assertTrue(span.isFinished) + assertEquals(SpanStatus.INTERNAL_ERROR, span.status) + assertSame(exception, span.throwable) + } + + @Test + fun `delegates send without span when queue tracing is disabled`() { + createTransaction() + options.isEnableQueueTracing = false + val producer = SentryKafkaProducer.wrap(delegate, scopes) + val record = ProducerRecord("my-topic", "key", "value") + + 
producer.send(record) + + verify(delegate).send(eq(record), isNull()) + } + + @Test + fun `delegates send without span when trace origin is ignored`() { + val tx = createTransaction() + options.setIgnoredSpanOrigins(listOf(SentryKafkaProducer.TRACE_ORIGIN)) + val producer = SentryKafkaProducer.wrap(delegate, scopes) + val record = ProducerRecord("my-topic", "key", "value") + + producer.send(record) + + assertEquals(0, tx.spans.size) + verify(delegate).send(eq(record), isNull()) + assertEquals(null, record.headers().lastHeader(SentryTraceHeader.SENTRY_TRACE_HEADER)) + } + + @Test + fun `injects headers but creates no span when no active span`() { + whenever(scopes.span).thenReturn(null) + val producer = SentryKafkaProducer.wrap(delegate, scopes) + val record = ProducerRecord("my-topic", "key", "value") + + producer.send(record) + + verify(delegate).send(eq(record), isNull()) + // Headers should still be injected from PropagationContext + assertNotNull(record.headers().lastHeader(SentryTraceHeader.SENTRY_TRACE_HEADER)) + assertNotNull(record.headers().lastHeader(BaggageHeader.BAGGAGE_HEADER)) + assertNotNull(record.headers().lastHeader(SentryKafkaProducer.SENTRY_ENQUEUED_TIME_HEADER)) + } + + @Test + fun `injects headers but creates no span when active span is no-op`() { + whenever(scopes.span).thenReturn(NoOpSpan.getInstance()) + val producer = SentryKafkaProducer.wrap(delegate, scopes) + val record = ProducerRecord("my-topic", "key", "value") + + producer.send(record) + + verify(delegate).send(eq(record), isNull()) + // Headers should still be injected from PropagationContext + assertNotNull(record.headers().lastHeader(SentryTraceHeader.SENTRY_TRACE_HEADER)) + assertNotNull(record.headers().lastHeader(BaggageHeader.BAGGAGE_HEADER)) + assertNotNull(record.headers().lastHeader(SentryKafkaProducer.SENTRY_ENQUEUED_TIME_HEADER)) + } + + @Test + fun `preserves pre-existing third-party baggage header entries`() { + createTransaction() + val producer = 
SentryKafkaProducer.wrap(delegate, scopes) + val record = ProducerRecord("my-topic", "key", "value") + record + .headers() + .add( + BaggageHeader.BAGGAGE_HEADER, + "othervendor=someValue,another=thing".toByteArray(StandardCharsets.UTF_8), + ) + + producer.send(record) + + val baggageHeaders = record.headers().headers(BaggageHeader.BAGGAGE_HEADER).toList() + assertEquals(1, baggageHeaders.size) + val baggageValue = String(baggageHeaders.first().value(), StandardCharsets.UTF_8) + assertTrue(baggageValue.contains("othervendor=someValue")) + assertTrue(baggageValue.contains("another=thing")) + assertTrue(baggageValue.contains("sentry-")) + } + + @Test + fun `header injection failure does not prevent send`() { + val activeSpan = mock() + val span = mock() + val headers = mock() + val record = mock>() + whenever(scopes.span).thenReturn(activeSpan) + whenever(activeSpan.startChild(eq("queue.publish"), eq("my-topic"), any())) + .thenReturn(span) + whenever(span.isNoOp).thenReturn(false) + whenever(span.isFinished).thenReturn(false) + whenever(span.toSentryTrace()) + .thenReturn(SentryTraceHeader("2722d9f6ec019ade60c776169d9a8904-cedf5b7571cb4972-1")) + whenever(span.toBaggageHeader(null)).thenReturn(null) + whenever(record.topic()).thenReturn("my-topic") + whenever(record.headers()).thenReturn(headers) + whenever(headers.headers(BaggageHeader.BAGGAGE_HEADER)).thenReturn(emptyList

()) + whenever(headers.remove(SentryTraceHeader.SENTRY_TRACE_HEADER)) + .thenThrow(RuntimeException("boom")) + + val producer = SentryKafkaProducer.wrap(delegate, scopes) + producer.send(record) + + // Header injection failed silently; send still proceeds with wrapped callback for span + // lifecycle. + verify(delegate).send(eq(record), any()) + } + + @Test + fun `delegates non-send methods to underlying producer`() { + val producer = SentryKafkaProducer.wrap(delegate, scopes) + + producer.flush() + producer.partitionsFor("my-topic") + producer.metrics() + producer.close() + + verify(delegate).flush() + verify(delegate).partitionsFor("my-topic") + verify(delegate).metrics() + verify(delegate).close() + } + + @Test + fun `default wrap uses current scopes`() { + val transaction = Sentry.startTransaction("tx", "op") + val record = ProducerRecord("my-topic", "key", "value") + + try { + val token: ISentryLifecycleToken = transaction.makeCurrent() + try { + val producer = SentryKafkaProducer.wrap(delegate) + producer.send(record) + } finally { + token.close() + } + } finally { + transaction.finish() + } + + assertNotNull(record.headers().lastHeader(SentryTraceHeader.SENTRY_TRACE_HEADER)) + assertNotNull(record.headers().lastHeader(SentryKafkaProducer.SENTRY_ENQUEUED_TIME_HEADER)) + verify(delegate).send(eq(record), any()) + } + + @Test + fun `wraps callback even when child span is no-op`() { + val tx = createTransaction() + // Set max spans to 0 so the child span is no-op (over limit) + options.maxSpans = 0 + val producer = SentryKafkaProducer.wrap(delegate, scopes) + val record = ProducerRecord("my-topic", "key", "value") + + producer.send(record) + + // Callback is still wrapped (no-op span finish is harmless) + verify(delegate).send(eq(record), any()) + // Headers should still be injected from PropagationContext + assertNotNull(record.headers().lastHeader(SentryTraceHeader.SENTRY_TRACE_HEADER)) + assertNotNull(record.headers().lastHeader(BaggageHeader.BAGGAGE_HEADER)) 
+ assertNotNull(record.headers().lastHeader(SentryKafkaProducer.SENTRY_ENQUEUED_TIME_HEADER)) + } + + @Test + fun `toString includes delegate`() { + val producer = SentryKafkaProducer.wrap(delegate, scopes) + assertTrue(producer.toString().startsWith("SentryKafkaProducer[delegate=")) + } +} diff --git a/sentry-opentelemetry/sentry-opentelemetry-core/api/sentry-opentelemetry-core.api b/sentry-opentelemetry/sentry-opentelemetry-core/api/sentry-opentelemetry-core.api index b51c8cc39bc..847d69bca1b 100644 --- a/sentry-opentelemetry/sentry-opentelemetry-core/api/sentry-opentelemetry-core.api +++ b/sentry-opentelemetry/sentry-opentelemetry-core/api/sentry-opentelemetry-core.api @@ -149,7 +149,7 @@ public final class io/sentry/opentelemetry/SentrySpanProcessor : io/opentelemetr public final class io/sentry/opentelemetry/SpanDescriptionExtractor { public fun ()V - public fun extractSpanInfo (Lio/opentelemetry/sdk/trace/data/SpanData;Lio/sentry/opentelemetry/IOtelSpanWrapper;)Lio/sentry/opentelemetry/OtelSpanInfo; + public fun extractSpanInfo (Lio/opentelemetry/sdk/trace/data/SpanData;Lio/sentry/opentelemetry/IOtelSpanWrapper;Lio/sentry/SentryOptions;)Lio/sentry/opentelemetry/OtelSpanInfo; } public final class io/sentry/opentelemetry/SpanNode { diff --git a/sentry-opentelemetry/sentry-opentelemetry-core/src/main/java/io/sentry/opentelemetry/SentrySpanExporter.java b/sentry-opentelemetry/sentry-opentelemetry-core/src/main/java/io/sentry/opentelemetry/SentrySpanExporter.java index 680177f8451..e7fc873908a 100644 --- a/sentry-opentelemetry/sentry-opentelemetry-core/src/main/java/io/sentry/opentelemetry/SentrySpanExporter.java +++ b/sentry-opentelemetry/sentry-opentelemetry-core/src/main/java/io/sentry/opentelemetry/SentrySpanExporter.java @@ -12,6 +12,7 @@ import io.opentelemetry.sdk.trace.data.StatusData; import io.opentelemetry.sdk.trace.export.SpanExporter; import io.opentelemetry.semconv.HttpAttributes; +import 
io.opentelemetry.semconv.incubating.MessagingIncubatingAttributes; import io.opentelemetry.semconv.incubating.ProcessIncubatingAttributes; import io.opentelemetry.semconv.incubating.ThreadIncubatingAttributes; import io.sentry.Baggage; @@ -200,7 +201,7 @@ private void createAndFinishSpanForOtelSpan( final @Nullable IOtelSpanWrapper sentrySpanMaybe = spanStorage.getSentrySpan(spanData.getSpanContext()); final @NotNull OtelSpanInfo spanInfo = - spanDescriptionExtractor.extractSpanInfo(spanData, sentrySpanMaybe); + spanDescriptionExtractor.extractSpanInfo(spanData, sentrySpanMaybe, scopes.getOptions()); scopes .getOptions() @@ -294,7 +295,7 @@ private void transferSpanDetails( final @NotNull IScopes scopesToUse = scopesToUseBeforeForking.forkedCurrentScope("SentrySpanExporter.createTransaction"); final @NotNull OtelSpanInfo spanInfo = - spanDescriptionExtractor.extractSpanInfo(span, sentrySpanMaybe); + spanDescriptionExtractor.extractSpanInfo(span, sentrySpanMaybe, scopesToUse.getOptions()); scopesToUse .getOptions() @@ -361,6 +362,23 @@ private void transferSpanDetails( maybeTransferOtelAttribute(span, sentryTransaction, ThreadIncubatingAttributes.THREAD_ID); maybeTransferOtelAttribute(span, sentryTransaction, ThreadIncubatingAttributes.THREAD_NAME); + // Root transactions don't bulk-copy OTel attributes into span data (unlike child spans). + // The Sentry Queues product reads `trace.data.messaging.*`, so messaging attributes must + // be explicitly transferred for consumer root transactions to show up correctly. These are + // operational metadata (no payload contents) and are safe to transfer unconditionally. 
+ maybeTransferOtelAttribute( + span, sentryTransaction, MessagingIncubatingAttributes.MESSAGING_SYSTEM); + maybeTransferOtelAttribute( + span, sentryTransaction, MessagingIncubatingAttributes.MESSAGING_DESTINATION_NAME); + maybeTransferOtelAttribute( + span, sentryTransaction, MessagingIncubatingAttributes.MESSAGING_OPERATION_TYPE); + maybeTransferOtelAttribute( + span, sentryTransaction, MessagingIncubatingAttributes.MESSAGING_MESSAGE_ID); + maybeTransferOtelAttribute( + span, sentryTransaction, MessagingIncubatingAttributes.MESSAGING_MESSAGE_BODY_SIZE); + maybeTransferOtelAttribute( + span, sentryTransaction, MessagingIncubatingAttributes.MESSAGING_MESSAGE_ENVELOPE_SIZE); + scopesToUse.configureScope( ScopeType.CURRENT, scope -> attributesExtractor.extract(span, scope, scopesToUse.getOptions())); diff --git a/sentry-opentelemetry/sentry-opentelemetry-core/src/main/java/io/sentry/opentelemetry/SentrySpanProcessor.java b/sentry-opentelemetry/sentry-opentelemetry-core/src/main/java/io/sentry/opentelemetry/SentrySpanProcessor.java index 9c6a51f17c3..31bd6368318 100644 --- a/sentry-opentelemetry/sentry-opentelemetry-core/src/main/java/io/sentry/opentelemetry/SentrySpanProcessor.java +++ b/sentry-opentelemetry/sentry-opentelemetry-core/src/main/java/io/sentry/opentelemetry/SentrySpanProcessor.java @@ -297,7 +297,7 @@ private boolean isSentryRequest(final @NotNull ReadableSpan otelSpan) { private void updateTransactionWithOtelData( final @NotNull ITransaction sentryTransaction, final @NotNull ReadableSpan otelSpan) { final @NotNull OtelSpanInfo otelSpanInfo = - spanDescriptionExtractor.extractSpanInfo(otelSpan.toSpanData(), null); + spanDescriptionExtractor.extractSpanInfo(otelSpan.toSpanData(), null, scopes.getOptions()); sentryTransaction.setOperation(otelSpanInfo.getOp()); String transactionName = otelSpanInfo.getDescription(); sentryTransaction.setName( @@ -334,7 +334,7 @@ private void updateSpanWithOtelData( }); final @NotNull OtelSpanInfo otelSpanInfo = - 
spanDescriptionExtractor.extractSpanInfo(otelSpan.toSpanData(), null); + spanDescriptionExtractor.extractSpanInfo(otelSpan.toSpanData(), null, scopes.getOptions()); sentrySpan.setOperation(otelSpanInfo.getOp()); sentrySpan.setDescription(otelSpanInfo.getDescription()); } diff --git a/sentry-opentelemetry/sentry-opentelemetry-core/src/main/java/io/sentry/opentelemetry/SpanDescriptionExtractor.java b/sentry-opentelemetry/sentry-opentelemetry-core/src/main/java/io/sentry/opentelemetry/SpanDescriptionExtractor.java index b66555d68c9..90db227505d 100644 --- a/sentry-opentelemetry/sentry-opentelemetry-core/src/main/java/io/sentry/opentelemetry/SpanDescriptionExtractor.java +++ b/sentry-opentelemetry/sentry-opentelemetry-core/src/main/java/io/sentry/opentelemetry/SpanDescriptionExtractor.java @@ -7,6 +7,8 @@ import io.opentelemetry.semconv.UrlAttributes; import io.opentelemetry.semconv.incubating.DbIncubatingAttributes; import io.opentelemetry.semconv.incubating.HttpIncubatingAttributes; +import io.opentelemetry.semconv.incubating.MessagingIncubatingAttributes; +import io.sentry.SentryOptions; import io.sentry.protocol.TransactionNameSource; import org.jetbrains.annotations.ApiStatus; import org.jetbrains.annotations.NotNull; @@ -17,9 +19,19 @@ public final class SpanDescriptionExtractor { @SuppressWarnings("deprecation") public @NotNull OtelSpanInfo extractSpanInfo( - final @NotNull SpanData otelSpan, final @Nullable IOtelSpanWrapper sentrySpan) { + final @NotNull SpanData otelSpan, + final @Nullable IOtelSpanWrapper sentrySpan, + final @NotNull SentryOptions options) { final @NotNull Attributes attributes = otelSpan.getAttributes(); + if (options.isEnableQueueTracing()) { + final @Nullable String messagingSystem = + attributes.get(MessagingIncubatingAttributes.MESSAGING_SYSTEM); + if (messagingSystem != null) { + return descriptionForMessagingSystem(otelSpan); + } + } + final @Nullable String httpMethod = attributes.get(HttpAttributes.HTTP_REQUEST_METHOD); if 
(httpMethod != null) { return descriptionForHttpMethod(otelSpan, httpMethod); @@ -91,6 +103,57 @@ private static boolean isRootSpan(SpanData otelSpan) { return !otelSpan.getParentSpanContext().isValid() || otelSpan.getParentSpanContext().isRemote(); } + @SuppressWarnings("deprecation") + private OtelSpanInfo descriptionForMessagingSystem(final @NotNull SpanData otelSpan) { + final @NotNull Attributes attributes = otelSpan.getAttributes(); + final @NotNull String op = opForMessaging(otelSpan); + final @Nullable String destination = + attributes.get(MessagingIncubatingAttributes.MESSAGING_DESTINATION_NAME); + final @NotNull String description = destination != null ? destination : otelSpan.getName(); + return new OtelSpanInfo(op, description, TransactionNameSource.TASK); + } + + @SuppressWarnings("deprecation") + private @NotNull String opForMessaging(final @NotNull SpanData otelSpan) { + final @NotNull Attributes attributes = otelSpan.getAttributes(); + // Prefer `messaging.operation.type` (current OTel semconv), fall back to legacy + // `messaging.operation`. OTel's SpanKind.CONSUMER is overloaded for both `receive` and + // `process`, so attribute-first mapping is required. SpanKind is used only as a last resort. 
+ @Nullable + String operationType = attributes.get(MessagingIncubatingAttributes.MESSAGING_OPERATION_TYPE); + if (operationType == null) { + operationType = attributes.get(MessagingIncubatingAttributes.MESSAGING_OPERATION); + } + if (operationType != null) { + switch (operationType) { + case "publish": + case "send": + return "queue.publish"; + case "create": + return "queue.create"; + case "receive": + return "queue.receive"; + case "process": + case "deliver": + return "queue.process"; + case "settle": + return "queue.settle"; + default: + // fall through to SpanKind mapping + break; + } + } + + final @NotNull SpanKind kind = otelSpan.getKind(); + if (SpanKind.PRODUCER.equals(kind)) { + return "queue.publish"; + } + if (SpanKind.CONSUMER.equals(kind)) { + return "queue.process"; + } + return "queue"; + } + @SuppressWarnings("deprecation") private OtelSpanInfo descriptionForDbSystem(final @NotNull SpanData otelSpan) { final @NotNull Attributes attributes = otelSpan.getAttributes(); diff --git a/sentry-opentelemetry/sentry-opentelemetry-core/src/test/kotlin/SpanDescriptionExtractorTest.kt b/sentry-opentelemetry/sentry-opentelemetry-core/src/test/kotlin/SpanDescriptionExtractorTest.kt index 9c5a1a352df..a43afb849e6 100644 --- a/sentry-opentelemetry/sentry-opentelemetry-core/src/test/kotlin/SpanDescriptionExtractorTest.kt +++ b/sentry-opentelemetry/sentry-opentelemetry-core/src/test/kotlin/SpanDescriptionExtractorTest.kt @@ -11,6 +11,8 @@ import io.opentelemetry.semconv.HttpAttributes import io.opentelemetry.semconv.UrlAttributes import io.opentelemetry.semconv.incubating.DbIncubatingAttributes import io.opentelemetry.semconv.incubating.HttpIncubatingAttributes +import io.opentelemetry.semconv.incubating.MessagingIncubatingAttributes +import io.sentry.SentryOptions import io.sentry.protocol.TransactionNameSource import kotlin.test.Test import kotlin.test.assertEquals @@ -228,6 +230,250 @@ class SpanDescriptionExtractorTest { assertEquals(TransactionNameSource.TASK, 
info.transactionNameSource) } + @Test + fun `ignores messaging system when queue tracing disabled`() { + givenSpanName("my-topic publish") + givenAttributes( + mapOf( + MessagingIncubatingAttributes.MESSAGING_SYSTEM to "kafka", + MessagingIncubatingAttributes.MESSAGING_DESTINATION_NAME to "my-topic", + MessagingIncubatingAttributes.MESSAGING_OPERATION_TYPE to "publish", + ) + ) + + val info = whenExtractingSpanInfo(queueTracingEnabled = false) + + assertEquals("my-topic publish", info.op) + assertEquals("my-topic publish", info.description) + assertEquals(TransactionNameSource.CUSTOM, info.transactionNameSource) + } + + @Test + fun `maps messaging publish operation type to queue publish op`() { + givenAttributes( + mapOf( + MessagingIncubatingAttributes.MESSAGING_SYSTEM to "kafka", + MessagingIncubatingAttributes.MESSAGING_DESTINATION_NAME to "my-topic", + MessagingIncubatingAttributes.MESSAGING_OPERATION_TYPE to "publish", + ) + ) + + val info = whenExtractingSpanInfo(queueTracingEnabled = true) + + assertEquals("queue.publish", info.op) + assertEquals("my-topic", info.description) + assertEquals(TransactionNameSource.TASK, info.transactionNameSource) + } + + @Test + fun `maps messaging send operation type to queue publish op`() { + givenAttributes( + mapOf( + MessagingIncubatingAttributes.MESSAGING_SYSTEM to "kafka", + MessagingIncubatingAttributes.MESSAGING_DESTINATION_NAME to "my-topic", + MessagingIncubatingAttributes.MESSAGING_OPERATION_TYPE to "send", + ) + ) + + val info = whenExtractingSpanInfo(queueTracingEnabled = true) + + assertEquals("queue.publish", info.op) + assertEquals("my-topic", info.description) + assertEquals(TransactionNameSource.TASK, info.transactionNameSource) + } + + @Test + fun `maps messaging process operation type to queue process op`() { + givenAttributes( + mapOf( + MessagingIncubatingAttributes.MESSAGING_SYSTEM to "kafka", + MessagingIncubatingAttributes.MESSAGING_DESTINATION_NAME to "my-topic", + 
MessagingIncubatingAttributes.MESSAGING_OPERATION_TYPE to "process", + ) + ) + + val info = whenExtractingSpanInfo(queueTracingEnabled = true) + + assertEquals("queue.process", info.op) + assertEquals("my-topic", info.description) + assertEquals(TransactionNameSource.TASK, info.transactionNameSource) + } + + @Test + fun `maps messaging deliver operation type to queue process op`() { + givenAttributes( + mapOf( + MessagingIncubatingAttributes.MESSAGING_SYSTEM to "kafka", + MessagingIncubatingAttributes.MESSAGING_DESTINATION_NAME to "my-topic", + MessagingIncubatingAttributes.MESSAGING_OPERATION_TYPE to "deliver", + ) + ) + + val info = whenExtractingSpanInfo(queueTracingEnabled = true) + + assertEquals("queue.process", info.op) + assertEquals("my-topic", info.description) + assertEquals(TransactionNameSource.TASK, info.transactionNameSource) + } + + @Test + fun `maps messaging create operation type to queue create op`() { + givenAttributes( + mapOf( + MessagingIncubatingAttributes.MESSAGING_SYSTEM to "kafka", + MessagingIncubatingAttributes.MESSAGING_DESTINATION_NAME to "my-topic", + MessagingIncubatingAttributes.MESSAGING_OPERATION_TYPE to "create", + ) + ) + + val info = whenExtractingSpanInfo(queueTracingEnabled = true) + + assertEquals("queue.create", info.op) + assertEquals("my-topic", info.description) + assertEquals(TransactionNameSource.TASK, info.transactionNameSource) + } + + @Test + fun `maps messaging receive operation type to queue receive op`() { + givenAttributes( + mapOf( + MessagingIncubatingAttributes.MESSAGING_SYSTEM to "kafka", + MessagingIncubatingAttributes.MESSAGING_DESTINATION_NAME to "my-topic", + MessagingIncubatingAttributes.MESSAGING_OPERATION_TYPE to "receive", + ) + ) + + val info = whenExtractingSpanInfo(queueTracingEnabled = true) + + assertEquals("queue.receive", info.op) + assertEquals("my-topic", info.description) + assertEquals(TransactionNameSource.TASK, info.transactionNameSource) + } + + @Test + fun `maps messaging settle 
operation type to queue settle op`() { + givenAttributes( + mapOf( + MessagingIncubatingAttributes.MESSAGING_SYSTEM to "rabbitmq", + MessagingIncubatingAttributes.MESSAGING_DESTINATION_NAME to "my-queue", + MessagingIncubatingAttributes.MESSAGING_OPERATION_TYPE to "settle", + ) + ) + + val info = whenExtractingSpanInfo(queueTracingEnabled = true) + + assertEquals("queue.settle", info.op) + assertEquals("my-queue", info.description) + assertEquals(TransactionNameSource.TASK, info.transactionNameSource) + } + + @Test + fun `falls back to legacy messaging operation attribute`() { + @Suppress("DEPRECATION") + givenAttributes( + mapOf( + MessagingIncubatingAttributes.MESSAGING_SYSTEM to "rabbitmq", + MessagingIncubatingAttributes.MESSAGING_DESTINATION_NAME to "queue-name", + MessagingIncubatingAttributes.MESSAGING_OPERATION to "publish", + ) + ) + + val info = whenExtractingSpanInfo(queueTracingEnabled = true) + + assertEquals("queue.publish", info.op) + assertEquals("queue-name", info.description) + } + + @Test + fun `falls back to PRODUCER span kind when no operation attribute`() { + givenSpanKind(SpanKind.PRODUCER) + givenAttributes( + mapOf( + MessagingIncubatingAttributes.MESSAGING_SYSTEM to "kafka", + MessagingIncubatingAttributes.MESSAGING_DESTINATION_NAME to "my-topic", + ) + ) + + val info = whenExtractingSpanInfo(queueTracingEnabled = true) + + assertEquals("queue.publish", info.op) + assertEquals("my-topic", info.description) + } + + @Test + fun `falls back to CONSUMER span kind when no operation attribute`() { + givenSpanKind(SpanKind.CONSUMER) + givenAttributes( + mapOf( + MessagingIncubatingAttributes.MESSAGING_SYSTEM to "kafka", + MessagingIncubatingAttributes.MESSAGING_DESTINATION_NAME to "my-topic", + ) + ) + + val info = whenExtractingSpanInfo(queueTracingEnabled = true) + + assertEquals("queue.process", info.op) + assertEquals("my-topic", info.description) + } + + @Test + fun `falls back to span name as description when destination missing`() { + 
givenSpanName("my-topic publish") + givenAttributes( + mapOf( + MessagingIncubatingAttributes.MESSAGING_SYSTEM to "kafka", + MessagingIncubatingAttributes.MESSAGING_OPERATION_TYPE to "publish", + ) + ) + + val info = whenExtractingSpanInfo(queueTracingEnabled = true) + + assertEquals("queue.publish", info.op) + assertEquals("my-topic publish", info.description) + } + + @Test + fun `messaging mapping wins over http when both attributes present and queue tracing enabled`() { + // Some OTel instrumentations (e.g. aws-sdk-2.2 SQS) attach both messaging and http + // attributes to the same span. Messaging is more specific and must win. + givenSpanKind(SpanKind.PRODUCER) + givenAttributes( + mapOf( + HttpAttributes.HTTP_REQUEST_METHOD to "POST", + UrlAttributes.URL_FULL to "https://sqs.us-east-1.amazonaws.com/", + MessagingIncubatingAttributes.MESSAGING_SYSTEM to "aws.sqs", + MessagingIncubatingAttributes.MESSAGING_DESTINATION_NAME to "my-queue", + MessagingIncubatingAttributes.MESSAGING_OPERATION_TYPE to "publish", + ) + ) + + val info = whenExtractingSpanInfo(queueTracingEnabled = true) + + assertEquals("queue.publish", info.op) + assertEquals("my-queue", info.description) + assertEquals(TransactionNameSource.TASK, info.transactionNameSource) + } + + @Test + fun `http mapping wins over messaging when queue tracing disabled`() { + givenSpanKind(SpanKind.CLIENT) + givenAttributes( + mapOf( + HttpAttributes.HTTP_REQUEST_METHOD to "POST", + UrlAttributes.URL_FULL to "https://sqs.us-east-1.amazonaws.com/", + MessagingIncubatingAttributes.MESSAGING_SYSTEM to "aws.sqs", + MessagingIncubatingAttributes.MESSAGING_DESTINATION_NAME to "my-queue", + MessagingIncubatingAttributes.MESSAGING_OPERATION_TYPE to "publish", + ) + ) + + val info = whenExtractingSpanInfo(queueTracingEnabled = false) + + assertEquals("http.client", info.op) + assertEquals("POST https://sqs.us-east-1.amazonaws.com/", info.description) + assertEquals(TransactionNameSource.URL, info.transactionNameSource) + } 
+ @Test fun `uses span name as op and description if no relevant attributes`() { givenSpanName("span name") @@ -289,9 +535,10 @@ class SpanDescriptionExtractorTest { builder.put(key as AttributeKey, value) } - private fun whenExtractingSpanInfo(): OtelSpanInfo { + private fun whenExtractingSpanInfo(queueTracingEnabled: Boolean = false): OtelSpanInfo { fixture.setup() - return SpanDescriptionExtractor().extractSpanInfo(fixture.otelSpan, fixture.sentrySpan) + val options = SentryOptions().apply { isEnableQueueTracing = queueTracingEnabled } + return SpanDescriptionExtractor().extractSpanInfo(fixture.otelSpan, fixture.sentrySpan, options) } private fun givenParentContext(parentContext: SpanContext) { diff --git a/sentry-samples/sentry-samples-console/build.gradle.kts b/sentry-samples/sentry-samples-console/build.gradle.kts index 0dc6183b4fc..010195c6778 100644 --- a/sentry-samples/sentry-samples-console/build.gradle.kts +++ b/sentry-samples/sentry-samples-console/build.gradle.kts @@ -36,8 +36,10 @@ dependencies { implementation(projects.sentry) implementation(projects.sentryAsyncProfiler) implementation(projects.sentryJcache) + implementation(projects.sentryKafka) implementation(libs.jcache) implementation(libs.caffeine.jcache) + implementation(libs.kafka.clients) testImplementation(kotlin(Config.kotlinStdLib)) testImplementation(projects.sentry) diff --git a/sentry-samples/sentry-samples-console/src/main/java/io/sentry/samples/console/Main.java b/sentry-samples/sentry-samples-console/src/main/java/io/sentry/samples/console/Main.java index 0ed0646c7bc..2a45ef6902c 100644 --- a/sentry-samples/sentry-samples-console/src/main/java/io/sentry/samples/console/Main.java +++ b/sentry-samples/sentry-samples-console/src/main/java/io/sentry/samples/console/Main.java @@ -5,6 +5,7 @@ import io.sentry.jcache.SentryJCacheWrapper; import io.sentry.protocol.Message; import io.sentry.protocol.User; +import io.sentry.samples.console.kafka.KafkaShowcase; import java.util.Collections; 
import javax.cache.Cache; import javax.cache.CacheManager; @@ -16,6 +17,10 @@ public class Main { private static long numberOfDiscardedSpansDueToOverflow = 0; public static void main(String[] args) throws InterruptedException { + final String kafkaBootstrapServers = System.getenv("SENTRY_SAMPLE_KAFKA_BOOTSTRAP_SERVERS"); + final boolean kafkaEnabled = + kafkaBootstrapServers != null && !kafkaBootstrapServers.trim().isEmpty(); + Sentry.init( options -> { // NOTE: Replace the test DSN below with YOUR OWN DSN to see the events from this app in @@ -95,6 +100,7 @@ public static void main(String[] args) throws InterruptedException { // Enable cache tracing to create spans for cache operations options.setEnableCacheTracing(true); + options.setEnableQueueTracing(kafkaEnabled); // Determine traces sample rate based on the sampling context // options.setTracesSampler( @@ -178,6 +184,13 @@ public static void main(String[] args) throws InterruptedException { // cache.remove, and cache.flush spans as children of the active transaction. demonstrateCacheTracing(); + // Kafka queue tracing with the kafka-clients producer interceptor and manual consumer tracing. 
+ // + // Enable with: SENTRY_SAMPLE_KAFKA_BOOTSTRAP_SERVERS=localhost:9092 + if (kafkaEnabled) { + KafkaShowcase.runKafkaWithSentryTracing(kafkaBootstrapServers); + } + // Performance feature // // Transactions collect execution time of the piece of code that's executed between the start diff --git a/sentry-samples/sentry-samples-console/src/main/java/io/sentry/samples/console/kafka/KafkaShowcase.java b/sentry-samples/sentry-samples-console/src/main/java/io/sentry/samples/console/kafka/KafkaShowcase.java new file mode 100644 index 00000000000..de85e46b25f --- /dev/null +++ b/sentry-samples/sentry-samples-console/src/main/java/io/sentry/samples/console/kafka/KafkaShowcase.java @@ -0,0 +1,143 @@ +package io.sentry.samples.console.kafka; + +import io.sentry.ISentryLifecycleToken; +import io.sentry.ITransaction; +import io.sentry.Sentry; +import io.sentry.kafka.SentryKafkaConsumerTracing; +import io.sentry.kafka.SentryKafkaProducer; +import java.time.Duration; +import java.util.Collections; +import java.util.Properties; +import java.util.UUID; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.serialization.StringSerializer; + +public final class KafkaShowcase { + + public static final String TOPIC = "sentry-topic-console-sample"; + + private KafkaShowcase() {} + + public static void runKafkaWithSentryTracing(final String bootstrapServers) { + final CountDownLatch consumedLatch = new 
CountDownLatch(1); + final Thread consumerThread = startConsumerWithSentryTracing(bootstrapServers, consumedLatch); + final Properties producerProperties = createProducerProperties(bootstrapServers); + + final ITransaction transaction = Sentry.startTransaction("kafka-demo", "demo"); + try (ISentryLifecycleToken ignored = transaction.makeCurrent()) { + // 1. Create the raw Kafka producer as you normally would. + final KafkaProducer rawProducer = new KafkaProducer<>(producerProperties); + + // 2. >>> Sentry instrumentation <<< + // Wrap it with SentryKafkaProducer.wrap() so every send is captured as a + // `queue.publish` span that closes when the broker ack callback fires. + final Producer producer = SentryKafkaProducer.wrap(rawProducer); + + try (producer) { + Thread.sleep(500); + producer.send(new ProducerRecord<>(TOPIC, "sentry-kafka sample message")).get(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (Exception ignoredException) { + // local broker may not be available when running the sample + } + + try { + consumedLatch.await(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } finally { + consumerThread.interrupt(); + try { + consumerThread.join(1000); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + transaction.finish(); + } + } + + public static Properties createProducerProperties(final String bootstrapServers) { + final Properties producerProperties = new Properties(); + producerProperties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); + producerProperties.put( + ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + producerProperties.put( + ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + + // Optional tuning for sample stability in CI/local runs. 
+ producerProperties.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 2000); + producerProperties.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 2000); + producerProperties.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, 3000); + + return producerProperties; + } + + public static Properties createConsumerProperties(final String bootstrapServers) { + final Properties consumerProperties = new Properties(); + consumerProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); + consumerProperties.put( + ConsumerConfig.GROUP_ID_CONFIG, "sentry-console-sample-" + UUID.randomUUID()); + consumerProperties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + consumerProperties.put( + ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); + consumerProperties.put( + ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); + + // Optional tuning for sample stability in CI/local runs. + consumerProperties.put(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, 2000); + consumerProperties.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 2000); + + return consumerProperties; + } + + private static Thread startConsumerWithSentryTracing( + final String bootstrapServers, final CountDownLatch consumedLatch) { + final Thread consumerThread = + new Thread( + () -> { + final Properties consumerProperties = createConsumerProperties(bootstrapServers); + + try (KafkaConsumer consumer = + new KafkaConsumer<>(consumerProperties)) { + consumer.subscribe(Collections.singletonList(TOPIC)); + + while (!Thread.currentThread().isInterrupted() && consumedLatch.getCount() > 0) { + final ConsumerRecords records = + consumer.poll(Duration.ofMillis(500)); + for (final ConsumerRecord record : records) { + SentryKafkaConsumerTracing.withTracing( + record, + () -> { + System.out.println( + "Consumed Kafka message from " + + record.topic() + + ": " + + record.value()); + consumedLatch.countDown(); + }); + if (consumedLatch.getCount() == 0) { + 
break; + } + } + } + } catch (Exception ignored) { + // local broker may not be available when running the sample + } + }, + "sentry-kafka-sample-consumer"); + consumerThread.start(); + return consumerThread; + } +} diff --git a/sentry-samples/sentry-samples-console/src/test/kotlin/io/sentry/systemtest/ConsoleApplicationSystemTest.kt b/sentry-samples/sentry-samples-console/src/test/kotlin/io/sentry/systemtest/ConsoleApplicationSystemTest.kt index 2b009167acb..db6f54a616b 100644 --- a/sentry-samples/sentry-samples-console/src/test/kotlin/io/sentry/systemtest/ConsoleApplicationSystemTest.kt +++ b/sentry-samples/sentry-samples-console/src/test/kotlin/io/sentry/systemtest/ConsoleApplicationSystemTest.kt @@ -19,19 +19,7 @@ class ConsoleApplicationSystemTest { @Test fun `console application sends expected events when run as JAR`() { - val jarFile = testHelper.findJar("sentry-samples-console") - val process = - testHelper.launch( - jarFile, - mapOf( - "SENTRY_DSN" to testHelper.dsn, - "SENTRY_TRACES_SAMPLE_RATE" to "1.0", - "SENTRY_ENABLE_PRETTY_SERIALIZATION_OUTPUT" to "false", - "SENTRY_DEBUG" to "true", - "SENTRY_PROFILE_SESSION_SAMPLE_RATE" to "1.0", - "SENTRY_PROFILE_LIFECYCLE" to "TRACE", - ), - ) + val process = launchConsoleProcess() process.waitFor(30, TimeUnit.SECONDS) assertEquals(0, process.exitValue()) @@ -40,6 +28,41 @@ class ConsoleApplicationSystemTest { verifyExpectedEvents() } + @Test + fun `console application sends kafka producer and consumer tracing when kafka is enabled`() { + val process = + launchConsoleProcess(mapOf("SENTRY_SAMPLE_KAFKA_BOOTSTRAP_SERVERS" to "localhost:9092")) + + process.waitFor(30, TimeUnit.SECONDS) + assertEquals(0, process.exitValue()) + + testHelper.ensureTransactionReceived { transaction, _ -> + transaction.transaction == "kafka-demo" && + testHelper.doesTransactionContainSpanWithOp(transaction, "queue.publish") + } + + testHelper.ensureTransactionReceived { transaction, _ -> + testHelper.doesTransactionHaveOp(transaction, 
"queue.process") && + transaction.contexts.trace?.origin == "manual.queue.kafka.consumer" && + transaction.contexts.trace?.data?.get("messaging.system") == "kafka" + } + } + + private fun launchConsoleProcess(overrides: Map = emptyMap()): Process { + val jarFile = testHelper.findJar("sentry-samples-console") + val env = + mutableMapOf( + "SENTRY_DSN" to testHelper.dsn, + "SENTRY_TRACES_SAMPLE_RATE" to "1.0", + "SENTRY_ENABLE_PRETTY_SERIALIZATION_OUTPUT" to "false", + "SENTRY_DEBUG" to "true", + "SENTRY_PROFILE_SESSION_SAMPLE_RATE" to "1.0", + "SENTRY_PROFILE_LIFECYCLE" to "TRACE", + ) + env.putAll(overrides) + return testHelper.launch(jarFile, env) + } + private fun verifyExpectedEvents() { var profilerId: SentryId? = null // Verify we received a "Fatal message!" event diff --git a/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry-noagent/build.gradle.kts b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry-noagent/build.gradle.kts index 86914467a6d..87909294cd8 100644 --- a/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry-noagent/build.gradle.kts +++ b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry-noagent/build.gradle.kts @@ -52,6 +52,10 @@ dependencies { implementation(projects.sentryAsyncProfiler) implementation(projects.sentryOpentelemetry.sentryOpentelemetryAgentlessSpring) + // kafka + implementation(libs.spring.kafka3) + implementation(projects.sentryKafka) + // cache tracing implementation(libs.springboot3.starter.cache) implementation(libs.caffeine) diff --git a/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry-noagent/src/main/java/io/sentry/samples/spring/boot/jakarta/queues/kafka/KafkaConsumer.java b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry-noagent/src/main/java/io/sentry/samples/spring/boot/jakarta/queues/kafka/KafkaConsumer.java new file mode 100644 index 00000000000..5931efa3a3b --- /dev/null +++ 
b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry-noagent/src/main/java/io/sentry/samples/spring/boot/jakarta/queues/kafka/KafkaConsumer.java @@ -0,0 +1,19 @@ +package io.sentry.samples.spring.boot.jakarta.queues.kafka; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.context.annotation.Profile; +import org.springframework.kafka.annotation.KafkaListener; +import org.springframework.stereotype.Component; + +@Component +@Profile("kafka") +public class KafkaConsumer { + + private static final Logger logger = LoggerFactory.getLogger(KafkaConsumer.class); + + @KafkaListener(topics = "sentry-topic", groupId = "sentry-sample-group") + public void listen(String message) { + logger.info("Received message: {}", message); + } +} diff --git a/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry-noagent/src/main/java/io/sentry/samples/spring/boot/jakarta/queues/kafka/KafkaController.java b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry-noagent/src/main/java/io/sentry/samples/spring/boot/jakarta/queues/kafka/KafkaController.java new file mode 100644 index 00000000000..b17d231951d --- /dev/null +++ b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry-noagent/src/main/java/io/sentry/samples/spring/boot/jakarta/queues/kafka/KafkaController.java @@ -0,0 +1,26 @@ +package io.sentry.samples.spring.boot.jakarta.queues.kafka; + +import org.springframework.context.annotation.Profile; +import org.springframework.kafka.core.KafkaTemplate; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.RestController; + +@RestController +@Profile("kafka") +@RequestMapping("/kafka") +public class KafkaController { + + private final KafkaTemplate kafkaTemplate; + + public KafkaController(KafkaTemplate kafkaTemplate) { + 
this.kafkaTemplate = kafkaTemplate; + } + + @GetMapping("/produce") + String produce(@RequestParam(defaultValue = "hello from sentry!") String message) { + kafkaTemplate.send("sentry-topic", message); + return "Message sent: " + message; + } +} diff --git a/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry-noagent/src/main/resources/application-kafka.properties b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry-noagent/src/main/resources/application-kafka.properties new file mode 100644 index 00000000000..e0abadf5f9c --- /dev/null +++ b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry-noagent/src/main/resources/application-kafka.properties @@ -0,0 +1,12 @@ +# Kafka — activate with: --spring.profiles.active=kafka +sentry.enable-queue-tracing=true + +spring.kafka.bootstrap-servers=localhost:9092 +spring.kafka.consumer.group-id=sentry-sample-group +spring.kafka.consumer.auto-offset-reset=earliest +spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer +spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer +spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer +spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer + +logging.level.org.apache.kafka=warn diff --git a/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry-noagent/src/test/kotlin/io/sentry/systemtest/KafkaOtelCoexistenceSystemTest.kt b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry-noagent/src/test/kotlin/io/sentry/systemtest/KafkaOtelCoexistenceSystemTest.kt new file mode 100644 index 00000000000..0f85e81a0a6 --- /dev/null +++ b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry-noagent/src/test/kotlin/io/sentry/systemtest/KafkaOtelCoexistenceSystemTest.kt @@ -0,0 +1,41 @@ +package io.sentry.systemtest + +import io.sentry.systemtest.util.TestHelper +import 
kotlin.test.Test +import kotlin.test.assertEquals +import org.junit.Before + +class KafkaOtelCoexistenceSystemTest { + lateinit var testHelper: TestHelper + + @Before + fun setup() { + testHelper = TestHelper("http://localhost:8080") + testHelper.reset() + } + + @Test + fun `Sentry Kafka integration is suppressed when OTel is active`() { + val restClient = testHelper.restClient + + restClient.produceKafkaMessage("otel-coexistence-test") + assertEquals(200, restClient.lastKnownStatusCode) + + testHelper.ensureTransactionReceived { transaction, _ -> + transaction.transaction == "GET /kafka/produce" && + transaction.sdk?.integrationSet?.contains("SpringKafka") != true && + transaction.spans.any { span -> + span.op == "queue.publish" && + span.origin == "auto.opentelemetry" && + span.data?.get("messaging.system") == "kafka" + } + } + + testHelper.ensureTransactionReceived { transaction, _ -> + transaction.contexts.trace?.operation == "queue.process" && + transaction.contexts.trace?.origin == "auto.opentelemetry" && + transaction.contexts.trace?.data?.get("messaging.system") == "kafka" && + transaction.sdk?.integrationSet?.contains("SpringKafka") != true + } + } +} diff --git a/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry/build.gradle.kts b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry/build.gradle.kts index 37d7a94eec0..0f20925f782 100644 --- a/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry/build.gradle.kts +++ b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry/build.gradle.kts @@ -56,6 +56,10 @@ dependencies { implementation(libs.otel) implementation(projects.sentryAsyncProfiler) + // kafka + implementation(libs.spring.kafka3) + implementation(projects.sentryKafka) + // cache tracing implementation(libs.springboot3.starter.cache) implementation(libs.caffeine) diff --git 
a/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry/src/main/java/io/sentry/samples/spring/boot/jakarta/queues/kafka/KafkaConsumer.java b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry/src/main/java/io/sentry/samples/spring/boot/jakarta/queues/kafka/KafkaConsumer.java new file mode 100644 index 00000000000..5931efa3a3b --- /dev/null +++ b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry/src/main/java/io/sentry/samples/spring/boot/jakarta/queues/kafka/KafkaConsumer.java @@ -0,0 +1,19 @@ +package io.sentry.samples.spring.boot.jakarta.queues.kafka; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.context.annotation.Profile; +import org.springframework.kafka.annotation.KafkaListener; +import org.springframework.stereotype.Component; + +@Component +@Profile("kafka") +public class KafkaConsumer { + + private static final Logger logger = LoggerFactory.getLogger(KafkaConsumer.class); + + @KafkaListener(topics = "sentry-topic", groupId = "sentry-sample-group") + public void listen(String message) { + logger.info("Received message: {}", message); + } +} diff --git a/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry/src/main/java/io/sentry/samples/spring/boot/jakarta/queues/kafka/KafkaController.java b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry/src/main/java/io/sentry/samples/spring/boot/jakarta/queues/kafka/KafkaController.java new file mode 100644 index 00000000000..b17d231951d --- /dev/null +++ b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry/src/main/java/io/sentry/samples/spring/boot/jakarta/queues/kafka/KafkaController.java @@ -0,0 +1,26 @@ +package io.sentry.samples.spring.boot.jakarta.queues.kafka; + +import org.springframework.context.annotation.Profile; +import org.springframework.kafka.core.KafkaTemplate; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RequestMapping; 
+import org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.RestController; + +@RestController +@Profile("kafka") +@RequestMapping("/kafka") +public class KafkaController { + + private final KafkaTemplate kafkaTemplate; + + public KafkaController(KafkaTemplate kafkaTemplate) { + this.kafkaTemplate = kafkaTemplate; + } + + @GetMapping("/produce") + String produce(@RequestParam(defaultValue = "hello from sentry!") String message) { + kafkaTemplate.send("sentry-topic", message); + return "Message sent: " + message; + } +} diff --git a/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry/src/main/resources/application-kafka.properties b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry/src/main/resources/application-kafka.properties new file mode 100644 index 00000000000..e0abadf5f9c --- /dev/null +++ b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry/src/main/resources/application-kafka.properties @@ -0,0 +1,12 @@ +# Kafka — activate with: --spring.profiles.active=kafka +sentry.enable-queue-tracing=true + +spring.kafka.bootstrap-servers=localhost:9092 +spring.kafka.consumer.group-id=sentry-sample-group +spring.kafka.consumer.auto-offset-reset=earliest +spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer +spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer +spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer +spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer + +logging.level.org.apache.kafka=warn diff --git a/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry/src/test/kotlin/io/sentry/systemtest/KafkaOtelCoexistenceSystemTest.kt b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry/src/test/kotlin/io/sentry/systemtest/KafkaOtelCoexistenceSystemTest.kt new file mode 100644 index 
00000000000..0f85e81a0a6 --- /dev/null +++ b/sentry-samples/sentry-samples-spring-boot-jakarta-opentelemetry/src/test/kotlin/io/sentry/systemtest/KafkaOtelCoexistenceSystemTest.kt @@ -0,0 +1,41 @@ +package io.sentry.systemtest + +import io.sentry.systemtest.util.TestHelper +import kotlin.test.Test +import kotlin.test.assertEquals +import org.junit.Before + +class KafkaOtelCoexistenceSystemTest { + lateinit var testHelper: TestHelper + + @Before + fun setup() { + testHelper = TestHelper("http://localhost:8080") + testHelper.reset() + } + + @Test + fun `Sentry Kafka integration is suppressed when OTel is active`() { + val restClient = testHelper.restClient + + restClient.produceKafkaMessage("otel-coexistence-test") + assertEquals(200, restClient.lastKnownStatusCode) + + testHelper.ensureTransactionReceived { transaction, _ -> + transaction.transaction == "GET /kafka/produce" && + transaction.sdk?.integrationSet?.contains("SpringKafka") != true && + transaction.spans.any { span -> + span.op == "queue.publish" && + span.origin == "auto.opentelemetry" && + span.data?.get("messaging.system") == "kafka" + } + } + + testHelper.ensureTransactionReceived { transaction, _ -> + transaction.contexts.trace?.operation == "queue.process" && + transaction.contexts.trace?.origin == "auto.opentelemetry" && + transaction.contexts.trace?.data?.get("messaging.system") == "kafka" && + transaction.sdk?.integrationSet?.contains("SpringKafka") != true + } + } +} diff --git a/sentry-samples/sentry-samples-spring-boot-jakarta/build.gradle.kts b/sentry-samples/sentry-samples-spring-boot-jakarta/build.gradle.kts index a945b87109a..d58c3b53d75 100644 --- a/sentry-samples/sentry-samples-spring-boot-jakarta/build.gradle.kts +++ b/sentry-samples/sentry-samples-spring-boot-jakarta/build.gradle.kts @@ -59,6 +59,10 @@ dependencies { implementation(libs.springboot3.starter.cache) implementation(libs.caffeine) + // kafka + implementation(libs.spring.kafka3) + implementation(projects.sentryKafka) + // 
OpenFeature SDK implementation(libs.openfeature) diff --git a/sentry-samples/sentry-samples-spring-boot-jakarta/src/main/java/io/sentry/samples/spring/boot/jakarta/queues/kafka/KafkaConsumer.java b/sentry-samples/sentry-samples-spring-boot-jakarta/src/main/java/io/sentry/samples/spring/boot/jakarta/queues/kafka/KafkaConsumer.java new file mode 100644 index 00000000000..5931efa3a3b --- /dev/null +++ b/sentry-samples/sentry-samples-spring-boot-jakarta/src/main/java/io/sentry/samples/spring/boot/jakarta/queues/kafka/KafkaConsumer.java @@ -0,0 +1,19 @@ +package io.sentry.samples.spring.boot.jakarta.queues.kafka; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.context.annotation.Profile; +import org.springframework.kafka.annotation.KafkaListener; +import org.springframework.stereotype.Component; + +@Component +@Profile("kafka") +public class KafkaConsumer { + + private static final Logger logger = LoggerFactory.getLogger(KafkaConsumer.class); + + @KafkaListener(topics = "sentry-topic", groupId = "sentry-sample-group") + public void listen(String message) { + logger.info("Received message: {}", message); + } +} diff --git a/sentry-samples/sentry-samples-spring-boot-jakarta/src/main/java/io/sentry/samples/spring/boot/jakarta/queues/kafka/KafkaController.java b/sentry-samples/sentry-samples-spring-boot-jakarta/src/main/java/io/sentry/samples/spring/boot/jakarta/queues/kafka/KafkaController.java new file mode 100644 index 00000000000..b17d231951d --- /dev/null +++ b/sentry-samples/sentry-samples-spring-boot-jakarta/src/main/java/io/sentry/samples/spring/boot/jakarta/queues/kafka/KafkaController.java @@ -0,0 +1,26 @@ +package io.sentry.samples.spring.boot.jakarta.queues.kafka; + +import org.springframework.context.annotation.Profile; +import org.springframework.kafka.core.KafkaTemplate; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RequestMapping; +import 
org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.RestController; + +@RestController +@Profile("kafka") +@RequestMapping("/kafka") +public class KafkaController { + + private final KafkaTemplate kafkaTemplate; + + public KafkaController(KafkaTemplate kafkaTemplate) { + this.kafkaTemplate = kafkaTemplate; + } + + @GetMapping("/produce") + String produce(@RequestParam(defaultValue = "hello from sentry!") String message) { + kafkaTemplate.send("sentry-topic", message); + return "Message sent: " + message; + } +} diff --git a/sentry-samples/sentry-samples-spring-boot-jakarta/src/main/resources/application-kafka.properties b/sentry-samples/sentry-samples-spring-boot-jakarta/src/main/resources/application-kafka.properties new file mode 100644 index 00000000000..eaaa62af13b --- /dev/null +++ b/sentry-samples/sentry-samples-spring-boot-jakarta/src/main/resources/application-kafka.properties @@ -0,0 +1,10 @@ +# Kafka — activate with: --spring.profiles.active=kafka +sentry.enable-queue-tracing=true + +spring.kafka.bootstrap-servers=localhost:9092 +spring.kafka.consumer.group-id=sentry-sample-group +spring.kafka.consumer.auto-offset-reset=earliest +spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer +spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer +spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer +spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer diff --git a/sentry-samples/sentry-samples-spring-boot-jakarta/src/main/resources/application.properties b/sentry-samples/sentry-samples-spring-boot-jakarta/src/main/resources/application.properties index 60b92d369d5..20f9463aabc 100644 --- a/sentry-samples/sentry-samples-spring-boot-jakarta/src/main/resources/application.properties +++ 
b/sentry-samples/sentry-samples-spring-boot-jakarta/src/main/resources/application.properties @@ -37,6 +37,7 @@ spring.quartz.job-store-type=memory # Cache tracing sentry.enable-cache-tracing=true + spring.cache.cache-names=todos spring.cache.caffeine.spec=maximumSize=500,expireAfterAccess=600s diff --git a/sentry-samples/sentry-samples-spring-boot-jakarta/src/test/kotlin/io/sentry/systemtest/KafkaQueueSystemTest.kt b/sentry-samples/sentry-samples-spring-boot-jakarta/src/test/kotlin/io/sentry/systemtest/KafkaQueueSystemTest.kt new file mode 100644 index 00000000000..43781cf2c56 --- /dev/null +++ b/sentry-samples/sentry-samples-spring-boot-jakarta/src/test/kotlin/io/sentry/systemtest/KafkaQueueSystemTest.kt @@ -0,0 +1,117 @@ +package io.sentry.systemtest + +import io.sentry.systemtest.util.TestHelper +import kotlin.test.Test +import kotlin.test.assertEquals +import org.junit.Before + +/** + * System tests for Kafka queue instrumentation. + * + * Requires: + * - The sample app running with `--spring.profiles.active=kafka` + * - A Kafka broker at localhost:9092 + * - The mock Sentry server at localhost:8000 + */ +class KafkaQueueSystemTest { + lateinit var testHelper: TestHelper + + @Before + fun setup() { + testHelper = TestHelper("http://localhost:8080") + testHelper.reset() + } + + @Test + fun `producer endpoint creates queue publish span`() { + val restClient = testHelper.restClient + + restClient.produceKafkaMessage("test-message") + assertEquals(200, restClient.lastKnownStatusCode) + + testHelper.ensureTransactionReceived { transaction, _ -> + testHelper.doesTransactionContainSpanWithOp(transaction, "queue.publish") + } + } + + @Test + fun `consumer creates queue process transaction`() { + val restClient = testHelper.restClient + + restClient.produceKafkaMessage("test-consumer-message") + assertEquals(200, restClient.lastKnownStatusCode) + + // The consumer runs asynchronously, so wait for the queue.process transaction + testHelper.ensureTransactionReceived { 
transaction, _ -> + testHelper.doesTransactionHaveOp(transaction, "queue.process") + } + } + + @Test + fun `producer and consumer share same trace`() { + val restClient = testHelper.restClient + + restClient.produceKafkaMessage("trace-test-message") + assertEquals(200, restClient.lastKnownStatusCode) + + // Capture the trace ID from the producer transaction (has queue.publish span) + var producerTraceId: String? = null + testHelper.ensureTransactionReceived { transaction, _ -> + if (testHelper.doesTransactionContainSpanWithOp(transaction, "queue.publish")) { + producerTraceId = transaction.contexts.trace?.traceId?.toString() + true + } else { + false + } + } + + // Verify the consumer transaction has the same trace ID + // Use retryCount=3 since the consumer may take a moment to process + testHelper.ensureEnvelopeReceived(retryCount = 3) { envelopeString -> + val envelope = + testHelper.jsonSerializer.deserializeEnvelope(envelopeString.byteInputStream()) + ?: return@ensureEnvelopeReceived false + val txItem = + envelope.items.firstOrNull { it.header.type == io.sentry.SentryItemType.Transaction } + ?: return@ensureEnvelopeReceived false + val tx = + txItem.getTransaction(testHelper.jsonSerializer) ?: return@ensureEnvelopeReceived false + + tx.contexts.trace?.operation == "queue.process" && + tx.contexts.trace?.traceId?.toString() == producerTraceId + } + } + + @Test + fun `queue publish span has messaging attributes`() { + val restClient = testHelper.restClient + + restClient.produceKafkaMessage("attrs-test") + assertEquals(200, restClient.lastKnownStatusCode) + + testHelper.ensureTransactionReceived { transaction, _ -> + val span = transaction.spans.firstOrNull { it.op == "queue.publish" } + if (span == null) return@ensureTransactionReceived false + + val data = span.data ?: return@ensureTransactionReceived false + data["messaging.system"] == "kafka" && data["messaging.destination.name"] == "sentry-topic" + } + } + + @Test + fun `queue process transaction has 
messaging attributes`() { + val restClient = testHelper.restClient + + restClient.produceKafkaMessage("process-attrs-test") + assertEquals(200, restClient.lastKnownStatusCode) + + testHelper.ensureTransactionReceived { transaction, _ -> + if (!testHelper.doesTransactionHaveOp(transaction, "queue.process")) { + return@ensureTransactionReceived false + } + + val data = transaction.contexts.trace?.data ?: return@ensureTransactionReceived false + data["messaging.system"] == "kafka" && data["messaging.destination.name"] == "sentry-topic" + } + } +} diff --git a/sentry-spring-boot-jakarta/build.gradle.kts b/sentry-spring-boot-jakarta/build.gradle.kts index 04166519240..36b7dad3cc6 100644 --- a/sentry-spring-boot-jakarta/build.gradle.kts +++ b/sentry-spring-boot-jakarta/build.gradle.kts @@ -40,6 +40,7 @@ dependencies { compileOnly(projects.sentryGraphql) compileOnly(projects.sentryGraphql22) compileOnly(projects.sentryQuartz) + compileOnly(libs.spring.kafka3) compileOnly(Config.Libs.springWeb) compileOnly(Config.Libs.springWebflux) compileOnly(libs.context.propagation) @@ -70,6 +71,7 @@ dependencies { testImplementation(projects.sentryApacheHttpClient5) testImplementation(projects.sentryGraphql) testImplementation(projects.sentryGraphql22) + testImplementation(projects.sentryKafka) testImplementation(projects.sentryOpentelemetry.sentryOpentelemetryCore) testImplementation(projects.sentryOpentelemetry.sentryOpentelemetryAgent) testImplementation(projects.sentryOpentelemetry.sentryOpentelemetryAgentcustomization) @@ -90,6 +92,7 @@ dependencies { testImplementation(libs.springboot3.starter) testImplementation(libs.springboot3.starter.aop) testImplementation(libs.springboot3.starter.graphql) + testImplementation(libs.spring.kafka3) testImplementation(libs.springboot3.starter.quartz) testImplementation(libs.springboot3.starter.security) testImplementation(libs.springboot3.starter.test) diff --git 
a/sentry-spring-boot-jakarta/src/main/java/io/sentry/spring/boot/jakarta/SentryAutoConfiguration.java b/sentry-spring-boot-jakarta/src/main/java/io/sentry/spring/boot/jakarta/SentryAutoConfiguration.java index ef57868ad87..e1f8b026274 100644 --- a/sentry-spring-boot-jakarta/src/main/java/io/sentry/spring/boot/jakarta/SentryAutoConfiguration.java +++ b/sentry-spring-boot-jakarta/src/main/java/io/sentry/spring/boot/jakarta/SentryAutoConfiguration.java @@ -31,6 +31,8 @@ import io.sentry.spring.jakarta.checkin.SentryQuartzConfiguration; import io.sentry.spring.jakarta.exception.SentryCaptureExceptionParameterPointcutConfiguration; import io.sentry.spring.jakarta.exception.SentryExceptionParameterAdviceConfiguration; +import io.sentry.spring.jakarta.kafka.SentryKafkaConsumerBeanPostProcessor; +import io.sentry.spring.jakarta.kafka.SentryKafkaProducerBeanPostProcessor; import io.sentry.spring.jakarta.opentelemetry.SentryOpenTelemetryAgentWithoutAutoInitConfiguration; import io.sentry.spring.jakarta.opentelemetry.SentryOpenTelemetryNoAgentConfiguration; import io.sentry.spring.jakarta.tracing.CombinedTransactionNameProvider; @@ -246,6 +248,34 @@ static class SentryCacheConfiguration { } } + @Configuration(proxyBeanMethods = false) + @ConditionalOnClass( + name = { + "org.springframework.kafka.core.KafkaTemplate", + "io.sentry.kafka.SentryKafkaProducer" + }) + @ConditionalOnProperty(name = "sentry.enable-queue-tracing", havingValue = "true") + @ConditionalOnMissingClass({ + "io.sentry.opentelemetry.SentryAutoConfigurationCustomizerProvider", + "io.sentry.opentelemetry.agent.AgentMarker" + }) + @Open + static class SentryKafkaQueueConfiguration { + + @Bean + public static @NotNull SentryKafkaProducerBeanPostProcessor + sentryKafkaProducerBeanPostProcessor() { + SentryIntegrationPackageStorage.getInstance().addIntegration("SpringKafka"); + return new SentryKafkaProducerBeanPostProcessor(); + } + + @Bean + public static @NotNull SentryKafkaConsumerBeanPostProcessor + 
sentryKafkaConsumerBeanPostProcessor() { + return new SentryKafkaConsumerBeanPostProcessor(); + } + } + @Configuration(proxyBeanMethods = false) @ConditionalOnClass(ProceedingJoinPoint.class) @ConditionalOnProperty( diff --git a/sentry-spring-boot-jakarta/src/test/kotlin/io/sentry/spring/boot/jakarta/SentryKafkaAutoConfigurationTest.kt b/sentry-spring-boot-jakarta/src/test/kotlin/io/sentry/spring/boot/jakarta/SentryKafkaAutoConfigurationTest.kt new file mode 100644 index 00000000000..392e5184759 --- /dev/null +++ b/sentry-spring-boot-jakarta/src/test/kotlin/io/sentry/spring/boot/jakarta/SentryKafkaAutoConfigurationTest.kt @@ -0,0 +1,125 @@ +package io.sentry.spring.boot.jakarta + +import io.sentry.kafka.SentryKafkaProducer +import io.sentry.opentelemetry.SentryAutoConfigurationCustomizerProvider +import io.sentry.opentelemetry.agent.AgentMarker +import io.sentry.spring.jakarta.kafka.SentryKafkaConsumerBeanPostProcessor +import io.sentry.spring.jakarta.kafka.SentryKafkaProducerBeanPostProcessor +import kotlin.test.Test +import org.assertj.core.api.Assertions.assertThat +import org.springframework.boot.autoconfigure.AutoConfigurations +import org.springframework.boot.test.context.FilteredClassLoader +import org.springframework.boot.test.context.runner.ApplicationContextRunner +import org.springframework.kafka.core.KafkaTemplate + +class SentryKafkaAutoConfigurationTest { + + private val contextRunner = + ApplicationContextRunner() + .withConfiguration(AutoConfigurations.of(SentryAutoConfiguration::class.java)) + .withPropertyValues( + "sentry.dsn=http://key@localhost/proj", + "sentry.traces-sample-rate=1.0", + "sentry.shutdownTimeoutMillis=0", + "sentry.sessionFlushTimeoutMillis=0", + "sentry.flushTimeoutMillis=0", + "sentry.readTimeoutMillis=50", + "sentry.connectionTimeoutMillis=50", + "sentry.send-modules=false", + "sentry.debug=false", + ) + + private val noOtelClassLoader = + FilteredClassLoader( + SentryAutoConfigurationCustomizerProvider::class.java, + 
AgentMarker::class.java, + ) + + private val noOtelCustomizerClassLoader = + FilteredClassLoader(SentryAutoConfigurationCustomizerProvider::class.java) + + private val noSentryKafkaClassLoader = + FilteredClassLoader( + SentryKafkaProducer::class.java, + SentryAutoConfigurationCustomizerProvider::class.java, + AgentMarker::class.java, + ) + + private val noSpringKafkaClassLoader = + FilteredClassLoader( + KafkaTemplate::class.java, + SentryAutoConfigurationCustomizerProvider::class.java, + AgentMarker::class.java, + ) + + @Test + fun `registers Kafka BPPs when queue tracing is enabled`() { + contextRunner + .withClassLoader(noOtelClassLoader) + .withPropertyValues("sentry.enable-queue-tracing=true") + .run { context -> + assertThat(context).hasSingleBean(SentryKafkaProducerBeanPostProcessor::class.java) + assertThat(context).hasSingleBean(SentryKafkaConsumerBeanPostProcessor::class.java) + } + } + + @Test + fun `does not register Kafka BPPs when queue tracing is disabled`() { + contextRunner.withClassLoader(noOtelClassLoader).run { context -> + assertThat(context).doesNotHaveBean(SentryKafkaProducerBeanPostProcessor::class.java) + assertThat(context).doesNotHaveBean(SentryKafkaConsumerBeanPostProcessor::class.java) + } + } + + @Test + fun `does not register Kafka BPPs when sentry-kafka is not present`() { + contextRunner + .withClassLoader(noSentryKafkaClassLoader) + .withPropertyValues("sentry.enable-queue-tracing=true") + .run { context -> + assertThat(context).doesNotHaveBean(SentryKafkaProducerBeanPostProcessor::class.java) + assertThat(context).doesNotHaveBean(SentryKafkaConsumerBeanPostProcessor::class.java) + } + } + + @Test + fun `does not register Kafka BPPs when spring-kafka is not present`() { + contextRunner + .withClassLoader(noSpringKafkaClassLoader) + .withPropertyValues("sentry.enable-queue-tracing=true") + .run { context -> + assertThat(context).doesNotHaveBean(SentryKafkaProducerBeanPostProcessor::class.java) + 
assertThat(context).doesNotHaveBean(SentryKafkaConsumerBeanPostProcessor::class.java) + } + } + + @Test + fun `does not register Kafka BPPs when queue tracing is explicitly false`() { + contextRunner + .withClassLoader(noOtelClassLoader) + .withPropertyValues("sentry.enable-queue-tracing=false") + .run { context -> + assertThat(context).doesNotHaveBean(SentryKafkaProducerBeanPostProcessor::class.java) + assertThat(context).doesNotHaveBean(SentryKafkaConsumerBeanPostProcessor::class.java) + } + } + + @Test + fun `does not register Kafka BPPs when OpenTelemetry agent is present`() { + contextRunner + .withClassLoader(noOtelCustomizerClassLoader) + .withPropertyValues("sentry.enable-queue-tracing=true") + .run { context -> + assertThat(context).doesNotHaveBean(SentryKafkaProducerBeanPostProcessor::class.java) + assertThat(context).doesNotHaveBean(SentryKafkaConsumerBeanPostProcessor::class.java) + } + } + + @Test + fun `does not register Kafka BPPs when OpenTelemetry integration is present`() { + contextRunner.withPropertyValues("sentry.enable-queue-tracing=true").run { context -> + assertThat(context).doesNotHaveBean(SentryKafkaProducerBeanPostProcessor::class.java) + assertThat(context).doesNotHaveBean(SentryKafkaConsumerBeanPostProcessor::class.java) + } + } +} diff --git a/sentry-spring-jakarta/api/sentry-spring-jakarta.api b/sentry-spring-jakarta/api/sentry-spring-jakarta.api index fe634da6f4c..24b9af7e14b 100644 --- a/sentry-spring-jakarta/api/sentry-spring-jakarta.api +++ b/sentry-spring-jakarta/api/sentry-spring-jakarta.api @@ -244,6 +244,29 @@ public final class io/sentry/spring/jakarta/graphql/SentrySpringSubscriptionHand public fun onSubscriptionResult (Ljava/lang/Object;Lio/sentry/IScopes;Lio/sentry/graphql/ExceptionReporter;Lgraphql/execution/instrumentation/parameters/InstrumentationFieldFetchParameters;)Ljava/lang/Object; } +public final class io/sentry/spring/jakarta/kafka/SentryKafkaConsumerBeanPostProcessor : 
org/springframework/beans/factory/config/BeanPostProcessor, org/springframework/core/PriorityOrdered { + public fun ()V + public fun getOrder ()I + public fun postProcessAfterInitialization (Ljava/lang/Object;Ljava/lang/String;)Ljava/lang/Object; +} + +public final class io/sentry/spring/jakarta/kafka/SentryKafkaProducerBeanPostProcessor : org/springframework/beans/factory/config/BeanPostProcessor, org/springframework/core/PriorityOrdered { + public fun ()V + public fun getOrder ()I + public fun postProcessAfterInitialization (Ljava/lang/Object;Ljava/lang/String;)Ljava/lang/Object; +} + +public final class io/sentry/spring/jakarta/kafka/SentryKafkaRecordInterceptor : org/springframework/kafka/listener/RecordInterceptor { + public fun (Lio/sentry/IScopes;)V + public fun (Lio/sentry/IScopes;Lorg/springframework/kafka/listener/RecordInterceptor;)V + public fun afterRecord (Lorg/apache/kafka/clients/consumer/ConsumerRecord;Lorg/apache/kafka/clients/consumer/Consumer;)V + public fun clearThreadState (Lorg/apache/kafka/clients/consumer/Consumer;)V + public fun failure (Lorg/apache/kafka/clients/consumer/ConsumerRecord;Ljava/lang/Exception;Lorg/apache/kafka/clients/consumer/Consumer;)V + public fun intercept (Lorg/apache/kafka/clients/consumer/ConsumerRecord;Lorg/apache/kafka/clients/consumer/Consumer;)Lorg/apache/kafka/clients/consumer/ConsumerRecord; + public fun setupThreadState (Lorg/apache/kafka/clients/consumer/Consumer;)V + public fun success (Lorg/apache/kafka/clients/consumer/ConsumerRecord;Lorg/apache/kafka/clients/consumer/Consumer;)V +} + public class io/sentry/spring/jakarta/opentelemetry/SentryOpenTelemetryAgentWithoutAutoInitConfiguration { public fun ()V public fun sentryOpenTelemetryOptionsConfiguration ()Lio/sentry/Sentry$OptionsConfiguration; diff --git a/sentry-spring-jakarta/build.gradle.kts b/sentry-spring-jakarta/build.gradle.kts index f1920e24510..cbf2e5346b5 100644 --- a/sentry-spring-jakarta/build.gradle.kts +++ 
b/sentry-spring-jakarta/build.gradle.kts @@ -29,6 +29,7 @@ tasks.withType().configureEach { dependencies { api(projects.sentry) + compileOnly(projects.sentryKafka) compileOnly(platform(SpringBootPlugin.BOM_COORDINATES)) compileOnly(Config.Libs.springWeb) compileOnly(Config.Libs.springAop) @@ -41,6 +42,7 @@ dependencies { compileOnly(libs.servlet.jakarta.api) compileOnly(libs.slf4j.api) compileOnly(libs.springboot3.starter.graphql) + compileOnly(libs.spring.kafka3) compileOnly(libs.springboot3.starter.quartz) compileOnly(Config.Libs.springWebflux) @@ -58,6 +60,7 @@ dependencies { // tests testImplementation(projects.sentryTestSupport) testImplementation(projects.sentryGraphql) + testImplementation(projects.sentryKafka) testImplementation(kotlin(Config.kotlinStdLib)) testImplementation(libs.awaitility.kotlin) testImplementation(libs.context.propagation) @@ -68,6 +71,7 @@ dependencies { testImplementation(libs.springboot3.starter.aop) testImplementation(libs.springboot3.starter.graphql) testImplementation(libs.springboot3.starter.security) + testImplementation(libs.spring.kafka3) testImplementation(libs.springboot3.starter.test) testImplementation(libs.springboot3.starter.web) testImplementation(libs.springboot3.starter.webflux) diff --git a/sentry-spring-jakarta/src/main/java/io/sentry/spring/jakarta/kafka/SentryKafkaConsumerBeanPostProcessor.java b/sentry-spring-jakarta/src/main/java/io/sentry/spring/jakarta/kafka/SentryKafkaConsumerBeanPostProcessor.java new file mode 100644 index 00000000000..e4676b79cfd --- /dev/null +++ b/sentry-spring-jakarta/src/main/java/io/sentry/spring/jakarta/kafka/SentryKafkaConsumerBeanPostProcessor.java @@ -0,0 +1,98 @@ +package io.sentry.spring.jakarta.kafka; + +import io.sentry.ScopesAdapter; +import io.sentry.SentryLevel; +import java.lang.reflect.Field; +import org.jetbrains.annotations.ApiStatus; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; +import org.springframework.beans.BeansException; 
+import org.springframework.beans.factory.config.BeanPostProcessor; +import org.springframework.core.Ordered; +import org.springframework.core.PriorityOrdered; +import org.springframework.kafka.config.AbstractKafkaListenerContainerFactory; +import org.springframework.kafka.listener.RecordInterceptor; + +/** + * Registers {@link SentryKafkaRecordInterceptor} on {@link AbstractKafkaListenerContainerFactory} + * beans. If an existing {@link RecordInterceptor} is already set, it is composed as a delegate. + */ +@ApiStatus.Internal +public final class SentryKafkaConsumerBeanPostProcessor + implements BeanPostProcessor, PriorityOrdered { + + private static final @NotNull String RECORD_INTERCEPTOR_FIELD_NAME = "recordInterceptor"; + + private final @NotNull String recordInterceptorFieldName; + + public SentryKafkaConsumerBeanPostProcessor() { + this(RECORD_INTERCEPTOR_FIELD_NAME); + } + + SentryKafkaConsumerBeanPostProcessor(final @NotNull String recordInterceptorFieldName) { + this.recordInterceptorFieldName = recordInterceptorFieldName; + } + + private static final class InterceptorReadFailedException extends Exception { + private static final long serialVersionUID = 1L; + + InterceptorReadFailedException(final @NotNull Throwable cause) { + super(cause); + } + } + + @Override + @SuppressWarnings("unchecked") + public @NotNull Object postProcessAfterInitialization( + final @NotNull Object bean, final @NotNull String beanName) throws BeansException { + if (bean instanceof AbstractKafkaListenerContainerFactory) { + final @NotNull AbstractKafkaListenerContainerFactory factory = + (AbstractKafkaListenerContainerFactory) bean; + + final @Nullable RecordInterceptor existing; + try { + existing = getExistingInterceptor(factory); + } catch (InterceptorReadFailedException e) { + ScopesAdapter.getInstance() + .getOptions() + .getLogger() + .log( + SentryLevel.ERROR, + e, + "Sentry Kafka consumer tracing disabled for factory '%s' \u2014 could not read " + + "existing 
recordInterceptor via reflection. Refusing to install Sentry's " + + "interceptor to avoid overwriting a customer-configured RecordInterceptor.", + beanName); + return bean; + } + + if (existing instanceof SentryKafkaRecordInterceptor) { + return bean; + } + + @SuppressWarnings("rawtypes") + final RecordInterceptor sentryInterceptor = + new SentryKafkaRecordInterceptor<>(ScopesAdapter.getInstance(), existing); + factory.setRecordInterceptor(sentryInterceptor); + } + return bean; + } + + private @Nullable RecordInterceptor getExistingInterceptor( + final @NotNull AbstractKafkaListenerContainerFactory factory) + throws InterceptorReadFailedException { + try { + final @NotNull Field field = + AbstractKafkaListenerContainerFactory.class.getDeclaredField(recordInterceptorFieldName); + field.setAccessible(true); + return (RecordInterceptor) field.get(factory); + } catch (NoSuchFieldException | IllegalAccessException | RuntimeException e) { + throw new InterceptorReadFailedException(e); + } + } + + @Override + public int getOrder() { + return Ordered.LOWEST_PRECEDENCE; + } +} diff --git a/sentry-spring-jakarta/src/main/java/io/sentry/spring/jakarta/kafka/SentryKafkaProducerBeanPostProcessor.java b/sentry-spring-jakarta/src/main/java/io/sentry/spring/jakarta/kafka/SentryKafkaProducerBeanPostProcessor.java new file mode 100644 index 00000000000..8a06e4e338e --- /dev/null +++ b/sentry-spring-jakarta/src/main/java/io/sentry/spring/jakarta/kafka/SentryKafkaProducerBeanPostProcessor.java @@ -0,0 +1,76 @@ +package io.sentry.spring.jakarta.kafka; + +import io.sentry.ScopesAdapter; +import io.sentry.SentryLevel; +import io.sentry.kafka.SentryKafkaProducer; +import org.apache.kafka.clients.producer.Producer; +import org.jetbrains.annotations.ApiStatus; +import org.jetbrains.annotations.NotNull; +import org.springframework.beans.BeansException; +import org.springframework.beans.factory.config.BeanPostProcessor; +import org.springframework.core.Ordered; +import 
org.springframework.core.PriorityOrdered; +import org.springframework.kafka.core.ProducerFactory; +import org.springframework.kafka.core.ProducerPostProcessor; + +/** + * Installs a {@link ProducerPostProcessor} on every {@link ProducerFactory} bean so that each + * {@link Producer} created by Spring Kafka is wrapped via {@link SentryKafkaProducer#wrap + * SentryKafkaProducer.wrap(Producer)}. + * + *

The wrapper records a {@code queue.publish} span around each {@code send(...)} that finishes + * when the broker ack callback fires, giving a real producer-send lifecycle span. {@code + * KafkaTemplate} beans are left untouched, so all customer-configured listeners, interceptors and + * observation settings are preserved. + * + *

Note: {@link ProducerFactory#addPostProcessor(ProducerPostProcessor)} is a default method on + * the interface that is a no-op unless overridden. Custom factories that do not extend {@code + * DefaultKafkaProducerFactory} will not receive Sentry producer instrumentation; a warning is + * logged at startup in that case. + */ +@ApiStatus.Internal +public final class SentryKafkaProducerBeanPostProcessor + implements BeanPostProcessor, PriorityOrdered { + + @Override + @SuppressWarnings({"unchecked", "rawtypes"}) + public @NotNull Object postProcessAfterInitialization( + final @NotNull Object bean, final @NotNull String beanName) throws BeansException { + if (bean instanceof ProducerFactory) { + final @NotNull ProducerFactory factory = (ProducerFactory) bean; + final @NotNull SentryProducerPostProcessor pp = new SentryProducerPostProcessor<>(); + factory.addPostProcessor(pp); + if (!factory.getPostProcessors().contains(pp)) { + ScopesAdapter.getInstance() + .getOptions() + .getLogger() + .log( + SentryLevel.WARNING, + "Sentry Kafka producer tracing not active for ProducerFactory '%s' (%s). " + + "addPostProcessor() was not honored — the factory may not extend " + + "DefaultKafkaProducerFactory. Wrap producers manually with " + + "SentryKafkaProducer.wrap(producer).", + beanName, + factory.getClass().getName()); + } + } + return bean; + } + + @Override + public int getOrder() { + return Ordered.LOWEST_PRECEDENCE; + } + + /** + * Marker {@link ProducerPostProcessor} that wraps the freshly created Kafka {@link Producer} via + * {@link SentryKafkaProducer#wrap}. 
+ */ + static final class SentryProducerPostProcessor implements ProducerPostProcessor { + @Override + public @NotNull Producer apply(final @NotNull Producer producer) { + return SentryKafkaProducer.wrap( + producer, ScopesAdapter.getInstance(), "auto.queue.spring_jakarta.kafka.producer"); + } + } +} diff --git a/sentry-spring-jakarta/src/main/java/io/sentry/spring/jakarta/kafka/SentryKafkaRecordInterceptor.java b/sentry-spring-jakarta/src/main/java/io/sentry/spring/jakarta/kafka/SentryKafkaRecordInterceptor.java new file mode 100644 index 00000000000..3f5da4947dd --- /dev/null +++ b/sentry-spring-jakarta/src/main/java/io/sentry/spring/jakarta/kafka/SentryKafkaRecordInterceptor.java @@ -0,0 +1,292 @@ +package io.sentry.spring.jakarta.kafka; + +import io.sentry.BaggageHeader; +import io.sentry.DateUtils; +import io.sentry.IScopes; +import io.sentry.ISentryLifecycleToken; +import io.sentry.ITransaction; +import io.sentry.SentryLevel; +import io.sentry.SentryTraceHeader; +import io.sentry.SpanDataConvention; +import io.sentry.SpanStatus; +import io.sentry.TransactionContext; +import io.sentry.TransactionOptions; +import io.sentry.kafka.SentryKafkaProducer; +import io.sentry.util.SpanUtils; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.common.header.Header; +import org.jetbrains.annotations.ApiStatus; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; +import org.springframework.kafka.listener.RecordInterceptor; +import org.springframework.kafka.support.KafkaHeaders; + +/** + * A {@link RecordInterceptor} that creates {@code queue.process} transactions for incoming Kafka + * records with distributed tracing support. 
+ */ +@ApiStatus.Internal +public final class SentryKafkaRecordInterceptor implements RecordInterceptor { + + static final String TRACE_ORIGIN = "auto.queue.spring_jakarta.kafka.consumer"; + + private final @NotNull IScopes scopes; + private final @Nullable RecordInterceptor delegate; + + private static final @NotNull ThreadLocal currentContext = + new ThreadLocal<>(); + + public SentryKafkaRecordInterceptor(final @NotNull IScopes scopes) { + this(scopes, null); + } + + public SentryKafkaRecordInterceptor( + final @NotNull IScopes scopes, final @Nullable RecordInterceptor delegate) { + this.scopes = scopes; + this.delegate = delegate; + } + + @Override + public @Nullable ConsumerRecord intercept( + final @NotNull ConsumerRecord record, final @NotNull Consumer consumer) { + if (!scopes.getOptions().isEnableQueueTracing() || isIgnored()) { + return delegateIntercept(record, consumer); + } + + try { + finishStaleContext(); + + final @NotNull IScopes forkedScopes = scopes.forkedRootScopes("SentryKafkaRecordInterceptor"); + final @NotNull ISentryLifecycleToken lifecycleToken = forkedScopes.makeCurrent(); + currentContext.set(new SentryRecordContext(lifecycleToken, null)); + + final @Nullable TransactionContext transactionContext = continueTrace(forkedScopes, record); + + final @Nullable ITransaction transaction = + startTransaction(forkedScopes, record, transactionContext); + currentContext.set(new SentryRecordContext(lifecycleToken, transaction)); + } catch (Throwable t) { + scopes.getOptions().getLogger().log(SentryLevel.ERROR, "Unable to wrap Kafka consumer.", t); + } + return delegateIntercept(record, consumer); + } + + @Override + public void success( + final @NotNull ConsumerRecord record, final @NotNull Consumer consumer) { + try { + if (delegate != null) { + delegate.success(record, consumer); + } + } finally { + finishSpan(SpanStatus.OK, null); + } + } + + @Override + public void failure( + final @NotNull ConsumerRecord record, + final @NotNull Exception 
exception, + final @NotNull Consumer consumer) { + try { + if (delegate != null) { + delegate.failure(record, exception, consumer); + } + } finally { + finishSpan(SpanStatus.INTERNAL_ERROR, exception); + } + } + + @Override + public void afterRecord( + final @NotNull ConsumerRecord record, final @NotNull Consumer consumer) { + if (delegate != null) { + delegate.afterRecord(record, consumer); + } + } + + @Override + public void setupThreadState(final @NotNull Consumer consumer) { + if (delegate != null) { + delegate.setupThreadState(consumer); + } + } + + @Override + public void clearThreadState(final @NotNull Consumer consumer) { + try { + finishStaleContext(); + } finally { + if (delegate != null) { + delegate.clearThreadState(consumer); + } + } + } + + private boolean isIgnored() { + return SpanUtils.isIgnored(scopes.getOptions().getIgnoredSpanOrigins(), TRACE_ORIGIN); + } + + private @Nullable ConsumerRecord delegateIntercept( + final @NotNull ConsumerRecord record, final @NotNull Consumer consumer) { + if (delegate != null) { + return delegate.intercept(record, consumer); + } + return record; + } + + private @Nullable TransactionContext continueTrace( + final @NotNull IScopes forkedScopes, final @NotNull ConsumerRecord record) { + final @Nullable String sentryTrace = headerValue(record, SentryTraceHeader.SENTRY_TRACE_HEADER); + final @Nullable List baggageHeaders = + headerValues(record, BaggageHeader.BAGGAGE_HEADER); + return forkedScopes.continueTrace(sentryTrace, baggageHeaders); + } + + private @Nullable ITransaction startTransaction( + final @NotNull IScopes forkedScopes, + final @NotNull ConsumerRecord record, + final @Nullable TransactionContext transactionContext) { + if (!forkedScopes.getOptions().isTracingEnabled()) { + return null; + } + + final @NotNull TransactionContext txContext = + transactionContext != null + ? 
transactionContext + : new TransactionContext("queue.process", "queue.process"); + txContext.setName("queue.process"); + txContext.setOperation("queue.process"); + + final @NotNull TransactionOptions txOptions = new TransactionOptions(); + txOptions.setOrigin(TRACE_ORIGIN); + txOptions.setBindToScope(true); + + final @NotNull ITransaction transaction = forkedScopes.startTransaction(txContext, txOptions); + + if (transaction.isNoOp()) { + return null; + } + + transaction.setData(SpanDataConvention.MESSAGING_SYSTEM, "kafka"); + transaction.setData(SpanDataConvention.MESSAGING_DESTINATION_NAME, record.topic()); + + final @Nullable String messageId = headerValue(record, "messaging.message.id"); + if (messageId != null) { + transaction.setData(SpanDataConvention.MESSAGING_MESSAGE_ID, messageId); + } + + final int bodySize = record.serializedValueSize(); + if (bodySize >= 0) { + transaction.setData(SpanDataConvention.MESSAGING_MESSAGE_BODY_SIZE, bodySize); + } + + final @Nullable Integer retryCount = retryCount(record); + if (retryCount != null) { + transaction.setData(SpanDataConvention.MESSAGING_MESSAGE_RETRY_COUNT, retryCount); + } + + final @Nullable String enqueuedTimeStr = + headerValue(record, SentryKafkaProducer.SENTRY_ENQUEUED_TIME_HEADER); + if (enqueuedTimeStr != null) { + try { + final double enqueuedTimeSeconds = Double.parseDouble(enqueuedTimeStr); + final double nowSeconds = DateUtils.millisToSeconds(System.currentTimeMillis()); + final long latencyMs = (long) ((nowSeconds - enqueuedTimeSeconds) * 1000); + if (latencyMs >= 0) { + transaction.setData(SpanDataConvention.MESSAGING_MESSAGE_RECEIVE_LATENCY, latencyMs); + } + } catch (NumberFormatException ignored) { + // ignore malformed header + } + } + + return transaction; + } + + private @Nullable Integer retryCount(final @NotNull ConsumerRecord record) { + final @Nullable Header header = record.headers().lastHeader(KafkaHeaders.DELIVERY_ATTEMPT); + if (header == null) { + return null; + } + + final byte[] 
value = header.value(); + if (value == null || value.length != Integer.BYTES) { + return null; + } + + final int attempt = ByteBuffer.wrap(value).getInt(); + if (attempt <= 0) { + return null; + } + + return attempt - 1; + } + + private void finishStaleContext() { + if (currentContext.get() != null) { + finishSpan(SpanStatus.UNKNOWN, null); + } + } + + private void finishSpan(final @NotNull SpanStatus status, final @Nullable Throwable throwable) { + final @Nullable SentryRecordContext ctx = currentContext.get(); + if (ctx == null) { + return; + } + currentContext.remove(); + + try { + final @Nullable ITransaction transaction = ctx.transaction; + if (transaction != null) { + transaction.setStatus(status); + if (throwable != null) { + transaction.setThrowable(throwable); + } + transaction.finish(); + } + } finally { + ctx.lifecycleToken.close(); + } + } + + private @Nullable String headerValue( + final @NotNull ConsumerRecord record, final @NotNull String headerName) { + final @Nullable Header header = record.headers().lastHeader(headerName); + if (header == null || header.value() == null) { + return null; + } + return new String(header.value(), StandardCharsets.UTF_8); + } + + private @Nullable List headerValues( + final @NotNull ConsumerRecord record, final @NotNull String headerName) { + @Nullable List values = null; + for (final @NotNull Header header : record.headers().headers(headerName)) { + if (header.value() != null) { + if (values == null) { + values = new ArrayList<>(); + } + values.add(new String(header.value(), StandardCharsets.UTF_8)); + } + } + return values; + } + + private static final class SentryRecordContext { + final @NotNull ISentryLifecycleToken lifecycleToken; + final @Nullable ITransaction transaction; + + SentryRecordContext( + final @NotNull ISentryLifecycleToken lifecycleToken, + final @Nullable ITransaction transaction) { + this.lifecycleToken = lifecycleToken; + this.transaction = transaction; + } + } +} diff --git 
a/sentry-spring-jakarta/src/test/kotlin/io/sentry/spring/jakarta/kafka/SentryKafkaConsumerBeanPostProcessorTest.kt b/sentry-spring-jakarta/src/test/kotlin/io/sentry/spring/jakarta/kafka/SentryKafkaConsumerBeanPostProcessorTest.kt
new file mode 100644
index 00000000000..0a642c06945
--- /dev/null
+++ b/sentry-spring-jakarta/src/test/kotlin/io/sentry/spring/jakarta/kafka/SentryKafkaConsumerBeanPostProcessorTest.kt
@@ -0,0 +1,124 @@
package io.sentry.spring.jakarta.kafka

import kotlin.test.Test
import kotlin.test.assertSame
import kotlin.test.assertTrue
import org.apache.kafka.clients.consumer.Consumer
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.mockito.kotlin.mock
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory
import org.springframework.kafka.core.ConsumerFactory
import org.springframework.kafka.listener.RecordInterceptor

class SentryKafkaConsumerBeanPostProcessorTest {

  @Test
  fun `wraps ConcurrentKafkaListenerContainerFactory with SentryKafkaRecordInterceptor`() {
    val containerFactory = ConcurrentKafkaListenerContainerFactory<String, String>()
    containerFactory.consumerFactory = mock<ConsumerFactory<String, String>>()

    val bpp = SentryKafkaConsumerBeanPostProcessor()
    bpp.postProcessAfterInitialization(containerFactory, "kafkaListenerContainerFactory")

    // The interceptor is private on the superclass; read it back via reflection.
    val interceptorField = containerFactory.javaClass.superclass.getDeclaredField("recordInterceptor")
    interceptorField.isAccessible = true
    val installed = interceptorField.get(containerFactory)
    assertTrue(installed is SentryKafkaRecordInterceptor<*, *>)
  }

  @Test
  fun `does not double-wrap when SentryKafkaRecordInterceptor already set`() {
    val containerFactory = ConcurrentKafkaListenerContainerFactory<String, String>()
    containerFactory.consumerFactory = mock<ConsumerFactory<String, String>>()

    val bpp = SentryKafkaConsumerBeanPostProcessor()
    // First wrap
    bpp.postProcessAfterInitialization(containerFactory, "kafkaListenerContainerFactory")

    val interceptorField = containerFactory.javaClass.superclass.getDeclaredField("recordInterceptor")
    interceptorField.isAccessible = true
    val firstInstalled = interceptorField.get(containerFactory)

    // Second wrap — should be idempotent
    bpp.postProcessAfterInitialization(containerFactory, "kafkaListenerContainerFactory")

    assertSame(firstInstalled, interceptorField.get(containerFactory))
  }

  @Test
  fun `does not wrap non-factory beans`() {
    val plainBean = "not a factory"

    val result =
      SentryKafkaConsumerBeanPostProcessor().postProcessAfterInitialization(plainBean, "someBean")

    assertSame(plainBean, result)
  }

  @Test
  fun `chains existing customer RecordInterceptor as delegate`() {
    val containerFactory = ConcurrentKafkaListenerContainerFactory<String, String>()
    containerFactory.consumerFactory = mock<ConsumerFactory<String, String>>()

    // A pass-through interceptor standing in for one configured by the application.
    val customerInterceptor =
      object : RecordInterceptor<String, String> {
        override fun intercept(
          record: ConsumerRecord<String, String>,
          consumer: Consumer<String, String>,
        ): ConsumerRecord<String, String>? = record
      }
    containerFactory.setRecordInterceptor(customerInterceptor)

    SentryKafkaConsumerBeanPostProcessor()
      .postProcessAfterInitialization(containerFactory, "kafkaListenerContainerFactory")

    val interceptorField = containerFactory.javaClass.superclass.getDeclaredField("recordInterceptor")
    interceptorField.isAccessible = true
    val installed = interceptorField.get(containerFactory)
    assertTrue(
      installed is SentryKafkaRecordInterceptor<*, *>,
      "expected SentryKafkaRecordInterceptor, got ${installed?.javaClass}",
    )

    val delegateField = SentryKafkaRecordInterceptor::class.java.getDeclaredField("delegate")
    delegateField.isAccessible = true
    assertSame(
      customerInterceptor,
      delegateField.get(installed),
      "customer interceptor must be preserved as delegate",
    )
  }

  @Test
  fun `skips installation when reflection fails and preserves customer interceptor`() {
    val containerFactory = ConcurrentKafkaListenerContainerFactory<String, String>()
    containerFactory.consumerFactory = mock<ConsumerFactory<String, String>>()
    val customerInterceptor =
      object : RecordInterceptor<String, String> {
        override fun intercept(
          record: ConsumerRecord<String, String>,
          consumer: Consumer<String, String>,
        ): ConsumerRecord<String, String>? = record
      }
    containerFactory.setRecordInterceptor(customerInterceptor)

    val interceptorField = containerFactory.javaClass.superclass.getDeclaredField("recordInterceptor")
    interceptorField.isAccessible = true
    assertSame(customerInterceptor, interceptorField.get(containerFactory))

    // Pointing the BPP at a non-existent field simulates a reflection failure.
    val bpp = SentryKafkaConsumerBeanPostProcessor("missingRecordInterceptor")
    bpp.postProcessAfterInitialization(containerFactory, "kafkaListenerContainerFactory")

    assertSame(
      customerInterceptor,
      interceptorField.get(containerFactory),
      "customer interceptor must remain installed when Sentry cannot read it",
    )
  }
}
diff --git a/sentry-spring-jakarta/src/test/kotlin/io/sentry/spring/jakarta/kafka/SentryKafkaProducerBeanPostProcessorTest.kt b/sentry-spring-jakarta/src/test/kotlin/io/sentry/spring/jakarta/kafka/SentryKafkaProducerBeanPostProcessorTest.kt
new file mode 100644
index 00000000000..ec6494c5047
--- /dev/null
+++ b/sentry-spring-jakarta/src/test/kotlin/io/sentry/spring/jakarta/kafka/SentryKafkaProducerBeanPostProcessorTest.kt
@@ -0,0 +1,95 @@
package io.sentry.spring.jakarta.kafka

import kotlin.test.Test
import kotlin.test.assertEquals
import kotlin.test.assertSame
import kotlin.test.assertTrue
import org.apache.kafka.clients.producer.Producer
import org.mockito.kotlin.any
import org.mockito.kotlin.argumentCaptor
import org.mockito.kotlin.mock
import org.mockito.kotlin.verify
import org.mockito.kotlin.whenever
import org.springframework.kafka.core.DefaultKafkaProducerFactory
import org.springframework.kafka.core.ProducerFactory
import org.springframework.kafka.core.ProducerPostProcessor

class SentryKafkaProducerBeanPostProcessorTest {

  @Test
  fun `registers Sentry post-processor on ProducerFactory`() {
    val producerFactory = mock<ProducerFactory<String, String>>()
    val sentryPp = SentryKafkaProducerBeanPostProcessor.SentryProducerPostProcessor<String, String>()
    whenever(producerFactory.postProcessors).thenReturn(listOf(sentryPp))

    SentryKafkaProducerBeanPostProcessor()
      .postProcessAfterInitialization(producerFactory, "kafkaProducerFactory")

    val captor = argumentCaptor<ProducerPostProcessor<String, String>>()
    verify(producerFactory).addPostProcessor(captor.capture())
    assertTrue(
      captor.firstValue is SentryKafkaProducerBeanPostProcessor.SentryProducerPostProcessor<*, *>
    )
  }

  @Test
  fun `does not throw when addPostProcessor is a no-op (default interface method)`() {
    // Factory using the default no-op addPostProcessor / getPostProcessors
    val producerFactory = mock<ProducerFactory<String, String>>()
    whenever(producerFactory.postProcessors).thenReturn(emptyList())

    // Should complete without throwing, and log a warning via ScopesAdapter
    SentryKafkaProducerBeanPostProcessor()
      .postProcessAfterInitialization(producerFactory, "myFactory")

    verify(producerFactory).addPostProcessor(any())
  }

  @Test
  fun `does not modify non-ProducerFactory beans`() {
    val plainBean = "not a producer factory"

    val result =
      SentryKafkaProducerBeanPostProcessor().postProcessAfterInitialization(plainBean, "someBean")

    assertSame(plainBean, result)
  }

  @Test
  fun `returns the same bean instance`() {
    val producerFactory = mock<ProducerFactory<String, String>>()
    val sentryPp = SentryKafkaProducerBeanPostProcessor.SentryProducerPostProcessor<String, String>()
    whenever(producerFactory.postProcessors).thenReturn(listOf(sentryPp))

    val result =
      SentryKafkaProducerBeanPostProcessor()
        .postProcessAfterInitialization(producerFactory, "kafkaProducerFactory")

    assertSame(producerFactory, result, "BPP must return the same bean, not a replacement")
  }

  @Test
  fun `registered post-processor wraps producers via SentryKafkaProducer wrap`() {
    val sentryPp = SentryKafkaProducerBeanPostProcessor.SentryProducerPostProcessor<String, String>()
    val rawProducer = mock<Producer<String, String>>()

    val wrapped = sentryPp.apply(rawProducer)

    assertTrue(java.lang.reflect.Proxy.isProxyClass(wrapped.javaClass))
  }

  @Test
  fun `integrates with DefaultKafkaProducerFactory addPostProcessor contract`() {
    // Sanity check against the real Spring Kafka API surface — DefaultKafkaProducerFactory
    // honors addPostProcessor and exposes it via getPostProcessors().
    val producerFactory = DefaultKafkaProducerFactory<String, String>(emptyMap())

    SentryKafkaProducerBeanPostProcessor()
      .postProcessAfterInitialization(producerFactory, "kafkaProducerFactory")

    assertEquals(1, producerFactory.postProcessors.size)
    assertTrue(
      producerFactory.postProcessors.first()
        is SentryKafkaProducerBeanPostProcessor.SentryProducerPostProcessor<*, *>
    )
  }
}
diff --git a/sentry-spring-jakarta/src/test/kotlin/io/sentry/spring/jakarta/kafka/SentryKafkaRecordInterceptorTest.kt b/sentry-spring-jakarta/src/test/kotlin/io/sentry/spring/jakarta/kafka/SentryKafkaRecordInterceptorTest.kt
new file mode 100644
index 00000000000..c08756da69f
--- /dev/null
+++ b/sentry-spring-jakarta/src/test/kotlin/io/sentry/spring/jakarta/kafka/SentryKafkaRecordInterceptorTest.kt
@@ -0,0 +1,468 @@
package io.sentry.spring.jakarta.kafka

import io.sentry.BaggageHeader
import io.sentry.IScopes
import io.sentry.ISentryLifecycleToken
import io.sentry.Sentry
import io.sentry.SentryOptions
import io.sentry.SentryTraceHeader
import io.sentry.SentryTracer
import io.sentry.SpanDataConvention
import io.sentry.TransactionContext
import io.sentry.kafka.SentryKafkaProducer
import io.sentry.test.initForTest
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.Optional
import kotlin.test.AfterTest
import kotlin.test.BeforeTest
import kotlin.test.Test
import kotlin.test.assertEquals
import kotlin.test.assertFailsWith
import kotlin.test.assertNull
import kotlin.test.assertTrue
import org.apache.kafka.clients.consumer.Consumer
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.header.internals.RecordHeaders
import org.apache.kafka.common.record.TimestampType
import org.mockito.kotlin.any
import org.mockito.kotlin.mock
import org.mockito.kotlin.never
import org.mockito.kotlin.times
import org.mockito.kotlin.verify
import org.mockito.kotlin.whenever
import
org.springframework.kafka.listener.RecordInterceptor +import org.springframework.kafka.support.KafkaHeaders + +class SentryKafkaRecordInterceptorTest { + + private lateinit var scopes: IScopes + private lateinit var forkedScopes: IScopes + private lateinit var options: SentryOptions + private lateinit var consumer: Consumer + private lateinit var lifecycleToken: ISentryLifecycleToken + private lateinit var transaction: SentryTracer + + @BeforeTest + fun setup() { + initForTest { it.dsn = "https://key@sentry.io/proj" } + scopes = mock() + consumer = mock() + lifecycleToken = mock() + options = + SentryOptions().apply { + dsn = "https://key@sentry.io/proj" + isEnableQueueTracing = true + tracesSampleRate = 1.0 + } + whenever(scopes.options).thenReturn(options) + whenever(scopes.isEnabled).thenReturn(true) + + forkedScopes = mock() + whenever(scopes.forkedRootScopes(any())).thenReturn(forkedScopes) + whenever(forkedScopes.options).thenReturn(options) + whenever(forkedScopes.makeCurrent()).thenReturn(lifecycleToken) + + transaction = SentryTracer(TransactionContext("queue.process", "queue.process"), forkedScopes) + whenever(forkedScopes.startTransaction(any(), any())) + .thenReturn(transaction) + } + + @AfterTest + fun teardown() { + Sentry.close() + } + + private fun createRecord( + topic: String = "my-topic", + headers: RecordHeaders = RecordHeaders(), + serializedValueSize: Int = -1, + ): ConsumerRecord { + return ConsumerRecord( + topic, + 0, + 0L, + System.currentTimeMillis(), + TimestampType.CREATE_TIME, + 3, + serializedValueSize, + "key", + "value", + headers, + Optional.empty(), + ) + } + + private fun createRecordWithHeaders( + sentryTrace: String? = null, + baggage: String? = null, + baggageHeaders: List? = null, + enqueuedTime: String? = null, + deliveryAttempt: Int? 
= null, + ): ConsumerRecord { + val headers = RecordHeaders() + sentryTrace?.let { + headers.add(SentryTraceHeader.SENTRY_TRACE_HEADER, it.toByteArray(StandardCharsets.UTF_8)) + } + baggage?.let { + headers.add(BaggageHeader.BAGGAGE_HEADER, it.toByteArray(StandardCharsets.UTF_8)) + } + baggageHeaders?.forEach { + headers.add(BaggageHeader.BAGGAGE_HEADER, it.toByteArray(StandardCharsets.UTF_8)) + } + enqueuedTime?.let { + headers.add( + SentryKafkaProducer.SENTRY_ENQUEUED_TIME_HEADER, + it.toByteArray(StandardCharsets.UTF_8), + ) + } + deliveryAttempt?.let { + headers.add( + KafkaHeaders.DELIVERY_ATTEMPT, + ByteBuffer.allocate(Int.SIZE_BYTES).putInt(it).array(), + ) + } + val record = ConsumerRecord("my-topic", 0, 0L, "key", "value") + headers.forEach { record.headers().add(it) } + return record + } + + @Test + fun `intercept forks root scopes`() { + val interceptor = SentryKafkaRecordInterceptor(scopes) + val record = createRecord() + + interceptor.intercept(record, consumer) + + verify(scopes).forkedRootScopes("SentryKafkaRecordInterceptor") + verify(forkedScopes).makeCurrent() + } + + @Test + fun `intercept continues trace from headers`() { + val interceptor = SentryKafkaRecordInterceptor(scopes) + val sentryTraceValue = "2722d9f6ec019ade60c776169d9a8904-cedf5b7571cb4972-1" + val record = createRecordWithHeaders(sentryTrace = sentryTraceValue) + + interceptor.intercept(record, consumer) + + verify(forkedScopes) + .continueTrace(org.mockito.kotlin.eq(sentryTraceValue), org.mockito.kotlin.isNull()) + } + + @Test + fun `intercept calls continueTrace with null when no headers`() { + val interceptor = SentryKafkaRecordInterceptor(scopes) + val record = createRecord() + + interceptor.intercept(record, consumer) + + verify(forkedScopes).continueTrace(org.mockito.kotlin.isNull(), org.mockito.kotlin.isNull()) + } + + @Test + fun `intercept passes all baggage headers to continueTrace`() { + val interceptor = SentryKafkaRecordInterceptor(scopes) + val sentryTraceValue = 
"2722d9f6ec019ade60c776169d9a8904-cedf5b7571cb4972-1" + val record = + createRecordWithHeaders( + sentryTrace = sentryTraceValue, + baggageHeaders = listOf("third=party", "sentry-sample_rate=1"), + ) + + interceptor.intercept(record, consumer) + + verify(forkedScopes) + .continueTrace( + org.mockito.kotlin.eq(sentryTraceValue), + org.mockito.kotlin.eq(listOf("third=party", "sentry-sample_rate=1")), + ) + } + + @Test + fun `sets body size from serializedValueSize`() { + val interceptor = SentryKafkaRecordInterceptor(scopes) + val record = createRecord(serializedValueSize = 42) + + interceptor.intercept(record, consumer) + + assertEquals(42, transaction.data?.get(SpanDataConvention.MESSAGING_MESSAGE_BODY_SIZE)) + } + + @Test + fun `does not set body size when serializedValueSize is negative`() { + val interceptor = SentryKafkaRecordInterceptor(scopes) + val record = createRecord(serializedValueSize = -1) + + interceptor.intercept(record, consumer) + + assertNull(transaction.data?.get(SpanDataConvention.MESSAGING_MESSAGE_BODY_SIZE)) + } + + @Test + fun `sets retry count from delivery attempt header`() { + val interceptor = SentryKafkaRecordInterceptor(scopes) + val record = createRecordWithHeaders(deliveryAttempt = 3) + + interceptor.intercept(record, consumer) + + assertEquals(2, transaction.data?.get(SpanDataConvention.MESSAGING_MESSAGE_RETRY_COUNT)) + } + + @Test + fun `does not set retry count when delivery attempt header is missing`() { + val interceptor = SentryKafkaRecordInterceptor(scopes) + val record = createRecord() + + interceptor.intercept(record, consumer) + + assertNull(transaction.data?.get(SpanDataConvention.MESSAGING_MESSAGE_RETRY_COUNT)) + } + + @Test + fun `sets receive latency from enqueued time in epoch seconds`() { + val interceptor = SentryKafkaRecordInterceptor(scopes) + val enqueuedTime = (System.currentTimeMillis() / 1000.0 - 1.0).toString() + val record = createRecordWithHeaders(enqueuedTime = enqueuedTime) + + interceptor.intercept(record, 
consumer) + + val latency = transaction.data?.get(SpanDataConvention.MESSAGING_MESSAGE_RECEIVE_LATENCY) + assertTrue(latency is Long && latency >= 0) + } + + @Test + fun `does not create span when queue tracing is disabled`() { + options.isEnableQueueTracing = false + val interceptor = SentryKafkaRecordInterceptor(scopes) + val record = createRecord() + + val result = interceptor.intercept(record, consumer) + + verify(scopes, never()).forkedRootScopes(any()) + verify(forkedScopes, never()).makeCurrent() + assertEquals(record, result) + } + + @Test + fun `does not create span when origin is ignored`() { + options.setIgnoredSpanOrigins(listOf(SentryKafkaRecordInterceptor.TRACE_ORIGIN)) + val interceptor = SentryKafkaRecordInterceptor(scopes) + val record = createRecord() + + val result = interceptor.intercept(record, consumer) + + verify(scopes, never()).forkedRootScopes(any()) + verify(forkedScopes, never()).makeCurrent() + assertEquals(record, result) + } + + @Test + fun `delegates to existing interceptor`() { + val delegate = mock>() + val record = createRecord() + whenever(delegate.intercept(record, consumer)).thenReturn(record) + + val interceptor = SentryKafkaRecordInterceptor(scopes, delegate) + interceptor.intercept(record, consumer) + + verify(delegate).intercept(record, consumer) + } + + @Test + fun `success finishes transaction and delegates`() { + val delegate = mock>() + val interceptor = SentryKafkaRecordInterceptor(scopes, delegate) + val record = createRecord() + + interceptor.intercept(record, consumer) + interceptor.success(record, consumer) + + verify(delegate).success(record, consumer) + } + + @Test + fun `failure finishes transaction with error and delegates`() { + val delegate = mock>() + val interceptor = SentryKafkaRecordInterceptor(scopes, delegate) + val record = createRecord() + val exception = RuntimeException("processing failed") + + interceptor.intercept(record, consumer) + interceptor.failure(record, exception, consumer) + + 
verify(delegate).failure(record, exception, consumer) + } + + @Test + fun `afterRecord delegates to existing interceptor`() { + val delegate = mock>() + val interceptor = SentryKafkaRecordInterceptor(scopes, delegate) + val record = createRecord() + + interceptor.afterRecord(record, consumer) + + verify(delegate).afterRecord(record, consumer) + } + + @Test + fun `trace origin is set correctly`() { + assertEquals( + "auto.queue.spring_jakarta.kafka.consumer", + SentryKafkaRecordInterceptor.TRACE_ORIGIN, + ) + } + + @Test + fun `clearThreadState cleans up stale context`() { + val interceptor = SentryKafkaRecordInterceptor(scopes) + val record = createRecord() + + interceptor.intercept(record, consumer) + + interceptor.clearThreadState(consumer) + + verify(lifecycleToken).close() + } + + @Test + fun `clearThreadState is no-op when no context exists`() { + val interceptor = SentryKafkaRecordInterceptor(scopes) + + // should not throw + interceptor.clearThreadState(consumer) + } + + @Test + fun `setupThreadState delegates to existing interceptor`() { + val delegate = mock>() + val interceptor = SentryKafkaRecordInterceptor(scopes, delegate) + + interceptor.setupThreadState(consumer) + + verify(delegate).setupThreadState(consumer) + } + + @Test + fun `setupThreadState is no-op without delegate`() { + val interceptor = SentryKafkaRecordInterceptor(scopes) + + // should not throw + interceptor.setupThreadState(consumer) + } + + @Test + fun `clearThreadState delegates to existing interceptor`() { + val delegate = mock>() + val interceptor = SentryKafkaRecordInterceptor(scopes, delegate) + + interceptor.clearThreadState(consumer) + + verify(delegate).clearThreadState(consumer) + } + + @Test + fun `clearThreadState delegates to existing interceptor even when sentry cleanup throws`() { + val delegate = mock>() + whenever(lifecycleToken.close()).thenThrow(RuntimeException("boom")) + val interceptor = SentryKafkaRecordInterceptor(scopes, delegate) + val record = createRecord() + 
+ interceptor.intercept(record, consumer) + + try { + interceptor.clearThreadState(consumer) + } catch (ignored: RuntimeException) { + // expected + } + + verify(delegate).clearThreadState(consumer) + } + + @Test + fun `full lifecycle intercept success clearThreadState closes token exactly once`() { + val delegate = mock>() + val record = createRecord() + whenever(delegate.intercept(record, consumer)).thenReturn(record) + val interceptor = SentryKafkaRecordInterceptor(scopes, delegate) + + interceptor.setupThreadState(consumer) + interceptor.intercept(record, consumer) + interceptor.success(record, consumer) + interceptor.clearThreadState(consumer) + + // token closed once by success(); clearThreadState must not re-close it + verify(lifecycleToken, times(1)).close() + assertTrue(transaction.isFinished) + // delegate hooks still delegated across the full lifecycle + verify(delegate).setupThreadState(consumer) + verify(delegate).success(record, consumer) + verify(delegate).clearThreadState(consumer) + } + + @Test + fun `when delegate intercept returns null clearThreadState still finishes transaction and closes token`() { + val delegate = mock>() + val record = createRecord() + // delegate filters the record — per Spring Kafka contract, success/failure will not be invoked + whenever(delegate.intercept(record, consumer)).thenReturn(null) + val interceptor = SentryKafkaRecordInterceptor(scopes, delegate) + + interceptor.setupThreadState(consumer) + val result = interceptor.intercept(record, consumer) + interceptor.clearThreadState(consumer) + + assertNull(result) + verify(lifecycleToken, times(1)).close() + assertTrue(transaction.isFinished) + verify(delegate).clearThreadState(consumer) + } + + @Test + fun `when delegate intercept throws clearThreadState still finishes transaction and closes token`() { + val delegate = mock>() + val record = createRecord() + val boom = RuntimeException("delegate boom") + whenever(delegate.intercept(record, consumer)).thenThrow(boom) + 
val interceptor = SentryKafkaRecordInterceptor(scopes, delegate) + + interceptor.setupThreadState(consumer) + val thrown = assertFailsWith { interceptor.intercept(record, consumer) } + assertEquals(boom, thrown) + + interceptor.clearThreadState(consumer) + + verify(lifecycleToken, times(1)).close() + assertTrue(transaction.isFinished) + verify(delegate).clearThreadState(consumer) + } + + @Test + fun `intercept cleans up stale context from previous record`() { + val lifecycleToken2 = mock() + val forkedScopes2 = mock() + whenever(forkedScopes2.options).thenReturn(options) + whenever(forkedScopes2.makeCurrent()).thenReturn(lifecycleToken2) + val tx2 = SentryTracer(TransactionContext("queue.process", "queue.process"), forkedScopes2) + whenever(forkedScopes2.startTransaction(any(), any())).thenReturn(tx2) + + var callCount = 0 + + val interceptor = SentryKafkaRecordInterceptor(scopes) + val record = createRecord() + + whenever(scopes.forkedRootScopes(any())).thenAnswer { + callCount++ + if (callCount == 1) forkedScopes else forkedScopes2 + } + + // First intercept sets up context + interceptor.intercept(record, consumer) + + // Second intercept without success/failure — should clean up stale context first + interceptor.intercept(record, consumer) + + // First lifecycle token should have been closed by the defensive cleanup + verify(lifecycleToken).close() + } +} diff --git a/sentry-system-test-support/api/sentry-system-test-support.api b/sentry-system-test-support/api/sentry-system-test-support.api index 83a9f288d0c..1cbec857516 100644 --- a/sentry-system-test-support/api/sentry-system-test-support.api +++ b/sentry-system-test-support/api/sentry-system-test-support.api @@ -560,6 +560,8 @@ public final class io/sentry/systemtest/util/RestTestClient : io/sentry/systemte public final fun getTodo (J)Lio/sentry/systemtest/Todo; public final fun getTodoRestClient (J)Lio/sentry/systemtest/Todo; public final fun getTodoWebclient (J)Lio/sentry/systemtest/Todo; + public final 
fun produceKafkaMessage (Ljava/lang/String;)Ljava/lang/String; + public static synthetic fun produceKafkaMessage$default (Lio/sentry/systemtest/util/RestTestClient;Ljava/lang/String;ILjava/lang/Object;)Ljava/lang/String; public final fun saveCachedTodo (Lio/sentry/systemtest/Todo;)Lio/sentry/systemtest/Todo; } diff --git a/sentry-system-test-support/src/main/kotlin/io/sentry/systemtest/util/RestTestClient.kt b/sentry-system-test-support/src/main/kotlin/io/sentry/systemtest/util/RestTestClient.kt index da552ff93bc..b9dc0f3ccad 100644 --- a/sentry-system-test-support/src/main/kotlin/io/sentry/systemtest/util/RestTestClient.kt +++ b/sentry-system-test-support/src/main/kotlin/io/sentry/systemtest/util/RestTestClient.kt @@ -81,6 +81,12 @@ class RestTestClient(private val backendBaseUrl: String) : LoggingInsecureRestCl return response?.body?.string() } + fun produceKafkaMessage(message: String = "hello from sentry!"): String? { + val request = Request.Builder().url("$backendBaseUrl/kafka/produce?message=$message") + + return callTyped(request, true) + } + fun getCountMetric(): String? 
{ val request = Request.Builder().url("$backendBaseUrl/metric/count") diff --git a/sentry/api/sentry.api b/sentry/api/sentry.api index b9cbb2ae1b2..e4611a46d44 100644 --- a/sentry/api/sentry.api +++ b/sentry/api/sentry.api @@ -529,6 +529,7 @@ public final class io/sentry/ExternalOptions { public fun isEnableLogs ()Ljava/lang/Boolean; public fun isEnableMetrics ()Ljava/lang/Boolean; public fun isEnablePrettySerializationOutput ()Ljava/lang/Boolean; + public fun isEnableQueueTracing ()Ljava/lang/Boolean; public fun isEnableSpotlight ()Ljava/lang/Boolean; public fun isEnabled ()Ljava/lang/Boolean; public fun isForceInit ()Ljava/lang/Boolean; @@ -548,6 +549,7 @@ public final class io/sentry/ExternalOptions { public fun setEnableLogs (Ljava/lang/Boolean;)V public fun setEnableMetrics (Ljava/lang/Boolean;)V public fun setEnablePrettySerializationOutput (Ljava/lang/Boolean;)V + public fun setEnableQueueTracing (Ljava/lang/Boolean;)V public fun setEnableSpotlight (Ljava/lang/Boolean;)V public fun setEnableUncaughtExceptionHandler (Ljava/lang/Boolean;)V public fun setEnabled (Ljava/lang/Boolean;)V @@ -3688,6 +3690,7 @@ public class io/sentry/SentryOptions { public fun isEnableEventSizeLimiting ()Z public fun isEnableExternalConfiguration ()Z public fun isEnablePrettySerializationOutput ()Z + public fun isEnableQueueTracing ()Z public fun isEnableScopePersistence ()Z public fun isEnableScreenTracking ()Z public fun isEnableShutdownHook ()Z @@ -3748,6 +3751,7 @@ public class io/sentry/SentryOptions { public fun setEnableEventSizeLimiting (Z)V public fun setEnableExternalConfiguration (Z)V public fun setEnablePrettySerializationOutput (Z)V + public fun setEnableQueueTracing (Z)V public fun setEnableScopePersistence (Z)V public fun setEnableScreenTracking (Z)V public fun setEnableShutdownHook (Z)V @@ -4392,6 +4396,14 @@ public abstract interface class io/sentry/SpanDataConvention { public static final field HTTP_RESPONSE_CONTENT_LENGTH_KEY Ljava/lang/String; public static final 
field HTTP_START_TIMESTAMP Ljava/lang/String; public static final field HTTP_STATUS_CODE_KEY Ljava/lang/String; + public static final field MESSAGING_DESTINATION_NAME Ljava/lang/String; + public static final field MESSAGING_MESSAGE_BODY_SIZE Ljava/lang/String; + public static final field MESSAGING_MESSAGE_ENVELOPE_SIZE Ljava/lang/String; + public static final field MESSAGING_MESSAGE_ID Ljava/lang/String; + public static final field MESSAGING_MESSAGE_RECEIVE_LATENCY Ljava/lang/String; + public static final field MESSAGING_MESSAGE_RETRY_COUNT Ljava/lang/String; + public static final field MESSAGING_OPERATION_TYPE Ljava/lang/String; + public static final field MESSAGING_SYSTEM Ljava/lang/String; public static final field PROFILER_ID Ljava/lang/String; public static final field THREAD_ID Ljava/lang/String; public static final field THREAD_NAME Ljava/lang/String; diff --git a/sentry/src/main/java/io/sentry/ExternalOptions.java b/sentry/src/main/java/io/sentry/ExternalOptions.java index e992c04466b..4e44ea422ec 100644 --- a/sentry/src/main/java/io/sentry/ExternalOptions.java +++ b/sentry/src/main/java/io/sentry/ExternalOptions.java @@ -58,6 +58,7 @@ public final class ExternalOptions { private @Nullable Boolean enableBackpressureHandling; private @Nullable Boolean enableDatabaseTransactionTracing; private @Nullable Boolean enableCacheTracing; + private @Nullable Boolean enableQueueTracing; private @Nullable Boolean globalHubMode; private @Nullable Boolean forceInit; private @Nullable Boolean captureOpenTelemetryEvents; @@ -168,6 +169,8 @@ public final class ExternalOptions { options.setEnableCacheTracing(propertiesProvider.getBooleanProperty("enable-cache-tracing")); + options.setEnableQueueTracing(propertiesProvider.getBooleanProperty("enable-queue-tracing")); + options.setGlobalHubMode(propertiesProvider.getBooleanProperty("global-hub-mode")); options.setCaptureOpenTelemetryEvents( @@ -541,6 +544,14 @@ public void setEnableCacheTracing(final @Nullable Boolean 
enableCacheTracing) { return enableCacheTracing; } + public void setEnableQueueTracing(final @Nullable Boolean enableQueueTracing) { + this.enableQueueTracing = enableQueueTracing; + } + + public @Nullable Boolean isEnableQueueTracing() { + return enableQueueTracing; + } + public void setGlobalHubMode(final @Nullable Boolean globalHubMode) { this.globalHubMode = globalHubMode; } diff --git a/sentry/src/main/java/io/sentry/SentryOptions.java b/sentry/src/main/java/io/sentry/SentryOptions.java index 86086f8816b..7db109e9d2e 100644 --- a/sentry/src/main/java/io/sentry/SentryOptions.java +++ b/sentry/src/main/java/io/sentry/SentryOptions.java @@ -508,6 +508,9 @@ public class SentryOptions { /** Whether cache operations (get, put, remove, flush) should be traced. */ private boolean enableCacheTracing = false; + /** Whether queue operations (publish, process) should be traced. */ + private boolean enableQueueTracing = false; + /** Date provider to retrieve the current date from. */ @ApiStatus.Internal private final @NotNull LazyEvaluator dateProvider = @@ -2704,6 +2707,26 @@ public void setEnableCacheTracing(boolean enableCacheTracing) { this.enableCacheTracing = enableCacheTracing; } + /** + * Whether Sentry emits Queue spans and transforms OpenTelemetry messaging spans to match Sentry's + * queue conventions. + * + * @return true if queue tracing is enabled + */ + public boolean isEnableQueueTracing() { + return enableQueueTracing; + } + + /** + * Whether Sentry emits Queue spans and transforms OpenTelemetry messaging spans to match Sentry's + * queue conventions. + * + * @param enableQueueTracing true to enable queue tracing + */ + public void setEnableQueueTracing(boolean enableQueueTracing) { + this.enableQueueTracing = enableQueueTracing; + } + /** * Whether Sentry is enabled. 
* @@ -3545,6 +3568,9 @@ public void merge(final @NotNull ExternalOptions options) { if (options.isEnableCacheTracing() != null) { setEnableCacheTracing(options.isEnableCacheTracing()); } + if (options.isEnableQueueTracing() != null) { + setEnableQueueTracing(options.isEnableQueueTracing()); + } if (options.getMaxRequestBodySize() != null) { setMaxRequestBodySize(options.getMaxRequestBodySize()); } diff --git a/sentry/src/main/java/io/sentry/SpanDataConvention.java b/sentry/src/main/java/io/sentry/SpanDataConvention.java index 647c0dacddf..4ede74505cb 100644 --- a/sentry/src/main/java/io/sentry/SpanDataConvention.java +++ b/sentry/src/main/java/io/sentry/SpanDataConvention.java @@ -30,4 +30,12 @@ public interface SpanDataConvention { String CACHE_KEY = "cache.key"; String CACHE_OPERATION = "cache.operation"; String CACHE_WRITE = "cache.write"; + String MESSAGING_SYSTEM = "messaging.system"; + String MESSAGING_DESTINATION_NAME = "messaging.destination.name"; + String MESSAGING_MESSAGE_ID = "messaging.message.id"; + String MESSAGING_MESSAGE_RETRY_COUNT = "messaging.message.retry.count"; + String MESSAGING_MESSAGE_BODY_SIZE = "messaging.message.body.size"; + String MESSAGING_MESSAGE_ENVELOPE_SIZE = "messaging.message.envelope.size"; + String MESSAGING_MESSAGE_RECEIVE_LATENCY = "messaging.message.receive.latency"; + String MESSAGING_OPERATION_TYPE = "messaging.operation.type"; } diff --git a/sentry/src/main/java/io/sentry/util/SpanUtils.java b/sentry/src/main/java/io/sentry/util/SpanUtils.java index cad4d483656..c324feed840 100644 --- a/sentry/src/main/java/io/sentry/util/SpanUtils.java +++ b/sentry/src/main/java/io/sentry/util/SpanUtils.java @@ -40,6 +40,10 @@ public final class SpanUtils { origins.add("auto.http.spring7.resttemplate"); origins.add("auto.http.openfeign"); origins.add("auto.http.ktor-client"); + origins.add("auto.queue.spring_jakarta.kafka.producer"); + origins.add("auto.queue.spring_jakarta.kafka.consumer"); + origins.add("auto.queue.kafka.producer"); 
+ origins.add("auto.queue.kafka.consumer"); } if (SentryOpenTelemetryMode.AGENT == mode) { diff --git a/sentry/src/test/java/io/sentry/ExternalOptionsTest.kt b/sentry/src/test/java/io/sentry/ExternalOptionsTest.kt index 54630355557..fee707d31f3 100644 --- a/sentry/src/test/java/io/sentry/ExternalOptionsTest.kt +++ b/sentry/src/test/java/io/sentry/ExternalOptionsTest.kt @@ -345,6 +345,20 @@ class ExternalOptionsTest { } } + @Test + fun `creates options with enableQueueTracing set to true`() { + withPropertiesFile("enable-queue-tracing=true") { options -> + assertTrue(options.isEnableQueueTracing == true) + } + } + + @Test + fun `creates options with enableQueueTracing set to false`() { + withPropertiesFile("enable-queue-tracing=false") { options -> + assertTrue(options.isEnableQueueTracing == false) + } + } + @Test fun `creates options with cron defaults`() { withPropertiesFile( diff --git a/sentry/src/test/java/io/sentry/SentryOptionsTest.kt b/sentry/src/test/java/io/sentry/SentryOptionsTest.kt index da014b30f74..e18438707b2 100644 --- a/sentry/src/test/java/io/sentry/SentryOptionsTest.kt +++ b/sentry/src/test/java/io/sentry/SentryOptionsTest.kt @@ -708,6 +708,11 @@ class SentryOptionsTest { assertFalse(SentryOptions().isEnableCacheTracing) } + @Test + fun `when options are initialized, enableQueueTracing is set to false by default`() { + assertFalse(SentryOptions().isEnableQueueTracing) + } + @Test fun `when options are initialized, metrics is enabled by default`() { assertTrue(SentryOptions().metrics.isEnabled) @@ -1018,6 +1023,23 @@ class SentryOptionsTest { assertEquals("original", options.orgId) } + @Test + fun `merging options applies enableQueueTracing`() { + val externalOptions = ExternalOptions() + externalOptions.setEnableQueueTracing(true) + val options = SentryOptions() + options.merge(externalOptions) + assertTrue(options.isEnableQueueTracing) + } + + @Test + fun `merging options preserves enableQueueTracing default when not set`() { + val 
externalOptions = ExternalOptions() + val options = SentryOptions() + options.merge(externalOptions) + assertFalse(options.isEnableQueueTracing) + } + @Test fun `getEffectiveOrgId prefers explicit orgId over DSN`() { val options = SentryOptions() diff --git a/settings.gradle.kts b/settings.gradle.kts index 8d431d5fbdf..4b1c606bc64 100644 --- a/settings.gradle.kts +++ b/settings.gradle.kts @@ -58,6 +58,7 @@ include( "sentry-graphql-22", "sentry-graphql-core", "sentry-jdbc", + "sentry-kafka", "sentry-opentelemetry:sentry-opentelemetry-bootstrap", "sentry-opentelemetry:sentry-opentelemetry-core", "sentry-opentelemetry:sentry-opentelemetry-agentcustomization", diff --git a/test/system-test-runner.py b/test/system-test-runner.py index 70489c580a5..f20a9bd8d62 100644 --- a/test/system-test-runner.py +++ b/test/system-test-runner.py @@ -42,6 +42,7 @@ import argparse import requests import threading +import socket from pathlib import Path from typing import Optional, List, Tuple from dataclasses import dataclass @@ -65,6 +66,20 @@ "SENTRY_ENABLE_CACHE_TRACING": "true" } +KAFKA_CONTAINER_NAME = "sentry-java-system-test-kafka" +KAFKA_BOOTSTRAP_SERVERS = "localhost:9092" +KAFKA_BROKER_REQUIRED_MODULES = { + "sentry-samples-console", + "sentry-samples-spring-boot-jakarta", + "sentry-samples-spring-boot-jakarta-opentelemetry", + "sentry-samples-spring-boot-jakarta-opentelemetry-noagent", +} +KAFKA_PROFILE_REQUIRED_MODULES = { + "sentry-samples-spring-boot-jakarta", + "sentry-samples-spring-boot-jakarta-opentelemetry", + "sentry-samples-spring-boot-jakarta-opentelemetry-noagent", +} + class ServerType(Enum): TOMCAT = 0 SPRING = 1 @@ -155,6 +170,7 @@ def __init__(self): self.mock_server = Server(name="Mock", pid_filepath="sentry-mock-server.pid") self.tomcat_server = Server(name="Tomcat", pid_filepath="tomcat-server.pid") self.spring_server = Server(name="Spring", pid_filepath="spring-server.pid") + self.kafka_started_by_runner = False # Load existing PIDs if available for server 
in (self.mock_server, self.tomcat_server, self.spring_server): @@ -196,7 +212,84 @@ def kill_process(self, pid: int, name: str) -> None: except (OSError, ProcessLookupError): print(f"Process {pid} was already dead") + def module_requires_kafka(self, sample_module: str) -> bool: + return sample_module in KAFKA_BROKER_REQUIRED_MODULES + def module_requires_kafka_profile(self, sample_module: str) -> bool: + return sample_module in KAFKA_PROFILE_REQUIRED_MODULES + + def wait_for_port(self, host: str, port: int, max_attempts: int = 20) -> bool: + for _ in range(max_attempts): + try: + with socket.create_connection((host, port), timeout=1): + return True + except OSError: + time.sleep(1) + return False + + def remove_kafka_broker_container(self) -> None: + subprocess.run( + ["docker", "rm", "-f", KAFKA_CONTAINER_NAME], + check=False, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + + def start_kafka_broker(self) -> None: + if self.wait_for_port("localhost", 9092, max_attempts=1): + print("Kafka broker already running on localhost:9092, reusing it.") + self.kafka_started_by_runner = False + return + + self.remove_kafka_broker_container() + + print("Starting Kafka broker (Redpanda) for system tests...") + run_result = subprocess.run( + [ + "docker", + "run", + "-d", + "--name", + KAFKA_CONTAINER_NAME, + "-p", + "9092:9092", + "docker.redpanda.com/redpandadata/redpanda:v24.1.9", + "redpanda", + "start", + "--overprovisioned", + "--smp", + "1", + "--memory", + "1G", + "--reserve-memory", + "0M", + "--node-id", + "0", + "--check=false", + "--kafka-addr", + "PLAINTEXT://0.0.0.0:9092", + "--advertise-kafka-addr", + "PLAINTEXT://localhost:9092", + ], + check=False, + capture_output=True, + text=True, + ) + + if run_result.returncode != 0: + raise RuntimeError(f"Failed to start Kafka container: {run_result.stderr}") + + if not self.wait_for_port("localhost", 9092, max_attempts=30): + raise RuntimeError("Kafka broker did not become ready on localhost:9092") + + 
self.kafka_started_by_runner = True + + def stop_kafka_broker(self) -> None: + if not self.kafka_started_by_runner: + return + + self.remove_kafka_broker_container() + self.kafka_started_by_runner = False def start_sentry_mock_server(self) -> None: """Start the Sentry mock server.""" @@ -347,6 +440,13 @@ def start_spring_server(self, sample_module: str, java_agent: str, java_agent_au env.update(SENTRY_ENVIRONMENT_VARIABLES) env["SENTRY_AUTO_INIT"] = java_agent_auto_init + if self.module_requires_kafka_profile(sample_module): + env["SPRING_PROFILES_ACTIVE"] = "kafka" + env["SENTRY_ENABLE_QUEUE_TRACING"] = "true" + print("Enabling Spring profile: kafka") + else: + env.pop("SPRING_PROFILES_ACTIVE", None) + # Build command jar_path = f"sentry-samples/{sample_module}/build/libs/{sample_module}-0.0.1-SNAPSHOT.jar" cmd = ["java"] @@ -557,6 +657,12 @@ def setup_test_infrastructure(self, sample_module: str, java_agent: str, java_agent_auto_init: str, build_before_run: str, server_type: Optional[ServerType]) -> int: """Set up test infrastructure. 
Returns 0 on success, error code on failure.""" + if self.module_requires_kafka(sample_module): + self.start_kafka_broker() + os.environ["SENTRY_SAMPLE_KAFKA_BOOTSTRAP_SERVERS"] = KAFKA_BOOTSTRAP_SERVERS + else: + os.environ.pop("SENTRY_SAMPLE_KAFKA_BOOTSTRAP_SERVERS", None) + # Build if requested if build_before_run == "1": print("Building before test run") @@ -624,6 +730,8 @@ def run_single_test(self, sample_module: str, java_agent: str, elif server_type == ServerType.SPRING: self.stop_spring_server() self.stop_sentry_mock_server() + self.stop_kafka_broker() + os.environ.pop("SENTRY_SAMPLE_KAFKA_BOOTSTRAP_SERVERS", None) def run_all_tests(self) -> int: """Run all system tests.""" @@ -954,6 +1062,8 @@ def cleanup_on_exit(self, signum, frame): self.stop_spring_server() self.stop_sentry_mock_server() self.stop_tomcat_server() + self.stop_kafka_broker() + os.environ.pop("SENTRY_SAMPLE_KAFKA_BOOTSTRAP_SERVERS", None) sys.exit(1) def main(): @@ -1152,6 +1262,8 @@ def main(): runner.stop_spring_server() runner.stop_sentry_mock_server() runner.stop_tomcat_server() + runner.stop_kafka_broker() + os.environ.pop("SENTRY_SAMPLE_KAFKA_BOOTSTRAP_SERVERS", None) if __name__ == "__main__": sys.exit(main())