Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 10 additions & 6 deletions bin/hadoop-metrics2-hbase.properties
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,12 @@
# Configuration for the metrics2 system for the HBase RegionServers
# to enable phoenix trace collection on the HBase servers.
#
# NOTE: The legacy PhoenixMetricsSink has been removed as part of the
# migration from HTrace to OpenTelemetry. Trace export is now handled
# by the OpenTelemetry Java Agent. Configure the agent via environment
# variables (e.g., OTEL_EXPORTER_OTLP_ENDPOINT) to export traces to
# Jaeger, Tempo, Zipkin, or any OTLP-compatible backend.
#
# See hadoop-metrics2-phoenix.properties for how these configurations
# are utilized.
#
Expand All @@ -28,9 +34,7 @@
# properties should be added to the file of the same name on
# the HBase classpath (likely in the HBase conf/ folder)

# ensure that we receive traces on the server
hbase.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink
# Tell the sink where to write the metrics
hbase.sink.tracing.writer-class=org.apache.phoenix.trace.PhoenixTableMetricsWriter
# Only handle traces with a context of "tracing"
hbase.sink.tracing.context=tracing
# Legacy PhoenixMetricsSink configuration (removed - use OpenTelemetry agent instead):
# hbase.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink
# hbase.sink.tracing.writer-class=org.apache.phoenix.trace.PhoenixTableMetricsWriter
# hbase.sink.tracing.context=tracing
33 changes: 11 additions & 22 deletions bin/hadoop-metrics2-phoenix.properties
Original file line number Diff line number Diff line change
Expand Up @@ -37,21 +37,6 @@
# not zero-length). It is only there to differentiate the properties that are stored for
# objects of the same type (e.g. differentiating between two phoenix.sink objects).
#
#You could use the following lines in your config
#
# phoenix.sink.thingA.class=com.your-company.SpecialSink
# phoenix.sink.thingA.option1=value1
#
# and also
#
# phoenix.sink.thingB.class=org.apache.phoenix.trace.PhoenixMetricsSink
# phoenix.sink.thingB.doGoodStuff=true
#
# which will create both SpecialSink and PhoenixMetricsSink and register each
# as a MetricsSink. SpecialSink will only see option1=value1 in its
# configuration, while, similarly, the instantiated PhoenixMetricsSink will
# only see doGoodStuff=true in its configuration
#
# See javadoc of package-info.java for org.apache.hadoop.metrics2 for detail

# Uncomment to NOT start MBeans
Expand All @@ -60,11 +45,15 @@
# Sample from all the sources every 10 seconds
*.period=10

# Write Traces to Phoenix
# Write Traces to Phoenix (LEGACY - removed)
##########################
# ensure that we receive traces on the server
phoenix.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink
# Tell the sink where to write the metrics
phoenix.sink.tracing.writer-class=org.apache.phoenix.trace.PhoenixTableMetricsWriter
# Only handle traces with a context of "tracing"
phoenix.sink.tracing.context=tracing
# NOTE: The legacy PhoenixMetricsSink has been removed as part of the
# migration from HTrace to OpenTelemetry. Trace export is now handled
# by the OpenTelemetry Java Agent. Configure the agent via environment
# variables (e.g., OTEL_EXPORTER_OTLP_ENDPOINT) to export traces to
# Jaeger, Tempo, Zipkin, or any OTLP-compatible backend.
#
# Legacy configuration (removed - use OpenTelemetry agent instead):
# phoenix.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink
# phoenix.sink.tracing.writer-class=org.apache.phoenix.trace.PhoenixTableMetricsWriter
# phoenix.sink.tracing.context=tracing
5 changes: 3 additions & 2 deletions phoenix-client-parent/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -137,8 +137,6 @@
<shadedPattern>${shaded.package}.org.</shadedPattern>
<excludes>
<exclude>org/apache/hadoop/**</exclude>
<!-- Our non-shaded htrace and logging libraries -->
<exclude>org/apache/htrace/**</exclude>
<exclude>org/slf4j/**</exclude>
<exclude>org/apache/commons/logging/**</exclude>
<exclude>org/apache/log4j/**</exclude>
Expand Down Expand Up @@ -183,6 +181,9 @@
<exclude>io/skip/checksum/errors</exclude>
<exclude>io/sort/*</exclude>
<exclude>io/serializations</exclude>
<!-- OpenTelemetry API must NOT be shaded so that Phoenix uses the same
GlobalOpenTelemetry class as the Java Agent. See PHOENIX-5215. -->
<exclude>io/opentelemetry/**</exclude>
</excludes>
</relocation>
<!-- JSRs that haven't made it to inclusion in J2SE -->
Expand Down
21 changes: 17 additions & 4 deletions phoenix-core-client/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -194,10 +194,6 @@
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</dependency>
<dependency>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
Expand Down Expand Up @@ -250,6 +246,23 @@
<groupId>org.hdrhistogram</groupId>
<artifactId>HdrHistogram</artifactId>
</dependency>

<!-- OpenTelemetry API (versions managed by opentelemetry-bom in parent).
Scope is "provided" because at runtime the OTel API classes are already on the
HBase classpath (HBase depends on opentelemetry-api). The Java Agent provides
the SDK implementation. Phoenix must NOT shade these classes. See PHOENIX-5215. -->
<dependency>
<groupId>io.opentelemetry</groupId>
<artifactId>opentelemetry-api</artifactId>
<scope>provided</scope>
</dependency>
<!-- OpenTelemetry Context (transitive from opentelemetry-api, but declared
explicitly to satisfy dependency analysis). -->
<dependency>
<groupId>io.opentelemetry</groupId>
<artifactId>opentelemetry-context</artifactId>
<scope>provided</scope>
</dependency>
</dependencies>

<build>
Expand Down
4 changes: 2 additions & 2 deletions phoenix-core-client/src/main/antlr3/PhoenixSQL.g
Original file line number Diff line number Diff line change
Expand Up @@ -225,7 +225,7 @@ import org.apache.phoenix.schema.types.PUnsignedTime;
import org.apache.phoenix.schema.types.PUnsignedTimestamp;
import org.apache.phoenix.util.SchemaUtil;
import org.apache.phoenix.parse.LikeParseNode.LikeType;
import org.apache.phoenix.trace.util.Tracing;
import org.apache.phoenix.trace.PhoenixTracing;
import org.apache.phoenix.parse.AddJarsStatement;
import org.apache.phoenix.parse.ExplainType;
}
Expand Down Expand Up @@ -724,7 +724,7 @@ alter_index_node returns [AlterIndexStatement ret]
// Parse a trace statement.
trace_node returns [TraceStatement ret]
: TRACE ((flag = ON ( WITH SAMPLING s = sampling_rate)?) | flag = OFF)
{ret = factory.trace(Tracing.isTraceOn(flag.getText()), s == null ? Tracing.isTraceOn(flag.getText()) ? 1.0 : 0.0 : (((BigDecimal)s.getValue())).doubleValue());}
{ret = factory.trace(PhoenixTracing.isTraceOn(flag.getText()), s == null ? PhoenixTracing.isTraceOn(flag.getText()) ? 1.0 : 0.0 : (((BigDecimal)s.getValue())).doubleValue());}
;

// Parse a create function statement.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,9 @@
*/
package org.apache.phoenix.compile;

import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.SpanContext;
import io.opentelemetry.context.Scope;
import java.sql.ParameterMetaData;
import java.sql.SQLException;
import java.util.ArrayList;
Expand All @@ -28,8 +31,6 @@
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.htrace.Sampler;
import org.apache.htrace.TraceScope;
import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder;
import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
Expand Down Expand Up @@ -61,7 +62,7 @@
import org.apache.phoenix.schema.tuple.ResultTuple;
import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.schema.types.PLong;
import org.apache.phoenix.trace.util.Tracing;
import org.apache.phoenix.trace.PhoenixTracing;
import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.EnvironmentEdgeManager;
import org.apache.phoenix.util.PhoenixKeyValueUtil;
Expand Down Expand Up @@ -124,7 +125,7 @@ public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throw
@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
final PhoenixConnection conn = stmt.getConnection();
if (conn.getTraceScope() == null && !traceStatement.isTraceOn()) {
if (conn.getTraceSpan() == null && !traceStatement.isTraceOn()) {
return ResultIterator.EMPTY_ITERATOR;
}
return new TraceQueryResultIterator(conn);
Expand Down Expand Up @@ -254,30 +255,33 @@ public void close() throws SQLException {

@Override
public Tuple next() throws SQLException {
if (!first) return null;
TraceScope traceScope = conn.getTraceScope();
if (!first) {
return null;
}
Span traceSpan = conn.getTraceSpan();
if (traceStatement.isTraceOn()) {
conn.setSampler(Tracing.getConfiguredSampler(traceStatement));
if (conn.getSampler() == Sampler.NEVER) {
closeTraceScope(conn);
}
if (traceScope == null && !conn.getSampler().equals(Sampler.NEVER)) {
traceScope = Tracing.startNewSpan(conn, "Enabling trace");
if (traceScope.getSpan() != null) {
conn.setTraceScope(traceScope);
} else {
closeTraceScope(conn);
}
// TRACE ON: create a new span if one doesn't exist
if (traceSpan == null) {
traceSpan = PhoenixTracing.createSpan("phoenix.trace.session");
Scope scope = traceSpan.makeCurrent();
conn.setTraceSpan(traceSpan);
conn.setTraceScope(scope);
}
} else {
closeTraceScope(conn);
conn.setSampler(Sampler.NEVER);
// TRACE OFF: close the existing span
closeTrace(conn);
}
if (traceSpan == null || !traceSpan.getSpanContext().isValid()) {
return null;
}
if (traceScope == null || traceScope.getSpan() == null) return null;
first = false;
// Return the trace ID to the client
// OTel trace IDs are hex strings; convert to a long for backward compatibility
SpanContext spanContext = traceSpan.getSpanContext();
long traceIdLong = parseTraceIdAsLong(spanContext.getTraceId());
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
ParseNodeFactory factory = new ParseNodeFactory();
LiteralParseNode literal = factory.literal(traceScope.getSpan().getTraceId());
LiteralParseNode literal = factory.literal(traceIdLong);
LiteralExpression expression =
LiteralExpression.newConstant(literal.getValue(), PLong.INSTANCE, Determinism.ALWAYS);
expression.evaluate(null, ptr);
Expand All @@ -290,11 +294,31 @@ public Tuple next() throws SQLException {
return new ResultTuple(Result.create(cells));
}

private void closeTraceScope(final PhoenixConnection conn) {
if (conn.getTraceScope() != null) {
conn.getTraceScope().close();
/**
 * Extract a 64-bit {@code long} from an OpenTelemetry trace ID for backward compatibility.
 *
 * <p>OTel trace IDs are 32-character lowercase hex strings (128 bits). We take the LAST 16 hex
 * characters — the lower 64 bits — so clients that expect the old HTrace {@code long} trace IDs
 * still receive a long value. (The previous javadoc said "first 16"; the code has always taken
 * the last 16, which is what "lower 64 bits" means for a big-endian hex string.)
 *
 * @param traceId the OTel trace ID hex string; may be {@code null}
 * @return the lower 64 bits of the trace ID, or {@code 0L} if the ID is {@code null}, shorter
 *         than 16 characters, or not valid hex
 */
// static (reads no instance state) and package-private for testability
static long parseTraceIdAsLong(String traceId) {
  if (traceId == null || traceId.length() < 16) {
    return 0L;
  }
  // Take the last 16 hex chars (lower 64 bits)
  String lower64 = traceId.substring(traceId.length() - 16);
  try {
    return Long.parseUnsignedLong(lower64, 16);
  } catch (NumberFormatException e) {
    // Defensive: a well-formed OTel trace ID is always hex, but never let a malformed ID
    // crash the TRACE statement; fall back to 0L like the other invalid-input cases.
    return 0L;
  }
}

/**
 * Tear down any active trace session on this connection: detach the context scope first, then
 * end the span, clearing both references so a subsequent TRACE ON starts a fresh session.
 */
private void closeTrace(PhoenixConnection conn) {
  // Read both handles up front; either may independently be null.
  final Scope activeScope = conn.getTraceScope();
  final Span activeSpan = conn.getTraceSpan();
  // Detach the thread-local context BEFORE ending the span, per OTel conventions.
  if (activeScope != null) {
    activeScope.close();
    conn.setTraceScope(null);
  }
  if (activeSpan != null) {
    activeSpan.end();
    conn.setTraceSpan(null);
  }
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@
*/
package org.apache.phoenix.execute;

import io.opentelemetry.api.trace.Span;
import io.opentelemetry.context.Scope;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
Expand All @@ -33,7 +35,6 @@
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.WritableUtils;
import org.apache.htrace.TraceScope;
import org.apache.phoenix.cache.ServerCacheClient.ServerCache;
import org.apache.phoenix.compile.ExplainPlan;
import org.apache.phoenix.compile.ExplainPlanAttributes;
Expand Down Expand Up @@ -72,8 +73,8 @@
import org.apache.phoenix.schema.PTable.IndexType;
import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.TableRef;
import org.apache.phoenix.trace.PhoenixTracing;
import org.apache.phoenix.trace.TracingIterator;
import org.apache.phoenix.trace.util.Tracing;
import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.IndexUtil;
import org.apache.phoenix.util.LogUtil;
Expand Down Expand Up @@ -364,10 +365,11 @@ public final ResultIterator iterator(final Map<ImmutableBytesPtr, ServerCache> c
}

// wrap the iterator so we start/end tracing as we expect
if (Tracing.isTracing()) {
TraceScope scope = Tracing.startNewSpan(context.getConnection(),
"Creating basic query for " + getPlanSteps(iterator));
if (scope.getSpan() != null) return new TracingIterator(scope, iterator);
if (PhoenixTracing.isRecording()) {
Span span = PhoenixTracing
.createSpan("phoenix.query.execute." + context.getCurrentTable().getTable().getName());
Scope scope = span.makeCurrent();
return new TracingIterator(span, scope, iterator);
}
return iterator;
}
Expand Down
Loading