SDK configuration and semantic conventions for SEA™ Forge.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
# OpenTelemetry SDK bootstrap for SEA Forge services: configures the shared
# Resource, the trace pipeline, and the metric pipeline, then installs both
# providers as the process-wide defaults.
from opentelemetry import trace, metrics
from opentelemetry.sdk.trace import TracerProvider
# BUGFIX: BatchSpanProcessor was used below but never imported (NameError).
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
from opentelemetry.sdk.resources import Resource
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import OTLPMetricExporter

# Resource with semantic context — attached to every span and metric emitted
# by this service.
resource = Resource.create({
    "service.name": "sea-api",
    "service.version": "1.0.0",
    "sea.platform": "sea-forge",
    "sea.domain": "governance",
})

# Traces: batch spans in memory and export them over OTLP/gRPC.
trace_provider = TracerProvider(resource=resource)
trace_provider.add_span_processor(
    BatchSpanProcessor(OTLPSpanExporter(endpoint="localhost:4317"))
)
trace.set_tracer_provider(trace_provider)

# Metrics: a MeterProvider with no reader never exports anything, and the
# imported OTLPMetricExporter was previously unused — wire it through a
# periodic reader so metrics actually leave the process.
meter_provider = MeterProvider(
    resource=resource,
    metric_readers=[
        PeriodicExportingMetricReader(OTLPMetricExporter(endpoint="localhost:4317"))
    ],
)
metrics.set_meter_provider(meter_provider)
1
2
3
4
5
6
7
# Obtain a tracer scoped to this module, then wrap order handling in a span
# tagged with the SEA semantic attributes.
tracer = trace.get_tracer(__name__)

with tracer.start_as_current_span("process-order") as order_span:
    # NOTE(review): `order_id` is assumed to be supplied by the surrounding
    # application context — it is not defined in this snippet; confirm.
    order_span.set_attribute("sea.concept", "OrderProcessing")
    order_span.set_attribute("order.id", order_id)
    # ... order processing work goes here ...
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
# Acquire a meter scoped to this module and declare the SEA Forge instruments
# up front, then record one sample through each.
meter = metrics.get_meter(__name__)

# Monotonic count of orders processed.
order_counter = meter.create_counter(
    "sea_orders_total",
    description="Total orders processed",
)

# Distribution of request latencies, in seconds.
latency_histogram = meter.create_histogram(
    "sea_request_latency_seconds",
    description="Request latency",
)

# Example recordings, each carrying a SEA semantic attribute.
order_counter.add(1, {"sea.domain": "sales"})
latency_histogram.record(0.025, {"sea.concept": "OrderProcessing"})
| Attribute | Description |
|---|---|
| sea.platform | Platform identifier |
| sea.domain | Business domain |
| sea.concept | Active semantic concept |
| sea.regime_id | Invariant regime ID |
1
2
3
# Propagate correlation
# NOTE(review): `span`, `correlation_id`, and `causation_id` are assumed to be
# defined by the surrounding application context (an active span plus IDs from
# the inbound message) — they are not defined in this snippet; confirm.
span.set_attribute("sea.correlation_id", correlation_id)
span.set_attribute("sea.causation_id", causation_id)
Last Updated: January 2026