server:
  http_listen_port: 3200

query_frontend:
  search:
    duration_slo: 5s
    throughput_bytes_slo: 1.073741824e+09
  trace_by_id:
    duration_slo: 5s

distributor:
  receivers:                           # this configuration will listen on all ports and protocols that tempo is capable of.
    jaeger:                            # the receivers all come from the OpenTelemetry collector. more configuration information can
      protocols:                       # be found here: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver
        thrift_http:                   #
        grpc:                          # for a production deployment you should only enable the receivers you need!
        thrift_binary:
        thrift_compact:
    zipkin:
    otlp:
      protocols:
        http:
        grpc:
    opencensus:

ingester:
  max_block_duration: 5m               # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally

metrics_generator:
  processor:
    span_metrics:
      dimensions: [http.status_code, http.method]
      dimension_mappings:
        - name: http_status_code
          source_labels: [http_status_code]
        - name: http_method
          source_labels: [http_method]
      enable_target_info: true
  registry:
    external_labels:
      source: tempo
      cluster: docker-compose
  storage:
    path: /tmp/tempo/generator/wal
    remote_write:
      - url: http://prometheus:9090/api/v1/write
        send_exemplars: true

storage:
  trace:
    backend: local                     # backend configuration to use
    wal:
      path: /tmp/tempo/wal             # where to store the wal locally
    local:
      path: /tmp/tempo/blocks

overrides:
  defaults:
    metrics_generator:
      processors: [service-graphs, span-metrics] # enables metrics generator
    global:
      # Maximum size of a single trace in bytes. A value of 0 disables the size
      # check.
      # This limit is used in 3 places:
      #   - During search, traces will be skipped when they exceed this threshold.
      #   - During ingestion, traces that exceed this threshold will be refused.
      #   - During compaction, traces that exceed this threshold will be partially dropped.
      # During ingestion, exceeding the threshold results in errors like:
      #   TRACE_TOO_LARGE: max size of trace (5000000) exceeded while adding 387 bytes
      max_bytes_per_trace: 20000000
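
# The OTLP receivers enabled above listen on the standard OpenTelemetry ports
# (4317 for gRPC, 4318 for HTTP) unless an endpoint is configured explicitly.
# As a minimal sketch (assuming the Tempo container is reachable as "tempo" on
# the docker-compose network), an OpenTelemetry Collector could forward traces
# to this instance with an exporter block like:
#
#   exporters:
#     otlp:
#       endpoint: tempo:4317
#       tls:
#         insecure: true
#
# With the metrics_generator processors enabled above, the generated series
# (e.g. span-metrics call counts and latencies) are pushed to Prometheus via
# the remote_write URL configured in this file.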