build(docker): add docker containers

2024-08-07 12:29:53 +02:00
parent 7b9327a55d
commit b3dd6579c5
12 changed files with 409 additions and 0 deletions

docker/nginx/default.conf Normal file

@@ -0,0 +1,19 @@
server {
    index index.php index.html;
    server_name symfony_example_app;
    error_log stderr debug;
    access_log stderr;
    listen 80;
    root /var/www/html/public;

    location / {
        try_files $uri /index.php$is_args$args;
    }

    location ~ ^/.+\.php(/|$) {
        fastcgi_pass php-fpm:9000;
        include fastcgi_params;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
    }
}
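
The php-fpm host in fastcgi_pass has to resolve to the PHP-FPM container on the Docker network. The compose file added by this commit is not shown here; a minimal sketch of the wiring (image tags, host port, and mount paths are assumptions, only the service name php-fpm is taken from the config above) could look like:

# Hypothetical compose wiring for the nginx config above; not taken from this commit.
services:
  php-fpm:                      # name must match the fastcgi_pass upstream "php-fpm:9000"
    image: php:8.3-fpm-alpine   # assumed image
    volumes:
      - ./:/var/www/html
  nginx:
    image: nginx:alpine         # assumed image
    ports:
      - "8080:80"               # assumed host port
    volumes:
      - ./docker/nginx/default.conf:/etc/nginx/conf.d/default.conf:ro
      - ./:/var/www/html
    depends_on:
      - php-fpm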


@@ -0,0 +1,43 @@
apiVersion: 1

datasources:
  - name: Prometheus
    type: prometheus
    uid: prometheus
    access: proxy
    orgId: 1
    url: http://prometheus:9090
    basicAuth: false
    isDefault: false
    version: 1
    editable: false
    jsonData:
      httpMethod: GET
  - name: Tempo
    type: tempo
    access: proxy
    orgId: 1
    url: http://tempo:3200
    basicAuth: false
    isDefault: true
    version: 1
    editable: false
    apiVersion: 1
    uid: tempo
    jsonData:
      httpMethod: GET
      serviceMap:
        datasourceUid: prometheus
  - name: Loki
    type: loki
    access: proxy
    orgId: 1
    url: http://loki:3100
    basicAuth: false
    isDefault: false
    version: 1
    editable: false
    apiVersion: 1
    uid: loki
    jsonData:
      httpMethod: GET
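
Grafana only reads this file if it is mounted into its provisioning directory. A hedged sketch of that mount (the container path is the Grafana default; the host path and image tag are assumptions, since the file's location in this commit is not shown):

# Hypothetical; assumes the datasource file above is provisioned into Grafana.
services:
  grafana:
    image: grafana/grafana:latest
    ports:
      - "3000:3000"
    volumes:
      - ./docker/grafana/datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml:ro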


@@ -0,0 +1,37 @@
auth_enabled: false

server:
  http_listen_port: 3100
  grpc_listen_port: 9096

common:
  instance_addr: 127.0.0.1
  path_prefix: /tmp/loki
  storage:
    filesystem:
      chunks_directory: /tmp/loki/chunks
      rules_directory: /tmp/loki/rules
  replication_factor: 1
  ring:
    kvstore:
      store: inmemory

query_range:
  results_cache:
    cache:
      embedded_cache:
        enabled: true
        max_size_mb: 100

schema_config:
  configs:
    - from: 2020-10-24
      store: tsdb
      object_store: filesystem
      schema: v12
      index:
        prefix: index_
        period: 24h

ruler:
  alertmanager_url: http://localhost:9093


@@ -0,0 +1,68 @@
receivers:
  loki:
    protocols:
      grpc:
    use_incoming_timestamp: true
  otlp:
    protocols:
      grpc:
processors:
  attributes:
    actions:
      - action: insert
        key: loki.attribute.labels
        value: context, code.filepath, code.namespace, code.function, code.lineno, http.request.method, http.request.body.size, url.full, url.scheme, url.path, http.route, http.response.status_code
      - action: insert
        from_attribute: context
        key: context
      - action: insert
        from_attribute: code.namespace
        key: code.namespace
      - action: insert
        from_attribute: code.function
        key: code.function
      - action: insert
        from_attribute: code.lineno
        key: code.lineno
      - action: insert
        from_attribute: http.request.method
        key: http.request.method
      - action: insert
        from_attribute: http.request.body.size
        key: http.request.body.size
      - action: insert
        from_attribute: http.response.status_code
        key: http.response.status_code
      - action: insert
        from_attribute: url.full
        key: url.full
      - action: insert
        from_attribute: url.scheme
        key: url.scheme
      - action: insert
        from_attribute: url.path
        key: url.path
      - action: insert
        from_attribute: http.route
        key: http.route
      - action: insert
        key: loki.format
        value: raw
exporters:
  loki:
    endpoint: http://loki:3100/loki/api/v1/push
  otlp:
    endpoint: tempo:4317
    tls:
      insecure: true
  logging:
    verbosity: Detailed
service:
  pipelines:
    logs:
      receivers: [otlp]
      processors: [attributes]
      exporters: [logging, loki]
    traces:
      receivers: [otlp]
      exporters: [logging, otlp]
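
Both pipelines receive only OTLP, so the instrumented Symfony application has to export to the collector's OTLP/gRPC port (4317 by default when the receiver endpoint is left unset). A minimal sketch of the application container's environment, assuming the collector service is named otelcol as in the Promtail client URL further down, and using the standard OpenTelemetry SDK variables:

# Hypothetical environment for the PHP container; the variable names are standard
# OpenTelemetry SDK settings, the service and host names are assumptions.
services:
  php-fpm:
    environment:
      OTEL_SERVICE_NAME: symfony_example_app
      OTEL_EXPORTER_OTLP_PROTOCOL: grpc
      OTEL_EXPORTER_OTLP_ENDPOINT: http://otelcol:4317   # the otlp/grpc receiver above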


@@ -0,0 +1,11 @@
global:
  scrape_interval: 15s
  evaluation_interval: 15s

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: [ 'localhost:9090' ]
  - job_name: 'tempo'
    static_configs:
      - targets: [ 'tempo:3200' ]


@@ -0,0 +1,2 @@
clients:
  - url: http://otelcol:3500/loki/api/v1/push


@@ -0,0 +1,73 @@
server:
  http_listen_port: 3200

query_frontend:
  search:
    duration_slo: 5s
    throughput_bytes_slo: 1.073741824e+09
  trace_by_id:
    duration_slo: 5s

distributor:
  receivers:            # this configuration will listen on all ports and protocols that tempo is capable of.
    jaeger:             # the receivers all come from the OpenTelemetry collector. more configuration information can
      protocols:        # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver
        thrift_http:    #
        grpc:           # for a production deployment you should only enable the receivers you need!
        thrift_binary:
        thrift_compact:
    zipkin:
    otlp:
      protocols:
        http:
        grpc:
    opencensus:

ingester:
  max_block_duration: 5m  # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally

metrics_generator:
  processor:
    span_metrics:
      dimensions: [http.status_code, http.method]
      dimension_mappings:
        - name: http_status_code
          source_labels: [http_status_code]
        - name: http_method
          source_labels: [http_method]
      enable_target_info: true
  registry:
    external_labels:
      source: tempo
      cluster: docker-compose
  storage:
    path: /tmp/tempo/generator/wal
    remote_write:
      - url: http://prometheus:9090/api/v1/write
        send_exemplars: true

storage:
  trace:
    backend: local              # backend configuration to use
    wal:
      path: /tmp/tempo/wal      # where to store the wal locally
    local:
      path: /tmp/tempo/blocks

overrides:
  defaults:
    metrics_generator:
      processors: [service-graphs, span-metrics]  # enables metrics generator
    global:
      # Maximum size of a single trace in bytes. A value of 0 disables the size
      # check.
      # This limit is used in 3 places:
      #   - During search, traces will be skipped when they exceed this threshold.
      #   - During ingestion, traces that exceed this threshold will be refused.
      #   - During compaction, traces that exceed this threshold will be partially dropped.
      # During ingestion, exceeding the threshold results in errors like
      #   TRACE_TOO_LARGE: max size of trace (5000000) exceeded while adding 387 bytes
      max_bytes_per_trace: 20000000
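
The metrics_generator above remote-writes span metrics to http://prometheus:9090/api/v1/write, and a stock Prometheus only accepts such writes when its remote-write receiver is enabled. A hedged sketch of the corresponding service (the image tag and host config path are assumptions; only the flag matters here):

# Hypothetical; the --web.enable-remote-write-receiver flag is what allows
# Tempo's remote_write target above to be accepted.
services:
  prometheus:
    image: prom/prometheus:latest
    command:
      - --config.file=/etc/prometheus/prometheus.yml
      - --web.enable-remote-write-receiver
    volumes:
      - ./docker/prometheus.yaml:/etc/prometheus/prometheus.yml:ro   # assumed host path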