build(docker): add docker and docker-compose files

Ondrej Vlach 2024-08-03 19:20:36 +02:00
parent 9889c206f0
commit 4361655127
14 changed files with 409 additions and 45 deletions

2
.gitignore vendored

@@ -32,3 +32,5 @@ phpstan.neon
npm-debug.log
yarn-error.log
###< symfony/webpack-encore-bundle ###
.tempo

11
Dockerfile.fpm Normal file

@@ -0,0 +1,11 @@
FROM php:8.3-fpm
RUN apt-get update && apt-get install -y nodejs npm unzip libzip-dev && rm -rf /var/cache/apt/*
RUN curl -sSL https://github.com/mlocati/docker-php-extension-installer/releases/latest/download/install-php-extensions -o - | sh -s \
opentelemetry-stable opcache zip grpc intl pgsql pdo_pgsql
RUN curl -sS https://getcomposer.org/installer | php && mv composer.phar /usr/local/bin/composer
RUN composer self-update
RUN usermod -a -G www-data root
RUN mkdir -p /var/www/html
RUN chown -R www-data:www-data /var/www/html
WORKDIR /var/www/html/
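
The PHP-FPM image can be built and inspected on its own before wiring it into Compose; a minimal sketch, assuming the local tag app-php-fpm (the tag is not used anywhere in this commit):

# Build the PHP-FPM image from the repository root.
docker build -f Dockerfile.fpm -t app-php-fpm .
# List the loaded PHP modules to confirm opentelemetry, grpc, intl and pdo_pgsql are present.
docker run --rm app-php-fpm php -m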

3
Dockerfile.nginx Normal file

@@ -0,0 +1,3 @@
FROM nginx:1.25.3-alpine
COPY docker/nginx/default.conf /etc/nginx/conf.d/default.conf
EXPOSE 80
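
The nginx image only bakes in the vhost configuration and expects a php-fpm upstream on the Compose network, which nginx resolves at startup; a hedged standalone config check therefore needs a host alias (the image tag is an assumption):

# Build the image and validate the bundled configuration without the real php-fpm service.
docker build -f Dockerfile.nginx -t app-nginx .
docker run --rm --add-host php-fpm:127.0.0.1 app-nginx nginx -t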

0
Dockerfile.static Normal file

19
(deleted Compose file)

@@ -1,19 +0,0 @@
version: '3'
services:
###> doctrine/doctrine-bundle ###
database:
ports:
- "5432"
###< doctrine/doctrine-bundle ###
###> symfony/mailer ###
mailer:
image: axllent/mailpit
ports:
- "1025"
- "8025"
environment:
MP_SMTP_AUTH_ACCEPT_ANY: 1
MP_SMTP_AUTH_ALLOW_INSECURE: 1
###< symfony/mailer ###

26
(deleted Compose file)

@@ -1,26 +0,0 @@
version: '3'
services:
###> doctrine/doctrine-bundle ###
database:
image: postgres:${POSTGRES_VERSION:-16}-alpine
environment:
POSTGRES_DB: ${POSTGRES_DB:-app}
# You should definitely change the password in production
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-!ChangeMe!}
POSTGRES_USER: ${POSTGRES_USER:-app}
healthcheck:
test: ["CMD", "pg_isready", "-d", "${POSTGRES_DB:-app}", "-U", "${POSTGRES_USER:-app}"]
timeout: 5s
retries: 5
start_period: 60s
volumes:
- database_data:/var/lib/postgresql/data:rw
# You may use a bind-mounted host directory instead, so that it is harder to accidentally remove the volume and lose all your data!
# - ./docker/db/data:/var/lib/postgresql/data:rw
###< doctrine/doctrine-bundle ###
volumes:
###> doctrine/doctrine-bundle ###
database_data:
###< doctrine/doctrine-bundle ###

137
docker-compose.yml Normal file

@@ -0,0 +1,137 @@
version: '3.9'
services:
nginx:
build:
context: .
dockerfile: ./Dockerfile.nginx
ports:
- '8000:80'
volumes:
- ./docker/nginx/default.conf:/etc/nginx/conf.d/default.conf
- ./public:/var/www/html/public
links:
- php-fpm
php-fpm:
build:
context: .
dockerfile: ./Dockerfile.fpm
volumes:
- ./:/var/www/html/
environment:
- OTEL_LOG_LEVEL=debug
- OTEL_TRACES_EXPORTER=otlp
- OTEL_METRICS_EXPORTER=otlp
- OTEL_LOGS_EXPORTER=otlp
- OTEL_PHP_AUTOLOAD_ENABLED=true
- OTEL_PHP_TRACES_PROCESSOR=simple
- OTEL_PHP_LOG_DESTINATION=stderr
- OTEL_EXPORTER_OTLP_PROTOCOL=grpc
- OTEL_EXPORTER_OTLP_ENDPOINT=http://otelcol:4317
- DATABASE_URL=pgsql://brilo:localdevelopment@postgres_dev/brilo
- DATABASE_TEST_URL=pgsql://brilo_test:localdevelopment@postgres_test/brilo_test
links:
- otelcol
- postgres_dev
- postgres_test
dns: # TODO: fix-me
- 8.8.8.8
# ----------------- database
postgres_dev:
image: postgres:16.3
environment:
- POSTGRES_USER=brilo
- POSTGRES_PASSWORD=localdevelopment
- POSTGRES_DB=brilo
healthcheck:
      test: ["CMD-SHELL", "pg_isready -d brilo -U brilo"]
interval: 30s
timeout: 60s
retries: 5
start_period: 80s
postgres_test:
image: postgres:16.3
environment:
- POSTGRES_USER=brilo_test
- POSTGRES_PASSWORD=localdevelopment
- POSTGRES_DB=brilo_test
healthcheck:
      test: ["CMD-SHELL", "pg_isready -d brilo_test -U brilo_test"]
interval: 30s
timeout: 60s
retries: 5
start_period: 80s
# ----------------- telemetry
tempo:
image: grafana/tempo:latest
command: [ "-config.file=/etc/tempo.yaml" ]
volumes:
- ./docker/telemetry/tempo.yaml:/etc/tempo.yaml
- ./.tempo:/tmp/tempo
loki:
image: grafana/loki:2.9.2
ports:
- "3100:3100"
command: -config.file=/etc/loki.yaml
volumes:
- ./.loki:/loki
- ./docker/telemetry/loki.yaml:/etc/loki.yaml
promtail:
image: grafana/promtail:2.9.2
command: -config.file=/etc/promtail/config.yaml
volumes:
- ./docker/telemetry/promtail.yaml:/etc/promtail/config.yaml
links:
- prometheus
prometheus:
image: prom/prometheus:latest
command:
- --config.file=/etc/prometheus.yaml
- --web.enable-remote-write-receiver
- --enable-feature=exemplar-storage
volumes:
- ./docker/telemetry/prometheus.yaml:/etc/prometheus.yaml
ports:
- "9090:9090"
grafana:
image: grafana/grafana:10.1.1
volumes:
- ./docker/telemetry/grafana-datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml
environment:
- GF_AUTH_ANONYMOUS_ENABLED=true
- GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
- GF_AUTH_DISABLE_LOGIN_FORM=true
- GF_FEATURE_TOGGLES_ENABLE=traceqlEditor
ports:
- "3000:3000"
otelcol:
image: otel/opentelemetry-collector-contrib:latest
deploy:
resources:
limits:
memory: 125M
restart: unless-stopped
command: [ "--config=/etc/otelcol-config.yaml" ]
volumes:
- ./docker/telemetry/otelcol-config.yaml:/etc/otelcol-config.yaml
depends_on:
- loki
- tempo
links:
- loki
- tempo
# TODO: networks
networks:
default:
driver: "bridge"
ipam:
driver: default
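
Taken together, a typical local bring-up of this stack might look like the sketch below; the composer install and Doctrine migration steps are assumptions about the surrounding Symfony project rather than something this file enforces:

# Build the images and start the application plus the telemetry services.
docker compose up -d --build
# Install PHP dependencies inside the php-fpm container (the source tree is bind-mounted).
docker compose exec php-fpm composer install
# Apply database migrations against postgres_dev (assumes DoctrineMigrationsBundle is installed).
docker compose exec php-fpm php bin/console doctrine:migrations:migrate --no-interaction
# nginx then serves the app on http://localhost:8000 and Grafana is on http://localhost:3000.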

22
docker/nginx/default.conf Normal file

@@ -0,0 +1,22 @@
server {
index index.php index.html;
server_name symfony_example_app;
error_log stderr debug;
    access_log /dev/stderr;
listen 80;
root /var/www/html/public;
location / {
try_files $uri /index.php$is_args$args;
}
location /build {
try_files $uri /$uri;
}
location ~ ^/.+\.php(/|$) {
fastcgi_pass php-fpm:9000;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
}
}
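
Once the stack is up, the vhost can be exercised through the published port 8000; the asset path below is purely illustrative and depends on the Encore build output:

# Any path falls back to index.php via try_files, so this hits the Symfony front controller.
curl -i http://localhost:8000/
# Files under /build are served directly by nginx, bypassing php-fpm.
curl -I http://localhost:8000/build/app.css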

43
docker/telemetry/grafana-datasources.yaml Normal file

@@ -0,0 +1,43 @@
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
uid: prometheus
access: proxy
orgId: 1
url: http://prometheus:9090
basicAuth: false
isDefault: false
version: 1
editable: false
jsonData:
httpMethod: GET
- name: Tempo
type: tempo
access: proxy
orgId: 1
url: http://tempo:3200
basicAuth: false
isDefault: true
version: 1
editable: false
apiVersion: 1
uid: tempo
jsonData:
httpMethod: GET
serviceMap:
datasourceUid: prometheus
- name: Loki
type: loki
access: proxy
orgId: 1
url: http://loki:3100
basicAuth: false
isDefault: false
version: 1
editable: false
apiVersion: 1
uid: loki
jsonData:
httpMethod: GET
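
With anonymous admin access enabled in the Compose file, the provisioned datasources should be visible without logging in; a quick check against Grafana's standard HTTP API:

# Grafana liveness.
curl -s http://localhost:3000/api/health
# List the provisioned datasources (Prometheus, Tempo and Loki); anonymous auth makes credentials unnecessary.
curl -s http://localhost:3000/api/datasources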

37
docker/telemetry/loki.yaml Normal file

@@ -0,0 +1,37 @@
auth_enabled: false
server:
http_listen_port: 3100
grpc_listen_port: 9096
common:
instance_addr: 127.0.0.1
path_prefix: /tmp/loki
storage:
filesystem:
chunks_directory: /tmp/loki/chunks
rules_directory: /tmp/loki/rules
replication_factor: 1
ring:
kvstore:
store: inmemory
query_range:
results_cache:
cache:
embedded_cache:
enabled: true
max_size_mb: 100
schema_config:
configs:
- from: 2020-10-24
store: tsdb
object_store: filesystem
schema: v12
index:
prefix: index_
period: 24h
ruler:
alertmanager_url: http://localhost:9093
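
Loki is published on port 3100, so readiness and the labels extracted from incoming logs can be checked from the host:

# Readiness probe.
curl -s http://localhost:3100/ready
# Labels Loki has indexed so far (the context/code.*/http.* labels come from the collector's attribute hints).
curl -s http://localhost:3100/loki/api/v1/labels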

68
docker/telemetry/otelcol-config.yaml Normal file

@@ -0,0 +1,68 @@
receivers:
loki:
protocols:
grpc:
use_incoming_timestamp: true
otlp:
protocols:
grpc:
processors:
attributes:
actions:
- action: insert
key: loki.attribute.labels
value: context, code.filepath, code.namespace, code.function, code.lineno, http.request.method, http.request.body.size, url.full, url.scheme, url.path, http.route, http.response.status_code
- action: insert
from_attribute: context
key: context
- action: insert
from_attribute: code.namespace
key: code.namespace
- action: insert
from_attribute: code.function
key: code.function
- action: insert
from_attribute: code.lineno
key: code.lineno
- action: insert
from_attribute: http.request.method
key: http.request.method
- action: insert
from_attribute: http.request.body.size
key: http.request.body.size
- action: insert
from_attribute: http.response.status_code
key: http.response.status_code
- action: insert
from_attribute: url.full
key: url.full
- action: insert
from_attribute: url.scheme
key: url.scheme
- action: insert
from_attribute: url.path
key: url.path
- action: insert
from_attribute: http.route
key: http.route
- action: insert
key: loki.format
value: raw
exporters:
loki:
endpoint: http://loki:3100/loki/api/v1/push
otlp:
endpoint: tempo:4317
tls:
insecure: true
logging:
verbosity: Detailed
service:
pipelines:
logs:
receivers: [otlp]
processors: [attributes]
exporters: [logging,loki]
traces:
receivers: [otlp]
exporters: [logging,otlp]
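
Because the logging exporter above echoes every received span and log record, the collector's own output is the first place to look when telemetry does not reach Tempo or Loki:

# Tail the collector; with verbosity Detailed each exported item is printed here.
docker compose logs -f otelcol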

11
docker/telemetry/prometheus.yaml Normal file

@@ -0,0 +1,11 @@
global:
scrape_interval: 15s
evaluation_interval: 15s
scrape_configs:
- job_name: 'prometheus'
static_configs:
- targets: [ 'localhost:9090' ]
- job_name: 'tempo'
static_configs:
- targets: [ 'tempo:3200' ]
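
Prometheus is published on 9090 and, besides its own scrape targets, receives the span metrics that Tempo's metrics generator remote-writes; example queries (the span-metrics name follows Tempo's convention and may differ between versions):

# Confirm the scrape targets are up.
curl -s 'http://localhost:9090/api/v1/query?query=up'
# Request metrics derived from spans by Tempo's metrics generator.
curl -s 'http://localhost:9090/api/v1/query?query=traces_spanmetrics_calls_total'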

2
docker/telemetry/promtail.yaml Normal file

@@ -0,0 +1,2 @@
clients:
- url: http://otelcol:3500/loki/api/v1/push

73
docker/telemetry/tempo.yaml Normal file

@@ -0,0 +1,73 @@
server:
http_listen_port: 3200
query_frontend:
search:
duration_slo: 5s
throughput_bytes_slo: 1.073741824e+09
trace_by_id:
duration_slo: 5s
distributor:
receivers: # this configuration will listen on all ports and protocols that tempo is capable of.
    jaeger: # the receivers all come from the OpenTelemetry collector. more configuration information can
protocols: # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver
thrift_http: #
grpc: # for a production deployment you should only enable the receivers you need!
thrift_binary:
thrift_compact:
zipkin:
otlp:
protocols:
http:
grpc:
opencensus:
ingester:
max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally
metrics_generator:
processor:
span_metrics:
dimensions: [http.status_code, http.method]
dimension_mappings:
- name: http_status_code
source_labels: [http_status_code]
- name: http_method
source_labels: [http_method]
enable_target_info: true
registry:
external_labels:
source: tempo
cluster: docker-compose
storage:
path: /tmp/tempo/generator/wal
remote_write:
- url: http://prometheus:9090/api/v1/write
send_exemplars: true
storage:
trace:
backend: local # backend configuration to use
wal:
      path: /tmp/tempo/wal # where to store the wal locally
local:
path: /tmp/tempo/blocks
overrides:
defaults:
metrics_generator:
processors: [service-graphs, span-metrics] # enables metrics generator
global:
# Maximum size of a single trace in bytes. A value of 0 disables the size
# check.
# This limit is used in 3 places:
# - During search, traces will be skipped when they exceed this threshold.
# - During ingestion, traces that exceed this threshold will be refused.
# - During compaction, traces that exceed this threshold will be partially dropped.
# During ingestion, exceeding the threshold results in errors like
# TRACE_TOO_LARGE: max size of trace (5000000) exceeded while adding 387 bytes
max_bytes_per_trace: 20000000
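
Tempo's HTTP port 3200 is not published to the host, so a readiness check has to go through the Compose network; one option, assuming busybox wget is available in the Alpine-based Grafana image:

# Probe Tempo's readiness endpoint from inside the Compose network.
docker compose exec grafana wget -qO- http://tempo:3200/ready
# Or simply confirm it started cleanly.
docker compose logs tempo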