Fetch the source: git clone https://github.com/deviantony/docker-elk.git
Set up the ELK stack with Compose: docker compose up setup
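The Compose file in the next step resolves ${ELASTIC_VERSION} and the service passwords from the .env file at the repository root. A minimal sketch of its contents, matching the defaults docker-elk ships (the version number here is only an example, and every password should be changed):

ELASTIC_VERSION=8.15.1
ELASTIC_PASSWORD='changeme'
LOGSTASH_INTERNAL_PASSWORD='changeme'
KIBANA_SYSTEM_PASSWORD='changeme'
METRICBEAT_INTERNAL_PASSWORD='changeme'
FILEBEAT_INTERNAL_PASSWORD='changeme'
HEARTBEAT_INTERNAL_PASSWORD='changeme'
MONITORING_INTERNAL_PASSWORD='changeme'
BEATS_SYSTEM_PASSWORD='changeme'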
Create the main Compose file with Filebeat added
services:

  # Setup service (unchanged)
  setup:
    profiles:
      - setup
    build:
      context: setup/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    init: true
    volumes:
      - ./setup/entrypoint.sh:/entrypoint.sh:ro,Z
      - ./setup/lib.sh:/lib.sh:ro,Z
      - ./setup/roles:/roles:ro,Z
    environment:
      ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
      LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
      KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
      METRICBEAT_INTERNAL_PASSWORD: ${METRICBEAT_INTERNAL_PASSWORD:-}
      FILEBEAT_INTERNAL_PASSWORD: ${FILEBEAT_INTERNAL_PASSWORD:-}
      HEARTBEAT_INTERNAL_PASSWORD: ${HEARTBEAT_INTERNAL_PASSWORD:-}
      MONITORING_INTERNAL_PASSWORD: ${MONITORING_INTERNAL_PASSWORD:-}
      BEATS_SYSTEM_PASSWORD: ${BEATS_SYSTEM_PASSWORD:-}
    networks:
      - elk
    depends_on:
      - elasticsearch

  # Elasticsearch service (unchanged)
  elasticsearch:
    build:
      context: elasticsearch/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro,Z
      - elasticsearch:/usr/share/elasticsearch/data:Z
    ports:
      - 9200:9200
      - 9300:9300
    environment:
      node.name: elasticsearch
      ES_JAVA_OPTS: -Xms512m -Xmx512m
      ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
      discovery.type: single-node
    networks:
      - elk
    restart: unless-stopped

  # Logstash service (unchanged)
  logstash:
    build:
      context: logstash/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro,Z
      - ./logstash/pipeline:/usr/share/logstash/pipeline:ro,Z
    ports:
      - 5044:5044
      - 50000:50000/tcp
      - 50000:50000/udp
      - 9600:9600
    environment:
      LS_JAVA_OPTS: -Xms256m -Xmx256m
      LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
    networks:
      - elk
    depends_on:
      - elasticsearch
    restart: unless-stopped

  # Kibana service (unchanged)
  kibana:
    build:
      context: kibana/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro,Z
    ports:
      - 5601:5601
    environment:
      KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
    networks:
      - elk
    depends_on:
      - elasticsearch
    restart: unless-stopped

  # Filebeat service (newly added)
  filebeat:
    build:
      context: extensions/filebeat/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    user: root
    command:
      # Log to stderr.
      - -e
      # Disable config file permission checks, so 'config/filebeat.yml' can be mounted even if it is not owned by root.
      # See https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html for details.
      - --strict.perms=false
    volumes:
      # Mount the Filebeat configuration file.
      - ./extensions/filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro,Z
      # Mount Docker container logs for Filebeat to process.
      - type: bind
        source: /var/lib/docker/containers
        target: /var/lib/docker/containers
        read_only: true
      # Mount the Docker socket so Filebeat can read container metadata.
      - type: bind
        source: /var/run/docker.sock
        target: /var/run/docker.sock
        read_only: true
      # Mount the host logs directory you want Filebeat to monitor.
      - ./logs:/usr/share/filebeat/logs:ro,Z
    environment:
      FILEBEAT_INTERNAL_PASSWORD: ${FILEBEAT_INTERNAL_PASSWORD:-}
      BEATS_SYSTEM_PASSWORD: ${BEATS_SYSTEM_PASSWORD:-}
    networks:
      - elk
    depends_on:
      - elasticsearch

networks:
  elk:
    name: docker-elk_elk
    driver: bridge

volumes:
  elasticsearch:
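Before starting anything, you can let Compose validate and render the merged file; this catches YAML and variable-interpolation mistakes early:

docker compose config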
Point the Filebeat output at Logstash in the Filebeat configuration file
filebeat.yml (extensions/filebeat/config)
name: filebeat

filebeat.config:
  modules:
    path: ${path.config}/modules.d/*.yml
    reload.enabled: false

# Define inputs to collect logs from a specific directory
filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /usr/share/filebeat/logs/*

processors:
  - add_cloud_metadata: ~

# Enable monitoring and specify the Elasticsearch host for monitoring data
monitoring:
  enabled: true
  elasticsearch:
    hosts: ["http://elasticsearch:9200"] # Replace with your Elasticsearch host and port
    username: "beats_system"             # Use the built-in `beats_system` user
    password: "changeme"                 # Replace with the actual password for the `beats_system` user

# Send logs to Logstash instead of Elasticsearch
output.logstash:
  hosts: ["logstash:5044"] # "logstash" is the Compose service name of the Logstash container

http:
  enabled: true
  host: 0.0.0.0
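Filebeat's built-in test subcommands are a quick sanity check for this file. A sketch, assuming the stock Filebeat image entrypoint (which forwards extra arguments to the filebeat binary) and that the logstash service is already up for the output test:

docker compose run --rm filebeat test config
docker compose run --rm filebeat test output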
Add the Logstash parsing configuration
logstash.conf (logstash/pipeline)
input {
  beats {
    port => 5044
    host => "0.0.0.0"
  }
}

filter {
  grok {
    match => {
      "message" => '%{IP:remote_addr} - - \[%{HTTPDATE:time_local}\] "%{WORD:http_method} %{DATA:request} HTTP/%{NUMBER:http_version}" %{NUMBER:status:int} %{NUMBER:bytes:int} %{NUMBER:request_time:float} "(?:%{URI:http_referer}|-)" %{NUMBER:session_id:int} "(?:%{DATA:http_user_agent}|-)" "-"'
    }
  }

  date {
    match => [ "time_local", "dd/MMM/yyyy:HH:mm:ss Z" ]
    timezone => "Asia/Seoul"  # Adjust for the KST timezone
    target => "@timestamp"
  }

  mutate {
    remove_field => [ "host", "time_local" ]  # Remove unnecessary fields if desired
  }
}

output {
  elasticsearch {
    hosts => ["http://elasticsearch:9200"]
    user => "elastic"
    password => "changeme"  # Replace with your actual password
    index => "was-access-log-%{+YYYY.MM.dd}"  # Custom index name for WAS logs
    codec => json
  }

  stdout {
    codec => rubydebug
  }
}
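Because the output section also writes every event to stdout with the rubydebug codec, you can watch the parsed events live in the Logstash container logs:

docker compose logs -f logstash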
filter {
  grok {
    match => {
      "message" => '%{IP:remote_addr} - - \[%{HTTPDATE:time_local}\] "%{WORD:http_method} %{DATA:request} HTTP/%{NUMBER:http_version}" %{NUMBER:status:int} %{NUMBER:bytes:int} %{NUMBER:request_time:float} "(?:%{URI:http_referer}|-)" %{NUMBER:session_id:int} "(?:%{DATA:http_user_agent}|-)" "-"'
    }
  }
}
You can test this part with the tool below.
Grok Debugger | Autocomplete and Live Match Highlighting
grokdebugger.com
Put the pattern you want to parse into Pattern and a log line into Sample, and the match is parsed and highlighted in real time.
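For example, a hypothetical access-log line that the pattern above matches:

192.168.0.10 - - [23/Jan/2025:14:05:12 +0900] "GET /api/users HTTP/1.1" 200 512 0.042 "-" 12345 "Mozilla/5.0" "-"

Parsing it yields remote_addr=192.168.0.10, http_method=GET, request=/api/users, status=200, bytes=512, request_time=0.042, session_id=12345, and http_user_agent=Mozilla/5.0.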
Run Compose: docker compose up -d
Reset the Beats password: docker compose exec elasticsearch bin/elasticsearch-reset-password --batch --user beats_system
Update filebeat.yml (extensions/filebeat/config) with the changed password.
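Only the monitoring credentials in filebeat.yml need to change; <new beats_system password> below is a placeholder for the value the reset command prints:

monitoring:
  enabled: true
  elasticsearch:
    hosts: ["http://elasticsearch:9200"]
    username: "beats_system"
    password: "<new beats_system password>"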
Open Kibana
http://localhost:5601
Management > Stack Management
Data > Index Management
Check that the mounted logs were ingested; if the index is not visible, click Reload indices.
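You can also check from the shell with the _cat indices API, using the elastic superuser and the index name defined in logstash.conf:

curl -u elastic:<password> "http://localhost:9200/_cat/indices/was-access-log-*?v"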
Kibana > Data Views > Create data view
When creating the data view, enter an Index pattern that matches the format of the mounted logs (with the logstash.conf above, was-access-log-* matches); matching indices are shown on the right.
Logs > Explorer
Instead of All logs, pick the custom data view you just created under Data Views (e.g. was-log).
Set the time range wide enough to cover the dates the logs were written.
If parsing worked, the custom fields will be listed on the left.
Click the + button next to each field to add it.
Open in Discover or Analytics > Discover
Add the fields you want, then click Save in the top right.
Analytics > Visualize Library
Lens
From here you can build whatever charts you like.