elk之docker-compose部署

介绍

单纯记录elk的docker-compose部署模版,用于笔记

elk模版

需要持久化的配置文件可以先启动容器,从容器中拷贝默认配置文件

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
version: '3'
services:
  elasticsearch:
    image: elasticsearch:7.7.0
    container_name: elasticsearch
    restart: always
    environment:
      - "cluster.name=elasticsearch"  # cluster name
      - "discovery.type=single-node"  # start in single-node mode
      - "ES_JAVA_OPTS=-Xms7168m -Xmx7168m"  # JVM heap size
      - TZ=Asia/Shanghai
    volumes:
      - /data/elasticsearch/plugins:/usr/share/elasticsearch/plugins  # plugin mount
      - /data/elasticsearch/data:/usr/share/elasticsearch/data  # data mount
      - /data/elasticsearch/config:/usr/share/elasticsearch/config
    ports:
      # quoted so YAML never reads host:container as a sexagesimal number
      - "9200:9200"
      - "9300:9300"
  kibana:
    image: kibana:7.7.0
    container_name: kibana
    restart: always
    depends_on:
      - elasticsearch  # start kibana after elasticsearch
    environment:
      # fixed: 127.0.0.1 inside the kibana container is kibana itself —
      # reach elasticsearch via its compose service name instead
      - "elasticsearch.hosts=http://elasticsearch:9200"
      - i18n.locale=zh-CN
      - TZ=Asia/Shanghai
    volumes:
      - /data/kibana/kibana.yaml:/usr/share/kibana/config/kibana.yml  # config mount
    ports:
      - "5601:5601"
  logstash:
    image: logstash:7.7.0
    container_name: logstash
    restart: always
    ports:
      - "5044:5044"
    volumes:
      - /data/logstash/pipeline:/usr/share/logstash/pipeline
      - /data/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
    depends_on:
      - elasticsearch
  # DingTalk log-alerting component
  elastalert:
    image: anjia0532/elastalert-docker:v0.2.4
    container_name: elastalert
    restart: always
    environment:
      - "ELASTICSEARCH_HOST=192.168.1.100"
      - "ELASTICSEARCH_USER=elastic"
      - "ELASTICSEARCH_PASSWORD=es密码"
      - "CONTAINER_TIMEZONE=Asia/Shanghai"
      - "TZ=Asia/Shanghai"
      # fixed: list-form environment entries must use '=', not ':' —
      # "VAR:value" is read as a variable named "VAR:value" with no value
      - "ELASTALERT_DINGTALK_SECURITY_TYPE=keyword"
      - "ELASTALERT_DINGTALK_MSGTYPE=text"
      - "ELASTALERT_BUFFER_TIME=10"
      - "ELASTALERT_RUN_EVERY=3"
    volumes:
      - /data/elastalert/rules:/opt/elastalert/rules
      - /data/elastalert/config.yaml:/opt/elastalert/config.yaml

elasticsearch配置

elasticsearch.yml

1
2
3
4
5
6
7
8
9
10
11
# Elasticsearch config mounted into the container (single-node docker setup).
cluster.name: "docker-cluster"
# Bind all interfaces so the host port mapping works.
network.host: 0.0.0.0

# CORS: allow browser-based tools to query the cluster directly.
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: Authorization
# X-Pack security: enables authentication; transport SSL is required once
# security is on. The .p12 file must exist in the mounted config directory.
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: elastic-certificates.p12

kibana配置

kibana.yaml

1
2
3
4
5
6
7
8
# Kibana config mounted at /usr/share/kibana/config/kibana.yml.
server.name: kibana
# "0" is shorthand for 0.0.0.0 — listen on all interfaces.
server.host: "0"
# Reaches elasticsearch by compose service name on the shared network.
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
i18n.locale: "zh-CN"
kibana.index: ".kibana"
xpack.monitoring.ui.container.elasticsearch.enabled: true
# Credentials for the secured cluster — placeholders, replace with real values.
elasticsearch.username: 'es用户'
elasticsearch.password: 'es密码'

logstash配置

logstash.yml

1
2
3
4
# Logstash settings file; monitoring data ships to the secured ES cluster.
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://192.168.1.100:9200" ]
# Placeholders — replace with real credentials.
xpack.monitoring.elasticsearch.username: "es用户"
xpack.monitoring.elasticsearch.password: "es密码"

logstash收集日志示例

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
# Pipeline: receive beats events, parse the custom "|#|"-delimited log format,
# and index into elasticsearch routed by the filebeat-supplied fields.
input {
  beats {
    port => 5044
  }
}

filter {
  # Parse "time|#|level|#|event|#|text|#|message: msg" lines into named
  # fields, then drop the raw message.
  grok {
    match => [ "message","(?<logtime>\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}\.\d{3})\|#\|(?<level>.*)\|#\|(?<event>.*)\|#\|(?<text>.*)\|#\|message: (?<msg>.*)" ]
    remove_field => "message"
  }
  # Replace the default @timestamp with the time parsed from the log line.
  date {
    match => [ "logtime", "YYYY-MM-dd HH:mm:ss.SSS", "ISO8601" ]
    target => "@timestamp"
    timezone => "Asia/Shanghai"
  }
  # Strip beat/agent/cloud metadata that is not worth indexing.
  mutate {
    remove_field => [ "@version","[beat][hostname]","[beat][name]","[beat][version]","[agent][ephemeral_id]","[agent][hostname]","[agent][id]","[agent][type]","[agent][version]","[cloud][availability_zone]","[cloud][instance][id]","[cloud][instance][name]","[cloud][machine][type]","[cloud][provider]","[ecs][version]","tags","[host][name]" ]
  }
}

output {
  if [fields][net] {
    elasticsearch {
      hosts => ["192.168.1.100:9200"]
      # index name: <net>-<app>-<year-month>
      index => "%{[fields][net]}-%{[fields][app]}-%{+YYYY-MM}"
      # fixed: option values must be quoted strings in logstash config —
      # bare 'user => es用户' fails to parse (placeholders, fill in real creds)
      user => "es用户"
      password => "es密码"
    }
  } else {
    elasticsearch {
      hosts => ["192.168.1.100:9200"]
      index => "%{index}-%{+YYYY-MM}"
      user => "es用户"
      password => "es密码"
    }
  }

  # stdout { codec => rubydebug }
}

elastalert配置

钉钉告警

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
# ElastAlert rule: send a DingTalk notification when the `level` field
# is error or fatal.
name: 日志告警(level:error or fatal)
# blacklist: fires when compare_key equals any blacklist entry
type: blacklist
index: es索引(模糊匹配)

# query window (fixed: value must be nested under timeframe, not a sibling key)
timeframe:
  minutes: 5

# match documents whose `level` field is error or fatal
compare_key: level
blacklist:
  - error
  - fatal

# only include these fields: https://elastalert.readthedocs.io/en/latest/ruletypes.html#include
#include: ["text","event","msg"]
# alert message template (continuation lines must be indented past the key)
alert_text: "
  告警时间: {}\n
  告警索引: {}\n
  告警事件: {}\n
  告警等级: {}\n
  告警ID : {}\n
  告警信息: {}
  "

alert_text_type: alert_text_only

alert_text_args:
  - "logtime"
  - "_index"
  - event
  - level
  - text
  - msg

alert:
  - "elastalert_modules.dingtalk_alert.DingTalkAlerter"
# test DingTalk webhook token
dingtalk_access_token: "bb039c2d098a3515dd6f2c61bdawelknfkawenf731a004cf3xxxxxxxx"

邮件告警

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
# ElastAlert rule: email an alert when a matching document appears.
name: test告警
# rule type: frequency — fire after num_events matches within timeframe
type: frequency
# index pattern to query
index: test-*
# monitoring window (fixed: must be nested under timeframe, not a sibling key)
timeframe:
  minutes: 5

num_events: 1

# restrict matching to a single field's content
#query_key: message

filter:
  - query_string:
      query: "\"invalid-sessionkey\""

# identical alerts are not re-sent within 5 minutes
realert:
  minutes: 5

# exponentially grow the realert window while alerts keep firing
# (5 > 10 > 20 > 40 > 60 ... up to the configured maximum);
# it gradually returns to the base realert time once alerts subside
exponential_realert:
  hours: 1

alert_text_type: alert_text_only

alert_subject: "test告警"

alert_text_args:
  - message
  - num_hits
  - num_matches

# alert message template
alert_text: "
  告警信息: {}\n
  num_hits: {}\n
  num_matches: {}
  "

# alert delivery type
alert:
  - "email"

# SMTP sending configuration
smtp_host: "smtp.feishu.cn"
smtp_port: 465
smtp_auth_file: "/opt/elastalert/rules/smtp_auth_file.auth"
from_addr: "123@admin.cn"
smtp_ssl: true

# recipient mailbox
email:
  - "456@admin.cn"

/opt/elastalert/rules/smtp_auth_file.auth配置,用于邮箱认证,去邮箱配置中获取

1
2
# SMTP credentials read via smtp_auth_file — obtain from the mail provider.
user: "123@admin.cn"
password: "kRytBESuxxxxxxx"

飞书告警

参考:https://www.jianshu.com/p/8804348182a8

filebeat日志收集配置

收集主机日志

1
2
3
4
5
6
7
8
9
10
11
12
13
version: "3"
services:
  filebeat:
    container_name: filebeat
    restart: always
    # run as root so filebeat can read the host's mounted log files
    user: root
    environment:
      - TZ=Asia/Shanghai
    image: docker.elastic.co/beats/filebeat:7.7.0
    volumes:
      - /root/.pm2/logs:/var/log/pm2  # host pm2 logs to collect
      - /data/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml  # config
      - /data/filebeat/data:/usr/share/filebeat/data  # registry/state persistence

filebeat.yml配置

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
# Filebeat: collect pm2 log files and ship them to logstash, tagging each
# event with routing fields used downstream to build the ES index name.
filebeat.inputs:
  # fixed: the `input_type` option was renamed to `type` in filebeat 6.0
  - type: log
    enabled: true
    paths:
      - /var/log/pm2/MasterCluster-*.log
    # custom fields (arrive under [fields] in logstash)
    fields:
      net: intranet
      server: "10.0.0.10"
      app: mastercluster

filebeat.config:
  modules:
    path: ${path.config}/modules.d/*.yml
    reload.enabled: false

processors:
  - add_cloud_metadata: ~
  - add_docker_metadata: ~

output.logstash:
  # placeholder — set to the logstash "host:5044" endpoint
  hosts: ["logstash地址和端口"]

本博客所有文章除特别声明外,均采用 CC BY-SA 4.0 协议 ,转载请注明出处!