
ELK Log Collection System

Installing Elasticsearch

  1. Modify the Linux kernel configuration

    vi /etc/sysctl.conf

    Add the parameter:

    vm.max_map_count = 262144

    Apply the configuration:

    sysctl -p
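
    To confirm the setting took effect, you can query it back (a quick sanity check):

    # should print: vm.max_map_count = 262144
    sysctl vm.max_map_count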
  2. Obtain the Elasticsearch configuration files

    Pull the image:

    docker pull elasticsearch:7.10.1

    Start the service:

    docker run -d -it --name elasticsearch --rm -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" elasticsearch:7.10.1

    Copy the configuration files and stop the container:

    mkdir /usr/share/elasticsearch
    docker cp elasticsearch:/usr/share/elasticsearch/config /usr/share/elasticsearch/config
    docker stop elasticsearch

    Create directories for the bind-mounted volumes:

    mkdir /usr/share/elasticsearch/data
    mkdir /usr/share/elasticsearch/config/certs
    chmod g+rwx /usr/share/elasticsearch/data
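
    If Elasticsearch later fails to start with permission errors on the mounted directories, note that the official image runs as uid 1000; handing the directories to that user is one way to resolve it (an extra step beyond the original guide, depending on your host setup):

    chown -R 1000:0 /usr/share/elasticsearch/data /usr/share/elasticsearch/config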
  3. Start the container and change the built-in account passwords

    Modify /usr/share/elasticsearch/config/elasticsearch.yml:

    # Enable security
    xpack.security.enabled: true

    If no Docker network exists yet, create one:

    docker network create -d bridge docker-net

    Start the container:

    docker run -d -it --name elasticsearch --net docker-net -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -v /usr/share/elasticsearch/data:/usr/share/elasticsearch/data -v /usr/share/elasticsearch/config:/usr/share/elasticsearch/config elasticsearch:7.10.1

    Enter the container:

    docker exec -it elasticsearch bash

    Change the built-in account passwords:

    ./bin/elasticsearch-setup-passwords interactive

    • apm_system: dedicated account for the APM system
    • kibana_system: used only by Kibana to connect to and communicate with Elasticsearch; it cannot be used to log in to Kibana
    • kibana: dedicated account for Kibana access
    • logstash_system: dedicated account for Logstash access
    • beats_system: dedicated account for Filebeat (Beats) access
    • remote_monitoring_user: account for remote monitoring
    • elastic: superuser account

    If you protect the node certificates with a password, add the passwords to the keystore:

    ./bin/elasticsearch-keystore add xpack.security.transport.ssl.keystore.secure_password
    ./bin/elasticsearch-keystore add xpack.security.transport.ssl.truststore.secure_password
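
    Security is now on but HTTPS is not enabled yet, so you can confirm the new passwords over plain HTTP (curl prompts for the elastic password):

    curl -u elastic "http://localhost:9200/_cluster/health?pretty"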
  4. Configure Elasticsearch

    Enter the container:

    docker exec -it elasticsearch bash

    Generate the node certificates:

    # Create the CA
    ./bin/elasticsearch-certutil ca
    # When prompted, enter the desired output path
    config/certs/elastic-stack-ca.p12
    # Generate the certificate
    ./bin/elasticsearch-certutil cert --ca config/certs/elastic-stack-ca.p12
    # When prompted, enter the desired output path
    config/certs/elastic-certificates.p12
    # Exit the container
    exit
    # Extract the CA certificate chain from the file (run on the host, in the directory holding the .p12 file)
    openssl pkcs12 -in elastic-certificates.p12 -cacerts -nokeys -out elasticsearch-ca.pem
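
    To sanity-check the extracted CA certificate, inspect its subject and validity dates:

    openssl x509 -in elasticsearch-ca.pem -noout -subject -dates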

    Modify /usr/share/elasticsearch/config/jvm.options:

    # Allocate no more than half of the machine's memory to the heap
    # Keep the minimum heap size (Xms) and maximum heap size (Xmx) identical, so the JVM never resizes the heap at runtime, which is an expensive operation.
    -Xms1g
    -Xmx1g

    Append the following to /usr/share/elasticsearch/config/elasticsearch.yml:

    bootstrap.memory_lock: true
    # Encrypt HTTP client communications
    xpack.security.http.ssl.enabled: true
    xpack.security.http.ssl.keystore.path: certs/elastic-certificates.p12
    xpack.security.http.ssl.truststore.path: certs/elastic-certificates.p12
    # Encrypt traffic between cluster nodes, and between Elasticsearch and Kibana
    xpack.security.transport.ssl.enabled: true
    xpack.security.transport.ssl.keystore.path: certs/elastic-certificates.p12
    xpack.security.transport.ssl.verification_mode: certificate
    xpack.security.transport.ssl.truststore.path: certs/elastic-certificates.p12
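
    Note that bootstrap.memory_lock: true only works if the container is allowed to lock memory. A sketch of the run command from step 3 with the usual Docker ulimit added; if memory locking fails on restart, recreate the container with this flag:

    docker run -d -it --name elasticsearch --net docker-net -p 9200:9200 -p 9300:9300 --ulimit memlock=-1:-1 -e "discovery.type=single-node" -v /usr/share/elasticsearch/data:/usr/share/elasticsearch/data -v /usr/share/elasticsearch/config:/usr/share/elasticsearch/config elasticsearch:7.10.1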

    Set file permissions:

    chmod -R 777 /usr/share/elasticsearch/config/certs

    Restart Elasticsearch:

    docker restart elasticsearch
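
    After the restart, Elasticsearch should answer only over HTTPS. A quick check against the CA extracted earlier (paths and account as configured above):

    curl --cacert /usr/share/elasticsearch/config/certs/elasticsearch-ca.pem -u elastic "https://localhost:9200/_cluster/health?pretty"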

Installing Kibana

  1. Configure Kibana

    Pull the image:

    docker pull kibana:7.10.1

    Start the container:

    docker run -d -it --name kibana --rm -p 5601:5601 kibana:7.10.1

    Copy the configuration files and stop the container:

    mkdir /usr/share/kibana
    docker cp kibana:/usr/share/kibana/config /usr/share/kibana/config
    docker stop kibana

    Modify /usr/share/kibana/config/kibana.yml:

    server.name: kibana
    server.host: "0"
    server.ssl.enabled: true
    server.ssl.keystore.path: "/usr/share/elasticsearch/config/certs/elastic-certificates.p12"
    server.ssl.keystore.password: ""
    elasticsearch.hosts: ["https://IP:9200"]
    elasticsearch.username: "kibana_system"
    elasticsearch.password: "<kibana_system password set during the Elasticsearch install>"
    elasticsearch.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
    elasticsearch.ssl.truststore.password: ""
    elasticsearch.ssl.verificationMode: certificate
    elasticsearch.ssl.certificateAuthorities: ["/usr/share/elasticsearch/config/certs/elasticsearch-ca.pem"]
    monitoring.ui.container.elasticsearch.enabled: true
    xpack.encryptedSavedObjects.encryptionKey: <32-character string>
    xpack.reporting.capture.browser.chromium.disableSandbox: false
    # Use the Chinese locale
    i18n.locale: "zh-CN"
  2. Start the container

    docker run -d -it --name kibana --net docker-net -p 5601:5601 -v /usr/share/kibana/config:/usr/share/kibana/config -v /usr/share/elasticsearch/config/certs:/usr/share/elasticsearch/config/certs kibana:7.10.1
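
    Kibana takes a short while to come up; you can poll its status endpoint (-k skips verification of the self-signed certificate):

    curl -k https://localhost:5601/api/status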
  3. Configure Kibana in the management UI
    1. Set authentication credentials for Logstash

      In Kibana, under Management > Security > Roles, create a logstash_writer role.

      Then, under Management > Security > Users, create a logstash_internal user and assign it the logstash_writer role. An equivalent API-based setup is sketched below.
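
      A sketch of creating logstash_writer via the Elasticsearch security API instead of the UI, using the privileges suggested by the Logstash security documentation and assuming the default logstash-* index pattern:

      curl -k -u elastic -X POST "https://localhost:9200/_security/role/logstash_writer" -H 'Content-Type: application/json' -d'
      {
        "cluster": ["manage_index_templates", "monitor", "manage_ilm"],
        "indices": [{
          "names": ["logstash-*"],
          "privileges": ["write", "create", "create_index", "manage", "manage_ilm"]
        }]
      }'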

    2. Configure the date format

      In Kibana, under Management > Advanced Settings > General, set Date format and Date with nanoseconds format to yyyy-MM-DD HH:mm:ss.SSS.

Installing Logstash

  1. Configure Logstash

    Pull the image:

    docker pull logstash:7.10.1

    Start the container:

    # Protect access to the Logstash keystore by storing its password in an environment variable named LOGSTASH_KEYSTORE_PASS
    docker run -d -it --name logstash --rm -p 5047:5047 -e LOGSTASH_KEYSTORE_PASS=<password> logstash:7.10.1

    Create the keystore and store the secrets:

    # Enter the container
    docker exec -it logstash bash
    # Create the keystore
    ./bin/logstash-keystore create
    # Add a key; when prompted, enter the logstash_internal user's password
    ./bin/logstash-keystore add ES_PWD
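
    Listing the stored keys confirms that ES_PWD made it into the keystore:

    ./bin/logstash-keystore list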

    Copy the configuration files and stop the container:

    mkdir /usr/share/logstash
    docker cp logstash:/usr/share/logstash/config /usr/share/logstash/config
    docker cp logstash:/usr/share/logstash/pipeline /usr/share/logstash/pipeline
    docker stop logstash
    mkdir /usr/share/logstash/data
    chmod 777 /usr/share/logstash/data

    Modify /usr/share/logstash/pipeline/logstash.conf:

    input {
      rabbitmq {
        durable => true
        exchange => "logs"
        exchange_type => "topic"
        key => "logstash"
        host => "IP"
        port => 5672
        user => "<username>"
        password => "<password>"
        # custom virtual host
        vhost => "logs"
      }
    }

    filter {
      if [message] {
        kv { }
      }
    }

    output {
      elasticsearch {
        hosts => ["https://IP:9200"]
        user => "logstash_internal"
        password => "${ES_PWD}"
        ssl => true
        ssl_certificate_verification => false
        truststore => '/usr/share/elasticsearch/config/certs/elastic-certificates.p12'
        truststore_password => ""
      }
    }
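
    Once the container from the next step is running, you can ask Logstash to validate the pipeline syntax; --path.data points at a scratch directory so the check does not clash with the running instance's data directory:

    docker exec -it logstash bin/logstash -f /usr/share/logstash/pipeline/logstash.conf --config.test_and_exit --path.data /tmp/logstash-test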

    To create the custom virtual host logs on the RabbitMQ side:
    Create it: rabbitmqctl add_vhost logs
    Grant the user permissions: rabbitmqctl set_permissions -p logs <username> ".*" ".*" ".*"
    For the input.rabbitmq options, see the official documentation.

  2. Start the container

    docker run -d -it --name logstash --net docker-net -p 5044:5044 -e LOGSTASH_KEYSTORE_PASS=<password> -v /usr/share/logstash/config:/usr/share/logstash/config -v /usr/share/logstash/pipeline:/usr/share/logstash/pipeline -v /usr/share/logstash/data:/usr/share/logstash/data logstash:7.10.1

Configuring Spring Boot

  1. Configure logback-spring.xml

    <?xml version="1.0" encoding="UTF-8"?>
    <configuration>

        <springProperty scope="context" name="logstash.address" source="logstash.address"/>
        <springProperty scope="context" name="spring.cloud.nacos.discovery.ip" source="spring.cloud.nacos.discovery.ip"/>
        <springProperty scope="context" name="spring.application.name" source="spring.application.name"/>
        <springProperty scope="context" name="logging.level.root" source="logging.level.root"/>
        <springProperty scope="context" name="logging.path" source="logging.path"/>
        <springProperty scope="context" name="mq.host" source="spring.rabbitmq.host"/>
        <springProperty scope="context" name="mq.port" source="spring.rabbitmq.port"/>
        <springProperty scope="context" name="mq.username" source="spring.rabbitmq.username"/>
        <springProperty scope="context" name="mq.password" source="spring.rabbitmq.password"/>

        <!-- Ship log events to RabbitMQ as JSON -->
        <appender name="LOGSTASH" class="org.springframework.amqp.rabbit.logback.AmqpAppender">
            <layout>
                <pattern><![CDATA[{"service":"${spring.application.name}",
                "serviceIp":"${spring.cloud.nacos.discovery.ip}",
                "dateTime":"%d{yyyy-MM-dd HH:mm:ss.SSS}",
                "level":"%-5level",
                "thread":"%thread",
                "requestId":"%X{requestId}",
                "logger":"%logger{50}",
                "message":"%replace(%msg){'\"','\\\"'}",
                "stack_trace":"%replace(%exception){'[\r\t\n]',''}"}]]></pattern>
            </layout>
            <host>${mq.host}</host>
            <port>${mq.port}</port>
            <username>${mq.username}</username>
            <password>${mq.password}</password>
            <applicationId>${spring.application.name}</applicationId>
            <declareExchange>true</declareExchange>
            <routingKeyPattern>logstash</routingKeyPattern>
            <charset>UTF-8</charset>
            <contentType>application/json</contentType>
            <virtualHost>logs</virtualHost>
        </appender>

        <appender name="ASYNC_LOGSTASH" class="ch.qos.logback.classic.AsyncAppender">
            <discardingThreshold>0</discardingThreshold>
            <queueSize>256</queueSize>
            <appender-ref ref="LOGSTASH"/>
        </appender>

        <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
            <file>${logging.path}</file>
            <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
                <FileNamePattern>${logging.path}.%d{yyyy-MM-dd}.%i.gz</FileNamePattern>
                <MaxHistory>180</MaxHistory>
                <maxFileSize>10MB</maxFileSize>
            </rollingPolicy>
            <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
                <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
                <charset>UTF-8</charset>
            </encoder>
        </appender>

        <appender name="ASYNC_FILE" class="ch.qos.logback.classic.AsyncAppender">
            <discardingThreshold>0</discardingThreshold>
            <queueSize>256</queueSize>
            <appender-ref ref="FILE"/>
        </appender>

        <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
            <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
                <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
            </encoder>
        </appender>

        <root level="${logging.level.root}">
            <appender-ref ref="ASYNC_LOGSTASH"/>
            <appender-ref ref="ASYNC_FILE"/>
            <appender-ref ref="CONSOLE"/>
        </root>
    </configuration>
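
    Once the application is up and logging, the logs exchange should exist on the broker and the queue bound by Logstash should be draining. A quick check on the RabbitMQ host:

    rabbitmqctl list_exchanges -p logs name type
    rabbitmqctl list_queues -p logs name messages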
  2. Logging aspect

    // Hypothetical surrounding class: the original snippet shows only the advice methods.
    // RequestConstants and ClientUtil are the project's own helpers; "***" is the
    // author's placeholder for the actual pointcut expression.
    @Aspect
    @Component
    public class ApiLogAspect {

        private static final Logger logger = LoggerFactory.getLogger(ApiLogAspect.class);

        @Autowired
        private HttpServletRequest httpServletRequest;

        @Before("***")
        public void apiLogDoBefore(JoinPoint joinPoint) {
            // Put the request ID into the MDC so it shows up as %X{requestId} in every log line
            MDC.put(RequestConstants.REQUEST_ID, httpServletRequest.getHeader(RequestConstants.X_REQUEST_ID));
            logger.info("ip={} class={} method={} params={}",
                    ClientUtil.getClientIp(httpServletRequest),
                    joinPoint.getTarget().getClass().getName(),
                    joinPoint.getSignature().getName(),
                    JSON.toJSONString(joinPoint.getArgs(), SerializerFeature.UseSingleQuotes));
        }

        @AfterReturning(returning = "ret", value = "***")
        public void doAfterReturning(Object ret) {
            // Log the return value, then clear the MDC so the request ID does not leak to the next request
            logger.info(JSON.toJSONString(ret, SerializerFeature.UseSingleQuotes));
            MDCAdapter mdcAdapter = MDC.getMDCAdapter();
            if (Objects.nonNull(mdcAdapter)) {
                mdcAdapter.clear();
            }
        }
    }