Gitee link
Spring Boot version: 2.3.4.RELEASE
Directory
- Basic logging functions and custom logging
- Log link Tracing
- EFK log collection system
- A lightweight log collection tool in Golang
Basic logging functions and custom logging
Add the logstash-logback-encoder dependency:
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>5.3</version>
</dependency>
Set the log save path in the application configuration file:
server:
  port: 8888

# Log save path
logging:
  file:
    path: _logs/mylog-${server.port}.logs
Logback configuration files
Spring Boot passes the logging.file.path value above to Logback as the LOG_PATH property, which the files below reference.
logback-spring.xml:
<configuration>
    <!-- Default LOG_PATH; without it a LOG_PATH_IS_UNDEFINED folder is created in the project directory -->
    <property name="LOG_PATH" value="${LOG_PATH:-${java.io.tmpdir:-/logs}}"/>
    <!-- Import Spring Boot's default configuration file defaults.xml -->
    <include resource="org/springframework/boot/logging/logback/defaults.xml"/>
    <!-- Import Spring Boot's console appender console-appender.xml -->
    <include resource="org/springframework/boot/logging/logback/console-appender.xml"/>
    <!-- Import the file appenders defined in logback-spring-file-level.xml -->
    <include resource="logback-spring-file-level.xml"/>
    <!-- Set the root logger level to INFO and attach both the console and the file appenders -->
    <root level="INFO">
        <!-- Without this line the console has no output, only the log files -->
        <appender-ref ref="CONSOLE"/>
        <appender-ref ref="INFO_FILE"/>
        <appender-ref ref="WARN_FILE"/>
        <appender-ref ref="ERROR_FILE"/>
    </root>
    <!-- JMX support, so the Logback configuration can be managed dynamically -->
    <jmxConfigurator/>
</configuration>
logback-spring-file-level.xml:
<included>
    <!-- INFO level log -->
    <appender name="INFO_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <!-- %i is the index of the rolled-over log file -->
            <fileNamePattern>${LOG_PATH}.INFOLevel.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <!-- maxFileSize: maximum size of a single log file -->
            <!-- After testing, maxHistory refers to the number of days to keep, not the number of files -->
            <maxFileSize>50MB</maxFileSize>
            <maxHistory>15</maxHistory>
            <totalSizeCap>50MB</totalSizeCap>
        </rollingPolicy>
        <!-- Log level filter (accept INFO level only) -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>INFO</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <!-- Output format -->
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{"yyyy-MM-dd HH:mm:ss.SSS"} %-5level -[%X{traceId}] - %msg%n</pattern>
        </encoder>
    </appender>
    <!-- WARN level log -->
    <appender name="WARN_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <!-- %i is the index of the rolled-over log file -->
            <fileNamePattern>${LOG_PATH}.WARNLevel.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <!-- maxFileSize: maximum size of a single log file -->
            <maxFileSize>50MB</maxFileSize>
            <maxHistory>15</maxHistory>
            <totalSizeCap>50MB</totalSizeCap>
        </rollingPolicy>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <!-- Filter level -->
            <level>WARN</level>
            <!-- onMatch: logs that match the level. ACCEPT: process immediately -->
            <onMatch>ACCEPT</onMatch>
            <!-- onMismatch: logs that do not match the level. DENY: reject immediately -->
            <onMismatch>DENY</onMismatch>
        </filter>
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{"yyyy-MM-dd HH:mm:ss.SSS"} %-5level -[%X{traceId}] - %msg%n</pattern>
        </encoder>
    </appender>
    <!-- ERROR level log -->
    <appender name="ERROR_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <!-- %i is the index of the rolled-over log file -->
            <fileNamePattern>${LOG_PATH}.ERRORLevel.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <!-- maxFileSize: maximum size of a single log file -->
            <maxFileSize>50MB</maxFileSize>
            <maxHistory>15</maxHistory>
            <totalSizeCap>50MB</totalSizeCap>
            <!--<cleanHistoryOnStart>true</cleanHistoryOnStart>-->
        </rollingPolicy>
        <!-- Filter logs at the specified level -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <!-- Filter level -->
            <level>ERROR</level>
            <!-- onMatch: logs that match the level. ACCEPT: process immediately -->
            <onMatch>ACCEPT</onMatch>
            <!-- onMismatch: logs that do not match the level. DENY: reject immediately -->
            <onMismatch>DENY</onMismatch>
        </filter>
        <!-- Log output format -->
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{"yyyy-MM-dd HH:mm:ss.SSS"} %-5level - [%X{traceId}] - %msg%n</pattern>
        </encoder>
    </appender>
    <!-- Custom log -->
    <appender name="CUSTOM_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <!-- %i is the index of the rolled-over log file -->
            <fileNamePattern>${LOG_PATH}.MYLOGGERLevel.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <!-- maxFileSize: maximum size of a single log file -->
            <!-- After testing, maxHistory refers to the number of days to keep, not the number of files -->
            <maxFileSize>300MB</maxFileSize>
            <maxHistory>15</maxHistory>
            <totalSizeCap>300MB</totalSizeCap>
        </rollingPolicy>
        <!-- Log level filter (accept INFO level only) -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>INFO</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <!-- Output format -->
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{"yyyy-MM-dd HH:mm:ss.SSS"}\t%X{traceId}\t%msg%n</pattern>
        </encoder>
    </appender>
    <!-- The custom logger is not bound to root (additivity="false"), so its output goes only to the appender listed here -->
    <logger name="my_logger" additivity="false">
        <appender-ref ref="CUSTOM_FILE"/>
    </logger>
</included>
The comments in the configuration files are fairly detailed and can be adjusted as needed. Note the "traceId" in the patterns: it is not something Logback provides out of the box; I added it for log tracing, which is described later.
Write an interface to test it:
package com.cc.controller;

import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

import java.util.UUID;
import java.util.logging.Logger;

@RestController
public class TestController {

    Logger LOGGER = Logger.getLogger(this.getClass().toString());
    Logger MyLogger = Logger.getLogger("my_logger");

    @GetMapping("/w")
    public String logWarning() {
        LOGGER.warning("This is a warning log: " + UUID.randomUUID().toString().replace("-", ""));
        return "Output warning log";
    }

    @GetMapping("/e")
    public String logError() {
        LOGGER.severe("This is an error log: " + UUID.randomUUID().toString().replace("-", ""));
        return "Output error log";
    }

    @GetMapping("/m")
    public String logMyLogger() {
        MyLogger.info("This is a custom log: " + UUID.randomUUID().toString().replace("-", ""));
        return "Output custom log";
    }
}
Start the project and call each test interface; you will then see four log files in the _logs folder: the INFO log with the startup information, the WARN log for warnings, the ERROR log for errors, and the custom MYLOGGER log.
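The controller uses java.util.logging, which Spring Boot's default logging setup routes to Logback through the jul-to-slf4j bridge, so these logs still reach the appenders configured above. If you prefer the SLF4J API directly, a minimal sketch with the same logger names could look like the following (the class name and the /w2 and /m2 paths are hypothetical, chosen only to avoid clashing with the controller above):

package com.cc.controller;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

import java.util.UUID;

// Hypothetical SLF4J variant of the controller above; logger names match the Logback config.
@RestController
public class Slf4jTestController {

    // Class-based logger: handled by the root appenders (console + level files)
    private static final Logger LOGGER = LoggerFactory.getLogger(Slf4jTestController.class);
    // Named logger: matches <logger name="my_logger"> and goes only to CUSTOM_FILE
    private static final Logger MY_LOGGER = LoggerFactory.getLogger("my_logger");

    @GetMapping("/w2")
    public String logWarningSlf4j() {
        LOGGER.warn("This is a warning log: {}", UUID.randomUUID().toString().replace("-", ""));
        return "Output warning log";
    }

    @GetMapping("/m2")
    public String logMyLoggerSlf4j() {
        MY_LOGGER.info("This is a custom log: {}", UUID.randomUUID().toString().replace("-", ""));
        return "Output custom log";
    }
}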
Log link Tracing
We assign a traceId to each HTTP request. The traceId runs through the entire request, and every log written while handling the request records it, which helps to quickly locate problems and filter out irrelevant logs.
For tidiness, let's define a constant class first:
package com.cc.config.logback;

/**
 * Logback constant definition
 * @author cc
 * @date 2021-07-12
 */
public interface LogbackConstant {
    String TRACT_ID = "traceId";
}
Then there is the logback filter:
package com.cc.config.logback;

import org.slf4j.MDC;
import org.springframework.stereotype.Component;

import javax.servlet.*;
import java.io.IOException;
import java.util.UUID;

/**
 * Log trace id filter
 * @author cc
 * @date 2021-07-12
 */
@Component
public class LogbackFilter implements Filter {

    @Override
    public void init(FilterConfig filterConfig) {}

    @Override
    public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
        try {
            // Put a traceId into the MDC so every log written during this request can print it
            MDC.put(LogbackConstant.TRACT_ID, UUID.randomUUID().toString().replace("-", ""));
            chain.doFilter(request, response);
        } finally {
            // Clean up so the traceId does not leak into other requests handled by the same thread
            MDC.remove(LogbackConstant.TRACT_ID);
        }
    }

    @Override
    public void destroy() {}
}
We use a UUID with the dashes stripped as the traceId; call the test interfaces again and you will see the traceId attached to each log line.
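The @Component annotation registers the filter for all requests. If you ever need to restrict it to certain URL patterns or control its order, an explicit registration sketch along these lines could be used instead (a hypothetical configuration class, not part of the original project; remove @Component from LogbackFilter if you adopt it, otherwise the filter is registered twice):

package com.cc.config.logback;

import org.springframework.boot.web.servlet.FilterRegistrationBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

// Hypothetical explicit registration of the traceId filter.
@Configuration
public class LogbackFilterConfig {

    @Bean
    public FilterRegistrationBean<LogbackFilter> logbackFilterRegistration() {
        FilterRegistrationBean<LogbackFilter> registration = new FilterRegistrationBean<>(new LogbackFilter());
        registration.addUrlPatterns("/*");          // apply to all requests
        registration.setOrder(Integer.MIN_VALUE);   // run as early as possible so every log gets a traceId
        return registration;
    }
}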
EFK log collection system
EFK stands for Elasticsearch, Filebeat, and Kibana. There is also Logstash, but it is not as convenient as Filebeat and its content filtering is not needed here, so it only gets a brief introduction.
First, the EFK workflow:
- The Spring Boot application logs are saved in the specified path
- Filebeat detects changes in the log files and sends the content to Elasticsearch
- If you use Logstash, the content is sent to Logstash first
- Logstash filters and analyzes the content, then sends it to Elasticsearch; this processing makes the log data more detailed in Kibana
- Access the Kibana visual interface to view and query the Elasticsearch log data
Environment preparation
Since my EFK environment runs in Docker on a virtual machine while my development machine is Windows, I had two options for letting the Filebeat container detect changes to my log files:
- Deploy the project as a JAR package running on Linux in the virtual machine, with the log save path set to the watched location
- Create a shared folder between the local host and the VM
Because VMware makes it easy to create shared folders and this lets me keep updating code in real time in the local development environment, I chose the shared-folder option.
Creating the containers
It is assumed that readers have some familiarity with Docker; a full introduction to Docker would be long and off-topic, so I won't elaborate here.
# Create a network so the containers can reach each other by container name
docker network create mynetwork

# Elasticsearch
docker run --name myes -p 9200:9200 -p 9300:9300 -itd --restart=always \
  -v /etc/localtime:/etc/localtime \
  -v /home/mycontainers/myes/data:/data \
  --net mynetwork \
  -e "discovery.type=single-node" \
  -e "ES_JAVA_OPTS=-Xms256m -Xmx256m" \
  elasticsearch:7.12.0

# Filebeat
docker run --name myfilebeat -itd --restart=always \
  -v /etc/localtime:/etc/localtime \
  -v /mnt/hgfs/myshare/_logs:/data \
  -v /home/filebeat.yml:/usr/share/filebeat/filebeat.yml \
  --net mynetwork \
  elastic/filebeat:7.12.0

# Kibana
docker run --name mykibana -p 5601:5601 -itd --restart=always \
  -v /etc/localtime:/etc/localtime \
  --net mynetwork \
  -m 512m \
  kibana:7.12.0
Note that the Filebeat container's log volume is mapped to my shared folder, so your path will probably differ.
For convenience, we also map a filebeat.yml configuration file directly into the Filebeat container, which makes later modifications easier.
filebeat.yml:
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /data/*.log
output.elasticsearch:
  hosts: ['myes']
  index: "filebeat-%{+yyyy-MM-dd}"
setup.template.name: "filebeat"
setup.template.pattern: "filebeat-*"
processors:
  - drop_fields:
      fields: ["log", "input", "host", "agent", "ecs"]
Configuration file description:
filebeat.inputs:                      // Input sources
- type: log                           // Note that the type is log
  enabled: true                       // Enabled
  paths:                              // Paths to watch
    - /data/*.log                     // Log files in the /data folder of the Filebeat container
output.elasticsearch:                 // Output destination: Elasticsearch (ES)
  hosts: ['myes']                     // ES address; since the containers share a network, the container name is enough
  index: "filebeat-%{+yyyy-MM-dd}"    // Custom ES index
setup.template.name: "filebeat"       // Required when a custom index is configured
setup.template.pattern: "filebeat-*"  // Required when a custom index is configured
processors:                           // Processors
  - drop_fields:                      // Drop the specified fields, which would otherwise be sent to ES by default
      fields: ["log", "input", "host", "agent", "ecs"]
Connect Kibana to Elasticsearch
Enter the Kibana container, modify the configuration file and restart:
docker exec -it mykibana bash
cd config/
vi kibana.yml
The original content:
server.name: kibana
server.host: "0"
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
Modified to:
server.name: kibana
server.host: "0"
elasticsearch.hosts: [ "http://myes:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
If your ES container is named elasticsearch, no change is needed.
Verify that the containers work
Elasticsearch: visit http://ip:9200; if JSON content is returned, it is working.
Kibana: visit http://ip:5601; if the visual UI appears, it is working. If an error about Elasticsearch is reported, the connection is faulty: check that the elasticsearch.hosts value in the configuration file is correct and that the containers on the same network can communicate; while debugging, you can ping one container from inside another to confirm.
Filebeat: verified later through the Kibana interface.
The test results
Call the localhost:8888/w, /e, or /m interfaces to write log content to the specified location; Filebeat then detects the file changes and pushes them to Elasticsearch.
- If the log files appear in the specified directory, saving logs works.
- Open the Kibana visualization panel at ip:5601, click the three-bar icon in the top left to open the menu, and go to Analytics > Discover. The first time, you need to create an index pattern; since we set the index to filebeat-* in filebeat.yml, typing "filebeat" shows a match. In step 2, select the time filter field and confirm. You can now see the logs flowing logback -> Filebeat -> Elasticsearch and retrieve the data through the Kibana panel.
At this point, the EFK entry level deployment is complete.
A lightweight log collection tool in Golang
EFK is convenient to use, has a nice interface, and supports distribution, so it is genuinely pleasant to work with. However, my server's memory is limited and EFK consumes nearly 1 GB, so I chose another solution: a small service written in Golang that uses the Linux grep command to extract matching content from the log files. The advantage is the low memory footprint of a Golang service; the disadvantage is lower search efficiency, but it suits my small project.
The Golang code is attached below. The principle is very simple: start a web service with the Gin framework, then call a shell script to extract the content:
package main

import (
    "fmt"
    "os/exec"

    "github.com/gin-gonic/gin"
)

func main() {
    runServer()
}

func runServer() {
    r := gin.Default()
    r.GET("/log", func(c *gin.Context) {
        id := c.Query("id")
        result := runScript("./findLog.sh " + id)
        c.Header("Content-Type", "text/html; charset=utf-8")
        c.String(200, result)
    })
    r.Run(":18085")
}

func runScript(path string) string {
    cmd := exec.Command("/bin/bash", "-c", path)
    output, err := cmd.Output()
    if err != nil {
        fmt.Printf("Execute Shell:%s failed with error:%s", path, err.Error())
        return err.Error()
    }
    fmt.Printf("Execute Shell:%s finished with output:\n%s", path, string(output))
    return string(output)
}
findLog.sh:
cd /Users/chen/Desktop/mycontainers/mall-business/data/logs
id=$1
grep $id *.log
Just build the Golang app for your target platform and run it.
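With the service running, requesting http://ip:18085/log?id=&lt;traceId&gt; (the port and route defined in the Go code above) runs findLog.sh and returns every log line containing that traceId, which pairs nicely with the traceId written by the Logback patterns earlier.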