# ELK-Based Log Analysis and Practice on Kubernetes for the Consumer Leasing Project


# 1. Create the Namespace and Secrets for ELK

# kubectl create ns logging
# kubectl create secret docker-registry harbor-admin -n logging --docker-server=registry.cn-hangzhou.aliyuncs.com --docker-username=xyapples@163.com --docker-password=passwd
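
A quick sanity check (optional) confirms that the namespace and the image-pull Secret exist before anything is deployed into them:

# kubectl get ns logging
# kubectl get secret harbor-admin -n logging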

# 2. Delivering the Zookeeper Cluster to K8S

# 2.1 Building the ZK Cluster Image
# 2.1.1 Dockerfile
# cat Dockerfile 
FROM openjdk:8-jre

# 1. Copy the Zookeeper tarball and configuration file
ENV VERSION=3.8.4
ADD ./apache-zookeeper-${VERSION}-bin.tar.gz /
ADD ./zoo.cfg /apache-zookeeper-${VERSION}-bin/conf

# 2. Rename the Zookeeper directory
RUN mv /apache-zookeeper-${VERSION}-bin /zookeeper

# 3. Copy the entrypoint startup script
ADD ./entrypoint.sh /entrypoint.sh

# 4. Expose the Zookeeper ports
EXPOSE 2181 2888 3888

# 5. Run the startup script
CMD ["/bin/bash","/entrypoint.sh"]
# 2.1.2 zoo.cfg
# cat zoo.cfg 
# Heartbeat interval, in milliseconds, maintained between servers and between clients and servers
tickTime={ZOOK_TICKTIME}

# Maximum number of heartbeats (10 * tickTime) allowed while a follower (F) establishes its initial connection to the leader (L)
initLimit={ZOOK_INIT_LIMIT}

# Maximum number of heartbeats (5 * tickTime) tolerated between a request and its reply between followers and the leader
syncLimit={ZOOK_SYNC_LIMIT}

# Data directory
dataDir={ZOOK_DATA_DIR}

# Log directory
dataLogDir={ZOOK_LOG_DIR}

# Client connection port
clientPort={ZOOK_CLIENT_PORT}

# Maximum number of client connections; adjust to your environment, default 60
maxClientCnxns={ZOOK_MAX_CLIENT_CNXNS}

# Allow clients to query the current state and related information of the ZooKeeper service (four-letter commands)
4lw.commands.whitelist=*

# The three node entries go here, format: server.<id>=<host>:<leader-follower port>:<election port>
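
For reference, with the ZOOK_SERVERS value injected later by the StatefulSet, the three entries appended by the entrypoint script would look like this (the hostnames assume the zookeeper-svc headless Service in the logging namespace):

server.1=zookeeper-0.zookeeper-svc.logging.svc.cluster.local:2888:3888
server.2=zookeeper-1.zookeeper-svc.logging.svc.cluster.local:2888:3888
server.3=zookeeper-2.zookeeper-svc.logging.svc.cluster.local:2888:3888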
# 2.1.3 entrypoint
# cat entrypoint.sh 
#!/bin/bash
# 1. Variables
ZOOK_BIN_DIR=/zookeeper/bin
ZOOK_CONF_DIR=/zookeeper/conf/zoo.cfg

# 2. Substitute the placeholders in the configuration file
sed -i s@{ZOOK_TICKTIME}@${ZOOK_TICKTIME:-2000}@g ${ZOOK_CONF_DIR}
sed -i s@{ZOOK_INIT_LIMIT}@${ZOOK_INIT_LIMIT:-10}@g ${ZOOK_CONF_DIR}
sed -i s@{ZOOK_SYNC_LIMIT}@${ZOOK_SYNC_LIMIT:-5}@g ${ZOOK_CONF_DIR}
sed -i s@{ZOOK_DATA_DIR}@${ZOOK_DATA_DIR:-/data}@g ${ZOOK_CONF_DIR}
sed -i s@{ZOOK_LOG_DIR}@${ZOOK_LOG_DIR:-/logs}@g ${ZOOK_CONF_DIR}
sed -i s@{ZOOK_CLIENT_PORT}@${ZOOK_CLIENT_PORT:-2181}@g ${ZOOK_CONF_DIR}
sed -i s@{ZOOK_MAX_CLIENT_CNXNS}@${ZOOK_MAX_CLIENT_CNXNS:-60}@g ${ZOOK_CONF_DIR}

# 3. Append the ZK cluster member list; it is injected later through the ZOOK_SERVERS ENV variable
for server in ${ZOOK_SERVERS}
do
	echo ${server} >> ${ZOOK_CONF_DIR}
done

# 4. Create the myid file in the data directory and write the corresponding id into it
ZOOK_MYID=$(( $(hostname | sed 's#.*-##g') + 1 ))
echo ${ZOOK_MYID:-99} > ${ZOOK_DATA_DIR:-/data}/myid

# 5. Run Zookeeper in the foreground
cd ${ZOOK_BIN_DIR}
./zkServer.sh start-foreground
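
To illustrate the myid logic above: the script strips everything up to the last "-" in the Pod hostname and adds 1, so StatefulSet ordinal 0 becomes myid 1, ordinal 1 becomes 2, and so on. A quick local check of the expression:

# echo $(( $(echo zookeeper-2 | sed 's#.*-##g') + 1 ))
3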
# 2.1.4 Build the image and push it to the registry
# wget https://dlcdn.apache.org/zookeeper/zookeeper-3.8.4/apache-zookeeper-3.8.4-bin.tar.gz
# docker build -t registry.cn-hangzhou.aliyuncs.com/kubernetes_public/zookeeper:3.8.4 .
# docker push  registry.cn-hangzhou.aliyuncs.com/kubernetes_public/zookeeper:3.8.4
# 2.2 Deploying Zookeeper to K8S
# 2.2.1 zookeeper-headless
# cat 01-zookeeper-headless.yaml 
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-svc
  namespace: logging
spec:
  clusterIP: None
  selector:
    app: zookeeper
  ports:
  - name: client
    port: 2181
    targetPort: 2181
  - name: leader-follower
    port: 2888
    targetPort: 2888
  - name: selection
    port: 3888
    targetPort: 3888
# 2.2.2 zookeeper-sts
[root@k8s-master01 01-zookeeper]# vim 02-zookeeper-sts.yaml 
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zookeeper               
  namespace: logging
spec:
  serviceName: "zookeeper-svc"
  replicas: 3
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values: ["zookeeper"]
              topologyKey: "kubernetes.io/hostname"
      imagePullSecrets:
      - name: harbor-admin
      containers:
      - name: zookeeper
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/zookeeper:3.8.4           
        imagePullPolicy: Always
        ports:
        - name: client
          containerPort: 2181
        - name: leader-follower
          containerPort: 2888
        - name: selection
          containerPort: 3888
        env:
        - name: ZOOK_SERVERS
          value: "server.1=zookeeper-0.zookeeper-svc.logging.svc.cluster.local:2888:3888 server.2=zookeeper-1.zookeeper-svc.logging.svc.cluster.local:2888:3888 server.3=zookeeper-2.zookeeper-svc.logging.svc.cluster.local:2888:3888"
        readinessProbe:         # readiness probe: the Pod receives no traffic until it is ready
          exec:
            command:
            - "/bin/bash"
            - "-c"
            - '[[ "$(/zookeeper/bin/zkServer.sh status 2>/dev/null|grep 2181)" ]] && exit 0 || exit 1'
          initialDelaySeconds: 5
        livenessProbe:         # liveness probe: if it fails, the container is restarted according to the restart policy
          exec:
            command:
            - "/bin/bash"
            - "-c"
            - '[[ "$(/zookeeper/bin/zkServer.sh status 2>/dev/null|grep 2181)" ]] && exit 0 || exit 1'
          initialDelaySeconds: 5
        volumeMounts:
        - name: data
          mountPath: /data
          subPath: data
        - name: data
          mountPath: /logs
          subPath: logs
        - name: tz-config
          mountPath: /usr/share/zoneinfo/Asia/Shanghai
        - name: tz-config
          mountPath: /etc/localtime
        - name: timezone
          mountPath: /etc/timezone
      volumes:
      - name: tz-config
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
          type: ""
      - name: timezone
        hostPath:
          path: /etc/timezone
          type: ""
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteMany"]
      storageClassName: "nfs-storage"
      resources:
        requests:
          storage: 5Gi
# 2.2.3 Apply the manifests
[root@k8s-master01 01-zookeeper]# kubectl apply -f 01-zookeeper-headless.yaml 
[root@k8s-master01 01-zookeeper]# kubectl apply -f 02-zookeeper-sts.yaml
[root@k8s-master01 01-zookeeper]# kubectl get pods -n logging
NAME          READY   STATUS    RESTARTS   AGE
zookeeper-0   1/1     Running   0          17m
zookeeper-1   1/1     Running   0          14m
zookeeper-2   1/1     Running   0          11m
# 2.2.4 Check the Zookeeper cluster status
# for i in 0 1 2 ; do kubectl exec zookeeper-$i -n logging -- /zookeeper/bin/zkServer.sh status; done
ZooKeeper JMX enabled by default
Using config: /zookeeper/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
ZooKeeper JMX enabled by default
Using config: /zookeeper/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: leader
ZooKeeper JMX enabled by default
Using config: /zookeeper/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
# 2.2.5 Connect to the Zookeeper cluster
[root@k8s-master01 01-zookeeper]# kubectl exec -it zookeeper-0 -n logging -- /bin/sh
# /zookeeper/bin/zkCli.sh -server zookeeper-svc
[zk: zookeeper-svc(CONNECTED) 0]  create /hello oldxu
Created /hello
[zk: zookeeper-svc(CONNECTED) 1] get /hello
oldxu

# 3. Delivering the Kafka Cluster to K8S

# 3.1 Building the Kafka Cluster Image
# 3.1.1 Dockerfile
# cat Dockerfile 
FROM openjdk:8-jre

# 1. Adjust the time zone
RUN /bin/cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
    echo 'Asia/Shanghai' > /etc/timezone

# 2. Copy the Kafka software and its configuration
ENV VERSION=2.12-2.2.0
ADD ./kafka_${VERSION}.tgz /
ADD ./server.properties /kafka_${VERSION}/config/server.properties

# 3. Rename the Kafka directory
RUN mv /kafka_${VERSION} /kafka

# 4. Startup script (rewrites the Kafka configuration)
ADD ./entrypoint.sh /entrypoint.sh

# 5. Expose the Kafka ports; 9999 is the JMX port
EXPOSE 9092 9999

# 6. Run the startup script
CMD ["/bin/bash","/entrypoint.sh"]
# 3.1.2 server.properties
# cat server.properties 
############################# Server Basics #############################
# Broker id; an integer that must be unique within the cluster
broker.id={BROKER_ID}

############################# Socket Server Settings #############################
# Address and port Kafka listens on; the default port is 9092
listeners=PLAINTEXT://{LISTENERS}:9092

# Number of threads handling network requests, default 3
num.network.threads=3

# Number of threads performing disk I/O, default 8
num.io.threads=8

# Send buffer size of the socket server, default 100 KB
socket.send.buffer.bytes=102400

# Receive buffer size of the socket server, default 100 KB
socket.receive.buffer.bytes=102400

# Maximum size of a single request the socket server accepts, default 100 MB
socket.request.max.bytes=104857600

############################# Log Basics #############################
# Directory where Kafka stores message data
log.dirs={KAFKA_DATA_DIR}

# Default number of partitions per topic
num.partitions=1

# Replication factor of 3, so that leadership fails over automatically when a leader replica goes down
default.replication.factor=3

# Number of threads per data directory used to recover logs at startup and flush them at shutdown
num.recovery.threads.per.data.dir=1

############################# Log Flush Policy #############################
# Number of messages accumulated before they are flushed to disk
log.flush.interval.messages=10000

# Maximum interval before messages are flushed to disk, 1 s
log.flush.interval.ms=1000

############################# Log Retention Policy #############################
# Hours to keep log segments before they are deleted, default 7 days
log.retention.hours=168

# Maximum size of retained logs before deletion, default 1 GB
#log.retention.bytes=1073741824

# Log segmentation policy: a single log file grows up to 1 GB, then a new file is created
log.segment.bytes=1073741824

# Interval at which retention conditions are checked, 300 s
log.retention.check.interval.ms=300000

############################# Zookeeper #############################
# Zookeeper connection string; for a Zookeeper cluster, separate the entries with commas
zookeeper.connect={ZOOK_SERVERS}

# Zookeeper connection timeout, 6 s
zookeeper.connection.timeout.ms=6000
# 3.1.3 entrypoint
# cat entrypoint.sh 
#!/bin/bash
# Variables
KAFKA_DIR=/kafka
KAFKA_CONF=/kafka/config/server.properties

# 1. Derive the broker id from the hostname ordinal + 1; it identifies the node and must be unique within the cluster
BROKER_ID=$(( $(hostname | sed 's#.*-##g') + 1 ))
LISTENERS=$(hostname -i)

# 2. Substitute the placeholders in the configuration file; the ZK cluster address is passed in later via ENV
sed -i s@{BROKER_ID}@${BROKER_ID}@g  ${KAFKA_CONF}
sed -i s@{LISTENERS}@${LISTENERS}@g  ${KAFKA_CONF}
sed -i s@{KAFKA_DATA_DIR}@${KAFKA_DATA_DIR:-/data}@g  ${KAFKA_CONF}
sed -i s@{ZOOK_SERVERS}@${ZOOK_SERVERS}@g  ${KAFKA_CONF}

# 3. Start Kafka
cd ${KAFKA_DIR}/bin
sed -i '/export KAFKA_HEAP_OPTS/a export JMX_PORT="9999"' kafka-server-start.sh
./kafka-server-start.sh ../config/server.properties
# 3.1.4 Build the image and push it to the registry
# wget https://archive.apache.org/dist/kafka/2.2.0/kafka_2.12-2.2.0.tgz
# docker build -t registry.cn-hangzhou.aliyuncs.com/kubernetes_public/kafka:2.12.2 .
# docker push registry.cn-hangzhou.aliyuncs.com/kubernetes_public/kafka:2.12.2
# 3.2 Deploying Kafka to K8S
# 3.2.1 kafka-headless
# cat 01-kafka-headless.yaml 
apiVersion: v1
kind: Service
metadata:
  name: kafka-svc
  namespace: logging
spec:
  clusterIP: None
  selector:
    app: kafka
  ports:
  - name: client
    port: 9092
    targetPort: 9092
  - name: jmx
    port: 9999
    targetPort: 9999
# 3.2.2 kafka-sts
# cat 02-kafka-sts.yaml 
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
  namespace: logging
spec:
  serviceName: "kafka-svc"
  replicas: 3
  selector:
    matchLabels:
      app: kafka
  template:
    metadata:
      labels:
        app: kafka
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values: ["kafka"]
              topologyKey: "kubernetes.io/hostname"
      imagePullSecrets:
      - name: harbor-admin
      containers:
      - name: kafka
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/kafka:2.12.2 
        imagePullPolicy: Always
        ports:
        - name: client
          containerPort: 9092
        - name: jmxport
          containerPort: 9999
        env:
        - name: ZOOK_SERVERS
          value: "zookeeper-0.zookeeper-svc:2181,zookeeper-1.zookeeper-svc:2181,zookeeper-2.zookeeper-svc:2181"
        readinessProbe:         # readiness probe: the Pod receives no traffic until it is ready
          tcpSocket:
            port: 9092
          initialDelaySeconds: 5
        livenessProbe:         # liveness probe: if it fails, the container is restarted according to the restart policy
          tcpSocket:
            port: 9092
          initialDelaySeconds: 5
        volumeMounts:
        - name: data
          mountPath: /data
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteMany"]
      storageClassName: "nfs-storage"
      resources:
        requests:
          storage: 5Gi
# 3.2.3 Apply the manifests
[root@k8s-master01 02-kafka]# kubectl apply -f 01-kafka-headless.yaml 
[root@k8s-master01 02-kafka]# kubectl apply -f 02-kafka-sts.yaml
[root@k8s-master01 02-kafka]# kubectl get pods -n logging 
NAME          READY   STATUS    RESTARTS       AGE
kafka-0       1/1     Running   0              5m49s
kafka-1       1/1     Running   0              4m43s
kafka-2       1/1     Running   0              3m40s

# Check whether the Kafka brokers have registered themselves in Zookeeper
[root@k8s-master01 02-kafka]# kubectl exec -it zookeeper-0 -n logging -- /bin/bash
root@zookeeper-0:/# /zookeeper/bin/zkCli.sh 
[zk: localhost:2181(CONNECTED) 2] get /brokers/ids/1
{"listener_security_protocol_map":{"PLAINTEXT":"PLAINTEXT"},"endpoints":["PLAINTEXT://172.16.85.201:9092"],"jmx_port":9999,"host":"172.16.85.201","timestamp":"1748162470218","port":9092,"version":4}
[zk: localhost:2181(CONNECTED) 3] get /brokers/ids/2
{"listener_security_protocol_map":{"PLAINTEXT":"PLAINTEXT"},"endpoints":["PLAINTEXT://172.16.58.205:9092"],"jmx_port":9999,"host":"172.16.58.205","timestamp":"1748162532658","port":9092,"version":4}
[zk: localhost:2181(CONNECTED) 4] get /brokers/ids/3
{"listener_security_protocol_map":{"PLAINTEXT":"PLAINTEXT"},"endpoints":["PLAINTEXT://172.16.195.1:9092"],"jmx_port":9999,"host":"172.16.195.1","timestamp":"1748162649250","port":9092,"version":4}
# 3.2.4 Verify the Kafka cluster
1. Create a topic
root@kafka-0:/# /kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-0.zookeeper-svc:2181,zookeeper-1.zookeeper-svc:2181,zookeeper-2.zookeeper-svc:2181 --partitions 1 --replication-factor 3 --topic oldxu

2. Publish test messages
root@kafka-1:/# /kafka/bin/kafka-console-producer.sh --broker-list kafka-0.kafka-svc:9092,kafka-1.kafka-svc:9092,kafka-2.kafka-svc:9092 --topic oldxu
>hello kubernetes
>hello world

3. Consume the messages
root@kafka-2:/# /kafka/bin/kafka-console-consumer.sh  --bootstrap-server kafka-0.kafka-svc:9092,kafka-1.kafka-svc:9092,kafka-2.kafka-svc:9092 --topic oldxu --from-beginning
hello kubernetes
hello world
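
Optionally, the partition and replica placement of the test topic can also be inspected (a sketch; the actual assignment will differ per cluster):

root@kafka-0:/# /kafka/bin/kafka-topics.sh --describe --zookeeper zookeeper-0.zookeeper-svc:2181 --topic oldxu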

# 4. Delivering EFAK to K8S

# 4.1 Building the EFAK Image
# 4.1.1 Dockerfile
[root@manager 03-efak]# cat Dockerfile 
FROM openjdk:8

# 1. Adjust the time zone
RUN /bin/cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
    echo 'Asia/Shanghai' > /etc/timezone

# 2. Copy the EFAK software and its configuration
ENV VERSION=3.0.1
ADD ./efak-web-${VERSION}-bin.tar.gz /
ADD ./system-config.properties /efak-web-${VERSION}/conf/system-config.properties

# 3. Rename the EFAK directory
RUN mv /efak-web-${VERSION} /efak

# 4. Environment variables
ENV KE_HOME=/efak
ENV PATH=$PATH:$KE_HOME/bin

# 5. Startup script (rewrites the EFAK configuration)
ADD ./entrypoint.sh /entrypoint.sh

# 6. Expose the EFAK web UI port 8048
EXPOSE 8048

# 7. Run the startup script
CMD ["/bin/bash","/entrypoint.sh"]
# 4.1.2 system-config
# cat system-config.properties 
######################################
# Zookeeper cluster list
######################################
efak.zk.cluster.alias=cluster1
cluster1.zk.list={ZOOK_SERVERS}

######################################
# maximum number of brokers
######################################
cluster1.efak.broker.size=20

######################################
# number of zk client threads
######################################
kafka.zk.limit.size=32

######################################
# EFAK web UI port
######################################
efak.webui.port=8048

######################################
# kafka offset storage
######################################
cluster1.efak.offset.storage=kafka

######################################
# kafka jmx uri
######################################
cluster1.efak.jmx.uri=service:jmx:rmi:///jndi/rmi://%s/jmxrmi

######################################
# kafka metrics; retained for 15 days by default
######################################
efak.metrics.charts=true
efak.metrics.retain=15

######################################
# kafka sql topic records max
######################################
efak.sql.topic.records.max=5000
efak.sql.topic.preview.records.max=10

######################################
# delete kafka topic token
######################################
efak.topic.token=keadmin

######################################
# kafka sqlite database location (the storage path needs to be adjusted)
######################################
efak.driver=org.sqlite.JDBC
efak.url=jdbc:sqlite:{EFAK_DATA_DIR}/db/ke.db
efak.username=root
efak.password=www.kafka-eagle.org
# 4.1.3 entrypoint
# cat entrypoint.sh 
#!/bin/bash
# 1. Variables
EFAK_DIR=/efak
EFAK_CONF=/efak/conf/system-config.properties

# 2. Substitute the placeholders in the configuration file; the ZK cluster address is passed in later via ENV
sed -i s@{EFAK_DATA_DIR}@${EFAK_DIR}@g  ${EFAK_CONF}
sed -i s@{ZOOK_SERVERS}@${ZOOK_SERVERS}@g  ${EFAK_CONF}

# 3. Start EFAK
${EFAK_DIR}/bin/ke.sh start
tail -f ${EFAK_DIR}/logs/ke_console.out
# 4.1.4 Build the image and push it to the registry
# wget https://github.com/smartloli/kafka-eagle-bin/archive/v3.0.1.tar.gz
# docker build -t registry.cn-hangzhou.aliyuncs.com/kubernetes_public/efak:3.0 .
# docker push registry.cn-hangzhou.aliyuncs.com/kubernetes_public/efak:3.0
# 4.2 Deploying EFAK to K8S
# 4.2.1 efak-deploy
# cat 01-efak-deploy.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: efak
  namespace: logging
spec:
  replicas: 1
  selector:
    matchLabels:
      app: efak
  template:
    metadata:
      labels:
        app: efak
    spec:
      imagePullSecrets:
      - name: harbor-admin
      containers:
      - name: efak
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/efak:3.0 
        imagePullPolicy: Always
        ports:
        - name: http
          containerPort: 8048
        env:
        - name: ZOOK_SERVERS
          value: "zookeeper-0.zookeeper-svc:2181,zookeeper-1.zookeeper-svc:2181,zookeeper-2.zookeeper-svc:2181"
# 4.2.2 efak-service
# cat 02-efak-service.yaml 
apiVersion: v1
kind: Service
metadata:
  name: efak-svc
  namespace: logging
spec:
  selector:
    app: efak
  ports:
  - port: 8048
    targetPort: 8048
# 4.2.3 efak-ingress
# cat 03-efak-ingress.yaml 
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: efak-ingress
  namespace: logging
spec:
  ingressClassName: "nginx"
  rules:
  - host: "efak.hmallleasing.com"
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: efak-svc
            port: 
              number: 8048
# 4.2.4 Apply the manifests
[root@k8s-master01 03-efak]# kubectl apply -f 01-efak-deploy.yaml 
[root@k8s-master01 03-efak]# kubectl apply -f 02-efak-service.yaml 
[root@k8s-master01 03-efak]# kubectl apply -f 03-efak-ingress.yaml 
# 4.2.5 Access EFAK

1. The initial username/password is admin / 123456.

2. View the Topics.

3. View the Kafka cluster status.

4. View the Zookeeper cluster status.

# 5. Delivering the Elasticsearch Cluster

  • An ES cluster consists of multiple nodes; cluster.name sets the cluster name and also distinguishes it from other ES clusters.
  • Each node sets its name within the cluster through the node.name parameter.
  • Nodes use the discovery.seed_hosts parameter to define the list of cluster nodes.
  • When the cluster starts for the very first time it must be bootstrapped, specifying the IPs, or node names, of the master-eligible nodes that take part in the election.
  • A node is given the master role with node.master: true and the data role with node.data: true.
[root@k8s-master01 ~]# grep "^[a-Z]" /etc/elasticsearch/elasticsearch.yml
cluster.name: my-oldxu                                          # cluster name
node.name: node1                                                # node name
path.data: /var/lib/elasticsearch                               # data path
path.logs: /var/log/elasticsearch                               # log path
network.host: 10.0.0.100                                        # local address to listen on
http.port: 9200                                                 # listening port
discovery.seed_hosts: ["ip1", "ip2", "ip3"]                     # list of cluster hosts
cluster.initial_master_nodes: ["node01", "node02", "node03"]    # only used for the election at the very first cluster start (node.name values may be used)
# 5.1 Pull the Elasticsearch image
# docker pull elasticsearch:7.17.6
# docker tag elasticsearch:7.17.6 registry.cn-hangzhou.aliyuncs.com/kubernetes_public/elasticsearch:7.17.6
# docker push registry.cn-hangzhou.aliyuncs.com/kubernetes_public/elasticsearch:7.17.6
# 5.2 Deploy the ES Service

Create the es headless Service, which gives every ES Pod a stable DNS name, regardless of whether the Pod is a master, data, or coordinating node.
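
For reference, each Pod then gets a name of the form <pod>.<service>.<namespace>.svc.cluster.local. Once the StatefulSets below are running, this can be checked against the cluster DNS (10.96.0.10 here, the same address used later in section 5.6):

# dig @10.96.0.10 es-master-0.es-svc.logging.svc.cluster.local +short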

# cat 01-es-svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: es-svc
  namespace: logging
spec:
  selector:
    app: es
  clusterIP: None
  ports:
  - name: cluster
    port: 9200
    targetPort: 9200
  - name: transport
    port: 9300
    targetPort: 9300
# 5.3 Deploy the ES master nodes
  1. ES cannot be started directly as root; the data directory must be owned by UID 1000, and /usr/share/elasticsearch/data must be persisted;

  2. Every ES node needs the vm.max_map_count kernel parameter and the ulimit raised;

  3. ES is configured at startup through ENV variables:

    • cluster name, node name, and node roles;

    • discovery.seed_hosts, the list of cluster addresses;

    • cluster.initial_master_nodes, the names of the master nodes that take part in the initial election;

# cat 02-es-master.yaml 
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es-master
  namespace: logging
spec:
  serviceName: "es-svc"
  replicas: 3           # number of ES Pod replicas
  selector:             # labels of the ES Pods to manage
    matchLabels:
      app: es
      role: master
  template:
    metadata:
      labels:
        app: es
        role: master
    spec:                       # Pod specification
      imagePullSecrets:         # image pull credentials
      - name: harbor-admin
      affinity:                 # Pod anti-affinity
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values: ["es"]
              - key: role
                operator: In
                values: ["master"]
            topologyKey: "kubernetes.io/hostname"       # each node counts as one placement domain
      initContainers:           # init containers
      - name: fix-permissions
        image: busybox
        command: ["sh","-c","chown -R 1000:1000 /usr/share/elasticsearch/data ; sysctl -w vm.max_map_count=262144; ulimit -n 65536"]
        securityContext:
          privileged: true
        volumeMounts:
        - name: data
          mountPath: /usr/share/elasticsearch/data
      containers:               # main ES container
      - name: es
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/elasticsearch:7.17.6 
        resources:
          limits:
            cpu: 1000m
            memory: 4096Mi
          requests:
            cpu: 300m
            memory: 1024Mi
        ports:
        - name: cluster
          containerPort: 9200
        - name: transport
          containerPort: 9300
        volumeMounts:
        - name: data
          mountPath: /usr/share/elasticsearch/data
        - name: tz-config
          mountPath: /usr/share/zoneinfo/Asia/Shanghai
        - name: tz-config
          mountPath: /etc/localtime
        - name: timezone
          mountPath: /etc/timezone
        env:
        - name: ES_JAVA_OPTS
          value: "-Xms1g -Xmx1g"
        - name: cluster.name
          value: es-cluster
        - name: node.name
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: node.master
          value: "true"
        - name: node.data
          value: "false"
        - name: discovery.seed_hosts
          value: "es-master-0.es-svc,es-master-1.es-svc,es-master-2.es-svc"
        - name: cluster.initial_master_nodes
          value: "es-master-0,es-master-1,es-master-2"
      volumes:
      - name: tz-config
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
          type: ""
      - name: timezone
        hostPath:
          path: /etc/timezone
          type: ""
  volumeClaimTemplates: # dynamic PVC
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      storageClassName: "nfs-storage"
      resources:
        requests:
          storage: 5Gi
# 5.4 Deploy the ES data nodes
  1. ES cannot be started directly as root; the data directory must be owned by UID 1000, and /usr/share/elasticsearch/data must be persisted;

  2. Every ES node needs the vm.max_map_count kernel parameter and the ulimit raised;

  3. ES is configured at startup through ENV variables:

    • cluster name, node name, and node roles;

    • discovery.seed_hosts, the list of cluster addresses;

# cat 03-es-data.yaml 
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es-data
  namespace: logging
spec:
  serviceName: "es-svc"
  replicas: 2           # number of ES Pod replicas
  selector:             # labels of the ES Pods to manage
    matchLabels:
      app: es
      role: data
  template:
    metadata:
      labels:
        app: es
        role: data
    spec:                       # Pod specification
      imagePullSecrets:         # image pull credentials
      - name: harbor-admin
      affinity:                 # Pod anti-affinity
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values: ["es"]
              - key: role
                operator: In
                values: ["data"]
            topologyKey: "kubernetes.io/hostname"       # each node counts as one placement domain
      initContainers:           # init containers
      - name: fix-permissions
        image: busybox
        command: ["sh","-c","chown -R 1000:1000 /usr/share/elasticsearch/data ; sysctl -w vm.max_map_count=262144; ulimit -n 65536"]
        securityContext:
          privileged: true
        volumeMounts:
        - name: data
          mountPath: /usr/share/elasticsearch/data
      containers:               # main ES container
      - name: es
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/elasticsearch:7.17.6 
        resources:
          limits:
            cpu: 1000m
            memory: 4096Mi
          requests:
            cpu: 300m
            memory: 1024Mi
        ports:
        - name: cluster
          containerPort: 9200
        - name: transport
          containerPort: 9300
        volumeMounts:
        - name: data
          mountPath: /usr/share/elasticsearch/data
        - name: tz-config
          mountPath: /usr/share/zoneinfo/Asia/Shanghai
        - name: tz-config
          mountPath: /etc/localtime
        - name: timezone
          mountPath: /etc/timezone
        env:
        - name: ES_JAVA_OPTS
          value: "-Xms1g -Xmx1g"
        - name: cluster.name
          value: es-cluster
        - name: node.name
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: node.master
          value: "false"
        - name: node.data
          value: "true"
        - name: discovery.seed_hosts
          value: "es-master-0.es-svc,es-master-1.es-svc,es-master-2.es-svc"
      volumes:
      - name: tz-config
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
          type: ""
      - name: timezone
        hostPath:
          path: /etc/timezone
          type: ""
  volumeClaimTemplates: # dynamic PVC
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      storageClassName: "nfs-storage"
      resources:
        requests:
          storage: 5Gi
# 5.5 Apply the manifests
[root@k8s-master01 04-elasticsearch]# kubectl apply -f 01-es-svc.yaml 
[root@k8s-master01 04-elasticsearch]# kubectl apply -f 02-es-master.yaml 
[root@k8s-master01 04-elasticsearch]# kubectl apply -f 03-es-data.yaml 
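
As a quick check (a sketch; Pod names follow from the StatefulSets above), confirm that the Pods are up and that the kernel parameter set by the init container is in effect; the second command should print 262144:

[root@k8s-master01 04-elasticsearch]# kubectl get pods -n logging -l app=es
[root@k8s-master01 04-elasticsearch]# kubectl exec es-master-0 -n logging -- cat /proc/sys/vm/max_map_count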
# 5.6 Verify the ES cluster
# 1. Resolve the headless Service to obtain the IP address of any node in the ES cluster
# dig @10.96.0.10 es-svc.logging.svc.cluster.local  +short
172.16.58.229
172.16.122.191
172.16.195.21
172.16.122.129
172.16.32.164

# 2. Query ES with curl to check that the cluster is healthy (if only the master nodes are deployed, without data nodes, the cluster status may be red because there are no data nodes to store data)
# curl -XGET "http://172.16.122.129:9200/_cluster/health?pretty"
{
  "cluster_name" : "es-cluster",
  "status" : "green",
  "timed_out" : false,
  "number_of_nodes" : 5,
  "number_of_data_nodes" : 2,
  "active_primary_shards" : 3,
  "active_shards" : 6,
  "relocating_shards" : 0,
  "initializing_shards" : 0,
  "unassigned_shards" : 0,
  "delayed_unassigned_shards" : 0,
  "number_of_pending_tasks" : 0,
  "number_of_in_flight_fetch" : 0,
  "task_max_waiting_in_queue_millis" : 0,
  "active_shards_percent_as_number" : 100.0
}

# 3. Show the details of each ES node
# curl -XGET "http://172.16.122.129:9200/_cat/nodes"
172.16.122.129 16 33 20 0.38 0.56 0.38 ilmr       - es-master-2
172.16.58.229  66 33 22 0.64 0.66 0.44 ilmr       * es-master-1
172.16.122.191 52 34 15 0.38 0.56 0.38 cdfhilrstw - es-data-0
172.16.195.21  38 35 19 0.38 0.53 0.36 cdfhilrstw - es-data-1
172.16.32.164  31 33 12 0.28 0.50 0.59 ilmr       - es-master-0

# 6. Delivering Kibana for Visualization

# 6.1 Pull the Kibana image
# docker pull kibana:7.17.6
# docker tag kibana:7.17.6 registry.cn-hangzhou.aliyuncs.com/kubernetes_public/kibana:7.17.6
# docker push registry.cn-hangzhou.aliyuncs.com/kubernetes_public/kibana:7.17.6
# 6.2 kibana-deploy
  1. Kibana needs to connect to the ES cluster; the ES addresses are passed through the ELASTICSEARCH_HOSTS variable
  2. Kibana's locale is set through I18N_LOCALE
  3. Kibana's externally visible URL is passed through SERVER_PUBLICBASEURL
# cat 01-kibana-deploy.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: logging
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
    spec:
      imagePullSecrets:
      - name: harbor-admin
      containers:
      - name: kibana
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/kibana:7.17.6 
        resources:
          limits:
            cpu: 1000m
        ports:
        - containerPort: 5601
        env:
        - name: ELASTICSEARCH_HOSTS
          value: '["http://es-data-0.es-svc:9200","http://es-data-1.es-svc:9200"]'
        - name: I18N_LOCALE
          value: "zh-CN"
        - name: SERVER_PUBLICBASEURL
          value: "http://kibana.hmallleasing.com"   #kibana访问UI
        volumeMounts:
        - name: tz-config
          mountPath: /usr/share/zoneinfo/Asia/Shanghai
        - name: tz-config
          mountPath: /etc/localtime
        - name: timezone
          mountPath: /etc/timezone
      volumes:
      - name: tz-config
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
          type: ""
      - name: timezone
        hostPath:
          path: /etc/timezone
          type: ""
# 6.3 kibana-svc
# cat 02-kibana-svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: kibana-svc
  namespace: logging
spec:
  selector:
    app: kibana
  ports:
  - name: web
    port: 5601
    targetPort: 5601
# 6.4 kibana-ingress
# cat 03-kibana-ingress.yaml 
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kibana-ingress
  namespace: logging
spec:
  ingressClassName: "nginx"
  rules:
  - host: "kibana.hmallleasing.com"
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kibana-svc
            port:
              number: 5601
# 6.5 Apply the manifests
[root@k8s-master01 05-kibana]# kubectl apply -f 01-kibana-deploy.yaml 
[root@k8s-master01 05-kibana]# kubectl apply -f 02-kibana-svc.yaml 
[root@k8s-master01 05-kibana]# kubectl apply -f 03-kibana-ingress.yaml

[root@k8s-master01 05-kibana]# kubectl get pods -n logging
NAME                      READY   STATUS    RESTARTS   AGE
efak-5cdc74bf59-nrhb4     1/1     Running   0          5h33m
es-data-0                 1/1     Running   0          16m
es-data-1                 1/1     Running   0          15m
es-master-0               1/1     Running   0          17m
es-master-1               1/1     Running   0          15m
es-master-2               1/1     Running   0          12m
kafka-0                   1/1     Running   0          5h39m
kafka-1                   1/1     Running   0          5h39m
kafka-2                   1/1     Running   0          5h38m
kibana-5ccc46864b-ndzx9   1/1     Running   0          118s
zookeeper-0               1/1     Running   0          5h42m
zookeeper-1               1/1     Running   0          5h42m
zookeeper-2               1/1     Running   0          5h41m
# 6.6 Access Kibana

Open http://kibana.hmallleasing.com in a browser (the Ingress host configured above); the Kibana UI should load.

# 7. Collecting Application Logs with a Filebeat Sidecar

# 7.1 Architecture

For Pods that can write their logs to local files, we can run a log-collection agent alongside them in sidecar mode and collect those logs separately.

  1. First, the business container in the Pod writes its logs to local files; a Filebeat sidecar container is then run to collect the logs from that local path;
  2. The Filebeat container is passed the following variables:
    • ENV: which environment the Pod belongs to;
    • PROJECT_NAME: so that different projects can later be told apart within a single index;
    • PodIP: so that users can see which IP the Pod has;
    • Node: the node the Pod is running on;
  3. Logstash pulls the topics of each environment and stores the data in the corresponding ES index;
  4. Kibana adds an index pattern per environment, after which the different projects of that environment can be explored and visualized (a worked naming example follows this list);
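
Taking nf-flms-gateway in the prod environment as the worked example (the names follow the conventions used later in this chapter), the data flows roughly like this:

/logs/*.log in the business container
  -> Filebeat sidecar (ENV=prod, PROJECT_NAME=nf-flms-gateway)
  -> Kafka topic app-prod-nf-flms-gateway
  -> Logstash (topics_pattern "app-prod-.*")
  -> ES index app-prod-<yyyy.MM.dd>
  -> Kibana index pattern app-prod-*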
# 7.2 Sidecar deployment plan
  1. Build a business image that writes its logs to local files;
  2. Build a Filebeat image and configure its input, output, and so on;
  3. Run the Pods of the different environments in sidecar mode, making sure the log data reaches the Kafka cluster;
  4. Prepare a Logstash configuration per environment that reads the data and writes it into the ES cluster;
  5. Add the indices in Kibana, then explore and visualize the logs;
# 7.3 Building the Filebeat Image

7.3.1 Download Filebeat

curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.17.6-x86_64.rpm

7.3.2 Write the Dockerfile

# cat Dockerfile 
# 1. Base image
FROM centos:7

# 2. Install Filebeat
ENV VERSION=7.17.6
ADD ./filebeat-${VERSION}-x86_64.rpm /
RUN rpm -ivh /filebeat-${VERSION}-x86_64.rpm && \
    rm -f /filebeat-${VERSION}-x86_64.rpm

# 3. Copy the Filebeat configuration file (the core piece)
ADD ./filebeat.yml /etc/filebeat/filebeat.yml

# 4. Copy the startup script
ADD ./entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

# 5. Run the startup script
CMD ["/bin/bash","-c","/entrypoint.sh"]

7.3.3 Write the entrypoint

# cat entrypoint.sh 
#!/bin/bash
# 1. Substitute the placeholders in the Filebeat configuration file
Beat_Conf=/etc/filebeat/filebeat.yml

sed -i s@{ENV}@${ENV:-test}@g ${Beat_Conf}
sed -i s@{PodIP}@${PodIP:-"no-ip"}@g ${Beat_Conf}
sed -i s@{Node}@${Node:-"none"}@g ${Beat_Conf}
sed -i s@{PROJECT_NAME}@${PROJECT_NAME:-"no-define"}@g ${Beat_Conf}
sed -i s@{MULTILINE}@${MULTILINE:-"^\\\d{2}"}@g ${Beat_Conf}		# \\ is used for escaping
sed -i s@{KAFKA_HOSTS}@${KAFKA_HOSTS}@g ${Beat_Conf}

# 2. Run Filebeat
filebeat -e -c /etc/filebeat/filebeat.yml

7.3.4 Write the Filebeat configuration

{ENV}: variable holding the environment;
{PROJECT_NAME}: variable holding the project name;
{MULTILINE}: variable holding the regular expression used for multiline merging;
{KAFKA_HOSTS}: variable holding the Kafka cluster addresses;
{PodIP}: variable holding the Pod's IP address;
{Node}: variable holding the node the Pod runs on;

[root@k8s-master01 filebeat_sidecar_dockerfile]# cat filebeat.yml 
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /logu/*.log
    - /logu/*/*.log
  tags: ["logu"]
  fields:
    topic: {PROJECT_NAME}
    podip: {PodIP}
    node: {Node}
  fields_under_root: true               # all added fields become top-level fields

- type: log
  enabled: true
  paths:
    - /logm/*.log
    - /logm/*/*.log
  tags: ["logm"]
  fields:
    topic: {PROJECT_NAME}
    podip: {PodIP}
    node: {Node}
  fields_under_root: true               # all added fields become top-level fields
  multiline.pattern: '{MULTILINE}'
  multiline.negate: true
  multiline.match: after
  multiline.max_lines: 10000    # the default maximum is 500 merged lines; adjust as needed

output.kafka:
  hosts: [{KAFKA_HOSTS}]
  topic: app-{ENV}-%{[topic]}
  required_acks: 1              # message durability: 0 = no guarantee, 1 = wait for the leader partition (default), -1 = wait for the replicas
  compression: gzip             # compression
  max_message_bytes: 1000000    # maximum length of a single message; anything beyond it is dropped
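
As a concrete illustration (assuming the prod gateway Pod defined later, where ENV resolves to the prod namespace and PROJECT_NAME is nf-flms-gateway), the output section after the entrypoint substitutions would look roughly like this, and events end up in the Kafka topic app-prod-nf-flms-gateway:

output.kafka:
  hosts: ["kafka-0.kafka-svc.logging:9092","kafka-1.kafka-svc.logging:9092","kafka-2.kafka-svc.logging:9092"]
  topic: app-prod-%{[topic]}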

7.3.5 Build and push the image

# docker build -t registry.cn-hangzhou.aliyuncs.com/kubernetes_public/filebeat_sidecar:7.17.6 .
# docker push registry.cn-hangzhou.aliyuncs.com/kubernetes_public/filebeat_sidecar:7.17.6
# 7.4 Log Collection for nf-flms-gateway

7.4.1 Create the Namespace and Secrets

# sed -i "s#dev#prod#g" *.yaml
# kubectl create ns prod
# kubectl create secret tls prod-api.hmallleasig.com --key hmallleasing.com.key --cert hmallleasing.com.pem -n prod
# kubectl create secret docker-registry harbor-admin --docker-server=registry.cn-hangzhou.aliyuncs.com --docker-username=xyapples@163.com --docker-password=passwd -n prod

7.4.2 Create nf-flms-gateway

# cat 01-nf-flms-gateway.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nf-flms-gateway
  namespace: prod
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nf-flms-gateway
  template:
    metadata:
      labels:
        app: nf-flms-gateway
    spec:
      imagePullSecrets:
      - name: harbor-admin
      containers:
      - name: nf-flms-gateway
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/nf-flms-gateway:v2.2 
        command:
        - "/bin/sh"
        - "-c"
        - "java -Xms256m -Xmx1024m -Dspring.profiles.active=prd -Djava.security.egd=file:/dev/./urandom -jar -Duser.timezone=GMT+08 nf-flms-gateway.jar"
        resources:
          limits:
            cpu: '1000m'
            memory: 1Gi
          requests:
            cpu: "200m"
            memory: "500Mi"
        ports:
        - containerPort: 8080
        readinessProbe:         # readiness probe: an unready Pod is removed from load balancing
          tcpSocket:
            port: 8080
          initialDelaySeconds: 60
          periodSeconds: 30
          timeoutSeconds: 3
          successThreshold: 1
          failureThreshold: 2
        livenessProbe:          # liveness probe: the container is restarted if it fails
          tcpSocket:
            port: 8080
          initialDelaySeconds: 60
          periodSeconds: 30
          timeoutSeconds: 3
          successThreshold: 1
          failureThreshold: 2
        volumeMounts:
        - name: tz-config
          mountPath: /usr/share/zoneinfo/Asia/Shanghai
        - name: tz-config
          mountPath: /etc/localtime
        - name: timezone
          mountPath: /etc/timezone
        - name: log
          mountPath: /logs    # log directory of the business container
      - name: filebeat
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/filebeat_sidecar:7.17.6 
        imagePullPolicy: Always
        volumeMounts:
        - name: log
          mountPath: /logm    # multiline logs are collected from this path
        - name: tz-config
          mountPath: /usr/share/zoneinfo/Asia/Shanghai
        - name: tz-config
          mountPath: /etc/localtime
        - name: timezone
          mountPath: /etc/timezone
        env:
        - name: ENV
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: PodIP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: Node
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: PROJECT_NAME
          value: "nf-flms-gateway"
        - name: KAFKA_HOSTS
          value: '"kafka-0.kafka-svc.logging:9092","kafka-1.kafka-svc.logging:9092","kafka-2.kafka-svc.logging:9092"'
      volumes:
      - name: tz-config
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
          type: ""
      - name: timezone
        hostPath:
          path: /etc/timezone
          type: ""
      - name: log
        emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  name: gateway-svc
  namespace: prod
spec:
  selector:
    app: nf-flms-gateway
  ports:
  - port: 8080
    targetPort: 8080

---

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: gateway-ingress
  namespace: prod
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "false"    # disable the forced HTTPS redirect
spec:
  ingressClassName: "nginx"
  rules:
  - host: "prod-api.hmallleasing.com"
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: gateway-svc
            port:
              number: 8080
  tls:                  # https
  - hosts:
    - prod-api.hmallleasing.com
    secretName: "prod-api.hmallleasig.com"   #配置默认证书可不添加secretNam

7.4.3 Check the Kafka topic

1. Check that the corresponding topic exists.

2. Click the topic's Preview to view its latest data.
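
If the EFAK UI is not available, the same check can be done from one of the Kafka Pods (a sketch; the topic name follows the app-<env>-<project> convention described in 7.3):

# kubectl exec -it kafka-0 -n logging -- /kafka/bin/kafka-console-consumer.sh --bootstrap-server kafka-0.kafka-svc:9092 --topic app-prod-nf-flms-gateway --from-beginning --max-messages 5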

# 7.5 Log Collection for nf-flms-order

7.5.1 Create the PVC

Create a PVC to store attachments such as order contracts and ID-card copies.

# cat 02-data-image.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-image
  namespace: prod
spec:
  storageClassName: "nfs-storage"     # explicitly choose which StorageClass provisioner creates the PV
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 2Gi                      # size the request according to actual business needs
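
Before referencing the claim from the Deployment, it is worth confirming that the nfs-storage provisioner has bound it:

# kubectl get pvc data-image -n prod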

7.5.2 Create nf-flms-order

# cat 02-nf-flms-order.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nf-flms-order
  namespace: prod
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nf-flms-order
  template:
    metadata:
      labels:
        app: nf-flms-order
    spec:
      imagePullSecrets:
      - name: harbor-admin
      containers:
      - name: nf-flms-order
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/nf-flms-order:v2.0 
        command:
        - "/bin/sh"
        - "-c"
        - "java -Xms256m -Xmx1024m -Dspring.profiles.active=prd -Djava.security.egd=file:/dev/./urandom -jar -Duser.timezone=GMT+08 nf-flms-order.jar"
        resources:
          limits:
            cpu: '1000m'
            memory: 1Gi
          requests:
            cpu: "200m"
            memory: "500Mi"
        ports:
        - containerPort: 8080
        readinessProbe:         # readiness probe: an unready Pod is removed from load balancing
          tcpSocket:
            port: 8080
          initialDelaySeconds: 60
          periodSeconds: 30
          timeoutSeconds: 3
          successThreshold: 1
          failureThreshold: 2
        livenessProbe:          # liveness probe: the container is restarted if it fails
          tcpSocket:
            port: 8080
          initialDelaySeconds: 60
          periodSeconds: 30
          timeoutSeconds: 3
          successThreshold: 1
          failureThreshold: 2
        volumeMounts:
        - name: tz-config
          mountPath: /usr/share/zoneinfo/Asia/Shanghai
        - name: tz-config
          mountPath: /etc/localtime
        - name: timezone
          mountPath: /etc/timezone
        - name: data-image
          mountPath: /data
        - name: log
          mountPath: /logs    # log directory of the business container
      - name: filebeat
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/filebeat_sidecar:7.17.6
        imagePullPolicy: Always
        volumeMounts:
        - name: log
          mountPath: /logm    # multiline logs are collected from this path
        - name: tz-config
          mountPath: /usr/share/zoneinfo/Asia/Shanghai
        - name: tz-config
          mountPath: /etc/localtime
        - name: timezone
          mountPath: /etc/timezone
        env:
        - name: ENV
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: PodIP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: Node
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: PROJECT_NAME
          value: "nf-flms-order"
        - name: KAFKA_HOSTS
          value: '"kafka-0.kafka-svc.logging:9092","kafka-1.kafka-svc.logging:9092","kafka-2.kafka-svc.logging:9092"'
      volumes:
      - name: tz-config
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
          type: ""
      - name: timezone
        hostPath:
          path: /etc/timezone
          type: ""
      - name: data-image
        persistentVolumeClaim:      
          claimName: data-image
      - name: log
        emptyDir: {}

7.5.3 Check the Kafka topic

1. Check that the corresponding topic exists.

2. Click the topic's Preview to view its latest data.

# 7.6 Log Collection for nf-flms-statistics

7.6.1 Create nf-flms-statistics

# cat 03-nf-flms-statistics.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nf-flms-statistics
  namespace: prod
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nf-flms-statistics
  template:
    metadata:
      labels:
        app: nf-flms-statistics
    spec:
      imagePullSecrets:
      - name: harbor-admin
      containers:
      - name: nf-flms-statistics
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/nf-flms-statistics:v2.0 
        command: 
        - "/bin/sh"
        - "-c"
        - "java -Xms256m -Xmx1024m -Dspring.profiles.active=prd -Djava.security.egd=file:/dev/./urandom -jar -Duser.timezone=GMT+08 nf-flms-statistics.jar"
        resources:
          limits:
            cpu: '1000m'
            memory: 1Gi
          requests:
            cpu: "200m"
            memory: "500Mi"
        ports:
        - containerPort: 8080
        readinessProbe:         # readiness probe: an unready Pod is removed from load balancing
          tcpSocket:
            port: 8080
          initialDelaySeconds: 60
          periodSeconds: 30
          timeoutSeconds: 3
          successThreshold: 1
          failureThreshold: 2
        livenessProbe:          # liveness probe: the container is restarted if it fails
          tcpSocket:
            port: 8080
          initialDelaySeconds: 60
          periodSeconds: 30
          timeoutSeconds: 3
          successThreshold: 1
          failureThreshold: 2
        volumeMounts:
        - name: tz-config
          mountPath: /usr/share/zoneinfo/Asia/Shanghai
        - name: tz-config
          mountPath: /etc/localtime
        - name: timezone
          mountPath: /etc/timezone
        - name: log
          mountPath: /logs    # log directory of the business container
      - name: filebeat
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/filebeat_sidecar:7.17.6
        imagePullPolicy: Always
        volumeMounts:
        - name: log
          mountPath: /logm    # multiline logs are collected from this path
        - name: tz-config
          mountPath: /usr/share/zoneinfo/Asia/Shanghai
        - name: tz-config
          mountPath: /etc/localtime
        - name: timezone
          mountPath: /etc/timezone
        env:
        - name: ENV
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: PodIP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: Node
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: PROJECT_NAME
          value: "nf-flms-statistics"
        - name: KAFKA_HOSTS
          value: '"kafka-0.kafka-svc.logging:9092","kafka-1.kafka-svc.logging:9092","kafka-2.kafka-svc.logging:9092"'
      volumes:
      - name: tz-config
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
          type: ""
      - name: timezone
        hostPath:
          path: /etc/timezone
          type: ""
      - name: log
        emptyDir: {}

7.6.2 Check the Kafka topic

1. Check that the corresponding topic exists.

2. Click the topic's Preview to view its latest data.

# 7.7 Log Collection for nf-flms-system

7.7.1 Create nf-flms-system

# cat 04-nf-flms-system.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nf-flms-system
  namespace: prod
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nf-flms-system
  template:
    metadata:
      labels:
        app: nf-flms-system
    spec:
      imagePullSecrets:
      - name: harbor-admin
      containers:
      - name: nf-flms-system
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/nf-flms-system:v2.0 
        command:
        - "/bin/sh"
        - "-c"
        - "java -Xms256m -Xmx1024m -Dspring.profiles.active=prd -Djava.security.egd=file:/dev/./urandom -jar -Duser.timezone=GMT+08 nf-flms-system.jar"
        resources:
          limits:
            cpu: '1000m'
            memory: 1Gi
          requests:
            cpu: "200m"
            memory: "500Mi"
        ports:
        - containerPort: 8080
        livenessProbe:
          tcpSocket:
            port: 8080
          initialDelaySeconds: 60
          periodSeconds: 10
          timeoutSeconds: 10
        readinessProbe:
          tcpSocket:
            port: 8080
          failureThreshold: 2
          initialDelaySeconds: 60
          periodSeconds: 10
          timeoutSeconds: 10
        volumeMounts:
        - name: tz-config
          mountPath: /usr/share/zoneinfo/Asia/Shanghai
        - name: tz-config
          mountPath: /etc/localtime
        - name: timezone
          mountPath: /etc/timezone
        - name: log
          mountPath: /logs    # log directory of the business container
      - name: filebeat
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/filebeat_sidecar:7.17.6
        imagePullPolicy: Always
        volumeMounts:
        - name: log
          mountPath: /logm    # multiline logs are collected from this path
        - name: tz-config
          mountPath: /usr/share/zoneinfo/Asia/Shanghai
        - name: tz-config
          mountPath: /etc/localtime
        - name: timezone
          mountPath: /etc/timezone
        env:
        - name: ENV
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: PodIP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: Node
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: PROJECT_NAME
          value: "nf-flms-system"
        - name: KAFKA_HOSTS
          value: '"kafka-0.kafka-svc.logging:9092","kafka-1.kafka-svc.logging:9092","kafka-2.kafka-svc.logging:9092"'
      volumes:
      - name: tz-config
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
          type: ""
      - name: timezone
        hostPath:
          path: /etc/timezone
          type: ""
      - name: log
        emptyDir: {}

7.7.2 Check the Kafka topic

1. Check that the corresponding topic exists.

2. Click the topic's Preview to view its latest data.

# 7.8 Log Collection for nf-flms-openapi

7.8.1 Create nf-flms-openapi

# cat 06-nf-flms-openapi.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nf-flms-openapi
  namespace: prod
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nf-flms-openapi
  template:
    metadata:
      labels:
        app: nf-flms-openapi
    spec:
      imagePullSecrets:
      - name: harbor-admin
      containers:
      - name: nf-flms-openapi
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/nf-flms-openapi:v2.2 
        command: 
        - "/bin/sh"
        - "-c"
        - "java -Xms256m -Xmx1024m -Dspring.profiles.active=prd -Djava.security.egd=file:/dev/./urandom -jar -Duser.timezone=GMT+08 nf-flms-openapi.jar"
        resources:
          limits:
            cpu: '1000m'
            memory: 1Gi
          requests:
            cpu: "200m"
            memory: "500Mi"
        ports:
        - containerPort: 8080
        livenessProbe:
          tcpSocket:
            port: 8080
          initialDelaySeconds: 60
          periodSeconds: 10
          timeoutSeconds: 10
        readinessProbe:
          tcpSocket:
            port: 8080
          failureThreshold: 2
          initialDelaySeconds: 60
          periodSeconds: 10
          timeoutSeconds: 10
        volumeMounts:
        - name: tz-config
          mountPath: /usr/share/zoneinfo/Asia/Shanghai
        - name: tz-config
          mountPath: /etc/localtime
        - name: timezone
          mountPath: /etc/timezone
        - name: log
          mountPath: /logs    # log directory of the business container
      - name: filebeat
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/filebeat_sidecar:7.17.6
        imagePullPolicy: Always
        volumeMounts:
        - name: log
          mountPath: /logm    # multiline logs are collected from this path
        - name: tz-config
          mountPath: /usr/share/zoneinfo/Asia/Shanghai
        - name: tz-config
          mountPath: /etc/localtime
        - name: timezone
          mountPath: /etc/timezone
        env:
        - name: ENV
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: PodIP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: Node
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: PROJECT_NAME
          value: "nf-flms-openapi"
        - name: KAFKA_HOSTS
          value: '"kafka-0.kafka-svc.logging:9092","kafka-1.kafka-svc.logging:9092","kafka-2.kafka-svc.logging:9092"'
      volumes:
      - name: tz-config
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
          type: ""
      - name: timezone
        hostPath:
          path: /etc/timezone
          type: ""
      - name: log
        emptyDir: {}

7.8.2 Check the Kafka topic

1. Check that the corresponding topic exists.

2. Click the topic's Preview to view its latest data.

# 7.9 Log Collection for nf-flms-ui

7.9.1 Prepare the Nginx configuration file

# cat prod.hmallleasing.com.conf 
server {
        listen 80;
        server_name prod.hmallleasing.com;
        root /code/prod;

        location / {
            index  index.html index.htm;
        }
}

server {
        listen 80;
        server_name prod-api.hmallleasing.com;

        location / {
                proxy_set_header Host $http_host;
                proxy_set_header X-Real-IP $remote_addr;
                proxy_set_header REMOTE-HOST $remote_addr;
                proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                proxy_pass http://gateway-svc.prod.svc.cluster.local:8080;
        }
}

7.9.2 Create the ConfigMap

kubectl create configmap nf-flms-ui-conf --from-file=./prod.hmallleasing.com.conf -n prod
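
A quick check that the ConfigMap carries both server blocks before it is mounted into the Pod:

# kubectl get configmap nf-flms-ui-conf -n prod -o yaml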

7.9.3 Create nf-flms-ui

[root@k8s-master01 06-service-all]# cat 07-ui-deploy-ingress.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nf-flms-ui
  namespace: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nf-flms-ui
  template:
    metadata:
      labels:
        app: nf-flms-ui
    spec:
      imagePullSecrets:
      - name: harbor-admin
      containers:
      - name: nf-flms-ui
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/nf-flms-ui:v1.0
        ports:
        - containerPort: 80
        resources:
          limits:
            cpu: '1000m'
            memory: 1Gi
          requests:
            cpu: "200m"
            memory: "500Mi"
        readinessProbe:         # readiness probe: an unready Pod is removed from load balancing
          tcpSocket:
            port: 80
          initialDelaySeconds: 60
          periodSeconds: 10
          timeoutSeconds: 10
        livenessProbe:          # liveness probe: the container is restarted if it fails
          tcpSocket:
            port: 80
          initialDelaySeconds: 60
          periodSeconds: 10
          timeoutSeconds: 10
        volumeMounts:
        - name: ngxconfs
          mountPath: /etc/nginx/conf.d/
        - name: tz-config
          mountPath: /usr/share/zoneinfo/Asia/Shanghai
        - name: tz-config
          mountPath: /etc/localtime
        - name: timezone
          mountPath: /etc/timezone
        - name: log
          mountPath: /var/log/nginx/    # log directory of the business container (Nginx)
      - name: filebeat
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/filebeat_sidecar:7.17.6
        imagePullPolicy: Always
        volumeMounts:
        - name: log
          mountPath: /logu    # single-line logs (the Nginx access/error logs) are collected from this path
        - name: tz-config
          mountPath: /usr/share/zoneinfo/Asia/Shanghai
        - name: tz-config
          mountPath: /etc/localtime
        - name: timezone
          mountPath: /etc/timezone
        env:
        - name: ENV
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: PodIP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: Node
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: PROJECT_NAME
          value: "nf-flms-ui"
        - name: KAFKA_HOSTS
          value: '"kafka-0.kafka-svc.logging:9092","kafka-1.kafka-svc.logging:9092","kafka-2.kafka-svc.logging:9092"'
      volumes:
      - name: ngxconfs
        configMap:
          name: nf-flms-ui-conf
      - name: tz-config
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
          type: ""
      - name: timezone
        hostPath:
          path: /etc/timezone
          type: ""
      - name: log
        emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  name: nf-flms-ui-svc
  namespace: prod
spec:
  selector:
    app: nf-flms-ui
  ports:
  - port: 80
    targetPort: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nf-flms-ui-ingress
  namespace: prod
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "false"    # disable forced HTTPS redirect
spec:
  ingressClassName: "nginx"
  rules:
  - host: "prod.hmallleasing.com"
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: nf-flms-ui-svc
            port:
              number: 80
  tls:                  #https
  - hosts:
    - prod.hmallleasing.com
    secretName: "prod-api.hmallleasig.com"   #配置默认证书可不添加secretName

7.9.4 Check the Kafka Topic

1、Check whether the corresponding topic exists

1.png

2、Click the corresponding Preview to view the latest data in the topic

2.png
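
Besides the UI, the topics can also be checked from inside a broker pod. A sketch, assuming the brokers run as a StatefulSet with pods kafka-0..kafka-2 in the logging namespace, that the Kafka CLI tools live under /opt/kafka/bin in the image, and that the sidecar names the topic app-prod-nf-flms-ui following the app-<env>-<project> pattern implied by the Logstash topics_pattern below; adjust these to the actual image and naming:

# kubectl exec -it kafka-0 -n logging -- /opt/kafka/bin/kafka-topics.sh --bootstrap-server localhost:9092 --list
# kubectl exec -it kafka-0 -n logging -- /opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic app-prod-nf-flms-ui --from-beginning --max-messages 5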

# 8. Deploy Logstash for the Production Environment

# 8.1 Pull the Image
# docker pull docker.elastic.co/logstash/logstash-oss:7.17.6
# docker tag docker.elastic.co/logstash/logstash-oss:7.17.6 registry.cn-hangzhou.aliyuncs.com/kubernetes_public/logstash-oss:7.17.6
# docker push registry.cn-hangzhou.aliyuncs.com/kubernetes_public/logstash-oss:7.17.6
# 8.2 Write the Logstash Configuration
[root@k8s-master01 conf]# cat logstash-prod.conf 
input {
    kafka {
        bootstrap_servers => "kafka-0.kafka-svc:9092,kafka-1.kafka-svc:9092,kafka-2.kafka-svc:9092"
        group_id => "logstash-prod"      # 消费者组名称
        consumer_threads => "3"          # 理想情况下,配置与分区数一样多的线程,实现均衡
        topics_pattern => "app-prod-.*"  # 通过正则表达式匹配要订阅的主题
    }
}

filter {
	json {
		source => "message"
	}
}

output {
    stdout {
        codec => rubydebug
    }
    elasticsearch {
        hosts => ["es-data-0.es-svc:9200","es-data-1.es-svc:9200"]
        index => "app-prod-%{+YYYY.MM.dd}"
        template_overwrite => true
    }
}
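
Before baking this file into a ConfigMap, its pipeline syntax can be validated locally with the same image (a sketch; --config.test_and_exit only parses the config and does not need to reach Kafka or Elasticsearch):

# docker run --rm -v $(pwd)/conf/logstash-prod.conf:/tmp/logstash.conf registry.cn-hangzhou.aliyuncs.com/kubernetes_public/logstash-oss:7.17.6 logstash -f /tmp/logstash.conf --config.test_and_exit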
# 8.3 Create the Production ConfigMap
kubectl create configmap logstash-prod-conf --from-file=logstash.conf=conf/logstash-prod.conf -n logging
# 8.4 Create the Production Logstash

1、Create logstash-svc

# cat 01-logstash-svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: logstash-svc
  namespace: logging
spec:
  clusterIP: None
  selector:
    app: logstash
  ports:
  - port: 9600
    targetPort: 9600

2、Create the Logstash StatefulSet

# cat 05-logstash-prod-sts.yaml 
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: logstash-prod
  namespace: logging
spec:
  serviceName: "logstash-svc"           # 使用此前创建的svc,则无需重复创建
  replicas: 1
  selector:
    matchLabels:
      app: logstash
      env: prod
  template:
    metadata:
      labels:
        app: logstash
        env: prod
    spec:
      imagePullSecrets:
      - name: harbor-admin
      containers:
      - name: logstash
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/logstash-oss:7.17.6
        args: ["-f","config/logstash.conf"]                     # 启动时指定加载的配置文件
        resources:
          limits:
            memory: 1024Mi
        env:
        - name: PIPELINE_WORKERS
          value: "2"
        - name: PIPELINE_BATCH_SIZE
          value: "10000"
        lifecycle:
          postStart:                                            # set the JVM heap size
            exec:
              command:
              - "/bin/bash"
              - "-c"
              - "sed -i -e '/^-Xms/c-Xms1024m' -e '/^-Xmx/c-Xmx1024m' /usr/share/logstash/config/jvm.options"
        volumeMounts:
        - name: data                                            # persistent data directory
          mountPath: /usr/share/logstash/data
        - name: conf
          mountPath: /usr/share/logstash/config/logstash.conf
          subPath: logstash.conf
        - name: tz-config
          mountPath: /usr/share/zoneinfo/Asia/Shanghai
        - name: tz-config
          mountPath: /etc/localtime
        - name: timezone
          mountPath: /etc/timezone
      volumes:
      - name: conf
        configMap:
          name: logstash-prod-conf
      - name: tz-config
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
          type: ""
      - name: timezone
        hostPath:
          path: /etc/timezone
          type: ""         
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteMany"]
      storageClassName: "nfs-storage"
      resources:
        requests:
          storage: 5Gi
# 8.5 Apply the Manifests
[root@k8s-master01 08-logstash]# kubectl apply -f 01-logstash-svc.yaml 
[root@k8s-master01 08-logstash]# kubectl apply -f 05-logstash-prod-sts.yaml
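
Once applied, the pod and its pipeline startup can be checked with the labels defined in the StatefulSet above:

# kubectl get pods -n logging -l app=logstash,env=prod
# kubectl logs -f logstash-prod-0 -n logging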
# 8.6 Check the ES Production Indices

1.png
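
The same check can be scripted against one of the data nodes referenced in the Logstash output section (assuming the pod is named es-data-0 and curl is available inside the Elasticsearch image):

# kubectl exec -it es-data-0 -n logging -- curl -s "http://localhost:9200/_cat/indices/app-prod-*?v"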

# 9. Deploy Logstash for the Test Environment

# 9.1 Pull the Image
# docker pull docker.elastic.co/logstash/logstash-oss:7.17.6
# docker tag docker.elastic.co/logstash/logstash-oss:7.17.6 registry.cn-hangzhou.aliyuncs.com/kubernetes_public/logstash-oss:7.17.6
# docker push registry.cn-hangzhou.aliyuncs.com/kubernetes_public/logstash-oss:7.17.6
# 9.2 Write the Logstash Configuration
[root@k8s-master01 08-logstash]# cat conf/logstash-test.conf 
input {
    kafka {
        bootstrap_servers => "kafka-0.kafka-svc:9092,kafka-1.kafka-svc:9092,kafka-2.kafka-svc:9092"
        group_id => "logstash-test"      # 消费者组名称
        consumer_threads => "3"          # 理想情况下,配置与分区数一样多的线程,实现均衡
        topics_pattern => "app-test-.*"  # 通过正则表达式匹配要订阅的主题
    }
}

filter {
	json {
		source => "message"
	}
}

output {
    stdout {
        codec => rubydebug
    }
    elasticsearch {
        hosts => ["es-data-0.es-svc:9200","es-data-1.es-svc:9200"]
        index => "app-test-%{+YYYY.MM.dd}"
        template_overwrite => true
    }
}
# 9.3 Create the Test ConfigMap
kubectl create configmap logstash-test-conf --from-file=logstash.conf=conf/logstash-test.conf -n logging
# 9.4 Create the Test Logstash

1、Create logstash-svc

# cat 01-logstash-svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: logstash-svc
  namespace: logging
spec:
  clusterIP: None
  selector:
    app: logstash
  ports:
  - port: 9600
    targetPort: 9600

2、Create the Logstash StatefulSet

[root@k8s-master01 08-logstash]# cat 03-logstash-test-sts.yaml 
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: logstash-test
  namespace: logging
spec:
  serviceName: "logstash-svc"           # 使用此前创建的svc,则无需重复创建
  replicas: 1
  selector:
    matchLabels:
      app: logstash
      env: test
  template:
    metadata:
      labels:
        app: logstash
        env: test
    spec:
      imagePullSecrets:
      - name: harbor-admin
      containers:
      - name: logstash
        image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/logstash-oss:7.17.6
        args: ["-f","config/logstash.conf"]                     # 启动时指定加载的配置文件
        resources:
          limits:
            memory: 1024Mi
        env:
        - name: PIPELINE_WORKERS
          value: "2"
        - name: PIPELINE_BATCH_SIZE
          value: "10000"
        lifecycle:
          postStart:                                            # set the JVM heap size
            exec:
              command:
              - "/bin/bash"
              - "-c"
              - "sed -i -e '/^-Xms/c-Xms1024m' -e '/^-Xmx/c-Xmx1024m' /usr/share/logstash/config/jvm.options"
        volumeMounts:
        - name: data                                            # persistent data directory
          mountPath: /usr/share/logstash/data
        - name: conf
          mountPath: /usr/share/logstash/config/logstash.conf
          subPath: logstash.conf
        - name: tz-config
          mountPath: /usr/share/zoneinfo/Asia/Shanghai
        - name: tz-config
          mountPath: /etc/localtime
        - name: timezone
          mountPath: /etc/timezone          
      volumes:
      - name: conf
        configMap:
          name: logstash-test-conf
      - name: tz-config
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
          type: ""
      - name: timezone
        hostPath:
          path: /etc/timezone
          type: ""          
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteMany"]
      storageClassName: "nfs-storage"
      resources:
        requests:
          storage: 5Gi
# 9.5 Apply the Manifests
[root@k8s-master01 08-logstash]# kubectl apply -f 01-logstash-svc.yaml 
[root@k8s-master01 08-logstash]# kubectl apply -f 03-logstash-test-sts.yaml
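
To confirm the test pipeline is actually consuming from Kafka, the lag of the logstash-test consumer group can be inspected from a broker pod (same pod-name and tool-path assumptions as in 7.9.4):

# kubectl exec -it kafka-0 -n logging -- /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --group logstash-test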
# 9.6 Check the ES Test Indices

1.png

# 10. Displaying Data in Kibana

# 10.1 Add the Production Index Pattern

2.png
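
The index pattern can also be created without the UI, via Kibana's saved objects API. A sketch assuming Kibana is exposed as a Service reachable at kibana-svc.logging:5601 and that the documents carry an @timestamp field (both are assumptions; adjust to the actual deployment):

# curl -X POST "http://kibana-svc.logging:5601/api/saved_objects/index-pattern/app-prod" -H "kbn-xsrf: true" -H "Content-Type: application/json" -d '{"attributes": {"title": "app-prod-*", "timeFieldName": "@timestamp"}}'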

# 10.2 View Production Data

Kibana -> Discover

3.png

① Click the production index pattern -> ② select the fields to display -> ③ use filters to narrow down the project -> ④ choose the time range

4.png

5.png

6.png
