# 消费租赁项目 Kubernetes 基于 ELK 日志分析与实践
![Snipaste_2025-05-25_13-43-46.jpg]()
# 一、ELK 创建 Namespace 和 Secrets
# 二、交付 Zookeeper 集群至 K8S
# 2.1 制作 ZK 集群镜像
# 2.1.1 Dockerfile
| # Zookeeper 3.8.4 image: OpenJDK 8 JRE base; config is rendered at runtime by entrypoint.sh |
| FROM openjdk:8-jre |
| |
| |
| # Unpack the Zookeeper distribution and ship a templated zoo.cfg (placeholders filled at start) |
| ENV VERSION=3.8.4 |
| ADD ./apache-zookeeper-${VERSION}-bin.tar.gz / |
| ADD ./zoo.cfg /apache-zookeeper-${VERSION}-bin/conf |
| |
| |
| # Rename to a version-independent install path |
| RUN mv /apache-zookeeper-${VERSION}-bin /zookeeper |
| |
| |
| ADD ./entrypoint.sh /entrypoint.sh |
| |
| |
| # 2181 client, 2888 leader/follower traffic, 3888 leader election |
| EXPOSE 2181 2888 3888 |
| |
| |
| # entrypoint renders zoo.cfg from env vars, writes myid, then runs ZK in the foreground |
| CMD ["/bin/bash","/entrypoint.sh"] |
# 2.1.2 zoo.cfg
| |
| |
| tickTime={ZOOK_TICKTIME} |
| |
| |
| initLimit={ZOOK_INIT_LIMIT} |
| |
| |
| syncLimit={ZOOK_SYNC_LIMIT} |
| |
| |
| dataDir={ZOOK_DATA_DIR} |
| |
| |
| dataLogDir={ZOOK_LOG_DIR} |
| |
| |
| clientPort={ZOOK_CLIENT_PORT} |
| |
| |
| maxClientCnxns={ZOOK_MAX_CLIENT_CNXNS} |
| |
| |
| 4lw.commands.whitelist=* |
| |
| |
# 2.1.3 entrypoint
| # entrypoint.sh — render zoo.cfg from env vars (with defaults), write myid, start ZK. |
| ZOOK_BIN_DIR=/zookeeper/bin |
| ZOOK_CONF_DIR=/zookeeper/conf/zoo.cfg |
| |
| |
| # Quote the sed expressions and the target path so empty values and special |
| # characters in the env vars cannot break word-splitting. |
| sed -i "s@{ZOOK_TICKTIME}@${ZOOK_TICKTIME:-2000}@g" "${ZOOK_CONF_DIR}" |
| sed -i "s@{ZOOK_INIT_LIMIT}@${ZOOK_INIT_LIMIT:-10}@g" "${ZOOK_CONF_DIR}" |
| sed -i "s@{ZOOK_SYNC_LIMIT}@${ZOOK_SYNC_LIMIT:-5}@g" "${ZOOK_CONF_DIR}" |
| sed -i "s@{ZOOK_DATA_DIR}@${ZOOK_DATA_DIR:-/data}@g" "${ZOOK_CONF_DIR}" |
| sed -i "s@{ZOOK_LOG_DIR}@${ZOOK_LOG_DIR:-/logs}@g" "${ZOOK_CONF_DIR}" |
| sed -i "s@{ZOOK_CLIENT_PORT}@${ZOOK_CLIENT_PORT:-2181}@g" "${ZOOK_CONF_DIR}" |
| sed -i "s@{ZOOK_MAX_CLIENT_CNXNS}@${ZOOK_MAX_CLIENT_CNXNS:-60}@g" "${ZOOK_CONF_DIR}" |
| |
| |
| # Append cluster membership; ZOOK_SERVERS is a space-separated list of |
| # "server.N=host:2888:3888" entries, so unquoted expansion here is intentional. |
| for server in ${ZOOK_SERVERS} |
| do |
| echo "${server}" >> "${ZOOK_CONF_DIR}" |
| done |
| |
| |
| # myid = trailing StatefulSet ordinal of the hostname + 1 (zookeeper-0 -> 1). |
| # ZOOK_MYID is always assigned above, so no fallback default is needed. |
| ZOOK_MYID=$(( $(hostname | sed 's#.*-##g') + 1 )) |
| echo "${ZOOK_MYID}" > "${ZOOK_DATA_DIR:-/data}/myid" |
| |
| |
| # Run in the foreground so the container's PID 1 is the ZK server itself. |
| cd "${ZOOK_BIN_DIR}" || exit 1 |
| ./zkServer.sh start-foreground |
# 2.1.4 构建镜像并推送仓库
# 2.2 迁移 zookeeper 至 K8S
# 2.2.1 zookeeper-headless
| # Headless Service (clusterIP: None): gives every ZK Pod a stable DNS name |
| # (zookeeper-N.zookeeper-svc.logging.svc.cluster.local) for peer discovery. |
| apiVersion: v1 |
| kind: Service |
| metadata: |
| name: zookeeper-svc |
| namespace: logging |
| spec: |
| clusterIP: None |
| selector: |
| app: zookeeper |
| ports: |
| - name: client |
| port: 2181 |
| targetPort: 2181 |
| # fixed typo: "leader-follwer" -> "leader-follower" (cosmetic; targetPort is numeric) |
| - name: leader-follower |
| port: 2888 |
| targetPort: 2888 |
| - name: selection |
| port: 3888 |
| targetPort: 3888 |
# 2.2.2 zookeeper-sts
| [root@k8s-master01 01-zookeeper] |
| # StatefulSet: 3 ZK replicas, one per node (anti-affinity), stable identity via zookeeper-svc. |
| apiVersion: apps/v1 |
| kind: StatefulSet |
| metadata: |
| name: zookeeper |
| namespace: logging |
| spec: |
| serviceName: "zookeeper-svc" |
| replicas: 3 |
| selector: |
| matchLabels: |
| app: zookeeper |
| template: |
| metadata: |
| labels: |
| app: zookeeper |
| spec: |
| affinity: |
| podAntiAffinity: |
| requiredDuringSchedulingIgnoredDuringExecution: |
| - labelSelector: |
| matchExpressions: |
| - key: app |
| operator: In |
| values: ["zookeeper"] |
| topologyKey: "kubernetes.io/hostname" |
| imagePullSecrets: |
| - name: harbor-admin |
| containers: |
| - name: zookeeper |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/zookeeper:3.8.4 |
| imagePullPolicy: Always |
| ports: |
| - name: client |
| containerPort: 2181 |
| # fixed typo: "leader-follwer" -> "leader-follower" |
| - name: leader-follower |
| containerPort: 2888 |
| - name: selection |
| containerPort: 3888 |
| env: |
| - name: ZOOK_SERVERS |
| value: "server.1=zookeeper-0.zookeeper-svc.logging.svc.cluster.local:2888:3888 server.2=zookeeper-1.zookeeper-svc.logging.svc.cluster.local:2888:3888 server.3=zookeeper-2.zookeeper-svc.logging.svc.cluster.local:2888:3888" |
| readinessProbe: |
| exec: |
| command: |
| - "/bin/bash" |
| - "-c" |
| - '[[ "$(/zookeeper/bin/zkServer.sh status 2>/dev/null|grep 2181)" ]] && exit 0 || exit 1' |
| initialDelaySeconds: 5 |
| livenessProbe: |
| exec: |
| command: |
| - "/bin/bash" |
| - "-c" |
| - '[[ "$(/zookeeper/bin/zkServer.sh status 2>/dev/null|grep 2181)" ]] && exit 0 || exit 1' |
| initialDelaySeconds: 5 |
| volumeMounts: |
| - name: data |
| mountPath: /data |
| subPath: data |
| - name: data |
| mountPath: /logs |
| subPath: logs |
| - name: tz-config |
| mountPath: /usr/share/zoneinfo/Asia/Shanghai |
| - name: tz-config |
| mountPath: /etc/localtime |
| - name: timezone |
| mountPath: /etc/timezone |
| volumes: |
| - name: tz-config |
| hostPath: |
| path: /usr/share/zoneinfo/Asia/Shanghai |
| type: "" |
| - name: timezone |
| hostPath: |
| path: /etc/timezone |
| type: "" |
| volumeClaimTemplates: |
| - metadata: |
| name: data |
| spec: |
| accessModes: ["ReadWriteMany"] |
| storageClassName: "nfs-storage" |
| resources: |
| requests: |
| storage: 5Gi |
# 2.2.3 更新资源清单
| [root@k8s-master01 01-zookeeper] |
| [root@k8s-master01 01-zookeeper] |
| [root@k8s-master01 01-zookeeper] |
| NAME READY STATUS RESTARTS AGE |
| zookeeper-0 1/1 Running 0 17m |
| zookeeper-1 1/1 Running 0 14m |
| zookeeper-2 1/1 Running 0 11m |
# 2.2.4 检查 zookeeper 集群状态
| |
| ZooKeeper JMX enabled by default |
| Using config: /zookeeper/bin/../conf/zoo.cfg |
| Client port found: 2181. Client address: localhost. Client SSL: false. |
| Mode: follower |
| ZooKeeper JMX enabled by default |
| Using config: /zookeeper/bin/../conf/zoo.cfg |
| Client port found: 2181. Client address: localhost. Client SSL: false. |
| Mode: leader |
| ZooKeeper JMX enabled by default |
| Using config: /zookeeper/bin/../conf/zoo.cfg |
| Client port found: 2181. Client address: localhost. Client SSL: false. |
| Mode: follower |
# 2.2.5 连接 Zookeeper 集群
| [root@k8s-master01 01-zookeeper] |
| |
| [zk: zookeeper-svc(CONNECTED) 0] create /hello oldxu |
| Created /hello |
| [zk: zookeeper-svc(CONNECTED) 1] get /hello |
| oldxu |
# 三、 交付 Kafka 集群至 K8S
# 3.1 制作 Kafka 集群镜像
# 3.1.1 Dockerfile
| |
| FROM openjdk:8-jre |
| |
| |
| RUN /bin/cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ |
| echo 'Asia/Shanghai' > /etc/timezone |
| |
| |
| ENV VERSION=2.12-2.2.0 |
| ADD ./kafka_${VERSION}.tgz / |
| ADD ./server.properties /kafka_${VERSION}/config/server.properties |
| |
| |
| RUN mv /kafka_${VERSION} /kafka |
| |
| |
| ADD ./entrypoint.sh /entrypoint.sh |
| |
| |
| EXPOSE 9092 9999 |
| |
| |
| CMD ["/bin/bash","/entrypoint.sh"] |
# 3.1.2 server.properties
| |
| |
| |
| broker.id={BROKER_ID} |
| |
| |
| |
| listeners=PLAINTEXT://{LISTENERS}:9092 |
| |
| |
| num.network.threads=3 |
| |
| |
| num.io.threads=8 |
| |
| |
| socket.send.buffer.bytes=102400 |
| |
| |
| socket.receive.buffer.bytes=102400 |
| |
| |
| socket.request.max.bytes=104857600 |
| |
| |
| |
| log.dirs={KAFKA_DATA_DIR} |
| |
| |
| num.partitions=1 |
| |
| |
| default.replication.factor=3 |
| |
| |
| num.recovery.threads.per.data.dir=1 |
| |
| |
| |
| log.flush.interval.messages=10000 |
| |
| |
| log.flush.interval.ms=1000 |
| |
| |
| |
| log.retention.hours=168 |
| |
| |
| |
| |
| |
| log.segment.bytes=1073741824 |
| |
| |
| log.retention.check.interval.ms=300000 |
| |
| |
| |
| zookeeper.connect={ZOOK_SERVERS} |
| |
| |
| zookeeper.connection.timeout.ms=6000 |
# 3.1.3 entrypoint
| # entrypoint.sh — render server.properties from env vars, enable JMX, start Kafka. |
| KAFKA_DIR=/kafka |
| KAFKA_CONF=/kafka/config/server.properties |
| |
| |
| # broker.id = trailing StatefulSet ordinal of the hostname + 1 (kafka-0 -> 1); |
| # the listener address is this Pod's IP. |
| BROKER_ID=$(( $(hostname | sed 's#.*-##g') + 1 )) |
| LISTENERS=$(hostname -i) |
| |
| |
| # Quote the sed expressions and the target path so empty values cannot break |
| # word-splitting; ZOOK_SERVERS has no default and must be provided by the Pod. |
| sed -i "s@{BROKER_ID}@${BROKER_ID}@g" "${KAFKA_CONF}" |
| sed -i "s@{LISTENERS}@${LISTENERS}@g" "${KAFKA_CONF}" |
| sed -i "s@{KAFKA_DATA_DIR}@${KAFKA_DATA_DIR:-/data}@g" "${KAFKA_CONF}" |
| sed -i "s@{ZOOK_SERVERS}@${ZOOK_SERVERS}@g" "${KAFKA_CONF}" |
| |
| |
| # Expose JMX on 9999 (consumed by efak) by injecting the export right after |
| # KAFKA_HEAP_OPTS in the stock start script, then run Kafka in the foreground. |
| cd "${KAFKA_DIR}/bin" || exit 1 |
| sed -i '/export KAFKA_HEAP_OPTS/a export JMX_PORT="9999"' kafka-server-start.sh |
| ./kafka-server-start.sh ../config/server.properties |
# 3.1.4 构建镜像并推送仓库
# 3.2 迁移 Kafka 集群至 K8S
# 3.2.1 kafka-headless
| |
| apiVersion: v1 |
| kind: Service |
| metadata: |
| name: kafka-svc |
| namespace: logging |
| spec: |
| clusterIP: None |
| selector: |
| app: kafka |
| ports: |
| - name: client |
| port: 9092 |
| targetPort: 9092 |
| - name: jmx |
| port: 9999 |
| targetPort: 9999 |
# 3.2.2 kafka-sts
| |
| apiVersion: apps/v1 |
| kind: StatefulSet |
| metadata: |
| name: kafka |
| namespace: logging |
| spec: |
| serviceName: "kafka-svc" |
| replicas: 3 |
| selector: |
| matchLabels: |
| app: kafka |
| template: |
| metadata: |
| labels: |
| app: kafka |
| spec: |
| affinity: |
| podAntiAffinity: |
| requiredDuringSchedulingIgnoredDuringExecution: |
| - labelSelector: |
| matchExpressions: |
| - key: app |
| operator: In |
| values: ["kafka"] |
| topologyKey: "kubernetes.io/hostname" |
| imagePullSecrets: |
| - name: harbor-admin |
| containers: |
| - name: kafka |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/kafka:2.12.2 |
| imagePullPolicy: Always |
| ports: |
| - name: client |
| containerPort: 9092 |
| - name: jmxport |
| containerPort: 9999 |
| env: |
| - name: ZOOK_SERVERS |
| value: "zookeeper-0.zookeeper-svc:2181,zookeeper-1.zookeeper-svc:2181,zookeeper-2.zookeeper-svc:2181" |
| readinessProbe: |
| tcpSocket: |
| port: 9092 |
| initialDelaySeconds: 5 |
| livenessProbe: |
| tcpSocket: |
| port: 9092 |
| initialDelaySeconds: 5 |
| volumeMounts: |
| - name: data |
| mountPath: /data |
| volumeClaimTemplates: |
| - metadata: |
| name: data |
| spec: |
| accessModes: ["ReadWriteMany"] |
| storageClassName: "nfs-storage" |
| resources: |
| requests: |
| storage: 5Gi |
# 3.2.3 更新资源清单
| [root@k8s-master01 02-kafka] |
| [root@k8s-master01 02-kafka] |
| [root@k8s-master01 02-kafka] |
| NAME READY STATUS RESTARTS AGE |
| kafka-0 1/1 Running 0 5m49s |
| kafka-1 1/1 Running 0 4m43s |
| kafka-2 1/1 Running 0 3m40s |
| |
| |
| [root@k8s-master01 02-kafka] |
| root@zookeeper-0:/ |
| [zk: localhost:2181(CONNECTED) 2] get /brokers/ids/1 |
| {"listener_security_protocol_map":{"PLAINTEXT":"PLAINTEXT"},"endpoints":["PLAINTEXT://172.16.85.201:9092"],"jmx_port":9999,"host":"172.16.85.201","timestamp":"1748162470218","port":9092,"version":4} |
| [zk: localhost:2181(CONNECTED) 3] get /brokers/ids/2 |
| {"listener_security_protocol_map":{"PLAINTEXT":"PLAINTEXT"},"endpoints":["PLAINTEXT://172.16.58.205:9092"],"jmx_port":9999,"host":"172.16.58.205","timestamp":"1748162532658","port":9092,"version":4} |
| [zk: localhost:2181(CONNECTED) 4] get /brokers/ids/3 |
| {"listener_security_protocol_map":{"PLAINTEXT":"PLAINTEXT"},"endpoints":["PLAINTEXT://172.16.195.1:9092"],"jmx_port":9999,"host":"172.16.195.1","timestamp":"1748162649250","port":9092,"version":4} |
# 3.2.4 检查 Kafka 集群
| 1.创建一个topic |
| root@kafka-0:/ |
| |
| 2.模拟消息发布 |
| root@kafka-1:/ |
| >hello kubernetes |
| >hello world |
| |
| 3.模拟消息订阅 |
| root@kafka-2:/ |
| hello kubernetes |
| hello world |
# 四、交付 efak 至 K8S
# 4.1 制作 efak 镜像
# 4.1.1 Dockerfile
| [root@manager 03-efak] |
| FROM openjdk:8 |
| |
| |
| RUN /bin/cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ |
| echo 'Asia/Shanghai' > /etc/timezone |
| |
| |
| ENV VERSION=3.0.1 |
| ADD ./efak-web-${VERSION}-bin.tar.gz / |
| ADD ./system-config.properties /efak-web-${VERSION}/conf/system-config.properties |
| |
| |
| RUN mv /efak-web-${VERSION} /efak |
| |
| |
| ENV KE_HOME=/efak |
| ENV PATH=$PATH:$KE_HOME/bin |
| |
| |
| ADD ./entrypoint.sh /entrypoint.sh |
| |
| |
| EXPOSE 8048 |
| |
| |
| CMD ["/bin/bash","/entrypoint.sh"] |
# 4.1.2 system-config
| |
| |
| |
| |
| efak.zk.cluster.alias=cluster1 |
| cluster1.zk.list={ZOOK_SERVERS} |
| |
| |
| |
| |
| cluster1.efak.broker.size=20 |
| |
| |
| |
| |
| kafka.zk.limit.size=32 |
| |
| |
| |
| |
| efak.webui.port=8048 |
| |
| |
| |
| |
| cluster1.efak.offset.storage=kafka |
| |
| |
| |
| |
| cluster1.efak.jmx.uri=service:jmx:rmi:///jndi/rmi://%s/jmxrmi |
| |
| |
| |
| |
| efak.metrics.charts=true |
| efak.metrics.retain=15 |
| |
| |
| |
| |
| efak.sql.topic.records.max=5000 |
| efak.sql.topic.preview.records.max=10 |
| |
| |
| |
| |
| efak.topic.token=keadmin |
| |
| |
| |
| |
| efak.driver=org.sqlite.JDBC |
| efak.url=jdbc:sqlite:{EFAK_DATA_DIR}/db/ke.db |
| efak.username=root |
| efak.password=www.kafka-eagle.org |
# 4.1.3 entrypoint
| # entrypoint.sh — render system-config.properties from env vars and start EFAK. |
| |
| |
| EFAK_DIR=/efak |
| EFAK_CONF=/efak/conf/system-config.properties |
| |
| |
| # Substitute placeholders: sqlite DB directory and the Zookeeper cluster list. |
| sed -i s@{EFAK_DATA_DIR}@${EFAK_DIR}@g ${EFAK_CONF} |
| sed -i s@{ZOOK_SERVERS}@${ZOOK_SERVERS}@g ${EFAK_CONF} |
| |
| |
| # ke.sh daemonizes, so tail the console log to keep PID 1 alive. |
| # NOTE(review): tail -f exits if ke_console.out does not exist yet — confirm |
| # ke.sh creates the log file before returning (otherwise use tail -F). |
| ${EFAK_DIR}/bin/ke.sh start |
| tail -f ${EFAK_DIR}/logs/ke_console.out |
# 4.1.4 构建镜像并推送仓库
# 4.2 迁移 efak 至 K8S
# 4.2.1 efak-deploy
| |
| apiVersion: apps/v1 |
| kind: Deployment |
| metadata: |
| name: efak |
| namespace: logging |
| spec: |
| replicas: 1 |
| selector: |
| matchLabels: |
| app: efak |
| template: |
| metadata: |
| labels: |
| app: efak |
| spec: |
| imagePullSecrets: |
| - name: harbor-admin |
| containers: |
| - name: efak |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/efak:3.0 |
| imagePullPolicy: Always |
| ports: |
| - name: http |
| containerPort: 8048 |
| env: |
| - name: ZOOK_SERVERS |
| value: "zookeeper-0.zookeeper-svc:2181,zookeeper-1.zookeeper-svc:2181,zookeeper-2.zookeeper-svc:2181" |
# 4.2.2 efak-service
| |
| apiVersion: v1 |
| kind: Service |
| metadata: |
| name: efak-svc |
| namespace: logging |
| spec: |
| selector: |
| app: efak |
| ports: |
| - port: 8048 |
| targetPort: 8048 |
# 4.2.3 efak-ingress
| |
| apiVersion: networking.k8s.io/v1 |
| kind: Ingress |
| metadata: |
| name: efak-ingress |
| namespace: logging |
| spec: |
| ingressClassName: "nginx" |
| rules: |
| - host: "efak.hmallleasing.com" |
| http: |
| paths: |
| - path: / |
| pathType: Prefix |
| backend: |
| service: |
| name: efak-svc |
| port: |
| number: 8048 |
# 4.2.4 更新资源清单
| [root@k8s-master01 03-efak] |
| [root@k8s-master01 03-efak] |
| [root@k8s-master01 03-efak] |
# 4.2.5 访问 efak
1、初始用户名密码 admin 123456
![1.png]()
2、查看 Topics
![2.png]()
3、查看 kafka 集群状态
![3.png]()
4、查看 Zookeeper 集群状态
![4.png]()
# 五、交付 Elastic 集群
- ES 集群是由多个节点组成的,通过 cluster.name 设置 ES 集群名称,同时用于区分其它的 ES 集群。
- 每个节点通过 node.name 参数来设定所在集群的节点名称。
- 节点使用 discovery.seed_hosts 参数来设定集群节点的列表。
- 集群在第一次启动时,需要初始化,同时需要指定参与选举的 master 节点 IP,或节点名称。
- 每个节点可以通过 node.master:true 设定为 master 角色,通过 node.data:true 设定为 data 角色。
# 5.1 下载 elastic 镜像
# 5.2 交付 ES-Service
创建 es-headless Service,为每个 ES Pod 设定固定的 DNS 名称,无论它是 Master 或是 Data,抑或是 Coordinating
| |
| apiVersion: v1 |
| kind: Service |
| metadata: |
| name: es-svc |
| namespace: logging |
| spec: |
| selector: |
| app: es |
| clusterIP: None |
| ports: |
| - name: cluster |
| port: 9200 |
| targetPort: 9200 |
| - name: transport |
| port: 9300 |
| targetPort: 9300 |
# 5.3 交付 ES-Master 节点
ES 无法使用 root 直接启动,需要授权数据目录 UID=1000,同时还需要持久化 /usr/share/elasticsearch/data ;
ES 所有节点都需要设定 vm.max_map_count 内核参数以及 ulimit;
ES 启动是通过 ENV 环境变量传参来完成的;
| |
| apiVersion: apps/v1 |
| kind: StatefulSet |
| metadata: |
| name: es-master |
| namespace: logging |
| spec: |
| serviceName: "es-svc" |
| replicas: 3 |
| selector: |
| matchLabels: |
| app: es |
| role: master |
| template: |
| metadata: |
| labels: |
| app: es |
| role: master |
| spec: |
| imagePullSecrets: |
| - name: harbor-admin |
| affinity: |
| podAntiAffinity: |
| requiredDuringSchedulingIgnoredDuringExecution: |
| - labelSelector: |
| matchExpressions: |
| - key: app |
| operator: In |
| values: ["es"] |
| - key: role |
| operator: In |
| values: ["master"] |
| topologyKey: "kubernetes.io/hostname" |
| initContainers: |
| - name: fix-permissions |
| image: busybox |
| command: ["sh","-c","chown -R 1000:1000 /usr/share/elasticsearch/data ; sysctl -w vm.max_map_count=262144; ulimit -n 65536"] |
| securityContext: |
| privileged: true |
| volumeMounts: |
| - name: data |
| mountPath: /usr/share/elasticsearch/data |
| containers: |
| - name: es |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/elasticsearch:7.17.6 |
| resources: |
| limits: |
| cpu: 1000m |
| memory: 4096Mi |
| requests: |
| cpu: 300m |
| memory: 1024Mi |
| ports: |
| - name: cluster |
| containerPort: 9200 |
| - name: transport |
| containerPort: 9300 |
| volumeMounts: |
| - name: data |
| mountPath: /usr/share/elasticsearch/data |
| - name: tz-config |
| mountPath: /usr/share/zoneinfo/Asia/Shanghai |
| - name: tz-config |
| mountPath: /etc/localtime |
| - name: timezone |
| mountPath: /etc/timezone |
| env: |
| - name: ES_JAVA_OPTS |
| value: "-Xms1g -Xmx1g" |
| - name: cluster.name |
| value: es-cluster |
| - name: node.name |
| valueFrom: |
| fieldRef: |
| fieldPath: metadata.name |
| - name: node.master |
| value: "true" |
| - name: node.data |
| value: "false" |
| - name: discovery.seed_hosts |
| value: "es-master-0.es-svc,es-master-1.es-svc,es-master-2.es-svc" |
| - name: cluster.initial_master_nodes |
| value: "es-master-0,es-master-1,es-master-2" |
| volumes: |
| - name: tz-config |
| hostPath: |
| path: /usr/share/zoneinfo/Asia/Shanghai |
| type: "" |
| - name: timezone |
| hostPath: |
| path: /etc/timezone |
| type: "" |
| volumeClaimTemplates: |
| - metadata: |
| name: data |
| spec: |
| accessModes: ["ReadWriteOnce"] |
| storageClassName: "nfs-storage" |
| resources: |
| requests: |
| storage: 5Gi |
# 5.4 交付 ES-Data 节点
ES 无法使用 root 直接启动,需要授权数据目录 UID=1000,同时还需要持久化 /usr/share/elasticsearch/data
ES 所有节点都需要设定 vm.max_map_count 内核参数以及 ulimit;
ES 启动是通过 ENV 环境变量传参来完成的
| |
| apiVersion: apps/v1 |
| kind: StatefulSet |
| metadata: |
| name: es-data |
| namespace: logging |
| spec: |
| serviceName: "es-svc" |
| replicas: 2 |
| selector: |
| matchLabels: |
| app: es |
| role: data |
| template: |
| metadata: |
| labels: |
| app: es |
| role: data |
| spec: |
| imagePullSecrets: |
| - name: harbor-admin |
| affinity: |
| podAntiAffinity: |
| requiredDuringSchedulingIgnoredDuringExecution: |
| - labelSelector: |
| matchExpressions: |
| - key: app |
| operator: In |
| values: ["es"] |
| - key: role |
| operator: In |
| values: ["data"] |
| topologyKey: "kubernetes.io/hostname" |
| initContainers: |
| - name: fix-permissions |
| image: busybox |
| command: ["sh","-c","chown -R 1000:1000 /usr/share/elasticsearch/data ; sysctl -w vm.max_map_count=262144; ulimit -n 65536"] |
| securityContext: |
| privileged: true |
| volumeMounts: |
| - name: data |
| mountPath: /usr/share/elasticsearch/data |
| containers: |
| - name: es |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/elasticsearch:7.17.6 |
| resources: |
| limits: |
| cpu: 1000m |
| memory: 4096Mi |
| requests: |
| cpu: 300m |
| memory: 1024Mi |
| ports: |
| - name: cluster |
| containerPort: 9200 |
| - name: transport |
| containerPort: 9300 |
| volumeMounts: |
| - name: data |
| mountPath: /usr/share/elasticsearch/data |
| - name: tz-config |
| mountPath: /usr/share/zoneinfo/Asia/Shanghai |
| - name: tz-config |
| mountPath: /etc/localtime |
| - name: timezone |
| mountPath: /etc/timezone |
| env: |
| - name: ES_JAVA_OPTS |
| value: "-Xms1g -Xmx1g" |
| - name: cluster.name |
| value: es-cluster |
| - name: node.name |
| valueFrom: |
| fieldRef: |
| fieldPath: metadata.name |
| - name: node.master |
| value: "false" |
| - name: node.data |
| value: "true" |
| - name: discovery.seed_hosts |
| value: "es-master-0.es-svc,es-master-1.es-svc,es-master-2.es-svc" |
| volumes: |
| - name: tz-config |
| hostPath: |
| path: /usr/share/zoneinfo/Asia/Shanghai |
| type: "" |
| - name: timezone |
| hostPath: |
| path: /etc/timezone |
| type: "" |
| volumeClaimTemplates: |
| - metadata: |
| name: data |
| spec: |
| accessModes: ["ReadWriteOnce"] |
| storageClassName: "nfs-storage" |
| resources: |
| requests: |
| storage: 5Gi |
# 5.5 更新资源清单
| [root@k8s-master01 04-elasticsearch] |
| [root@k8s-master01 04-elasticsearch] |
| [root@k8s-master01 04-elasticsearch] |
# 5.6 验证 ES 集群
| |
| |
| 172.16.58.229 |
| 172.16.122.191 |
| 172.16.195.21 |
| 172.16.122.129 |
| 172.16.32.164 |
| |
| |
| |
| { |
| "cluster_name" : "es-cluster", |
| "status" : "green", |
| "timed_out" : false, |
| "number_of_nodes" : 5, |
| "number_of_data_nodes" : 2, |
| "active_primary_shards" : 3, |
| "active_shards" : 6, |
| "relocating_shards" : 0, |
| "initializing_shards" : 0, |
| "unassigned_shards" : 0, |
| "delayed_unassigned_shards" : 0, |
| "number_of_pending_tasks" : 0, |
| "number_of_in_flight_fetch" : 0, |
| "task_max_waiting_in_queue_millis" : 0, |
| "active_shards_percent_as_number" : 100.0 |
| } |
| |
| |
| |
| 172.16.122.129 16 33 20 0.38 0.56 0.38 ilmr - es-master-2 |
| 172.16.58.229 66 33 22 0.64 0.66 0.44 ilmr * es-master-1 |
| 172.16.122.191 52 34 15 0.38 0.56 0.38 cdfhilrstw - es-data-0 |
| 172.16.195.21 38 35 19 0.38 0.53 0.36 cdfhilrstw - es-data-1 |
| 172.16.32.164 31 33 12 0.28 0.50 0.59 ilmr - es-master-0 |
# 六、交付 Kibana 可视化
# 6.1 下载 kibana 镜像
# 6.2 kibana-deploy
- Kibana 需要连接 ES 集群,通过 ELASTICSEARCH_HOSTS 变量来传递 ES 集群地址
- kibana 通过 I18N_LOCALE 来传递语言环境
- Kibana 通过 SERVER_PUBLICBASEURL 来传递服务访问的公开地址
| |
| apiVersion: apps/v1 |
| kind: Deployment |
| metadata: |
| name: kibana |
| namespace: logging |
| spec: |
| replicas: 1 |
| selector: |
| matchLabels: |
| app: kibana |
| template: |
| metadata: |
| labels: |
| app: kibana |
| spec: |
| imagePullSecrets: |
| - name: harbor-admin |
| containers: |
| - name: kibana |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/kibana:7.17.6 |
| resources: |
| limits: |
| cpu: 1000m |
| ports: |
| - containerPort: 5601 |
| env: |
| - name: ELASTICSEARCH_HOSTS |
| value: '["http://es-data-0.es-svc:9200","http://es-data-1.es-svc:9200"]' |
| - name: I18N_LOCALE |
| value: "zh-CN" |
| - name: SERVER_PUBLICBASEURL |
| value: "http://kibana.hmallleasing.com" |
| volumeMounts: |
| - name: tz-config |
| mountPath: /usr/share/zoneinfo/Asia/Shanghai |
| - name: tz-config |
| mountPath: /etc/localtime |
| - name: timezone |
| mountPath: /etc/timezone |
| volumes: |
| - name: tz-config |
| hostPath: |
| path: /usr/share/zoneinfo/Asia/Shanghai |
| type: "" |
| - name: timezone |
| hostPath: |
| path: /etc/timezone |
| type: "" |
# 6.3 kibana-svc
| |
| apiVersion: v1 |
| kind: Service |
| metadata: |
| name: kibana-svc |
| namespace: logging |
| spec: |
| selector: |
| app: kibana |
| ports: |
| - name: web |
| port: 5601 |
| targetPort: 5601 |
# 6.4 kibana-ingress
| |
| apiVersion: networking.k8s.io/v1 |
| kind: Ingress |
| metadata: |
| name: kibana-ingress |
| namespace: logging |
| spec: |
| ingressClassName: "nginx" |
| rules: |
| - host: "kibana.hmallleasing.com" |
| http: |
| paths: |
| - path: / |
| pathType: Prefix |
| backend: |
| service: |
| name: kibana-svc |
| port: |
| number: 5601 |
# 6.5 更新资源清单
| [root@k8s-master01 05-kibana] |
| [root@k8s-master01 05-kibana] |
| [root@k8s-master01 05-kibana] |
| |
| [root@k8s-master01 05-kibana] |
| NAME READY STATUS RESTARTS AGE |
| efak-5cdc74bf59-nrhb4 1/1 Running 0 5h33m |
| es-data-0 1/1 Running 0 16m |
| es-data-1 1/1 Running 0 15m |
| es-master-0 1/1 Running 0 17m |
| es-master-1 1/1 Running 0 15m |
| es-master-2 1/1 Running 0 12m |
| kafka-0 1/1 Running 0 5h39m |
| kafka-1 1/1 Running 0 5h39m |
| kafka-2 1/1 Running 0 5h38m |
| kibana-5ccc46864b-ndzx9 1/1 Running 0 118s |
| zookeeper-0 1/1 Running 0 5h42m |
| zookeeper-1 1/1 Running 0 5h42m |
| zookeeper-2 1/1 Running 0 5h41m |
# 6.6 访问 kibana
![1.png]()
# 七、filebeat-sidecar 收集业务应用日志
# 7.1 部署架构说明
对于那些能够将日志输出到本地文件的 Pod,我们可以使用 Sidecar 模式方式运行一个日志采集 Agent,对其进行单独收集日志。
![1.png]()
- 首先需要将 Pod 中的业务容器日志输出至本地文件,而后运行一个 Filebeat 边车容器,采集本地路径下的日志;
- Filebeat 容器需要传递如下变量;
- ENV:了解 Pod 属于隶属于哪个环境;
- PROJECT_NAME:为了后期能在单个索引中区分出不同的项目;
- PodIP:为了让用户清楚该 Pod 属于哪个 IP;
- Node:用于获取该 Pod 所处的节点;
- Logstash 根据不同的环境,拉取不同的 topic 数据,然后将数据存储至 ES 对应的索引中;
- Kibana 添加不同环境的 index pattern,而后选择对应环境不同的项目进行日志探索与展示;
# 7.2 Sidecar 部署思路
- 制作一个业务镜像,要求镜像输出日志至本地;
- 制作 Filebeat 镜像,配置 Input、output 等信息;
- 采用边车模式运行不同环境的 Pod,确保日志信息能输出至 Kafka 集群;
- 准备不同环境下 Logstash 配置文件,而后读取数据写入 ES 集群;
- 使用 kibana 添加索引,进行日志探索与展示;
# 7.3 制作 Filebeat 镜像
7.3.1 下载 filebeat
| curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.17.6-x86_64.rpm |
7.3.2 编写 Dockerfile
| |
| |
| FROM centos:7 |
| |
| |
| ENV VERSION=7.17.6 |
| ADD ./filebeat-${VERSION}-x86_64.rpm / |
| RUN rpm -ivh /filebeat-${VERSION}-x86_64.rpm && \ |
| rm -f /filebeat-${VERSION}-x86_64.rpm |
| |
| |
| ADD ./filebeat.yml /etc/filebeat/filebeat.yml |
| |
| |
| ADD ./entrypoint.sh /entrypoint.sh |
| RUN chmod +x /entrypoint.sh |
| |
| |
| CMD ["/bin/bash","-c","/entrypoint.sh"] |
7.3.3 编写 entrypoint
| # entrypoint.sh — substitute {PLACEHOLDER} tokens in filebeat.yml from env vars. |
| |
| |
| |
| Beat_Conf=/etc/filebeat/filebeat.yml |
| |
| # Defaults apply when the Pod does not set the variable (see Deployment env:). |
| sed -i s@{ENV}@${ENV:-test}@g ${Beat_Conf} |
| sed -i s@{PodIP}@${PodIP:-"no-ip"}@g ${Beat_Conf} |
| sed -i s@{Node}@${Node:-"none"}@g ${Beat_Conf} |
| sed -i s@{PROJECT_NAME}@${PROJECT_NAME:-"no-define"}@g ${Beat_Conf} |
| # Default multiline pattern "^\\\d{2}": presumably the extra backslash survives |
| # shell+sed so filebeat.yml ends up with ^\d{2} (lines starting with two |
| # digits, i.e. a timestamp) — TODO confirm the rendered config. |
| sed -i s@{MULTILINE}@${MULTILINE:-"^\\\d{2}"}@g ${Beat_Conf} |
| # KAFKA_HOSTS has no default: the broker list must be supplied by the Pod. |
| sed -i s@{KAFKA_HOSTS}@${KAFKA_HOSTS}@g ${Beat_Conf} |
| |
| |
| # Run in the foreground (-e logs to stderr) so the container stays up. |
| filebeat -e -c /etc/filebeat/filebeat.yml |
7.3.4 编写 filebeat 配置
{ENV}:用于定义环境的变量;
{PROJECT_NAME}:用于定义项目名称的变量;
{MULTILINE}:用于定义多行合并的正则变量;
{KAFKA_HOSTS}:用于定义 KAFKA 集群地址的变量;
{PodIP}:用于获取该 Pod 地址的变量;
{Node}:用于获取该 Pod 所处的节点;
| [root@k8s-master01 filebeat_sidecar_dockerfile] |
| filebeat.inputs: |
| - type: log |
| enabled: true |
| paths: |
| - /logu/*.log |
| - /logu/*/*.log |
| tags: ["logu"] |
| fields: |
| topic: {PROJECT_NAME} |
| podip: {PodIP} |
| node: {Node} |
| fields_under_root: true |
| |
| - type: log |
| enabled: true |
| paths: |
| - /logm/*.log |
| - /logm/*/*.log |
| tags: ["logm"] |
| fields: |
| topic: {PROJECT_NAME} |
| podip: {PodIP} |
| node: {Node} |
| fields_under_root: true |
| multiline.pattern: '{MULTILINE}' |
| multiline.negate: true |
| multiline.match: after |
| multiline.max_lines: 10000 |
| |
| output.kafka: |
| hosts: [{KAFKA_HOSTS}] |
| topic: app-{ENV}-%{[topic]} |
| required_acks: 1 |
| compression: gzip |
| max_message_bytes: 1000000 |
7.3.5 构建并推送镜像
# 7.4 nf-flms-gateway 日志收集
7.4.1 创建 Namespace 和 Secrets
7.4.2 创建 nf-flms-gateway
| |
| apiVersion: apps/v1 |
| kind: Deployment |
| metadata: |
| name: nf-flms-gateway |
| namespace: prod |
| spec: |
| replicas: 2 |
| selector: |
| matchLabels: |
| app: nf-flms-gateway |
| template: |
| metadata: |
| labels: |
| app: nf-flms-gateway |
| spec: |
| imagePullSecrets: |
| - name: harbor-admin |
| containers: |
| - name: nf-flms-gateway |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/nf-flms-gateway:v2.2 |
| command: |
| - "/bin/sh" |
| - "-c" |
| - "java -Xms256m -Xmx1024m -Dspring.profiles.active=prd -Djava.security.egd=file:/dev/./urandom -jar -Duser.timezone=GMT+08 nf-flms-gateway.jar" |
| resources: |
| limits: |
| cpu: '1000m' |
| memory: 1Gi |
| requests: |
| cpu: "200m" |
| memory: "500Mi" |
| ports: |
| - containerPort: 8080 |
| readinessProbe: |
| tcpSocket: |
| port: 8080 |
| initialDelaySeconds: 60 |
| periodSeconds: 30 |
| timeoutSeconds: 3 |
| successThreshold: 1 |
| failureThreshold: 2 |
| livenessProbe: |
| tcpSocket: |
| port: 8080 |
| initialDelaySeconds: 60 |
| periodSeconds: 30 |
| timeoutSeconds: 3 |
| successThreshold: 1 |
| failureThreshold: 2 |
| volumeMounts: |
| - name: tz-config |
| mountPath: /usr/share/zoneinfo/Asia/Shanghai |
| - name: tz-config |
| mountPath: /etc/localtime |
| - name: timezone |
| mountPath: /etc/timezone |
| - name: log |
| mountPath: /logs |
| - name: filebeat |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/filebeat_sidecar:7.17.6 |
| imagePullPolicy: Always |
| volumeMounts: |
| - name: log |
| mountPath: /logm |
| - name: tz-config |
| mountPath: /usr/share/zoneinfo/Asia/Shanghai |
| - name: tz-config |
| mountPath: /etc/localtime |
| - name: timezone |
| mountPath: /etc/timezone |
| env: |
| - name: ENV |
| valueFrom: |
| fieldRef: |
| apiVersion: v1 |
| fieldPath: metadata.namespace |
| - name: PodIP |
| valueFrom: |
| fieldRef: |
| fieldPath: status.podIP |
| - name: Node |
| valueFrom: |
| fieldRef: |
| fieldPath: spec.nodeName |
| - name: PROJECT_NAME |
| value: "nf-flms-gateway" |
| - name: KAFKA_HOSTS |
| value: '"kafka-0.kafka-svc.logging:9092","kafka-1.kafka-svc.logging:9092","kafka-2.kafka-svc.logging:9092"' |
| volumes: |
| - name: tz-config |
| hostPath: |
| path: /usr/share/zoneinfo/Asia/Shanghai |
| type: "" |
| - name: timezone |
| hostPath: |
| path: /etc/timezone |
| type: "" |
| - name: log |
| emptyDir: {} |
| --- |
| apiVersion: v1 |
| kind: Service |
| metadata: |
| name: gateway-svc |
| namespace: prod |
| spec: |
| selector: |
| app: nf-flms-gateway |
| ports: |
| - port: 8080 |
| targetPort: 8080 |
| |
| --- |
| |
| apiVersion: networking.k8s.io/v1 |
| kind: Ingress |
| metadata: |
| name: gateway-ingress |
| namespace: prod |
| annotations: |
| nginx.ingress.kubernetes.io/ssl-redirect: "false" |
| spec: |
| ingressClassName: "nginx" |
| rules: |
| - host: "prod-api.hmallleasing.com" |
| http: |
| paths: |
| - path: / |
| pathType: Prefix |
| backend: |
| service: |
| name: gateway-svc |
| port: |
| number: 8080 |
| tls: |
| - hosts: |
| - prod-api.hmallleasing.com |
| # NOTE(review): secretName "prod-api.hmallleasig.com" is missing an "n" vs the |
| # host "prod-api.hmallleasing.com" — confirm the TLS Secret's actual name |
| # before correcting, since the Secret itself may have been created with this name. |
| secretName: "prod-api.hmallleasig.com" |
7.4.3 检查 KafkaTopic
1、检查是否有对应的 topic
![2.png]()
2、点击对应的 Preview,查看 topic 中的最新数据
![3.png]()
# 7.5 nf-flms-order 日志收集
7.5.1 创建 PVC
创建 PVC 存储订单合同、身份证复印件等附件
| |
| apiVersion: v1 |
| kind: PersistentVolumeClaim |
| metadata: |
| name: data-image |
| namespace: prod |
| spec: |
| storageClassName: "nfs-storage" |
| accessModes: |
| - ReadWriteMany |
| resources: |
| requests: |
| storage: 2Gi |
7.5.2 创建 nf-flms-order
| |
| apiVersion: apps/v1 |
| kind: Deployment |
| metadata: |
| name: nf-flms-order |
| namespace: prod |
| spec: |
| replicas: 2 |
| selector: |
| matchLabels: |
| app: nf-flms-order |
| template: |
| metadata: |
| labels: |
| app: nf-flms-order |
| spec: |
| imagePullSecrets: |
| - name: harbor-admin |
| containers: |
| - name: nf-flms-order |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/nf-flms-order:v2.0 |
| command: |
| - "/bin/sh" |
| - "-c" |
| - "java -Xms256m -Xmx1024m -Dspring.profiles.active=prd -Djava.security.egd=file:/dev/./urandom -jar -Duser.timezone=GMT+08 nf-flms-order.jar" |
| resources: |
| limits: |
| cpu: '1000m' |
| memory: 1Gi |
| requests: |
| cpu: "200m" |
| memory: "500Mi" |
| ports: |
| - containerPort: 8080 |
| readinessProbe: |
| tcpSocket: |
| port: 8080 |
| initialDelaySeconds: 60 |
| periodSeconds: 30 |
| timeoutSeconds: 3 |
| successThreshold: 1 |
| failureThreshold: 2 |
| livenessProbe: |
| tcpSocket: |
| port: 8080 |
| initialDelaySeconds: 60 |
| periodSeconds: 30 |
| timeoutSeconds: 3 |
| successThreshold: 1 |
| failureThreshold: 2 |
| volumeMounts: |
| - name: tz-config |
| mountPath: /usr/share/zoneinfo/Asia/Shanghai |
| - name: tz-config |
| mountPath: /etc/localtime |
| - name: timezone |
| mountPath: /etc/timezone |
| - name: data-image |
| mountPath: /data |
| - name: log |
| mountPath: /logs |
| - name: filebeat |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/filebeat_sidecar:7.17.6 |
| imagePullPolicy: Always |
| volumeMounts: |
| - name: log |
| mountPath: /logm |
| - name: tz-config |
| mountPath: /usr/share/zoneinfo/Asia/Shanghai |
| - name: tz-config |
| mountPath: /etc/localtime |
| - name: timezone |
| mountPath: /etc/timezone |
| env: |
| - name: ENV |
| valueFrom: |
| fieldRef: |
| apiVersion: v1 |
| fieldPath: metadata.namespace |
| - name: PodIP |
| valueFrom: |
| fieldRef: |
| fieldPath: status.podIP |
| - name: Node |
| valueFrom: |
| fieldRef: |
| fieldPath: spec.nodeName |
| - name: PROJECT_NAME |
| value: "nf-flms-order" |
| - name: KAFKA_HOSTS |
| value: '"kafka-0.kafka-svc.logging:9092","kafka-1.kafka-svc.logging:9092","kafka-2.kafka-svc.logging:9092"' |
| volumes: |
| - name: tz-config |
| hostPath: |
| path: /usr/share/zoneinfo/Asia/Shanghai |
| type: "" |
| - name: timezone |
| hostPath: |
| path: /etc/timezone |
| type: "" |
| - name: data-image |
| persistentVolumeClaim: |
| claimName: data-image |
| - name: log |
| emptyDir: {} |
7.5.3 检查 KafkaTopic
1、检查是否有对应的 topic
![4.png]()
2、点击对应的 Preview,查看 topic 中的最新数据
![5.png]()
# 7.6 nf-flms-statistics 日志收集
7.6.1 创建 nf-flms-statistics
| |
| apiVersion: apps/v1 |
| kind: Deployment |
| metadata: |
| name: nf-flms-statistics |
| namespace: prod |
| spec: |
| replicas: 2 |
| selector: |
| matchLabels: |
| app: nf-flms-statistics |
| template: |
| metadata: |
| labels: |
| app: nf-flms-statistics |
| spec: |
| imagePullSecrets: |
| - name: harbor-admin |
| containers: |
| - name: nf-flms-statistics |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/nf-flms-statistics:v2.0 |
| command: |
| - "/bin/sh" |
| - "-c" |
| - "java -Xms256m -Xmx1024m -Dspring.profiles.active=prd -Djava.security.egd=file:/dev/./urandom -jar -Duser.timezone=GMT+08 nf-flms-statistics.jar" |
| resources: |
| limits: |
| cpu: '1000m' |
| memory: 1Gi |
| requests: |
| cpu: "200m" |
| memory: "500Mi" |
| ports: |
| - containerPort: 8080 |
| readinessProbe: |
| tcpSocket: |
| port: 8080 |
| initialDelaySeconds: 60 |
| periodSeconds: 30 |
| timeoutSeconds: 3 |
| successThreshold: 1 |
| failureThreshold: 2 |
| livenessProbe: |
| tcpSocket: |
| port: 8080 |
| initialDelaySeconds: 60 |
| periodSeconds: 30 |
| timeoutSeconds: 3 |
| successThreshold: 1 |
| failureThreshold: 2 |
| volumeMounts: |
| - name: tz-config |
| mountPath: /usr/share/zoneinfo/Asia/Shanghai |
| - name: tz-config |
| mountPath: /etc/localtime |
| - name: timezone |
| mountPath: /etc/timezone |
| - name: log |
| mountPath: /logs |
| - name: filebeat |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/filebeat_sidecar:7.17.6 |
| imagePullPolicy: Always |
| volumeMounts: |
| - name: log |
| mountPath: /logm |
| - name: tz-config |
| mountPath: /usr/share/zoneinfo/Asia/Shanghai |
| - name: tz-config |
| mountPath: /etc/localtime |
| - name: timezone |
| mountPath: /etc/timezone |
| env: |
| - name: ENV |
| valueFrom: |
| fieldRef: |
| apiVersion: v1 |
| fieldPath: metadata.namespace |
| - name: PodIP |
| valueFrom: |
| fieldRef: |
| fieldPath: status.podIP |
| - name: Node |
| valueFrom: |
| fieldRef: |
| fieldPath: spec.nodeName |
| - name: PROJECT_NAME |
| value: "nf-flms-statistics" |
| - name: KAFKA_HOSTS |
| value: '"kafka-0.kafka-svc.logging:9092","kafka-1.kafka-svc.logging:9092","kafka-2.kafka-svc.logging:9092"' |
| volumes: |
| - name: tz-config |
| hostPath: |
| path: /usr/share/zoneinfo/Asia/Shanghai |
| type: "" |
| - name: timezone |
| hostPath: |
| path: /etc/timezone |
| type: "" |
| - name: log |
| emptyDir: {} |
7.6.2 检查 KafkaTopic
1、检查是否有对应的 topic
![6.png]()
2、点击对应的 Preview,查看 topic 中的最新数据
![7.png]()
# 7.7 nf-flms-system 日志收集
7.7.1 创建 nf-flms-system
| |
| apiVersion: apps/v1 |
| kind: Deployment |
| metadata: |
| name: nf-flms-system |
| namespace: prod |
| spec: |
| replicas: 2 |
| selector: |
| matchLabels: |
| app: nf-flms-system |
| template: |
| metadata: |
| labels: |
| app: nf-flms-system |
| spec: |
| imagePullSecrets: |
| - name: harbor-admin |
| containers: |
| - name: nf-flms-system |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/nf-flms-system:v2.0 |
| command: |
| - "/bin/sh" |
| - "-c" |
| - "java -Xms256m -Xmx1024m -Dspring.profiles.active=prd -Djava.security.egd=file:/dev/./urandom -jar -Duser.timezone=GMT+08 nf-flms-system.jar" |
| resources: |
| limits: |
| cpu: '1000m' |
| memory: 1Gi |
| requests: |
| cpu: "200m" |
| memory: "500Mi" |
| ports: |
| - containerPort: 8080 |
| livenessProbe: |
| tcpSocket: |
| port: 8080 |
| initialDelaySeconds: 60 |
| periodSeconds: 10 |
| timeoutSeconds: 10 |
| readinessProbe: |
| tcpSocket: |
| port: 8080 |
| failureThreshold: 2 |
| initialDelaySeconds: 60 |
| periodSeconds: 10 |
| timeoutSeconds: 10 |
| volumeMounts: |
| - name: tz-config |
| mountPath: /usr/share/zoneinfo/Asia/Shanghai |
| - name: tz-config |
| mountPath: /etc/localtime |
| - name: timezone |
| mountPath: /etc/timezone |
| - name: log |
| mountPath: /logs |
| - name: filebeat |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/filebeat_sidecar:7.17.6 |
| imagePullPolicy: Always |
| volumeMounts: |
| - name: log |
| mountPath: /logm |
| - name: tz-config |
| mountPath: /usr/share/zoneinfo/Asia/Shanghai |
| - name: tz-config |
| mountPath: /etc/localtime |
| - name: timezone |
| mountPath: /etc/timezone |
| env: |
| - name: ENV |
| valueFrom: |
| fieldRef: |
| apiVersion: v1 |
| fieldPath: metadata.namespace |
| - name: PodIP |
| valueFrom: |
| fieldRef: |
| fieldPath: status.podIP |
| - name: Node |
| valueFrom: |
| fieldRef: |
| fieldPath: spec.nodeName |
| - name: PROJECT_NAME |
| value: "nf-flms-system" |
| - name: KAFKA_HOSTS |
| value: '"kafka-0.kafka-svc.logging:9092","kafka-1.kafka-svc.logging:9092","kafka-2.kafka-svc.logging:9092"' |
| volumes: |
| - name: tz-config |
| hostPath: |
| path: /usr/share/zoneinfo/Asia/Shanghai |
| type: "" |
| - name: timezone |
| hostPath: |
| path: /etc/timezone |
| type: "" |
| - name: log |
| emptyDir: {} |
7.7.2 检查 KafkaTopic
1、检查是否有对应的 topic
![1.png]()
2、点击对应的 Preview,查看 topic 中的最新数据
![2.png]()
# 7.8 nf-flms-openapi 日志收集
7.8.1 创建 nf-flms-openapi
| |
| apiVersion: apps/v1 |
| kind: Deployment |
| metadata: |
| name: nf-flms-openapi |
| namespace: prod |
| spec: |
| replicas: 2 |
| selector: |
| matchLabels: |
| app: nf-flms-openapi |
| template: |
| metadata: |
| labels: |
| app: nf-flms-openapi |
| spec: |
| imagePullSecrets: |
| - name: harbor-admin |
| containers: |
| - name: nf-flms-openapi |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/nf-flms-openapi:v2.2 |
| command: |
| - "/bin/sh" |
| - "-c" |
| - "java -Xms256m -Xmx1024m -Dspring.profiles.active=prd -Djava.security.egd=file:/dev/./urandom -jar -Duser.timezone=GMT+08 nf-flms-openapi.jar" |
| resources: |
| limits: |
| cpu: '1000m' |
| memory: 1Gi |
| requests: |
| cpu: "200m" |
| memory: "500Mi" |
| ports: |
| - containerPort: 8080 |
| livenessProbe: |
| tcpSocket: |
| port: 8080 |
| initialDelaySeconds: 60 |
| periodSeconds: 10 |
| timeoutSeconds: 10 |
| readinessProbe: |
| tcpSocket: |
| port: 8080 |
| failureThreshold: 2 |
| initialDelaySeconds: 60 |
| periodSeconds: 10 |
| timeoutSeconds: 10 |
| volumeMounts: |
| - name: tz-config |
| mountPath: /usr/share/zoneinfo/Asia/Shanghai |
| - name: tz-config |
| mountPath: /etc/localtime |
| - name: timezone |
| mountPath: /etc/timezone |
| - name: log |
| mountPath: /logs |
| - name: filebeat |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/filebeat_sidecar:7.17.6 |
| imagePullPolicy: Always |
| volumeMounts: |
| - name: log |
| mountPath: /logm |
| - name: tz-config |
| mountPath: /usr/share/zoneinfo/Asia/Shanghai |
| - name: tz-config |
| mountPath: /etc/localtime |
| - name: timezone |
| mountPath: /etc/timezone |
| env: |
| - name: ENV |
| valueFrom: |
| fieldRef: |
| apiVersion: v1 |
| fieldPath: metadata.namespace |
| - name: PodIP |
| valueFrom: |
| fieldRef: |
| fieldPath: status.podIP |
| - name: Node |
| valueFrom: |
| fieldRef: |
| fieldPath: spec.nodeName |
| - name: PROJECT_NAME |
| value: "nf-flms-openapi" |
| - name: KAFKA_HOSTS |
| value: '"kafka-0.kafka-svc.logging:9092","kafka-1.kafka-svc.logging:9092","kafka-2.kafka-svc.logging:9092"' |
| volumes: |
| - name: tz-config |
| hostPath: |
| path: /usr/share/zoneinfo/Asia/Shanghai |
| type: "" |
| - name: timezone |
| hostPath: |
| path: /etc/timezone |
| type: "" |
| - name: log |
| emptyDir: {} |
7.8.2 检查 KafkaTopic
1、检查是否有对应的 topic
![3.png]()
2、点击对应的 Preview,查看 topic 中的最新数据
![4.png]()
# 7.9 nf-flms-ui 日志收集
7.9.1 准备 Nginx 配置文件
| |
| server { |
| listen 80; |
| server_name prod.hmallleasing.com; |
| root /code/prod; |
| |
| location / { |
| index index.html index.htm; |
| } |
| } |
| |
| server { |
| listen 80; |
| server_name prod-api.hmallleasing.com; |
| |
| location / { |
| proxy_set_header Host $http_host; |
| proxy_set_header X-Real-IP $remote_addr; |
| proxy_set_header REMOTE-HOST $remote_addr; |
| proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; |
| proxy_pass http://gateway-svc.prod.svc.cluster.local:8080; |
| } |
| } |
7.9.2 创建 ConfigMap
| kubectl create configmap nf-flms-ui-conf --from-file=./prod.hmallleasing.com.conf -n prod |
7.9.3 创建 nf-flms-ui
| [root@k8s-master01 06-service-all] |
| apiVersion: apps/v1 |
| kind: Deployment |
| metadata: |
| name: nf-flms-ui |
| namespace: prod |
| spec: |
| replicas: 1 |
| selector: |
| matchLabels: |
| app: nf-flms-ui |
| template: |
| metadata: |
| labels: |
| app: nf-flms-ui |
| spec: |
| imagePullSecrets: |
| - name: harbor-admin |
| containers: |
| - name: nf-flms-ui |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/nf-flms-ui:v1.0 |
| ports: |
| - containerPort: 80 |
| resources: |
| limits: |
| cpu: '1000m' |
| memory: 1Gi |
| requests: |
| cpu: "200m" |
| memory: "500Mi" |
| readinessProbe: |
| tcpSocket: |
| port: 80 |
| initialDelaySeconds: 60 |
| periodSeconds: 10 |
| timeoutSeconds: 10 |
| livenessProbe: |
| tcpSocket: |
| port: 80 |
| initialDelaySeconds: 60 |
| periodSeconds: 10 |
| timeoutSeconds: 10 |
| volumeMounts: |
| - name: ngxconfs |
| mountPath: /etc/nginx/conf.d/ |
| - name: tz-config |
| mountPath: /usr/share/zoneinfo/Asia/Shanghai |
| - name: tz-config |
| mountPath: /etc/localtime |
| - name: timezone |
| mountPath: /etc/timezone |
| - name: log |
| mountPath: /var/log/nginx/ |
| - name: filebeat |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/filebeat_sidecar:7.17.6 |
| imagePullPolicy: Always |
| volumeMounts: |
| - name: log |
| mountPath: /logu |
| - name: tz-config |
| mountPath: /usr/share/zoneinfo/Asia/Shanghai |
| - name: tz-config |
| mountPath: /etc/localtime |
| - name: timezone |
| mountPath: /etc/timezone |
| env: |
| - name: ENV |
| valueFrom: |
| fieldRef: |
| apiVersion: v1 |
| fieldPath: metadata.namespace |
| - name: PodIP |
| valueFrom: |
| fieldRef: |
| fieldPath: status.podIP |
| - name: Node |
| valueFrom: |
| fieldRef: |
| fieldPath: spec.nodeName |
| - name: PROJECT_NAME |
| value: "nf-flms-ui" |
| - name: KAFKA_HOSTS |
| value: '"kafka-0.kafka-svc.logging:9092","kafka-1.kafka-svc.logging:9092","kafka-2.kafka-svc.logging:9092"' |
| volumes: |
| - name: ngxconfs |
| configMap: |
| name: nf-flms-ui-conf |
| - name: tz-config |
| hostPath: |
| path: /usr/share/zoneinfo/Asia/Shanghai |
| type: "" |
| - name: timezone |
| hostPath: |
| path: /etc/timezone |
| type: "" |
| - name: log |
| emptyDir: {} |
| --- |
| apiVersion: v1 |
| kind: Service |
| metadata: |
| name: nf-flms-ui-svc |
| namespace: prod |
| spec: |
| selector: |
| app: nf-flms-ui |
| ports: |
| - port: 80 |
| targetPort: 80 |
| --- |
| apiVersion: networking.k8s.io/v1 |
| kind: Ingress |
| metadata: |
| name: nf-flms-ui-ingress |
| namespace: prod |
| annotations: |
| nginx.ingress.kubernetes.io/ssl-redirect: "false" |
| spec: |
| ingressClassName: "nginx" |
| rules: |
| - host: "prod.hmallleasing.com" |
| http: |
| paths: |
| - path: / |
| pathType: Prefix |
| backend: |
| service: |
| name: nf-flms-ui-svc |
| port: |
| number: 80 |
| tls: |
| - hosts: |
| - prod.hmallleasing.com |
| secretName: "prod-api.hmallleasing.com" |
7.9.4 检查 KafkaTopic
1、检查是否有对应的 topic
![1.png]()
2、点击对应的 Preview,查看 topic 中的最新数据
![2.png]()
# 八、交付生产环境 Logstash
# 8.1 拉取镜像
# 8.2 编写 logstash 配置
| [root@k8s-master01 conf] |
| input { |
| kafka { |
| bootstrap_servers => "kafka-0.kafka-svc:9092,kafka-1.kafka-svc:9092,kafka-2.kafka-svc:9092" |
| group_id => "logstash-prod" |
| consumer_threads => "3" |
| topics_pattern => "app-prod-.*" |
| } |
| } |
| |
| filter { |
| json { |
| source => "message" |
| } |
| } |
| |
| output { |
| stdout { |
| codec => rubydebug |
| } |
| elasticsearch { |
| hosts => ["es-data-0.es-svc:9200","es-data-1.es-svc:9200"] |
| index => "app-prod-%{+YYYY.MM.dd}" |
| template_overwrite => true |
| } |
| } |
# 8.3 创建生产环境 configmap
| kubectl create configmap logstash-prod-conf --from-file=logstash.conf=conf/logstash-prod.conf -n logging |
# 8.4 创建生产环境 Logstash
1、创建 logstash-svc
| |
| apiVersion: v1 |
| kind: Service |
| metadata: |
| name: logstash-svc |
| namespace: logging |
| spec: |
| clusterIP: None |
| selector: |
| app: logstash |
| ports: |
| - port: 9600 |
| targetPort: 9600 |
2、创建 logstash-StatefulSet
| |
| apiVersion: apps/v1 |
| kind: StatefulSet |
| metadata: |
| name: logstash-prod |
| namespace: logging |
| spec: |
| serviceName: "logstash-svc" |
| replicas: 1 |
| selector: |
| matchLabels: |
| app: logstash |
| env: prod |
| template: |
| metadata: |
| labels: |
| app: logstash |
| env: prod |
| spec: |
| imagePullSecrets: |
| - name: harbor-admin |
| containers: |
| - name: logstash |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/logstash-oss:7.17.6 |
| args: ["-f","config/logstash.conf"] |
| resources: |
| limits: |
| memory: 1024Mi |
| env: |
| - name: PIPELINE_WORKERS |
| value: "2" |
| - name: PIPELINE_BATCH_SIZE |
| value: "10000" |
| lifecycle: |
| postStart: |
| exec: |
| command: |
| - "/bin/bash" |
| - "-c" |
| - "sed -i -e '/^-Xms/c-Xms1024m' -e '/^-Xmx/c-Xmx1024m' /usr/share/logstash/config/jvm.options" |
| volumeMounts: |
| - name: data |
| mountPath: /usr/share/logstash/data |
| - name: conf |
| mountPath: /usr/share/logstash/config/logstash.conf |
| subPath: logstash.conf |
| - name: tz-config |
| mountPath: /usr/share/zoneinfo/Asia/Shanghai |
| - name: tz-config |
| mountPath: /etc/localtime |
| - name: timezone |
| mountPath: /etc/timezone |
| volumes: |
| - name: conf |
| configMap: |
| name: logstash-prod-conf |
| - name: tz-config |
| hostPath: |
| path: /usr/share/zoneinfo/Asia/Shanghai |
| type: "" |
| - name: timezone |
| hostPath: |
| path: /etc/timezone |
| type: "" |
| volumeClaimTemplates: |
| - metadata: |
| name: data |
| spec: |
| accessModes: ["ReadWriteMany"] |
| storageClassName: "nfs-storage" |
| resources: |
| requests: |
| storage: 5Gi |
# 8.5 更新资源清单
| [root@k8s-master01 08-logstash] |
| [root@k8s-master01 08-logstash] |
# 8.6 检查 ES 生产环境索引
![1.png]()
# 九、交付测试环境 Logstash
# 9.1 拉取镜像
# 9.2 编写 logstash 配置
| [root@k8s-master01 08-logstash] |
| input { |
| kafka { |
| bootstrap_servers => "kafka-0.kafka-svc:9092,kafka-1.kafka-svc:9092,kafka-2.kafka-svc:9092" |
| group_id => "logstash-test" |
| consumer_threads => "3" |
| topics_pattern => "app-test-.*" |
| } |
| } |
| |
| filter { |
| json { |
| source => "message" |
| } |
| } |
| |
| output { |
| stdout { |
| codec => rubydebug |
| } |
| elasticsearch { |
| hosts => ["es-data-0.es-svc:9200","es-data-1.es-svc:9200"] |
| index => "app-test-%{+YYYY.MM.dd}" |
| template_overwrite => true |
| } |
| } |
# 9.3 创建测试环境 configmap
| kubectl create configmap logstash-test-conf --from-file=logstash.conf=conf/logstash-test.conf -n logging |
# 9.4 创建测试环境 Logstash
1、创建 logstash-svc(该 Service 与 8.4 中创建的 logstash-svc 定义完全相同,若已在 8.4 中创建,此步可跳过)
| |
| apiVersion: v1 |
| kind: Service |
| metadata: |
| name: logstash-svc |
| namespace: logging |
| spec: |
| clusterIP: None |
| selector: |
| app: logstash |
| ports: |
| - port: 9600 |
| targetPort: 9600 |
2、创建 logstash-StatefulSet
| [root@k8s-master01 08-logstash] |
| apiVersion: apps/v1 |
| kind: StatefulSet |
| metadata: |
| name: logstash-test |
| namespace: logging |
| spec: |
| serviceName: "logstash-svc" |
| replicas: 1 |
| selector: |
| matchLabels: |
| app: logstash |
| env: test |
| template: |
| metadata: |
| labels: |
| app: logstash |
| env: test |
| spec: |
| imagePullSecrets: |
| - name: harbor-admin |
| containers: |
| - name: logstash |
| image: registry.cn-hangzhou.aliyuncs.com/kubernetes_public/logstash-oss:7.17.6 |
| args: ["-f","config/logstash.conf"] |
| resources: |
| limits: |
| memory: 1024Mi |
| env: |
| - name: PIPELINE_WORKERS |
| value: "2" |
| - name: PIPELINE_BATCH_SIZE |
| value: "10000" |
| lifecycle: |
| postStart: |
| exec: |
| command: |
| - "/bin/bash" |
| - "-c" |
| - "sed -i -e '/^-Xms/c-Xms1024m' -e '/^-Xmx/c-Xmx1024m' /usr/share/logstash/config/jvm.options" |
| volumeMounts: |
| - name: data |
| mountPath: /usr/share/logstash/data |
| - name: conf |
| mountPath: /usr/share/logstash/config/logstash.conf |
| subPath: logstash.conf |
| - name: tz-config |
| mountPath: /usr/share/zoneinfo/Asia/Shanghai |
| - name: tz-config |
| mountPath: /etc/localtime |
| - name: timezone |
| mountPath: /etc/timezone |
| volumes: |
| - name: conf |
| configMap: |
| name: logstash-test-conf |
| - name: tz-config |
| hostPath: |
| path: /usr/share/zoneinfo/Asia/Shanghai |
| type: "" |
| - name: timezone |
| hostPath: |
| path: /etc/timezone |
| type: "" |
| volumeClaimTemplates: |
| - metadata: |
| name: data |
| spec: |
| accessModes: ["ReadWriteMany"] |
| storageClassName: "nfs-storage" |
| resources: |
| requests: |
| storage: 5Gi |
# 9.5 更新资源清单
| [root@k8s-master01 08-logstash] |
| [root@k8s-master01 08-logstash] |
# 9.6 检查 ES 测试环境索引
![1.png]()
# 十、Kibana 数据展示
# 10.1 添加生产环境索引
![2.png]()
# 10.2 查看生产环境数据
kibana->Discover
![3.png]()
①点击生产环境索引 ->②选择需要查看的字段 ->③进行 filter 筛选项目 ->④选择对应时间段
![4.png]()
![5.png]()
![6.png]()