Collecting Logs from K8s Cluster Services with EFK
1. Elasticsearch
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elasticsearch
  namespace: elk
  labels:
    k8s-app: elasticsearch
spec:
  serviceName: elasticsearch
  selector:
    matchLabels:
      k8s-app: elasticsearch
  template:
    metadata:
      labels:
        k8s-app: elasticsearch
    spec:
      containers:
      - image: elasticsearch:7.3.2
        name: elasticsearch
        resources:
          limits:
            cpu: 1
            memory: 2Gi
          requests:
            cpu: 0.5
            memory: 500Mi
        env:
        - name: "discovery.type"
          value: "single-node"
        - name: ES_JAVA_OPTS
          value: "-Xms512m -Xmx2g"
        ports:
        - containerPort: 9200
          name: db
          protocol: TCP
        volumeMounts:
        - name: elasticsearch-data
          mountPath: /usr/share/elasticsearch/data
      nodeSelector:
        disktype: ssd
      securityContext:
        fsGroup: 1000
      volumes:
      - name: elasticsearch-data
        persistentVolumeClaim:
          claimName: elasticsearch-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: elasticsearch-pvc
  namespace: elk
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: "task-pv"
  resources:
    requests:
      storage: 20Gi
---
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  namespace: elk
spec:
  clusterIP: None
  ports:
  - port: 9200
    protocol: TCP
    targetPort: db
  selector:
    k8s-app: elasticsearch
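After applying the manifests, check that the PVC actually binds (the task-pv storage class must be backed by an existing PersistentVolume or a provisioner, otherwise the pod stays Pending) and that the single-node cluster reports a healthy status. A minimal sketch, assuming the default StatefulSet pod name elasticsearch-0 and that curl is available inside the image:

# Verify the PVC bound and the pod is Running
kubectl -n elk get pvc elasticsearch-pvc
kubectl -n elk get pods -l k8s-app=elasticsearch
# Query cluster health from inside the pod (curl ships with the ES 7.x image)
kubectl -n elk exec elasticsearch-0 -- curl -s 'http://localhost:9200/_cluster/health?pretty'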
2. Kibana
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: elk
  labels:
    k8s-app: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: kibana
  template:
    metadata:
      labels:
        k8s-app: kibana
    spec:
      containers:
      - name: kibana
        image: kibana:7.3.2
        resources:
          limits:
            cpu: 1
            memory: 500Mi
          requests:
            cpu: 0.5
            memory: 200Mi
        env:
        - name: ELASTICSEARCH_HOSTS
          value: http://elasticsearch:9200
        ports:
        - containerPort: 5601
          name: ui
          protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: elk
spec:
  type: ClusterIP
  ports:
  - name: ui
    port: 5601
    protocol: TCP
    targetPort: ui
  selector:
    k8s-app: kibana
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kibana
  namespace: elk
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  # Set this to the ingress class actually deployed in your cluster
  ingressClassName: nginx
  rules:
  # For internal access, map this hostname in your local hosts file
  - host: kibana.local
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kibana
            port:
              name: ui
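To reach Kibana through the Ingress from an internal machine, point the hostname at the ingress controller and confirm that the UI answers. A minimal sketch, where 192.168.1.100 is only a placeholder for your ingress-nginx controller's address:

# Map the Ingress host locally (192.168.1.100 is an assumed address)
echo "192.168.1.100 kibana.local" | sudo tee -a /etc/hosts
# Expect an HTTP response (200 or a redirect) from Kibana
curl -I http://kibana.local/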
3. Filebeat
The example below collects service logs from the halo namespace.
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: halo
data:
  filebeat.yml: |-
    filebeat.inputs:
    # Collect the halo.log file
    - type: log
      enabled: true
      paths:
      - /data/halo/logs/halo.log
      fields:
        service_name: halo
        environment: production
        log_type: application
      fields_under_root: true
      multiline.pattern: '^\d{4}-\d{2}-\d{2}'
      multiline.negate: true
      multiline.match: after
      scan_frequency: 1s
      harvester_buffer_size: 16384
      max_bytes: 10485760
    # Processor configuration
    processors:
    - add_kubernetes_metadata:
        host: ${NODE_NAME}
        matchers:
        - logs_path:
            logs_path: "/var/log/containers/"
    - add_host_metadata:
        when.not.contains.tags: forwarded
    - add_fields:
        target: '@metadata'
        fields:
          index_prefix: halo-logs
    - timestamp:
        field: '@timestamp'
        layouts:
        - '2006-01-02 15:04:05'
        - '2006-01-02T15:04:05.000Z'
        test:
        - '2023-12-01 10:30:00'
    # Output configuration
    output.elasticsearch:
      hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}']
      index: "halo-logs-%{+yyyy.MM.dd}"
    # If index lifecycle management is enabled (which is typically the default),
    # setup.template.name and setup.template.pattern are ignored.
    # https://www.elastic.co/docs/reference/beats/filebeat/ilm
    setup.ilm.enabled: false
    setup.template.name: "halo-logs-template"
    setup.template.pattern: "halo-logs-*"
    setup.template.settings:
      index.number_of_shards: 1
      index.number_of_replicas: 0
      index.refresh_interval: "5s"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
- kind: ServiceAccount
  name: filebeat
  namespace: halo
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
  resources:
  - namespaces
  - pods
  verbs:
  - get
  - watch
  - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: halo
  labels:
    k8s-app: filebeat
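Before deploying the sidecar, the ClusterRole and binding can be sanity-checked by impersonating the ServiceAccount with kubectl's built-in authorization review; both commands should print "yes":

kubectl auth can-i list pods --as=system:serviceaccount:halo:filebeat
kubectl auth can-i get namespaces --as=system:serviceaccount:halo:filebeat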
Sidecar configuration example:
Filebeat runs as a sidecar container to collect the logs. Note that when Elasticsearch lives in a different namespace, its address must be set to the headless service's address, which is resolved through the cluster's internal DNS.
Also pay attention to the ServiceAccount configuration; without it, Filebeat has no permission to read resource metadata from the cluster.
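Before applying the manifest below, the headless-service address can be verified from inside the target namespace. A minimal sketch, assuming the busybox:1.36 image (the image tag and the throwaway pod name dns-test are assumptions):

# Resolve the headless service of Elasticsearch from the halo namespace
kubectl -n halo run dns-test --rm -it --restart=Never --image=busybox:1.36 -- \
  nslookup elasticsearch.elk.svc.cluster.local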
apiVersion: v1
...
...
spec:
  template:
    spec:
      containers:
      - args:
        - '-c'
        - /etc/filebeat.yml
        - '-e'
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.name
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: ELASTICSEARCH_HOST
          value: elasticsearch.elk.svc.cluster.local
        - name: ELASTICSEARCH_PORT
          value: '9200'
        image: elastic/filebeat:7.3.2
        imagePullPolicy: IfNotPresent
        name: filebeat-log
        resources:
          limits:
            memory: 512Mi
        volumeMounts:
        - mountPath: /etc/filebeat.yml
          name: filebeat-config
          readOnly: true
          subPath: filebeat.yml
        - mountPath: /data/halo
          name: halo-data
          readOnly: true
          subPath: halo
        - mountPath: /usr/share/filebeat/data
          name: data
      serviceAccount: filebeat
      serviceAccountName: filebeat
      volumes:
      - configMap:
          defaultMode: 0600
          name: filebeat-config
        name: filebeat-config
      # Scratch space for Filebeat's runtime data:
      # - registry file: tracks which log files have been read and the current offsets
      # - metadata: file state, last read time, and similar bookkeeping
      # - temporary processing files: caches and transient data
      - emptyDir: {}
        name: data
      - name: halo-data
        persistentVolumeClaim:
          claimName: halo-1-xxx
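Once the workload restarts with the sidecar, the pipeline can be checked end to end: the sidecar's own log should show harvesters starting, and the daily index should appear in Elasticsearch. A minimal sketch (<halo-pod> is a placeholder for the actual pod name; elasticsearch-0 assumes the StatefulSet from section 1):

# Find the pod that carries the filebeat-log sidecar and inspect its output
kubectl -n halo get pods
kubectl -n halo logs <halo-pod> -c filebeat-log --tail=50
# Confirm the halo-logs-* index was created in Elasticsearch
kubectl -n elk exec elasticsearch-0 -- curl -s 'http://localhost:9200/_cat/indices/halo-logs-*?v'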