1. Elasticsearch Cluster Introduction
2. Deployment Environment
| IP | Node | OS | k8s version | Elasticsearch version | Docker version |
| --- | --- | --- | --- | --- | --- |
| 172.16.4.85 | master1 | CentOS 7.8 | 1.23.17 | - | 20.10.9 |
| 172.16.4.86 | node1 | CentOS 7.8 | 1.23.17 | 7.1.1 | 20.10.9 |
| 172.16.4.87 | node2 | CentOS 7.8 | 1.23.17 | 7.1.1 | 20.10.9 |
| 172.16.4.89 | node3 | CentOS 7.8 | 1.23.17 | - | 20.10.9 |
| 172.16.4.90 | node4 | CentOS 7.8 | 1.23.17 | 7.1.1 | 20.10.9 |
3. Elasticsearch Cluster Deployment
3.1 NFS deployment
- Install NFS on CentOS 7
yum install -y nfs-utils
- Create the NFS shared directories
mkdir -p /nfs_share/k8s/es/pv{1..3}
chmod 777 /nfs_share/k8s/es/pv{1..3} -R
- Edit the NFS exports file
[root@localhost minio]# cat /etc/exports
/nfs_share/k8s/es/pv1 *(rw,sync,no_subtree_check,no_root_squash)
/nfs_share/k8s/es/pv2 *(rw,sync,no_subtree_check,no_root_squash)
/nfs_share/k8s/es/pv3 *(rw,sync,no_subtree_check,no_root_squash)
- Start the NFS service
# start the NFS service
systemctl start nfs-server
# start the NFS service automatically at boot
systemctl enable nfs-server
- Reload the exports file and list the result
[root@localhost es]# exportfs -r
[root@localhost es]# exportfs -v
/nfs_share/k8s/es/pv1    <world>(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
/nfs_share/k8s/es/pv2    <world>(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
/nfs_share/k8s/es/pv3    <world>(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
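- Before moving on, it is worth confirming the exports are actually mountable from a k8s node. A minimal check (assuming the NFS server address is 172.16.4.60, the address referenced by the PV manifests below):
# list the exports as seen from a client (run on any k8s node)
showmount -e 172.16.4.60
# trial-mount one export, write a test file, then clean up
mkdir -p /mnt/es-test
mount -t nfs 172.16.4.60:/nfs_share/k8s/es/pv1 /mnt/es-test
touch /mnt/es-test/.write-test && rm -f /mnt/es-test/.write-test
umount /mnt/es-test && rmdir /mnt/es-test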
3.2 Create the namespace
apiVersion: v1
kind: Namespace
metadata:
  name: es
kubectl apply -f es-ns.yaml
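- A quick confirmation that the namespace exists:
kubectl get ns es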
3.3 Elasticsearch PV deployment
apiVersion: v1
kind: PersistentVolume
metadata:
  name: es-pv1
  labels:
    type: nfs-es-pv          # used for PVC matching
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs-es-storage
  nfs:
    server: 172.16.4.60
    path: /nfs_share/k8s/es/pv1   # the path must already exist with read/write permission
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: es-pv2
  labels:
    type: nfs-es-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs-es-storage
  nfs:
    server: 172.16.4.60
    path: /nfs_share/k8s/es/pv2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: es-pv3
  labels:
    type: nfs-es-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs-es-storage
  nfs:
    server: 172.16.4.60
    path: /nfs_share/k8s/es/pv3
kubectl apply -f es-pv.yaml
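- Before any PVC binds them, the three PVs should show up as Available; a quick check using the label defined above:
kubectl get pv -l type=nfs-es-pv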
3.4 Elasticsearch certificate deployment
- Generate certificates: use the Elasticsearch certificate tool elasticsearch-certutil to generate the TLS certificates (a minimal sketch follows the link below)
https://www.cnblogs.com/Jeffrey1172417122/p/16718354.html
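- For reference, a generation sketch using elasticsearch-certutil in PEM mode; the instance name ks-es and the output paths are assumptions chosen to match the file names used below, and flags may vary slightly across 7.x releases:
# create a CA; the zip contains ca/ca.crt and ca/ca.key
bin/elasticsearch-certutil ca --pem --out ca.zip
unzip ca.zip -d certs
# issue a node certificate signed by that CA; the zip contains ks-es/ks-es.crt and ks-es/ks-es.key
bin/elasticsearch-certutil cert --pem \
  --ca-cert certs/ca/ca.crt --ca-key certs/ca/ca.key \
  --name ks-es --out ks-es.zip
unzip ks-es.zip -d certs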
- Create the Secret: upload the certificates to Kubernetes, and make sure the paths used when referencing the certificates match
# assuming the certificate files are in the certs/ folder of the current directory
kubectl create secret generic elasticsearch-certs -n es \
  --from-file=certs/ks-es.key \
  --from-file=certs/ks-es.crt \
  --from-file=certs/ca.crt
3.5 Elasticsearch ConfigMap deployment
- For the k8s ConfigMap, the following configuration files can be used as a reference. They come from a certificate-enabled cluster I previously deployed directly on hosts, likewise three ES nodes forming a cluster, with nginx as the proxy. The three files differ only in node.name, network.publish_host, and the cluster.initial_master_nodes line.
Cluster elasticsearch.yml configuration
################################ es1 ################################
[root@localhost config]# cat elasticsearch.yml
# cluster name
cluster.name: escluster
# cluster bootstrap: elect this node as the initial master
cluster.initial_master_nodes: "es1"
# node name
node.name: es1
# whether this node may take part in master elections
node.master: true
# whether this node stores data (enabled by default)
node.data: true
# by default Elasticsearch binds to 0.0.0.0, opening ports 9200-9300 for HTTP and 9300-9400 for node-to-node transport; an explicit IP can also be set
network.host: 0.0.0.0
# publish_host is the address other nodes use to connect to this node; if unset it is auto-detected, and it must be a real, reachable address
network.publish_host: 172.17.22.65
# port this node uses to talk to other nodes
transport.tcp.port: 29300
# HTTP listen port
http.port: 29200
# whether to allow cross-origin requests (default false)
http.cors.enabled: true
# when CORS is enabled, the allowed origins (default *, i.e. all domains)
http.cors.allow-origin: "*"
# headers allowed in cross-origin requests
http.cors.allow-headers: Authorization
# addresses for internal node discovery; 9300 is the default transport port, append the port if it was changed
discovery.zen.ping.unicast.hosts: ["172.17.22.65:29300","172.17.22.66:29300","172.17.22.67:29300"]
discovery.zen.minimum_master_nodes: 2
discovery.zen.ping_timeout: 5s
#cluster.initial_master_nodes: ["192.168.1.240:29300","192.168.1.125:29300","192.168.1.141:29300"]
# X-Pack is the Elastic Stack extension providing security, alerting, monitoring, reporting, machine learning, and many other features
# master switch for X-Pack security on this node
#xpack.security.enabled: true
# transport-layer TLS settings
#xpack.security.transport.ssl.enabled: true
#xpack.security.transport.ssl.verification_mode: certificate
#xpack.security.transport.ssl.keystore.path: certs/
#xpack.security.transport.ssl.truststore.path: certs/elastic-certificates.p12
xpack.security.transport.ssl.enabled: true
xpack.security.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.key: certs/ks-es.key
xpack.security.transport.ssl.certificate: certs/ks-es.crt
xpack.security.transport.ssl.certificate_authorities: certs/ca.crt
# maximum number of bool query clauses (default 1024); our searches are keyed by camera, and with more than 1024 cameras queries would return no results
indices.query.bool.max_clause_count: 10240
################################ es2 ################################
[root@localhost config]# cat elasticsearch.yml
# cluster name
cluster.name: escluster
# cluster bootstrap: elect the es1 node as the initial master
#cluster.initial_master_nodes: "es1"
# node name
node.name: es2
# whether this node may take part in master elections
node.master: true
# whether this node stores data (enabled by default)
node.data: true
# by default Elasticsearch binds to 0.0.0.0, opening ports 9200-9300 for HTTP and 9300-9400 for node-to-node transport; an explicit IP can also be set
network.host: 0.0.0.0
# publish_host is the address other nodes use to connect to this node; if unset it is auto-detected, and it must be a real, reachable address
network.publish_host: 172.17.22.66
# port this node uses to talk to other nodes
transport.tcp.port: 29300
# HTTP listen port
http.port: 29200
# whether to allow cross-origin requests (default false)
http.cors.enabled: true
# when CORS is enabled, the allowed origins (default *, i.e. all domains)
http.cors.allow-origin: "*"
# headers allowed in cross-origin requests
http.cors.allow-headers: Authorization
# addresses for internal node discovery; 9300 is the default transport port, append the port if it was changed
discovery.zen.ping.unicast.hosts: ["172.17.22.65:29300","172.17.22.66:29300","172.17.22.67:29300"]
discovery.zen.minimum_master_nodes: 2
discovery.zen.ping_timeout: 5s
#cluster.initial_master_nodes: ["192.168.1.240:29300","192.168.1.125:29300","192.168.1.141:29300"]
# X-Pack is the Elastic Stack extension providing security, alerting, monitoring, reporting, machine learning, and many other features
# master switch for X-Pack security on this node
#xpack.security.enabled: true
# transport-layer TLS settings
#xpack.security.transport.ssl.enabled: true
#xpack.security.transport.ssl.verification_mode: certificate
#xpack.security.transport.ssl.keystore.path: certs/
#xpack.security.transport.ssl.truststore.path: certs/elastic-certificates.p12
xpack.security.transport.ssl.enabled: true
xpack.security.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.key: certs/ks-es.key
xpack.security.transport.ssl.certificate: certs/ks-es.crt
xpack.security.transport.ssl.certificate_authorities: certs/ca.crt
# maximum number of bool query clauses (default 1024); our searches are keyed by camera, and with more than 1024 cameras queries would return no results
indices.query.bool.max_clause_count: 10240
################################ es3 ################################
[root@localhost config]# cat elasticsearch.yml
# cluster name
cluster.name: escluster
# cluster bootstrap: elect the es1 node as the initial master
#cluster.initial_master_nodes: "es1"
# node name
node.name: es3
# whether this node may take part in master elections
node.master: true
# whether this node stores data (enabled by default)
node.data: true
# by default Elasticsearch binds to 0.0.0.0, opening ports 9200-9300 for HTTP and 9300-9400 for node-to-node transport; an explicit IP can also be set
network.host: 0.0.0.0
# publish_host is the address other nodes use to connect to this node; if unset it is auto-detected, and it must be a real, reachable address
network.publish_host: 172.17.22.67
# port this node uses to talk to other nodes
transport.tcp.port: 29300
# HTTP listen port
http.port: 29200
# whether to allow cross-origin requests (default false)
http.cors.enabled: true
# when CORS is enabled, the allowed origins (default *, i.e. all domains)
http.cors.allow-origin: "*"
# headers allowed in cross-origin requests
http.cors.allow-headers: Authorization
# addresses for internal node discovery; 9300 is the default transport port, append the port if it was changed
discovery.zen.ping.unicast.hosts: ["172.17.22.65:29300","172.17.22.66:29300","172.17.22.67:29300"]
discovery.zen.minimum_master_nodes: 2
discovery.zen.ping_timeout: 5s
#cluster.initial_master_nodes: ["192.168.1.240:29300","192.168.1.125:29300","192.168.1.141:29300"]
# X-Pack is the Elastic Stack extension providing security, alerting, monitoring, reporting, machine learning, and many other features
# master switch for X-Pack security on this node
#xpack.security.enabled: true
# transport-layer TLS settings
#xpack.security.transport.ssl.enabled: true
#xpack.security.transport.ssl.verification_mode: certificate
#xpack.security.transport.ssl.keystore.path: certs/
#xpack.security.transport.ssl.truststore.path: certs/elastic-certificates.p12
xpack.security.transport.ssl.enabled: true
xpack.security.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.key: certs/ks-es.key
xpack.security.transport.ssl.certificate: certs/ks-es.crt
xpack.security.transport.ssl.certificate_authorities: certs/ca.crt
# maximum number of bool query clauses (default 1024); our searches are keyed by camera, and with more than 1024 cameras queries would return no results
indices.query.bool.max_clause_count: 10240
- nginx.conf proxy configuration
nginx configuration for proxying the ES cluster
#cat nginx.conf
http {
    upstream es {
        server 172.17.22.65:29200;  # es1
        server 172.17.22.66:29200;  # es2
        server 172.17.22.67:29200;  # es3
    }
    server {
        listen 29201;
        server_name localhost;
        client_body_timeout 5s;
        client_header_timeout 5s;
        send_timeout 30s;
        location / {
            proxy_pass http://es;
            proxy_buffers 16 1024k;
            proxy_busy_buffers_size 2048k;
            proxy_temp_file_write_size 2048k;
            client_max_body_size 4000m;
            proxy_set_header Host $host:80;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        }
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root html;
        }
    }
}
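- To confirm the proxy forwards correctly, query the cluster through the nginx listen port (host and password are placeholders for your environment):
curl -u elastic:<password> http://<nginx-host>:29201/_cluster/health?pretty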
- Elasticsearch ConfigMap; note that the certificate paths in the ConfigMap must match the StatefulSet mounts
apiVersion: v1
kind: ConfigMap
metadata:
  name: elasticsearch-config
  namespace: es
  labels:
    app: elasticsearch
data:
  elasticsearch.yml: |-
    cluster.name: es-production
    node.name: ${HOSTNAME}
    network.host: 0.0.0.0
    http.port: 29200
    transport.port: 29300
    http.cors.enabled: true
    http.cors.allow-origin: "*"
    discovery.seed_hosts:
      - "es-cluster-0.es-cluster.es.svc.cluster.local:29300"
      - "es-cluster-1.es-cluster.es.svc.cluster.local:29300"
      - "es-cluster-2.es-cluster.es.svc.cluster.local:29300"
    cluster.initial_master_nodes:
      - "es-cluster-0"
      - "es-cluster-1"
      - "es-cluster-2"
    xpack.security.enabled: true
    xpack.security.transport.ssl.enabled: true
    xpack.security.transport.ssl.verification_mode: certificate
    xpack.security.transport.ssl.key: /usr/share/elasticsearch/config/certs/ks-es.key
    xpack.security.transport.ssl.certificate: /usr/share/elasticsearch/config/certs/ks-es.crt
    xpack.security.transport.ssl.certificate_authorities: /usr/share/elasticsearch/config/certs/ca.crt
    indices.query.bool.max_clause_count: 10240
kubectl apply -f es-cm.yaml
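- Note that ${HOSTNAME} is substituted by Elasticsearch itself at startup from the container environment, which is how the three StatefulSet pods (es-cluster-0/1/2) each get a unique node.name from one shared file. To double-check what was stored:
kubectl describe cm elasticsearch-config -n es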
3.6 Elasticsearch headless Service + StatefulSet deployment
- Note: customize the ports, the password, and the MEM/CPU settings. The Authorization header in the readinessProbe is the Base64 of elastic:<password> and must be kept in sync with ELASTIC_PASSWORD (as written below it decodes to elastic:ytx@1234, not 123456).
- The image 172.16.4.17:8090/public/elastic:7.1.1 was found on hub.docker.com and then pushed into the private registry (see the sketch below).
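- A plausible privatization sequence for reference (assuming the Docker Hub image elasticsearch:7.1.1 as the source; any 7.1.1 Elasticsearch image would do):
# pull from Docker Hub, retag for the private registry, and push
docker pull elasticsearch:7.1.1
docker tag elasticsearch:7.1.1 172.16.4.17:8090/public/elastic:7.1.1
docker push 172.16.4.17:8090/public/elastic:7.1.1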
apiVersion: v1
kind: Service
metadata:
  name: es-cluster
  namespace: es
  labels:
    app: elasticsearch
spec:
  clusterIP: None
  ports:
    - name: http
      port: 29200
      targetPort: 29200
    - name: transport
      port: 29300
      targetPort: 29300
  selector:
    app: elasticsearch
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es-cluster
  namespace: es
spec:
  serviceName: es-cluster
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      terminationGracePeriodSeconds: 120
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
        fsGroupChangePolicy: "OnRootMismatch"
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values: ["elasticsearch"]
              topologyKey: "kubernetes.io/hostname"
      containers:
        - name: elasticsearch
          image: 172.16.4.17:8090/public/elastic:7.1.1
          securityContext:
            capabilities:
              add: ["SYS_RESOURCE"]
          volumeMounts:
            - name: elasticsearch-config
              mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
              subPath: elasticsearch.yml
            - name: data
              mountPath: /usr/share/elasticsearch/data
            - name: certs
              mountPath: /usr/share/elasticsearch/config/certs
              readOnly: true
          env:
            - name: ES_JAVA_OPTS
              value: "-Xms4g -Xmx4g -Dlog4j2.formatMsgNoLookups=true -Des.bootstrap.memory_lock=true"
            - name: ELASTIC_PASSWORD
              value: "123456"
          ports:
            - containerPort: 29200
              name: http
            - containerPort: 29300
              name: transport
          resources:
            requests:
              memory: "5Gi"
              cpu: "2"
            limits:
              memory: "5Gi"
              cpu: "4"
          livenessProbe:
            exec:
              command:
                - /bin/bash
                - -c
                - |
                  curl -sS --max-time 5 -u elastic:123456 http://localhost:29200/_cluster/health || exit 1
            initialDelaySeconds: 300
            periodSeconds: 20
          readinessProbe:
            httpGet:
              path: /_cluster/health?local
              port: http
              scheme: HTTP
              httpHeaders:
                - name: Authorization
                  value: "Basic ZWxhc3RpYzp5dHhAMTIzNA=="
            initialDelaySeconds: 120
            timeoutSeconds: 10
      volumes:
        - name: elasticsearch-config
          configMap:
            name: elasticsearch-config
            items:
              - key: elasticsearch.yml
                path: elasticsearch.yml
        - name: certs
          secret:
            secretName: elasticsearch-certs
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        storageClassName: nfs-es-storage
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 10Gi
kubectl apply -f es-ss.yaml
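- Two operational notes, offered as suggestions rather than part of the manifests above: Elasticsearch requires vm.max_map_count >= 262144 on every host that runs a pod, and the StatefulSet does not set it, so if pods crash-loop with a "max virtual memory areas vm.max_map_count [65530] is too low" bootstrap error, raise it on the nodes; then watch the rollout until all three replicas are ready:
# on each k8s node that can schedule an ES pod (node1/node2/node4 here)
sysctl -w vm.max_map_count=262144
echo "vm.max_map_count=262144" >> /etc/sysctl.conf   # persist across reboots

# back on master1: wait for the StatefulSet to converge, then peek at the logs
kubectl rollout status sts/es-cluster -n es
kubectl logs es-cluster-0 -n es --tail=50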
3.7 Elasticsearch Service deployment
- Provides load balancing, replacing the nginx proxy
apiVersion: v1
kind: Service
metadata:
  name: es-nodeport         # NodePort Service name
  namespace: es
spec:
  type: NodePort
  ports:
    - name: http
      port: 29200           # Service port (in-cluster access)
      targetPort: 29200     # HTTP port exposed by the container
      nodePort: 30920       # external port on every node (range 30000-32767)
  selector:
    app: elasticsearch      # must match the StatefulSet pod labels
kubectl apply -f es-svc.yaml
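- The NodePort should now answer on any node's IP; for example, from outside the cluster (using master1's address from the environment table and the password set in the StatefulSet):
curl -u elastic:123456 http://172.16.4.85:30920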
3.8 Checking Elasticsearch status
[root@master1 es]# kubectl get pv | grep es-cluster
es-pv1 10Gi RWO Retain Bound es/data-es-cluster-1 nfs-es-storage 3h2m
es-pv2 10Gi RWO Retain Bound es/data-es-cluster-2 nfs-es-storage 3h2m
es-pv3 10Gi RWO Retain Bound es/data-es-cluster-0 nfs-es-storage 3h2m

[root@master1 es]# kubectl get pvc -n es
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
data-es-cluster-0 Bound es-pv3 10Gi RWO nfs-es-storage 3h1m
data-es-cluster-1 Bound es-pv1 10Gi RWO nfs-es-storage 3h1m
data-es-cluster-2 Bound es-pv2 10Gi RWO nfs-es-storage 3h1m

[root@master1 es]# kubectl get secret -n es
NAME TYPE DATA AGE
default-token-6f7pg kubernetes.io/service-account-token 3 23h
elasticsearch-certs Opaque 3 159m

[root@master1 es]# kubectl get cm -n es
NAME DATA AGE
elasticsearch-config 1 111m
kube-root-ca.crt 1 23h

[root@master1 es]# kubectl get sts -n es
NAME READY AGE
es-cluster 3/3 110m

[root@master1 es]# kubectl get pods -n es -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
es-cluster-0 1/1 Running 0 110m 10.244.3.84 node4 <none> <none>
es-cluster-1 1/1 Running 0 110m 10.244.166.190 node1 <none> <none>
es-cluster-2 1/1 Running 0 110m 10.244.104.22 node2 <none> <none>

[root@master1 es]# kubectl get svc -n es
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
es-cluster ClusterIP None <none> 29200/TCP,29300/TCP 110m
es-nodeport NodePort 10.98.45.191 <none> 29200:30920/TCP 100m
3.9 Elasticsearch cluster validation
1) Check Elasticsearch cluster node status
# from inside a container
[root@master1 es]# kubectl exec -n es -it es-cluster-0 -- curl -u elastic:123456 http://es-cluster.es:29200/_cat/nodes?v
ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
10.244.3.84 7 99 6 0.28 0.65 0.77 mdi * es-cluster-0
10.244.104.22 5 99 4 0.08 0.39 0.57 mdi - es-cluster-2
10.244.166.190 6 99 4 0.10 0.40 0.51 mdi - es-cluster-1
# from the host
[root@master1 es]# curl -u elastic:123456 http://localhost:30920/_cat/nodes?v
ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
10.244.3.84 7 99 6 0.28 0.65 0.77 mdi * es-cluster-0
10.244.104.22 5 99 4 0.08 0.39 0.57 mdi - es-cluster-2
10.244.166.190 6 99 4 0.10 0.40 0.51 mdi - es-cluster-1

2) Check shard allocation
# view shards: a fresh environment has none yet, so create an index to allocate some
[root@master1 es]# kubectl exec -n es -it es-cluster-0 -- curl -u elastic:123456 http://es-cluster.es:29200/_cat/shards?v
index shard prirep state docs store ip node
# create an index (and thus shards)
[root@master1 es]# kubectl exec -n es -it es-cluster-0 -- curl -XPUT -u elastic:123456 http://es-cluster.es:29200/test_index
{"acknowledged":true,"shards_acknowledged":true,"index":"test_index"}
# check shard status again
[root@master1 es]# kubectl exec -n es -it es-cluster-0 -- curl -u elastic:123456 http://es-cluster.es:29200/_cat/shards?v
index shard prirep state docs store ip node
test_index 0 p STARTED 0 230b 10.244.104.22 es-cluster-2
test_index 0 r STARTED 0 230b 10.244.3.84 es-cluster-0

3) Verify cluster health from outside the cluster
[root@master1 es]# curl --user elastic:ytx@1234 -XGET http://localhost:30920/_cluster/health?pretty=true
{"cluster_name" : "es-production","status" : "green","timed_out" : false,"number_of_nodes" : 3,"number_of_data_nodes" : 3,"active_primary_shards" : 1,"active_shards" : 2,"relocating_shards" : 0,"initializing_shards" : 0,"unassigned_shards" : 0,"delayed_unassigned_shards" : 0,"number_of_pending_tasks" : 0,"number_of_in_flight_fetch" : 0,"task_max_waiting_in_queue_millis" : 0,"active_shards_percent_as_number" : 100.0
}四、查询索引信息
[root@master1 es]# curl --user elastic:ytx@1234 -XGET http://localhost:30920/_cat/indices?v
health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
green open test_index l8RTxJz2Te-evDhxhHIHNA 1 1 0 0 566b 283b
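- Once validated, the throwaway index can be dropped with the same credentials:
curl --user elastic:ytx@1234 -XDELETE http://localhost:30920/test_index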
3.10 Elasticsearch cluster addresses
- In-cluster DNS address
es-nodeport.es.svc.cluster.local:29200
- External NodePort address (svc-nodeport): any node IP on port 30920, e.g. 172.16.4.85:30920 (the Service ClusterIP 10.98.45.191 serves port 29200 inside the cluster only)
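- For example, an application pod in any namespace can reach the cluster through the Service DNS name; a throwaway test pod (the curlimages/curl image is an arbitrary choice):
kubectl run es-check --rm -it --restart=Never --image=curlimages/curl -- \
  curl -s -u elastic:123456 http://es-nodeport.es.svc.cluster.local:29200/_cat/nodes?v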
That completes the Elasticsearch cluster deployment!