ELK Installation

Elasticsearch Installation and Configuration

## Install
tar -xvf elasticsearch-7.13.1-linux-x86_64.tar.gz
mv elasticsearch-7.13.1 /usr/local/elasticsearch

## Create a dedicated user (Elasticsearch will not run as root)
useradd es
passwd es
chown -R es:es /usr/local/elasticsearch
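
## path.data in the config below points to /data/elasticsearch, so that directory must also exist and be owned by es
mkdir -p /data/elasticsearch
chown -R es:es /data/elasticsearch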

## Linux kernel and limits settings
vi /etc/sysctl.conf
fs.file-max=655360
vm.max_map_count = 262144

sysctl -p

vi /etc/security/limits.conf
* soft nproc 20480
* hard nproc 20480
* soft nofile 65536
* hard nofile 65536
* soft memlock unlimited
* hard memlock unlimited

## Re-login for the limits to take effect, then verify
ulimit -a
## On CentOS 7 this file caps nproc and would override the limits set above, so remove it
rm -f /etc/security/limits.d/20-nproc.conf

## Configure
vi /usr/local/elasticsearch/config/elasticsearch.yml

#cluster.name: elkbigdata
#node.name: server1
#node.master: true
#node.data: true
path.data: /data/elasticsearch
#path.logs: /usr/local/elasticsearch/logs
#bootstrap.memory_lock: true
network.host: 192.168.8.221
#http.port: 9200
#discovery.zen.minimum_master_nodes: 1
#discovery.seed_hosts: ["127.0.0.1", "[::1]"]
#discovery.seed_providers: file
cluster.initial_master_nodes: "vm221"
##discovery.zen.ping.unicast.hosts: ["172.16.213.37:9300","172.16.213.78:9300"]

## Startup script
#!/bin/bash
su - es -c "/usr/local/elasticsearch/bin/elasticsearch -d"
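
## Quick sanity check once the node is up (startup can take tens of seconds); address per network.host above
curl http://192.168.8.221:9200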

Common Elasticsearch Commands

# List indices (use the network.host address, since the node is not bound to localhost)
curl 'http://192.168.8.221:9200/_cat/indices?v'
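
# Cluster health is another common check
curl 'http://192.168.8.221:9200/_cluster/health?pretty'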

ZooKeeper Installation and Configuration

## Install
tar -xvf apache-zookeeper-3.6.3-bin.tar.gz
mv apache-zookeeper-3.6.3-bin /usr/local/zookeeper

## Configure
vi /usr/local/zookeeper/conf/zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/data/zookeeper
clientPort=2181
server.1=192.168.8.225:2888:3888
server.2=192.168.8.226:2888:3888
server.3=192.168.8.227:2888:3888

## Set the myid file under dataDir (use 2 and 3 on the other two nodes, matching server.N above)
mkdir -p /data/zookeeper
echo "1" > /data/zookeeper/myid

## Startup script
#!/bin/bash
/usr/local/zookeeper/bin/zkServer.sh start
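
## Once all three nodes are started, each should report its role (leader or follower)
/usr/local/zookeeper/bin/zkServer.sh status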

Kafka Installation and Configuration

## Install
tar -xvf kafka_2.11-2.2.2.tgz
mv kafka_2.11-2.2.2 /usr/local/kafka

## Configuration file
vi /usr/local/kafka/config/server.properties

broker.id=0
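# broker.id must be unique per broker: use 1 and 2 on the other two nodes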
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/usr/local/kafka/logs
num.partitions=3
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=60
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=192.168.8.225:2181,192.168.8.226:2181,192.168.8.227:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0

## Startup script
#!/bin/bash
nohup /usr/local/kafka/bin/kafka-server-start.sh /usr/local/kafka/config/server.properties 1>kafka.log 2>&1 &
tail -f kafka.log

## Common operations
# List topics
bin/kafka-topics.sh --zookeeper 192.168.8.225:2181,192.168.8.226:2181,192.168.8.227:2181 --list
# Create a topic
bin/kafka-topics.sh --zookeeper 192.168.8.225:2181,192.168.8.226:2181,192.168.8.227:2181 --create --topic elktopic \
  --partitions 3 --replication-factor 1
# Consume a topic from the beginning
bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic elktopic --from-beginning
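# Produce test messages by hand (the console producer in Kafka 2.2 still takes --broker-list)
bin/kafka-console-producer.sh --broker-list localhost:9092 --topic elktopic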

Filebeat Installation and Configuration

## Install
tar -xvf filebeat-7.13.1-linux-x86_64.tar.gz
mv filebeat-7.13.1-linux-x86_64 /usr/local/filebeat

## Configure
vi /usr/local/filebeat/filebeat.yml

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/secure
  fields:
    log_topic: topic1

output.kafka:
  hosts: ["192.168.8.225:9092", "192.168.8.226:9092", "192.168.8.227:9092"]
  enabled: true
  topic: '%{[fields.log_topic]}'
  partition.round_robin:
    reachable_only: false
  required_acks: 1
  compression: gzip
  max_message_bytes: 1000000
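
## Optionally validate the config and the Kafka output before starting
cd /usr/local/filebeat
./filebeat test config -c filebeat.yml
./filebeat test output -c filebeat.yml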

## Startup script
#!/bin/bash
/usr/local/filebeat/filebeat -e -c /usr/local/filebeat/filebeat.yml >filebeat.log 2>&1 &
tail -f filebeat.log

Logstash Installation and Configuration

## Install
tar -xvf logstash-7.13.1-linux-x86_64.tar.gz
mv logstash-7.13.1 /usr/local/logstash

## Configure
cat /usr/local/logstash/config/logstash.conf
input {
  kafka {
    bootstrap_servers => "192.168.8.225:9092,192.168.8.226:9092,192.168.8.227:9092"
    topics => ["topic1"]
  }
}
output {
  elasticsearch {
    hosts => "192.168.8.221:9200"
    index => "securelog-%{+YYYY-MM-dd}"
  }
}
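
## Optionally syntax-check the pipeline before starting
/usr/local/logstash/bin/logstash -f /usr/local/logstash/config/logstash.conf --config.test_and_exit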

## Startup script
#!/bin/bash
/usr/local/logstash/bin/logstash -f /usr/local/logstash/config/logstash.conf >logstash.log 2>&1 &

Kibana Installation and Configuration

## Install
tar -xvf kibana-7.13.1-linux-x86_64.tar.gz
mv kibana-7.13.1-linux-x86_64 /usr/local/kibana

## Configure
vi /usr/local/kibana/config/kibana.yml
server.port: 5601
server.host: "192.168.8.221"
elasticsearch.hosts: "http://192.168.8.221:9200"

## Startup script
#!/bin/bash
nohup /usr/local/kibana/bin/kibana --allow-root >kibana.log 2>&1 &
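
## Kibana takes a while to start; it should then answer on the host/port configured above
curl -I http://192.168.8.221:5601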

## Index pattern setup
## Management --> Stack Management --> Kibana --> Index Patterns
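## With the Logstash index above (securelog-%{+YYYY-MM-dd}), the pattern to create is securelog-*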