Install the Elastic Stack

Published 2023-03-29 10:00:02 · Author: 七星飘虫

Install Elasticsearch

docker network create elastic
docker pull docker.elastic.co/elasticsearch/elasticsearch:7.16.2
docker run -d --name es01-test --net elastic -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:7.16.2
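
To confirm Elasticsearch is up, you can query the REST endpoint (assuming the port mapping above); it should return a small JSON document with the cluster name and version:

curl http://localhost:9200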

Install Kibana

docker pull docker.elastic.co/kibana/kibana:7.16.2
docker run -d --name kib01-test --net elastic -p 5601:5601 -e "ELASTICSEARCH_HOSTS=http://es01-test:9200" docker.elastic.co/kibana/kibana:7.16.2
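
Kibana takes a little while to start; once it is ready it is reachable at http://localhost:5601 in a browser, or you can poll its status API:

curl http://localhost:5601/api/status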

Uninstall

docker stop es01-test
docker stop kib01-test
docker network rm elastic
docker rm es01-test
docker rm kib01-test

Install Logstash

docker pull docker.elastic.co/logstash/logstash:7.16.2

Start the container

docker run -d  --net elastic -p 5044:5044 --name logstash docker.elastic.co/logstash/logstash:7.16.2

Copy the default configuration out of the container

mkdir -p /root/logstash/data && chmod 777 /root/logstash/data
docker cp logstash:/usr/share/logstash/config  /root/logstash/
docker cp logstash:/usr/share/logstash/pipeline /root/logstash/

Remove the container (it was only started to grab the default configuration)

docker rm -f logstash

Edit logstash.yml

vi /root/logstash/config/logstash.yml

logstash.yml contents:

http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://192.168.4.71:9200" ]

Edit pipelines.yml

vi /root/logstash/config/pipelines.yml

pipelines.yml contents:

- pipeline.id: kafkatoes
  path.config: "/usr/share/logstash/pipeline/kafka-ls-es.conf"
  pipeline.workers: 2

Create a new pipeline configuration file, kafka-ls-es.conf, which receives data from Kafka, filters it, and writes it to Elasticsearch.

vi /root/logstash/pipeline/kafka-ls-es.conf

Contents (adjust to your environment):

# kafka -> Logstash -> Elasticsearch pipeline.
#input {
#  kafka {
#    bootstrap_servers => ["192.168.4.71:9092"]
#    group_id => "hello"
#    client_id => "ls-node1"
#    consumer_threads => "4"
#    topics => ["hello-elk"]
#    codec => json { charset => "UTF-8" }
#  }
#}
input {
  tcp {
    port => 5044
    mode => "server"
    ssl_enable => false
  }
}

#filter{
#    json{
#        source=>"message"
#    }
#}

output {
  elasticsearch {
    hosts => ["192.168.4.71:9200"]
    index => "hello-elk-%{+YYYY.MM.dd}"
    #user => "elastic"
    #password => "changeme"
  }
}
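
Before starting the real container, you can optionally ask Logstash to validate the pipeline syntax with --config.test_and_exit (this assumes the official image's entrypoint forwards these arguments to Logstash):

docker run --rm -v /root/logstash/config:/usr/share/logstash/config -v /root/logstash/pipeline:/usr/share/logstash/pipeline docker.elastic.co/logstash/logstash:7.16.2 -f /usr/share/logstash/pipeline/kafka-ls-es.conf --config.test_and_exit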

Start Logstash with the copied configuration mounted

docker run -d --net elastic --user root --name logstash -p 5044:5044 -v /root/logstash/config:/usr/share/logstash/config -v /root/logstash/pipeline:/usr/share/logstash/pipeline -v /root/logstash/data:/usr/share/logstash/data -e TZ=Asia/Shanghai docker.elastic.co/logstash/logstash:7.16.2
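
You can follow the container log to confirm the pipeline started and the TCP input is listening on port 5044:

docker logs -f logstash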

.NET NLog client

Controller

using Microsoft.AspNetCore.Mvc;
using NLog;
using NLog.Extensions.Logging;

namespace ELKTest.Controllers
{
    [ApiController]
    [Route("[controller]")]
    public class WeatherForecastController : ControllerBase
    {
        private static readonly string[] Summaries = new[]
        {
            "Freezing", "Bracing", "Chilly", "Cool", "Mild", "Warm", "Balmy", "Hot", "Sweltering", "Scorching"
        };

        private readonly ILoggerFactory _loggerFactory;

        public WeatherForecastController(ILoggerFactory loggerFactory)
        {
            _loggerFactory = loggerFactory;
        }

        [HttpGet(Name = "GetWeatherForecast")]
        public IEnumerable<WeatherForecast> Get()
        {
            _loggerFactory.ConfigureNLog("nlog.config");
            _loggerFactory.AddNLog();//注入Nlog
            Logger log = NLog.LogManager.GetCurrentClassLogger();

            log.Info("这是一条测试日志信息Info");
            log.Warn("这是一条测试日志信息Warn");
            log.Error("这是一条测试日志信息Error");
            return Enumerable.Range(1, 5).Select(index => new WeatherForecast
            {
                Date = DateTime.Now.AddDays(index),
                TemperatureC = Random.Shared.Next(-20, 55),
                Summary = Summaries[Random.Shared.Next(Summaries.Length)]
            })
            .ToArray();
        }
    }
}
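
The controller above wires NLog up inside the action only for demonstration. In a real application the logging provider is normally configured once at startup; a minimal sketch for a .NET 6 host, assuming the NLog.Web.AspNetCore package is referenced (Program.cs):

using NLog.Web;

var builder = WebApplication.CreateBuilder(args);
builder.Services.AddControllers();

// Replace the default providers with NLog; nlog.config is picked up automatically.
builder.Logging.ClearProviders();
builder.Host.UseNLog();

var app = builder.Build();
app.MapControllers();
app.Run();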

nlog.config

<?xml version="1.0" encoding="utf-8"?>
<nlog xmlns="http://www.nlog-project.org/schemas/NLog.xsd"
      xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
      autoReload="true"
      internalLogLevel="Warn"
      internalLogFile="internal-nlog.txt">

	<extensions>
		<!--enable NLog.Web for ASP.NET Core-->
		<add assembly="NLog.Web.AspNetCore"/>
	</extensions>

	<!-- define various log targets -->
	<!-- log file directory -->
	<variable name="logDirectory" value="${basedir}/logs/${shortdate}"/>
	<variable name="nodeName" value="node1"/>

	<targets async="true">
		<!-- target for all logs -->
		<target xsi:type="File"
				name="allfile"
				fileName="${logDirectory}/nlog-all/${shortdate}.log"
				layout="#node1#${longdate}#${logger}#${uppercase:${level}}#${callsite}#${callsite-linenumber}#${aspnet-request-url}#${aspnet-request-method}#${aspnet-mvc-controller}#${aspnet-mvc-action}#${message}#${exception:format=ToString}#"
				keepFileOpen="false"
            />

		<!-- local file target -->
		<target xsi:type="File"
				name="ownLog-file"
				fileName="${logDirectory}/nlog-${level}/${shortdate}.log"
				layout="#${longdate}#${nodeName}#${logger}#${uppercase:${level}}#${callsite}#${callsite-linenumber}#${aspnet-request-url}#${aspnet-request-method}#${aspnet-mvc-controller}#${aspnet-mvc-action}#${message}#${exception:format=ToString}#"
				keepFileOpen="false"
            />

		<!-- TCP target (ships logs to Logstash) -->
		<target xsi:type="Network"
				name="ownLog-tcp"
				keepConnection="false"
				address ="tcp://192.168.4.71:5044"
				layout="#${longdate}#${nodeName}#${logger}#${uppercase:${level}}#${callsite}#${callsite-linenumber}#${aspnet-request-url}#${aspnet-request-method}#${aspnet-mvc-controller}#${aspnet-mvc-action}#${message}#${exception:format=ToString}#"
            />
		<!-- grok pattern for this layout -->
		<!--#%{DATA:request_time}#%{DATA:node_name}#%{DATA:class_name}#%{DATA:log_level}#%{DATA:call_site}#%{DATA:line_number}#%{DATA:request_url}#%{DATA:request_method}#%{DATA:container_name}#%{DATA:action_name}#%{DATA:log_info}#%{DATA:exception_msg}#-->
		<!-- discard target -->
		<target xsi:type="Null" name="blackhole" />
	</targets>

	<!-- log levels: Trace -> Debug -> Info -> Warn -> Error -> Fatal -->
	<!-- logging rules -->
	<rules>
		<!-- all logs, including Microsoft logs -->
		<logger name="*" minlevel="Trace" writeTo="allfile" />

		<!-- application logs only: route Microsoft logs to the discard target -->
		<logger name="Microsoft.*" minlevel="Trace" writeTo="blackhole" final="true" />
		<logger name="*" minlevel="Debug" writeTo="ownLog-file" />
		<logger name="*" minlevel="Info" writeTo="ownLog-tcp" />
	</rules>
</nlog>
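
If you want Logstash to split this '#'-delimited layout into separate fields, a grok filter based on the pattern from the comment above could be added to kafka-ls-es.conf; a sketch (field names follow the commented pattern, adjust as needed):

filter {
  grok {
    match => { "message" => "#%{DATA:request_time}#%{DATA:node_name}#%{DATA:class_name}#%{DATA:log_level}#%{DATA:call_site}#%{DATA:line_number}#%{DATA:request_url}#%{DATA:request_method}#%{DATA:container_name}#%{DATA:action_name}#%{DATA:log_info}#%{DATA:exception_msg}#" }
  }
}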

The following steps are optional (only needed if you pull data from Kafka)

Set up ZooKeeper

docker pull zookeeper
docker run -d --name zookeeper -p 2181:2181 -t zookeeper

Set up Kafka

docker pull wurstmeister/kafka
docker run  -d --name kafka -p 9092:9092  --env KAFKA_ADVERTISED_HOST_NAME=localhost  -e KAFKA_ZOOKEEPER_CONNECT=192.168.4.71:2181 -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://192.168.4.71:9092  -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 -e KAFKA_HEAP_OPTS="-Xmx256M -Xms128M"  wurstmeister/kafka
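
To check that Kafka is reachable, you can create the hello-elk topic used by the commented-out Kafka input above (this assumes the wurstmeister image puts the Kafka scripts on the PATH and a Kafka version whose kafka-topics.sh accepts --bootstrap-server):

docker exec -it kafka kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 1 --topic hello-elk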

Set passwords for the built-in Elasticsearch users

Run the command below, confirm with y, and then enter a password for each account when prompted (there are several built-in accounts; set them one by one).

elasticsearch-setup-passwords interactive
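
The tool ships inside the Elasticsearch container, so one way to run it is via docker exec (note the built-in users can only be given passwords once xpack.security.enabled is set to true):

docker exec -it es01-test bin/elasticsearch-setup-passwords interactive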

Install the IK analyzer

./bin/elasticsearch-plugin install https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.16.2/elasticsearch-analysis-ik-7.16.2.zip

# alternative: manual installation by copying the archive into the container
# enter the Elasticsearch container
docker exec -it <container-name> /bin/bash
# go to the plugins directory
cd plugins
# create the ik directory
mkdir ik    # or: mkdir /usr/share/elasticsearch/plugins/ik
# leave the container
exit
# copy the plugin archive into the container
docker cp d:\elasticsearch-analysis-ik-7.16.2.zip <container-name>:/usr/share/elasticsearch/plugins/ik/
# enter the container again and unzip the analyzer inside the ik directory
unzip elasticsearch-analysis-ik-7.16.2.zip
# the archive can be deleted after unpacking
rm -rf elasticsearch-analysis-ik-7.16.2.zip
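
Elasticsearch only loads plugins at startup, so restart the container after unpacking the analyzer:

docker restart <container-name>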

Test in Kibana Dev Tools

GET _analyze
{
  "analyzer": "ik_max_word", 
  "text": "elasticsearch搜索引擎"
}

# a response like the following indicates the analyzer is installed successfully
{
  "tokens" : [
    {
      "token" : "elasticsearch",
      "start_offset" : 0,
      "end_offset" : 13,
      "type" : "ENGLISH",
      "position" : 0
    },
    {
      "token" : "搜索引擎",
      "start_offset" : 13,
      "end_offset" : 17,
      "type" : "CN_WORD",
      "position" : 1
    },
    {
      "token" : "搜索",
      "start_offset" : 13,
      "end_offset" : 15,
      "type" : "CN_WORD",
      "position" : 2
    },
    {
      "token" : "索引",
      "start_offset" : 14,
      "end_offset" : 16,
      "type" : "CN_WORD",
      "position" : 3
    },
    {
      "token" : "引擎",
      "start_offset" : 15,
      "end_offset" : 17,
      "type" : "CN_WORD",
      "position" : 4
    }
  ]
}

List all indices

http://localhost:9200/_cat/indices?v

Testing

#add a document
PUT test/doc/2
{
 "name":"wangfei",
 "age":27,
 "desc":"热天还不让后人不认同"
}

PUT test/doc/1
{
 "name":"wangjifei",
 "age":27,
 "desc":"萨芬我反胃为范围额"
}

PUT test/doc/3
{
 "name":"wangyang",
 "age":27,
 "desc":"点在我心内的几首歌"
}


GET test
GET test/doc/1
GET test/doc/2

#get all documents in the test index
GET test/doc/_search


#delete a specific document
DELETE test/doc/3

#delete the index
DELETE test

#update a document
#PUT replaces the whole document
PUT test/doc/1
{
  "name":"王计飞"
}

#update a document
#partial update: only the given fields change, missing fields are added
POST test/doc/1/_update
{
 "doc":{
    "desc":"生活就像 茫茫海上"
 }
}

#query string search
GET test/doc/_search?q=name:wangfei

#structured query (single-field match; cannot combine multiple fields)
GET test/doc/_search
{
    "query":{
        "match":{
            "name":"wangfei"
        }
    }
}


#prepare test data
PUT test1/doc/1
{
 "title": "中国是世界上人口最多的国家"
}
PUT test1/doc/2
{
 "title": "美国是世界上军事实力最强大的国家"
}
PUT test1/doc/3
{
 "title": "北京是中国的首都"
}

#query 1
GET test1/doc/_search
{
    "query":{
        "match":{
            "title":"中国"
        }
    }
}

#Looking at the results, the 中国 document is returned as expected, but the 美国 document comes back too, which is not what we want.
#The reason is that Elasticsearch's default analyzer splits Chinese text character by character, so searching for 中国 matches 中 and 国 separately, and the 国 in 美国 also matches.
#We treat 中国 as a phrase with a specific meaning, which can be handled with match_phrase.

GET test1/doc/_search
{
    "query":{
        "match_phrase":{
            "title":"中国"
        }
    }
}
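
#slop relaxes the phrase match: the terms may be up to that many positions apart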


GET test1/doc/_search
{
    "query":{
        "match_phrase": {
            "title": {
                "query": "中国世界",
                "slop": 2
            }
        }
    }
}


#prepare data
PUT zhifou/doc/1
{
 "name":"顾老二",
 "age":30,
 "from":"gu",
 "desc":"皮肤黑、武器长、性格直",
 "tags":["黑", "长", "直"]
}

PUT zhifou/doc/2
{
 "name":"大娘子",
 "age":18,
 "from":"sheng",
 "desc":"肤白貌美,娇憨可爱",
 "tags":["白", "富", "美"]
}

PUT zhifou/doc/3
{
 "name":"龙套偏房",
 "age":22,
 "from":"gu",
 "desc":"mmp,没怎么看,不知道怎么形容",
 "tags":["造数据", "真", "难"]
}

PUT zhifou/doc/4
{
 "name":"石头",
 "age":29,
 "from":"gu",
 "desc":"粗中有细,狐假虎威",
 "tags":["粗", "大", "猛"]
}

PUT zhifou/doc/5
{
 "name":"魏行首",
 "age":25,
 "from":"广云台",
 "desc":"仿佛兮若轻云之蔽月,飘飘兮若流风之回,mmp,最后竟然没有嫁给顾老二!",
 "tags":["闭月", "羞花"]
}

#query all documents
GET zhifou/doc/_search
{
  "query": {
    "match_all": {}
  }
}

#average age of the people whose from field is gu
GET zhifou/doc/_search
{
  "query": {
    "match": {
      "from": "gu"
    }
  },
  "aggs": {
    "my_avg": {
      "avg": {
        "field": "age"
      }
    }
  },
  "_source": ["name", "age"]
}
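
#same aggregation, but "size": 0 suppresses the hit documents so only the aggregation result is returned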


GET zhifou/doc/_search
{
  "query": {
    "match": {
      "from": "gu"
    }
  },
  "aggs": {
    "my_avg": {
      "avg": {
        "field": "age"
      }
    }
  },
  "size": 0,
  "_source": ["name", "age"]
}