Deploying Common Services with docker-compose

1. Deploying ELK with docker-compose. Replace the image versions, ports, directories, and so on below to match your environment. When deploying Elasticsearch, if the config directory does not yet exist on the host, start the container without mounting it, docker cp the configuration out to the host, and then add the mount back, as sketched below.
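For example, assuming the container name (elasticsearch) and host paths used in the compose file below, the copy-out step might look like this:

# start the stack with the config volume line commented out first, then:
docker cp elasticsearch:/usr/share/elasticsearch/config /root/workspace/es/
# re-add the config volume in docker-compose.yml and recreate the container:
docker-compose up -d elasticsearch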


version: "3"
services:
  elasticsearch:
    image: elasticsearch:7.17.9
    container_name: elasticsearch
    privileged: true
    environment:
      - TZ=Asia/Shanghai
      - cluster.name=elasticsearch-cluster
      - node.name=node0
      - node.master=true
      - node.data=true
      - cluster.initial_master_nodes=node0
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - "ES_JAVA_OPTS=-Xms1024m -Xmx1024m"
    volumes:
      - /root/workspace/es/data:/usr/share/elasticsearch/data
      - /root/workspace/es/logs:/usr/share/elasticsearch/logs
      - /root/workspace/es/config:/usr/share/elasticsearch/config
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    ports:
      - 9200:9200
      - 9300:9300
    restart: always
  logstash:
    image: logstash:7.17.9
    container_name: logstash
    privileged: true
    volumes:
      - /root/workspace/logstash/conf/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    ports:
      - 5044:5044
      - 9600:9600
    environment:
      TZ: Asia/Shanghai
      LS_JAVA_OPTS: "-Xms512m -Xmx512m"
    depends_on:
      - elasticsearch
    restart: always
  kibana:
    image: kibana:7.17.9
    container_name: kibana
    privileged: true
    depends_on:
      - elasticsearch
    environment:
      TZ: Asia/Shanghai
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
    volumes:
      - /root/workspace/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
    ports:
      - 5601:5601
    restart: always
  skywalking-server:
    image: apache/skywalking-oap-server:9.3.0
    container_name: skywalking-server
    privileged: true
    depends_on:
      - elasticsearch
    ports:
      - 11800:11800
      - 12800:12800
    environment:
      TZ: Asia/Shanghai
      SW_STORAGE: elasticsearch
      SW_STORAGE_ES_CLUSTER_NODES: elasticsearch:9200
      SW_ES_USER: elastic
      SW_ES_PASSWORD: Elastic#2023
    restart: always
  skywalking-ui:
    image: apache/skywalking-ui:9.3.0
    container_name: skywalking-ui
    privileged: true
    depends_on:
      - skywalking-server
    ports:
      - 8080:8080
    environment:
      TZ: Asia/Shanghai
      SW_OAP_ADDRESS: http://skywalking-server:12800
    restart: always
  sentinel:
    image: bladex/sentinel-dashboard
    container_name: sentinel
    privileged: true
    restart: always
    ports:
      - 8858:8858
    volumes:
      - /root/workspace/sentinel/logs:/root/logs/csp
  zookeeper:
    image: bitnami/zookeeper:3.7.1
    container_name: zookeeper
    privileged: true
    environment:
      TZ: Asia/Shanghai
      ALLOW_ANONYMOUS_LOGIN: "yes"
    restart: always
    ports:
      - 2181:2181
  kafka:
    image: wurstmeister/kafka:2.13-2.8.1
    container_name: kafka
    privileged: true
    ports:
      - 9092:9092
    environment:
      - KAFKA_LISTENERS=PLAINTEXT://:9092
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://192.168.172.85:9092
      - KAFKA_ADVERTISED_HOST_NAME=192.168.172.85
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
      - KAFKA_AUTO_CREATE_TOPICS_ENABLE=true
      - TZ=Asia/Shanghai
    restart: always
    volumes:
      - /root/workspace/kafka/logs:/kafka/logs
    depends_on:
      - zookeeper
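
Save the above as docker-compose.yml and bring the stack up. Elasticsearch usually also requires vm.max_map_count to be raised on the host, so a minimal startup sequence might look like this:

sysctl -w vm.max_map_count=262144
docker-compose up -d
docker-compose ps
docker-compose logs -f elasticsearch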

Configuration of the logstash.conf file:


input {
  tcp {
    mode => "server"
    host => "0.0.0.0"
    port => 5044
    codec => json
  }
  kafka {
    codec => plain { charset => "UTF-8" }
    bootstrap_servers => "192.168.172.85:9092"
    client_id => "clickhouse_logs"
    group_id => "clickhouse_logs"
    consumer_threads => 5
    auto_offset_reset => "latest"
    decorate_events => true
    topics => ["clickhouse_logs"]
  }
}
output {
  if [service] == "svc" {
    elasticsearch {
      hosts => "192.168.172.85:9200"
      user => "elastic"
      password => "Elastic#2023"
      index => "svc-%{+YYYY.MM.dd}"
    }
  } else {
    elasticsearch {
      hosts => "192.168.172.85:9200"
      user => "elastic"
      password => "Elastic#2023"
      index => "clickhouse-%{+YYYY.MM.dd}"
    }
  }
}
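
To quickly verify the pipeline, one option is to push a JSON line at the TCP input and then list the resulting indices (the address and elastic credentials below are simply the ones used in this example environment; json_lines may be a safer codec if you send more than a single test message):

echo '{"service":"svc","message":"hello"}' | nc 192.168.172.85 5044
curl -u elastic:Elastic#2023 "http://192.168.172.85:9200/_cat/indices?v"

Events without a service field equal to svc land in the clickhouse-* index instead, per the conditional above.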

Configuration of the kibana.yml file:


#
# ** THIS IS AN AUTO-GENERATED FILE **
#

# Default Kibana configuration for docker target
server.host: "0.0.0.0"
server.shutdownTimeout: "5s"
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
elasticsearch.username: "elastic"
elasticsearch.password: "Elastic#2023"
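
The elastic username and password used here (and in logstash.conf and the SkyWalking environment above) assume that X-Pack security has been enabled in the elasticsearch.yml mounted from /root/workspace/es/config and that the built-in passwords have been initialized. A minimal sketch, assuming the container name from the compose file (depending on the discovery settings, transport TLS may also need to be configured):

# in /root/workspace/es/config/elasticsearch.yml
xpack.security.enabled: true

# then initialize the built-in user passwords inside the container
docker exec -it elasticsearch bin/elasticsearch-setup-passwords interactive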

2. Deploying Prometheus and Grafana


version: "3"
services:
  prometheus:
    container_name: prometheus
    privileged: true
    image: prom/prometheus:v2.43.0
    volumes:
      - /home/edgegallery/monitor/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
    ports:
      - "9090:9090"
    restart: always
  grafana:
    container_name: grafana
    privileged: true
    image: grafana/grafana:9.4.7
    ports:
      - 3000:3000
    restart: always
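
Grafana starts with the default admin/admin login. The Prometheus datasource can be added in the UI, or provisioned by mounting a file into /etc/grafana/provisioning/datasources/ (that mount is not part of the compose file above and would need to be added); a minimal sketch:

apiVersion: 1
datasources:
  - name: Prometheus
    type: prometheus
    access: proxy
    url: http://prometheus:9090
    isDefault: true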

Configuration of the prometheus.yml file:


global:
  scrape_interval:     60s
  evaluation_interval: 60s
scrape_configs:
  - job_name: prometheus
    static_configs:
      - targets: ['192.168.172.85:9090']
        labels:
          instance: prometheus
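
Additional scrape jobs can be appended under scrape_configs in the same way; for example, a hypothetical node_exporter on the same host (port 9100 is an assumption, not part of the deployment above):

  - job_name: node
    static_configs:
      - targets: ['192.168.172.85:9100']
        labels:
          instance: node-exporter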