
ELK cluster in Docker with node auto-provisioning

I'm happy to share my recipe for bringing up an elasticsearch+logstash+kibana cluster with one master node and an arbitrary number of data nodes. Below is the base docker-compose file that brings up either the master node or a data node, depending on the contents of the .env file.

Certificates for inter-node communication, and the passwords protecting them, are generated automatically.

---
version: '3.6'

services:
  elasticsearch:
    image: 'elasticsearch:${ELK_VERSION}'
    container_name: '${COMPOSE_PROJECT_NAME}_elasticsearch'
    hostname: 'elasticsearch'
    restart: unless-stopped
    environment:
      - TZ=${TZ}
      - ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
      - ELASTIC_NODE_NAME=${ELASTIC_NODE_NAME}
      - ELASTIC_NODE_ROLES=${ELASTIC_NODE_ROLES}
      - ELASTIC_NODES=${ELASTIC_NODES}
      - node.name=${ELASTIC_NODE_NAME}
      - node.roles=${ELASTIC_NODE_ROLES}
      - cluster.name=${ELASTIC_CLUSTER_NAME}
      - cluster.initial_master_nodes=${ELASTIC_MASTER}
      - bootstrap.memory_lock=true
      - network.host=0.0.0.0
      - http.port=9200
      - transport.port=9300
      - logger.level=WARN
      - transport.publish_host=${ELASTIC_NODE_NAME}
      - discovery.seed_hosts=${ELASTIC_NODES}
      - xpack.security.enabled=true
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.security.transport.ssl.client_authentication=required
      - xpack.security.transport.ssl.keystore.path=cert/${ELASTIC_NODE_NAME}.p12
      - xpack.security.transport.ssl.truststore.path=cert/${ELASTIC_NODE_NAME}.p12
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
      nproc: 65538
    ports:
      - 127.0.0.1:9200:9200
      - 9300:9300
    volumes:
      - /srv/docker/${COMPOSE_PROJECT_NAME}/elasticsearch/config:/usr/share/elasticsearch/${CONFDIR-config}
      - /srv/docker/${COMPOSE_PROJECT_NAME}/elasticsearch/data:/usr/share/elasticsearch/data
    networks:
      - backend
    logging:
      driver: syslog
      options:
        tag: 'docker_${COMPOSE_PROJECT_NAME}_elasticsearch'

  logstash:
    image: 'logstash:${ELK_VERSION}'
    container_name: '${COMPOSE_PROJECT_NAME}_logstash'
    hostname: 'logstash'
    restart: unless-stopped
    ports:
      - 5046:5046/udp
      - 12201:12201/udp
      - 127.0.0.1:9600:9600
    environment:
      - MONITORING_ENABLED=false
      - LOG_LEVEL=error
      - LOG_FORMAT=plain
      - PIPELINE_ORDERED=auto
      - PATH_CONFIG=/usr/share/logstash/pipeline
      - HTTP_HOST=0.0.0.0
      - HTTP_PORT=9600
      - ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
      - TZ=${TZ}
    volumes:
      - /srv/docker/${COMPOSE_PROJECT_NAME}/logstash/pipeline/logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro
      - /srv/docker/${COMPOSE_PROJECT_NAME}/logstash/data:/usr/share/logstash/data
    command: logstash -f /usr/share/logstash/pipeline/logstash.conf
    networks:
      - backend
    logging:
      driver: syslog
      options:
        tag: 'docker_${COMPOSE_PROJECT_NAME}_logstash'
    depends_on:
      - elasticsearch

  kibana:
    image: 'kibana:${ELK_VERSION}'
    container_name: '${COMPOSE_PROJECT_NAME}_kibana'
    hostname: 'kibana'
    restart: unless-stopped
    ports:
      - 127.0.0.1:5601:5601
    environment:
      - TZ=${TZ}
      - ELASTICSEARCH_USERNAME=elastic
      - ELASTICSEARCH_PASSWORD=${ELASTIC_PASSWORD}
      - MONITORING_ENABLED=false
      - MONITORING_UI_CONTAINER_ELASTICSEARCH_ENABLED=false
      - I18N_LOCALE=ru-RU
      - LOGGING_DEST=stdout
    networks:
      - backend
    logging:
      driver: syslog
      options:
        tag: 'docker_${COMPOSE_PROJECT_NAME}_kibana'
    depends_on:
      - elasticsearch

networks:
  backend:
    name: '${COMPOSE_PROJECT_NAME}-network'

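The bind mounts above expect the host directories to exist before the first run; a quick sketch of creating them, assuming COMPOSE_PROJECT_NAME=elk as in the example .env below:

 mkdir -p /srv/docker/elk/elasticsearch/{config,data}
 mkdir -p /srv/docker/elk/logstash/{pipeline,data}
 # logstash runs as a non-root user inside its container,
 # so ownership of its data dir may need adjusting

The pipeline file mounted at logstash/pipeline/logstash.conf is not included in the post; a minimal sketch of what it might contain, assuming GELF input on the mapped 12201/udp port and output into the local elasticsearch node (the ${ELASTIC_PASSWORD} reference is resolved by logstash from the container environment):

input {
  # GELF over UDP, matches the 12201:12201/udp port mapping
  gelf {
    port => 12201
  }
}
output {
  elasticsearch {
    hosts => ["http://elasticsearch:9200"]
    user => "elastic"
    password => "${ELASTIC_PASSWORD}"
  }
}
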
Example .env file

TZ=Asia/Yekaterinburg
ELK_VERSION=7.9.3
COMPOSE_PROJECT_NAME=elk
ELASTIC_PASSWORD=mysecretpassword
ELASTIC_CLUSTER_NAME=elk-stack
ELASTIC_MASTER=node1.local
ELASTIC_NODE_NAME=node1.local
# role selection: master node
# ELASTIC_NODE_ROLES=master,ingest,ml,remote_cluster_client,transform
# or data node
# ELASTIC_NODE_ROLES=data
ELASTIC_NODES=node2.local,node3.local,node4.local
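
Each node gets its own copy of this .env; for a data node only the node-specific values change. A sketch for node2.local, following the same pattern of listing the other nodes as discovery seed hosts (host names are the illustrative ones from the example above):

TZ=Asia/Yekaterinburg
ELK_VERSION=7.9.3
COMPOSE_PROJECT_NAME=elk
ELASTIC_PASSWORD=mysecretpassword
ELASTIC_CLUSTER_NAME=elk-stack
ELASTIC_MASTER=node1.local
ELASTIC_NODE_NAME=node2.local
ELASTIC_NODE_ROLES=data
ELASTIC_NODES=node1.local,node3.local,node4.local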

Initial node deployment script

## master node

# create:
 CA_PASS='capass' CERT_PASS='certpass' CONFDIR=config2 \
 docker-compose run -e CA_PASS -e CERT_PASS --rm --entrypoint '
  /bin/bash -c "
   set -e
   test ! -f config2/elasticsearch.keystore
   echo create KEYSTORE
   elasticsearch-keystore create -s
   mkdir -m 770 -p config/cert
   echo create CA
   echo ${CA_PASS} | elasticsearch-certutil ca -s --out config/cert/ca.p12
   CERT_PASS=$(< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c30)
   echo create CERT for ${ELASTIC_NODE_NAME}:${CERT_PASS}
   echo ${CERT_PASS} | elasticsearch-certutil cert -s --ca config/cert/ca.p12 --out config/cert/${ELASTIC_NODE_NAME}.p12 --ca-pass ${CA_PASS}
   echo add cert pass to KEYSTORE
   echo ${CERT_PASS} | elasticsearch-keystore add xpack.security.transport.ssl.keystore.secure_password
   echo add cert pass to TRUSTSTORE
   echo ${CERT_PASS} | elasticsearch-keystore add xpack.security.transport.ssl.truststore.secure_password
   for ELASTIC_NODE_NAME in ${ELASTIC_NODES//,/ }; do
    if [ ! -f config/cert/${ELASTIC_NODE_NAME}.p12 ]; then
     CERT_PASS=$(< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c30)
     echo create CERT for ${ELASTIC_NODE_NAME}: ${CERT_PASS}
     echo ${CERT_PASS} | elasticsearch-certutil cert -s --ca config/cert/ca.p12 --out config/cert/${ELASTIC_NODE_NAME}.p12 --ca-pass ${CA_PASS}
    fi
   done
   cp -ra config/* config2/
   chown -cR elasticsearch config2 data
   echo completed
  "' elasticsearch

# start:
 docker-compose up -d
# copy certs to host
 docker cp elk_elasticsearch:/usr/share/elasticsearch/config/cert ./
# down:
 docker-compose down
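
The cert directory copied out above has to land on every data-node host, in the directory the compose project is run from there, so that $(pwd)/cert in the next step points at it. A hedged example; user, host and path are illustrative:

 scp -r ./cert root@node2.local:/srv/compose/elk/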


## data node

# create
 CERT_PASS='certpass' CONFDIR=config2 \
 docker-compose run -e CERT_PASS -v $(pwd)/cert:/usr/share/elasticsearch/config/cert:ro --rm --entrypoint '
  /bin/bash -c "
   set -e
   test ! -f config2/elasticsearch.keystore
   echo create KEYSTORE
   elasticsearch-keystore create -s
   echo add cert pass to KEYSTORE
   echo ${CERT_PASS} | elasticsearch-keystore add xpack.security.transport.ssl.keystore.secure_password
   echo add cert pass to TRUSTSTORE
   echo ${CERT_PASS} | elasticsearch-keystore add xpack.security.transport.ssl.truststore.secure_password
   cp -ra config/* config2/
   chown -cR elasticsearch config2 data
   echo completed
  "' elasticsearch

# start:
 docker-compose up -d elasticsearch
# down:
 docker-compose down

And to finish, a few cheat-sheet commands for working with the elasticsearch cluster

Get cluster and node status

curl --user elastic:${ELASTIC_PASSWORD} -s "http://127.0.0.1:9200/_cluster/health?pretty"
curl --user elastic:${ELASTIC_PASSWORD} -s "http://127.0.0.1:9200/_cat/master?pretty"
curl --user elastic:${ELASTIC_PASSWORD} -s "http://127.0.0.1:9200/_cat/nodes?pretty"
curl --user elastic:${ELASTIC_PASSWORD} -s "http://127.0.0.1:9200/_nodes?pretty"

Temporarily change the nodes' disk allocation policy at runtime (watermark low, high)

curl --user elastic:${ELASTIC_PASSWORD} -XPUT "http://localhost:9200/_cluster/settings" -H 'Content-Type: application/json' -d'
{
  "transient": {
    "cluster.routing.allocation.disk.watermark.low": "60%",
    "cluster.routing.allocation.disk.watermark.high": "75%"
  }
}'
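
To see which transient settings are currently in effect (a quick check, nothing more):

curl --user elastic:${ELASTIC_PASSWORD} -s "http://127.0.0.1:9200/_cluster/settings?pretty"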

Move shard 0 of index myname at runtime from node node2.local to node node3.local

curl --user elastic:${ELASTIC_PASSWORD} -s -X POST "http://127.0.0.1:9200/_cluster/reroute" -H 'Content-Type: application/json' -d'
{
  "commands": [
    { "move": { "index": "myname", "shard": 0, "from_node": "node2.local", "to_node": "node3.local" } }
  ]
}'

Draining node node3.local out of the cluster

curl --user elastic:${ELASTIC_PASSWORD} -XPUT "http://127.0.0.1:9200/_cluster/settings" -H 'Content-Type: application/json' -d'
{
  "transient": {
    "cluster.routing.allocation.exclude._name": "node3.local"
  }
}'
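
Once _cat/shards shows the node is empty, the exclusion can be cleared by setting it back to null, otherwise the node stays excluded:

curl --user elastic:${ELASTIC_PASSWORD} -XPUT "http://127.0.0.1:9200/_cluster/settings" -H 'Content-Type: application/json' -d'
{
  "transient": {
    "cluster.routing.allocation.exclude._name": null
  }
}'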

View shard status

curl --user elastic:${ELASTIC_PASSWORD} -s -XGET 'http://127.0.0.1:9200/_cat/shards?v'
curl --user elastic:${ELASTIC_PASSWORD} -s -XGET 'http://127.0.0.1:9200/_cat/shards'|grep -F UNA
curl --user elastic:${ELASTIC_PASSWORD} -s -XGET 'http://127.0.0.1:9200/_cluster/allocation/explain?pretty'
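
To get an explanation for one specific shard instead of the first unassigned one, the same endpoint accepts a body (index name is the illustrative myname from above):

curl --user elastic:${ELASTIC_PASSWORD} -s -XGET 'http://127.0.0.1:9200/_cluster/allocation/explain?pretty' -H 'Content-Type: application/json' -d'{ "index": "myname", "shard": 0, "primary": true }'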

Force a retry of failed shard allocations at runtime, for the case where a node has dropped out of the cluster

curl --user elastic:${ELASTIC_PASSWORD} -s -XPOST 'http://127.0.0.1:9200/_cluster/reroute?retry_failed=true'

List indices and sort the list by index size

curl --user elastic:${ELASTIC_PASSWORD} -s -XGET 'http://127.0.0.1:9200/_cat/indices?v'
curl --user elastic:${ELASTIC_PASSWORD} -s -XGET 'http://127.0.0.1:9200/_cat/indices?v&h=index,store.size&s=store.size'

Delete index myindex

curl --user elastic:${ELASTIC_PASSWORD} -s -XDELETE 'http://127.0.0.1:9200/myindex'
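
For time-based indices the same call works with a wildcard, assuming the usual logstash-YYYY.MM.dd naming and that destructive wildcard actions are not disabled (action.destructive_requires_name):

curl --user elastic:${ELASTIC_PASSWORD} -s -XDELETE 'http://127.0.0.1:9200/logstash-2020.10.*'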
