diff --git a/templates/es-cluster/4/README.md b/templates/es-cluster/4/README.md
new file mode 100644
index 0000000..c662188
--- /dev/null
+++ b/templates/es-cluster/4/README.md
@@ -0,0 +1,5 @@
+# Elasticsearch Cluster
+
+A scalable Elasticsearch cluster
+
+WARN: To avoid vm.max_map_count errors, set "Update host sysctl" to true. The host parameter vm.max_map_count will then be updated to 262144 on any host where it is lower.
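+
+When "Update host sysctl" is true, the stack starts a privileged sidekick next to every Elasticsearch container, as sketched below (taken from `docker-compose.yml.tpl`); it is shown here for reference only and is created for you:
+
+```yaml
+es-sysctl:
+  labels:
+    io.rancher.container.start_once: true
+  network_mode: none
+  image: rawmind/alpine-sysctl:0.1
+  privileged: true
+  environment:
+    - "SYSCTL_KEY=vm.max_map_count"
+    - "SYSCTL_VALUE=262144"
+```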
diff --git a/templates/es-cluster/4/docker-compose.yml.tpl b/templates/es-cluster/4/docker-compose.yml.tpl
new file mode 100644
index 0000000..6ed93e0
--- /dev/null
+++ b/templates/es-cluster/4/docker-compose.yml.tpl
@@ -0,0 +1,122 @@
+version: '2'
+services:
+ es-master:
+ labels:
+ io.rancher.scheduler.affinity:container_label_soft_ne: io.rancher.stack_service.name=$${stack_name}/$${service_name}
+ io.rancher.container.hostname_override: container_name
+ io.rancher.sidekicks: es-storage{{- if eq .Values.UPDATE_SYSCTL "true" -}},es-sysctl{{- end}}
+ image: docker.elastic.co/elasticsearch/elasticsearch:6.2.3
+ environment:
+ - "cluster.name=${cluster_name}"
+ - "node.name=$${HOSTNAME}"
+ - "bootstrap.memory_lock=true"
+ - "xpack.security.enabled=false"
+ - "ES_JAVA_OPTS=-Xms${master_heap_size} -Xmx${master_heap_size}"
+ - "discovery.zen.ping.unicast.hosts=es-master"
+ - "discovery.zen.minimum_master_nodes=${minimum_master_nodes}"
+ - "node.master=true"
+ - "node.data=false"
+ ulimits:
+ memlock:
+ soft: -1
+ hard: -1
+ nofile:
+ soft: 65536
+ hard: 65536
+ mem_limit: ${master_mem_limit}
+ mem_swappiness: 0
+ cap_add:
+ - IPC_LOCK
+ volumes_from:
+ - es-storage
+
+ es-data:
+ labels:
+ io.rancher.scheduler.affinity:container_label_soft_ne: io.rancher.stack_service.name=$${stack_name}/$${service_name}
+ io.rancher.container.hostname_override: container_name
+ io.rancher.sidekicks: es-storage{{- if eq .Values.UPDATE_SYSCTL "true" -}},es-sysctl{{- end}}
+ image: docker.elastic.co/elasticsearch/elasticsearch:6.2.3
+ environment:
+ - "cluster.name=${cluster_name}"
+ - "node.name=$${HOSTNAME}"
+ - "bootstrap.memory_lock=true"
+ - "xpack.security.enabled=false"
+ - "discovery.zen.ping.unicast.hosts=es-master"
+ - "ES_JAVA_OPTS=-Xms${data_heap_size} -Xmx${data_heap_size}"
+ - "node.master=false"
+ - "node.data=true"
+ ulimits:
+ memlock:
+ soft: -1
+ hard: -1
+ nofile:
+ soft: 65536
+ hard: 65536
+ mem_limit: ${data_mem_limit}
+ mem_swappiness: 0
+ cap_add:
+ - IPC_LOCK
+ volumes_from:
+ - es-storage
+ depends_on:
+ - es-master
+
+ es-client:
+ labels:
+ io.rancher.scheduler.affinity:container_label_soft_ne: io.rancher.stack_service.name=$${stack_name}/$${service_name}
+ io.rancher.container.hostname_override: container_name
+ io.rancher.sidekicks: es-storage{{- if eq .Values.UPDATE_SYSCTL "true" -}},es-sysctl{{- end}}
+ image: docker.elastic.co/elasticsearch/elasticsearch:6.2.3
+ environment:
+ - "cluster.name=${cluster_name}"
+ - "node.name=$${HOSTNAME}"
+ - "bootstrap.memory_lock=true"
+ - "xpack.security.enabled=false"
+ - "discovery.zen.ping.unicast.hosts=es-master"
+ - "ES_JAVA_OPTS=-Xms${client_heap_size} -Xmx${client_heap_size}"
+ - "node.master=false"
+ - "node.data=false"
+ ulimits:
+ memlock:
+ soft: -1
+ hard: -1
+ nofile:
+ soft: 65536
+ hard: 65536
+ mem_limit: ${client_mem_limit}
+ mem_swappiness: 0
+ cap_add:
+ - IPC_LOCK
+ volumes_from:
+ - es-storage
+ depends_on:
+ - es-master
+
+ es-storage:
+ labels:
+ io.rancher.container.start_once: true
+ network_mode: none
+ image: rawmind/alpine-volume:0.0.2-1
+ environment:
+ - SERVICE_UID=1000
+ - SERVICE_GID=1000
+ - SERVICE_VOLUME=/usr/share/elasticsearch/data
+ volumes:
+ - es-storage-volume:/usr/share/elasticsearch/data
+
+ {{- if eq .Values.UPDATE_SYSCTL "true" }}
+ es-sysctl:
+ labels:
+ io.rancher.container.start_once: true
+ network_mode: none
+ image: rawmind/alpine-sysctl:0.1
+ privileged: true
+ environment:
+ - "SYSCTL_KEY=vm.max_map_count"
+ - "SYSCTL_VALUE=262144"
+ {{- end}}
+
+volumes:
+ es-storage-volume:
+ driver: ${VOLUME_DRIVER}
+ per_container: true
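+
+# Note (for readers of this template): when UPDATE_SYSCTL is answered "true", the
+# conditional sidekicks labels above render as
+#   io.rancher.sidekicks: es-storage,es-sysctl
+# and the es-sysctl service is included; with any other answer they render as
+#   io.rancher.sidekicks: es-storage
+# and the es-sysctl block is omitted.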
diff --git a/templates/es-cluster/4/rancher-compose.yml b/templates/es-cluster/4/rancher-compose.yml
new file mode 100644
index 0000000..287c035
--- /dev/null
+++ b/templates/es-cluster/4/rancher-compose.yml
@@ -0,0 +1,111 @@
+version: '2'
+catalog:
+ name: Elasticsearch Cluster
+ version: 6.2.3-rancher1
+ description: Scalable Elasticsearch Cluster
+
+ questions:
+ - variable: "cluster_name"
+ type: "string"
+ required: true
+ label: "Cluster name"
+ description: "Name of the Elasticsearch Cluster"
+ default: "es-cluster"
+
+ - variable: "UPDATE_SYSCTL"
+ label: "Update host sysctl:"
+ description: |
+ Set to true to avoid vm.max_map_count errors.
+ WARN: If set to true, the host parameter vm.max_map_count will be updated to 262144.
+ default: false
+ required: true
+ type: enum
+ options:
+ - false
+ - true
+
+ - variable: "master_heap_size"
+ type: "string"
+ required: true
+ label: "Heap size (master nodes)"
+ description: "Heap size to be allocated for Java (master nodes)"
+ default: "512m"
+
+ - variable: "master_mem_limit"
+ type: "int"
+ required: true
+ label: "Memory limit in bytes (master nodes)"
+ description: "Memory limit in bytes per Elasticsearch container. AT LEAST double the heap size! (master nodes)"
+ default: 1073741824
+
+ - variable: "data_heap_size"
+ type: "string"
+ required: true
+ label: "Heap size (data nodes)"
+ description: "Heap size to be allocated for Java (data nodes)"
+ default: "512m"
+
+ - variable: "data_mem_limit"
+ type: "int"
+ required: true
+ label: "Memory limit in bytes (data nodes)"
+ description: "Memory limit in bytes per Elasticsearch container. AT LEAST double the heap size! (data nodes)"
+ default: 1073741824
+
+ - variable: "client_heap_size"
+ type: "string"
+ required: true
+ label: "Heap size (client nodes)"
+ description: "Heap size to be allocated for Java (client nodes)"
+ default: "512m"
+
+ - variable: "client_mem_limit"
+ type: "int"
+ required: true
+ label: "Memory limit in bytes (client nodes)"
+ description: "Memory limit in bytes per Elasticsearch container. AT LEAST double the heap size! (client nodes)"
+ default: 1073741824
+
+ - variable: "minimum_master_nodes"
+ type: "int"
+ required: true
+ label: "# of minimum master nodes"
+ description: "Number of master nodes required to reach quorum (discovery.zen.minimum_master_nodes). Also sets the initial scale of the es-master service."
+ default: 3
+
+ - variable: "initial_data_nodes"
+ type: "int"
+ required: true
+ label: "# of initial data nodes"
+ description: "Set the initial number of data nodes"
+ default: 2
+
+ - variable: "initial_client_nodes"
+ type: "int"
+ required: true
+ label: "# of initial client nodes"
+ description: "Set the initial number of client nodes"
+ default: 1
+
+ - variable: "VOLUME_DRIVER"
+ description: "The volume driver to use for the Elasticsearch data volume"
+ label: "VOLUME Driver"
+ required: true
+ default: "local"
+ type: enum
+ options:
+ - local
+ - rancher-nfs
+ - rancher-efs
+ - rancher-ebs
+
+services:
+
+ es-master:
+ scale: ${minimum_master_nodes}
+
+ es-data:
+ scale: ${initial_data_nodes}
+
+ es-client:
+ scale: ${initial_client_nodes}
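+
+# Sizing sketch (hypothetical answers, following the "AT LEAST double the heap size"
+# rule in the descriptions above): the default 512m heaps pair with a mem_limit of
+# 1073741824 bytes (1 GiB); raising e.g. the data nodes to a 2g heap would call for
+#   data_heap_size: "2g"
+#   data_mem_limit: 4294967296   # 4 GiB, twice the 2 GiB heap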
diff --git a/templates/es-cluster/config.yml b/templates/es-cluster/config.yml
index bb8b8c6..ce6d654 100644
--- a/templates/es-cluster/config.yml
+++ b/templates/es-cluster/config.yml
@@ -1,5 +1,5 @@
-name: Elasticsearch Cluster 5.5.1
+name: Elasticsearch Cluster 6.2.3
description: |
Elasticsearch, you know for search!
-version: 5.5.1-rancher1
+version: 6.2.3-rancher1
category: ELK
diff --git a/templates/kibana/4/docker-compose.yml b/templates/kibana/4/docker-compose.yml
new file mode 100644
index 0000000..1287953
--- /dev/null
+++ b/templates/kibana/4/docker-compose.yml
@@ -0,0 +1,33 @@
+kibana-vip:
+ ports:
+ - "${public_port}:80"
+ restart: always
+ tty: true
+ image: rancher/load-balancer-service
+ links:
+ - nginx-proxy:kibana6
+ stdin_open: true
+nginx-proxy-conf:
+ image: rancher/nginx-conf:v0.2.0
+ command: "-backend=rancher --prefix=/2015-07-25"
+ labels:
+ io.rancher.container.hostname_override: container_name
+nginx-proxy:
+ image: rancher/nginx:v1.9.4-3
+ volumes_from:
+ - nginx-proxy-conf
+ labels:
+ io.rancher.container.hostname_override: container_name
+ io.rancher.sidekicks: nginx-proxy-conf,kibana6
+ external_links:
+ - ${elasticsearch_source}:elasticsearch
+kibana6:
+ restart: always
+ tty: true
+ image: docker.elastic.co/kibana/kibana:6.2.3
+ net: "container:nginx-proxy"
+ stdin_open: true
+ environment:
+ ELASTICSEARCH_URL: "http://elasticsearch:9200"
+ labels:
+ io.rancher.container.hostname_override: container_name
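+
+# Request path, for reference (ports taken from this file and rancher-compose.yml):
+# client -> kibana-vip (chosen public port, forwarded to port 80) -> nginx-proxy ->
+# kibana6 on upstream port 5601, which reaches the linked cluster at
+# http://elasticsearch:9200.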
diff --git a/templates/kibana/4/rancher-compose.yml b/templates/kibana/4/rancher-compose.yml
new file mode 100644
index 0000000..37b8723
--- /dev/null
+++ b/templates/kibana/4/rancher-compose.yml
@@ -0,0 +1,24 @@
+.catalog:
+ name: "Kibana"
+ version: "6.2.3-rancher1"
+ description: "Kibana: Explore & Visualize Your Data"
+ questions:
+ - variable: "elasticsearch_source"
+ description: "Link to the Elasticsearch service or stack/service"
+ label: "Elasticsearch source"
+ type: "service"
+ required: true
+ default: "es/elasticsearch-clients"
+ - variable: "public_port"
+ label: "Public Port"
+ description: "Unique public port for Kibana"
+ type: "int"
+ default: 80
+ required: true
+
+nginx-proxy:
+ metadata:
+ nginx:
+ conf:
+ servername: "kibana"
+ upstream_port: 5601
diff --git a/templates/kibana/catalogIcon-kibana.svg b/templates/kibana/catalogIcon-kibana.svg
index 6f5091e..5cac2fb 100644
--- a/templates/kibana/catalogIcon-kibana.svg
+++ b/templates/kibana/catalogIcon-kibana.svg
@@ -1,34 +1,45 @@
[SVG markup not shown: catalog icon updated]
diff --git a/templates/kibana/config.yml b/templates/kibana/config.yml
index cdadc3d..41d5de4 100644
--- a/templates/kibana/config.yml
+++ b/templates/kibana/config.yml
@@ -1,4 +1,4 @@
-name: "Kibana 4"
+name: "Kibana"
description: "Visualization dashboard"
-version: "4.6.3-rancher1"
+version: "6.2.3-rancher1"
category: ELK
diff --git a/templates/logstash/4/docker-compose.yml b/templates/logstash/4/docker-compose.yml
new file mode 100644
index 0000000..8743df8
--- /dev/null
+++ b/templates/logstash/4/docker-compose.yml
@@ -0,0 +1,54 @@
+logstash-indexer-config:
+ restart: always
+ image: rancher/logstash-config:v0.2.0
+ labels:
+ io.rancher.container.hostname_override: container_name
+redis:
+ restart: always
+ tty: true
+ image: redis:3.2.6-alpine
+ stdin_open: true
+ labels:
+ io.rancher.container.hostname_override: container_name
+logstash-indexer:
+ restart: always
+ tty: true
+ volumes_from:
+ - logstash-indexer-config
+ command:
+ - logstash
+ - -f
+ - /etc/logstash
+ image: docker.elastic.co/logstash/logstash:6.2.3
+ links:
+ - redis:redis
+ external_links:
+ - ${elasticsearch_link}:elasticsearch
+ stdin_open: true
+ labels:
+ io.rancher.sidekicks: logstash-indexer-config
+ io.rancher.container.hostname_override: container_name
+logstash-collector-config:
+ restart: always
+ image: rancher/logstash-config:v0.2.0
+ labels:
+ io.rancher.container.hostname_override: container_name
+logstash-collector:
+ restart: always
+ tty: true
+ links:
+ - redis:redis
+ ports:
+ - "5000/udp"
+ - "6000/tcp"
+ volumes_from:
+ - logstash-collector-config
+ command:
+ - logstash
+ - -f
+ - /etc/logstash
+ image: docker.elastic.co/logstash/logstash:6.2.3
+ stdin_open: true
+ labels:
+ io.rancher.sidekicks: logstash-collector-config
+ io.rancher.container.hostname_override: container_name
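+
+# Data flow, for reference: log senders -> logstash-collector (udp 5000 / tcp 6000)
+# -> redis (list "logstash") -> logstash-indexer -> the linked Elasticsearch cluster;
+# the concrete inputs, filters and outputs are filled in from the metadata defined in
+# rancher-compose.yml.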
diff --git a/templates/logstash/4/rancher-compose.yml b/templates/logstash/4/rancher-compose.yml
new file mode 100644
index 0000000..b5cde38
--- /dev/null
+++ b/templates/logstash/4/rancher-compose.yml
@@ -0,0 +1,73 @@
+.catalog:
+ name: "Logstash"
+ version: "6.2.3-rancher1"
+ description: "Logstash: Process Any Data, From Any Source"
+ questions:
+ - variable: "collector_inputs"
+ description: |
+ Logstash collection tier inputs. These will be added
+ directly to the input { } section of logstash.conf
+ label: "Logstash inputs"
+ type: "multiline"
+ required: true
+ default: |
+ udp {
+ port => 5000
+ codec => "json"
+ }
+ - variable: "indexer_filters"
+ description: |
+ Logstash indexing tier filters. These will be added
+ directly to the filter { } section of logstash.conf
+ label: "Logstash filters"
+ type: "multiline"
+ required: false
+ default: ""
+ - variable: "indexer_outputs"
+ description: |
+ Logstash indexing tier outputs. These will be added
+ directly to the output { } section of logstash.conf
+ label: "Logstash outputs"
+ type: "multiline"
+ required: true
+ default: |
+ elasticsearch {
+ hosts => ["elasticsearch.rancher.internal:9200"]
+ }
+ stdout {
+ codec => rubydebug
+ }
+ - variable: "elasticsearch_link"
+ description: |
+ Stack/service link or external service link to the Elasticsearch
+ cluster.
+ label: "Elasticsearch stack/service"
+ default: "es/elasticsearch-clients"
+ required: true
+ type: "service"
+logstash-indexer:
+ metadata:
+ logstash:
+ inputs: |
+ redis {
+ host => "redis.rancher.internal"
+ port => "6379"
+ data_type => "list"
+ key => "logstash"
+ }
+ filters: |
+ ${indexer_filters}
+ outputs: |
+ ${indexer_outputs}
+logstash-collector:
+ metadata:
+ logstash:
+ inputs: |
+ ${collector_inputs}
+ outputs: |
+ redis {
+ host => "redis.rancher.internal"
+ port => "6379"
+ data_type => "list"
+ key => "logstash"
+ }
diff --git a/templates/logstash/catalogIcon-logstash.svg b/templates/logstash/catalogIcon-logstash.svg
index 15f65af..c3928f3 100644
--- a/templates/logstash/catalogIcon-logstash.svg
+++ b/templates/logstash/catalogIcon-logstash.svg
@@ -1,145 +1,57 @@
[SVG markup not shown: catalog icon updated]