Merge remote-tracking branch 'refs/remotes/rancher/master'
Commit 9139d6518d
@@ -3,6 +3,7 @@
  version: "v0.6.2-rancher1"
  description: "Rancher External DNS service powered by DigitalOcean"
  minimum_rancher_version: v1.2.0-pre4-rc1
  maximum_rancher_version: v1.4.99
  questions:
    - variable: "DO_PAT"
      label: "DigitalOcean Personal Access Token"
infra-templates/digitalocean-dns/1/README.md (new file, 48 lines)
@@ -0,0 +1,48 @@
## DigitalOcean DNS

Rancher External DNS service powered by DigitalOcean

#### Changelog

Initial version

#### Usage

##### DigitalOcean DNS record TTL
The DigitalOcean API currently does not support per-record TTL settings. You should configure the global TTL setting for the domain manually and set it to a low value (e.g. 60).

##### Limitation when running the service on multiple Rancher servers

When running multiple instances of the External DNS service configured to use the same domain name, only one of them can run in the "Default" environment of a Rancher server instance.

##### Supported host labels

`io.rancher.host.external_dns_ip`
Override the IP address used in DNS records for containers running on the host. Defaults to the IP address the host is registered with in Rancher.

`io.rancher.host.external_dns`
Accepts 'true' (default) or 'false'.
When this is set to 'false' no DNS records will ever be created for containers running on this host.

##### Supported service labels

`io.rancher.service.external_dns`
Accepts 'always', 'never' or 'auto' (default)
- `always`: Always create DNS records for this service
- `never`: Never create DNS records for this service
- `auto`: Create DNS records for this service if it exposes ports on the host

##### Custom DNS name template

By default DNS entries are named `<service>.<stack>.<environment>.<domain>`.
You can specify a custom name template used to construct the subdomain part (left of the domain/zone name) of the DNS records. The following placeholders are supported:

* `%{{service_name}}`
* `%{{stack_name}}`
* `%{{environment_name}}`

**Example:**

`%{{stack_name}}-%{{service_name}}.statictext`

Make sure to use only characters in static text and separators that your provider allows in DNS names.
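The host and service labels above are applied to your own application stacks, not to this infrastructure stack. A minimal sketch (the `web` and `worker` services, their images, and the stack name are hypothetical) of a docker-compose.yml using the service label:

```yaml
# Hypothetical application stack illustrating io.rancher.service.external_dns
web:
  image: nginx
  ports:
    - "80:80"        # exposes a host port, so 'auto' would also create a record
  labels:
    io.rancher.service.external_dns: always   # always create a DNS record
worker:
  image: busybox
  labels:
    io.rancher.service.external_dns: never    # never create DNS records
```

With the default name template and a domain of `example.com`, the `web` service in a stack named `mystack` would get a record of the form `web.mystack.<environment>.example.com`.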
infra-templates/digitalocean-dns/1/docker-compose.yml (new file, 13 lines)
@@ -0,0 +1,13 @@
digitalocean:
  image: rancher/external-dns:v0.6.3
  command: -provider=digitalocean
  expose:
    - 1000
  environment:
    DO_PAT: ${DO_PAT}
    ROOT_DOMAIN: ${ROOT_DOMAIN}
    NAME_TEMPLATE: ${NAME_TEMPLATE}
    TTL: 300
  labels:
    io.rancher.container.create_agent: "true"
    io.rancher.container.agent.role: "external-dns"
infra-templates/digitalocean-dns/1/rancher-compose.yml (new file, 34 lines)
@@ -0,0 +1,34 @@
.catalog:
  name: "DigitalOcean DNS"
  version: "v0.6.3"
  description: "Rancher External DNS service powered by DigitalOcean"
  minimum_rancher_version: v1.5.0
  questions:
    - variable: "DO_PAT"
      label: "DigitalOcean Personal Access Token"
      description: "Enter your personal access token"
      type: "string"
      required: true
    - variable: "ROOT_DOMAIN"
      label: "Domain Name"
      description: "The domain name managed by DigitalOcean."
      type: "string"
      required: true
    - variable: "NAME_TEMPLATE"
      label: "DNS Name Template"
      description: |
        Name template used to construct the subdomain part (left of the domain) of the DNS record names.
        Supported placeholders: %{{service_name}}, %{{stack_name}}, %{{environment_name}}.
        By default DNS entries will be named '<service>.<stack>.<environment>.<domain>'.
      type: "string"
      default: "%{{service_name}}.%{{stack_name}}.%{{environment_name}}"
      required: false

digitalocean:
  health_check:
    port: 1000
    interval: 5000
    unhealthy_threshold: 3
    request_line: GET / HTTP/1.0
    healthy_threshold: 2
    response_timeout: 2000
@@ -1,7 +1,7 @@
name: DigitalOcean DNS
description: |
  Rancher External DNS service powered by DigitalOcean
version: v0.6.2-rancher1
version: v0.6.3
category: External DNS
labels:
  io.rancher.orchestration.supported: 'cattle,mesos,swarm,kubernetes'
@@ -3,6 +3,7 @@
  version: "v1.0.0"
  description: "Updates credentials for ECR in Rancher"
  uuid: ecr-1
  maximum_rancher_version: "v1.4.99"
  questions:
    - variable: "aws_access_key_id"
      label: "AWS Access Key ID"
@@ -3,6 +3,7 @@
  version: "v1.0.1"
  description: "Updates credentials for ECR in Rancher"
  uuid: ecr-2
  maximum_rancher_version: "v1.4.99"
  questions:
    - variable: "aws_access_key_id"
      label: "AWS Access Key ID"
@@ -3,6 +3,7 @@
  version: "v1.1.0"
  description: "Updates credentials for ECR in Rancher"
  uuid: ecr-3
  maximum_rancher_version: "v1.4.99"
  questions:
    - variable: "aws_access_key_id"
      label: "AWS Access Key ID"
templates/gocd/0/README.md (new file, 35 lines)
@@ -0,0 +1,35 @@
# GoCD.io

### Info:

This template creates one GoCD server and scales out the number of GoCD agents you need.

The GoCD agent is linked with a Docker Engine container as a sidekick, so the idea is not to create one GoCD agent per language but to use Docker containers to build and test your projects.
On the GoCD agent you can use:
- docker cli
- docker-compose cli
- rancher-compose cli
- make


### Usage:

Select GoCD from the catalog.

Choose whether to deploy the GoCD server, the GoCD agent, or both.
Enter the number of GoCD agents you need.
Choose the key used to auto-register GoCD agents.

Click deploy.

The GoCD server can now be accessed over the Rancher network on port `8153` (http://IP_CONTAINER:8153). To access it from outside the Rancher network, you need to set up a load balancer or expose port 8153.


### Source, bugs and enhancements

If you find bugs or need enhancements, you can open a ticket on GitHub:
- [GoCD official core project](https://github.com/gocd/gocd)
- [GoCD Server docker image](https://github.com/disaster37/alpine-gocd-server)
- [GoCD Agent docker image](https://github.com/disaster37/alpine-gocd-agent)
- [Rancher Cattle metadata docker image](https://github.com/disaster37/rancher-cattle-metadata)
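The choices described in the usage steps above map to the questions defined in templates/gocd/0/rancher-compose.yml below. As an illustration only, a possible set of answer values (variable names taken from that file, values invented) shown as a simple key/value YAML map:

```yaml
# Hypothetical answers for the catalog questions; values are examples only.
DEPLOY_SERVER: "true"                            # deploy the GoCD server
DEPLOY_AGENT: "true"                             # deploy GoCD agents
GOCD_AGENT_SCALE: "3"                            # number of agents
GOCD_AGENT_KEY: "change-me-agent-key"            # auto-registration key
GOCD_SERVER_URL: "https://gocd-server:8154/go"   # default; keep when deploying both
PUBLISH_PORT: "8153"                             # external port for server or load balancer
```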
templates/gocd/0/docker-compose.yml.tpl (new file, 125 lines)
@@ -0,0 +1,125 @@
version: '2'
services:
{{- if eq .Values.DEPLOY_SERVER "true"}}
  gocd-server:
    tty: true
    image: webcenter/alpine-gocd-server:17.3.0-1
    volumes:
{{- if (contains .Values.VOLUME_DRIVER_SERVER "/")}}
      - ${VOLUME_DRIVER_SERVER}:/data
{{- else}}
      - gocd-server-data:/data
{{- end}}
    environment:
      - GOCD_CONFIG_memory=${GOCD_SERVER_MEMORY}
      - GOCD_CONFIG_agent-key=${GOCD_AGENT_KEY}
      - GOCD_CONFIG_server-url=${GOCD_SERVER_URL}
      - GOCD_USER_${GOCD_USER}=${GOCD_PASSWORD}
      - CONFD_BACKEND=${CONFD_BACKEND}
      - CONFD_NODES=${CONFD_NODES}
      - CONFD_PREFIX_KEY=${CONFD_PREFIX}
{{- if eq .Values.GOCD_AGENT_PACKAGE "true"}}
      - GOCD_PLUGIN_script-executor=https://github.com/gocd-contrib/script-executor-task/releases/download/0.3/script-executor-0.3.0.jar
      - GOCD_PLUGIN_docker-task=https://github.com/manojlds/gocd-docker/releases/download/0.1.27/docker-task-assembly-0.1.27.jar
      - GOCD_PLUGIN_slack=https://github.com/Vincit/gocd-slack-task/releases/download/v1.3.1/gocd-slack-task-1.3.1.jar
      - GOCD_PLUGIN_docker-pipline=https://github.com/Haufe-Lexware/gocd-plugins/releases/download/v1.0.0-beta/gocd-docker-pipeline-plugin-1.0.0.jar
      - GOCD_PLUGIN_email-notifier=https://github.com/gocd-contrib/email-notifier/releases/download/v0.1/email-notifier-0.1.jar
      - GOCD_PLUGIN_github-notifier=https://github.com/gocd-contrib/gocd-build-status-notifier/releases/download/1.3/github-pr-status-1.3.jar
      - GOCD_PLUGIN_github-scm=https://github.com/ashwanthkumar/gocd-build-github-pull-requests/releases/download/v1.3.3/github-pr-poller-1.3.3.jar
      - GOCD_PLUGIN_maven-repository=https://github.com/1and1/go-maven-poller/releases/download/v1.1.4/go-maven-poller.jar
      - GOCD_PLUGIN_maven-task=https://github.com/ruckc/gocd-maven-plugin/releases/download/0.1.1/gocd-maven-plugin-0.1.1.jar
      - GOCD_PLUGIN_s3-fetch=https://github.com/indix/gocd-s3-artifacts/releases/download/v2.0.2/s3fetch-assembly-2.0.2.jar
      - GOCD_PLUGIN_s3-publish=https://github.com/indix/gocd-s3-artifacts/releases/download/v2.0.2/s3publish-assembly-2.0.2.jar
      - GOCD_PLUGIN_nessus-scan=https://github.com/Haufe-Lexware/gocd-plugins/releases/download/v1.0.0-beta/gocd-nessus-scan-plugin-1.0.0.jar
      - GOCD_PLUGIN_sonar=https://github.com/Haufe-Lexware/gocd-plugins/releases/download/v1.0.0-beta/gocd-sonar-qualitygates-plugin-1.0.0.jar
      - GOCD_PLUGIN_gitlab-auth=https://github.com/gocd-contrib/gocd-oauth-login/releases/download/v2.3/gitlab-oauth-login-2.3.jar
      - GOCD_PLUGIN_google-auth=https://github.com/gocd-contrib/gocd-oauth-login/releases/download/v2.3/google-oauth-login-2.3.jar
      - GOCD_PLUGIN_github-auth=https://github.com/gocd-contrib/gocd-oauth-login/releases/download/v2.3/github-oauth-login-2.3.jar
{{- end}}
{{- if and (ne .Values.DEPLOY_LB "true") .Values.PUBLISH_PORT}}
    ports:
      - ${PUBLISH_PORT}:8153
{{- end}}
    labels:
      io.rancher.scheduler.affinity:container_label_soft_ne: io.rancher.stack_service.name=$${stack_name}/$${service_name}
      io.rancher.container.hostname_override: container_name
{{- if eq .Values.DEPLOY_LB "true"}}
  lb:
    image: rancher/lb-service-haproxy:v0.6.2
{{- if .Values.PUBLISH_PORT}}
    ports:
      - ${PUBLISH_PORT}:8153/tcp
{{- else}}
    expose:
      - 8153:8153/tcp
{{- end}}
    links:
      - gocd-server:gocd-server
    labels:
      io.rancher.container.agent.role: environmentAdmin
      io.rancher.container.create_agent: 'true'
{{- end}}
{{- end}}
{{- if eq .Values.DEPLOY_AGENT "true"}}
  gocd-agent:
    tty: true
    image: webcenter/alpine-gocd-agent:17.3.0-1
    volumes:
{{- if (contains .Values.VOLUME_DRIVER_AGENT "/")}}
      - ${VOLUME_DRIVER_AGENT}:/data
{{- else}}
      - gocd-agent-data:/data
{{- end}}
      - gocd-scheduler-setting:/opt/scheduler
    environment:
      - GOCD_CONFIG_memory=${GOCD_AGENT_MEMORY}
      - GOCD_CONFIG_agent_key=${GOCD_AGENT_KEY}
      - GOCD_CONFIG_agent_resource_docker=${GOCD_AGENT_RESOURCE}
      - DOCKER_HOST=docker-engine:2375
{{- if eq .Values.DEPLOY_SERVER "true"}}
    links:
      - gocd-server:gocd-server
{{- end}}
    labels:
      io.rancher.scheduler.affinity:container_label_soft_ne: io.rancher.stack_service.name=$${stack_name}/$${service_name}
      io.rancher.container.hostname_override: container_name
      io.rancher.container.agent.role: environment
      io.rancher.container.create_agent: 'true'
      io.rancher.sidekicks: rancher-cattle-metadata,docker-engine
  rancher-cattle-metadata:
    network_mode: none
    labels:
      io.rancher.scheduler.affinity:container_label_soft_ne: io.rancher.stack_service.name=$${stack_name}/$${service_name}
      io.rancher.container.hostname_override: container_name
      io.rancher.container.start_once: "true"
    image: webcenter/rancher-cattle-metadata:1.0.1
    volumes:
      - gocd-scheduler-setting:/opt/scheduler
  docker-engine:
    privileged: true
    labels:
      io.rancher.scheduler.affinity:container_label_soft_ne: io.rancher.stack_service.name=$${stack_name}/$${service_name}
      io.rancher.container.hostname_override: container_name
    image: index.docker.io/docker:1.13-dind
    volumes:
{{- if (contains .Values.VOLUME_DRIVER_AGENT "/")}}
      - ${VOLUME_DRIVER_AGENT}:/data
{{- else}}
      - gocd-agent-data:/data
{{- end}}
{{- end}}

volumes:
  gocd-scheduler-setting:
    driver: local
    per_container: true
{{- if not (contains .Values.VOLUME_DRIVER_AGENT "/")}}
  gocd-agent-data:
    driver: ${VOLUME_DRIVER_AGENT}
    per_container: true
{{- end}}
{{- if not (contains .Values.VOLUME_DRIVER_SERVER "/")}}
  gocd-server-data:
    driver: ${VOLUME_DRIVER_SERVER}
{{- end}}
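As a rough illustration of the `contains ... "/"` branches in the template above (the values are examples, not part of the template), setting `VOLUME_DRIVER_SERVER` to a volume driver name versus a host path renders approximately to:

```yaml
# VOLUME_DRIVER_SERVER: "local" -> named volume backed by that driver
gocd-server:
  volumes:
    - gocd-server-data:/data
volumes:
  gocd-server-data:
    driver: local

# VOLUME_DRIVER_SERVER: "/data/gocd" -> bind mount; no named volume is declared
gocd-server:
  volumes:
    - /data/gocd:/data
```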
templates/gocd/0/rancher-compose.yml (new file, 155 lines)
@@ -0,0 +1,155 @@
version: '2'
catalog:
  name: GoCD
  version: 17.3.0-rancher1
  minimum_rancher_version: v1.5.0
  questions:
    - variable: "DEPLOY_SERVER"
      description: "Deploy the GoCD server"
      label: "Deploy GoCD server"
      required: true
      type: enum
      default: "true"
      options:
        - "true"
        - "false"
    - variable: "DEPLOY_AGENT"
      description: "Deploy GoCD agents"
      label: "Deploy GoCD agent"
      required: true
      type: enum
      default: "true"
      options:
        - "true"
        - "false"
    - variable: "GOCD_AGENT_SCALE"
      description: "Number of GoCD agents"
      label: "GoCD Agents"
      required: true
      default: 1
      type: "string"
    - variable: "GOCD_AGENT_KEY"
      description: "Key used for agent auto-registration"
      label: "Agent key"
      required: true
      type: "password"
    - variable: "GOCD_SERVER_MEMORY"
      description: "Max memory allowed for the GoCD server"
      label: "Max memory for server"
      type: "string"
      required: true
      default: "1024m"
    - variable: "GOCD_AGENT_MEMORY"
      description: "Max memory allowed for the GoCD agent"
      label: "Max memory for agent"
      type: "string"
      required: true
      default: "2048m"
    - variable: "GOCD_AGENT_RESOURCE"
      description: "Resource name associated with the agent"
      label: "Resource name"
      type: "string"
      required: true
      default: "docker"
    - variable: "GOCD_USER"
      description: "Login used to connect to GoCD"
      label: "Login"
      type: "string"
      required: true
      default: "admin"
    - variable: "GOCD_PASSWORD"
      description: "Password used to connect to GoCD"
      label: "Password"
      type: "password"
      required: true
    - variable: "GOCD_AGENT_PACKAGE"
      description: "Install GoCD extra plugins"
      label: "Install extra plugins"
      required: true
      type: enum
      default: "true"
      options:
        - "true"
        - "false"
    - variable: "VOLUME_DRIVER_SERVER"
      description: "Docker driver to store the volume, or base path, for the GoCD server"
      label: "Volume driver / Path for server"
      type: "string"
      required: true
      default: "local"
    - variable: "VOLUME_DRIVER_AGENT"
      description: "Docker driver to store the volume, or base path, for the GoCD agent"
      label: "Volume driver / Path for agent"
      type: "string"
      required: true
      default: "local"
    - variable: "DEPLOY_LB"
      description: "Deploy a load balancer"
      label: "Deploy Loadbalancer"
      required: true
      type: enum
      default: "true"
      options:
        - "true"
        - "false"
    - variable: "PUBLISH_PORT"
      description: "Set a port if you want to publish an external port for the GoCD server or the load balancer"
      label: "Publish port"
      required: false
      type: "string"
      default: "8153"
    - variable: "GOCD_SERVER_URL"
      description: "The server URL used by agents to auto-register. Leave unchanged if you deploy both server and agent."
      label: "Server URL"
      required: true
      type: "string"
      default: "https://gocd-server:8154/go"
    - variable: "CONFD_BACKEND"
      description: "The confd backend used to grab the config"
      label: "Confd backend"
      required: true
      default: "env"
      type: "string"
    - variable: "CONFD_NODES"
      description: "The confd nodes"
      label: "Confd nodes"
      required: false
      type: "string"
    - variable: "CONFD_PREFIX"
      description: "The confd prefix"
      label: "Confd prefix"
      required: true
      default: "/gocd"
      type: "string"

services:
  gocd-agent:
    scale: ${GOCD_AGENT_SCALE}
    retain_ip: true
  gocd-server:
    scale: 1
    retain_ip: false
    health_check:
      port: 8153
      interval: 5000
      unhealthy_threshold: 3
      request_line: ''
      healthy_threshold: 2
      response_timeout: 5000
  lb:
    scale: 1
    start_on_create: true
    lb_config:
      certs: []
      port_rules:
        - priority: 1
          protocol: http
          service: gocd-server
          source_port: 8153
          target_port: 8153
    health_check:
      response_timeout: 2000
      healthy_threshold: 2
      port: 42
      unhealthy_threshold: 3
      interval: 2000
templates/gocd/catalogIcon-gocd.png (new binary file, 3.1 KiB)
Binary file not shown.
templates/gocd/config.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
name: GoCD
description: |
  GoCD Stack (server and agents)
version: 17.3.0-rancher1
category: Continuous Integration
maintainer: "Sebastien Langoureaux <linuxworkgroup@gmail.com>"
license: Apache License
projectURL: https://www.gocd.io/
templates/minio/0/README.md (new file, 49 lines)
@@ -0,0 +1,49 @@
# Minio.io

### Info:

This template creates, scales in, and scales out a multi-node Minio cluster on top of Rancher. The configuration is generated with confd from Rancher metadata.
Cluster size is static after deployment. This means you must redeploy the stack if you want to change the size of your cluster (a minio.io limitation).


### Usage:

Select Minio Cloud Storage from the catalog.

Enter the number of nodes for your Minio cluster and set the key and secret used to connect to Minio.

Click deploy.

Minio can now be accessed over the Rancher network on port `9000` (http://IP_CONTAINER:9000). To access it from outside the Rancher network, you need to set up a load balancer or expose port 9000.

### Disks / nodes

You can set several disks per node (max of 4). If you use local disks (no extra Docker driver), you need to mount them under the same `base path` and indicate this path in the `Volume Driver / Path` section.
Moreover, you need to use the same disk name with a number as suffix (starting at 0) and report this in the `Disk base name` section.

For example, if you want to use 4 disks per node (the rendered volume list is sketched after this section):
- Number of disks per node: 4
- Volume driver / Path: /data/minio
- Disk base name: disk

Then you have to mount the following partitions:
- /data/minio/disk0
- /data/minio/disk1
- /data/minio/disk2
- /data/minio/disk3

For more info about nodes and disks, you can read the [official documentation](https://github.com/minio/minio/tree/master/docs/distributed)

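With those example values, the docker-compose.yml.tpl below would render the minio-server volume section roughly as follows (a sketch; the host paths come from the example above):

```yaml
# Rendered volumes for VOLUME_DRIVER=/data/minio, DISK_BASE_NAME=disk, MINIO_DISKS=4
minio-server:
  volumes:
    - minio-scheduler-setting:/opt/scheduler
    - /data/minio/disk0:/data/disk0
    - /data/minio/disk1:/data/disk1
    - /data/minio/disk2:/data/disk2
    - /data/minio/disk3:/data/disk3
```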
### Advanced info

1. This template first creates the container called `rancher-cattle-metadata`. It embeds confd, with some scripts that fetch settings from the Cattle scheduler and expose them through the volume.
2. Then the template creates the `minio` container. It launches the scripts provided by the `rancher-cattle-metadata` container with `volumes_from`. They create the /opt/scheduler/conf/scheduler.cfg file with some useful info about the container, service, stack and host. Next, it sources `/opt/scheduler/conf/scheduler.cfg` and launches the confd scripts to configure Minio.

### Source, bugs and enhancements

If you find bugs or need enhancements, you can open a ticket on GitHub:
- [Minio official core project](https://github.com/minio/minio)
- [Minio docker image](https://github.com/disaster37/alpine-minio)
- [Rancher Cattle metadata docker image](https://github.com/disaster37/rancher-cattle-metadata)
templates/minio/0/docker-compose.yml.tpl (new file, 70 lines)
@@ -0,0 +1,70 @@
version: '2'
services:
  minio-server:
    tty: true
    image: webcenter/alpine-minio:2017-03-16_4
    volumes:
      - minio-scheduler-setting:/opt/scheduler
{{- if contains .Values.VOLUME_DRIVER "/" }}
{{- range $idx, $e := atoi .Values.MINIO_DISKS | until }}
      - {{$.Values.VOLUME_DRIVER}}/{{$.Values.DISK_BASE_NAME}}{{$idx}}:/data/disk{{$idx}}
{{- end}}
{{- else}}
{{- range $idx, $e := atoi .Values.MINIO_DISKS | until }}
      - minio-data-{{$idx}}:/data/disk{{$idx}}
{{- end}}
{{- end}}
    environment:
      - MINIO_CONFIG_minio.access.key=${MINIO_ACCESS_KEY}
      - MINIO_CONFIG_minio.secret.key=${MINIO_SECRET_KEY}
      - CONFD_BACKEND=${CONFD_BACKEND}
      - CONFD_NODES=${CONFD_NODES}
      - CONFD_PREFIX_KEY=${CONFD_PREFIX}
{{- range $idx, $e := atoi .Values.MINIO_DISKS | until }}
      - MINIO_DISKS_{{$idx}}=disk{{$idx}}
{{- end}}
{{- if and (ne .Values.DEPLOY_LB "true") .Values.PUBLISH_PORT}}
    ports:
      - ${PUBLISH_PORT}:9000
{{- end}}
    labels:
      io.rancher.scheduler.affinity:container_label_soft_ne: io.rancher.stack_service.name=$${stack_name}/$${service_name}
      io.rancher.container.hostname_override: container_name
      io.rancher.sidekicks: rancher-cattle-metadata
  rancher-cattle-metadata:
    network_mode: none
    labels:
      io.rancher.scheduler.affinity:container_label_soft_ne: io.rancher.stack_service.name=$${stack_name}/$${service_name}
      io.rancher.container.hostname_override: container_name
      io.rancher.container.start_once: "true"
    image: webcenter/rancher-cattle-metadata:1.0.1
    volumes:
      - minio-scheduler-setting:/opt/scheduler
{{- if eq .Values.DEPLOY_LB "true"}}
  lb:
    image: rancher/lb-service-haproxy:v0.6.2
{{- if .Values.PUBLISH_PORT}}
    ports:
      - ${PUBLISH_PORT}:9000/tcp
{{- else}}
    expose:
      - 9000:9000/tcp
{{- end}}
    links:
      - minio-server:minio-server
    labels:
      io.rancher.container.agent.role: environmentAdmin
      io.rancher.container.create_agent: 'true'
{{- end}}

volumes:
  minio-scheduler-setting:
    driver: local
    per_container: true
{{- if not (contains .Values.VOLUME_DRIVER "/")}}
{{- range $idx, $e := atoi .Values.MINIO_DISKS | until }}
  minio-data-{{$idx}}:
    per_container: true
    driver: ${VOLUME_DRIVER}
{{- end}}
{{- end}}
templates/minio/0/rancher-compose.yml (new file, 114 lines)
@@ -0,0 +1,114 @@
version: '2'
catalog:
  name: Minio
  version: 2017-03-16-rancher1
  minimum_rancher_version: v1.5.0
  questions:
    - variable: "MINIO_SCALE"
      description: "Number of Minio nodes."
      label: "Minio Nodes"
      required: true
      default: 1
      type: enum
      options:
        - 1
        - 4
        - 6
        - 8
        - 10
        - 12
        - 14
        - 16
    - variable: "MINIO_DISKS"
      description: "Number of disks per node"
      label: "Disks Per Node"
      required: true
      type: enum
      default: 1
      options:
        - 1
        - 2
        - 4
    - variable: "DISK_BASE_NAME"
      description: "The base name for each disk"
      label: "Disk base name"
      type: "string"
      required: true
      default: "disk"
    - variable: "VOLUME_DRIVER"
      description: "Docker driver to store the volumes, or base path, for the disks"
      label: "Volume driver / Path"
      type: "string"
      required: true
      default: "local"
    - variable: "MINIO_ACCESS_KEY"
      description: "The access key used to connect to Minio"
      label: "Minio key"
      required: true
      type: "string"
    - variable: "MINIO_SECRET_KEY"
      description: "The secret key used to connect to Minio"
      label: "Minio secret key"
      required: true
      type: "password"
    - variable: "DEPLOY_LB"
      description: "Deploy a load balancer"
      label: "Deploy Loadbalancer"
      required: true
      type: enum
      default: "true"
      options:
        - "true"
        - "false"
    - variable: "PUBLISH_PORT"
      description: "Set a port if you want to publish an external port for Minio or the load balancer"
      label: "Publish port"
      required: false
      type: "string"
      default: "9000"
    - variable: "CONFD_BACKEND"
      description: "The confd backend used to grab the config"
      label: "Confd backend"
      required: true
      default: "env"
      type: "string"
    - variable: "CONFD_NODES"
      description: "The confd nodes"
      label: "Confd nodes"
      required: false
      type: "string"
    - variable: "CONFD_PREFIX"
      description: "The confd prefix"
      label: "Confd prefix"
      required: true
      default: "/minio"
      type: "string"

services:
  minio-server:
    scale: ${MINIO_SCALE}
    retain_ip: true
    health_check:
      port: 9000
      interval: 5000
      unhealthy_threshold: 3
      request_line: ''
      healthy_threshold: 2
      response_timeout: 5000
  lb:
    scale: 1
    start_on_create: true
    lb_config:
      certs: []
      port_rules:
        - priority: 1
          protocol: http
          service: minio-server
          source_port: 9000
          target_port: 9000
    health_check:
      response_timeout: 2000
      healthy_threshold: 2
      port: 42
      unhealthy_threshold: 3
      interval: 2000
templates/minio/catalogIcon-minio.svg (new file, 3531 lines, 261 KiB)
File diff suppressed because it is too large.
templates/minio/config.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
name: Minio Cloud Storage
description: |
  Store photos, videos, VMs, containers, log files, or any blob of data as objects.
version: 2017-03-16-rancher1
category: Storage
maintainer: "Sebastien Langoureaux <linuxworkgroup@gmail.com>"
license: Apache License
projectURL: https://minio.io/
@@ -2,7 +2,7 @@
  name: "Nuxeo Platform"
  version: 8.1-rancher1
  description: |
    Enterprise Content Management
    Enterprise Content Management
    Platform for Business Applications
  uuid: nuxeo-1
  questions:
@@ -16,14 +16,14 @@
      type: "string"
      label: "List of Nuxeo packages"
      description: |
        List of Nuxeo packages
        List of Nuxeo packages
      default: nuxeo-web-mobile nuxeo-drive nuxeo-diff nuxeo-spreadsheet nuxeo-dam nuxeo-template-rendering nuxeo-template-rendering-samples nuxeo-showcase-content
    - variable: url
      description: "The URL that this nuxeo instance should serve"
      label: "Nuxeo URL"
      type: "string"
      required: true
      default: "http://localhost:8080/"
      default: "http://localhost:8080/"
    - variable: volumedriver
      type: "string"
      label: "Volume driver"
@@ -40,14 +40,14 @@ nuxeo:
    interval: 5000
    unhealthy_threshold: 5
    # For TCP, request_line needs to be ''
    # TCP Example:
    # TCP Example:
    # request_line: ''
    request_line: GET /nuxeo/login.jsp
    request_line: GET /nuxeo/login.jsp
    healthy_threshold: 2
    # Response timeout is measured in milliseconds
    response_timeout: 2000

elasticsearch:
elasticsearch-masters:
  metadata:
    elasticsearch:
      yml:
@@ -67,4 +67,4 @@ lb:
    interval: 2000
    unhealthy_threshold: 3
    healthy_threshold: 2
    response_timeout: 2000
    response_timeout: 2000
|
||||
start_on_create: true
|
||||
zammad-railsserver:
|
||||
scale: 1
|
||||
tart_on_create: true
|
||||
start_on_create: true
|
||||
zammad-websocket:
|
||||
scale: 1
|
||||
start_on_create: true
|
||||
|
@ -41,7 +41,7 @@ https://docs.zammad.org/en/latest/api-intro.html
|
||||
https://zammad.org/participate
|
||||
|
||||
|
||||
Thanks! ❤️ ❤️ ❤️
|
||||
Thanks!
|
||||
|
||||
Your Zammad Team
|
||||
|
||||
|