#############################
# vCloud specific Inputs
#############################
# Credentials:
# User login for vCloud Air
vcloud_username: ''
# User password for vCloud Air - for login by username + password
vcloud_password: ''
# User token for vCloud Air - for login by username + token
#vcloud_token:
# vCloud URL: https://vchs.vmware.com for the 'subscription' service type,
# https://vca.vmware.com for 'ondemand'
#vcloud_url:
# vCloud service. Only required for the 'subscription' service type
#vcloud_service:
# Organization name
vcloud_org: ''
# vCloud org URL, only required if using token-based login on a private
# vCloud Director.
#vcloud_org_url: ''
# Virtual data center name
vcloud_vdc: ''
# Instance UUID. Only required for the 'ondemand' service type
vcloud_instance: ''
# Type of service: subscription, ondemand, vcd, private
#vcloud_service_type:
# Edge gateway name
# For 'ondemand' service type, the value of edge_gateway is always 'gateway'
#edge_gateway: gateway
#ssl_verify: True
# API version. For the 'ondemand' service type this must be 5.6;
# for 'subscription' it must be 5.7
#vcloud_api_version: '5.7'
# Name of catalog, can be 'Public Catalog'
catalog: ''
# Name of template from catalog
template: ''
# Names of components
# Name of the common network that can be used for nodes
#management_network_name:
# Use an existing network
#network_use_existing:
# Human-readable name for the server
#server_name: cloudify-manager-server
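# For reference, a minimal sketch of the inputs above for the 'subscription'
# service type, using hypothetical values (your credentials, service, org,
# vDC, catalog and template names will differ):
#
# vcloud_username: 'user@example.com'
# vcloud_password: 'not-a-real-password'
# vcloud_service_type: 'subscription'
# vcloud_url: 'https://vchs.vmware.com'
# vcloud_api_version: '5.7'
# vcloud_service: 'M000000000-0000'
# vcloud_org: 'M000000000-0000'
# vcloud_vdc: 'MyVDC'
# catalog: 'Public Catalog'
# template: 'my-centos-template'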
#############################
# Provider specific Inputs
#############################
# The public IP of the manager to which the CLI will connect.
#public_ip: ''
# The manager's private IP address. This is the address which will be used by the
# application hosts to connect to the Manager's fileserver and message broker.
#private_ip: ''
# SSH user used to connect to the manager
#ssh_user: ''
# SSH key path used to connect to the manager
ssh_key_filename: ''
# SSH key path used to connect to the managed VMs
agent_private_key_path: ~/.ssh/cloudify-agent-kp.pem
# SSH port used to connect to the manager
#ssh_port: 22
# Public part from SSH key
user_public_key: ssh-rsa...
# This is the user with which the Manager will try to connect to the application hosts.
#agents_user: ''
#resources_prefix: ''
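# For illustration only, a hypothetical set of provider inputs (substitute
# addresses, users and key paths from your own environment):
#
# public_ip: '203.0.113.10'
# private_ip: '10.0.0.5'
# ssh_user: 'ubuntu'
# ssh_port: 22
# ssh_key_filename: '~/.ssh/cloudify-manager-kp.pem'
# agents_user: 'ubuntu'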
#############################
# Security Settings
#############################
# Enabling SSL limits communication with the server to SSL only.
# NOTE: If enabled, the certificate and private key files must reside in resources/ssl.
#ssl_enabled: false
# If SSL is enabled, agent requests to the REST service verify its certificate
# by default. To disable certificate verification, set this value to false.
#agent_verify_rest_certificate: true
# Username and password of the Cloudify administrator.
# This user will also be included in the simple userstore repository if the
# simple userstore implementation is used.
#admin_username: 'admin'
#admin_password: ''
#insecure_endpoints_disabled: true
# Default locations for certificates on agents
#agent_rest_cert_path: '~/.cloudify/certs/rest.crt'
#broker_ssl_cert_path: '~/.cloudify/certs/broker.crt'
#############################
# Bootstrap Validations
#############################
# Validations are performed to check that attributes like disk space and memory
# correspond with some prerequisites and that some resources are available for
# download.
# Setting this to `true` allows ignoring those validations.
#ignore_bootstrap_validations: false
# These allow overriding specific validation values.
# NOTE: We do not recommend changing these values unless you know exactly
# what you're doing.
#minimum_required_total_physical_memory_in_mb: 3792
#minimum_required_available_disk_space_in_gb: 5
#allowed_heap_size_gap_in_mb: 1000
#############################
# Manager Resources Package
#############################
#manager_resources_package: http://repository.cloudifysource.org/cloudify/16.12.1/release/cloudify-manager-resources_16.12.1-community.tar.gz
# Providing a checksum file URL allows validating the resources package.
# By default, no validation is performed; if a checksum file is provided,
# it will be used for validation. Note that not providing a file but setting
# `skip_checksum_validation` to false means we will try to guess the location
# of an md5 checksum file and validate against it.
# You can download our md5 checksum file by appending .md5
# to the `manager_resources_package` url.
#manager_resources_package_checksum_file: ''
#skip_checksum_validation: true
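# For example, since the md5 checksum file is published alongside the package
# (same URL with .md5 appended), validation could be enabled like this:
#
# manager_resources_package_checksum_file: http://repository.cloudifysource.org/cloudify/16.12.1/release/cloudify-manager-resources_16.12.1-community.tar.gz.md5
# skip_checksum_validation: false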
#############################
# Agent Packages
#############################
# The key names must be in the format: distro_release_agent (e.g. ubuntu_trusty_agent)
# as the key is what's used to name the file, which later allows our
# agent installer to identify it for your distro and release automatically.
# Note that the Windows agent key name MUST be `cloudify_windows_agent`
#agent_package_urls:
# ubuntu_trusty_agent: ''
# ubuntu_precise_agent: ''
# centos_7x_agent: ''
# centos_6x_agent: ''
# redhat_7x_agent: ''
# redhat_6x_agent: ''
# cloudify_windows_agent: ''
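# As a hypothetical illustration of the key naming scheme described above
# (the URLs below are placeholders, not real package locations):
#
# agent_package_urls:
#   ubuntu_trusty_agent: 'http://example.com/agents/ubuntu-trusty-agent.tar.gz'
#   centos_7x_agent: 'http://example.com/agents/centos-7x-agent.tar.gz'
#   cloudify_windows_agent: 'http://example.com/agents/cloudify-windows-agent.exe'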
#############################
# Cloudify Modules
#############################
# Note that you can replace rpm urls with names of packages as long as they're available in your default yum repository.
# That is, as long as they provide the exact same version of that module.
#rest_service_rpm_source_url: ''
#management_worker_rpm_source_url: ''
#amqpinflux_rpm_source_url: ''
#cloudify_resources_url: ''
#stage_source_url: ''
# This is a Cloudify specific redistribution of Grafana.
#grafana_source_url: ''
#############################
# External Components
#############################
# Note that you can replace rpm urls with names of packages as long as they're available in your default yum repository.
# That is, as long as they provide the exact same version of that module.
#pip_source_rpm_url: ''
#java_source_url: ''
# RabbitMQ Distribution of Erlang
#erlang_source_url: ''
#rabbitmq_source_url: ''
#elasticsearch_source_url: ''
#elasticsearch_curator_rpm_source_url: ''
#logstash_source_url: ''
#nginx_source_url: ''
#influxdb_source_url: ''
#riemann_source_url: ''
# A RabbitMQ Client for Riemann
#langohr_source_url: ''
# Riemann's default daemonizer
#daemonize_source_url: ''
#nodejs_source_url: ''
##################################
# Management Workers configuration
##################################
# Sets the logging level to use for the management workers. This affects the logging performed
# by the manager during the execution of management tasks, such as deployment creation
# and deployment deletion.
# NOTE: specifying "debug" will result in a considerable amount of logging activity. Consider
# using "info" (or a more restrictive level) for production environments.
#management_worker_log_level: debug
#############################
# RabbitMQ Configuration
#############################
# Sets the username/password to use for clients such as celery
# to connect to the rabbitmq broker.
# It is recommended that you set both the username and password
# to something reasonably secure.
#rabbitmq_username: 'cloudify'
#rabbitmq_password: 'c10udify'
# Enable SSL for RabbitMQ. If this is set to true then the public and private
# certs must be supplied (`rabbitmq_cert_private`, `rabbitmq_cert_public` inputs).
#rabbitmq_ssl_enabled: false
# The private certificate for RabbitMQ to use for SSL. This must be PEM formatted.
# It is expected to begin with a line of dashes with 'PRIVATE KEY' in the middle.
#rabbitmq_cert_private: ''
# The public certificate for RabbitMQ to use for SSL. This does not need to be signed by any CA,
# as it will be deployed and explicitly used for all other components.
# It may be self-signed. It must be PEM formatted.
# It is expected to begin with a line of dashes with 'BEGIN CERTIFICATE' in the middle.
# If an external endpoint is used, this must be the public certificate associated with the private
# certificate that has already been configured for use by that rabbit endpoint.
#rabbitmq_cert_public: ''
# Allows defining the message TTL (in milliseconds) for the different types of queues.
# These are not used if `rabbitmq_endpoint_ip` is provided.
# https://www.rabbitmq.com/ttl.html
#rabbitmq_events_queue_message_ttl: 60000
#rabbitmq_logs_queue_message_ttl: 60000
#rabbitmq_metrics_queue_message_ttl: 60000
# This will set the queue length limit. Note that while new messages
# will be queued in RabbitMQ, old messages will be deleted once the
# limit is reached!
# These are not used if `rabbitmq_endpoint_ip` is provided.
# Note this is NOT the message byte length!
# https://www.rabbitmq.com/maxlength.html
#rabbitmq_events_queue_length_limit: 1000000
#rabbitmq_logs_queue_length_limit: 1000000
#rabbitmq_metrics_queue_length_limit: 1000000
# RabbitMQ File Descriptors Limit
#rabbitmq_fd_limit: 102400
# You can configure an external endpoint of a RabbitMQ cluster to use
# instead of the built-in one.
# If one is provided, the built-in RabbitMQ cluster will not run.
# Also note that your external cluster must be preconfigured with any
# username/password and SSL certs if you plan on using RabbitMQ's security
# features.
#rabbitmq_endpoint_ip: ''
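# A hypothetical sketch of pointing the manager at a pre-existing, pre-secured
# RabbitMQ endpoint (the address and credentials below are placeholders):
#
# rabbitmq_endpoint_ip: '10.0.0.20'
# rabbitmq_username: 'cloudify'
# rabbitmq_password: 'a-strong-password'
# rabbitmq_ssl_enabled: true
# rabbitmq_cert_public: |
#   -----BEGIN CERTIFICATE-----
#   ...public certificate of the external endpoint...
#   -----END CERTIFICATE-----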
#############################
# Elasticsearch Configuration
#############################
# bootstrap.mlockall is set to true by default.
# This allows setting the heap size for your cluster.
# https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html
#elasticsearch_heap_size: 2g
# This allows providing any JAVA_OPTS to Elasticsearch.
#elasticsearch_java_opts: ''
# The index for events will be named `logstash-YYYY.mm.dd`.
# A new index corresponding with today's date will be added each day.
# Elasticsearch Curator is used to rotate the indices on a daily basis
# via a cronjob. This allows determining the number of days to keep.
#elasticsearch_index_rotation_interval: 30
# You can configure an external endpoint of an Elasticsearch cluster to use instead of the
# built-in one; if one is provided, the built-in Elasticsearch cluster will not run.
# You need to provide the IP (defaults to localhost) and port (defaults to 9200) of your Elasticsearch cluster.
#elasticsearch_endpoint_ip: ''
#elasticsearch_endpoint_port: 9200
# You can enable automatic clustering of Elasticsearch nodes and choose the port on which multicast
# discovery is performed. Note that when bootstrapping two managers on the same network with clustering
# enabled, you must use a different discovery port for each so that they do not cluster with each other.
# This can be either 'true' or 'false'. Must be quoted to be passed as a string.
#elasticsearch_clustering_enabled: 'false'
#elasticsearch_clustering_discovery_port: 54329
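# A hypothetical example of using an external Elasticsearch endpoint instead
# of the built-in one (the address below is a placeholder):
#
# elasticsearch_endpoint_ip: '10.0.0.30'
# elasticsearch_endpoint_port: 9200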
#############################
# PostgreSQL Configuration
#############################
# You can configure the PostgreSQL database name for cloudify
#postgresql_db_name: 'cloudify_db'
#postgresql_host: 'localhost'
#############################
# LDAP Configuration
#############################
#ldap_server: ''
#ldap_username: ''
#ldap_password: ''
#ldap_domain: ''
#ldap_is_active_directory: true
#ldap_dn_extra: ''
#############################
# InfluxDB Configuration
#############################
# You can configure an external endpoint of an InfluxDB cluster to use
# instead of the built-in one.
# If one is provided, the built-in InfluxDB cluster will not run.
# Note that the port is currently not configurable and must remain 8086.
# Also note that the database username and password are hardcoded to root:root.
#influxdb_endpoint_ip: ''
#################################
# Management Worker Configuration
#################################
# Maximum number of worker processes started by the management worker.
#management_worker_max_workers: 100
# Minimum number of worker processes maintained by the management worker.
#management_worker_min_workers: 2
#################################
# REST Configuration
#################################
# valid values: public_ip, private_ip
#rest_host_external_endpoint_type: public_ip
# valid values: public_ip, private_ip
#rest_host_internal_endpoint_type: private_ip
# The number of gunicorn worker processes for handling requests.
# If the default value (0) is set, then min(2 * cpu_count + 1, 12) worker processes will be used.
#rest_service_gunicorn_worker_count: 0
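# For example, on a hypothetical 4-core machine the default of 0 would yield
# min(2 * 4 + 1, 12) = 9 worker processes, while machines with 6 or more cores
# would be capped at 12.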
# The maximum number of requests a worker will process before restarting.
# If this is set to zero then the automatic worker restarts are disabled.
#rest_service_gunicorn_max_requests: 1000
#############################
# Offline Resources Upload
#############################
# You can configure a set of resources to upload at bootstrap. These resources
# will reside on the manager and enable offline deployment. `dsl_resources`
# should contain any resource needed in the parsing process (e.g. plugin.yaml files)
#dsl_resources:
# - {'source_path': 'http://www.getcloudify.org/spec/fabric-plugin/1.4.2/plugin.yaml', 'destination_path': '/spec/fabric-plugin/1.4.2/plugin.yaml'}
# - {'source_path': 'http://www.getcloudify.org/spec/script-plugin/1.4/plugin.yaml', 'destination_path': '/spec/script-plugin/1.4/plugin.yaml'}
# - {'source_path': 'http://www.getcloudify.org/spec/diamond-plugin/1.3.5/plugin.yaml', 'destination_path': '/spec/diamond-plugin/1.3.5/plugin.yaml'}
# - {'source_path': 'http://www.getcloudify.org/spec/aws-plugin/1.4.3/plugin.yaml', 'destination_path': '/spec/aws-plugin/1.4.3/plugin.yaml'}
# - {'source_path': 'http://www.getcloudify.org/spec/openstack-plugin/1.5/plugin.yaml', 'destination_path': '/spec/openstack-plugin/1.5/plugin.yaml'}
# - {'source_path': 'http://www.getcloudify.org/spec/tosca-vcloud-plugin/1.3.1/plugin.yaml', 'destination_path': '/spec/tosca-vcloud-plugin/1.3.1/plugin.yaml'}
# - {'source_path': 'http://www.getcloudify.org/spec/vsphere-plugin/2.0.1/plugin.yaml', 'destination_path': '/spec/vsphere-plugin/2.0.1/plugin.yaml'}
# - {'source_path': 'http://www.getcloudify.org/spec/cloudify/4.0m12/types.yaml', 'destination_path': '/spec/cloudify/4.0m12/types.yaml'}
###############################
# Import Resolver Configuration
###############################
# An imported URL is prefix-matched against the key in each entry. If a match is found,
# then the URL prefix is replaced with the value of the corresponding entry.
# That allows serving YAML files from within the manager, even when the imported URL
# points to the external network.
#import_resolver_rules:
# - {'http://www.getcloudify.org/spec': 'file:///opt/manager/resources/spec'}
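# For instance, with the rule above, an import of
# http://www.getcloudify.org/spec/cloudify/4.0m12/types.yaml would be resolved
# from file:///opt/manager/resources/spec/cloudify/4.0m12/types.yaml, assuming
# that file has been uploaded to the manager (e.g. via `dsl_resources` above).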