# This is the configuration file for StorageManager (SM)

[ObjectStorage]
# 'service' is the module that SM will use for cloud IO.
# Current options are "LocalStorage" and "S3".
# "LocalStorage" will use a directory on the local filesystem as if it
# were cloud storage. "S3" is the module that uses real cloud storage.
# Both modules have their own sections below.
#
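# For example, grounded in the two options listed above, to use real
# cloud storage instead of the local module you would set:
# service = S3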
@@ -20,19 +20,19 @@ service = LocalStorage
# Regarding tuning, object stores do not support modifying stored data;
# entire objects must be replaced on modification, and entire
# objects are fetched on read. This results in read and write amplification.
#
# Network characteristics are important to consider. If the machine is
# in the cloud and has low-latency and high-bandwidth access to the object
# store, then the limiting factor may be the get/put rate imposed by your
# cloud provider. In that case, using a larger object size will reduce
# the number of get/put ops required to perform a task. If the machine has
# low-bandwidth access to the object store, a lower value will reduce
# the degree of read/write amplification, reducing the total amount of data
# to transfer.
#
# Of course, you will only really know how a complex system works by
# experimentation. If you experience poor performance using the default,
# our suggestion is to reduce it to 2M and try again, then increase it to
# 10M and try again.
#
# object_size should not be changed after you have run postConfigure.
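#
# A hypothetical illustration of the tuning advice above (these are the
# trial values suggested in the text, not shipped defaults):
# object_size = 2M    # try first on low-bandwidth links to cut amplification
# object_size = 10M   # try when the provider's get/put rate is the bottleneck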
@@ -56,33 +56,33 @@ journal_path = @ENGINE_DATADIR@/storagemanager/journal
# max_concurrent_downloads is what it sounds like, per node.
# This is not a global setting.
max_concurrent_downloads = 21

# max_concurrent_uploads is what it sounds like, per node.
# This is not a global setting. Currently, a file is locked while
# modifications to it are synchronized with cloud storage. If your network
# has low upstream bandwidth, consider lowering this value to the minimum
# necessary to saturate your network. This will reduce the latency of certain
# operations and improve your experience.
max_concurrent_uploads = 21
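#
# Hypothetical example: on a connection with limited upstream bandwidth,
# a lower setting such as the following may reduce operation latency
# (the value 4 is illustrative only):
# max_concurrent_uploads = 4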

# common_prefix_depth is the depth of the common prefix that all files
# managed by SM have. Ex: /var/lib/columnstore/data1 and
# /var/lib/columnstore/data2 differ at the 4th directory element,
# so they have a common prefix depth of 3. For Columnstore, it should
# be set to the number of directories that precede the data* directories.
# The default value of 3 works with package installations, where data*
# would be in /var/lib/columnstore.
#
# This value is used to manage the ownership of prefixes between
# StorageManager instances that share a filesystem. For example,
# if you have SM data stored on a distributed filesystem for fault-tolerance
# reasons, and all nodes have it mounted, SM instances will be able to
# negotiate ownership of data from a failed instance.
common_prefix_depth = 3
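#
# Hypothetical example: if SM data instead lived under
# /data/columnstore/data1 and /data/columnstore/data2, two directories
# ('data' and 'columnstore') precede the data* directories, so you
# would set:
# common_prefix_depth = 2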

[S3]
# These should be self-explanatory. Region can be blank or commented
# out if using a private cloud storage system. Bucket has to be set to
# something though. Obviously, do not change these after running
# postConfigure, or SM will not be able to find your data.
region = some_region
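#
# Hypothetical AWS example (both names are illustrative only):
# region = us-east-1
# bucket = my-columnstore-bucket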
@@ -102,29 +102,29 @@ bucket = some_bucket
# prefix as short as possible for performance reasons.
# prefix = cs/

# Put your HMAC access keys here. Keys can also be set through the
# environment vars AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.
# If set, SM will use these values and ignore the envvars.
# aws_access_key_id =
# aws_secret_access_key =
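#
# Hypothetical example (values are fake, for illustration only):
# aws_access_key_id = AKIAEXAMPLE0000000
# aws_secret_access_key = exampleSecretKeyDoNotUse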

# If you want StorageManager to assume an IAM role to use for its S3
# accesses, specify the name of the role in iam_role_name. Give just
# the role name, not the full path.
#
# The specified role must already exist and have permission to get, put,
# delete, and 'head' on the specified S3 bucket.
# iam_role_name =
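#
# Hypothetical example (the role name is illustrative only):
# iam_role_name = columnstore-sm-role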

# If an IAM role is specified, sts_endpoint and sts_region specify which
# STS server and region to use when assuming the role. The default for
# sts_endpoint is 'sts.amazonaws.com', and the default for sts_region is
# 'us-east-1'.
# sts_region =
# sts_endpoint =
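#
# For example, stating the documented defaults explicitly:
# sts_region = us-east-1
# sts_endpoint = sts.amazonaws.com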

# If running on an AWS EC2 instance, ec2_iam_mode can be set to
# 'enabled' to let StorageManager detect the IAM role assigned to the
# instance. SM will then use the temporary credentials provided by the
# EC2 metadata service as its S3 access/secret keys.
# ec2_iam_mode=enabled
@@ -137,6 +137,10 @@ bucket = some_bucket
# Default is ssl_verify = enabled
# ssl_verify = disabled

+ # The libs3_debug setting controls the S3 library's debugging printouts.
+ # Default is libs3_debug = disabled
+ # libs3_debug = disabled
+
# The LocalStorage section configures the 'local storage' module
# if specified by ObjectStorage/service.
[LocalStorage]
@@ -154,15 +158,15 @@ max_latency = 50000
[Cache]

# cache_size can be specified in terms of tera-, giga-, mega-, or kilo-
# bytes using T/t, G/g, M/m, or K/k. Drive manufacturers use power-of-10
# units, where 1m = 1,000,000 bytes. These settings use the programmer's
# power-of-2 units, where 1m = 1,048,576 bytes.
#
# This number includes space used by journal files, but does not
# include space used by metadata files. In this version, journal data
# currently being written and downloads in progress are also not accounted
# for, so disk usage can temporarily go above this number. You will want to
# leave a little space available on the mount for those operations.
cache_size = 2g
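#
# Worked example of the power-of-2 units above:
# cache_size = 2g  ->  2 * 1,073,741,824 = 2,147,483,648 bytes
# (a drive vendor's "2 GB" would be only 2,000,000,000 bytes)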