Skip to content

Commit cacbbee

Browse files
drrtuy (Roman Nozdrin)
and
Roman Nozdrin
authored
MCOL-5175 Increase the maximum effective length of S3 secret used as SHA256 key producing S3 signature (mariadb-corporation#2859)
Co-authored-by: Roman Nozdrin <[email protected]>
1 parent 1eca477 commit cacbbee

File tree

3 files changed

+55
-45
lines changed

3 files changed

+55
-45
lines changed

storage-manager/src/S3Storage.cpp

Lines changed: 16 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -28,14 +28,14 @@
2828
#include <boost/uuid/random_generator.hpp>
2929
#define BOOST_SPIRIT_THREADSAFE
3030
#ifndef __clang__
31-
#pragma GCC diagnostic push
32-
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
31+
#pragma GCC diagnostic push
32+
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
3333
#endif
3434

3535
#include <boost/property_tree/ptree.hpp>
3636

3737
#ifndef __clang__
38-
#pragma GCC diagnostic pop
38+
#pragma GCC diagnostic pop
3939
#endif
4040
#include <boost/property_tree/json_parser.hpp>
4141
#include "Utilities.h"
@@ -61,8 +61,7 @@ static size_t WriteCallback(void* contents, size_t size, size_t nmemb, void* use
6161
inline bool retryable_error(uint8_t s3err)
6262
{
6363
return (s3err == MS3_ERR_RESPONSE_PARSE || s3err == MS3_ERR_REQUEST_ERROR || s3err == MS3_ERR_OOM ||
64-
s3err == MS3_ERR_IMPOSSIBLE || s3err == MS3_ERR_SERVER ||
65-
s3err == MS3_ERR_AUTH_ROLE);
64+
s3err == MS3_ERR_IMPOSSIBLE || s3err == MS3_ERR_SERVER || s3err == MS3_ERR_AUTH_ROLE);
6665
}
6766

6867
// Best effort to map the errors returned by the ms3 API to linux errnos
@@ -76,7 +75,7 @@ const int s3err_to_errno[] = {
7675
EBADMSG, // 4 MS3_ERR_RESPONSE_PARSE
7776
ECOMM, // 5 MS3_ERR_REQUEST_ERROR
7877
ENOMEM, // 6 MS3_ERR_OOM
79-
EINVAL, // 7 MS3_ERR_IMPOSSIBLE. Will have to look through the code to find out what this is exactly.
78+
EINVAL, // 7 MS3_ERR_IMPOSSIBLE. Will have to look through the code to find out what this is exactly.
8079
EKEYREJECTED, // 8 MS3_ERR_AUTH
8180
ENOENT, // 9 MS3_ERR_NOT_FOUND
8281
EPROTO, // 10 MS3_ERR_SERVER
@@ -131,6 +130,7 @@ S3Storage::S3Storage(bool skipRetry) : skipRetryableErrors(skipRetry)
131130
string use_http = tolower(config->getValue("S3", "use_http"));
132131
string ssl_verify = tolower(config->getValue("S3", "ssl_verify"));
133132
string port_number = config->getValue("S3", "port_number");
133+
string libs3_debug = config->getValue("S3", "libs3_debug");
134134

135135
bool keyMissing = false;
136136
isEC2Instance = false;
@@ -212,7 +212,10 @@ S3Storage::S3Storage(bool skipRetry) : skipRetryableErrors(skipRetry)
212212
endpoint = config->getValue("S3", "endpoint");
213213

214214
ms3_library_init();
215-
// ms3_debug();
215+
if (libs3_debug == "enabled")
216+
{
217+
ms3_debug();
218+
}
216219
testConnectivityAndPerms();
217220
}
218221

@@ -308,7 +311,8 @@ void S3Storage::testConnectivityAndPerms()
308311
err = exists(testObjKey, &_exists);
309312
if (err)
310313
{
311-
logger->log(LOG_CRIT, "S3Storage::exists() failed on nonexistent object. Check 'ListBucket' permissions.");
314+
logger->log(LOG_CRIT,
315+
"S3Storage::exists() failed on nonexistent object. Check 'ListBucket' permissions.");
312316
FAIL(HEAD)
313317
}
314318
logger->log(LOG_INFO, "S3Storage: S3 connectivity & permissions are OK");
@@ -518,8 +522,10 @@ int S3Storage::putObject(const std::shared_ptr<uint8_t[]> data, size_t len, cons
518522
s3err_msgs[s3err], bucket.c_str(), destKey.c_str());
519523
errno = s3err_to_errno[s3err];
520524
if (s3err == MS3_ERR_ENDPOINT)
521-
logger->log(LOG_ERR, "S3Storage::putObject(): Bucket location not match provided endpoint:, bucket = %s, endpoint = %s.",
522-
bucket.c_str(), endpoint.c_str());
525+
logger->log(
526+
LOG_ERR,
527+
"S3Storage::putObject(): Bucket location not match provided endpoint:, bucket = %s, endpoint = %s.",
528+
bucket.c_str(), endpoint.c_str());
523529
return -1;
524530
}
525531
return 0;

storage-manager/storagemanager.cnf.in

Lines changed: 38 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
11
# This is the configuration file for StorageManager (SM)
22

33
[ObjectStorage]
4-
# 'service' is the module that SM will use for cloud IO.
4+
# 'service' is the module that SM will use for cloud IO.
55
# Current options are "LocalStorage" and "S3".
6-
# "LocalStorage" will use a directory on the local filesystem as if it
6+
# "LocalStorage" will use a directory on the local filesystem as if it
77
# were cloud storage. "S3" is the module that uses real cloud storage.
88
# Both modules have their own sections below.
99
#
@@ -20,19 +20,19 @@ service = LocalStorage
2020
# Regarding tuning, object stores do not support modifying stored data;
2121
# entire objects must be replaced on modification, and entire
2222
# objects are fetched on read. This results in read and write amplification.
23-
#
23+
#
2424
# Network characteristics are important to consider. If the machine is
25-
# in the cloud and has low-latency and high-bandwidth access to the object
26-
# store, then the limiting factor may be the get/put rate imposed by your
25+
# in the cloud and has low-latency and high-bandwidth access to the object
26+
# store, then the limiting factor may be the get/put rate imposed by your
2727
# cloud provider. In that case, using a larger object size will reduce
28-
# the number of get/put ops required to perform a task. If the machine has
29-
# low-bandwidth access to the object store, a lower value will reduce
30-
# the degree of read/write amplification, reducing the total amount of data
28+
# the number of get/put ops required to perform a task. If the machine has
29+
# low-bandwidth access to the object store, a lower value will reduce
30+
# the degree of read/write amplification, reducing the total amount of data
3131
# to transfer.
3232
#
33-
# Of course, you will only really know how a complex system works by
33+
# Of course, you will only really know how a complex system works by
3434
# experimentation. If you experience poor performance using the default,
35-
# our suggestion is to reduce it to 2M and try again, then increase it to
35+
# our suggestion is to reduce it to 2M and try again, then increase it to
3636
# 10M and try again.
3737
#
3838
# object_size should not be changed after you have run postConfigure.
@@ -56,33 +56,33 @@ journal_path = @ENGINE_DATADIR@/storagemanager/journal
5656
# max_concurrent_downloads is what it sounds like, per node.
5757
# This is not a global setting.
5858
max_concurrent_downloads = 21
59-
59+
6060
# max_concurrent_uploads is what it sounds like, per node.
61-
# This is not a global setting. Currently, a file is locked while
61+
# This is not a global setting. Currently, a file is locked while
6262
# modifications to it are synchronized with cloud storage. If your network
6363
# has low upstream bandwidth, consider lowering this value to the minimum
64-
# necessary to saturate your network. This will reduce the latency of certain
65-
# operations and improve your experience.
64+
# necessary to saturate your network. This will reduce the latency of certain
65+
# operations and improve your experience.
6666
max_concurrent_uploads = 21
6767

68-
# common_prefix_depth is the depth of the common prefix that all files
69-
# managed by SM have. Ex: /var/lib/columnstore/data1, and
68+
# common_prefix_depth is the depth of the common prefix that all files
69+
# managed by SM have. Ex: /var/lib/columnstore/data1, and
7070
# /var/lib/columnstore/data2 differ at the 4th directory element,
71-
# so they have a common prefix depth of 3. For Columnstore, it should
71+
# so they have a common prefix depth of 3. For Columnstore, it should
7272
# be set to the number of directories that precede the data* directories.
73-
# The default value of 3 works with package installations, where data* would be in
73+
# The default value of 3 works with package installations, where data* would be in
7474
# /var/lib/columnstore
7575
#
7676
# This value is used to manage the ownership of prefixes between
7777
# StorageManager instances that share a filesystem. For example,
7878
# if you have SM data stored on a distributed filesystem for fault-tolerance
79-
# reasons, and all nodes have it mounted, SM instances will be able to
80-
# negotiate ownership of data from a failed instance.
79+
# reasons, and all nodes have it mounted, SM instances will be able to
80+
# negotiate ownership of data from a failed instance.
8181
common_prefix_depth = 3
8282

8383
[S3]
8484
# These should be self-explanatory. Region can be blank or commented
85-
# if using a private cloud storage system. Bucket has to be set to
85+
# if using a private cloud storage system. Bucket has to be set to
8686
# something though. Obviously, do not change these after running
8787
# postConfigure, or SM will not be able to find your data.
8888
region = some_region
@@ -102,29 +102,29 @@ bucket = some_bucket
102102
# prefix as short as possible for performance reasons.
103103
# prefix = cs/
104104

105-
# Put your HMAC access keys here. Keys can also be set through the
105+
# Put your HMAC access keys here. Keys can also be set through the
106106
# environment vars AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.
107107
# If set, SM will use these values and ignore the envvars.
108108
# aws_access_key_id =
109-
# aws_secret_access_key =
109+
# aws_secret_access_key =
110110

111111
# If you want StorageManager to assume an IAM role to use for its S3
112112
# accesses, specify the name of the role in iam_role_name. The name
113113
# should be only the name, rather than the full path.
114114
#
115-
# The specified role must already exist and have permission to get, put,
115+
# The specified role must already exist and have permission to get, put,
116116
# delete, and 'head' on the specified S3 bucket.
117-
# iam_role_name =
117+
# iam_role_name =
118118

119119
# If an IAM role is specified, sts_endpoint and sts_region are used to specify
120120
# which STS server & region to use to assume the role. The default for
121121
# sts_endpoint is 'sts.amazonaws.com', and the default for sts_region is
122122
# 'us-east-1'.
123-
# sts_region =
123+
# sts_region =
124124
# sts_endpoint =
125125

126126
# If running on AWS EC2 instance the value ec2_iam_mode can be set
127-
# 'enabled' and allow StorageManager to detect IAM role assigned
127+
# 'enabled' and allow StorageManager to detect IAM role assigned
128128
# to EC2 instances. This will then use the temporary credentials
129129
# provided by EC2 metadata for S3 authentication access/secret keys.
130130
# ec2_iam_mode=enabled
@@ -137,6 +137,10 @@ bucket = some_bucket
137137
# Default is ssl_verify = enabled
138138
# ssl_verify = disabled
139139

140+
# libs3_debug setting controls S3 library debugging printouts
141+
# Default is libs3_debug = disabled
142+
# libs3_debug = disabled
143+
140144
# The LocalStorage section configures the 'local storage' module
141145
# if specified by ObjectStorage/service.
142146
[LocalStorage]
@@ -154,15 +158,15 @@ max_latency = 50000
154158
[Cache]
155159

156160
# cache_size can be specified in terms of tera-, giga-, mega-, kilo-
157-
# bytes using T/t G/g M/m K/k. Drive manufacturers use a power-of-10
158-
# notion of what that means, which means 1m = 1,000,000 bytes. These
159-
# settings use the programmer's power-of-2 notion, which means
160-
# 1m = 1,048,576 bytes.
161+
# bytes using T/t G/g M/m K/k. Drive manufacturers use a power-of-10
162+
# notion of what that means, which means 1m = 1,000,000 bytes. These
163+
# settings use the programmer's power-of-2 notion, which means
164+
# 1m = 1,048,576 bytes.
161165
#
162166
# This number will include space used by journal files, but does not
163-
# include space used by metadata files. In this version, journal data
164-
# currently being written and downloads in progress are also not accounted
165-
# for, so disk usage can temporarily go above this number. You will want to
167+
# include space used by metadata files. In this version, journal data
168+
# currently being written and downloads in progress are also not accounted
169+
# for, so disk usage can temporarily go above this number. You will want to
166170
# leave a little space available on the mount for those operations.
167171
cache_size = 2g
168172

0 commit comments

Comments
 (0)