
Commit b92b40a

Add Redis OSS standalone benchmark setup for c8g.16xlarge -ARM
1 parent 1394770 commit b92b40a

File tree

9 files changed: +502 −0 lines changed

Lines changed: 46 additions & 0 deletions
# oss-redis-standalone-arm64-ubuntu24.04-c8g.16xlarge

Deploy a multi-VM benchmark scenario, including 2 client machines and 1 DB machine.

- Cloud provider: AWS
- OS: Ubuntu 24.04
- Client machine: c8g.16xlarge
- DB machine: c8g.16xlarge

---

#### Tested scenarios

- TBD

#### Deployment

##### Required env variables

The terraform and ansible scripts expect the following environment variables to be set:

```
export EC2_REGION={ ## INSERT REGION ## }
export EC2_ACCESS_KEY={ ## INSERT EC2 ACCESS KEY ## }
export EC2_SECRET_KEY={ ## INSERT EC2 SECRET KEY ## }
export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
```

##### Required pub/private keys

The terraform script expects the following public/private key pair to be present in the ~/.ssh/ dir:

```
~/.ssh/perf-ci.pem
~/.ssh/perf-ci.pub
```

##### Deployment steps

Within the project repo:

```bash
cd terraform/oss-standalone-arm64-ubuntu24.04-c8g.16xlarge
terraform plan
terraform apply
```
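
Once `terraform apply` completes, the IPs of the provisioned machines can be read back from this setup's Terraform outputs, and the environment is torn down the same way; for example:

```bash
# Read the IPs exposed by this setup's outputs once the apply has finished
terraform output server_public_ip
terraform output client_public_ip

# Tear the benchmark environment down when the run is complete
terraform destroy
```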
Lines changed: 59 additions & 0 deletions
```hcl
resource "aws_instance" "client" {
  count                  = var.client_instance_count
  ami                    = var.instance_ami
  instance_type          = var.instance_type
  subnet_id              = data.terraform_remote_state.shared_resources.outputs.subnet_public_us_east_2b_id
  vpc_security_group_ids = ["${data.terraform_remote_state.shared_resources.outputs.performance_cto_sg_id}"]
  key_name               = var.key_name
  placement_group        = data.terraform_remote_state.shared_resources.outputs.placement_group_name_us_east_2b
  availability_zone      = "us-east-2b"

  # Cloud-init configuration from external file to install and build memtier
  user_data = file("${path.module}/cloud-init-client.yaml")

  root_block_device {
    volume_size           = var.instance_volume_size
    volume_type           = var.instance_volume_type
    encrypted             = var.instance_volume_encrypted
    delete_on_termination = true
  }

  volume_tags = {
    Environment  = "${var.environment}"
    Name         = "ebs_block_device-${var.setup_name}-CLIENT-${count.index + 1}"
    setup        = "${var.setup_name}"
    redis_module = "${var.redis_module}"
    github_actor = "${var.github_actor}"
    github_repo  = "${var.github_repo}"
    github_sha   = "${var.github_sha}"
    timeout_secs = "${var.timeout_secs}"
  }

  tags = {
    Environment  = "${var.environment}"
    Name         = "${var.setup_name}-CLIENT-${count.index + 1}"
    setup        = "${var.setup_name}"
    redis_module = "${var.redis_module}"
    github_actor = "${var.github_actor}"
    github_repo  = "${var.github_repo}"
    github_sha   = "${var.github_sha}"
    timeout_secs = "${var.timeout_secs}"
  }

  ################################################################################
  # Wait here until the instance is ready to accept SSH connections
  ################################################################################
  provisioner "remote-exec" {
    script = "./../../scripts/wait_for_instance.sh"
    connection {
      host        = self.public_ip # `self` refers to this resource (the client instance)
      type        = "ssh"
      user        = var.ssh_user
      private_key = file(var.private_key)
      # Timeout needs to be larger than 5m for metal instances
      timeout = "15m"
      agent   = "false"
    }
  }
}
```
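
The `./../../scripts/wait_for_instance.sh` helper referenced by the `remote-exec` provisioner is not part of this commit. Purely as an illustrative sketch, assuming its only job is to confirm that first-boot provisioning has finished, such a script could look like:

```bash
#!/bin/bash
# Illustrative sketch only -- the real scripts/wait_for_instance.sh lives outside this diff.
# Block until cloud-init has finished its first-boot run on the instance.
cloud-init status --wait
echo "instance is ready"
```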
Lines changed: 40 additions & 0 deletions
```yaml
#cloud-config

package_update: true
package_upgrade: true
packages:
  - git
  - build-essential
  - autoconf
  - automake
  - libpcre3-dev
  - libevent-dev
  - pkg-config
  - zlib1g-dev
  - libssl-dev
  - libtool
  - ca-certificates
  - wget

# Create the memtier installation script
write_files:
  - path: /tmp/install_memtier.sh
    permissions: "0755"
    content: |
      #!/bin/bash
      set -e # exit immediately on error
      # Clone memtier benchmark
      cd /tmp
      rm -rf memtier_benchmark
      git clone https://github.com/RedisLabs/memtier_benchmark.git
      cd memtier_benchmark
      # Build and install
      autoreconf -ivf
      ./configure
      make -j
      sudo make install
      echo "Memtier benchmark installed successfully"

# Run the installation script
runcmd:
  - bash /tmp/install_memtier.sh
  - echo "Cloud-init installation completed"
```
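
Once this cloud-init run finishes, `memtier_benchmark` is installed on each client. A smoke-test invocation against the DB machine might look like the following; the DB address and load parameters are illustrative values, not taken from this commit:

```bash
# Illustrative memtier_benchmark run from a client against the DB machine.
# 10.0.0.10:6379 and the thread/connection/time values are example settings.
memtier_benchmark -s 10.0.0.10 -p 6379 \
  --threads 8 --clients 25 \
  --ratio 1:10 --data-size 100 \
  --test-time 60
```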
Lines changed: 77 additions & 0 deletions
```yaml
#cloud-config

# Update and upgrade packages
package_update: true
package_upgrade: true

# Install required packages
packages:
  ### DragonflyDB dependencies
  - ninja-build
  - libunwind-dev
  - libboost-context-dev
  - libssl-dev
  - autoconf-archive
  - libtool
  - cmake
  - g++
  - bison
  - zlib1g-dev
  - git
  - make
  - pkg-config

  # Redis dependencies
  - git
  - dpkg-dev
  - gcc
  - g++
  - libc6-dev
  - libssl-dev
  - make
  - cmake
  - clang
  - automake
  - autoconf
  - libtool
  - ca-certificates
  - wget

# Build the databases from source
runcmd:
  ### DragonflyDB
  # Log start time
  - echo "Starting DragonflyDB installation at $(date)"

  # Clone DragonflyDB repository
  - echo "Cloning DragonflyDB repository..."
  - cd /home/ubuntu
  - git clone --recursive https://github.com/dragonflydb/dragonfly
  - chown -R ubuntu:ubuntu /home/ubuntu/dragonfly

  # Build DragonflyDB
  - echo "Building DragonflyDB (this may take a while)..."
  - cd /home/ubuntu/dragonfly
  - sudo -u ubuntu ./helio/blaze.sh -release
  - cd build-opt && sudo -u ubuntu ninja dragonfly

  # Log completion
  - echo "DragonflyDB installation completed at $(date)"

  ### Redis
  # Log start time
  - echo "Starting Redis installation at $(date)"

  # Clone Redis repository
  - echo "Cloning Redis repository..."
  - cd /home/ubuntu
  - git clone https://github.com/redis/redis

  # Build Redis
  - echo "Building Redis (this may take a while)..."
  - cd /home/ubuntu/redis
  - make distclean
  - make BUILD_TLS=yes -j

  # Log completion
  - echo "Redis installation completed at $(date)"
```
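
This cloud-init only builds the servers; starting them is left to the benchmark orchestration. For reference, the freshly built Redis binary could be launched for a standalone benchmark run roughly like this (the flags are common benchmarking choices and an assumption, not part of this commit):

```bash
# Illustrative: launch the Redis build produced by cloud-init for a standalone benchmark.
# Persistence is disabled and protected mode is turned off so the client machines can connect.
cd /home/ubuntu/redis
./src/redis-server --port 6379 --daemonize yes \
  --protected-mode no --save '' --appendonly no
```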
Lines changed: 12 additions & 0 deletions
```hcl
################################################################################
# This is the bucket holding this specific setup's tfstate
################################################################################
terraform {
  backend "s3" {
    bucket = "performance-cto-group"
    key    = "benchmarks/infrastructure/oss-standalone-arm64-ubuntu24.04-c8g.16xlarge.tfstate"
    region = "us-east-1"
  }
}
```
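
Note that the S3 backend above is configured during `terraform init`, which has to run once in the setup directory before the plan/apply steps from the README:

```bash
# One-time backend initialization before running terraform plan / apply
cd terraform/oss-standalone-arm64-ubuntu24.04-c8g.16xlarge
terraform init
```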
Lines changed: 93 additions & 0 deletions
```hcl
resource "aws_instance" "server" {
  count                  = var.server_instance_count
  ami                    = var.instance_ami
  instance_type          = var.instance_type
  subnet_id              = data.terraform_remote_state.shared_resources.outputs.subnet_public_us_east_2b_id
  vpc_security_group_ids = ["${data.terraform_remote_state.shared_resources.outputs.performance_cto_sg_id}"]
  key_name               = var.key_name
  placement_group        = data.terraform_remote_state.shared_resources.outputs.placement_group_name_us_east_2b
  availability_zone      = "us-east-2b"

  # Cloud-init configuration from external file to install and build Redis and DragonflyDB from source
  user_data = file("${path.module}/cloud-init-db.yaml")

  root_block_device {
    volume_size           = var.instance_volume_size
    volume_type           = var.instance_volume_type
    encrypted             = var.instance_volume_encrypted
    delete_on_termination = true
  }

  volume_tags = {
    Environment  = "${var.environment}"
    Name         = "ebs_block_device-${var.setup_name}-DB-${count.index + 1}"
    setup        = "${var.setup_name}"
    redis_module = "${var.redis_module}"
    github_actor = "${var.github_actor}"
    github_repo  = "${var.github_repo}"
    github_sha   = "${var.github_sha}"
    timeout_secs = "${var.timeout_secs}"
  }

  tags = {
    Environment  = "${var.environment}"
    Name         = "${var.setup_name}-DB-${count.index + 1}"
    setup        = "${var.setup_name}"
    redis_module = "${var.redis_module}"
    github_actor = "${var.github_actor}"
    github_repo  = "${var.github_repo}"
    github_sha   = "${var.github_sha}"
    timeout_secs = "${var.timeout_secs}"
  }

  ################################################################################
  # Wait here until the instance is ready to accept SSH connections
  ################################################################################
  provisioner "remote-exec" {
    script = "./../../scripts/wait_for_instance.sh"
    connection {
      host        = self.public_ip # `self` refers to this resource (the server instance)
      type        = "ssh"
      user        = var.ssh_user
      private_key = file(var.private_key)
      # Timeout needs to be larger than 5m for metal instances
      timeout = "15m"
      agent   = "false"
    }
  }

  ################################################################################
  # Copy create-cluster script to the server
  ################################################################################
  provisioner "file" {
    source      = "./../../scripts/create-cluster"
    destination = "/home/${var.ssh_user}/create-cluster"
    connection {
      host        = self.public_ip
      type        = "ssh"
      user        = var.ssh_user
      private_key = file(var.private_key)
      timeout     = "1m"
      agent       = "false"
    }
  }

  ################################################################################
  # Make create-cluster script executable
  ################################################################################
  provisioner "remote-exec" {
    inline = [
      "chmod +x /home/${var.ssh_user}/create-cluster",
      "echo 'create-cluster script copied and made executable'"
    ]
    connection {
      host        = self.public_ip
      type        = "ssh"
      user        = var.ssh_user
      private_key = file(var.private_key)
      timeout     = "1m"
      agent       = "false"
    }
  }
}
```
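
A quick post-apply sanity check is to SSH into the DB machine with the perf-ci key and confirm the artifacts the provisioners and cloud-init are expected to have produced; the user name and paths below assume the default `ubuntu` account used by the cloud-init scripts:

```bash
# Illustrative check: replace <server_public_ip> with the value from the Terraform outputs.
ssh -i ~/.ssh/perf-ci.pem ubuntu@<server_public_ip> \
  'ls -l ~/create-cluster ~/redis/src/redis-server ~/dragonfly/build-opt/dragonfly'
```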
Lines changed: 15 additions & 0 deletions
```hcl
output "server_public_ip" {
  value = ["${aws_instance.server[*].public_ip}"]
}

output "server_private_ip" {
  value = ["${aws_instance.server[*].private_ip}"]
}

output "client_public_ip" {
  value = ["${aws_instance.client[*].public_ip}"]
}

output "client_private_ip" {
  value = ["${aws_instance.client[*].private_ip}"]
}
```
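
Because each output wraps the per-instance IP list, downstream automation would typically read it as JSON; a sketch (the jq filter assumes the nested-list shape produced by the expressions above):

```bash
# Read the client IP list as JSON for use by benchmark automation.
terraform output -json client_public_ip | jq -r '.[][]'
```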
Lines changed: 17 additions & 0 deletions
```hcl
# provider
provider "aws" {
  region = var.region
}

################################################################################
# This is the shared resources bucket key -- it is needed across environments
# (security rules, etc.). !! Do not change this !!
################################################################################
data "terraform_remote_state" "shared_resources" {
  backend = "s3"
  config = {
    bucket = "performance-cto-group"
    key    = "benchmarks/infrastructure/shared_resources.tfstate"
    region = "us-east-1"
  }
}
```
