
Commit 90de496: "update code"
Parent: 2cfeeb8

File tree: 12 files changed, +184 -55 lines


delete-results.sh

Lines changed: 7 additions & 0 deletions

@@ -0,0 +1,7 @@
+#!/bin/bash
+
+sudo chmod -R 777 results/
+cd results/
+sudo rm -r *
+
+#sudo rm -r results/

plot.sh

Lines changed: 3 additions & 1 deletion

@@ -1,6 +1,8 @@
 #!/bin/bash
 
+sudo chmod -R 777 results/
 cd src
 python make_graphs.py
 cd ..
-./push-plots.sh
+#./push-plots.sh
+./push-results.sh

push-results.sh

Lines changed: 7 additions & 0 deletions

@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# push results folder to git.
+cd results
+git add .
+git commit -m "results"
+git push

run.sh

Lines changed: 58 additions & 29 deletions

@@ -1,35 +1,64 @@
 #!/bin/bash
+cd build
+
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=1 --r-size=250 --s-size=250 --total-cores=15 --task-size=10 --skew=0 --core-pausing=0 --program-pmu=1
+
+# MEMORY SIZE EXPERIMENTS
+# 32 GB -- previous default max
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=32 --r-size=2000000000 --s-size=2000000000 --total-cores=15 --task-size=1000000 --skew=0 --core-pausing=0 --program-pmu=1
+## 40 GB
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=40 --r-size=2500000000 --s-size=2500000000 --total-cores=15 --task-size=1000000 --skew=0 --core-pausing=0 --program-pmu=1
+## 43 GB
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=43 --r-size=2700000000 --s-size=2700000000 --total-cores=15 --task-size=1000000 --skew=0 --core-pausing=0 --program-pmu=1
+## 46.5 GB
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=46 --r-size=2900000000 --s-size=2900000000 --total-cores=15 --task-size=1000000 --skew=0 --core-pausing=0 --program-pmu=1
+## 48 GB
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=48 --r-size=3000000000 --s-size=3000000000 --total-cores=15 --task-size=1000000 --skew=0 --core-pausing=0 --program-pmu=1
+## 64 GB
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=64 --r-size=4000000000 --s-size=4000000000 --total-cores=15 --task-size=1000000 --skew=0 --core-pausing=0 --program-pmu=1
+## 96 GB
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=1 --r-size=6000000000 --s-size=6000000000 --total-cores=15 --task-size=1000000 --skew=0 --core-pausing=0 --program-pmu=1
+## 112 GB
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=7 --r-size=7000000000 --s-size=7000000000 --total-cores=15 --task-size=1000000 --skew=0 --core-pausing=0 --program-pmu=1
+## 128 GB
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=8 --r-size=8000000000 --s-size=8000000000 --total-cores=15 --task-size=1000000 --skew=0 --core-pausing=0 --program-pmu=1
+## 136 GB -- max possible
+
+# SOCKET EXPERIMENTS
+sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=1-8 --r-size=8500000000 --s-size=8500000000 --total-cores=4 --task-size=5000000 --skew=0 --core-pausing=0 --program-pmu=1
 
-#for i in {1..10}
-#do
-# (
-# cd /home/sofia/Projects/CloudDB/pool-hashjoin-pcm/build
-# sudo ./Pool-HashJoin-PCM --r-size=2000000000 --s-size=2000000000 --total-cores=14 --task-size=10000000 --skew=0
-# ) &
-# (
-# cd /home/sofia/Projects/CloudDB/busy-cores/build
-# sudo ./Busy-Cores 2 3 4
-# ) &
-#
-# wait
-## cd ..
-# cd /home/sofia/Projects/CloudDB/pool-hashjoin-pcm
-# ./plot.sh
-#done
-#sudo ./Pool-HashJoin-PCM --r-size=200 --s-size=200 --total-cores=14 --task-size=10 --skew=1
-#sudo ./Pool-HashJoin-PCM --r-size=2000000000 --s-size=2000000000 --total-cores=14 --task-size=10000000 --skew=0
-#cd ..
-#./plot.sh
 
-#cd build
-#sudo ./Pool-HashJoin-PCM --r-size=100 --s-size=100 --total-cores=14 --task-size=10 --skew=0
 
+# NUMBER OF CORES EXPERIMENTS
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=13 --r-size=8500000000 --s-size=8500000000 --total-cores=13 --task-size=5000000 --skew=0 --core-pausing=0 --program-pmu=1
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=12 --r-size=8500000000 --s-size=8500000000 --total-cores=12 --task-size=5000000 --skew=0 --core-pausing=0 --program-pmu=1
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=11 --r-size=8500000000 --s-size=8500000000 --total-cores=11 --task-size=5000000 --skew=0 --core-pausing=0 --program-pmu=1
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=10 --r-size=8500000000 --s-size=8500000000 --total-cores=10 --task-size=5000000 --skew=0 --core-pausing=0 --program-pmu=1
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=9 --r-size=8500000000 --s-size=8500000000 --total-cores=9 --task-size=5000000 --skew=0 --core-pausing=0 --program-pmu=1
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=8 --r-size=8500000000 --s-size=8500000000 --total-cores=8 --task-size=5000000 --skew=0 --core-pausing=0 --program-pmu=1
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=7 --r-size=8500000000 --s-size=8500000000 --total-cores=7 --task-size=5000000 --skew=0 --core-pausing=0 --program-pmu=1
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=6 --r-size=8500000000 --s-size=8500000000 --total-cores=6 --task-size=5000000 --skew=0 --core-pausing=0 --program-pmu=1
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=5 --r-size=8500000000 --s-size=8500000000 --total-cores=5 --task-size=5000000 --skew=0 --core-pausing=0 --program-pmu=1
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=4 --r-size=8500000000 --s-size=8500000000 --total-cores=4 --task-size=5000000 --skew=0 --core-pausing=0 --program-pmu=1
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=3 --r-size=8500000000 --s-size=8500000000 --total-cores=3 --task-size=5000000 --skew=0 --core-pausing=0 --program-pmu=1
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=2 --r-size=8500000000 --s-size=8500000000 --total-cores=2 --task-size=5000000 --skew=0 --core-pausing=0 --program-pmu=1
+#sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=1 --r-size=8500000000 --s-size=8500000000 --total-cores=1 --task-size=5000000 --skew=0 --core-pausing=0 --program-pmu=1
 
-for i in {1..10}
-do
-cd build
-sudo ./Pool-HashJoin-PCM --r-size=2000000000 --s-size=2000000000 --total-cores=14 --task-size=100000 --skew=0
 
-cd ..
-./plot.sh
-done
+# CONCURRENT EXPERIMENTS
+#(sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=0 --r-size=2000000000 --s-size=2000000000 --total-cores=15 --task-size=1000000 --skew=0 --core-pausing=1 --program-pmu=1) &
+#(sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=1 --r-size=2000000000 --s-size=2000000000 --total-cores=15 --task-size=1000000 --skew=0 --core-pausing=1 --program-pmu=1) &
+#(sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=2 --r-size=2000000000 --s-size=2000000000 --total-cores=15 --task-size=1000000 --skew=0 --core-pausing=1 --program-pmu=1) &
+#(sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=3 --r-size=2000000000 --s-size=2000000000 --total-cores=15 --task-size=1000000 --skew=0 --core-pausing=0 --program-pmu=1) &
+#(sudo perf stat -e cpu-clock sudo ./Pool-HashJoin-PCM --id=4 --r-size=2000000000 --s-size=2000000000 --total-cores=15 --task-size=1000000 --skew=0 --core-pausing=0 --program-pmu=1) &
+
+
+# LOOP EXPERIMENTS
+#for i in {1..10}
+#do
+# cd build
+# sudo ./Pool-HashJoin-PCM --r-size=2000000000 --s-size=2000000000 --total-cores=14 --task-size=1000000 --skew=0
+#
+# cd ..
+# ./plot.sh
+#done

save-results.sh

Lines changed: 9 additions & 0 deletions

@@ -0,0 +1,9 @@
+#!/bin/bash
+
+# save graphs
+cd src
+python save_results.py
+
+# save
+cd ..
+./push-plots.sh

src/join.cpp

Lines changed: 7 additions & 0 deletions

@@ -72,6 +72,11 @@ void allocate_hashtable(Hashtable ** ppht, uint64_t nbuckets) {
     *ppht = ht;
 }
 
+void deallocate_hashtable(Hashtable & ht) {
+    free(ht.buckets);
+    // free(ht);
+}
+
 /**
  * Initializes a new BucketBuffer for later use in allocating
  * buckets when overflow occurs.
@@ -204,10 +209,12 @@ void probe(ThreadArg &args) {
                 args.matches += 1;
                 matches ++;
                 matchesPerKey ++;
+#if SAVE_RELATIONS_TO_FILE==1
                 ChainTuple * chainTup = cb_next_writepos(args.threadJoinResults->chainedTupBuf);
                 chainTup->key = args.relS->tuples[i].key;          /* key */
                 chainTup->sPayload = args.relS->tuples[i].payload; /* S-rid */
                 chainTup->rPayload = b->tuples[j].payload;         /* R-rid */
+#endif
             }
         }
         b = b->next; // Follow overflow pointer.
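
Note on the new deallocate_hashtable: it releases only the bucket array, and the commented-out free(ht) would not compile as written, because ht is a reference rather than a pointer (free takes a void *). Below is a minimal sketch of the allocate/deallocate pairing, with an assumed Hashtable layout (the real definitions live in join.h): if allocate_hashtable heap-allocates the struct itself, as its Hashtable ** out-parameter suggests, then freeing only ht.buckets leaves the struct allocation live.

// Sketch only: Bucket/Hashtable layouts here are assumed stand-ins,
// not the project's real definitions from join.h.
#include <cstdlib>
#include <cstdint>

struct Bucket { uint64_t key; };                 // stand-in bucket
struct Hashtable { Bucket * buckets; uint64_t num_buckets; };

void allocate_hashtable_sketch(Hashtable ** ppht, uint64_t nbuckets) {
    Hashtable * ht = (Hashtable *) malloc(sizeof(Hashtable));
    ht->num_buckets = nbuckets;
    ht->buckets = (Bucket *) calloc(nbuckets, sizeof(Bucket));
    *ppht = ht;                                  // caller owns both allocations
}

void deallocate_hashtable_sketch(Hashtable & ht) {
    free(ht.buckets);    // releases the bucket array only
    // free(&ht);        // would release the struct too; note the address-of,
                         // since ht is a reference, not a pointer
}

With main.cpp calling deallocate_hashtable(*ht) just before return, the struct allocation itself would still be outstanding at exit; harmless there, but worth noting if the join were ever run in a loop within one process.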

src/join.h

Lines changed: 2 additions & 0 deletions

@@ -10,6 +10,8 @@
 
 void allocate_hashtable(Hashtable ** ppht, uint64_t nbuckets);
 
+void deallocate_hashtable(Hashtable & ht);
+
 void init_bucket_buffer(BucketBuffer ** ppbuf);
 
 void free_bucket_buffer(BucketBuffer * buf);

src/main.cpp

Lines changed: 1 addition & 0 deletions

@@ -185,6 +185,7 @@ int main(int argc, char **argv) {
     free(relR.tuples);
     free(relS.tuples);
     free(path);
+    deallocate_hashtable(*ht);
     // std::cout << "DONE! BYE!" << std::endl;
     return 0;
 }

src/make_graphs.py

Lines changed: 20 additions & 8 deletions

@@ -13,7 +13,7 @@
 # rmb_png_file_name = folder_path+'/remote-RM.png'
 # tresults_png_file_name = folder_path+'/Individual-Thread-Results.png'
 
-numThreads = 15
+# numThreads = 15
 
 cache_csv_file_name = '/cache-results.csv'
 ipc_csv_file_name = '/IPC-results.csv'
@@ -27,6 +27,7 @@
 rmb_png_file_name = '/MB-remote-results.png'
 tresults_png_file_name = '/individual-thread-results.png'
 
+nThreads = 0
 
 def getColumnInt(matrix, i):
     return [int(row[i]) for row in matrix]
@@ -35,17 +36,23 @@ def getColumnDob(matrix, i):
     return [float(row[i]) for row in matrix]
 
 def plotCache(path):
+    plt.clf()
     y = []
     all_rows = []
 
     print(path + cache_csv_file_name)
     with open(path + cache_csv_file_name) as csvFile:
         rows = csv.reader(csvFile, delimiter=',')
 
+        # Find number of threads in the file:
+        nThreads = len(next(rows)) # Read first line and count columns
+        # print(nThreads)
+        csvFile.seek(0)
+
         for row in rows:
             all_rows.append(row)
 
-    for i in range (numThreads):
+    for i in range (nThreads):
         y = getColumnInt(all_rows, i)
         x = list(range(0, len(y)))
         label = "core-"+str(i)
@@ -68,10 +75,15 @@ def plotIpc(path):
     with open(path + ipc_csv_file_name) as csvFile:
         rows = csv.reader(csvFile, delimiter=',')
 
+        # Find number of threads in the file:
+        nThreads = len(next(rows)) # Read first line and count columns
+        # print(nThreads)
+        csvFile.seek(0)
+
         for row in rows:
             all_rows.append(row)
 
-    for i in range (numThreads):
+    for i in range (nThreads):
         y = getColumnDob(all_rows, i)
         x = list(range(0, len(y)))
         label = "core-"+str(i)
@@ -161,7 +173,7 @@ def plotLocalMemBandwidth(path):
         all_rows.append(row)
 
     core = 0
-    for i in range (0,28,2):
+    for i in range (0,nThreads,2):
 
         y = getColumnDob(all_rows, i)
         x = list(range(0, len(y)))
@@ -189,7 +201,7 @@ def plotRemoteMemBandwidth(path):
 
     core = 0
 
-    for i in range (1,28,2):
+    for i in range (1,nThreads,2):
         y = getColumnDob(all_rows, i)
         x = list(range(0, len(y)))
         label = "remote-core-"+str(core)
@@ -219,11 +231,11 @@ def plotRemoteMemBandwidth(path):
 if not os.path.exists(plot_dir):
     os.makedirs(plot_dir)
     print(f"Created folder '{plot_dir}'.")
-    plotCache(directory)
+    plotCache(directory) # also finds the number of rows.
    plotIpc(directory)
     plotThreadResults(directory)
-    plotLocalMemBandwidth(directory)
-    plotRemoteMemBandwidth(directory)
+    # plotLocalMemBandwidth(directory)
+    # plotRemoteMemBandwidth(directory)
 else:
     print(f"Folder '{plot_dir}' already exists.")
 
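
This change derives the thread count from the first CSV row instead of the hard-coded numThreads = 15. One caveat worth knowing: assigning nThreads inside plotCache and plotIpc binds a function-local name, so the module-level nThreads = 0 that plotLocalMemBandwidth and plotRemoteMemBandwidth read stays 0 unless the functions declare global nThreads, which may be why those two calls are commented out. For illustration, here is the same count-columns-from-the-header idea as a C++ sketch (the cache-results.csv path follows the diff; everything else is assumed):

// Illustrative sketch: size per-core loops from the CSV's first line
// rather than hard-coding the thread count.
#include <fstream>
#include <iostream>
#include <string>

static int countCsvColumns(const std::string & csvPath) {
    std::ifstream file(csvPath);
    std::string firstLine;
    if (!std::getline(file, firstLine)) { return 0; }  // empty or missing file
    int cols = 1;                                      // n commas => n+1 columns
    for (char c : firstLine) { if (c == ',') { cols++; } }
    return cols;
}

int main() {
    int nThreads = countCsvColumns("results/cache-results.csv");
    std::cout << "detected " << nThreads << " per-core columns\n";
    return 0;
}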

src/pcm_monitor.cpp

Lines changed: 26 additions & 17 deletions

@@ -33,6 +33,7 @@ PcmMonitor::PcmMonitor(int totalCores_, bool corePausing_, char * path_) {
     monitoring = false;
     totalCores = totalCores_;
     corePausing = corePausing_;
+    memBandwidthFlag = false;
 
     this->path = new char[strlen(path_)+1];
     strcpy(this->path, path_);
@@ -46,6 +47,8 @@
         ipcStats[i].second = 0;
         lmbStats[i].first = 0;
         lmbStats[i].second = 0;
+        rmbStats[i].first = 0;
+        rmbStats[i].second = 0;
     }
 }
 
@@ -90,10 +93,11 @@ bool PcmMonitor::shouldThreadStop(int id) {
 }
 
 void PcmMonitor::makeStopDecisions() {
-    int maxStrikesTolerance = 20;
+    int maxStrikesTolerance = 10;
 
     // NOTE: core 0 is not allowed to stop.
-    for (int i = 1; i < totalCores; i++) {
+    // HERE: set which cores are allowed to stop at all
+    for (int i = 1; i < 8; i++) {
         if (threadStrikes[i] > maxStrikesTolerance) {
             threadStop[i] = true;
         } else if (threadStrikes[i] <= 0) {
@@ -115,7 +119,7 @@ void PcmMonitor::analyzeCacheStats() {
     int worstValue = 0;
     int worstCore = 0;
     int maxStrikes = 5;
-    double ipcThreshold = 1.5;
+    double ipcThreshold = 0.4;
 
     for (int i = 0; i < totalCores; i++) {
 
@@ -164,6 +168,8 @@ void PcmMonitor::runMonitoring() {
         if (corePausing) { makeStopDecisions(); }
         std::this_thread::sleep_for(std::chrono::milliseconds(400));
     }
+    memBandwidthFlag = false;
+    saveMemoryBandwidthValues();
 }
 
 
@@ -268,24 +274,27 @@ void PcmMonitor::saveIpcValues() {
 
 void PcmMonitor::saveMemoryBandwidthValues() {
 
-    std::ofstream file(this->path + std::string(MB_CSV), std::ios_base::app);
+    if (!memBandwidthFlag) {
+        std::ofstream file(this->path + std::string(MB_CSV), std::ios_base::app);
 
-    for (int i = 0; i < totalCores; i++) {
-        if (!i == 0) { file << ","; }
-        double localMemBdwth = getLocalMemoryBW(coreBeforeState[i], coreAfterState[i]);
-        file << localMemBdwth;
-        file << ",";
-        double remoteMemBdwth = getRemoteMemoryBW(coreBeforeState[i], coreAfterState[i]);
-        file << remoteMemBdwth;
+        for (int i = 0; i < totalCores; i++) {
+            if (!i == 0) { file << ","; }
+            double localMemBdwth = getLocalMemoryBW(coreBeforeState[i], coreAfterState[i]);
+            file << localMemBdwth;
+            file << ",";
+            double remoteMemBdwth = getRemoteMemoryBW(coreBeforeState[i], coreAfterState[i]);
+            file << remoteMemBdwth;
 
-        lmbStats[i].first = lmbStats[i].second;
-        lmbStats[i].second = localMemBdwth;
+            lmbStats[i].first = lmbStats[i].second;
+            lmbStats[i].second = localMemBdwth;
 
-        rmbStats[i].first = rmbStats[i].second;
-        rmbStats[i].second = remoteMemBdwth;
+            rmbStats[i].first = rmbStats[i].second;
+            rmbStats[i].second = remoteMemBdwth;
+        }
+        file << "\n";
+        file.close();
+        memBandwidthFlag = true;
     }
-    file << "\n";
-    file.close();
 }
 
 /*
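
As the diff reads, the memBandwidthFlag changes turn saveMemoryBandwidthValues into a write-once latch: the flag is cleared when the monitoring loop exits, the next call appends exactly one CSV row, and the set flag then suppresses any further writes. A condensed, self-contained sketch of that pattern (names taken from the diff, everything else simplified; the separator guard is written as i != 0, which is what the diff's !i == 0 evaluates to):

// Condensed sketch of the write-once latch introduced by this commit.
#include <cstddef>
#include <fstream>
#include <vector>

struct BandwidthSaver {
    bool memBandwidthFlag = false;   // false => the next save() may write

    void save(const std::vector<double> & perCoreBW, const char * csvPath) {
        if (!memBandwidthFlag) {
            std::ofstream file(csvPath, std::ios_base::app);
            for (std::size_t i = 0; i < perCoreBW.size(); i++) {
                if (i != 0) { file << ","; }   // comma between columns
                file << perCoreBW[i];
            }
            file << "\n";
            file.close();
            memBandwidthFlag = true;           // latch: later calls are no-ops
        }
    }
};

int main() {
    BandwidthSaver saver;
    saver.save({1.5, 2.0, 0.7}, "/tmp/MB-results.csv"); // writes one row
    saver.save({9.9, 9.9, 9.9}, "/tmp/MB-results.csv"); // suppressed by the latch
    return 0;
}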
