Skip to content

Commit 11d8995

Browse files
committed
Add padding
1 parent f1a3009 commit 11d8995

File tree

4 files changed

+270
-98
lines changed

4 files changed

+270
-98
lines changed

cnn-frame.cc

Lines changed: 120 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -31,101 +31,163 @@ namespace cnn {
3131
return std::make_shared<tensor_tree::vertex>(root);
3232
}
3333

34-
std::shared_ptr<tensor_tree::vertex> make_tensor_tree(int conv_layer, int fc_layer)
34+
std::shared_ptr<tensor_tree::vertex> make_tensor_tree(cnn_t const& config)
3535
{
36-
std::shared_ptr<tensor_tree::vertex> root = make_cnn_tensor_tree(conv_layer);
36+
tensor_tree::vertex root { "nil" };
3737

38-
for (int i = 0; i < fc_layer; ++i) {
39-
tensor_tree::vertex t { "nil" };
40-
41-
t.children.push_back(tensor_tree::make_tensor("softmax weight"));
42-
t.children.push_back(tensor_tree::make_tensor("softmax bias"));
43-
44-
root->children.push_back(std::make_shared<tensor_tree::vertex>(t));
38+
for (int i = 0; i < config.layers.size(); ++i) {
39+
if (config.layers[i].type == "conv") {
40+
tensor_tree::vertex conv { "nil" };
41+
conv.children.push_back(tensor_tree::make_tensor("conv weight"));
42+
conv.children.push_back(tensor_tree::make_tensor("conv bias"));
43+
root.children.push_back(std::make_shared<tensor_tree::vertex>(conv));
44+
} else if (config.layers[i].type == "fc") {
45+
tensor_tree::vertex fc { "nil" };
46+
47+
fc.children.push_back(tensor_tree::make_tensor("weight"));
48+
fc.children.push_back(tensor_tree::make_tensor("bias"));
49+
50+
root.children.push_back(std::make_shared<tensor_tree::vertex>(fc));
51+
} else if (config.layers[i].type == "framewise-fc") {
52+
tensor_tree::vertex fc { "nil" };
53+
54+
fc.children.push_back(tensor_tree::make_tensor("weight"));
55+
fc.children.push_back(tensor_tree::make_tensor("bias"));
56+
57+
root.children.push_back(std::make_shared<tensor_tree::vertex>(fc));
58+
}
4559
}
4660

47-
tensor_tree::vertex t { "nil" };
48-
49-
t.children.push_back(tensor_tree::make_tensor("softmax weight"));
50-
t.children.push_back(tensor_tree::make_tensor("softmax bias"));
51-
52-
root->children.push_back(std::make_shared<tensor_tree::vertex>(t));
53-
54-
return root;
61+
return std::make_shared<tensor_tree::vertex>(root);
5562
}
5663

5764
cnn_t load_param(std::istream& is)
5865
{
5966
std::string line;
6067

61-
if (!std::getline(is, line)) {
62-
std::cout << "fail to parse the number of conv layers" << std::endl;
63-
exit(1);
64-
}
65-
6668
cnn_t result;
6769

68-
result.conv_layer = std::stoi(line);
69-
70-
for (int i = 0; i < result.conv_layer; ++i) {
71-
if (!std::getline(is, line)) {
72-
std::cout << "fail to parse dilation parameters" << std::endl;
73-
exit(1);
70+
while (std::getline(is, line) && line != "#") {
71+
if (ebt::startswith(line, "conv")) {
72+
auto parts = ebt::split(line);
73+
layer_t ell { "conv" };
74+
assert(parts.size() == 5);
75+
ell.data = std::make_shared<std::tuple<int, int, int, int>>(
76+
std::make_tuple(std::stoi(parts[1]), std::stoi(parts[2]),
77+
std::stoi(parts[3]), std::stoi(parts[4])));
78+
result.layers.push_back(ell);
79+
} else if (ebt::startswith(line, "max-pooling")) {
80+
auto parts = ebt::split(line);
81+
layer_t ell { "max-pooling" };
82+
assert(parts.size() == 5);
83+
ell.data = std::make_shared<std::tuple<int, int, int, int>>(
84+
std::make_tuple(std::stoi(parts[1]), std::stoi(parts[2]),
85+
std::stoi(parts[3]), std::stoi(parts[4])));
86+
result.layers.push_back(ell);
87+
} else if (ebt::startswith(line, "fc")) {
88+
result.layers.push_back(layer_t { "fc" });
89+
} else if (ebt::startswith(line, "framewise-fc")) {
90+
result.layers.push_back(layer_t { "framewise-fc" });
91+
} else if (ebt::startswith(line, "relu")) {
92+
result.layers.push_back(layer_t { "relu" });
93+
} else if (ebt::startswith(line, "logsoftmax")) {
94+
result.layers.push_back(layer_t { "logsoftmax" });
95+
} else {
96+
throw std::logic_error("unable to parse: " + line);
7497
}
75-
76-
std::vector<std::string> parts = ebt::split(line);
77-
78-
assert(parts.size() == 2);
79-
80-
result.dilation.push_back(std::make_pair(std::stoi(parts[0]), std::stoi(parts[1])));
81-
}
82-
83-
if (!std::getline(is, line)) {
84-
std::cout << "fail to parse the number of fc layers" << std::endl;
85-
exit(1);
8698
}
8799

88-
result.fc_layer = std::stoi(line);
89-
90-
result.param = make_tensor_tree(result.conv_layer, result.fc_layer);
100+
result.param = make_tensor_tree(result);
91101
tensor_tree::load_tensor(result.param, is);
92102

93103
return result;
94104
}
95105

96106
void save_param(cnn_t& config, std::ostream& os)
97107
{
98-
os << config.conv_layer << std::endl;
99-
100-
for (int i = 0; i < config.conv_layer; ++i) {
101-
os << config.dilation[i].first << " " << config.dilation[i].second << std::endl;
108+
for (int i = 0; i < config.layers.size(); ++i) {
109+
auto& ell = config.layers[i];
110+
111+
if (ell.type == "conv") {
112+
auto& t = *std::static_pointer_cast<std::tuple<int, int, int, int>>(ell.data);
113+
os << "conv " << std::get<0>(t) << " " << std::get<1>(t)
114+
<< " " << std::get<2>(t) << " " << std::get<3>(t) << std::endl;
115+
} else if (ell.type == "max-pooling") {
116+
std::tuple<int, int, int, int> t = *std::static_pointer_cast<
117+
std::tuple<int, int, int, int>>(ell.data);
118+
os << "max-pooling " << std::get<0>(t)
119+
<< " " << std::get<1>(t) << " " << std::get<2>(t)
120+
<< " " << std::get<3>(t) << std::endl;
121+
} else if (ell.type == "fc") {
122+
os << "fc" << std::endl;
123+
} else if (ell.type == "framewise-fc") {
124+
os << "framewise-fc" << std::endl;
125+
} else if (ell.type == "relu") {
126+
os << "relu" << std::endl;
127+
} else if (ell.type == "logsoftmax") {
128+
os << "logsoftmax" << std::endl;
129+
} else {
130+
throw std::logic_error("unknown layer type: " + ell.type);
131+
}
102132
}
103133

104-
os << config.fc_layer << std::endl;
134+
os << "#" << std::endl;
105135

106136
tensor_tree::save_tensor(config.param, os);
107137
}
108138

109139
std::shared_ptr<transcriber>
110-
make_transcriber(cnn_t const& cnn_config, double dropout, std::default_random_engine *gen)
140+
make_transcriber(cnn_t const& config, double dropout, std::default_random_engine *gen)
111141
{
112142
cnn::multilayer_transcriber multi_trans;
113143

114-
for (int i = 0; i < cnn_config.conv_layer; ++i) {
115-
auto t = std::make_shared<cnn_transcriber>(
116-
cnn_transcriber { cnn_config.dilation[i].first, cnn_config.dilation[i].second });
144+
for (int i = 0; i < config.layers.size(); ++i) {
145+
auto& ell = config.layers[i];
117146

118-
multi_trans.layers.push_back(t);
119-
}
147+
if (ell.type == "conv") {
148+
auto& d = *std::static_pointer_cast<std::tuple<int, int, int, int>>(ell.data);
120149

121-
for (int i = 0; i < cnn_config.fc_layer; ++i) {
122-
auto t = std::make_shared<fc_transcriber>(fc_transcriber{});
150+
auto t = std::make_shared<conv_transcriber>(
151+
conv_transcriber { std::get<0>(d), std::get<1>(d),
152+
std::get<2>(d), std::get<3>(d) });
123153

124-
if (dropout == 0.0) {
154+
multi_trans.layers.push_back(t);
155+
156+
} else if (ell.type == "max-pooling") {
157+
auto& d = *std::static_pointer_cast<std::tuple<int, int, int, int>>(ell.data);
158+
159+
auto t = std::make_shared<max_pooling_transcriber>(
160+
max_pooling_transcriber { std::get<0>(d), std::get<1>(d),
161+
std::get<2>(d), std::get<3>(d) });
162+
163+
multi_trans.layers.push_back(t);
164+
165+
} else if (ell.type == "fc") {
166+
auto t = std::make_shared<fc_transcriber>(fc_transcriber{});
167+
168+
multi_trans.layers.push_back(t);
169+
170+
if (dropout != 0.0) {
171+
multi_trans.layers.push_back(std::make_shared<dropout_transcriber>(
172+
dropout_transcriber {dropout, *gen}));
173+
}
174+
} else if (ell.type == "framewise-fc") {
175+
auto t = std::make_shared<framewise_fc_transcriber>(framewise_fc_transcriber{});
176+
177+
multi_trans.layers.push_back(t);
178+
179+
if (dropout != 0.0) {
180+
multi_trans.layers.push_back(std::make_shared<dropout_transcriber>(
181+
dropout_transcriber {dropout, *gen}));
182+
}
183+
} else if (ell.type == "relu") {
184+
auto t = std::make_shared<relu_transcriber>(relu_transcriber {});
185+
multi_trans.layers.push_back(t);
186+
} else if (ell.type == "logsoftmax") {
187+
auto t = std::make_shared<logsoftmax_transcriber>(logsoftmax_transcriber {});
125188
multi_trans.layers.push_back(t);
126189
} else {
127-
multi_trans.layers.push_back(std::make_shared<dropout_transcriber>(
128-
dropout_transcriber {t, dropout, *gen}));
190+
throw std::logic_error("unknown layer type: " + ell.type);
129191
}
130192
}
131193

cnn-frame.h

Lines changed: 10 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -6,18 +6,21 @@
66

77
namespace cnn {
88

9-
std::shared_ptr<tensor_tree::vertex> make_cnn_tensor_tree(int layer);
10-
11-
std::shared_ptr<tensor_tree::vertex> make_densenet_tensor_tree(int layer);
9+
struct layer_t {
10+
std::string type;
11+
std::shared_ptr<void> data;
12+
};
1213

1314
struct cnn_t {
14-
int conv_layer;
15-
int fc_layer;
16-
std::vector<std::pair<int, int>> dilation;
15+
std::vector<layer_t> layers;
1716
std::shared_ptr<tensor_tree::vertex> param;
1817
};
1918

20-
std::shared_ptr<tensor_tree::vertex> make_tensor_tree(int conv_layer, int fc_layer);
19+
std::shared_ptr<tensor_tree::vertex> make_cnn_tensor_tree(int layer);
20+
21+
std::shared_ptr<tensor_tree::vertex> make_densenet_tensor_tree(int layer);
22+
23+
std::shared_ptr<tensor_tree::vertex> make_tensor_tree(cnn_t const& config);
2124

2225
cnn_t load_param(std::istream& is);
2326
void save_param(cnn_t& param, std::ostream& os);

0 commit comments

Comments
 (0)