@@ -38,7 +38,7 @@ class Layer {
    * layer.
    */
   explicit Layer(const LayerParameter& param)
-    : layer_param_(param), is_shared_(false) {
+    : layer_param_(param) {
       // Set phase and copy blobs (if there are any).
       phase_ = param.phase();
       if (layer_param_.blobs_size() > 0) {
@@ -66,7 +66,6 @@ class Layer {
    */
   void SetUp(const vector<Blob<Dtype>*>& bottom,
       const vector<Blob<Dtype>*>& top) {
-    InitMutex();
     CheckBlobCounts(bottom, top);
     LayerSetUp(bottom, top);
     Reshape(bottom, top);
@@ -92,30 +91,6 @@ class Layer {
   virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
       const vector<Blob<Dtype>*>& top) {}
 
-  /**
-   * @brief Whether a layer should be shared by multiple nets during data
-   *        parallelism. By default, all layers except data layers should not
-   *        be shared; data layers should be shared so that each worker solver
-   *        accesses data sequentially during data parallelism.
-   */
-  virtual inline bool ShareInParallel() const { return false; }
-
-  /** @brief Return whether this layer is actually shared by other nets.
-   *         If ShareInParallel() is true, more than one GPU is in use, and the
-   *         net has a TRAIN phase, this function is expected to return true.
-   */
-  inline bool IsShared() const { return is_shared_; }
-
-  /** @brief Set whether this layer is actually shared by other nets.
-   *         If ShareInParallel() is true, more than one GPU is in use, and the
-   *         net has a TRAIN phase, then is_shared should be set to true.
-   */
-  inline void SetShared(bool is_shared) {
-    CHECK(ShareInParallel() || !is_shared)
-        << type() << " Layer does not support sharing.";
-    is_shared_ = is_shared;
-  }
-
   /**
    * @brief Adjust the shapes of top blobs and internal buffers to accommodate
    *        the shapes of the bottom blobs.
@@ -428,19 +403,6 @@ class Layer {
   }
 
  private:
-  /** Whether this layer is actually shared by other nets */
-  bool is_shared_;
-
-  /** The mutex for sequential forward if this layer is shared */
-  shared_ptr<boost::mutex> forward_mutex_;
-
-  /** Initialize forward_mutex_ */
-  void InitMutex();
-  /** Lock forward_mutex_ if this layer is shared */
-  void Lock();
-  /** Unlock forward_mutex_ if this layer is shared */
-  void Unlock();
-
   DISABLE_COPY_AND_ASSIGN(Layer);
 };  // class Layer
 
@@ -450,8 +412,6 @@ class Layer {
 template <typename Dtype>
 inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
     const vector<Blob<Dtype>*>& top) {
-  // Lock during forward to ensure sequential forward
-  Lock();
   Dtype loss = 0;
   Reshape(bottom, top);
   switch (Caffe::mode()) {
@@ -482,7 +442,6 @@ inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
   default:
     LOG(FATAL) << "Unknown caffe mode.";
   }
-  Unlock();
   return loss;
 }
 
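Note: the header above only declares InitMutex(), Lock(), and Unlock(); their
definitions lived in src/caffe/layer.cpp and go away with this change. As a
rough sketch (assuming the pre-removal Caffe sources), those definitions
amounted to:

  // Sketch of the deleted definitions: the mutex is created at SetUp() time
  // and taken around Forward() only when the layer is shared, so worker nets
  // sharing a data layer read batches one at a time.
  template <typename Dtype>
  void Layer<Dtype>::InitMutex() {
    forward_mutex_.reset(new boost::mutex());
  }

  template <typename Dtype>
  void Layer<Dtype>::Lock() {
    if (IsShared()) {
      forward_mutex_->lock();
    }
  }

  template <typename Dtype>
  void Layer<Dtype>::Unlock() {
    if (IsShared()) {
      forward_mutex_->unlock();
    }
  }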
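Since ShareInParallel() defaulted to false and SetShared() CHECK-failed for a
layer that did not opt in, only layers overriding it could ever be shared. A
data layer opting in under the removed API would have looked roughly like the
following (MyDataLayer is an illustrative name, not an actual Caffe class):

  // Illustrative only: a data layer that opts into cross-net sharing so one
  // instance hands out batches sequentially to every worker solver.
  template <typename Dtype>
  class MyDataLayer : public Layer<Dtype> {
   public:
    explicit MyDataLayer(const LayerParameter& param) : Layer<Dtype>(param) {}
    virtual inline bool ShareInParallel() const { return true; }
  };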