@@ -45,72 +45,70 @@ class LRNOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   LRNOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X", R"DOC(
-        (Tensor) The input of LRN operator. It must be a 4D tenor with NCHW format.
-        )DOC");
-
+    AddInput("X",
+             "(Tensor) The input of LRN operator. "
+             "It must be a 4D tensor with NCHW format.");
     AddOutput("Out",
               "(Tensor) The output of LRN operator, which is also the 4D "
               "tensor with NCHW format.");
-    AddOutput("MidOut", R"Doc(
-        (Tensor)Middle result of lrn op. It's computed in forward process
-        and also used in backward process.
-        )Doc");
-
-    AddAttr<int>("n", R"DOC(
-        (int, default 5)n is “adjacent” kernel maps at the same spatial position.
-        )DOC")
+    AddOutput("MidOut",
+              "(Tensor) Middle result of LRN operator. It's computed in "
+              "forward process and also used in backward process.");
+
+    AddAttr<int>("n",
+                 "(int, default 5) "
+                 "n is the number of \"adjacent\" kernel maps summed over "
+                 "at the same spatial position.")
         .SetDefault(5)
         .GreaterThan(0);

66- AddAttr<T>(" k" , R"DOC(
67- (float, default 2.0)k is the bias.
68- )DOC " )
65+ AddAttr<T>(" k" ,
66+ " (float, default 2.0) "
67+ " k is the bias. " )
6968 .SetDefault (2.0 )
7069 .GreaterThan (0.0 );
7170
72- AddAttr<T>(" alpha" , R"DOC(
73- (float, default 0.0001)alpha is the scale number.
74- )DOC " )
71+ AddAttr<T>(" alpha" ,
72+ " (float, default 0.0001) "
73+ " alpha is the scale number. " )
7574 .SetDefault (0.0001 )
7675 .GreaterThan (0.0 );
7776
78- AddAttr<T>(" beta" , R"DOC(
79- (float, default 0.75)beta is the power number.
80- )DOC " )
77+ AddAttr<T>(" beta" ,
78+ " (float, default 0.75) "
79+ " beta is the power number. " )
8180 .SetDefault (0.75 )
8281 .GreaterThan (0.0 );
8382
8483 AddComment (R"DOC(
85- Local Response Normalization.
86-
87- This Function comes from the paper
88- "ImageNet Classification with Deep Convolutional Neural Networks".
84+ Local Response Normalization Operator.
8985
90- The original formula is:
86+ This operator comes from the paper
87+ "ImageNet Classification with Deep Convolutional Neural Networks".
9188
92- Input(i, x, y)
93- Output(i, x, y) = ----------------------------------------------
94- -- upper
95- (k + alpha * > (Input(j, x, y))^2) ^ (beta)
96- -- j = lower
89+ The original formula is:
9790
98- upper is `min(C, c + n/2)`
99- lower if `max(0, c - n/2)`
91+ $$
92+ Output(i, x, y) = Input(i, x, y) / \left(
93+ k + \alpha \sum\limits^{\min(C, c + n/2)}_{j = \max(0, c - n/2)}
94+ (Input(j, x, y))^2
95+ \right)^{\beta}
96+ $$
10097
-Function implementation:
+Function implementation:

-    inputs and outpus is NCHW format, while input.shape.ndims() is equal 4.
-    And the meaning of each dimension(0-3) is respectively batch size,
-    feature maps, rows and columns.
+Inputs and outputs are in NCHW format, while input.shape.ndims() equals 4.
+Dimensions 0 ~ 3 represent batch size, feature maps, rows,
+and columns, respectively.

-Input and Output in the above formula is for each map(i) of one image, and
-Input(i, x, y), Output(i, x, y) represents an element in an image.
+Input and Output in the formula above are for each map (i) of one image, and
+Input(i, x, y), Output(i, x, y) represent an element in an image.

-C is the number of feature maps of one image, and n is a hyper-parameters
-is configured when Function is initialized. The sum in the denominator
-is the sum of the same position in the neighboring maps.
-)DOC");
+C is the number of feature maps of one image. n is a hyper-parameter
+configured when the operator is initialized. The sum in the denominator
+is the sum over the same position in the neighboring maps.
+
+)DOC");
   }
 };

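To make the LRN formula in the new DOC string concrete, here is a minimal, framework-free sketch of the forward pass for a single image. The function name LRNForward, the flat CHW std::vector layout, and the 0-based clamping of the summation window to [max(0, c - n/2), min(C - 1, c + n/2)] (the indexable equivalent of the `min(C, c + n/2)` / `max(0, c - n/2)` bounds in the doc) are illustrative assumptions, not the operator's actual implementation.

#include <algorithm>
#include <cmath>
#include <vector>

// Reference LRN forward for one image with C feature maps of size H x W,
// stored as a flat vector in CHW order. Mirrors the formula above:
//   Out(c, x, y) = In(c, x, y) /
//       (k + alpha * sum_{j in window(c)} In(j, x, y)^2)^beta
std::vector<float> LRNForward(const std::vector<float>& in, int C, int H,
                              int W, int n, float k, float alpha, float beta) {
  std::vector<float> out(in.size());
  for (int c = 0; c < C; ++c) {
    // Window of n "adjacent" maps around map c, clamped to valid indices.
    const int lower = std::max(0, c - n / 2);
    const int upper = std::min(C - 1, c + n / 2);
    for (int y = 0; y < H; ++y) {
      for (int x = 0; x < W; ++x) {
        // Sum of squares at the same spatial position in neighboring maps.
        float square_sum = 0.0f;
        for (int j = lower; j <= upper; ++j) {
          const float v = in[(j * H + y) * W + x];
          square_sum += v * v;
        }
        const int idx = (c * H + y) * W + x;
        out[idx] = in[idx] / std::pow(k + alpha * square_sum, beta);
      }
    }
  }
  return out;
}

The real operator additionally writes MidOut, an intermediate computed in the forward pass and reused by the backward pass; this sketch recomputes everything on the fly and omits gradients.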