1/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
3Licensed under the Apache License, Version 2.0 (the "License");
4you may not use this file except in compliance with the License.
5You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9Unless required by applicable law or agreed to in writing, software
10distributed under the License is distributed on an "AS IS" BASIS,
11WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12See the License for the specific language governing permissions and
13limitations under the License.
14==============================================================================*/
15
16#ifndef TENSORFLOW_CORE_KERNELS_EIGEN_BACKWARD_CUBOID_CONVOLUTIONS_H_
17#define TENSORFLOW_CORE_KERNELS_EIGEN_BACKWARD_CUBOID_CONVOLUTIONS_H_
18
19#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
20#include "tensorflow/core/kernels/eigen_cuboid_convolution.h"
21
22namespace Eigen {
23
24/** CuboidConvolutionBackwardInput
25 * \ingroup CXX11_NeuralNetworks_Module
26 *
27 * \brief Computes the backprop for the input of a 3D convolution.
28 *
29 * The output_backward parameter is expected to be a tensor with a rank of 4 or
30 * more (channels, depth, height, width, and optionally others)
31 * The kernel parameter is expected to be a 5D tensor (filters, channels,
32 * kernel_depth, kernel_height, kernel_width)
33 * output_backward and kernel have to be in the same layout.
34 *
 * The dimensions of the result will be channels, depth, height, width (and
 * others if applicable).
37 *
38 * It is possible to swap the order of the depth, width and height dimensions
39 * provided that the same order is used in the input, the kernel, and the
40 * output.
41 *
42 * All dimension orders above are given for col-major, and should be reversed
43 * for row-major.
44 */
45
template <typename OutputBackward, typename Kernel>
EIGEN_ALWAYS_INLINE static const std::conditional_t<
    internal::traits<OutputBackward>::Layout == ColMajor,
    TensorReshapingOp<
        const DSizes<typename internal::traits<OutputBackward>::Index,
                     internal::traits<OutputBackward>::NumDimensions>,
        const TensorContractionOp<
            const array<
                IndexPair<typename internal::traits<OutputBackward>::Index>, 1>,
            const Eigen::TensorForcedEvalOp<const TensorReshapingOp<
                const DSizes<typename internal::traits<OutputBackward>::Index,
                             2>,
                const TensorShufflingOp<
                    const array<
                        typename internal::traits<OutputBackward>::Index, 5>,
                    const TensorReverseOp<const Eigen::array<bool, 5>,
                                          const Kernel>>>>,
            const TensorReshapingOp<
                const DSizes<typename internal::traits<OutputBackward>::Index,
                             2>,
                const TensorVolumePatchOp<Dynamic, Dynamic, Dynamic,
                                          const OutputBackward>>>>,
    TensorReshapingOp<
        const DSizes<typename internal::traits<OutputBackward>::Index,
                     internal::traits<OutputBackward>::NumDimensions>,
        const TensorContractionOp<
            const array<
                IndexPair<typename internal::traits<OutputBackward>::Index>, 1>,
            const TensorReshapingOp<
                const DSizes<typename internal::traits<OutputBackward>::Index,
                             2>,
                const TensorVolumePatchOp<Dynamic, Dynamic, Dynamic,
                                          const OutputBackward>>,
            const Eigen::TensorForcedEvalOp<const TensorReshapingOp<
                const DSizes<typename internal::traits<OutputBackward>::Index,
                             2>,
                const TensorShufflingOp<
                    const array<
                        typename internal::traits<OutputBackward>::Index, 5>,
                    const TensorReverseOp<const Eigen::array<bool, 5>,
                                          const Kernel>>>>>>>
CuboidConvolutionBackwardInput(
    const Kernel& kernel, const OutputBackward& output_backward,
    typename internal::traits<OutputBackward>::Index inputPlanes,
    typename internal::traits<OutputBackward>::Index inputRows,
    typename internal::traits<OutputBackward>::Index inputCols,
    const DenseIndex plane_stride = 1, const DenseIndex row_stride = 1,
    const DenseIndex col_stride = 1) {
  typedef typename internal::traits<OutputBackward>::Index TensorIndex;
  // TensorRef lets us query the dimensions of the (possibly lazy) input
  // expressions without forcing their evaluation.
  const TensorRef<const Tensor<typename internal::traits<Kernel>::Scalar,
                               internal::traits<Kernel>::NumDimensions,
                               internal::traits<Kernel>::Layout, TensorIndex>>
      kern(kernel);
  const TensorRef<
      const Tensor<typename internal::traits<OutputBackward>::Scalar,
                   internal::traits<OutputBackward>::NumDimensions,
                   internal::traits<OutputBackward>::Layout, TensorIndex>>
      out(output_backward);

  EIGEN_STATIC_ASSERT(internal::traits<Kernel>::Layout ==
                      internal::traits<OutputBackward>::Layout,
                      YOU_MADE_A_PROGRAMMING_MISTAKE);

  static const bool isColMajor =
      (internal::traits<OutputBackward>::Layout == ColMajor);

  static const int NumDims = internal::traits<OutputBackward>::NumDimensions;

  // Number of filters to apply. This is the same as the output depth of the
  // result
  const TensorIndex kernelFilters =
      isColMajor ? kern.dimensions()[0] : kern.dimensions()[4];
  // Number of channels. This is the same as the input depth.
  const TensorIndex kernelChannels =
      isColMajor ? kern.dimensions()[1] : kern.dimensions()[3];
  // Spatial extent of the kernel. The plane dimension is the middle one of the
  // five kernel dimensions, so both layout branches read index 2.
  const TensorIndex kernelPlanes =
      isColMajor ? kern.dimensions()[2] : kern.dimensions()[2];
  const TensorIndex kernelRows =
      isColMajor ? kern.dimensions()[3] : kern.dimensions()[1];
  const TensorIndex kernelCols =
      isColMajor ? kern.dimensions()[4] : kern.dimensions()[0];

  // Spatial extent of output_backward (dims 1..3 in col-major; mirrored from
  // the end in row-major).
  const TensorIndex outputPlanes =
      isColMajor ? out.dimensions()[1] : out.dimensions()[NumDims - 2];
  const TensorIndex outputRows =
      isColMajor ? out.dimensions()[2] : out.dimensions()[NumDims - 3];
  const TensorIndex outputCols =
      isColMajor ? out.dimensions()[3] : out.dimensions()[NumDims - 4];

  // TODO(ezhulenev): Add support for inflated strides. Without inflated strides
  // effective kernel planes/rows/cols are always the same as the kernel itself
  // (see eigen_spatial_convolutions for details).
  const TensorIndex kernelPlanesEff = kernelPlanes;
  const TensorIndex kernelRowsEff = kernelRows;
  const TensorIndex kernelColsEff = kernelCols;

  // Computing the forward padding.
  // Reconstruct the leading (top/left) padding the forward convolution must
  // have used to map the given input sizes to the observed output sizes.
  const TensorIndex forward_pad_top_z = numext::maxi<Index>(
      0,
      ((outputPlanes - 1) * plane_stride + kernelPlanesEff - inputPlanes) / 2);
  const TensorIndex forward_pad_top = numext::maxi<Index>(
      0, ((outputRows - 1) * row_stride + kernelRowsEff - inputRows) / 2);
  const TensorIndex forward_pad_left = numext::maxi<Index>(
      0, ((outputCols - 1) * col_stride + kernelColsEff - inputCols) / 2);

  // Leading padding for the backward pass: the input backprop is a "full"
  // convolution of output_backward with the spatially reversed kernel, whose
  // leading padding is (kernel_size - 1) - forward_padding.
  const TensorIndex padding_top_z = kernelPlanesEff - 1 - forward_pad_top_z;
  const TensorIndex padding_top = kernelRowsEff - 1 - forward_pad_top;
  const TensorIndex padding_left = kernelColsEff - 1 - forward_pad_left;

  // Trailing padding is whatever remains so that the stride-1 patch extraction
  // below yields exactly inputPlanes * inputRows * inputCols patch positions.
  const TensorIndex padding_bottom_z = inputPlanes -
                                       (outputPlanes - 1) * plane_stride - 2 -
                                       padding_top_z + kernelPlanesEff;
  const TensorIndex padding_bottom = inputRows - (outputRows - 1) * row_stride -
                                     2 - padding_top + kernelRowsEff;
  const TensorIndex padding_right = inputCols - (outputCols - 1) * col_stride -
                                    2 - padding_left + kernelColsEff;

  eigen_assert(padding_top_z >= 0);
  eigen_assert(padding_top >= 0);
  eigen_assert(padding_left >= 0);
  eigen_assert(padding_bottom_z >= 0);
  eigen_assert(padding_bottom >= 0);
  eigen_assert(padding_right >= 0);

  // The kernel has dimensions :
  //   filters x channels x patch_planes x patch_rows x patch_cols.
  // We need to reverse the kernel along the spatial dimensions.
  Eigen::array<bool, 5> kernel_reverse;
  if (isColMajor) {
    kernel_reverse[0] = false;
    kernel_reverse[1] = false;
    kernel_reverse[2] = true;
    kernel_reverse[3] = true;
    kernel_reverse[4] = true;
  } else {
    kernel_reverse[0] = true;
    kernel_reverse[1] = true;
    kernel_reverse[2] = true;
    kernel_reverse[3] = false;
    kernel_reverse[4] = false;
  }

  // Reorder the dimensions to:
  //   filters x patch_planes x patch_rows x patch_cols x channels
  array<TensorIndex, 5> kernel_shuffle;
  if (isColMajor) {
    //  From: filters x channels x planes x rows x cols
    //  To:   filters x planes x rows x cols x channels
    kernel_shuffle[0] = 0;
    kernel_shuffle[1] = 2;
    kernel_shuffle[2] = 3;
    kernel_shuffle[3] = 4;
    kernel_shuffle[4] = 1;
  } else {
    //  From: cols x rows x planes x channels x filters
    //  To:   channels x cols x rows x planes x filters
    kernel_shuffle[0] = 3;
    kernel_shuffle[1] = 0;
    kernel_shuffle[2] = 1;
    kernel_shuffle[3] = 2;
    kernel_shuffle[4] = 4;
  }

  // Collapse the dims: one axis holding all filter/spatial coefficients to be
  // contracted away, and one axis holding the channels that survive.
  DSizes<TensorIndex, 2> kernel_dims;
  if (isColMajor) {
    kernel_dims[0] = kernelFilters * kernelPlanes * kernelRows * kernelCols;
    kernel_dims[1] = kernelChannels;
  } else {
    kernel_dims[1] = kernelFilters * kernelPlanes * kernelRows * kernelCols;
    kernel_dims[0] = kernelChannels;
  }

  // The output_backward has dimensions out_depth X out_planes X out_rows X
  // out_cols X OTHERS
  // When we extract the image patches from output_backward, it will have
  // dimensions:
  //   out_depth X (patch_planes * patch_rows * patch_cols) X (input_planes *
  //   input_rows * input_cols * OTHERS)
  DSizes<TensorIndex, 2> pre_contract_dims;
  if (isColMajor) {
    pre_contract_dims[0] =
        kernelFilters * kernelPlanes * kernelRows * kernelCols;
    pre_contract_dims[1] = inputPlanes * inputRows * inputCols;
    for (int i = 4; i < NumDims; ++i) {
      pre_contract_dims[1] *= out.dimension(i);
    }
  } else {
    pre_contract_dims[1] =
        kernelFilters * kernelPlanes * kernelRows * kernelCols;
    pre_contract_dims[0] = inputPlanes * inputRows * inputCols;
    for (int i = 0; i < NumDims - 4; ++i) {
      pre_contract_dims[0] *= out.dimension(i);
    }
  }

  // We will contract along the collapsed dimension that contains the
  // kernelFilters, kernelPlanes, kernelRows and kernelCols.
  array<IndexPair<TensorIndex>, 1> contract_dims;
  if (isColMajor) {
    // col-major: kernel.contract(output.patches)
    contract_dims[0] = IndexPair<TensorIndex>(0, 0);
  } else {
    // row-major: output.patches.contract(kernel)
    contract_dims[0] = IndexPair<TensorIndex>(1, 1);
  }

  // Post contraction, the dimensions of the input_backprop is
  //  channels X input_planes X input_rows X input_cols X OTHERS
  DSizes<TensorIndex, NumDims> post_contract_dims;
  if (isColMajor) {
    post_contract_dims[0] = kernelChannels;
    post_contract_dims[1] = inputPlanes;
    post_contract_dims[2] = inputRows;
    post_contract_dims[3] = inputCols;
    for (int i = 4; i < NumDims; ++i) {
      post_contract_dims[i] = out.dimension(i);
    }
  } else {
    post_contract_dims[NumDims - 1] = kernelChannels;
    post_contract_dims[NumDims - 2] = inputPlanes;
    post_contract_dims[NumDims - 3] = inputRows;
    post_contract_dims[NumDims - 4] = inputCols;
    for (int i = 0; i < NumDims - 4; ++i) {
      post_contract_dims[i] = out.dimension(i);
    }
  }

  // Patches are extracted with stride 1; the forward strides are applied as
  // inflation (dilation) strides on output_backward, which together with the
  // padding above implements the transposed convolution. The .eval() after
  // the kernel reshape materializes the rearranged kernel once, so the
  // reverse/shuffle/reshape chain is not re-evaluated during the contraction.
  // The two branches of choose() differ only in the operand order of the
  // contraction, which keeps the result laid out correctly for each layout.
  return choose(
      Cond<internal::traits<OutputBackward>::Layout == ColMajor>(),
      kernel.reverse(kernel_reverse)
          .shuffle(kernel_shuffle)
          .reshape(kernel_dims)
          .eval()
          .contract(output_backward
                        .extract_volume_patches(
                            kernelPlanes, kernelRows, kernelCols, 1, 1, 1,
                            plane_stride, row_stride, col_stride, padding_top_z,
                            padding_bottom_z, padding_top, padding_bottom,
                            padding_left, padding_right)
                        .reshape(pre_contract_dims),
                    contract_dims)
          .reshape(post_contract_dims),
      output_backward
          .extract_volume_patches(kernelPlanes, kernelRows, kernelCols, 1, 1, 1,
                                  plane_stride, row_stride, col_stride,
                                  padding_top_z, padding_bottom_z, padding_top,
                                  padding_bottom, padding_left, padding_right)
          .reshape(pre_contract_dims)
          .contract(kernel.reverse(kernel_reverse)
                        .shuffle(kernel_shuffle)
                        .reshape(kernel_dims)
                        .eval(),
                    contract_dims)
          .reshape(post_contract_dims));
}
302
303/** CuboidConvolutionBackwardKernel
304 * \ingroup CXX11_NeuralNetworks_Module
305 *
306 * \brief Computes the backprop for the filter of a 3D convolution.
307 *
 * The output_backward parameter is expected to be a tensor with a rank of 5
 * (channels, depth, height, width, batch); exactly five dimensions are
 * required (see the static assert on the number of dimensions below).
 * The kernel parameter is expected to be a 5D tensor (filters, channels,
 * kernel_depth, kernel_height, kernel_width)
312 * output_backward and kernel have to be in the same layout.
313 *
 * The dimensions of the result will be filters, channels, kernel_depth,
 * kernel_height, kernel_width.
316 *
317 * It is possible to swap the order of the depth, width and height dimensions
318 * provided that the same order is used in the input, the kernel, and the
319 * output.
320 *
321 * All dimension orders above are given for col-major, and should be reversed
322 * for row-major.
323 */
template <typename OutputBackward, typename Input>
EIGEN_ALWAYS_INLINE static const std::conditional_t<
    internal::traits<Input>::Layout == ColMajor,
    const TensorReverseOp<
        const Eigen::array<typename internal::traits<Input>::Index,
                           internal::traits<Input>::NumDimensions>,
        const Eigen::TensorShufflingOp<
            const Eigen::array<typename internal::traits<Input>::Index,
                               internal::traits<Input>::NumDimensions>,
            const Eigen::TensorReshapingOp<
                const Eigen::DSizes<typename internal::traits<Input>::Index,
                                    internal::traits<Input>::NumDimensions>,
                const TensorContractionOp<
                    const array<
                        IndexPair<typename internal::traits<Input>::Index>, 1>,
                    const Eigen::TensorForcedEvalOp<const TensorReshapingOp<
                        const DSizes<typename internal::traits<Input>::Index,
                                     2>,
                        const Eigen::TensorShufflingOp<
                            const Eigen::array<
                                typename internal::traits<Input>::Index,
                                internal::traits<Input>::NumDimensions>,
                            const OutputBackward>>>,
                    const TensorReshapingOp<
                        const DSizes<typename internal::traits<Input>::Index,
                                     2>,
                        const TensorVolumePatchOp<
                            Dynamic, Dynamic, Dynamic,
                            const Eigen::TensorForcedEvalOp<
                                const Eigen::TensorShufflingOp<
                                    const Eigen::array<
                                        typename internal::traits<Input>::Index,
                                        internal::traits<Input>::NumDimensions>,
                                    const Input>>>>>>>>,
    const TensorReverseOp<
        const Eigen::array<typename internal::traits<Input>::Index,
                           internal::traits<Input>::NumDimensions>,
        const Eigen::TensorShufflingOp<
            const Eigen::array<typename internal::traits<Input>::Index,
                               internal::traits<Input>::NumDimensions>,
            const Eigen::TensorReshapingOp<
                const Eigen::DSizes<typename internal::traits<Input>::Index,
                                    internal::traits<Input>::NumDimensions>,
                const TensorContractionOp<
                    const array<
                        IndexPair<typename internal::traits<Input>::Index>, 1>,
                    const TensorReshapingOp<
                        const DSizes<typename internal::traits<Input>::Index,
                                     2>,
                        const TensorVolumePatchOp<
                            Dynamic, Dynamic, Dynamic,
                            const Eigen::TensorForcedEvalOp<
                                const Eigen::TensorShufflingOp<
                                    const Eigen::array<
                                        typename internal::traits<Input>::Index,
                                        internal::traits<Input>::NumDimensions>,
                                    const Input>>>>,
                    const Eigen::TensorForcedEvalOp<const TensorReshapingOp<
                        const DSizes<typename internal::traits<Input>::Index,
                                     2>,
                        const Eigen::TensorShufflingOp<
                            const Eigen::array<
                                typename internal::traits<Input>::Index,
                                internal::traits<Input>::NumDimensions>,
                            const OutputBackward>>>>>>>>
CuboidConvolutionBackwardKernel(
    const Input& input, const OutputBackward& output_backward,
    typename internal::traits<Input>::Index kernelPlanes,
    typename internal::traits<Input>::Index kernelRows,
    typename internal::traits<Input>::Index kernelCols,
    const DenseIndex stridePlanes = 1, const DenseIndex strideRows = 1,
    const DenseIndex strideCols = 1) {
  typedef typename internal::traits<Input>::Index TensorIndex;
  // TensorRef lets us query the dimensions of the (possibly lazy) input
  // expressions without forcing their evaluation.
  TensorRef<Tensor<typename internal::traits<Input>::Scalar,
                   internal::traits<Input>::NumDimensions,
                   internal::traits<Input>::Layout, TensorIndex>>
      in(input);
  TensorRef<Tensor<typename internal::traits<OutputBackward>::Scalar,
                   internal::traits<OutputBackward>::NumDimensions,
                   internal::traits<OutputBackward>::Layout, TensorIndex>>
      out(output_backward);

  EIGEN_STATIC_ASSERT(internal::traits<Input>::Layout ==
                      internal::traits<OutputBackward>::Layout,
                      YOU_MADE_A_PROGRAMMING_MISTAKE);

  static const bool isColMajor = (internal::traits<Input>::Layout == ColMajor);

  static const int NumDims = internal::traits<Input>::NumDimensions;
  EIGEN_STATIC_ASSERT(internal::traits<Input>::NumDimensions ==
                      internal::traits<OutputBackward>::NumDimensions,
                      YOU_MADE_A_PROGRAMMING_MISTAKE);

  // We do not support higher dimensional backward convolutions, or convolutions
  // without batch dimension.
  // TODO(ezhulenev): Relax this constraint, and turn on tests without batch
  // dimension in eigen_backward_cuboid_convolutions_test.cc.
  EIGEN_STATIC_ASSERT(internal::traits<Input>::NumDimensions == 5,
                      YOU_MADE_A_PROGRAMMING_MISTAKE);

  // Spatial extent of the input (dims 1..3 in col-major; mirrored from the
  // end in row-major).
  const TensorIndex inputPlanes =
      isColMajor ? in.dimension(1) : in.dimension(NumDims - 2);
  const TensorIndex inputRows =
      isColMajor ? in.dimension(2) : in.dimension(NumDims - 3);
  const TensorIndex inputCols =
      isColMajor ? in.dimension(3) : in.dimension(NumDims - 4);

  // Spatial extent of output_backward.
  const TensorIndex outputPlanes =
      isColMajor ? out.dimension(1) : out.dimension(NumDims - 2);
  const TensorIndex outputRows =
      isColMajor ? out.dimension(2) : out.dimension(NumDims - 3);
  const TensorIndex outputCols =
      isColMajor ? out.dimension(3) : out.dimension(NumDims - 4);

  // Number of filters. This is the same as the output depth.
  const TensorIndex kernelFilters =
      isColMajor ? out.dimension(0) : out.dimension(NumDims - 1);
  // Number of channels. This is the same as the input depth.
  const TensorIndex kernelChannels =
      isColMajor ? in.dimension(0) : in.dimension(NumDims - 1);

  // Number of batches in the input tensor.
  const TensorIndex batch =
      isColMajor ? in.dimension(4) : in.dimension(NumDims - 5);

  // TODO(ezhulenev): Add support for inflated strides. Without inflated strides
  // effective kernel planes/rows/cols are always the same as the kernel itself
  // (see eigen_spatial_convolutions for details).
  const TensorIndex kernelPlanesEff = kernelPlanes;
  const TensorIndex kernelRowsEff = kernelRows;
  const TensorIndex kernelColsEff = kernelCols;

  // Compute forward padding from input and output_backward dimensions.
  const TensorIndex padPlanes = numext::maxi<Index>(
      0, (outputPlanes - 1) * stridePlanes + kernelPlanesEff - inputPlanes);
  const TensorIndex padRows = numext::maxi<Index>(
      0, (outputRows - 1) * strideRows + kernelRowsEff - inputRows);
  const TensorIndex padCols = numext::maxi<Index>(
      0, (outputCols - 1) * strideCols + kernelColsEff - inputCols);

  // Leading half of the reconstructed forward padding (the forward pass puts
  // the smaller half of an odd total on the top/left side).
  const TensorIndex padding_top_z = padPlanes / 2;
  const TensorIndex padding_top = padRows / 2;
  const TensorIndex padding_left = padCols / 2;

  // Compute paddings for output_backward before extracting patches.
  // expanded_* is the size of output_backward after inflating it by the
  // forward strides; padded_* is the size needed so that the stride-1
  // extraction of input-sized patches below yields exactly kernel-sized
  // results (padded - input + 1 == kernel).
  const auto expanded_out_planes = (outputPlanes - 1) * stridePlanes + 1;
  const auto expanded_out_rows = (outputRows - 1) * strideRows + 1;
  const auto expanded_out_cols = (outputCols - 1) * strideCols + 1;
  const auto padded_out_planes = inputPlanes + kernelPlanes - 1;
  const auto padded_out_rows = inputRows + kernelRows - 1;
  const auto padded_out_cols = inputCols + kernelCols - 1;
  const auto top_pad_planes = kernelPlanes - 1 - padding_top_z;
  const auto top_pad_rows = kernelRows - 1 - padding_top;
  const auto left_pad_cols = kernelCols - 1 - padding_left;
  const auto bottom_pad_planes =
      padded_out_planes - expanded_out_planes - top_pad_planes;
  const auto bottom_pad_rows =
      padded_out_rows - expanded_out_rows - top_pad_rows;
  const auto right_pad_cols =
      padded_out_cols - expanded_out_cols - left_pad_cols;

  // Reorder output_backward dimensions.
  // Note: both branches use the same permutation values; only the meaning of
  // the axes differs between the two layouts.
  array<TensorIndex, 5> output_backward_shuffle;
  if (isColMajor) {
    // From: [out_depth, out_planes, out_rows, out_cols, batch]
    // To:   [batch, out_planes, out_rows, out_cols, out_depth]
    output_backward_shuffle = {4, 1, 2, 3, 0};
  } else {
    // From: [batch, out_cols, out_rows, out_planes, out_depth]
    // To:   [out_depth, out_cols, out_rows, out_planes, batch]
    output_backward_shuffle = {4, 1, 2, 3, 0};
  }

  // Reorder input dimensions.
  array<TensorIndex, 5> input_shuffle;
  if (isColMajor) {
    // From: [in_depth, in_planes, in_rows, in_cols, batch]
    // To:   [in_depth, batch, in_planes, in_rows, in_cols]
    input_shuffle = {0, 4, 1, 2, 3};
  } else {
    // From: [batch, in_cols, in_rows, in_planes, in_depth]
    // To:   [in_cols, in_rows, in_planes, batch, in_depth]
    input_shuffle = {1, 2, 3, 0, 4};
  }

  // Input is playing the role of a "kernel" in this convolution. Collapse it
  // to 2D: channels on one axis, everything else on the other.
  DSizes<TensorIndex, 2> input_dims;
  if (isColMajor) {
    input_dims[0] = kernelChannels;
    input_dims[1] = batch * inputPlanes * inputRows * inputCols;
  } else {
    input_dims[1] = kernelChannels;
    input_dims[0] = inputCols * inputRows * inputPlanes * batch;
  }

  // Molds the output of the patch extraction result into a 2D tensor:
  // - the first dimension (dims[0]): the patch values to be multiplied with the
  // kernels
  // - the second dimension (dims[1]): everything else
  DSizes<TensorIndex, 2> pre_contract_dims;
  if (isColMajor) {
    pre_contract_dims[0] = batch * inputPlanes * inputRows * inputCols;
    pre_contract_dims[1] =
        kernelPlanes * kernelRows * kernelCols * kernelFilters;
  } else {
    pre_contract_dims[1] = inputCols * inputRows * inputPlanes * batch;
    pre_contract_dims[0] =
        kernelFilters * kernelCols * kernelRows * kernelPlanes;
  }

  // We will contract along the collapsed dimension that contains the
  // batch, inputPlanes, inputRows and inputCols.
  array<IndexPair<TensorIndex>, 1> contract_dims;
  contract_dims[0] = IndexPair<TensorIndex>(1, 0);

  // Dimensions after contraction.
  DSizes<TensorIndex, NumDims> post_contract_dims;
  if (isColMajor) {
    post_contract_dims[0] = kernelChannels;
    post_contract_dims[1] = kernelPlanes;
    post_contract_dims[2] = kernelRows;
    post_contract_dims[3] = kernelCols;
    post_contract_dims[4] = kernelFilters;
  } else {
    post_contract_dims[0] = kernelFilters;
    post_contract_dims[1] = kernelCols;
    post_contract_dims[2] = kernelRows;
    post_contract_dims[3] = kernelPlanes;
    post_contract_dims[4] = kernelChannels;
  }

  // Reorder output of contraction to valid filter shape.
  array<TensorIndex, 5> kernel_shuffle;
  if (isColMajor) {
    // From: [in_depth, kernel_planes, kernel_rows, kernel_cols, out_depth]
    // To:   [out_depth, in_depth, kernel_planes, kernel_rows, kernel_cols]
    kernel_shuffle = {4, 0, 1, 2, 3};
  } else {
    // From: [out_depth, kernel_cols, kernel_rows, kernel_planes, in_depth]
    // To:   [kernel_cols, kernel_rows, kernel_planes, in_depth, out_depth]
    kernel_shuffle = {1, 2, 3, 4, 0};
  }

  // Reverse kernel backprop dimensions (the spatial axes only).
  array<TensorIndex, 5> kernel_reverse;
  if (isColMajor) {
    kernel_reverse = {false, false, true, true, true};
  } else {
    kernel_reverse = {true, true, true, false, false};
  }

  // Create convolution input (aka source of patches) from output backward
  // tensor by shuffling dimensions.
  const auto the_input =
      output_backward.shuffle(output_backward_shuffle).eval();

  // Create convolution kernel (aka filter) from input by shuffling and
  // reshaping.
  const auto the_kernel =
      input.shuffle(input_shuffle).reshape(input_dims).eval();

  // The kernel backprop is computed as a convolution in which the original
  // input plays the role of the kernel and the (inflated, padded)
  // output_backward plays the role of the data: patches are extracted with
  // stride 1 and inflation strides equal to the forward strides. The result
  // is then reshaped to the 5D kernel shape, shuffled into filter layout,
  // and reversed along the spatial dimensions. The two branches of choose()
  // differ only in the operand order of the contraction.
  return choose(Cond<internal::traits<Input>::Layout == ColMajor>(),
                the_kernel.contract(
                    the_input
                        .extract_volume_patches(
                            inputPlanes, inputRows, inputCols, 1, 1, 1,
                            stridePlanes, strideRows, strideCols,
                            top_pad_planes, bottom_pad_planes, top_pad_rows,
                            bottom_pad_rows, left_pad_cols, right_pad_cols)
                        .reshape(pre_contract_dims),
                    contract_dims),
                the_input
                    .extract_volume_patches(
                        inputPlanes, inputRows, inputCols, 1, 1, 1,
                        stridePlanes, strideRows, strideCols, top_pad_planes,
                        bottom_pad_planes, top_pad_rows, bottom_pad_rows,
                        left_pad_cols, right_pad_cols)
                    .reshape(pre_contract_dims)
                    .contract(the_kernel, contract_dims))
      .reshape(post_contract_dims)
      .shuffle(kernel_shuffle)
      .reverse(kernel_reverse);
}
607
608} // end namespace Eigen
609
610#endif // TENSORFLOW_CORE_KERNELS_EIGEN_BACKWARD_CUBOID_CONVOLUTIONS_H_
611