/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/kernels/fractional_pool_common.h"

#include <algorithm>
#include <cmath>
#include <vector>

#include "tensorflow/core/lib/random/simple_philox.h"

namespace tensorflow {
static std::vector<int64_t> GeneratePoolingSequencePseudoRandom(
    int input_length, int output_length, GuardedPhiloxRandom* generator) {
  std::vector<int64_t> cum_seq(output_length + 1, 0);
  std::vector<int64_t> diff(output_length, 0);

  double alpha = static_cast<double>(input_length) / output_length;
  int k = input_length / output_length;
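  // Integer division gives k = floor(alpha); when input_length is not a
  // multiple of output_length this equals ceil(alpha) - 1, matching the
  // definition of k used in the derivation below.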

  // In the paper [1], the author proposes the following procedure to generate
  // a pseudorandom pooling region:
  //   1) Set a_0 = 1, a_Nout = Nin;
  //   2) a_i = ceil(alpha*(u+i))
  //      in which, i = 1, 2, ... , Nout-1
  //      u is a single random number in (0,1), shared by all i
  //      alpha = Nin/Nout in (1,2)
  //   The sequence {a_i} should satisfy a_i-a_{i-1} = 1 or 2.
  //   Note: for step 1), it makes more sense to set a_Nout = Nin+1; that way,
  //   a_i-a_{i-1} = 1 or 2 is also true for i = Nout.
  //
  // However, there are choices of alpha and u that will make
  // a_i - a_{i-1} > 2. This happens at the left boundary. For example, with
  // alpha = 1.732, u = 0.8, then a_1 = 4 and a_1-a_0 = 3.
  // This is why u_max1 is needed, i.e. u is drawn from (0,u_max1)
  // instead of (0,1).
  // Define k = ceil(alpha)-1; then we require:
  //   a_1 = alpha*(u+1) <= a_0+(k+1)
  //   ===> This gives u_max1 = (k+2)/alpha - 1.
  //
  // In addition, when extending the pooling sequence generation process to
  // alpha beyond (1,2), e.g. in (k,k+1), a check on the right boundary is also
  // needed to make sure the last gap a_Nout-a_{Nout-1} >= k. Solving it gives
  //   u_max2 = (Nin+1-k)/alpha - (Nout-1)
  // Here is an example where u > u_max2: alpha = 2.3, u = 0.7, u_max2 = 0.565,
  // Nin = 23, Nout = 10; the sequence from a_0 to a_10 is:
  //   [1, 4, 7, 9, 11, 14, 16, 18, 21, 23, 24]
  // The last gap is only 1.
  //
  // [1]: https://arxiv.org/abs/1412.6071
  double u_max1 = (k + 2) / alpha - 1;
  double u_max2 = (input_length + 1 - k) / alpha - (output_length - 1);
  double max_u = std::min(u_max1, u_max2);

  // Reserve samples from the guarded (thread-safe) generator, then draw the
  // single random number u, scaled by max_u.
  auto local_gen = generator->ReserveSamples32(2);
  random::SimplePhilox random(&local_gen);
  const double u = random.RandDouble() * max_u;

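  // Fix both endpoints: a_0 = 1 and, per the note above, a_Nout = Nin + 1.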
  cum_seq[0] = 1;
  cum_seq[output_length] = input_length + 1;
  for (int i = 1; i < output_length; ++i) {
    cum_seq[i] = static_cast<int>(ceil(alpha * (i + u)));
  }

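  // Convert the cumulative boundaries {a_i} into per-region pooling lengths.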
  for (int i = 0; i < output_length; ++i) {
    diff[i] = cum_seq[i + 1] - cum_seq[i];
  }

  return diff;
}

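// In the "random" (non-pseudorandom) mode, every pooling region still has
// length k or k+1, but which regions get the extra element is decided by a
// random shuffle rather than by a single random offset u.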
static std::vector<int64_t> GeneratePoolingSequenceRandom(
    int input_length, int output_length, GuardedPhiloxRandom* generator) {
  int k = input_length / output_length;
  int num_random_spot = input_length % output_length;
  std::vector<int64_t> diff(output_length, k);

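  // Give the first num_random_spot regions one extra row/col so the lengths
  // sum to input_length; the shuffle below randomizes which regions get it.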
  for (int i = 0; i < num_random_spot; ++i) {
    diff[i] += 1;
  }

  // Randomly shuffle this vector.
  auto local_gen = generator->ReserveSamples32(diff.size());
  random::SingleSampleAdapter<random::PhiloxRandom> single(&local_gen);
  const auto uniform = [&single](uint32 n) { return single() % n; };
  RandomShuffle(diff.begin(), diff.end(), uniform);

  return diff;
}

std::vector<int64_t> GeneratePoolingSequence(int input_length,
                                             int output_length,
                                             GuardedPhiloxRandom* generator,
                                             bool pseudo_random) {
  std::vector<int64_t> diff;
  // When output_length evenly divides input_length, regular pooling can handle
  // it: every element of diff is simply input_length / output_length.
  if (input_length % output_length == 0) {
    diff = std::vector<int64_t>(output_length, input_length / output_length);
  } else if (pseudo_random) {
    diff = GeneratePoolingSequencePseudoRandom(input_length, output_length,
                                               generator);
  } else {
    diff =
        GeneratePoolingSequenceRandom(input_length, output_length, generator);
  }

  // Sanity check.
  int k = input_length / output_length;
  for (int i = 0; i < output_length; ++i) {
    // k <= diff[i] <= k+1.
    DCHECK_GE(diff[i], k);
    DCHECK_LE(diff[i], k + 1);
  }

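  // Note: unlike the paper's {a_i}, which starts at 1, the returned cumulative
  // sequence starts at 0 and ends at input_length.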
  // Return cumulative sequence.
  std::vector<int64_t> cum_seq(output_length + 1, 0);
  for (int i = 1; i < cum_seq.size(); ++i) {
    cum_seq[i] = cum_seq[i - 1] + diff[i - 1];
  }
  return cum_seq;
}

}  // namespace tensorflow