#pragma once

// @generated from ../tools/autograd/templates/variable_factories.h

#include <ATen/core/Tensor.h>
#include <ATen/TracerMode.h>
#include <ATen/core/grad_mode.h>
#include <c10/util/ArrayRef.h>
#include <c10/core/MemoryFormat.h>
#include <torch/csrc/api/include/torch/detail/TensorDataContainer.h>
#include <torch/csrc/autograd/variable.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/from_blob.h>
#include <ATen/ops/_cudnn_init_dropout_state.h>
#include <ATen/ops/arange.h>
#include <ATen/ops/arange.h>
#include <ATen/ops/arange.h>
#include <ATen/ops/bartlett_window.h>
#include <ATen/ops/bartlett_window.h>
#include <ATen/ops/blackman_window.h>
#include <ATen/ops/blackman_window.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/_empty_affine_quantized.h>
#include <ATen/ops/_empty_per_channel_affine_quantized.h>
#include <ATen/ops/empty_quantized.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/empty_strided.h>
#include <ATen/ops/eye.h>
#include <ATen/ops/eye.h>
#include <ATen/ops/full.h>
#include <ATen/ops/full.h>
#include <ATen/ops/full_like.h>
#include <ATen/ops/from_file.h>
#include <ATen/ops/hann_window.h>
#include <ATen/ops/hann_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/kaiser_window.h>
#include <ATen/ops/kaiser_window.h>
#include <ATen/ops/kaiser_window.h>
#include <ATen/ops/linspace.h>
#include <ATen/ops/logspace.h>
#include <ATen/ops/ones.h>
#include <ATen/ops/ones.h>
#include <ATen/ops/ones_like.h>
#include <ATen/ops/scalar_tensor.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/rand_like.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint_like.h>
#include <ATen/ops/randint_like.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/randn_like.h>
#include <ATen/ops/randperm.h>
#include <ATen/ops/randperm.h>
#include <ATen/ops/range.h>
#include <ATen/ops/range.h>
#include <ATen/ops/zeros.h>
#include <ATen/ops/_efficientzerotensor.h>
#include <ATen/ops/zeros.h>
#include <ATen/ops/zeros_like.h>
#include <ATen/ops/sparse_compressed_tensor.h>
#include <ATen/ops/sparse_csr_tensor.h>
#include <ATen/ops/sparse_csc_tensor.h>
#include <ATen/ops/sparse_bsr_tensor.h>
#include <ATen/ops/sparse_bsc_tensor.h>
#include <ATen/ops/sparse_compressed_tensor.h>
#include <ATen/ops/sparse_csr_tensor.h>
#include <ATen/ops/sparse_csc_tensor.h>
#include <ATen/ops/sparse_bsr_tensor.h>
#include <ATen/ops/sparse_bsc_tensor.h>
#include <ATen/ops/_sparse_compressed_tensor_unsafe.h>
#include <ATen/ops/_sparse_csr_tensor_unsafe.h>
#include <ATen/ops/_sparse_csc_tensor_unsafe.h>
#include <ATen/ops/_sparse_bsr_tensor_unsafe.h>
#include <ATen/ops/_sparse_bsc_tensor_unsafe.h>
#include <ATen/ops/sparse_coo_tensor.h>
#include <ATen/ops/sparse_coo_tensor.h>
#include <ATen/ops/sparse_coo_tensor.h>
#include <ATen/ops/_sparse_coo_tensor_unsafe.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h>
#include <ATen/ops/_to_copy.h>
#include <ATen/ops/tril_indices.h>
#include <ATen/ops/triu_indices.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/fft_fftfreq.h>
#include <ATen/ops/fft_rfftfreq.h>
#endif

#include <functional>
#include <initializer_list>
#include <utility>

namespace torch {

/// NOTE: Currently `torch::tensor(...)` doesn't support mixed data types
/// (i.e. `torch::tensor({{bool, 2.0}})` doesn't work). We might be able to
/// support it in the future by iterating over all sub-lists to find
/// the largest data type that can represent all of the elements, or by using
/// variadic templates.
///
/// NOTE: C++ `torch::tensor` with a floating-point type or an `at::ArrayRef` / `std::vector` /
/// (nested) braced-init-list of floating-point types always produces a tensor of dtype
/// `torch::get_default_dtype()`, matching Python `torch.tensor` behavior.
///
/// NOTE: C++ `torch::tensor` with an integer type or an `at::ArrayRef` / `std::vector` /
/// (nested) braced-init-list of integer types always produces a tensor of dtype `at::kLong`
/// (aka. int64_t), matching Python `torch.tensor` behavior.
///
/// NOTE: The following dtypes are not supported by `torch::tensor` currently:
/// - `unsigned int`
/// - `unsigned long int`
/// - `unsigned long long int`
/// - `long long int`
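///
/// Example usage (an illustrative sketch only; it assumes the default dtype has
/// not been changed from the library default, `torch::kFloat`):
///
///     torch::Tensor a = torch::tensor({1.5, 2.5});             // dtype: torch::get_default_dtype()
///     torch::Tensor b = torch::tensor({1, 2, 3});              // dtype: torch::kLong
///     torch::Tensor c = torch::tensor({{1.0, 2.0}, {3.0, 4.0}},
///                                     torch::requires_grad()); // 2x2, requires_grad=true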
inline at::Tensor tensor(detail::TensorDataContainer tensor_data_container, const at::TensorOptions& options = {}) {
  return autograd::make_variable(
    // note: we remove the requires_grad setting from the TensorOptions because
    // it is ignored anyway (and we actually have an assertion that it isn't set
    // which would fail otherwise). We handle requires_grad explicitly here
    // instead of passing it through to the kernel.
    tensor_data_container.convert_to_tensor(options.requires_grad(c10::nullopt)),
    options.requires_grad());
}

/// A generic deleter function.
using Deleter = std::function<void(void*)>;
using at::MemoryFormat;

/// Exposes the given `data` as a `Tensor` without taking ownership of the
/// original data. `sizes` should specify the shape of the tensor, `strides` the
/// stride in each dimension. The `deleter` function (a
/// `std::function<void(void*)>`) will be called on the `data` when the Tensor
/// data would normally be deallocated. The `TensorOptions` specify additional
/// configuration options for the returned tensor, such as what type to
/// interpret the `data` as.
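///
/// Example usage (an illustrative sketch; the buffer, sizes, and strides below
/// are made up for demonstration, and the no-op deleter assumes the buffer
/// outlives the returned tensor):
///
///     std::vector<float> buffer(6, 1.0f);
///     at::Tensor t = torch::from_blob(
///         buffer.data(),
///         /*sizes=*/{2, 3},
///         /*strides=*/{3, 1},
///         /*deleter=*/[](void*) { /* no-op: buffer is owned by its scope */ },
///         torch::dtype(torch::kFloat));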
inline at::Tensor from_blob(
    void* data,
    at::IntArrayRef sizes,
    at::IntArrayRef strides,
    const Deleter& deleter,
    const at::TensorOptions& options = at::TensorOptions()) {
  at::Tensor tensor = ([&]() {
    at::AutoDispatchBelowAutograd guard; // TODO: remove
    at::tracer::impl::NoTracerDispatchMode tracer_guard;
    return at::from_blob(data, sizes, strides, deleter, options.requires_grad(c10::nullopt));
  })();
  return autograd::make_variable(tensor, options.requires_grad());
}

/// Exposes the given `data` as a `Tensor` without taking ownership of the
/// original data. `sizes` should specify the shape of the tensor, `strides` the
/// stride in each dimension. The `TensorOptions`
/// specify additional configuration options for the returned tensor, such as
/// what type to interpret the `data` as.
inline at::Tensor from_blob(
    void* data,
    at::IntArrayRef sizes,
    at::IntArrayRef strides,
    const at::TensorOptions& options = at::TensorOptions()) {
  at::Tensor tensor = ([&]() {
    at::AutoDispatchBelowAutograd guard; // TODO: remove
    at::tracer::impl::NoTracerDispatchMode tracer_guard;
    return at::from_blob(data, sizes, strides, options.requires_grad(c10::nullopt));
  })();
  return autograd::make_variable(tensor, options.requires_grad());
}

/// Exposes the given `data` as a `Tensor` without taking ownership of the
/// original data. `sizes` should specify the shape of the tensor. The `deleter`
/// function (a `std::function<void(void*)>`) will be called on the `data` when
/// the Tensor data would normally be deallocated. The `TensorOptions` specify
/// additional configuration options for the returned tensor, such as what type
/// to interpret the `data` as.
inline at::Tensor from_blob(
    void* data,
    at::IntArrayRef sizes,
    const Deleter& deleter,
    const at::TensorOptions& options = at::TensorOptions()) {
  at::Tensor tensor = ([&]() {
    at::AutoDispatchBelowAutograd guard; // TODO: remove
    at::tracer::impl::NoTracerDispatchMode tracer_guard;
    return at::from_blob(data, sizes, deleter, options.requires_grad(c10::nullopt));
  })();
  return autograd::make_variable(tensor, options.requires_grad());
}

/// Exposes the given `data` as a `Tensor` without taking ownership of the
/// original data. `sizes` should specify the shape of the tensor. The
/// `TensorOptions` specify additional configuration options for the returned
/// tensor, such as what type to interpret the `data` as.
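///
/// Example usage (an illustrative sketch; since no deleter is registered, the
/// caller must keep `data` alive for the lifetime of the returned tensor):
///
///     float data[] = {1.0f, 2.0f, 3.0f, 4.0f};
///     at::Tensor t = torch::from_blob(data, /*sizes=*/{2, 2}, torch::dtype(torch::kFloat));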
inline at::Tensor from_blob(
    void* data,
    at::IntArrayRef sizes,
    const at::TensorOptions& options = at::TensorOptions()) {
  at::Tensor tensor = ([&]() {
    at::AutoDispatchBelowAutograd guard; // TODO: remove
    at::tracer::impl::NoTracerDispatchMode tracer_guard;
    return at::from_blob(data, sizes, options.requires_grad(c10::nullopt));
  })();
  return autograd::make_variable(tensor, options.requires_grad());
}

inline at::Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_cudnn_init_dropout_state(dropout, train, dropout_seed, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor arange(const at::Scalar & end, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::arange(end, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::arange(start, end, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::arange(start, end, step, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor bartlett_window(int64_t window_length, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::bartlett_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor bartlett_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::bartlett_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor blackman_window(int64_t window_length, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::blackman_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor blackman_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::blackman_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor empty(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::empty(size, names, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor empty(at::IntArrayRef size, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::empty(size, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::empty_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _empty_affine_quantized(at::IntArrayRef size, at::TensorOptions options = {}, double scale = 1, int64_t zero_point = 0, c10::optional<at::MemoryFormat> memory_format = MemoryFormat::Contiguous) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_empty_affine_quantized(size, at::TensorOptions(options).requires_grad(c10::nullopt), scale, zero_point, memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = MemoryFormat::Contiguous) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_empty_per_channel_affine_quantized(size, scales, zero_points, axis, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor empty_quantized(at::IntArrayRef size, const at::Tensor & qtensor, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::empty_quantized(size, qtensor, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::empty_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::empty_strided(size, stride, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::empty_strided_symint(size, stride, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor eye(int64_t n, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::eye(n, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor eye(int64_t n, int64_t m, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::eye(n, m, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::full(size, fill_value, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::full(size, fill_value, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::full_symint(size, fill_value, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::full_like(self, fill_value, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor from_file(c10::string_view filename, c10::optional<bool> shared = c10::nullopt, c10::optional<int64_t> size = 0, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::from_file(filename, shared, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor hann_window(int64_t window_length, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::hann_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor hann_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::hann_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor hamming_window(int64_t window_length, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::hamming_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor hamming_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::hamming_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::hamming_window(window_length, periodic, alpha, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::hamming_window(window_length, periodic, alpha, beta, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor kaiser_window(int64_t window_length, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::kaiser_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor kaiser_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::kaiser_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::kaiser_window(window_length, periodic, beta, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor ones(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::ones(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor ones(at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::ones(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor ones_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::ones_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor ones_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::ones_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor scalar_tensor(const at::Scalar & s, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::scalar_tensor(s, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::rand(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::rand_symint(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::rand(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::rand_symint(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand(at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::rand(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::rand_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::rand(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::rand_symint(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::rand_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint(int64_t high, at::IntArrayRef size, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint(high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_symint(int64_t high, c10::SymIntArrayRef size, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint_symint(high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint(int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint(high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_symint(int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint_symint(high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint(low, high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_symint(int64_t low, int64_t high, c10::SymIntArrayRef size, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint_symint(low, high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint(low, high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_symint(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint_symint(low, high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_like(const at::Tensor & self, int64_t high, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint_like(self, high, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint_like(self, low, high, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn(at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randn(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randn_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randn(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randn_symint(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randn(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randn_symint(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randn(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randn_symint(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randn_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randperm(int64_t n, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randperm(n, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randperm(int64_t n, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randperm(n, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor range(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step = 1, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::range(start, end, step, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor range(const at::Scalar & start, const at::Scalar & end, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::range(start, end, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor zeros(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::zeros(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _efficientzerotensor(at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_efficientzerotensor(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor zeros(at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::zeros(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor zeros_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::zeros_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor zeros_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::zeros_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_compressed_tensor(compressed_indices, plain_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_csr_tensor(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_csc_tensor(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_bsr_tensor(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_bsc_tensor(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_compressed_tensor(compressed_indices, plain_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_csr_tensor(crow_indices, col_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_csc_tensor(ccol_indices, row_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_bsr_tensor(crow_indices, col_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_bsc_tensor(ccol_indices, row_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_compressed_tensor_unsafe(compressed_indices, plain_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_csr_tensor_unsafe(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_csc_tensor_unsafe(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_bsr_tensor_unsafe(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_bsc_tensor_unsafe(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_coo_tensor(at::IntArrayRef size, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_coo_tensor(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_coo_tensor(indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_coo_tensor(indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_coo_tensor_unsafe(indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_coo_tensor_unsafe_symint(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_coo_tensor_unsafe_symint(indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_coo_tensor_with_dims_and_tensors_symint(sparse_dim, dense_dim, size, indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _to_copy(const at::Tensor & self, at::TensorOptions options = {}, bool non_blocking = false, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_to_copy(self, at::TensorOptions(options).requires_grad(c10::nullopt), non_blocking, memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset = 0, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::tril_indices(row, col, offset, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor triu_indices(int64_t row, int64_t col, int64_t offset = 0, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::triu_indices(row, col, offset, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor normal(double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator = c10::nullopt, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::normal(mean, std, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor normal_symint(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator = c10::nullopt, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::normal_symint(mean, std, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor fft_fftfreq(int64_t n, double d = 1.0, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::fft_fftfreq(n, d, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor fft_rfftfreq(int64_t n, double d = 1.0, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::fft_rfftfreq(n, d, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}

} // namespace torch