#pragma once

// @generated by torchgen/gen.py from NativeFunction.h

#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
#include <c10/core/QScheme.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <tuple>
#include <vector>


namespace at {
namespace native {
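// hamming_window(window_length): returns a 1-D Hamming window tensor of
// length `window_length`, using the default coefficients alpha = 0.54 and
// beta = 0.46. The `_out` variant writes the result into `out` and returns it.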
TORCH_API at::Tensor hamming_window(int64_t window_length, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
TORCH_API at::Tensor & hamming_window_out(int64_t window_length, at::Tensor & out);
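// Overload taking `periodic`: when true, the window is computed as if the
// length were `window_length + 1` and the last sample is dropped, which is
// the form typically used for spectral analysis.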
TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
TORCH_API at::Tensor & hamming_window_periodic_out(int64_t window_length, bool periodic, at::Tensor & out);
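// Overload additionally taking the `alpha` coefficient; `beta` keeps its
// default of 0.46.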
TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
TORCH_API at::Tensor & hamming_window_periodic_alpha_out(int64_t window_length, bool periodic, double alpha, at::Tensor & out);
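// Full overload taking both coefficients. For the non-periodic case the
// window is w[n] = alpha - beta * cos(2 * pi * n / (window_length - 1)).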
TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
TORCH_API at::Tensor & hamming_window_periodic_alpha_beta_out(int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out);
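// Usage sketch (callers normally go through the dispatcher-facing
// at::hamming_window from <ATen/ATen.h> rather than these at::native::
// symbols directly):
//
//   at::Tensor w = at::hamming_window(
//       /*window_length=*/400, /*periodic=*/true,
//       /*alpha=*/0.54, /*beta=*/0.46,
//       at::TensorOptions().dtype(at::kFloat));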
} // namespace native
} // namespace at