#pragma once

// @generated from ../tools/autograd/templates/VariableType.h

#include <ATen/core/Tensor.h>
#include <ATen/Context.h>

#include <c10/util/intrusive_ptr.h>

#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/autograd_not_implemented_fallback.h>

#include <cstdint> // for int64_t
#include <functional> // for function
#include <memory> // for unique_ptr
#include <string>
#include <vector>

namespace at {
  struct Quantizer;
} // namespace at

namespace torch { namespace autograd {

using Variable = at::Tensor;
using at::Context;
using at::Device;
using at::Dimname;
using at::DimnameList;
using at::Generator;
using at::IntArrayRef;
using at::MemoryFormat;
using at::QScheme;
using at::Scalar;
using at::ScalarType;
using at::Storage;
using at::Tensor;
using at::TensorList;
using at::TensorOptions;
using at::Quantizer;
// This is a temporary typedef to enable Quantizer in the aten native function
// API; we'll remove it once the Quantizer class is actually exposed to the
// frontend.
using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;
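// A minimal sketch of the kind of native-function signature the typedef is
// meant to enable; the function name below is hypothetical, not something
// declared by this header:
//   at::Tensor my_quantized_op(const at::Tensor& self, ConstQuantizerPtr quantizer);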
using c10::optional;

namespace VariableType {
  TORCH_API std::vector<at::DeprecatedTypeProperties*> allCUDATypes();
  TORCH_API std::vector<at::DeprecatedTypeProperties*> allCPUTypes();

  // Helpers used by the generated VariableType kernels to validate and unpack
  // Tensor arguments before dispatch; `name` and `pos` identify the argument
  // in error messages (unpack_opt tolerates an undefined tensor).
  at::Tensor & unpack(Tensor & t, const char * name, int pos);
  const at::Tensor & unpack(const Tensor & t, const char * name, int pos);
  at::Tensor unpack_opt(const Tensor & t, const char * name, int pos);
  std::vector<at::Tensor> unpack(at::ITensorListRef tl, const char * name, int pos);
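  // Illustrative only (assumed usage pattern, not declared here): the
  // generated VariableType.cpp kernels typically validate their inputs with
  // these helpers before recording the autograd graph and redispatching,
  // along the lines of
  //   auto& self_ = unpack(self, "self", 0);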
} // namespace VariableType

}} // namespace torch::autograd