#pragma once

#include <cstdint>
#include <functional>
#include <memory>
#include <utility>
#include <vector>

#include <c10/util/strong_type.h>

#include <torch/csrc/profiler/kineto_shim.h>
#include <torch/csrc/profiler/util.h>
namespace torch {
namespace profiler {
namespace impl {

class RecordQueue;
struct Result;
namespace python_tracer {

using TraceKey = strong::type<
    uint64_t,
    struct TraceKey_,
    strong::regular,
    strong::hashable,
    strong::ostreamable>;

struct CompressedEvent {
  TraceKey key_;
  uint64_t system_tid_;
  kineto::DeviceAndResource kineto_info_;
  time_t enter_t_;
};
/*
Libtorch does not depend on Python (e.g. it cannot #include <Python.h>);
however, when we call the profiler from libtorch_python we need the profiler
to be able to ingest the data collected by the Python tracer
(via `PyEval_SetProfile`).

To solve this dependency issue we define a virtual base class and a function
to register a factory for it. The Python tracer implements this interface and
exposes itself by calling `registerTracer` from `torch/csrc/autograd/init.cpp`.
This registration pattern for faux Python dependencies in libtorch is common
in the PyTorch codebase. (An illustrative sketch follows the declarations
below.)
*/
struct TORCH_API PythonTracerBase {
  static std::unique_ptr<PythonTracerBase> make(RecordQueue* queue);
  virtual ~PythonTracerBase() = default;

  virtual void stop() = 0;
  virtual std::vector<std::shared_ptr<Result>> getEvents(
      std::function<time_t(approx_time_t)> time_converter,
      std::vector<CompressedEvent>& enters,
      time_t end_time_ns) = 0;
};

using MakeFn = std::unique_ptr<PythonTracerBase> (*)(RecordQueue*);
TORCH_API void registerTracer(MakeFn make_tracer);
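
// Illustrative sketch only: `PyTracer` and `makePyTracer` below are
// hypothetical names, not part of this API; the real tracer lives in
// libtorch_python. A concrete tracer subclasses `PythonTracerBase` and
// registers a factory so that libtorch can construct it without taking a
// Python dependency (presumably `PythonTracerBase::make` dispatches to the
// registered factory):
//
//   class PyTracer : public PythonTracerBase {
//    public:
//     explicit PyTracer(RecordQueue* queue);
//     void stop() override;
//     std::vector<std::shared_ptr<Result>> getEvents(
//         std::function<time_t(approx_time_t)> time_converter,
//         std::vector<CompressedEvent>& enters,
//         time_t end_time_ns) override;
//   };
//
//   std::unique_ptr<PythonTracerBase> makePyTracer(RecordQueue* queue) {
//     return std::make_unique<PyTracer>(queue);
//   }
//
//   // Called once when libtorch_python initializes
//   // (see torch/csrc/autograd/init.cpp):
//   registerTracer(&makePyTracer);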
} // namespace python_tracer
} // namespace impl
} // namespace profiler
} // namespace torch