1#pragma once
2#include <torch/csrc/Export.h>
3#include <string>
4#include <unordered_map>
5
// `TorchScript` offers a simple optimization limit checker that can be
// configured through the environment variable `PYTORCH_JIT_OPT_LIMIT`.
// Its purpose is to limit how many optimizations a pass may perform,
// which is useful for debugging any pass.
10
// The opt limit checker is enabled on a per-file basis (hence per pass). For
// example, to limit constant propagation (`constant_propagation.cpp`), set
// `PYTORCH_JIT_OPT_LIMIT` to `constant_propagation=<opt_limit>`, where
// <opt_limit> is the maximum number of optimizations to perform for that
// pass (i.e. `PYTORCH_JIT_OPT_LIMIT="constant_propagation=<opt_limit>"`).
17
18// Multiple files can be configured by separating each file name with a colon
19// `:` as in the following example,
20// `PYTORCH_JIT_OPT_LIMIT="constant_propagation=<opt_limit>:dead_code_elimination=<opt_limit>"`
21
// Invoke the opt limiter by testing JIT_OPT_ALLOWED. It evaluates to true if
// the optimization limit has not yet been reached, and to false otherwise.
// Typical usage:

// if (!JIT_OPT_ALLOWED) {
//   GRAPH_DUMP(...); // supplied from jit_log
//   return;
// }
30
namespace torch {
namespace jit {

// Returns true if the pass identified by `pass_name` is still allowed to
// perform another optimization, i.e. its limit configured via the
// PYTORCH_JIT_OPT_LIMIT environment variable has not been reached; returns
// false otherwise. `pass_name` is typically a source file path — see the
// JIT_OPT_ALLOWED macro below, which passes __FILE__.
// NOTE(review): presumably each call also advances the per-pass counter so
// repeated checks eventually return false — confirm in the .cpp.
TORCH_API bool opt_limit(const char* pass_name);

// Convenience wrapper: identifies the pass by the calling source file
// (__FILE__), so each pass implemented in its own .cpp gets its own limit.
#define JIT_OPT_ALLOWED opt_limit(__FILE__)

} // namespace jit
} // namespace torch
40