1/*
2 * Licensed to the Apache Software Foundation (ASF) under one
3 * or more contributor license agreements. See the NOTICE file
4 * distributed with this work for additional information
5 * regarding copyright ownership. The ASF licenses this file
6 * to you under the Apache License, Version 2.0 (the
7 * "License"); you may not use this file except in compliance
8 * with the License. You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing,
13 * software distributed under the License is distributed on an
14 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 * KIND, either express or implied. See the License for the
16 * specific language governing permissions and limitations
17 * under the License.
18 */
19
20/*!
21 * \file src/contrib/ethosu/cascader/plan.h
22 * \brief Plan object for the NPU cascader
23 */
24#ifndef TVM_CONTRIB_ETHOSU_CASCADER_PLAN_H_
25#define TVM_CONTRIB_ETHOSU_CASCADER_PLAN_H_
26
27#include <tvm/node/reflection.h>
28#include <tvm/runtime/object.h>
29
30#include <functional>
31#include <unordered_map>
32#include <unordered_set>
33#include <vector>
34
35#include "graph.h"
36#include "tensor_config.h"
37
38namespace tvm {
39namespace contrib {
40namespace ethosu {
41namespace cascader {
42
43/*! \brief Node to represent a Plan */
44class PlanNode : public Object {
45 public:
46 void VisitAttrs(AttrVisitor* v);
47
48 /*! \return The TensorConfigs specified by the Plan */
49 const std::vector<TensorConfig>& GetTensorConfigs() const { return tensor_configs_; }
50 /*! \return The TensorConfigs which are 'open' meaning they are a Plan input/output but have
51 * INTERIOR state */
52 const std::vector<TensorConfig>& GetOpenConfigs() const { return open_configs_; }
53 /*! \return The TensorConfig of the Plan's output tensor */
54 const TensorConfig GetOutputConfig() const { return output_config_; }
55 /*! \return The Parts which are covered by the Plan */
56 const std::vector<Part>& GetPartGroup() const { return part_group_; }
57 /*! \return The memory region in which to store interior Plan buffers */
58 MemoryRegion const GetInteriorRegion() const { return interior_region_; }
59 /*!
60 * \return The interior memory used by the Plan in bytes.
61 * \note The interior memory usage is defined as being the memory required in the interior region
62 * to execute the Plan excluding input and output buffers.
63 */
64 int GetMemoryUsage() const { return memory_usage_; }
65 /*! \return The cycles taken to execute the Plan */
66 int GetCycles() const { return cycles_; }
67 /*! \return Whether the Plan is 'closed' meaning it has no 'open' TensorConfigs */
68 bool IsClosed() const { return open_configs_.size() == 0; }
69
70 static constexpr const char* _type_key = "contrib.ethosu.cascader.Plan";
71 TVM_DECLARE_FINAL_OBJECT_INFO(PlanNode, Object);
72
73 protected:
74 friend class Plan;
75
76 /*! \brief The TensorConfigs specified by the Plan */
77 std::vector<TensorConfig> tensor_configs_;
78 /*! \brief The TensorConfigs which are 'open' meaning they are a Plan input/output but have
79 * INTERIOR state */
80 std::vector<TensorConfig> open_configs_;
81 /*! \brief The TensorConfig of the Plan's output tensor */
82 TensorConfig output_config_;
83 /*! \brief The Parts which are covered by the Plan */
84 std::vector<Part> part_group_;
85 /*! \brief The memory region in which to store interior Plan buffers */
86 MemoryRegion interior_region_;
87 /*! \brief The interior memory used by the Plan in bytes */
88 int memory_usage_;
89 /*! \brief The cycles taken to execute the Plan */
90 int cycles_;
91};
92
93/*!
94 * \brief A class which describes how to schedule a subgraph of Parts together.
95 * \note A Plan takes the form of a subgraph of connected Parts (recorded in part_group) with
96 * TensorConfigs for all of the required Tensors (recorded in tensor_configs). This information can
97 * be used to produce a Tensor Expression schedule with inter-operator scheduling. A Plan is
98 * necessarily single-output such that all non-output Parts are 'computed_at'ed the scope of the
99 * output Part. This is what achieves the technique referred to as 'cascading'. A Plan also has an
100 * interior memory region which specifies the region of memory into which all the Plans intermediate
101 * buffers should be allocated.
102 *
103 * Additionally, a Plan contains some other information used during the Plan generation and
104 * selection algorithms. Both the memory and cycles required to run the Plan are accounted for so
105 * that Plans can be ranked and Pareto-culled on these metrics. Furthermore, the TensorConfigs which
106 * are 'open' is recorded indicating that these are valid points to merge with another Plan. A Plan
107 * can only be turned into a schedule if it has no 'open' TensorConfigs - at which point the Plan is
108 * said to be 'closed'.
109 */
class Plan : public ObjectRef {
 public:
  /*!
   * \brief Construct a Plan.
   * \param tensor_configs The TensorConfigs for all Tensors required by the Plan.
   * \param open_configs The TensorConfigs which are 'open' (a Plan input/output with
   * INTERIOR state).
   * \param output_config The TensorConfig of the Plan's output tensor.
   * \param part_group The Parts which are covered by the Plan.
   * \param interior_region The memory region in which to store interior Plan buffers.
   * \param memory_usage The interior memory used by the Plan in bytes.
   * \param cycles The cycles taken to execute the Plan.
   */
  Plan(const std::vector<TensorConfig>& tensor_configs,
       const std::vector<TensorConfig>& open_configs, const TensorConfig& output_config,
       const std::vector<Part>& part_group, const MemoryRegion& interior_region, int memory_usage,
       int cycles);
  /*!
   * \brief Merge two Plans which share an 'open' TensorConfig.
   * \param other The Plan to merge with.
   * \return The merged Plan.
   * \note The current Plan is referred to as the 'upper Plan' and the other Plan as the 'lower
   * Plan'. The 'open' output config of the upper Plan must be an 'open' input config of the lower
   * Plan. The Tensor referenced by these configs is the Tensor on which the two Plans will be
   * merged. The merge process does the following:
   *
   * The tensor config maps will be merged with TensorConfigs from the upper Plan taking priority.
   * The open configs will be merged with the TensorConfigs that are being merged having been
   * removed. The output config will be that of the lower Plan. The part groups will be merged. The
   * interior region is necessarily the same for both the upper and lower Plan. The cycles and
   * memory usage will be summed.
   */
  Plan Merge(const Plan& other) const;

  TVM_DEFINE_OBJECT_REF_METHODS(Plan, ObjectRef, PlanNode);
};
135
136} // namespace cascader
137} // namespace ethosu
138} // namespace contrib
139} // namespace tvm
140
// Hash functions for TensorConfig and Part sets
142namespace std {
143
144using TensorConfigSet = std::vector<::tvm::contrib::ethosu::cascader::TensorConfig>;
145using PartSet = std::vector<::tvm::contrib::ethosu::cascader::Part>;
146
147template <>
148struct hash<TensorConfigSet> {
149 std::size_t operator()(const TensorConfigSet& tensor_config_set) const {
150 size_t seed = 0;
151 for (const auto& tensor_config : tensor_config_set) {
152 seed ^= hash<::tvm::contrib::ethosu::cascader::TensorConfig>()(tensor_config);
153 }
154 return seed;
155 }
156};
157
158template <>
159struct equal_to<TensorConfigSet> {
160 bool operator()(const TensorConfigSet& lhs, const TensorConfigSet& rhs) const {
161 std::unordered_set<::tvm::contrib::ethosu::cascader::TensorConfig> lh_set(lhs.begin(),
162 lhs.end());
163 std::unordered_set<::tvm::contrib::ethosu::cascader::TensorConfig> rh_set(rhs.begin(),
164 rhs.end());
165 return lh_set == rh_set;
166 }
167};
168
169template <>
170struct hash<PartSet> {
171 std::size_t operator()(const PartSet& part_set) const {
172 size_t seed = 0;
173 for (const auto& part : part_set) {
174 seed ^= tvm::runtime::ObjectHash()(part);
175 }
176 return seed;
177 }
178};
179
180template <>
181struct equal_to<PartSet> {
182 bool operator()(const PartSet& lhs, const PartSet& rhs) const { return lhs == rhs; }
183};
184
185} // namespace std
186
187#endif // TVM_CONTRIB_ETHOSU_CASCADER_PLAN_H_
188