/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_SCHEDULER_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_SCHEDULER_H_

#include <deque>
#include <functional>
#include <map>
#include <unordered_map>
#include <vector>

#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/graph/costmodel.h"

namespace tensorflow {

class SlackAnalysis {
 public:
  SlackAnalysis(const Graph* g, const CostModel* cost_model);

  ~SlackAnalysis() {}

  // Compute the earliest possible start time for each node, based on
  // a given cost model. 'asap_times' is indexed by node id.
  Microseconds ComputeAsap(std::vector<Microseconds>* asap_times);

  // Compute the latest possible start time for each node, based on
  // a given cost model. 'alap_times' is indexed by node id.
  Microseconds ComputeAlap(std::vector<Microseconds>* alap_times);

  // Compute the "slack" of each node. 'slacks' is indexed by node id.
  void ComputeSlack(std::vector<int64_t>* slacks);

 private:
  const Graph* graph_;
  const CostModel* cost_model_;

  TF_DISALLOW_COPY_AND_ASSIGN(SlackAnalysis);
};
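
// Illustrative sketch: 'ExampleComputeSlack' is a hypothetical helper (not
// part of the scheduler API) showing how SlackAnalysis might be driven,
// assuming the caller already owns the Graph and a populated CostModel.
inline void ExampleComputeSlack(const Graph* g, const CostModel* cost_model,
                                std::vector<int64_t>* slacks) {
  // Hypothetical example only: construct the analysis and compute per-node
  // slack; 'slacks' is indexed by node id.
  SlackAnalysis slack_analysis(g, cost_model);
  slack_analysis.ComputeSlack(slacks);
}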

class GreedyScheduler {
 public:
  // Simulation state for a single device.
  struct Sim {
    int degree_parallelism;  // Number of nodes the device can run at once.
    int num_running;         // Number of nodes currently running.
    std::vector<const Node*> ready_nodes;  // Nodes ready to be scheduled.
  };

  // A simulation event for a node, ordered by time.
  struct Event {
    const Node* node;    // The node this event refers to.
    Microseconds time;   // When the event occurs.
    bool is_completion;  // True if this event marks the completion of 'node'.

    bool operator<(const Event& other) const { return time < other.time; }
  };

  GreedyScheduler(const DeviceSet* devices, const CostModel* cost_model,
                  const Graph* g, std::vector<int64_t>* priority);

  ~GreedyScheduler();

  // Computes the start time of each node given the priorities of
  // the nodes.
  Microseconds ComputeSchedule(std::vector<Microseconds>* start_times);

 private:
  // Returns the ready node with the highest priority for a sim.
  const Node* GetNodeWithHighestPriority(const std::vector<const Node*>& nodes);

  const DeviceSet* devices_;
  const CostModel* cost_model_;
  const Graph* graph_;
  std::vector<int64_t>* priority_;
  std::unordered_map<string, Sim*> device_states_;

  TF_DISALLOW_COPY_AND_ASSIGN(GreedyScheduler);
};
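
// Illustrative sketch: 'ExampleGreedySchedule' is a hypothetical helper (not
// part of the scheduler API) showing how GreedyScheduler might be driven with
// caller-provided per-node priorities, e.g. values derived from SlackAnalysis.
inline Microseconds ExampleGreedySchedule(
    const DeviceSet* devices, const CostModel* cost_model, const Graph* g,
    std::vector<int64_t>* priorities, std::vector<Microseconds>* start_times) {
  // Hypothetical example only: simulate execution and record per-node start
  // times, indexed by node id.
  GreedyScheduler scheduler(devices, cost_model, g, priorities);
  return scheduler.ComputeSchedule(start_times);
}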

class PriorityScheduler {
 public:
  PriorityScheduler(const DeviceSet* devices, const CostModel* cost_model,
                    const Graph* g);

  ~PriorityScheduler() {}

  // Computes a schedule of the ideal start time for each node.
  // Returns the makespan (the total running time).
  Microseconds ComputeSchedule(std::vector<Microseconds>* start_times);

  // Computes a schedule and assigns priorities to the nodes based on
  // the schedule. Returns the makespan.
  Microseconds AssignPriorities(std::vector<int64_t>* priorities);

 private:
  const DeviceSet* devices_;
  const CostModel* cost_model_;
  const Graph* graph_;

  TF_DISALLOW_COPY_AND_ASSIGN(PriorityScheduler);
};
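
// Illustrative sketch: 'ExampleAssignPriorities' is a hypothetical helper (not
// part of the scheduler API) showing how PriorityScheduler might be used to
// derive per-node priorities from a simulated schedule.
inline Microseconds ExampleAssignPriorities(const DeviceSet* devices,
                                            const CostModel* cost_model,
                                            const Graph* g,
                                            std::vector<int64_t>* priorities) {
  // Hypothetical example only: returns the makespan of the computed schedule;
  // 'priorities' is indexed by node id.
  PriorityScheduler scheduler(devices, cost_model, g);
  return scheduler.AssignPriorities(priorities);
}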

}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_SCHEDULER_H_