forked from pytorch/pytorch
-
Notifications
You must be signed in to change notification settings - Fork 0
/
insert_guards.cpp
52 lines (45 loc) · 1.34 KB
/
insert_guards.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
#include <torch/csrc/jit/passes/insert_guards.h>
#include <torch/csrc/jit/runtime/profiling_record.h>
#include <memory>
#include <unordered_set>
namespace torch {
namespace jit {
// Pass worker: rewrites prim::profile nodes into explicit prim::Guard
// checks. A profile node that recorded a TensorType becomes a Guard on its
// input carrying that type; a profile node with no recorded type is simply
// bypassed. All profile nodes are removed from the graph afterwards.
struct GuardInserter {
  // Takes (shared) ownership of the graph to transform.
  // `explicit` prevents accidental implicit conversion from a bare
  // std::shared_ptr<Graph>.
  explicit GuardInserter(std::shared_ptr<Graph> graph) : graph_(std::move(graph)) {}

  // Runs the pass: insert guards throughout the top-level block (and,
  // recursively, nested blocks), then strip any remaining profiling
  // bookkeeping nodes from the graph.
  void run() {
    insertGuards(graph_->block());
    ProfilingRecord::removeProfilingNodes(graph_->block());
  }

 private:
  // Walks every node in `b`, replacing prim::profile nodes in place and
  // recursing into the sub-blocks of all other nodes.
  void insertGuards(Block* b) {
    for (auto it = b->nodes().begin(); it != b->nodes().end(); it++) {
      auto n = *it;
      if (n->kind() == prim::profile) {
        auto pttp = n->ty(attr::profiled_type)->cast<TensorType>();
        if (pttp) {
          // A tensor type was recorded: create a Guard on the profiled
          // input, stamp it with the recorded type, and reroute every user
          // of the profile node's output through the guard's output.
          auto guard = graph_->create(prim::Guard, {n->input()}, 1);
          auto go = guard->output();
          go->setType(pttp);
          guard->insertBefore(n);
          n->output()->replaceAllUsesWith(go);
        } else {
          // we didn't go down this path i.e
          // no profiling information is available
          n->output()->replaceAllUsesWith(n->input());
        }
        // destroyCurrent() is the iterator-safe way to erase the node we
        // are standing on; the loop's it++ then advances past it.
        it.destroyCurrent();
      } else {
        // Non-profile node: recurse into any nested blocks (e.g. the
        // bodies of control-flow nodes).
        for (Block* ib : n->blocks()) {
          insertGuards(ib);
        }
      }
    }
  }

  std::shared_ptr<Graph> graph_;
};
void InsertGuards(std::shared_ptr<Graph> graph) {
GuardInserter gi(std::move(graph));
gi.run();
}
} // namespace jit
} // namespace torch