// test_interpreter.cpp

#include <gtest/gtest.h>

#include <mutex>

#include <ATen/Parallel.h>

#include "test/cpp/jit/test_utils.h"
#include "torch/jit.h"
#include "torch/script.h"
#include "torch/torch.h"

namespace torch {
namespace jit {

class TypeCheckTest : public ::testing::Test {
 protected:
  TypeCheckTest() : interp(makeInterp()) {}

  InterpreterState interp;

 private:
  static InterpreterState makeInterp() {
    auto graph = std::make_shared<Graph>();
    std::unordered_map<std::string, Value*> vmap;
    parseIR(
        R"IR(
graph(%a.1 : Tensor,
      %b.1 : Tensor):
  %t0 : Float(2, 2, strides=[2, 1], device=cpu, requires_grad=1), %t1 : Float(3, 3, strides=[3, 1]), %type_matched : bool = prim::TypeCheck(%a.1, %b.1)
  return (%t0, %t1, %type_matched)
)IR",
        &*graph,
        vmap);
    Code function(graph, "");
    return InterpreterState(function);
  }
};
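
// The fixture's graph consists of a single prim::TypeCheck node: it checks
// %a.1 against Float(2, 2, strides=[2, 1], device=cpu, requires_grad=1) and
// %b.1 against Float(3, 3, strides=[3, 1]), and returns the two tensors plus
// a boolean that is true only when both inputs match their expected types.
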
TEST_F(TypeCheckTest, MatchingType) {
  // TypeCheck yields true: shape, requires_grad, and device all match.
  auto a = at::zeros({2, 2}, at::kFloat);
  auto b = at::ones({3, 3}, at::kFloat);
  a.set_requires_grad(true);
  a = a.to(at::kCPU);
  std::vector<IValue> stack({a, b});
  interp.run(stack);
  ASSERT_TRUE(exactlyEqual(stack[0].toTensor(), a));
  ASSERT_TRUE(exactlyEqual(stack[1].toTensor(), b));
  ASSERT_TRUE(stack[2].toBool());
}

TEST_F(TypeCheckTest, SizeMismatch) {
  auto a = at::zeros({2, 2}, at::kFloat);
  auto b = at::ones({2, 2}, at::kFloat); // Size mismatch
  a.set_requires_grad(true);
  a = a.to(at::kCPU);
  std::vector<IValue> stack({a, b});
  interp.run(stack);
  ASSERT_FALSE(stack[2].toBool());
}

TEST_F(TypeCheckTest, GradientMismatch) {
  auto a = at::zeros({2, 2}, at::kFloat);
  auto b = at::ones({3, 3}, at::kFloat);
  a = a.to(at::kCPU);
  a.set_requires_grad(false); // Gradient mismatch
  std::vector<IValue> stack({a, b});
  interp.run(stack);
  ASSERT_FALSE(stack[2].toBool());
}

TEST_F(TypeCheckTest, ScalarTypeMismatch) {
  auto a = at::zeros({2, 2}, at::kFloat);
  auto b = at::ones({3, 3}, at::kFloat);
  a = a.to(at::kCPU);
  a.set_requires_grad(true);
  a = a.to(at::kInt); // Scalar type mismatch
  std::vector<IValue> stack({a, b});
  interp.run(stack);
  ASSERT_FALSE(stack[2].toBool());
}
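
// Tests whose names end in _CUDA are filtered out by the test harness when
// CUDA is unavailable.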
TEST_F(TypeCheckTest, DeviceMismatch_CUDA) {
  auto a = at::zeros({2, 2}, at::kFloat);
  auto b = at::ones({3, 3}, at::kFloat);
  a.set_requires_grad(true);
  a = a.to(at::kCUDA); // Device mismatch
  std::vector<IValue> stack({a, b});
  interp.run(stack);
  ASSERT_FALSE(stack[2].toBool());
}

// TODO: These tests weren't doing anything.
// TEST(TypeCheckErrorTest, EmptyCheckRaises) {
//   // Test that an empty TypeCheck raises an internal assertion.
//   auto graph = std::make_shared<Graph>();
//   std::unordered_map<std::string, Value*> vmap;
//   EXPECT_ANY_THROW(parseIR(
//       R"IR(
// graph(%a.1 : Tensor,
//       %b.1 : Tensor):
//   %type_matched : bool = prim::TypeCheck()
//   return (%type_matched)
// )IR",
//       &*graph,
//       vmap));
// }

// TODO: These tests weren't doing anything.
// TEST(TypeCheckErrorTest, WrongInputOutputCountRaises) {
//   // Test the assertion that fires when num_inputs + 1 != num_outputs.
//   auto graph = std::make_shared<Graph>();
//   std::unordered_map<std::string, Value*> vmap;
//   EXPECT_ANY_THROW(parseIR(
//       R"IR(
// graph(%a.1 : Tensor,
//       %b.1 : Tensor):
//   %type_matched : bool = prim::TypeCheck(%a.1)
//   return (%type_matched)
// )IR",
//       &*graph,
//       vmap));
// }
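
// Runs one LSTM cell step through the interpreter and checks its outputs
// against the eager reference implementation (build_lstm/lstm come from
// test/cpp/jit/test_utils.h).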
TEST(InterpreterTest, Basic_CUDA) {
  constexpr int batch_size = 4;
  constexpr int input_size = 256;
  constexpr int seq_len = 32;
  int hidden_size = 2 * input_size;
  auto input = at::randn({seq_len, batch_size, input_size}, at::kCUDA);
  auto hx = at::randn({batch_size, hidden_size}, at::kCUDA);
  auto cx = at::randn({batch_size, hidden_size}, at::kCUDA);
  auto w_ih = t_def(at::randn({4 * hidden_size, input_size}, at::kCUDA));
  auto w_hh = t_def(at::randn({4 * hidden_size, hidden_size}, at::kCUDA));
  auto lstm_g = build_lstm();
  Code lstm_function(lstm_g, "");
  InterpreterState lstm_interp(lstm_function);
  auto outputs = run(lstm_interp, {input[0], hx, cx, w_ih, w_hh});
  std::tie(hx, cx) = lstm(input[0], hx, cx, w_ih, w_hh);
  ASSERT_TRUE(exactlyEqual(outputs[0], hx));
  ASSERT_TRUE(exactlyEqual(outputs[1], cx));
}

TEST(InterpreterTest, runAsyncBasicTest) {
  /*
  TODO: there are problems with the C++ parser handling script programs that
  involve fork; use the saved test module below for now.
  Issue: github.com/pytorch/pytorch/issues/46368

  The test module file was generated as follows:

    class DemoModule(torch.nn.Module):
      def forward(self):
        r1 = torch.jit.fork(torch.mm, torch.rand(100, 100), torch.rand(100, 100))
        r2 = torch.jit.fork(torch.mm, torch.rand(100, 100), torch.rand(100, 100))
        return r1.wait() + r2.wait()

    demo = DemoModule()
    torch.jit.save(torch.jit.script(demo), 'test_interpreter_async.pt')
  */
  std::string filePath(__FILE__);
  auto testModelFile = filePath.substr(0, filePath.find_last_of("/\\") + 1);
  testModelFile.append("test_interpreter_async.pt");
  auto model = load(testModelFile);
  auto graph = model.get_method("forward").graph();
  Code function(graph, "");
  auto asyncCounter = 0;
  std::mutex mtx;
  // A dummy launcher that delegates to at::launch but also counts the tasks
  // it receives, so the test can verify the custom launcher was used.
  auto launcher = [&](std::function<void()> f) {
    {
      std::lock_guard<std::mutex> lock(mtx);
      ++asyncCounter;
    }
    at::launch(std::move(f));
  };
  std::vector<IValue> stack;
  stack.push_back(model._ivalue());
  InterpreterState interp(function, launcher);
  interp.runAsync(stack)->wait();
  ASSERT_GT(asyncCounter, 0);
}
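
// Note: runAsync returns a Future; wait() blocks until the forked subtasks
// finish, so asyncCounter has its final value by the time it is checked.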
} // namespace jit
} // namespace torch