CUDAGeneratorImpl.h
#pragma once

#include <c10/core/GeneratorImpl.h>
#include <ATen/core/Generator.h>

// TODO: this file should be in ATen/cuda, not top level

namespace at {

struct TORCH_CUDA_API CUDAGeneratorImpl : public c10::GeneratorImpl {
  // Constructors
  CUDAGeneratorImpl(DeviceIndex device_index = -1);
  ~CUDAGeneratorImpl() = default;

  // CUDAGeneratorImpl methods
  std::shared_ptr<CUDAGeneratorImpl> clone() const;
  void set_current_seed(uint64_t seed) override;
  uint64_t current_seed() const override;
  uint64_t seed() override;
  void set_philox_offset_per_thread(uint64_t offset);
  uint64_t philox_offset_per_thread();
  std::pair<uint64_t, uint64_t> philox_engine_inputs(uint64_t increment);
  static DeviceType device_type();

private:
  CUDAGeneratorImpl* clone_impl() const override;
  uint64_t seed_ = default_rng_seed_val;
  uint64_t philox_offset_per_thread_ = 0;
};
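
// Usage sketch (illustrative, not part of the upstream header): how a CUDA
// kernel typically consumes the (seed, offset) pair returned by
// philox_engine_inputs(). The kernel name `rand_fill`, the helper
// `launch_rand_fill`, and the increment of 4 are assumptions for this example;
// the rest mirrors the caller pattern used by ATen CUDA kernels. Callers are
// expected to hold the generator's mutex while reserving counter space, since
// philox_engine_inputs advances philox_offset_per_thread_.
//
//   #include <cstdint>
//   #include <mutex>
//   #include <utility>
//   #include <curand_kernel.h>
//
//   __global__ void rand_fill(float* out, int64_t n,
//                             uint64_t seed, uint64_t offset) {
//     int idx = blockIdx.x * blockDim.x + threadIdx.x;
//     if (idx >= n) return;
//     // Each thread uses its own Philox subsequence; `offset` shifts the
//     // counter so successive launches draw fresh random numbers.
//     curandStatePhilox4_32_10_t state;
//     curand_init(seed, idx, offset, &state);
//     out[idx] = curand_uniform(&state);
//   }
//
//   void launch_rand_fill(at::CUDAGeneratorImpl* gen, float* out, int64_t n) {
//     std::pair<uint64_t, uint64_t> rng_engine_inputs;
//     {
//       std::lock_guard<std::mutex> lock(gen->mutex_);
//       // 4 = upper bound on random values drawn per thread (assumed here).
//       rng_engine_inputs = gen->philox_engine_inputs(4);
//     }
//     rand_fill<<<(n + 255) / 256, 256>>>(
//         out, n, rng_engine_inputs.first, rng_engine_inputs.second);
//   }
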
namespace cuda {
namespace detail {

  TORCH_CUDA_API const Generator& getDefaultCUDAGenerator(DeviceIndex device_index = -1);
  TORCH_CUDA_API Generator createCUDAGenerator(DeviceIndex device_index = -1);

} // namespace detail
} // namespace cuda
} // namespace at
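
// Usage sketch (illustrative, not part of the upstream header): obtaining and
// seeding the process-wide default CUDA generator. `seed_cuda_rng` is a
// hypothetical helper; getDefaultCUDAGenerator, createCUDAGenerator,
// Generator::mutex(), and Generator::set_current_seed() are existing ATen APIs.
//
//   #include <mutex>
//
//   void seed_cuda_rng(uint64_t seed, at::DeviceIndex device_index = -1) {
//     // Copying the Generator handle shares the underlying impl.
//     auto gen = at::cuda::detail::getDefaultCUDAGenerator(device_index);
//     // Hold the generator's mutex while mutating its state.
//     std::lock_guard<std::mutex> lock(gen.mutex());
//     gen.set_current_seed(seed);
//   }
//
// createCUDAGenerator() instead returns a fresh generator that is independent
// of the global default, which can be useful when a reproducible stream is
// needed without disturbing other callers.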