Tweaks to the graph experiment.
LaurentMazare committed Oct 3, 2024
1 parent b295685 commit 1bb6885
Showing 1 changed file with 44 additions and 24 deletions.
candle-core/examples/cuda_basics.rs: 68 changes (44 additions, 24 deletions)
@@ -7,6 +7,8 @@ extern crate intel_mkl_src;
 use anyhow::Result;
 use candle_core::{Device, Tensor};
 
+const USE_CUDA_GRAPH: bool = true;
+
 fn cuda_graph() -> Result<()> {
     let device = Device::new_cuda_with_stream(0)?;
     let cu_device = match &device {
@@ -24,47 +26,65 @@ fn cuda_graph() -> Result<()>
         let v = Tensor::zeros(4096, candle_core::DType::F32, &device)?
             .to_dtype(candle_core::DType::BF16)?;
         let _x = x.mul(&u)?.broadcast_add(&v)?;
+        let _x = x.affine(1., 0.5)?;
+        x.slice_set(&u, 0, 0)?;
         device.synchronize()?;
     }
-    unsafe {
-        cudarc::driver::sys::lib()
+    if USE_CUDA_GRAPH {
+        unsafe {
+            cudarc::driver::sys::lib()
                 .cuStreamBeginCapture_v2(
                     *cu_stream,
                     cudarc::driver::sys::CUstreamCaptureMode_enum::CU_STREAM_CAPTURE_MODE_THREAD_LOCAL,
                 )
                 .result()?
         };
+    }
     {
         let u = Tensor::zeros((4096, 4096), candle_core::DType::F32, &device)?
             .to_dtype(candle_core::DType::BF16)?;
         let mut x = Tensor::zeros((4096, 4096), candle_core::DType::F32, &device)?
             .to_dtype(candle_core::DType::BF16)?;
-        let v = Tensor::zeros(4096, candle_core::DType::F32, &device)?
+        let v = Tensor::zeros((4096, 1), candle_core::DType::F32, &device)?
             .to_dtype(candle_core::DType::BF16)?;
-        for _i in 0..1 {
-            x = x.mul(&u)?.broadcast_add(&v)?;
+        for _i in 0..100 {
+            // x.slice_set(&u, 0, 0)?;
+            // x.broadcast_add(&v)?;
+            x = x.affine(1., 0.5)?;
+            // x = (&u + &x)?;
         }
     }
-    let cu_graph = unsafe {
-        let mut cu_graph = std::mem::MaybeUninit::uninit();
-        cudarc::driver::sys::lib()
-            .cuStreamEndCapture(*cu_stream, cu_graph.as_mut_ptr())
-            .result()?;
-        cu_graph.assume_init()
-    };
-    let cu_graph_e = unsafe {
-        let mut cu_graph_e = std::mem::MaybeUninit::uninit();
-        cudarc::driver::sys::lib()
-            .cuGraphInstantiateWithFlags(cu_graph_e.as_mut_ptr(), cu_graph, 0)
-            .result()?;
-        cu_graph_e.assume_init()
-    };
-    for _i in 0..100 {
-        unsafe {
-            cudarc::driver::sys::lib()
-                .cuGraphLaunch(cu_graph_e, *cu_stream)
-                .result()?
-        }
-        device.synchronize()?;
-    }
+    if USE_CUDA_GRAPH {
+        let cu_graph: cudarc::driver::sys::CUgraph = unsafe {
+            let mut cu_graph = std::mem::MaybeUninit::uninit();
+            cudarc::driver::sys::lib()
+                .cuStreamEndCapture(*cu_stream, cu_graph.as_mut_ptr())
+                .result()?;
+            cu_graph.assume_init()
+        };
+        let cu_graph_e: cudarc::driver::sys::CUgraphExec = unsafe {
+            let mut cu_graph_e = std::mem::MaybeUninit::uninit();
+            cudarc::driver::sys::lib()
+                .cuGraphInstantiateWithFlags(cu_graph_e.as_mut_ptr(), cu_graph, 0)
+                .result()?;
+            cu_graph_e.assume_init()
+        };
+        println!("graph captured!");
+        for i in 1..100 {
+            println!("graph exec {i}");
+            unsafe {
+                cudarc::driver::sys::lib()
+                    .cuGraphLaunch(cu_graph_e, *cu_stream)
+                    .result()?
+            }
+            println!("sync");
+            if let Err(err) = device.synchronize() {
+                println!("err: {err:?}")
+            }
+            println!("done syncing");
+        }
+    } else {
+        device.synchronize()?;
+    }
     Ok(())
 }
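The change above exercises the usual CUDA graph capture/replay sequence through cudarc's raw driver bindings: begin capture on the stream, enqueue the work to record, end capture to obtain a CUgraph, instantiate it into a CUgraphExec, then launch the executable graph repeatedly. Below is a minimal sketch of that sequence distilled from the code in the diff; the helper name capture_and_replay, the record_work closure, and the replays count are illustrative only and not part of the commit.

// Illustrative sketch only: `capture_and_replay`, `record_work`, and `replays` are
// hypothetical names. Error handling mirrors the example's use of `.result()?`
// with an `anyhow::Result`.
unsafe fn capture_and_replay(
    cu_stream: cudarc::driver::sys::CUstream,
    record_work: impl FnOnce() -> anyhow::Result<()>,
    replays: usize,
) -> anyhow::Result<()> {
    // 1. Put the stream into capture mode: work launched on it is recorded, not run.
    cudarc::driver::sys::lib()
        .cuStreamBeginCapture_v2(
            cu_stream,
            cudarc::driver::sys::CUstreamCaptureMode_enum::CU_STREAM_CAPTURE_MODE_THREAD_LOCAL,
        )
        .result()?;
    // 2. Enqueue the work to record (the tensor ops in the example above).
    record_work()?;
    // 3. End capture to obtain the recorded CUgraph.
    let mut cu_graph = std::mem::MaybeUninit::uninit();
    cudarc::driver::sys::lib()
        .cuStreamEndCapture(cu_stream, cu_graph.as_mut_ptr())
        .result()?;
    let cu_graph: cudarc::driver::sys::CUgraph = cu_graph.assume_init();
    // 4. Instantiate the graph into an executable CUgraphExec.
    let mut cu_graph_e = std::mem::MaybeUninit::uninit();
    cudarc::driver::sys::lib()
        .cuGraphInstantiateWithFlags(cu_graph_e.as_mut_ptr(), cu_graph, 0)
        .result()?;
    let cu_graph_e: cudarc::driver::sys::CUgraphExec = cu_graph_e.assume_init();
    // 5. Replay the whole recorded graph with one launch per iteration.
    for _ in 0..replays {
        cudarc::driver::sys::lib()
            .cuGraphLaunch(cu_graph_e, cu_stream)
            .result()?;
    }
    Ok(())
}

Replaying an already-instantiated graph resubmits all recorded kernels with a single cuGraphLaunch call per iteration, rather than re-issuing every individual launch from the host.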
