
Commit

fix typos and dead code
maxtremblay committed Jan 24, 2025
1 parent 16882c4 commit 7611ffd
Showing 2 changed files with 1 addition and 2 deletions.
1 change: 0 additions & 1 deletion backend-comparison/benches/reduce.rs
@@ -18,7 +18,6 @@ struct ReduceBenchmark<B: Backend> {
 impl<B: Backend> ReduceBenchmark<B> {
     pub fn new(instruction: Instruction, device: B::Device) -> Self {
         let shape = Shape::new([4096, 512, 64]);
-        // let shape = Shape::new([128, 128, 64]);
         let tensor = Tensor::random(shape.clone(), Distribution::Default, &device);
         Self {
             instruction,
2 changes: 1 addition & 1 deletion crates/burn-jit/src/kernel/reduce/base.rs
@@ -9,7 +9,7 @@ use burn_tensor::{Shape, TensorData};
 pub use cubecl::reduce::instructions::{ArgMax, ArgMin, Mean, Prod, Sum};
 use cubecl::reduce::shared_sum;
 
-/// Specialize reduce function to computhe the sum of all elements of the `input` tensor and return
+/// Specialize reduce function to compute the sum of all elements of the `input` tensor and return
 /// the value into a single-element tensor of shape `1 x 1 x 1 x ...` with the same rank as `input`.
 ///
 /// This is expected to be faster for larger tensors than calling [reduce] with the `Sum` instruction.
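For context on the doc comment fixed above: below is a minimal, self-contained sketch of the contract it describes (sum every element of the input, return a single value in a shape of all ones with the same rank as the input). This is an illustration only; the function name `sum_all` and the flat-buffer-plus-shape representation are assumptions for the sketch, not the actual burn-jit kernel or its signature.

```rust
// Illustrative only: the doc comment describes a specialized sum that reduces
// *all* elements of the input and returns them as a single-element tensor
// whose shape is `1 x 1 x ... x 1` with the same rank as the input.
// The helper below (hypothetical, not part of burn-jit) shows that contract
// on a plain flat buffer plus an explicit shape.
fn sum_all(values: &[f32], shape: &[usize]) -> (Vec<f32>, Vec<usize>) {
    // A single value: the sum of every element of the input.
    let total: f32 = values.iter().sum();
    // Same rank as the input, with every dimension collapsed to 1.
    let reduced_shape = vec![1; shape.len()];
    (vec![total], reduced_shape)
}

fn main() {
    // A 2 x 3 "tensor" stored as a flat buffer.
    let values = [1.0_f32, 2.0, 3.0, 4.0, 5.0, 6.0];
    let (sum, shape) = sum_all(&values, &[2, 3]);
    assert_eq!(sum, vec![21.0]);
    assert_eq!(shape, vec![1, 1]); // rank preserved, all dims are 1
    println!("sum = {sum:?}, shape = {shape:?}");
}
```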
