
Commit 57197b7

add ggml_cann prefix for acl funcs
1 parent 96e09b9 commit 57197b7

File tree

4 files changed: +302 -284 lines changed


ggml/src/ggml-cann/acl_tensor.cpp (+23 -21)
@@ -25,7 +25,7 @@
 #include <algorithm>
 #include <cstring>
 
-aclDataType type_mapping(ggml_type type) {
+aclDataType ggml_cann_type_mapping(ggml_type type) {
     switch (type) {
         case GGML_TYPE_F32:
             return ACL_FLOAT;
@@ -43,8 +43,9 @@ aclDataType type_mapping(ggml_type type) {
     return ACL_DT_UNDEFINED;
 }
 
-aclTensor* create_acl_tensor(const ggml_tensor* tensor, int64_t* ne, size_t* nb,
-                             int64_t dims, aclFormat format, size_t offset) {
+aclTensor* ggml_cann_create_tensor(const ggml_tensor* tensor, int64_t* ne,
+                                   size_t* nb, int64_t dims, aclFormat format,
+                                   size_t offset) {
     // If tensor is bcasted, Up to GGML_MAX_DIMS additional dimensions will be
     // added.
     int64_t acl_ne[GGML_MAX_DIMS * 2], acl_stride[GGML_MAX_DIMS * 2];
@@ -71,15 +72,15 @@ aclTensor* create_acl_tensor(const ggml_tensor* tensor, int64_t* ne, size_t* nb,
     std::reverse(acl_ne, acl_ne + final_dims);
     std::reverse(acl_stride, acl_stride + final_dims);
 
-    aclTensor* acl_tensor =
-        aclCreateTensor(acl_ne, final_dims, type_mapping(tensor->type),
-                        acl_stride, offset / ggml_element_size(tensor), format,
-                        &acl_storage_len, 1, tensor->data);
+    aclTensor* acl_tensor = aclCreateTensor(
+        acl_ne, final_dims, ggml_cann_type_mapping(tensor->type), acl_stride,
+        offset / ggml_element_size(tensor), format, &acl_storage_len, 1,
+        tensor->data);
 
     return acl_tensor;
 }
 
-bool need_bcast(const ggml_tensor* t0, const ggml_tensor* t1) {
+bool ggml_cann_need_bcast(const ggml_tensor* t0, const ggml_tensor* t1) {
     for (int i = 0; i < GGML_MAX_DIMS; i++) {
         if (t1->ne[i] != t0->ne[i] && t1->ne[i] != 1) {
             return true;
@@ -88,9 +89,10 @@ bool need_bcast(const ggml_tensor* t0, const ggml_tensor* t1) {
     return false;
 }
 
-aclTensor* create_acl_tensor(void* data_ptr, aclDataType dtype,
-                             size_t type_size, int64_t* ne, size_t* nb,
-                             int64_t dims, aclFormat format, size_t offset) {
+aclTensor* ggml_cann_create_tensor(void* data_ptr, aclDataType dtype,
+                                   size_t type_size, int64_t* ne, size_t* nb,
+                                   int64_t dims, aclFormat format,
+                                   size_t offset) {
     int64_t tmp_ne[GGML_MAX_DIMS * 2];
     int64_t tmp_stride[GGML_MAX_DIMS * 2];
 
@@ -114,9 +116,11 @@ aclTensor* create_acl_tensor(void* data_ptr, aclDataType dtype,
     return acl_tensor;
 }
 
-int64_t get_bcast_shape(const ggml_tensor* src0, const ggml_tensor* src1,
-                        int64_t* bcast_src0_ne, int64_t* bcast_src1_ne,
-                        size_t* bcast_src0_nb, size_t* bcast_src1_nb) {
+int64_t ggml_cann_get_bcast_shape(const ggml_tensor* src0,
+                                  const ggml_tensor* src1,
+                                  int64_t* bcast_src0_ne,
+                                  int64_t* bcast_src1_ne, size_t* bcast_src0_nb,
+                                  size_t* bcast_src1_nb) {
     GGML_ASSERT(ggml_can_repeat(src1, src0));
     int bcast_dim_cnt = 0;
     for (int i = 0; i < GGML_MAX_DIMS; i++) {
@@ -140,13 +144,11 @@ int64_t get_bcast_shape(const ggml_tensor* src0, const ggml_tensor* src1,
     return bcast_dim_cnt;
 }
 
-int64_t get_mul_mat_bcast_shape(const int64_t* input_ne,
-                                const int64_t* weight_ne, const int64_t* dst_ne,
-                                const size_t* input_nb, const size_t* weight_nb,
-                                const size_t* dst_nb, int64_t* bcast_input_ne,
-                                int64_t* bcast_weight_ne, int64_t* bcast_dst_ne,
-                                size_t* bcast_input_nb, size_t* bcast_weight_nb,
-                                size_t* bcast_dst_nb) {
+int64_t ggml_cann_get_mulmat_bcast_shape(
+    const int64_t* input_ne, const int64_t* weight_ne, const int64_t* dst_ne,
+    const size_t* input_nb, const size_t* weight_nb, const size_t* dst_nb,
+    int64_t* bcast_input_ne, int64_t* bcast_weight_ne, int64_t* bcast_dst_ne,
+    size_t* bcast_input_nb, size_t* bcast_weight_nb, size_t* bcast_dst_nb) {
     // input and dst shoule in same shape, except first two dims.
     GGML_ASSERT(input_ne[2] == dst_ne[2]);
     GGML_ASSERT(input_ne[3] == dst_ne[3]);
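
For orientation, a minimal caller-side sketch of the renamed entry point. This is not part of the commit; the helper name is hypothetical and the ACL_CHECK/aclDestroyTensor teardown is an assumption based on typical ggml-cann/aclnn usage. With the defaulted ne/nb/dims arguments, ggml_cann_create_tensor uses the tensor's own shape and strides:

    #include "acl_tensor.h"

    // Hypothetical helper: wrap a ggml_tensor for an aclnn operator call.
    static void example_wrap(const ggml_tensor* src) {
        // Formerly create_acl_tensor(src); defaults pick up src's own ne/nb.
        aclTensor* acl_src = ggml_cann_create_tensor(src);

        // ... hand acl_src to an aclnn operator here ...

        // Release the ACL handle once the operator has consumed it.
        ACL_CHECK(aclDestroyTensor(acl_src));
    }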

ggml/src/ggml-cann/acl_tensor.h (+22 -24)
@@ -38,7 +38,7 @@
  * @return The corresponding aclDataType. If the input type is not recognized,
  * ACL_DT_UNDEFINED is returned.
  */
-aclDataType type_mapping(ggml_type type);
+aclDataType ggml_cann_type_mapping(ggml_type type);
 
 /**
  * @brief Creates an ACL tensor from a ggml_tensor with optional shape.
@@ -59,7 +59,7 @@ aclDataType type_mapping(ggml_type type);
  * @param offset Offset in bytes for the ACL tensor data. Defaults to 0.
  * @return Pointer to the created ACL tensor.
  */
-aclTensor* create_acl_tensor(const ggml_tensor* tensor, int64_t* ne = nullptr,
+aclTensor* ggml_cann_create_tensor(const ggml_tensor* tensor, int64_t* ne = nullptr,
                              size_t* nb = nullptr, int64_t dims = 0,
                              aclFormat format = ACL_FORMAT_ND,
                              size_t offset = 0);
@@ -83,7 +83,7 @@ aclTensor* create_acl_tensor(const ggml_tensor* tensor, int64_t* ne = nullptr,
  * @param offset Offset in bytes for the ACL tensor data. Defaults to 0.
  * @return Pointer to the created ACL tensor.
  */
-aclTensor* create_acl_tensor(void* data_ptr, aclDataType dtype,
+aclTensor* ggml_cann_create_tensor(void* data_ptr, aclDataType dtype,
                              size_t type_size, int64_t* ne, size_t* nb,
                              int64_t dims, aclFormat format = ACL_FORMAT_ND,
                              size_t offset = 0);
@@ -104,7 +104,7 @@ aclTensor* create_acl_tensor(void* data_ptr, aclDataType dtype,
  * to 1. If such a dimension is found, broadcasting is required to align t1
  * with t0 for element-wise operations.
  */
-bool need_bcast(const ggml_tensor* t0, const ggml_tensor* t1);
+bool ggml_cann_need_bcast(const ggml_tensor* t0, const ggml_tensor* t1);
 
 /**
  * @brief Computes broadcast shapes and strides for two ggml_tensors.
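
To make the renamed predicate concrete, a short shape walkthrough derived directly from the loop shown in acl_tensor.cpp above (the shapes themselves are hypothetical):

    // t0->ne = {4, 6, 2, 2}, t1->ne = {4, 1, 2, 2}
    //   -> every dimension of t1 that differs from t0 equals 1,
    //      so ggml_cann_need_bcast returns false.
    // t0->ne = {4, 6, 2, 2}, t1->ne = {4, 3, 2, 2}
    //   -> dim 1 differs and is not 1 (3 vs 6), so it returns true
    //      and the caller should compute broadcast shapes (see BCAST_SHAPE below).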
@@ -159,19 +159,19 @@ bool need_bcast(const ggml_tensor* t0, const ggml_tensor* t1);
  * dim1 in a inserted dim, should add nb for dim1,
  * and all other nb moves to next in order.
  */
-int64_t get_bcast_shape(const ggml_tensor* src0, const ggml_tensor* src1,
+int64_t ggml_cann_get_bcast_shape(const ggml_tensor* src0, const ggml_tensor* src1,
                         int64_t* bcast_ne_src0, int64_t* bcast_ne_src1,
                         size_t* bcast_nb_src0, size_t* bcast_nb_src1);
 
 // Bcast macro to avoid duplicate code.
-#define BCAST_SHAPE(src0, src1)                                               \
-    int64_t bcast_##src0##_ne[GGML_MAX_DIMS * 2];                             \
-    int64_t bcast_##src1##_ne[GGML_MAX_DIMS * 2];                             \
-    size_t bcast_##src0##_nb[GGML_MAX_DIMS * 2];                              \
-    size_t bcast_##src1##_nb[GGML_MAX_DIMS * 2];                              \
-    int64_t bcast_dims =                                                      \
-        get_bcast_shape(src0, src1, bcast_##src0##_ne, bcast_##src1##_ne,     \
-                        bcast_##src0##_nb, bcast_##src1##_nb);
+#define BCAST_SHAPE(src0, src1)                                               \
+    int64_t bcast_##src0##_ne[GGML_MAX_DIMS * 2];                             \
+    int64_t bcast_##src1##_ne[GGML_MAX_DIMS * 2];                             \
+    size_t bcast_##src0##_nb[GGML_MAX_DIMS * 2];                              \
+    size_t bcast_##src1##_nb[GGML_MAX_DIMS * 2];                              \
+    int64_t bcast_dims = ggml_cann_get_bcast_shape(                           \
+        src0, src1, bcast_##src0##_ne, bcast_##src1##_ne, bcast_##src0##_nb,  \
+        bcast_##src1##_nb);
 
 #define BCAST_PARAM(tensor) bcast_##tensor##_ne, bcast_##tensor##_nb, bcast_dims
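
A sketch of how an element-wise op would combine these macros with the renamed functions. The op skeleton is hypothetical, but the pattern follows directly from the macro definitions above:

    // Hypothetical binary op: broadcast src1 against src0 only if required.
    static void example_binary_op(ggml_tensor* dst) {
        ggml_tensor* src0 = dst->src[0];
        ggml_tensor* src1 = dst->src[1];

        aclTensor* acl_src0;
        aclTensor* acl_src1;
        if (ggml_cann_need_bcast(src0, src1)) {
            // Declares bcast_src0_ne/nb, bcast_src1_ne/nb and bcast_dims,
            // filled in by ggml_cann_get_bcast_shape.
            BCAST_SHAPE(src0, src1)
            // BCAST_PARAM(t) pastes bcast_t_ne, bcast_t_nb, bcast_dims.
            acl_src0 = ggml_cann_create_tensor(src0, BCAST_PARAM(src0));
            acl_src1 = ggml_cann_create_tensor(src1, BCAST_PARAM(src1));
        } else {
            acl_src0 = ggml_cann_create_tensor(src0);
            acl_src1 = ggml_cann_create_tensor(src1);
        }
        // ... run the aclnn kernel, then destroy both aclTensor handles ...
    }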

@@ -201,17 +201,15 @@ int64_t get_bcast_shape(const ggml_tensor* src0, const ggml_tensor* src1,
  * shapes needed for matrix multiplication. It ensures that dimensions where
  * weight tensor requires expansion are appropriately handled to conform with
  * broadcasting rules.
- * @note compare with get_bcast_shape,mul_mat broadcast need add this new dim before
- * cast dim.
- * @sa get_bcast_shape
+ * @note compare with ggml_cann_get_bcast_shape,mul_mat broadcast need add this new dim
+ * before cast dim.
+ * @sa ggml_cann_get_bcast_shape
  */
-int64_t get_mul_mat_bcast_shape(const int64_t* input_ne,
-                                const int64_t* weight_ne, const int64_t* dst_ne,
-                                const size_t* input_nb, const size_t* weight_nb,
-                                const size_t* dst_nb, int64_t* bcast_input_ne,
-                                int64_t* bcast_weight_ne, int64_t* bcast_dst_ne,
-                                size_t* bcast_input_nb, size_t* bcast_weight_nb,
-                                size_t* bcast_dst_nb);
+int64_t ggml_cann_get_mulmat_bcast_shape(
+    const int64_t* input_ne, const int64_t* weight_ne, const int64_t* dst_ne,
+    const size_t* input_nb, const size_t* weight_nb, const size_t* dst_nb,
+    int64_t* bcast_input_ne, int64_t* bcast_weight_ne, int64_t* bcast_dst_ne,
+    size_t* bcast_input_nb, size_t* bcast_weight_nb, size_t* bcast_dst_nb);
 
 // Bcast macro to avoid duplicate code.
 #define BCAST_MUL_MAT_SHAPE(input, weight, dst) \
@@ -221,7 +219,7 @@ int64_t get_mul_mat_bcast_shape(const int64_t* input_ne,
     size_t bcast_##input##_nb[GGML_MAX_DIMS * 2]; \
     size_t bcast_##weight##_nb[GGML_MAX_DIMS * 2]; \
     size_t bcast_##dst##_nb[GGML_MAX_DIMS * 2]; \
-    int64_t bcast_dims = get_mul_mat_bcast_shape( \
+    int64_t bcast_dims = ggml_cann_get_mulmat_bcast_shape( \
         input->ne, weight->ne, dst->ne, input->nb, weight->nb, dst->nb, \
         bcast_##input##_ne, bcast_##weight##_ne, bcast_##dst##_ne, \
         bcast_##input##_nb, bcast_##weight##_nb, bcast_##dst##_nb);
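
And the matching sketch for the mul_mat variant (again hypothetical). Note that BCAST_PARAM still applies here, since BCAST_MUL_MAT_SHAPE defines the same bcast_<tensor>_ne/nb arrays plus bcast_dims:

    static void example_mul_mat(ggml_tensor* input, ggml_tensor* weight,
                                ggml_tensor* dst) {
        // Fills bcast_{input,weight,dst}_{ne,nb} and bcast_dims via the
        // renamed ggml_cann_get_mulmat_bcast_shape.
        BCAST_MUL_MAT_SHAPE(input, weight, dst)
        aclTensor* acl_input  = ggml_cann_create_tensor(input,  BCAST_PARAM(input));
        aclTensor* acl_weight = ggml_cann_create_tensor(weight, BCAST_PARAM(weight));
        aclTensor* acl_dst    = ggml_cann_create_tensor(dst,    BCAST_PARAM(dst));
        // ... launch the matmul kernel, then destroy the three handles ...
    }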
