Commit b414d62

work in progress
smazouz42 committed Jul 10, 2024
1 parent 96c3f29 commit b414d62
Showing 4 changed files with 5 additions and 43 deletions.
4 changes: 4 additions & 0 deletions pyccel/ast/cudatypes.py
@@ -42,6 +42,10 @@ def __init__(self, dtype, rank, order, memory_location)
         self._memory_location = memory_location
         super().__init__()
 
+    @property
+    def memory_location(self):
+        return self._memory_location
+
     @lru_cache
     def __add__(self, other):
         test_type = np.zeros(1, dtype = pyccel_type_to_original_type[self.element_type])
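For context, a minimal usage sketch of the new property. The class name CudaArrayType and the dtype import are assumptions; only the constructor signature and the memory_location property appear in this diff:

    # Hypothetical usage; class name and dtype are assumed, not taken from this commit.
    from pyccel.ast.cudatypes import CudaArrayType
    from pyccel.ast.datatypes import PythonNativeFloat

    arr_type = CudaArrayType(PythonNativeFloat(), rank=2, order='C',
                             memory_location='device')
    print(arr_type.memory_location)   # -> 'device'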
39 changes: 0 additions & 39 deletions pyccel/ast/variable.py
@@ -56,11 +56,6 @@ class Variable(TypedAstNode):
         'stack' if memory should be allocated on the stack, represents stack arrays and scalars.
         'alias' if object allows access to memory stored in another variable.
 
-    memory_location: str, default: 'host'
-        'host' the variable can only be accessed by the CPU.
-        'device' the variable can only be accessed by the GPU.
-        'managed' the variable can be accessed by CPU and GPU and is being managed by the Cuda API (memory transfer is being done implicitly).
-
     is_const : bool, default: False
         Indicates if object is a const argument of a function.
@@ -147,10 +142,6 @@ def __init__(
             raise ValueError("memory_handling must be 'heap', 'stack' or 'alias'")
         self._memory_handling = memory_handling
 
-        if memory_location not in ('host', 'device', 'managed'):
-            raise ValueError("memory_location must be 'host', 'device' or 'managed'")
-        self._memory_location = memory_location
-
         if not isinstance(is_const, bool):
             raise TypeError('is_const must be a boolean.')
         self._is_const = is_const
@@ -333,36 +324,6 @@ def cls_base(self):
         """
         return self._cls_base
 
-    @property
-    def memory_location(self):
-        """ Indicates whether a Variable has a dynamic size
-        """
-        return self._memory_location
-
-    @memory_location.setter
-    def memory_location(self, memory_location):
-        if memory_location not in ('host', 'device', 'managed'):
-            raise ValueError("memory_location must be 'host', 'device' or 'managed'")
-        self._memory_location = memory_location
-
-    @property
-    def on_host(self):
-        """ Indicates if memory is only accessible by the CPU
-        """
-        return self.memory_location == 'host'
-
-    @property
-    def on_device(self):
-        """ Indicates if memory is only accessible by the GPU
-        """
-        return self.memory_location == 'device'
-
-    @property
-    def is_managed(self):
-        """ Indicates if memory is being managed by CUDA API
-        """
-        return self.memory_location == 'managed'
-
     @property
     def is_const(self):
         """
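After this removal, the memory location is no longer stored on the Variable itself but on its type (see the cucode.py change below). A hedged sketch of the equivalent check, assuming the variable's class_type exposes the memory_location property added in cudatypes.py:

    # `on_device` is a hypothetical helper, not part of this commit; types
    # without the new property fall back to 'host'.
    def on_device(var):
        """Return True if `var` is only accessible from the GPU."""
        return getattr(var.class_type, 'memory_location', 'host') == 'device'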
2 changes: 1 addition & 1 deletion pyccel/codegen/printing/cucode.py
@@ -157,7 +157,7 @@ def _print_Allocate(self, expr):
             raise NotImplementedError(f"Don't know how to index {variable.class_type} type")
         shape_Assign = "int64_t shape_Assign [] = {" + shape + "};\n"
         is_view = 'false' if variable.on_heap else 'true'
-        memory_location = expr.variable.memory_location
+        memory_location = variable.class_type.memory_location
         if memory_location in ('device', 'host'):
             memory_location = 'allocateMemoryOn' + str(memory_location).capitalize()
         else:
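As a standalone sketch, the mapping applied above turns the memory location stored on the type into one of the allocation modes declared in cuda_ndarrays.h (see the header change below). The behaviour of the hidden else branch is an assumption:

    # Mirrors the branch shown in _print_Allocate; raising an error for any
    # other value is a guess, since the else branch is not shown in this diff.
    def allocation_mode(memory_location):
        if memory_location in ('device', 'host'):
            return 'allocateMemoryOn' + str(memory_location).capitalize()
        raise NotImplementedError(f"Unknown memory location '{memory_location}'")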
3 changes: 0 additions & 3 deletions pyccel/stdlib/cuda_ndarrays/cuda_ndarrays.h
@@ -20,7 +20,6 @@ typedef enum e_types
 
 enum e_memory_locations
 {
-    managedMemory,
     allocateMemoryOnHost,
     allocateMemoryOnDevice
 };
@@ -39,8 +38,6 @@ typedef struct s_cuda_ndarray
     /* shape 'size of each dimension' */
     int64_t *shape;
-    /* strides 'number of elements to skip to get the next element' */
-    int64_t *strides;
     /* type of the array elements */
     t_types type;

EmilyBourne (Member) commented on Jul 11, 2024:

    Accidental change?
     /* type size of the array elements */
     int32_t type_size;

0 comments on commit b414d62