Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

multi camera #62

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
62 changes: 62 additions & 0 deletions examples/exampleDoubleBodyTracking.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
import sys

import cv2

sys.path.insert(1, '../')
import pykinect_azure as pykinect

if __name__ == "__main__":

    # Initialize the library; if it is not found, pass the library path as argument.
    pykinect.initialize_libraries(track_body=True)

    # Modify camera configuration: body tracking only needs the depth stream,
    # so the color camera is switched off.
    device_config = pykinect.default_configuration
    device_config.color_resolution = pykinect.K4A_COLOR_RESOLUTION_OFF
    device_config.depth_mode = pykinect.K4A_DEPTH_MODE_WFOV_2X2BINNED

    # Start both devices (index 0 and 1) with the same configuration.
    device = pykinect.start_device(config=device_config, device_index=0)
    device1 = pykinect.start_device(config=device_config, device_index=1)

    # One body tracker per device.
    bodyTracker = pykinect.start_body_tracker()
    bodyTracker1 = pykinect.start_body_tracker()

    cv2.namedWindow('Depth image with skeleton', cv2.WINDOW_NORMAL)
    cv2.namedWindow('Depth image with skeleton1', cv2.WINDOW_NORMAL)
    while True:

        # Get a capture from each device.
        capture = device.update()
        capture1 = device1.update()

        # Feed each capture to its own body tracker.
        body_frame = bodyTracker.update(capture)
        body_frame1 = bodyTracker1.update(capture1)

        # Get the colored depth image from each capture.
        # BUG FIX: the original overwrote a single `ret` four times and only
        # checked the last one, so a failed frame from either camera reached
        # cv2.addWeighted() as None. Each retrieval is now checked.
        ret0, depth_color_image = capture.get_colored_depth_image()
        ret1, depth_color_image1 = capture1.get_colored_depth_image()
        if not (ret0 and ret1):
            continue

        # Get the colored body segmentation from each tracker frame.
        ret0, body_image_color = body_frame.get_segmentation_image()
        ret1, body_image_color1 = body_frame1.get_segmentation_image()
        if not (ret0 and ret1):
            continue

        # Overlay the body segmentation on the depth image.
        combined_image = cv2.addWeighted(depth_color_image, 0.6, body_image_color, 0.4, 0)
        combined_image1 = cv2.addWeighted(depth_color_image1, 0.6, body_image_color1, 0.4, 0)

        # Draw the skeletons on top.
        combined_image = body_frame.draw_bodies(combined_image)
        combined_image1 = body_frame1.draw_bodies(combined_image1)

        cv2.imshow('Depth image with skeleton', combined_image)
        cv2.imshow('Depth image with skeleton1', combined_image1)

        # Press q key to stop.
        if cv2.waitKey(1) == ord('q'):
            break

    cv2.destroyAllWindows()
146 changes: 71 additions & 75 deletions pykinect_azure/k4a/capture.py
Original file line number Diff line number Diff line change
@@ -1,116 +1,112 @@
import cv2

from pykinect_azure.k4a import _k4a
from pykinect_azure.k4a.image import Image
from pykinect_azure.k4a.transformation import Transformation
from pykinect_azure.utils.postProcessing import smooth_depth_image


class Capture:
    """Wraps a native k4a capture handle together with the device calibration
    needed to transform its images between depth- and color-camera spaces.

    All `get_*_image` methods return a `(ret, numpy_array)` tuple where `ret`
    is falsy when the underlying image could not be retrieved.
    """

    def __init__(self, capture_handle, calibration_handle):
        # Raw k4a_capture_t handle; set to None once released.
        self._handle = capture_handle
        self.calibration_handle = calibration_handle
        self.camera_transform = Transformation(calibration_handle)

    def __del__(self):
        self.reset()

    def is_valid(self):
        # Truthy while the native handle has not been released.
        return self._handle

    def handle(self):
        return self._handle

    def reset(self):
        # Release the native handle exactly once and forget it.
        if self.is_valid():
            self.release_handle()
            self._handle = None

    def release_handle(self):
        if self.is_valid():
            _k4a.k4a_capture_release(self._handle)

    @staticmethod
    def create():
        # BUG FIX: was `handle = _k4a.k4a_capture_t` (assigned the ctypes type,
        # not an instance) and `Capture._k4a.k4a_capture_create(...)` (the
        # Capture class has no `_k4a` attribute; the module-level binding is
        # the correct one).
        handle = _k4a.k4a_capture_t()
        _k4a.VERIFY(_k4a.k4a_capture_create(handle), "Create capture failed!")
        # NOTE(review): __init__ also requires a calibration handle, which this
        # factory cannot supply; the original `Capture(handle)` always raised
        # TypeError, so this path looks unused upstream — confirm before use.
        return Capture(handle, None)

    def get_color_image_object(self):
        return Image(_k4a.k4a_capture_get_color_image(self._handle))

    def get_depth_image_object(self):
        return Image(_k4a.k4a_capture_get_depth_image(self._handle))

    def get_ir_image_object(self):
        return Image(_k4a.k4a_capture_get_ir_image(self._handle))

    def get_transformed_depth_object(self):
        # Depth image re-projected into the color camera's viewpoint.
        return self.camera_transform.depth_image_to_color_camera(self.get_depth_image_object())

    def get_transformed_color_object(self):
        # Color image re-projected into the depth camera's viewpoint.
        return self.camera_transform.color_image_to_depth_camera(self.get_depth_image_object(),
                                                                 self.get_color_image_object())

    def get_pointcloud_object(self, calibration_type=_k4a.K4A_CALIBRATION_TYPE_DEPTH):
        return self.camera_transform.depth_image_to_point_cloud(self.get_depth_image_object(), calibration_type)

    def get_color_image(self):
        return self.get_color_image_object().to_numpy()

    def get_depth_image(self):
        return self.get_depth_image_object().to_numpy()

    def get_colored_depth_image(self):
        ret, depth_image = self.get_depth_image()
        if not ret:
            return ret, None
        return ret, self.color_depth_image(depth_image)

    def get_ir_image(self):
        return self.get_ir_image_object().to_numpy()

    def get_transformed_depth_image(self):
        return self.get_transformed_depth_object().to_numpy()

    def get_transformed_colored_depth_image(self):
        ret, transformed_depth_image = self.get_transformed_depth_image()
        # Guard added for consistency with get_colored_depth_image: do not
        # colorize a missing frame.
        if not ret:
            return ret, None
        return ret, self.color_depth_image(transformed_depth_image)

    def get_transformed_color_image(self):
        return self.get_transformed_color_object().to_numpy()

    def get_smooth_depth_image(self, maximum_hole_size=10):
        ret, depth_image = self.get_depth_image()
        # Guard added: the postprocessing helper cannot smooth a missing frame.
        if not ret:
            return ret, None
        return ret, smooth_depth_image(depth_image, maximum_hole_size)

    def get_smooth_colored_depth_image(self, maximum_hole_size=10):
        # Local renamed from `smooth_depth_image`, which shadowed the imported
        # helper of the same name.
        ret, smoothed = self.get_smooth_depth_image(maximum_hole_size)
        if not ret:
            return ret, None
        return ret, self.color_depth_image(smoothed)

    def get_pointcloud(self, calibration_type=_k4a.K4A_CALIBRATION_TYPE_DEPTH):
        ret, points = self.get_pointcloud_object(calibration_type).to_numpy()
        points = points.reshape((-1, 3))
        return ret, points

    @staticmethod
    def color_depth_image(depth_image):
        # alpha is fitted by visual comparison with Azure k4aviewer results.
        depth_color_image = cv2.convertScaleAbs(depth_image, alpha=0.05)
        depth_color_image = cv2.applyColorMap(depth_color_image, cv2.COLORMAP_JET)

        return depth_color_image
Loading