import threading
import queue
import time

import cv2
from ultralytics import YOLO
# Initialize YOLO model
model = YOLO("/home/abhi/Documents/roich-360/yolov8n.engine")
#model.to("cuda") # Ensure the model is using GPU
# Define the optimized GStreamer pipeline for RICOH THETA camera
gst_pipeline = (
    "thetauvcsrc ! "
    "decodebin ! "
    "videoconvert ! "
    "video/x-raw,format=BGR ! "
    "queue max-size-buffers=1 leaky=0 ! "
    "appsink drop=true max-buffers=1"
)
# Initialize Video Capture
cap = cv2.VideoCapture(gst_pipeline, cv2.CAP_GSTREAMER)
if not cap.isOpened():
    raise IOError('Cannot open RICOH THETA camera')
# Set frame width and height to reduce resolution
#FRAME_WIDTH = 640
#FRAME_HEIGHT = 480
#cap.set(cv2.CAP_PROP_FRAME_WIDTH, FRAME_WIDTH)
#cap.set(cv2.CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT)
# Thread-safe queue with max size 1 to hold the latest frame
frame_queue = queue.Queue(maxsize=1)
# Event to signal thread termination
stop_event = threading.Event()
def frame_capture():
    """Continuously capture frames from the camera and put them into the queue."""
    while not stop_event.is_set():
        ret, frame = cap.read()
        if not ret:
            print("Failed to grab frame")
            stop_event.set()
            break
        # Resize the frame to reduce processing time
        #frame = cv2.resize(frame, (FRAME_WIDTH, FRAME_HEIGHT))
        # Put the latest frame into the queue, overwrite if necessary
        if not frame_queue.empty():
            try:
                frame_queue.get_nowait()
            except queue.Empty:
                pass
        frame_queue.put(frame)

def frame_detection():
    """Continuously get frames from the queue and perform YOLOv8 inference."""
    while not stop_event.is_set():
        start_time = time.time()
        try:
            frame = frame_queue.get(timeout=1)  # Wait for a frame
        except queue.Empty:
            continue  # No frame available, continue waiting
        # Run inference
        results = model(frame)
        # Annotate frame
        annotated_frame = results[0].plot()
        # Display the frame
        cv2.imshow("YOLO Inference", annotated_frame)
        # Check for 'q' key press to exit
        if cv2.waitKey(1) & 0xFF == ord("q"):
            stop_event.set()
            break
        time_elapsed = time.time() - start_time
        print(f"Inference time: {time_elapsed:.5f} seconds")

# Create and start threads
capture_thread = threading.Thread(target=frame_capture, daemon=True)
detection_thread = threading.Thread(target=frame_detection, daemon=True)
capture_thread.start()
detection_thread.start()
try:
    # Keep the main thread alive while the other threads are running
    while not stop_event.is_set():
        capture_thread.join(timeout=0.1)
        detection_thread.join(timeout=0.1)
except KeyboardInterrupt:
    stop_event.set()
# Cleanup: signal the threads to stop and let them exit before releasing the camera
stop_event.set()
capture_thread.join(timeout=1)
detection_thread.join(timeout=1)
cap.release()
cv2.destroyAllWindows()
Which version of the Jetson are you running? I can’t run object detection at 4K on the Jetson Nano at a reasonable speed.
Is there less latency with an x86 workstation with a discrete NVIDIA graphics card?
I’m not that familiar with GStreamer pipelines. You could try nvv4l2decoder, but your pipeline’s decodebin is likely already using it.
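For reference, a hardware-decode pipeline on the Jetson usually looks something like the sketch below. This is only a sketch: the exact element chain (h264parse, nvv4l2decoder, nvvidconv and the BGRx caps) is an assumption that depends on your JetPack and OpenCV builds, so verify each element with gst-inspect-1.0 first.

# Sketch only: replace decodebin with the Jetson hardware H.264 decoder.
# Element names (h264parse, nvv4l2decoder, nvvidconv) are assumptions; check
# that they exist in your GStreamer install with gst-inspect-1.0.
gst_pipeline_hw = (
    "thetauvcsrc ! "
    "h264parse ! "
    "nvv4l2decoder ! "              # decode on the Jetson's NVDEC engine
    "nvvidconv ! "                  # copy NVMM buffers to system memory
    "video/x-raw,format=BGRx ! "
    "videoconvert ! "
    "video/x-raw,format=BGR ! "
    "appsink drop=true max-buffers=1"
)
cap = cv2.VideoCapture(gst_pipeline_hw, cv2.CAP_GSTREAMER)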
Hello craig, thanks for the reply. I am using a Jetson Xavier NX. I will try it with a system with a discrete NVIDIA graphics card…
On the Jetson Xavier NX, what does tegrastats show when you are running YOLOv8?
RAM 1288/3964MB (lfb 28x4MB)
SWAP 210/1982MB (cached 20MB)
CPU [100%@1479,89%@1479,85%@1479,86%@1479]
EMC_FREQ 0%
GR3D_FREQ 35%
PLL@32C CPU@35C PMIC@100C
GPU@30.5C AO@40.5C thermal@32.25C
POM_5V_IN 5607/5561
POM_5V_GPU 118/98
POM_5V_CPU 2843/2791
Also, try using jetson-stats (the jtop tool).
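If you want to read the numbers from Python instead of the jtop UI, jetson-stats also exposes a small API. This is a sketch and assumes jetson-stats is installed (sudo pip3 install jetson-stats) and that its service is running; the exact keys in the stats dict vary by JetPack version.

# Sketch: poll board statistics through jetson-stats (the library behind jtop).
from jtop import jtop

with jtop() as jetson:
    while jetson.ok():
        stats = jetson.stats          # dict of current board statistics
        print(stats)                  # inspect the keys (GPU, CPU load, temps, ...)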
Is your GPU and CPU maxed out when YOLOv8 is running using OpenCV?
Make sure that you are using GPU acceleration (the GPU should show high utilization, not just the CPU).
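One quick sanity check is to confirm CUDA is visible to PyTorch and request the GPU explicitly at predict time. A TensorRT .engine model already runs on the GPU, so treat this only as a sketch; it reuses model and frame from the script above, and device=0/verbose=False are assumptions about the Ultralytics predict arguments on your install.

# Sketch: confirm CUDA is visible and request GPU inference explicitly.
import torch

print("CUDA available:", torch.cuda.is_available())
if torch.cuda.is_available():
    print("Device name:", torch.cuda.get_device_name(0))

# model and frame come from the script above; with a TensorRT .engine this
# should already run on the GPU, so this call is just a sanity check.
results = model(frame, device=0, verbose=False)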
@Daniyal_Ahmad also, please post camera model (such as THETA X or Z) and firmware version (example: v2.61.0 on THETA X)