# yiking/process.py

from operator import itemgetter
from PIL import Image, ImageTk
import math
from dataclasses import dataclass
import numpy as np
import cv2
from pathlib import Path

dir_path = Path(".").absolute()

# Line patterns used to describe each boule colour in the result text
TYPE_1 = "_________"
TYPE_2 = "___ ___"

def capture_frame_from_webcam(cam_id):
    """
    Captures a single frame from the webcam.

    Args:
        cam_id (int): Index of the webcam to open.

    Returns:
        frame (numpy.ndarray): The captured frame as a NumPy array.
    """
    # Open a connection to the webcam identified by cam_id
    cap = cv2.VideoCapture(cam_id)
    if not cap.isOpened():
        raise Exception("Could not open webcam. Please check your webcam connection.")
    try:
        # Capture a single frame
        ret, frame = cap.read()
        if not ret:
            raise Exception("Failed to capture frame from webcam.")
        return frame
    finally:
        # Release the webcam resource
        cap.release()
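
# Usage sketch (illustrative only, not part of the application flow): the camera
# index and output path below are assumptions for a quick standalone check.
#
#     frame = capture_frame_from_webcam(0)
#     cv2.imwrite("/tmp/webcam_check.png", frame)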

@dataclass
class Object:
    x: int
    y: int
    rayon: int

def process_frame(params, cam_id):
    """
    Processes a single webcam frame with OpenCV using parameters from the GUI.

    Args:
        params (dict): A dictionary of variable values passed from the GUI.
        cam_id (int): Index of the webcam to capture from.

    Returns:
        numpy.ndarray: The annotated frame (BGR).
        str: A result text description.
    """
    # Earlier dummy-image stub, kept for reference:
    # width, height = 400, 300
    # image = Image.new("RGB", (width, height),
    #                   color=(params["color1_R_min"] * 4, params["color1_V_min"] * 4, params["color1_B_min"] * 4))
    # image_tk = ImageTk.PhotoImage(image)
    # result_text = f"Processed image with params: {params}"
    # return image_tk, result_text
    (minDist, param1, param2, minRadius, maxRadius,
     color1_R_min, color1_V_min, color1_B_min, color1_R_max, color1_V_max, color1_B_max) = itemgetter(
        'minDist', 'param1', 'param2', 'minRadius', 'maxRadius',
        'color1_R_min', 'color1_V_min', 'color1_B_min', 'color1_R_max', 'color1_V_max', 'color1_B_max'
    )(params)
    # 1. Image acquisition
    # src = dir_path.joinpath('tests/images/balls-full-small.jpg')
    # raw_image = cv2.imread(str(src))
    raw_image = capture_frame_from_webcam(cam_id)

    # 2. Detect circular objects (boules) with a Hough circle transform
    gray = cv2.cvtColor(raw_image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.medianBlur(gray, 25)
    # minDist: minimum distance between circle centres; param1/param2: Canny and
    # accumulator thresholds; minRadius/maxRadius: allowed circle sizes
    circles = cv2.HoughCircles(blurred, cv2.HOUGH_GRADIENT, 1, minDist,
                               param1=param1, param2=param2,
                               minRadius=minRadius, maxRadius=maxRadius)
    min_rayon = 9999
    cochonnet = None
    boules = []
    if circles is not None:
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            boule = Object(x=int(i[0]), y=int(i[1]), rayon=int(i[2]))
            # 3. The smallest detected circle is taken to be the cochonnet (jack)
            if boule.rayon < min_rayon:
                min_rayon = boule.rayon
                if cochonnet is not None:
                    boules.append(cochonnet)
                cochonnet = boule
            else:
                boules.append(boule)
    # 4. Group the remaining boules into colour 1 or colour 2, based on the dominant
    #    colour found inside each box
    hsv = cv2.cvtColor(raw_image, cv2.COLOR_BGR2HSV)
    (h, s, v) = cv2.split(hsv)
    # Boost saturation; widen the dtype first to avoid uint8 overflow before clipping
    s = np.clip(s.astype(np.int16) * 2, 0, 255).astype(np.uint8)
    imghsv = cv2.merge([h, s, v])
    boules_couleurs = []
    boules_bgr = []
    for boule in boules:
        half_diametre = int(boule.rayon / 2)
        # Crop a square around the circle centre, clamped to the image bounds
        crop = imghsv[
            max(0, boule.y - half_diametre):boule.y + half_diametre,
            max(0, boule.x - half_diametre):boule.x + half_diametre,
        ].copy()
        pixels = np.float32(crop.reshape(-1, 3))
        n_colors = 2
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, .1)
        _, labels, palette = cv2.kmeans(pixels, n_colors, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        _, counts = np.unique(labels, return_counts=True)
        # Dominant cluster of the crop. Note the crop is taken from the saturation-boosted
        # HSV image, so these components are channel values labelled here as B / V (green) / R.
        (b, v, r) = palette[np.argmax(counts)]
        boules_bgr.append(f"R:{int(r)} V:{int(v)} B:{int(b)}")
        # Compare the dominant colour against the thresholds set in the GUI
        if (
            color1_R_min <= math.floor(r) <= color1_R_max
            and color1_V_min <= math.floor(v) <= color1_V_max
            and color1_B_min <= math.floor(b) <= color1_B_max
        ):
            boules_couleurs.append(TYPE_1)
        else:
            boules_couleurs.append(TYPE_2)
    # 5. Distance from each boule to the cochonnet, measured between circle centres
    boules_distance = {}
    for i, boule in enumerate(boules):
        dist = int(math.sqrt(math.pow(cochonnet.x - boule.x, 2) + math.pow(cochonnet.y - boule.y, 2)))
        boules_distance[i] = dist
    boules_distance = dict(sorted(boules_distance.items(), key=lambda item: item[1]))

    # 6. Ordered list of the 6 smallest distances
    boules_proches = list(boules_distance)[:6]

    # 7. Output the 6 colours as solid (---) or broken (- -) lines
    return_text = ""
    img_public = raw_image.copy()
    img_final = raw_image.copy()
    for i in boules_proches:
        boule = boules[i]
        return_text += f"{boules_couleurs[i]}\n"
        cv2.circle(img_final, (boule.x, boule.y), boule.rayon, (0, 255, 0), 2)
        cv2.circle(img_public, (boule.x, boule.y), boule.rayon, (0, 255, 0), 2)
        cv2.putText(img_final, boules_bgr[i], (boule.x, boule.y), cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.75, color=(255, 255, 255), thickness=1, lineType=cv2.LINE_AA)

    cv2.imwrite('/tmp/yiking.png', img_public)
    return img_final, return_text
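

# Minimal usage sketch (assumption): this module does not define an entry point,
# so the parameter values below are illustrative defaults, not values coming from
# the GUI. Only the dictionary keys are taken from process_frame's itemgetter call.
if __name__ == "__main__":
    demo_params = {
        'minDist': 50, 'param1': 100, 'param2': 30, 'minRadius': 10, 'maxRadius': 80,
        'color1_R_min': 0, 'color1_V_min': 0, 'color1_B_min': 0,
        'color1_R_max': 255, 'color1_V_max': 255, 'color1_B_max': 255,
    }
    annotated, text = process_frame(demo_params, cam_id=0)
    print(text)
    cv2.imshow("yiking", annotated)
    cv2.waitKey(0)
    cv2.destroyAllWindows()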