// lj_qualibration/src/qualibration.rs

pub mod annalyse;
pub mod borders;

use std::env::args;
use std::f64::consts::PI;
use std::fs::{create_dir, read_dir};
use std::time::Instant;

use enum_iterator::{next, Sequence as Seq};
use opencv::core::{
    self, bitwise_and, find_file, in_range, Mat, Point as OcvPoint, Scalar, Size, Size_, VecN,
    Vector,
};
use opencv::imgcodecs::{imread, imwrite, IMREAD_COLOR};
use opencv::imgproc::{self, canny, cvt_color, line, COLOR_BGR2GRAY};
use opencv::prelude::*;
use opencv::videoio::{self, VideoCapture};
use opencv::{calib3d, highgui, Result};

use crate::draw;
use crate::point::{Color, Point};
use annalyse::{image_diff, is_same_frame};
use borders::{bord_mult, get_extermities, get_intersection, mix_borders, probabilistic_hough};

opencv::opencv_branch_4! {
    use opencv::imgproc::LINE_AA;
}
opencv::not_opencv_branch_4! {
    use opencv::core::LINE_AA;
}
const DEBUG: bool = true;
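/// Calibration state machine.
///
/// States advance in declaration order through `enum_iterator::next` (see
/// `get_next_id_seq`), so the order of the variants matters: a capture run
/// goes from `FirstState` through the background and the four border
/// captures up to `Finish`. The variants after `Finish` appear to be extra
/// test patterns (constant-speed lines, vertical/horizontal sweeps, point
/// count selection) that are only reached when explicitly selected.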
#[derive(Debug, PartialEq, Seq, Copy, Clone)]
pub enum Sequence {
    // TODO: capture the same number of images in capture mode AND in replay mode
    FirstState,
    WaitSpace,
    BackGround,
    UpBorder,
    LeftBorder,
    DownBorder,
    RightBorder,
    ReadDir,
    ComputeArea,
    PlayLineDotted,
    Finish,
    LinearConstSpeed, // [multiple tests]
    JumpFromTo,
    AdaptLineSeg(u16), // [multiple tests] find the correct distance
    AdaptLineLum,      // [multiple tests] try minimum, medium, maximum
    //
    SelectSpeedestColor, // the values could be updated on every pass
    Vertical(u16),
    Horizontal(u16),
    SelectNbAll(u16),
    ComputeSelectNbAll,
}
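/// Holds all of the calibration state: the camera handle, the captured
/// frames (`img`), the current `Sequence` step (`id`), the adjustable
/// parameters (colour, Canny thresholds, Hough parameters, ...) and the
/// fields meant to receive the results of the area computation
/// (`border_pt`, `homography`, `h_size`).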
#[derive(Debug)]
pub struct Qualibration {
    pub begin: Instant,
    pub cam: VideoCapture,
    pub r: i32,
    pub g: i32,
    pub b: i32,
    pub capture_mode: bool,
    pub frame: Mat,
    pub frame_prev: Mat,
    pub img: Vec<Mat>,
    pub id: Option<Sequence>,
    pub nb_all: i32,
    pub nb_visible: i32,
    pub nb_liss: i32,
    pub tresh: Treshold,
    pub dir_name: String,
    pub key: i32,
    pub canny_v1: i32,
    pub canny_v2: i32,
    pub hough_param: HoughLine,
    pub border_pt: Vec<(f64, f64)>,
    pub homography: Mat,
    pub h_size: Size_<i32>,
    pub line_pos: Vec<i32>,
}
impl Qualibration {
    pub fn new() -> Result<Self> {
        let mut dir_name = "".to_string(); //"building.jpg".to_string(); // by default
        if let Some(dir_name_arg) = args().nth(1) {
            dir_name = dir_name_arg;
        }
        let mut cam = videoio::VideoCapture::new(0, videoio::CAP_ANY)?; // 0 is the default camera
        let opened_cam = videoio::VideoCapture::is_opened(&cam)?;
        if !opened_cam {
            panic!("Unable to open default camera!");
        }
        let mut frame = Mat::default();
        cam.read(&mut frame)?;
        //let now = std::time::Instant::now();
        Ok(Qualibration {
            begin: std::time::Instant::now(),
            cam,
            r: 150,
            g: 0,
            b: 0,
            capture_mode: dir_name.is_empty(),
            img: vec![],
            frame: Mat::default(), // TODO: init with frame from cam
            frame_prev: Mat::default(),
            id: Some(Sequence::FirstState),
            nb_all: 120,
            nb_visible: 40,
            nb_liss: 10,
            tresh: Treshold::new("histogram: 0", 0, 255)?,
            dir_name: dir_name.clone(),
            key: 10,
            canny_v1: 150,
            canny_v2: 255,
            hough_param: HoughLine {
                rho: 100,
                theta: 100,
                treshold: 30,
                min_length: 0,
                max_line_gap: 50000,
            },
            border_pt: vec![],
            homography: Mat::default(),
            h_size: Size::default(),
            line_pos: vec![4095; 100],
        })
    }
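    /// One iteration of the calibration loop: in capture mode, grab a frame
    /// from the camera and show it, then (when a step is active) optionally
    /// store the frame, run `compute_sequence` for the current step and
    /// advance `id` to the next `Sequence` state.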
    pub fn run_step(&mut self) -> Result<(), Box<dyn std::error::Error>> {
        if self.capture_mode {
            //println!("capture");
            self.cam.read(&mut self.frame)?;
            highgui::imshow("camera", &self.frame)?;
        }
        if (self.frame.size()?.width > 0 && self.frame_prev.size()?.width > 0)
            || !self.capture_mode
        {
            if self.id.is_some() {
                self.id = if true {
                    //self.capture_mode || self.id == Some(Sequence::WaitSpace) || is_same_frame(&self.frame, &self.frame_prev)? {
                    if self.id != Some(Sequence::WaitSpace)
                        && self.id != Some(Sequence::FirstState)
                        && self.id != Some(Sequence::PlayLineDotted)
                        && self.capture_mode
                    {
                        self.img.push(self.frame.clone());
                    }
                    self.compute_sequence()?;
                    self.get_next_id_seq()
                } else {
                    self.id
                };
            }
        }
        //println!("sequence: {:?}", self.id);
        self.frame_prev = self.frame.clone();
        Ok(())
    }
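    /// Builds the list of points to draw for the current step (presumably the
    /// frame sent to the laser projector): the full calibration frame plus
    /// three dotted lines for `WaitSpace`, a single edge for the four
    /// `*Border` steps, sweeps for `Vertical`/`Horizontal`, and so on.
    /// Returns an empty list in replay mode, where nothing is projected.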
    pub fn draw_sequence(&self) -> Result<Vec<Point>, Box<dyn std::error::Error>> {
        if !self.capture_mode {
            return Ok(vec![]);
        }
        let seq = self.id;
        let mut pl = vec![];
        //let color = Color { r: 0, g: 30, b: 0 };
        let color = Color {
            r: self.r as u8,
            g: self.g as u8,
            b: self.b as u8,
        };
        //let color = Color { r: 0, g: 0, b: 50 };
        let p0 = Point { x: 0., y: 0., color };
        let p1 = Point { x: 4095., y: 0., color };
        let p2 = Point { x: 4095., y: 4095., color };
        let p3 = Point { x: 0., y: 4095., color };
        let p4 = Point { x: 0., y: 1000., color: Color { r: self.r as u8, g: 0, b: 0 } };
        let p5 = Point { x: 4095., y: 1000., color: Color { r: self.r as u8, g: 0, b: 0 } };
        let p6 = Point { x: 0., y: 2000., color: Color { r: 0, g: self.g as u8, b: 0 } };
        let p7 = Point { x: 4095., y: 2000., color: Color { r: 0, g: self.g as u8, b: 0 } };
        let p8 = Point { x: 0., y: 3000., color: Color { r: 0, g: 0, b: self.b as u8 } };
        let p9 = Point { x: 4095., y: 3000., color: Color { r: 0, g: 0, b: self.b as u8 } };
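        // All coordinates above are in the 0..=4095 range, presumably the
        // 12-bit coordinate space of the projector: p0..p3 are the corners of
        // that square, and p4..p9 define three horizontal lines at y = 1000,
        // 2000 and 3000 drawn with only the red, green and blue channel
        // respectively.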
        let nb_all = self.nb_all as usize;
        let nb_visible = self.nb_visible as usize;
        let nb_wait = 20;
        if let Some(seq) = seq {
            match seq {
                Sequence::PlayLineDotted => {
                    // Draw a dotted line that can be observed: `nb_all` black
                    // (invisible) points parked at the origin, then `nb_wait`
                    // black points stepping along x = 0, then one point per
                    // entry of `line_pos`, alternating the drawing colour and
                    // black.
                    pl = vec![];
                    let black = Color { r: 0, g: 0, b: 0 };
                    for _ in 0..nb_all {
                        pl.push(Point { x: 0., y: 0., color: black });
                    }
                    let len = (self.line_pos.len() + nb_wait) as f32;
                    for i in 0..nb_wait {
                        let y = i as f32 * 4095. / len;
                        pl.push(Point { x: 0., y, color: black });
                    }
                    for i in 0..(self.line_pos.len()) {
                        let y = (i + nb_wait) as f32 * 4095. / len;
                        let c = if (i + nb_wait) % 2 == 0 { color } else { black };
                        pl.push(Point { x: self.line_pos[i] as f32, y, color: c });
                    }
                }
                Sequence::WaitSpace => {
                    pl = draw::draw_line(&p0, &p1, nb_all, nb_visible)?;
                    pl.extend(draw::draw_line(&p1, &p2, nb_all, nb_visible)?);
                    pl.extend(draw::draw_line(&p3, &p0, nb_all, nb_visible)?);
                    pl.extend(draw::draw_line(&p2, &p3, nb_all, nb_visible)?);
                    pl.extend(draw::draw_line_dotted(&p4, &p5, nb_all, nb_visible)?);
                    pl.extend(draw::draw_line_dotted(&p6, &p7, nb_all, nb_visible)?);
                    pl.extend(draw::draw_line_dotted(&p8, &p9, nb_all, nb_visible)?);
                }
                Sequence::SelectNbAll(n) => {
                    pl = draw::draw_line(&p0, &p1, n as usize, n as usize)?;
                    pl.extend(draw::draw_line(&p1, &p2, n as usize, n as usize)?);
                    pl.extend(draw::draw_line(&p3, &p0, n as usize, n as usize)?);
                    pl.extend(draw::draw_line(&p2, &p3, n as usize, n as usize)?);
                }
                Sequence::UpBorder => {
                    pl = draw::draw_line(&p0, &p1, nb_all, nb_visible)?;
                }
                Sequence::RightBorder => {
                    pl = draw::draw_line(&p1, &p2, nb_all, nb_visible)?;
                }
                Sequence::DownBorder => {
                    pl = draw::draw_line(&p2, &p3, nb_all, nb_visible)?;
                }
                Sequence::LeftBorder => {
                    pl = draw::draw_line(&p3, &p0, nb_all, nb_visible)?;
                }
                Sequence::Vertical(n) => {
                    let p1 = Point { x: n as f32, y: 0., color };
                    let p2 = Point { x: n as f32, y: 4095., color };
                    pl = draw::draw_line(&p1, &p2, nb_all, nb_visible)?;
                }
                Sequence::Horizontal(n) => {
                    let p1 = Point { x: 0., y: n as f32, color };
                    let p2 = Point { x: 4095., y: n as f32, color };
                    pl = draw::draw_line(&p1, &p2, nb_all, nb_visible)?;
                }
                _ => (),
            }
        }
        Ok(pl)
    }
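    /// Returns the step that follows the current one. Most variants simply
    /// advance to the next declared variant through `enum_iterator::next`;
    /// the exceptions are `WaitSpace` and `PlayLineDotted`, which wait for
    /// the space key (key code 32, or immediately in replay mode for
    /// `WaitSpace`), `Finish`, which ends the sequence, and
    /// `SelectNbAll(n)`, whose point count doubles on every pass
    /// (1, 2, 4, ... up to 2048) before moving on.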
    pub fn get_next_id_seq(&self) -> Option<Sequence> {
        let line_max = 4095;
        let line_add = 100;
        match self.id? {
            //Sequence::Finish => Some(Sequence::Finish),
            Sequence::Finish => None,
            Sequence::SelectNbAll(n) => {
                if n == 0 {
                    Some(Sequence::SelectNbAll(2 - 1))
                } else if (2 * n) > line_max as u16 {
                    next(&Sequence::SelectNbAll(u16::MAX))
                } else {
                    next(&Sequence::SelectNbAll(n * 2 - 1))
                }
            }
            Sequence::WaitSpace => {
                //println!("key: {}", self.key);
                if self.key == 32 || !self.capture_mode {
                    next(&Sequence::WaitSpace)
                } else {
                    Some(Sequence::WaitSpace)
                }
            }
            Sequence::PlayLineDotted => {
                //println!("key: {}", self.key);
                if self.key == 32 {
                    next(&Sequence::PlayLineDotted)
                } else {
                    Some(Sequence::PlayLineDotted)
                }
            }
            Sequence::Vertical(n) => {
                let after = if n > line_max { u16::MAX } else { n + line_add };
                next(&Sequence::Vertical(after))
            }
            Sequence::Horizontal(n) => {
                let after = if n > line_max { u16::MAX } else { n + line_add };
                next(&Sequence::Horizontal(after))
            }
            //Sequence::ComputeArea => Some(Sequence::ComputeArea),
            id => next(&id),
        }
    }
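    /// Runs the processing attached to the current step. Most steps only
    /// capture an image (handled in `run_step`); the computational ones are
    /// `ComputeSelectNbAll` (measure the angles of the detected lines on the
    /// test frames), `ComputeArea` (detect the projected quadrilateral and
    /// build a homography to a flat 1024x1024 view), `ReadDir` (load a
    /// previous capture from disk) and `Finish` (save the capture).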
    pub fn compute_sequence(&mut self) -> Result<(), Box<dyn std::error::Error>> {
        if let Some(id) = self.id {
            match id {
                Sequence::ComputeSelectNbAll => {
                    let background: Mat = self.img[1].clone();
                    let steps: Vec<Mat> = self.img[2..6].into();
                    let mut angles: Vec<f64> = vec![];
                    for (id, step) in steps.iter().enumerate() {
                        let lines = get_lines(
                            &background,
                            step,
                            id,
                            self.canny_v1,
                            self.canny_v2,
                            &self.hough_param,
                        )?;
                        for l in lines {
                            let (x0, y0, x1, y1) =
                                (l[0] as f64, l[1] as f64, l[2] as f64, l[3] as f64);
                            let ang = (y1 - y0).atan2(x1 - x0);
                            angles.push(ang);
                        }
                        println!("ang: {angles:?}");
                    }
                    // compare what needs to be compared
                }
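                // ComputeArea: from the background frame and the four border
                // captures, detect one line per border, intersect them to get
                // the projected quadrilateral, enlarge it slightly (the 1.1
                // factor presumably adds a ~10% safety margin), then find a
                // homography mapping that quadrilateral onto a flat 1024x1024
                // image and warp the mixed-border view with it.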
                Sequence::ComputeArea => {
                    let background: Mat = self.img[1].clone();
                    let borders: Vec<Mat> = self.img[2..6].into();
                    // extract each of the four borders
                    let mut bords_pts = vec![];
                    for (i, bord) in borders.iter().enumerate() {
                        let bord_pt = self.get_one_border(&background, bord, i)?;
                        bords_pts.push(bord_pt);
                    }
                    //for (i, m) in self.img.iter().enumerate() {
                    //    highgui::imshow(format!("img[{i}]").as_str(), m)?;
                    //}
                    // compute the frame (the projected quadrilateral)
                    let border_pt = get_intersection(&bords_pts);
                    self.border_pt = bord_mult(border_pt, 1.1);
                    let color: VecN<f64, 4> = VecN::new(255., 128., 0., 255.);
                    let mut mixed = mix_borders(&background, borders)?;
                    let b = &self.border_pt;
                    for i in 0..b.len() {
                        let j = (i + 1) % self.border_pt.len();
                        let pa = VecN::from_array([b[i].0 as i32, b[i].1 as i32]);
                        let pb = VecN::from_array([b[j].0 as i32, b[j].1 as i32]);
                        let a = OcvPoint::from_vec2(pa);
                        let b = OcvPoint::from_vec2(pb);
                        line(&mut mixed, a, b, color, 1, LINE_AA, 0)?;
                    }
                    highgui::imshow("mixed borders", &mixed)?;
                    // re-frame the laser-projected part of the image
                    let warped_image_size = Size::new(1024, 1024);
                    let roi_corners: Vec<OcvPoint> = self
                        .border_pt
                        .iter()
                        .map(|(x, y)| OcvPoint::new(*x as i32, *y as i32))
                        .collect();
                    let dst = [(0, 0), (0, 1024), (1024, 1024), (1024, 0)];
                    let dst_corners: Vec<OcvPoint> =
                        dst.iter().map(|(x, y)| OcvPoint::new(*x, *y)).collect();
                    let roi_corners_mat = Mat::from_slice(&roi_corners[..])?;
                    let dst_corners_mat = Mat::from_slice(&dst_corners)?;
                    let h = calib3d::find_homography(
                        &roi_corners_mat,
                        &dst_corners_mat,
                        &mut Mat::default(),
                        0,
                        3.,
                    )?; // get the homography
                    let mut warped_image = Mat::default();
                    imgproc::warp_perspective(
                        &mixed,
                        &mut warped_image,
                        &h,
                        warped_image_size,
                        imgproc::INTER_LINEAR, // I don't see a difference with INTER_CUBIC
                        core::BORDER_CONSTANT,
                        Scalar::default(),
                    )?; // apply the perspective transformation
                    highgui::imshow("Warped Image", &warped_image)?;
                }
                Sequence::ReadDir => {
                    if !self.capture_mode {
                        self.load_image()?;
                    }
                }
                Sequence::Finish => {
                    if self.capture_mode {
                        self.save_image()?
                    }
                }
                _ => (),
            }
        }
        Ok(())
    }
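    /// Saves every captured frame as `image/<secs>_<nanos>/img_<i>.png`,
    /// where the folder name is built from the time elapsed since `begin`.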
    fn save_image(&self) -> Result<()> {
        // create the top-level folder if needed,
        // create a folder named from the elapsed time,
        // then save all the images into it
        let now = self.begin;
        let name = format!("image/");
        create_dir(&name).unwrap_or(());
        let name = format!(
            "image/{:0>6?}_{:0>9?}/",
            now.elapsed().as_secs(),
            now.elapsed().as_nanos()
        );
        create_dir(&name).unwrap_or(());
        //name.push_str(format!("image/{}_{}/", now.elapsed().as_secs(), now.elapsed().as_nanos()).as_str());
        //let name = format!("image/{now:?}/");
        for (i, img) in self.img.iter().enumerate() {
            let mut name_img = name.clone();
            name_img.push_str(&format!("img_{i}.png"));
            imwrite(&name_img, img, &Vector::from_slice(&[6, 6, 6, 0]))?;
        }
        Ok(())
    }
//use std::cmp::Ordering;
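    /// Replay mode: reads every `img_<id>.png` from `dir_name` (a folder
    /// written by `save_image`), sorts the frames by their numeric id and
    /// appends them to `self.img`.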
    fn load_image(&mut self) -> Result<(), Box<dyn std::error::Error>> {
        let mut imgs = vec![];
        let paths = read_dir(&self.dir_name)?;
        for entry in paths {
            let dir = entry?;
            let path = dir.path();
            // Extract the numeric id from a file name of the form "img_<id>.png":
            // take the last path component, then strip the 4-character "img_"
            // prefix and the 4-character ".png" suffix.
            let c: Vec<&str> = path.to_str().unwrap().split("/").collect();
            let d: Vec<_> = c[c.len() - 1].chars().collect();
            let e: String = d[4..d.len() - 4].iter().collect();
            let img_id: i32 = e.parse()?;
            // Debug-format the path and strip the surrounding quotes to get a
            // plain string usable by `find_file`.
            let path = format!("{path:?}");
            let path = path[1..(path.len() - 1)].to_owned();
            let img: Mat = imread(&find_file(&path, false, false)?, IMREAD_COLOR)?;
            // highgui::imshow(&path, &img)?;
            imgs.push((img_id, img));
        }
        // sort the frames by their numeric id
        imgs.sort_by(|v1, v2| v1.0.cmp(&v2.0));
        for (_i, m) in imgs.iter().enumerate() {
            self.img.push(m.1.clone());
            //highgui::imshow(format!("img: {i}").as_str(), &m.1)?;
        }
        Ok(())
    }
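    /// Detects one border line on a single capture: subtract the background,
    /// convert to grayscale, run Canny edge detection, then a probabilistic
    /// Hough transform, and keep the two extreme points of the detected
    /// segments (see `borders::get_extermities`).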
    pub fn get_one_border(
        &self,
        background: &Mat,
        bord: &Mat,
        id: usize,
    ) -> Result<((f64, f64), (f64, f64))> {
        let diff: Mat = image_diff(bord, background)?;
        //let (t1, s1, l1) = (
        //    self.tresh.min_0 as f64,
        //    self.tresh.min_1 as f64,
        //    self.tresh.min_2 as f64,
        //);
        //let (t2, s2, l2) = (
        //    self.tresh.max_0 as f64,
        //    self.tresh.max_1 as f64,
        //    self.tresh.max_2 as f64,
        //);
        //let min = Mat::from_slice(&[t1, s1, l1])?;
        //let max = Mat::from_slice(&[t2, s2, l2])?;
        //let mut color_selected = Mat::default();
        //let _ = in_range(&diff, &min, &max, &mut color_selected);
        ////highgui::imshow(format!("mask: {id}").as_str(), &color_selected)?;
        //let mut bord_treshed = Mat::default();
        //bitwise_and(&diff, &diff, &mut bord_treshed, &color_selected)?;
        ////highgui::imshow(format!("diff & mask: {id}").as_str(), &bord_treshed)?;
        // Convert the image to grayscale
        let mut diff_gray = Mat::default();
        cvt_color(&diff, &mut diff_gray, COLOR_BGR2GRAY, 0)?;
        //cvt_color(&bord_treshed, &mut diff_gray, COLOR_BGR2GRAY, 0)?;
        // Apply the Canny edge detector
        let mut edges = Mat::default();
        canny(
            &diff_gray,
            &mut edges,
            self.canny_v1 as f64,
            self.canny_v2 as f64,
            3,
            false,
        )?;
        let lines = probabilistic_hough(&edges, &self.hough_param, id)?;
        //let ((x1, y1), (x2, y2)) = get_extermities(&lines, id);
        Ok(get_extermities(&lines, id))
    }
}
// these are the raw values manipulated by the sliders
#[derive(Debug, Clone)]
pub struct HoughLine {
    pub rho: i32,
    pub theta: i32,
    pub treshold: i32,
    pub min_length: i32,
    pub max_line_gap: i32,
}
// these are the values actually passed to the Hough transform
pub struct HoughLineValue {
    pub rho: f64,
    pub theta: f64,
    pub treshold: i32,
    pub min_length: f64,
    pub max_line_gap: f64,
}
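// `get_param` converts the integer slider values into the floating-point
// parameters passed to the Hough transform (presumably `HoughLinesP` inside
// `borders::probabilistic_hough`): `rho`, `min_length` and `max_line_gap`
// are stored as hundredths, `theta` as hundredths of a degree converted to
// radians. With the defaults from `Qualibration::new` (rho = 100,
// theta = 100) this gives rho = 1.0 and theta = 1 degree ≈ 0.0175 rad.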
impl HoughLine {
    pub fn get_param(&self) -> HoughLineValue {
        HoughLineValue {
            rho: self.rho as f64 / 100.,
            theta: self.theta as f64 / 100. * PI / 180.,
            treshold: self.treshold,
            min_length: self.min_length as f64 / 100.,
            max_line_gap: self.max_line_gap as f64 / 100.,
        }
    }
}
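// Per-channel min/max threshold values, presumably driven by HighGUI
// trackbars in the window named by `win_name`; they feed the `in_range`
// mask in the (currently commented-out) colour-selection code of
// `get_one_border`.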
#[derive(Clone, Debug)]
pub struct Treshold {
    pub win_name: String,
    pub min_0: i32,
    pub min_1: i32,
    pub min_2: i32,
    pub max_0: i32,
    pub max_1: i32,
    pub max_2: i32,
}
impl Treshold {
    pub fn new(name: &str, min: i32, max: i32) -> Result<Self> {
        let tresh = Treshold {
            win_name: name.to_owned(),
            min_0: min,
            min_1: min,
            min_2: min,
            max_0: max,
            max_1: max,
            max_2: max,
        };
        Ok(tresh)
    }
}
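/// Free-function counterpart of `Qualibration::get_one_border`: background
/// subtraction, grayscale conversion, Canny edge detection, then a
/// probabilistic Hough transform, returning every detected segment instead
/// of just the two extreme points.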
pub fn get_lines(
    background: &Mat,
    bord: &Mat,
    id: usize,
    canny_v1: i32,
    canny_v2: i32,
    hough_param: &HoughLine,
) -> Result<Vector<VecN<i32, 4>>> {
    let diff: Mat = image_diff(bord, background)?;
    // Convert the image to grayscale
    let mut diff_gray = Mat::default();
    cvt_color(&diff, &mut diff_gray, COLOR_BGR2GRAY, 0)?;
    // Apply the Canny edge detector
    let mut edges = Mat::default();
    canny(
        &diff_gray,
        &mut edges,
        canny_v1 as f64,
        canny_v2 as f64,
        3,
        false,
    )?;
    let lines = probabilistic_hough(&edges, hough_param, id)?;
    //let ((x1, y1), (x2, y2)) = get_extermities(&lines, id);
    Ok(lines)
}