Eigen::MatrixXf EA_IPM::adaptive_roll(float r, float c)
{
// Rotate the image-plane point (r, c) by the roll angle theta_r
// (member variable) and return the rotated pair as a 1 x 2 matrix.
const float cos_t = cos(theta_r);
const float sin_t = sin(theta_r);
Eigen::Matrix2f rotation;
rotation << cos_t, -sin_t,
            sin_t,  cos_t;
Eigen::MatrixXf coords(1, 2);
coords << r, c;
// Row-vector convention: rotated = coords * rotation.
return coords * rotation;
}
// Convert the front-camera compressed image into colored ground points via
// EA-IPM (Edge-Adaptive Inverse Perspective Mapping). Decodes the image,
// crops a ground ROI, shows it for debugging, then back-projects each
// subsampled pixel onto the ground plane (Z = 0).
// NOTE(review): the function's closing brace is not visible in this chunk;
// the comments below cover only the visible portion.
void EA_IPM::get_ea_ipm(const sensor_msgs::msg::CompressedImage::SharedPtr img_msg)
{
cv_bridge::CvImagePtr front_img_ptr;
int img_col, img_row;
cv::Mat front_img, roi_img;
// Decode the compressed ROS image into a BGR8 OpenCV matrix.
front_img_ptr = cv_bridge::toCvCopy(img_msg, "bgr8");
front_img = front_img_ptr->image;
img_col = front_img_ptr->image.cols; // 2592
img_row = front_img_ptr->image.rows; // 1944
// Set an ROI so that only the ground region is kept
roi_img = get_roi_img(front_img);
// Debug visualization of the ROI image at half resolution.
cv::namedWindow("front_image", 0);
cv::resizeWindow("front_image", img_col/2, img_row/2);
cv::imshow("front_image", roi_img);
cv::waitKey(1);
// =================================== EA-IPM application section ===================================
float theta_v, X, Y, Z;
std::vector<std::vector<float>> point_fields;
uchar* ptr_row;
int row_increase = 2; // row subsampling stride
int col_increase = 4; // column subsampling stride
// Capture `this` so the lambdas can use class member variables (cx, cy)
auto v2r = [this](int v_) {return cy + 0.5 - v_;}; // pixel v -> row offset from principal point
auto u2c = [this](int u_) {return u_ - cx + 0.5;}; // pixel u -> col offset from principal point
std::chrono::system_clock::time_point start = std::chrono::system_clock::now();
// Only the lower 2/5 of the image is scanned — presumably where the ground
// appears in the frame; confirm against the ROI definition.
for (int v = img_row*3/5; v < img_row; v = v + row_increase){
// Access pixels through the row pointer (avoids per-pixel cv::Mat::at)
ptr_row = roi_img.ptr<uchar>(v);
for (int u = 0; u < img_col; u = u + col_increase){
float B = ptr_row[u * 3 + 0];
float G = ptr_row[u * 3 + 1];
float R = ptr_row[u * 3 + 2];
//std::cout << b << " " << g << " " << r << std::endl;
float r = v2r(v);
float c = u2c(u);
// adaptive roll
// r = adaptive_roll(r, c)(0, 0);
// c = adaptive_roll(r, c)(0, 1);
// NOTE(review): if re-enabled as written, the second call would see the
// already-rotated r; store adaptive_roll(r, c) once before assigning.
// Pitch-compensated IPM: theta_v is the vertical ray angle of this pixel;
// theta/theta_p/h/fx are camera parameters (members, declared elsewhere).
theta_v = -atan(r/fx);
X = h * (1 / tan((theta + theta_p) + theta_v));
Y = -(cos(theta_v) / cos((theta + theta_p) + theta_v)) * (X * c / fx);
Z = 0; // points lie on the ground plane
point_fields.push_back({X, Y, Z, B, G, R});
}
}
- 이중 for문을 python처럼 matrix 연산으로 바꿀 수 있으면 속도를 개선시킬 수 있을 것이라 생각
- 이중 for문 안에 roll model을 적용하면 루프 만큼 roll model 연산량이 증가하여 속도가 급격히 저하
- 모든 픽셀 값을 담고 있는 N * 1 matrix를 가져오는 것을 어떻게?
- python에서는 아래와 같이 meshgrid를 통해 N * 1 matrix를 output으로 바로 가져올 수 있어서 matrix연산을 효율적으로 가능
# =================================== Adaptive IPM application section ===================================
# Get pixel indices for the ROI as a 2-D grid
u_arr, v_arr = np.meshgrid(np.arange(roi_u1, roi_u6), np.arange(roi_v3, roi_v6))
# Pixels to which IPM will be applied, flattened to N x 1 column vectors
u = np.reshape(u_arr, (u_arr.size, 1))
v = np.reshape(v_arr, (v_arr.size, 1))
#pixel_arr = np.column_stack((u, v))
# image coordinates translation (pixel indices -> offsets from principal point)
v2r = lambda v:(self.cy//2 + 0.5 - v)
u2c = lambda u:(u - (self.cx//2 + 0.5))
r = v2r(v)
c = u2c(u)
- AVP-SLAM image to cloud 부분 또한 이중 for문을 사용
// (Quoted from AVP-SLAM as a reference implementation.)
// Back-projects image pixels through the inverse intrinsics K and inverse
// transform RT, normalizes by z, and appends colored ground points (z = 0)
// to cameraCloud. Uses a double for loop with row/col subsampling strides.
void calCloudFromImage(Eigen::Matrix3d& K, Eigen::Matrix3d& RT,const cv::Mat& image,pcl::PointCloud<PointType>::Ptr& cameraCloud){
Eigen::Matrix3d KInv=K.inverse(); // pixel -> normalized camera ray
Eigen::Matrix3d RTInv=RT.inverse();
int row=image.rows;
int col=image.cols;
for(int i=0;i<row;i=i+imageRowIncrease){
// Row pointer for fast BGR pixel access
const uchar* p=image.ptr<uchar>(i);
for(int j=0;j<col;j=j+imageColIncrease){
int b=p[3*j];
int g=p[3*j+1];
int r=p[3*j+2];
// there,for simplicity,just according to color,detect invalid area like sky area;
// for real scene,should adapt machine learning method or other method detecting invalid area
if(b==skyColor ){
// NOTE(review): `break` skips the REST of this row, not just this pixel —
// intentional here only if sky always extends to the row's end.
break;
}
// Homogeneous pixel coordinate (u, v, 1)
Eigen::Vector3d u;
u(0)=j;
u(1)=i;
u(2)=1;
Eigen::Vector3d u1;
u1=KInv*u;
u1=RTInv*u1;
// Normalize by z — presumably intersecting the ray with the ground
// plane; confirm the RT convention used by the caller.
u1(0)=u1.x()/u1.z();
u1(1)=u1.y()/u1.z();
// Discard points beyond the camera's reliable planar range
double dis=sqrt(u1.x()*u1.x()+u1.y()*u1.y());
if(dis>cameraRealiableDis)
continue;
PointType po;
po.x = u1.x();
po.y = u1.y();
po.z = 0;
po.r=r;
po.g=g;
po.b=b;
cameraCloud->push_back(po);
}
}
}