cvepilines.cpp revision 6acb9a7ea3d7564944e12cbc73a857b88c1301ee
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        Intel License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "_cvaux.h"
#include "cvtypes.h"
#include <float.h>
#include <limits.h>
#include "cv.h"

/* Valery Mosyagin */

#undef quad

#define EPS64D 1e-9

int cvComputeEssentialMatrix( CvMatr32f rotMatr,
                              CvMatr32f transVect,
                              CvMatr32f essMatr);

int cvConvertEssential2Fundamental( CvMatr32f essMatr,
                                    CvMatr32f fundMatr,
                                    CvMatr32f cameraMatr1,
                                    CvMatr32f cameraMatr2);

int cvComputeEpipolesFromFundMatrix( CvMatr32f fundMatr,
                                     CvPoint3D32f* epipole1,
                                     CvPoint3D32f* epipole2);

void icvTestPoint( CvPoint2D64d testPoint,
                   CvVect64d line1, CvVect64d line2,
                   CvPoint2D64d basePoint,
                   int* result);



int icvGetSymPoint3D( CvPoint3D64d pointCorner,
                      CvPoint3D64d point1,
                      CvPoint3D64d point2,
                      CvPoint3D64d *pointSym2)
{
    double len1,len2;
    double alpha;
    icvGetPieceLength3D(pointCorner,point1,&len1);
    if( len1 < EPS64D )
    {
        return CV_BADARG_ERR;
    }
    icvGetPieceLength3D(pointCorner,point2,&len2);
    alpha = len2 / len1;

    pointSym2->x = pointCorner.x + alpha*(point1.x - pointCorner.x);
    pointSym2->y = pointCorner.y + alpha*(point1.y - pointCorner.y);
    pointSym2->z = pointCorner.z + alpha*(point1.z - pointCorner.z);
    return CV_NO_ERR;
}

/* author Valery Mosyagin */

/* Compute 3D point for scanline and alpha betta */
int icvCompute3DPoint( double alpha, double betta,
                       CvStereoLineCoeff* coeffs,
                       CvPoint3D64d* point)
{
    double partX;
    double partY;
    double partZ;
    double partAll;
    double invPartAll;

    double alphabetta = alpha*betta;

    partAll = alpha - betta;
    if( fabs(partAll) > 0.00001 ) /* alpha must be > betta */
    {
        partX = coeffs->Xcoef        + coeffs->XcoefA *alpha +
                coeffs->XcoefB*betta + coeffs->XcoefAB*alphabetta;

        partY = coeffs->Ycoef        + coeffs->YcoefA *alpha +
                coeffs->YcoefB*betta + coeffs->YcoefAB*alphabetta;

        partZ = coeffs->Zcoef        + coeffs->ZcoefA *alpha +
                coeffs->ZcoefB*betta + coeffs->ZcoefAB*alphabetta;

        invPartAll = 1.0 / partAll;

        point->x = partX * invPartAll;
        point->y = partY * invPartAll;
        point->z = partZ * invPartAll;
        return CV_NO_ERR;
    }
    else
    {
        return CV_BADFACTOR_ERR;
    }
}

/*--------------------------------------------------------------------------------------*/

/* Compute rotate matrix and trans vector for change system */
int icvCreateConvertMatrVect( CvMatr64d rotMatr1,
                              CvMatr64d transVect1,
                              CvMatr64d rotMatr2,
                              CvMatr64d transVect2,
                              CvMatr64d convRotMatr,
                              CvMatr64d convTransVect)
{
    double invRotMatr2[9];
    double tmpVect[3];

    icvInvertMatrix_64d(rotMatr2,3,invRotMatr2);
    /* Test for error */

    icvMulMatrix_64d( rotMatr1,
                      3,3,
                      invRotMatr2,
                      3,3,
                      convRotMatr);

    icvMulMatrix_64d( convRotMatr,
                      3,3,
                      transVect2,
                      1,3,
                      tmpVect);

    icvSubVector_64d(transVect1,tmpVect,convTransVect,3);

    return CV_NO_ERR;
}

/*--------------------------------------------------------------------------------------*/

/* Compute point coordinates in other system */
int icvConvertPointSystem(CvPoint3D64d M2,
                          CvPoint3D64d* M1,
                          CvMatr64d rotMatr,
                          CvMatr64d transVect
                          )
{
    double tmpVect[3];

    icvMulMatrix_64d( rotMatr,
                      3,3,
                      (double*)&M2,
                      1,3,
                      tmpVect);

    icvAddVector_64d(tmpVect,transVect,(double*)M1,3);

    return CV_NO_ERR;
}
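
/* Illustrative sketch (excluded from the build): how the two helpers above are
   meant to compose. Assuming each camera maps world points as X_cam = R*X_world + T,
   icvCreateConvertMatrVect produces the transform that takes camera-2 coordinates
   into the camera-1 frame, and icvConvertPointSystem applies it: M1 = convR*M2 + convT.
   The function name and all numeric values below are hypothetical. */
#if 0
static void exampleConvertPointSketch( void )
{
    double rotMatr1[9]   = { 1,0,0, 0,1,0, 0,0,1 };   /* camera 1: identity pose */
    double transVect1[3] = { 0, 0, 0 };
    double rotMatr2[9]   = { 1,0,0, 0,1,0, 0,0,1 };   /* camera 2: shifted along X */
    double transVect2[3] = { -100, 0, 0 };

    double convRotMatr[9];
    double convTransVect[3];
    icvCreateConvertMatrVect( rotMatr1, transVect1,
                              rotMatr2, transVect2,
                              convRotMatr, convTransVect );

    CvPoint3D64d M2 = { 10, 20, 500 };                /* point in camera-2 frame */
    CvPoint3D64d M1;
    icvConvertPointSystem( M2, &M1, convRotMatr, convTransVect );
    /* With these values convRotMatr = I, convTransVect = (100,0,0),
       so M1 = (110, 20, 500). */
}
#endif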
/*--------------------------------------------------------------------------------------*/
int icvComputeCoeffForStereoV3( double quad1[4][2],
                                double quad2[4][2],
                                int numScanlines,
                                CvMatr64d camMatr1,
                                CvMatr64d rotMatr1,
                                CvMatr64d transVect1,
                                CvMatr64d camMatr2,
                                CvMatr64d rotMatr2,
                                CvMatr64d transVect2,
                                CvStereoLineCoeff* startCoeffs,
                                int* needSwapCamera)
{
    /* For each pair */
    /* In this function we must define position of cameras */

    CvPoint2D64d point1;
    CvPoint2D64d point2;
    CvPoint2D64d point3;
    CvPoint2D64d point4;

    int currLine;
    *needSwapCamera = 0;
    for( currLine = 0; currLine < numScanlines; currLine++ )
    {
        /* Compute points */
        double alpha = ((double)currLine)/((double)(numScanlines)); /* maybe - 1 */

        point1.x = (1.0 - alpha) * quad1[0][0] + alpha * quad1[3][0];
        point1.y = (1.0 - alpha) * quad1[0][1] + alpha * quad1[3][1];

        point2.x = (1.0 - alpha) * quad1[1][0] + alpha * quad1[2][0];
        point2.y = (1.0 - alpha) * quad1[1][1] + alpha * quad1[2][1];

        point3.x = (1.0 - alpha) * quad2[0][0] + alpha * quad2[3][0];
        point3.y = (1.0 - alpha) * quad2[0][1] + alpha * quad2[3][1];

        point4.x = (1.0 - alpha) * quad2[1][0] + alpha * quad2[2][0];
        point4.y = (1.0 - alpha) * quad2[1][1] + alpha * quad2[2][1];

        /* We can compute coeffs for this line */
        icvComCoeffForLine( point1,
                            point2,
                            point3,
                            point4,
                            camMatr1,
                            rotMatr1,
                            transVect1,
                            camMatr2,
                            rotMatr2,
                            transVect2,
                            &startCoeffs[currLine],
                            needSwapCamera);
    }
    return CV_NO_ERR;
}
/*--------------------------------------------------------------------------------------*/
int icvComputeCoeffForStereoNew( double quad1[4][2],
                                 double quad2[4][2],
                                 int numScanlines,
                                 CvMatr32f camMatr1,
                                 CvMatr32f rotMatr1,
                                 CvMatr32f transVect1,
                                 CvMatr32f camMatr2,
                                 CvStereoLineCoeff* startCoeffs,
                                 int* needSwapCamera)
{
    /* Convert data */

    double camMatr1_64d[9];
    double camMatr2_64d[9];

    double rotMatr1_64d[9];
    double transVect1_64d[3];

    double rotMatr2_64d[9];
    double transVect2_64d[3];

    icvCvt_32f_64d(camMatr1,camMatr1_64d,9);
    icvCvt_32f_64d(camMatr2,camMatr2_64d,9);

    icvCvt_32f_64d(rotMatr1,rotMatr1_64d,9);
    icvCvt_32f_64d(transVect1,transVect1_64d,3);

    rotMatr2_64d[0] = 1;
    rotMatr2_64d[1] = 0;
    rotMatr2_64d[2] = 0;
    rotMatr2_64d[3] = 0;
    rotMatr2_64d[4] = 1;
    rotMatr2_64d[5] = 0;
    rotMatr2_64d[6] = 0;
    rotMatr2_64d[7] = 0;
    rotMatr2_64d[8] = 1;

    transVect2_64d[0] = 0;
    transVect2_64d[1] = 0;
    transVect2_64d[2] = 0;

    int status = icvComputeCoeffForStereoV3( quad1,
                                             quad2,
                                             numScanlines,
                                             camMatr1_64d,
                                             rotMatr1_64d,
                                             transVect1_64d,
                                             camMatr2_64d,
                                             rotMatr2_64d,
                                             transVect2_64d,
                                             startCoeffs,
                                             needSwapCamera);


    return status;

}
/*--------------------------------------------------------------------------------------*/
int icvComputeCoeffForStereo( CvStereoCamera* stereoCamera)
{
    double quad1[4][2];
    double quad2[4][2];

    int i;
    for( i = 0; i < 4; i++ )
    {
        quad1[i][0] = stereoCamera->quad[0][i].x;
        quad1[i][1] = stereoCamera->quad[0][i].y;

        quad2[i][0] = stereoCamera->quad[1][i].x;
        quad2[i][1] = stereoCamera->quad[1][i].y;
    }

    icvComputeCoeffForStereoNew( quad1,
                                 quad2,
                                 stereoCamera->warpSize.height,
stereoCamera->camera[0]->matrix, 328 stereoCamera->rotMatrix, 329 stereoCamera->transVector, 330 stereoCamera->camera[1]->matrix, 331 stereoCamera->lineCoeffs, 332 &(stereoCamera->needSwapCameras)); 333 return CV_OK; 334} 335 336 337 338/*--------------------------------------------------------------------------------------*/ 339int icvComCoeffForLine( CvPoint2D64d point1, 340 CvPoint2D64d point2, 341 CvPoint2D64d point3, 342 CvPoint2D64d point4, 343 CvMatr64d camMatr1, 344 CvMatr64d rotMatr1, 345 CvMatr64d transVect1, 346 CvMatr64d camMatr2, 347 CvMatr64d rotMatr2, 348 CvMatr64d transVect2, 349 CvStereoLineCoeff* coeffs, 350 int* needSwapCamera) 351{ 352 /* Get direction for all points */ 353 /* Direction for camera 1 */ 354 355 double direct1[3]; 356 double direct2[3]; 357 double camPoint1[3]; 358 359 double directS3[3]; 360 double directS4[3]; 361 double direct3[3]; 362 double direct4[3]; 363 double camPoint2[3]; 364 365 icvGetDirectionForPoint( point1, 366 camMatr1, 367 (CvPoint3D64d*)direct1); 368 369 icvGetDirectionForPoint( point2, 370 camMatr1, 371 (CvPoint3D64d*)direct2); 372 373 /* Direction for camera 2 */ 374 375 icvGetDirectionForPoint( point3, 376 camMatr2, 377 (CvPoint3D64d*)directS3); 378 379 icvGetDirectionForPoint( point4, 380 camMatr2, 381 (CvPoint3D64d*)directS4); 382 383 /* Create convertion for camera 2: two direction and camera point */ 384 385 double convRotMatr[9]; 386 double convTransVect[3]; 387 388 icvCreateConvertMatrVect( rotMatr1, 389 transVect1, 390 rotMatr2, 391 transVect2, 392 convRotMatr, 393 convTransVect); 394 395 double zeroVect[3]; 396 zeroVect[0] = zeroVect[1] = zeroVect[2] = 0.0; 397 camPoint1[0] = camPoint1[1] = camPoint1[2] = 0.0; 398 399 icvConvertPointSystem(*((CvPoint3D64d*)directS3),(CvPoint3D64d*)direct3,convRotMatr,convTransVect); 400 icvConvertPointSystem(*((CvPoint3D64d*)directS4),(CvPoint3D64d*)direct4,convRotMatr,convTransVect); 401 icvConvertPointSystem(*((CvPoint3D64d*)zeroVect),(CvPoint3D64d*)camPoint2,convRotMatr,convTransVect); 402 403 double pointB[3]; 404 405 int postype = 0; 406 407 /* Changed order */ 408 /* Compute point B: xB,yB,zB */ 409 icvGetCrossLines(*((CvPoint3D64d*)camPoint1),*((CvPoint3D64d*)direct2), 410 *((CvPoint3D64d*)camPoint2),*((CvPoint3D64d*)direct3), 411 (CvPoint3D64d*)pointB); 412 413 if( pointB[2] < 0 )/* If negative use other lines for cross */ 414 { 415 postype = 1; 416 icvGetCrossLines(*((CvPoint3D64d*)camPoint1),*((CvPoint3D64d*)direct1), 417 *((CvPoint3D64d*)camPoint2),*((CvPoint3D64d*)direct4), 418 (CvPoint3D64d*)pointB); 419 } 420 421 CvPoint3D64d pointNewA; 422 CvPoint3D64d pointNewC; 423 424 pointNewA.x = pointNewA.y = pointNewA.z = 0; 425 pointNewC.x = pointNewC.y = pointNewC.z = 0; 426 427 if( postype == 0 ) 428 { 429 icvGetSymPoint3D( *((CvPoint3D64d*)camPoint1), 430 *((CvPoint3D64d*)direct1), 431 *((CvPoint3D64d*)pointB), 432 &pointNewA); 433 434 icvGetSymPoint3D( *((CvPoint3D64d*)camPoint2), 435 *((CvPoint3D64d*)direct4), 436 *((CvPoint3D64d*)pointB), 437 &pointNewC); 438 } 439 else 440 {/* In this case we must change cameras */ 441 *needSwapCamera = 1; 442 icvGetSymPoint3D( *((CvPoint3D64d*)camPoint2), 443 *((CvPoint3D64d*)direct3), 444 *((CvPoint3D64d*)pointB), 445 &pointNewA); 446 447 icvGetSymPoint3D( *((CvPoint3D64d*)camPoint1), 448 *((CvPoint3D64d*)direct2), 449 *((CvPoint3D64d*)pointB), 450 &pointNewC); 451 } 452 453 454 double gamma; 455 456 double x1,y1,z1; 457 458 x1 = camPoint1[0]; 459 y1 = camPoint1[1]; 460 z1 = camPoint1[2]; 461 462 double xA,yA,zA; 463 double xB,yB,zB; 464 double 
xC,yC,zC; 465 466 xA = pointNewA.x; 467 yA = pointNewA.y; 468 zA = pointNewA.z; 469 470 xB = pointB[0]; 471 yB = pointB[1]; 472 zB = pointB[2]; 473 474 xC = pointNewC.x; 475 yC = pointNewC.y; 476 zC = pointNewC.z; 477 478 double len1,len2; 479 len1 = sqrt( (xA-xB)*(xA-xB) + (yA-yB)*(yA-yB) + (zA-zB)*(zA-zB) ); 480 len2 = sqrt( (xB-xC)*(xB-xC) + (yB-yC)*(yB-yC) + (zB-zC)*(zB-zC) ); 481 gamma = len2 / len1; 482 483 icvComputeStereoLineCoeffs( pointNewA, 484 *((CvPoint3D64d*)pointB), 485 *((CvPoint3D64d*)camPoint1), 486 gamma, 487 coeffs); 488 489 return CV_NO_ERR; 490} 491 492 493/*--------------------------------------------------------------------------------------*/ 494 495int icvGetDirectionForPoint( CvPoint2D64d point, 496 CvMatr64d camMatr, 497 CvPoint3D64d* direct) 498{ 499 /* */ 500 double invMatr[9]; 501 502 /* Invert matrix */ 503 504 icvInvertMatrix_64d(camMatr,3,invMatr); 505 /* TEST FOR ERRORS */ 506 507 double vect[3]; 508 vect[0] = point.x; 509 vect[1] = point.y; 510 vect[2] = 1; 511 512 /* Mul matr */ 513 icvMulMatrix_64d( invMatr, 514 3,3, 515 vect, 516 1,3, 517 (double*)direct); 518 519 return CV_NO_ERR; 520} 521 522/*--------------------------------------------------------------------------------------*/ 523 524int icvGetCrossLines(CvPoint3D64d point11,CvPoint3D64d point12, 525 CvPoint3D64d point21,CvPoint3D64d point22, 526 CvPoint3D64d* midPoint) 527{ 528 double xM,yM,zM; 529 double xN,yN,zN; 530 531 double xA,yA,zA; 532 double xB,yB,zB; 533 534 double xC,yC,zC; 535 double xD,yD,zD; 536 537 xA = point11.x; 538 yA = point11.y; 539 zA = point11.z; 540 541 xB = point12.x; 542 yB = point12.y; 543 zB = point12.z; 544 545 xC = point21.x; 546 yC = point21.y; 547 zC = point21.z; 548 549 xD = point22.x; 550 yD = point22.y; 551 zD = point22.z; 552 553 double a11,a12,a21,a22; 554 double b1,b2; 555 556 a11 = (xB-xA)*(xB-xA)+(yB-yA)*(yB-yA)+(zB-zA)*(zB-zA); 557 a12 = -(xD-xC)*(xB-xA)-(yD-yC)*(yB-yA)-(zD-zC)*(zB-zA); 558 a21 = (xB-xA)*(xD-xC)+(yB-yA)*(yD-yC)+(zB-zA)*(zD-zC); 559 a22 = -(xD-xC)*(xD-xC)-(yD-yC)*(yD-yC)-(zD-zC)*(zD-zC); 560 b1 = -( (xA-xC)*(xB-xA)+(yA-yC)*(yB-yA)+(zA-zC)*(zB-zA) ); 561 b2 = -( (xA-xC)*(xD-xC)+(yA-yC)*(yD-yC)+(zA-zC)*(zD-zC) ); 562 563 double delta; 564 double deltaA,deltaB; 565 double alpha,betta; 566 567 delta = a11*a22-a12*a21; 568 569 if( fabs(delta) < EPS64D ) 570 { 571 /*return ERROR;*/ 572 } 573 574 deltaA = b1*a22-b2*a12; 575 deltaB = a11*b2-b1*a21; 576 577 alpha = deltaA / delta; 578 betta = deltaB / delta; 579 580 xM = xA+alpha*(xB-xA); 581 yM = yA+alpha*(yB-yA); 582 zM = zA+alpha*(zB-zA); 583 584 xN = xC+betta*(xD-xC); 585 yN = yC+betta*(yD-yC); 586 zN = zC+betta*(zD-zC); 587 588 /* Compute middle point */ 589 midPoint->x = (xM + xN) * 0.5; 590 midPoint->y = (yM + yN) * 0.5; 591 midPoint->z = (zM + zN) * 0.5; 592 593 return CV_NO_ERR; 594} 595 596/*--------------------------------------------------------------------------------------*/ 597 598int icvComputeStereoLineCoeffs( CvPoint3D64d pointA, 599 CvPoint3D64d pointB, 600 CvPoint3D64d pointCam1, 601 double gamma, 602 CvStereoLineCoeff* coeffs) 603{ 604 double x1,y1,z1; 605 606 x1 = pointCam1.x; 607 y1 = pointCam1.y; 608 z1 = pointCam1.z; 609 610 double xA,yA,zA; 611 double xB,yB,zB; 612 613 xA = pointA.x; 614 yA = pointA.y; 615 zA = pointA.z; 616 617 xB = pointB.x; 618 yB = pointB.y; 619 zB = pointB.z; 620 621 if( gamma > 0 ) 622 { 623 coeffs->Xcoef = -x1 + xA; 624 coeffs->XcoefA = xB + x1 - xA; 625 coeffs->XcoefB = -xA - gamma * x1 + gamma * xA; 626 coeffs->XcoefAB = -xB + xA + gamma * xB - 
gamma * xA; 627 628 coeffs->Ycoef = -y1 + yA; 629 coeffs->YcoefA = yB + y1 - yA; 630 coeffs->YcoefB = -yA - gamma * y1 + gamma * yA; 631 coeffs->YcoefAB = -yB + yA + gamma * yB - gamma * yA; 632 633 coeffs->Zcoef = -z1 + zA; 634 coeffs->ZcoefA = zB + z1 - zA; 635 coeffs->ZcoefB = -zA - gamma * z1 + gamma * zA; 636 coeffs->ZcoefAB = -zB + zA + gamma * zB - gamma * zA; 637 } 638 else 639 { 640 gamma = - gamma; 641 coeffs->Xcoef = -( -x1 + xA); 642 coeffs->XcoefB = -( xB + x1 - xA); 643 coeffs->XcoefA = -( -xA - gamma * x1 + gamma * xA); 644 coeffs->XcoefAB = -( -xB + xA + gamma * xB - gamma * xA); 645 646 coeffs->Ycoef = -( -y1 + yA); 647 coeffs->YcoefB = -( yB + y1 - yA); 648 coeffs->YcoefA = -( -yA - gamma * y1 + gamma * yA); 649 coeffs->YcoefAB = -( -yB + yA + gamma * yB - gamma * yA); 650 651 coeffs->Zcoef = -( -z1 + zA); 652 coeffs->ZcoefB = -( zB + z1 - zA); 653 coeffs->ZcoefA = -( -zA - gamma * z1 + gamma * zA); 654 coeffs->ZcoefAB = -( -zB + zA + gamma * zB - gamma * zA); 655 } 656 657 658 659 return CV_NO_ERR; 660} 661/*--------------------------------------------------------------------------------------*/ 662 663 664/*---------------------------------------------------------------------------------------*/ 665 666/* This function get minimum angle started at point which contains rect */ 667int icvGetAngleLine( CvPoint2D64d startPoint, CvSize imageSize,CvPoint2D64d *point1,CvPoint2D64d *point2) 668{ 669 /* Get crosslines with image corners */ 670 671 /* Find four lines */ 672 673 CvPoint2D64d pa,pb,pc,pd; 674 675 pa.x = 0; 676 pa.y = 0; 677 678 pb.x = imageSize.width-1; 679 pb.y = 0; 680 681 pd.x = imageSize.width-1; 682 pd.y = imageSize.height-1; 683 684 pc.x = 0; 685 pc.y = imageSize.height-1; 686 687 /* We can compute points for angle */ 688 /* Test for place section */ 689 690 if( startPoint.x < 0 ) 691 {/* 1,4,7 */ 692 if( startPoint.y < 0) 693 {/* 1 */ 694 *point1 = pb; 695 *point2 = pc; 696 } 697 else if( startPoint.y > imageSize.height-1 ) 698 {/* 7 */ 699 *point1 = pa; 700 *point2 = pd; 701 } 702 else 703 {/* 4 */ 704 *point1 = pa; 705 *point2 = pc; 706 } 707 } 708 else if ( startPoint.x > imageSize.width-1 ) 709 {/* 3,6,9 */ 710 if( startPoint.y < 0 ) 711 {/* 3 */ 712 *point1 = pa; 713 *point2 = pd; 714 } 715 else if ( startPoint.y > imageSize.height-1 ) 716 {/* 9 */ 717 *point1 = pb; 718 *point2 = pc; 719 } 720 else 721 {/* 6 */ 722 *point1 = pb; 723 *point2 = pd; 724 } 725 } 726 else 727 {/* 2,5,8 */ 728 if( startPoint.y < 0 ) 729 {/* 2 */ 730 if( startPoint.x < imageSize.width/2 ) 731 { 732 *point1 = pb; 733 *point2 = pa; 734 } 735 else 736 { 737 *point1 = pa; 738 *point2 = pb; 739 } 740 } 741 else if( startPoint.y > imageSize.height-1 ) 742 {/* 8 */ 743 if( startPoint.x < imageSize.width/2 ) 744 { 745 *point1 = pc; 746 *point2 = pd; 747 } 748 else 749 { 750 *point1 = pd; 751 *point2 = pc; 752 } 753 } 754 else 755 {/* 5 - point in the image */ 756 return 2; 757 } 758 } 759 return 0; 760}/* GetAngleLine */ 761 762/*---------------------------------------------------------------------------------------*/ 763 764void icvGetCoefForPiece( CvPoint2D64d p_start,CvPoint2D64d p_end, 765 double *a,double *b,double *c, 766 int* result) 767{ 768 double det; 769 double detA,detB,detC; 770 771 det = p_start.x*p_end.y+p_end.x+p_start.y-p_end.y-p_start.y*p_end.x-p_start.x; 772 if( fabs(det) < EPS64D)/* Error */ 773 { 774 *result = 0; 775 return; 776 } 777 778 detA = p_start.y - p_end.y; 779 detB = p_end.x - p_start.x; 780 detC = p_start.x*p_end.y - p_end.x*p_start.y; 781 782 double 
invDet = 1.0 / det; 783 *a = detA * invDet; 784 *b = detB * invDet; 785 *c = detC * invDet; 786 787 *result = 1; 788 return; 789} 790 791/*---------------------------------------------------------------------------------------*/ 792 793/* Get common area of rectifying */ 794void icvGetCommonArea( CvSize imageSize, 795 CvPoint3D64d epipole1,CvPoint3D64d epipole2, 796 CvMatr64d fundMatr, 797 CvVect64d coeff11,CvVect64d coeff12, 798 CvVect64d coeff21,CvVect64d coeff22, 799 int* result) 800{ 801 int res = 0; 802 CvPoint2D64d point11; 803 CvPoint2D64d point12; 804 CvPoint2D64d point21; 805 CvPoint2D64d point22; 806 807 double corr11[3]; 808 double corr12[3]; 809 double corr21[3]; 810 double corr22[3]; 811 812 double pointW11[3]; 813 double pointW12[3]; 814 double pointW21[3]; 815 double pointW22[3]; 816 817 double transFundMatr[3*3]; 818 /* Compute transpose of fundamental matrix */ 819 icvTransposeMatrix_64d( fundMatr, 3, 3, transFundMatr ); 820 821 CvPoint2D64d epipole1_2d; 822 CvPoint2D64d epipole2_2d; 823 824 if( fabs(epipole1.z) < 1e-8 ) 825 {/* epipole1 in infinity */ 826 *result = 0; 827 return; 828 } 829 epipole1_2d.x = epipole1.x / epipole1.z; 830 epipole1_2d.y = epipole1.y / epipole1.z; 831 832 if( fabs(epipole2.z) < 1e-8 ) 833 {/* epipole2 in infinity */ 834 *result = 0; 835 return; 836 } 837 epipole2_2d.x = epipole2.x / epipole2.z; 838 epipole2_2d.y = epipole2.y / epipole2.z; 839 840 int stat = icvGetAngleLine( epipole1_2d, imageSize,&point11,&point12); 841 if( stat == 2 ) 842 { 843 /* No angle */ 844 *result = 0; 845 return; 846 } 847 848 stat = icvGetAngleLine( epipole2_2d, imageSize,&point21,&point22); 849 if( stat == 2 ) 850 { 851 /* No angle */ 852 *result = 0; 853 return; 854 } 855 856 /* ============= Computation for line 1 ================ */ 857 /* Find correspondence line for angle points11 */ 858 /* corr21 = Fund'*p1 */ 859 860 pointW11[0] = point11.x; 861 pointW11[1] = point11.y; 862 pointW11[2] = 1.0; 863 864 icvTransformVector_64d( transFundMatr, /* !!! Modified from not transposed */ 865 pointW11, 866 corr21, 867 3,3); 868 869 /* Find crossing of line with image 2 */ 870 CvPoint2D64d start; 871 CvPoint2D64d end; 872 icvGetCrossRectDirect( imageSize, 873 corr21[0],corr21[1],corr21[2], 874 &start,&end, 875 &res); 876 877 if( res == 0 ) 878 {/* We have not cross */ 879 /* We must define new angle */ 880 881 pointW21[0] = point21.x; 882 pointW21[1] = point21.y; 883 pointW21[2] = 1.0; 884 885 /* Find correspondence line for this angle points */ 886 /* We know point and try to get corr line */ 887 /* For point21 */ 888 /* corr11 = Fund * p21 */ 889 890 icvTransformVector_64d( fundMatr, /* !!! Modified */ 891 pointW21, 892 corr11, 893 3,3); 894 895 /* We have cross. And it's result cross for up line. 
Set result coefs */

        /* Set coefs for line 1 image 1 */
        coeff11[0] = corr11[0];
        coeff11[1] = corr11[1];
        coeff11[2] = corr11[2];

        /* Set coefs for line 1 image 2 */
        icvGetCoefForPiece( epipole2_2d,point21,
                            &coeff21[0],&coeff21[1],&coeff21[2],
                            &res);
        if( res == 0 )
        {
            *result = 0;
            return;/* Error */
        }
    }
    else
    {/* Line 1 cross image 2 */
        /* Set coefs for line 1 image 1 */
        icvGetCoefForPiece( epipole1_2d,point11,
                            &coeff11[0],&coeff11[1],&coeff11[2],
                            &res);
        if( res == 0 )
        {
            *result = 0;
            return;/* Error */
        }

        /* Set coefs for line 1 image 2 */
        coeff21[0] = corr21[0];
        coeff21[1] = corr21[1];
        coeff21[2] = corr21[2];

    }

    /* ============= Computation for line 2 ================ */
    /* Find correspondence line for angle points11 */
    /* corr22 = Fund*p2 */

    pointW12[0] = point12.x;
    pointW12[1] = point12.y;
    pointW12[2] = 1.0;

    icvTransformVector_64d( transFundMatr,
                            pointW12,
                            corr22,
                            3,3);

    /* Find crossing of line with image 2 */
    icvGetCrossRectDirect( imageSize,
                           corr22[0],corr22[1],corr22[2],
                           &start,&end,
                           &res);

    if( res == 0 )
    {/* We have not cross */
        /* We must define new angle */

        pointW22[0] = point22.x;
        pointW22[1] = point22.y;
        pointW22[2] = 1.0;

        /* Find correspondence line for this angle points */
        /* We know point and try to get corr line */
        /* For point21 */
        /* corr2 = Fund' * p1 */

        icvTransformVector_64d( fundMatr,
                                pointW22,
                                corr12,
                                3,3);


        /* We have cross. And it's result cross for down line. Set result coefs */

        /* Set coefs for line 2 image 1 */
        coeff12[0] = corr12[0];
        coeff12[1] = corr12[1];
        coeff12[2] = corr12[2];

        /* Set coefs for line 1 image 2 */
        icvGetCoefForPiece( epipole2_2d,point22,
                            &coeff22[0],&coeff22[1],&coeff22[2],
                            &res);
        if( res == 0 )
        {
            *result = 0;
            return;/* Error */
        }
    }
    else
    {/* Line 2 cross image 2 */
        /* Set coefs for line 2 image 1 */
        icvGetCoefForPiece( epipole1_2d,point12,
                            &coeff12[0],&coeff12[1],&coeff12[2],
                            &res);
        if( res == 0 )
        {
            *result = 0;
            return;/* Error */
        }

        /* Set coefs for line 1 image 2 */
        coeff22[0] = corr22[0];
        coeff22[1] = corr22[1];
        coeff22[2] = corr22[2];

    }

    /* Now we know common area */

    return;

}/* GetCommonArea */

/*---------------------------------------------------------------------------------------*/

/* Get cross for direction1 and direction2 */
/* Result = 1 - cross */
/* Result = 2 - parallel and not equal */
/* Result = 3 - parallel and equal */

void icvGetCrossDirectDirect( CvVect64d direct1,CvVect64d direct2,
                              CvPoint2D64d *cross,int* result)
{
    double det  = direct1[0]*direct2[1] - direct2[0]*direct1[1];
    double detx = -direct1[2]*direct2[1] + direct1[1]*direct2[2];

    if( fabs(det) > EPS64D )
    {/* Have cross */
        cross->x = detx/det;
        cross->y = (-direct1[0]*direct2[2] + direct2[0]*direct1[2])/det;
        *result = 1;
    }
    else
    {/* may be parallel */
        if( fabs(detx) > EPS64D )
        {/* parallel and not equal */
            *result = 2;
        }
        else
        {/* equals */
            *result = 3;
        }
    }

    return;
}

/*---------------------------------------------------------------------------------------*/
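
/* Worked example (sketch) for the Cramer's-rule branch of icvGetCrossDirectDirect,
   using hypothetical line coefficients: intersect x - 4 = 0 and y - 3 = 0, i.e.
   direct1 = {1, 0, -4}, direct2 = {0, 1, -3}.
     det  = 1*1 - 0*0                 =  1
     detx = -(-4)*1 + 0*(-3)          =  4   ->  cross->x = 4
     cross->y = (-1*(-3) + 0*(-4))/1  =  3
   so the function reports cross = (4, 3) with result = 1. */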
1046 1047/* Get cross for piece p1,p2 and direction a,b,c */ 1048/* Result = 0 - no cross */ 1049/* Result = 1 - cross */ 1050/* Result = 2 - parallel and not equal */ 1051/* Result = 3 - parallel and equal */ 1052 1053void icvGetCrossPieceDirect( CvPoint2D64d p_start,CvPoint2D64d p_end, 1054 double a,double b,double c, 1055 CvPoint2D64d *cross,int* result) 1056{ 1057 1058 if( (a*p_start.x + b*p_start.y + c) * (a*p_end.x + b*p_end.y + c) <= 0 ) 1059 {/* Have cross */ 1060 double det; 1061 double detxc,detyc; 1062 1063 det = a * (p_end.x - p_start.x) + b * (p_end.y - p_start.y); 1064 1065 if( fabs(det) < EPS64D ) 1066 {/* lines are parallel and may be equal or line is point */ 1067 if( fabs(a*p_start.x + b*p_start.y + c) < EPS64D ) 1068 {/* line is point or not diff */ 1069 *result = 3; 1070 return; 1071 } 1072 else 1073 { 1074 *result = 2; 1075 } 1076 return; 1077 } 1078 1079 detxc = b*(p_end.y*p_start.x - p_start.y*p_end.x) + c*(p_start.x - p_end.x); 1080 detyc = a*(p_end.x*p_start.y - p_start.x*p_end.y) + c*(p_start.y - p_end.y); 1081 1082 cross->x = detxc / det; 1083 cross->y = detyc / det; 1084 *result = 1; 1085 1086 } 1087 else 1088 { 1089 *result = 0; 1090 } 1091 return; 1092} 1093/*--------------------------------------------------------------------------------------*/ 1094 1095void icvGetCrossPiecePiece( CvPoint2D64d p1_start,CvPoint2D64d p1_end, 1096 CvPoint2D64d p2_start,CvPoint2D64d p2_end, 1097 CvPoint2D64d* cross, 1098 int* result) 1099{ 1100 double ex1,ey1,ex2,ey2; 1101 double px1,py1,px2,py2; 1102 double del; 1103 double delA,delB,delX,delY; 1104 double alpha,betta; 1105 1106 ex1 = p1_start.x; 1107 ey1 = p1_start.y; 1108 ex2 = p1_end.x; 1109 ey2 = p1_end.y; 1110 1111 px1 = p2_start.x; 1112 py1 = p2_start.y; 1113 px2 = p2_end.x; 1114 py2 = p2_end.y; 1115 1116 del = (py1-py2)*(ex1-ex2)-(px1-px2)*(ey1-ey2); 1117 if( fabs(del) <= EPS64D ) 1118 {/* May be they are parallel !!! */ 1119 *result = 0; 1120 return; 1121 } 1122 1123 delA = (ey1-ey2)*(ex1-px1) + (ex1-ex2)*(py1-ey1); 1124 delB = (py1-py2)*(ex1-px1) + (px1-px2)*(py1-ey1); 1125 1126 alpha = delA / del; 1127 betta = delB / del; 1128 1129 if( alpha < 0 || alpha > 1.0 || betta < 0 || betta > 1.0) 1130 { 1131 *result = 0; 1132 return; 1133 } 1134 1135 delX = (px1-px2)*(ey1*(ex1-ex2)-ex1*(ey1-ey2))+ 1136 (ex1-ex2)*(px1*(py1-py2)-py1*(px1-px2)); 1137 1138 delY = (py1-py2)*(ey1*(ex1-ex2)-ex1*(ey1-ey2))+ 1139 (ey1-ey2)*(px1*(py1-py2)-py1*(px1-px2)); 1140 1141 cross->x = delX / del; 1142 cross->y = delY / del; 1143 1144 *result = 1; 1145 return; 1146} 1147 1148 1149/*---------------------------------------------------------------------------------------*/ 1150 1151void icvGetPieceLength(CvPoint2D64d point1,CvPoint2D64d point2,double* dist) 1152{ 1153 double dx = point2.x - point1.x; 1154 double dy = point2.y - point1.y; 1155 *dist = sqrt( dx*dx + dy*dy ); 1156 return; 1157} 1158 1159/*---------------------------------------------------------------------------------------*/ 1160 1161void icvGetPieceLength3D(CvPoint3D64d point1,CvPoint3D64d point2,double* dist) 1162{ 1163 double dx = point2.x - point1.x; 1164 double dy = point2.y - point1.y; 1165 double dz = point2.z - point1.z; 1166 *dist = sqrt( dx*dx + dy*dy + dz*dz ); 1167 return; 1168} 1169 1170/*---------------------------------------------------------------------------------------*/ 1171 1172/* Find line from epipole which cross image rect */ 1173/* Find points of cross 0 or 1 or 2. 
Return number of points in cross */ 1174void icvGetCrossRectDirect( CvSize imageSize, 1175 double a,double b,double c, 1176 CvPoint2D64d *start,CvPoint2D64d *end, 1177 int* result) 1178{ 1179 CvPoint2D64d frameBeg; 1180 CvPoint2D64d frameEnd; 1181 CvPoint2D64d cross[4]; 1182 int haveCross[4]; 1183 1184 haveCross[0] = 0; 1185 haveCross[1] = 0; 1186 haveCross[2] = 0; 1187 haveCross[3] = 0; 1188 1189 frameBeg.x = 0; 1190 frameBeg.y = 0; 1191 frameEnd.x = imageSize.width; 1192 frameEnd.y = 0; 1193 1194 icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[0],&haveCross[0]); 1195 1196 frameBeg.x = imageSize.width; 1197 frameBeg.y = 0; 1198 frameEnd.x = imageSize.width; 1199 frameEnd.y = imageSize.height; 1200 icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[1],&haveCross[1]); 1201 1202 frameBeg.x = imageSize.width; 1203 frameBeg.y = imageSize.height; 1204 frameEnd.x = 0; 1205 frameEnd.y = imageSize.height; 1206 icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[2],&haveCross[2]); 1207 1208 frameBeg.x = 0; 1209 frameBeg.y = imageSize.height; 1210 frameEnd.x = 0; 1211 frameEnd.y = 0; 1212 icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[3],&haveCross[3]); 1213 1214 double maxDist; 1215 1216 int maxI=0,maxJ=0; 1217 1218 1219 int i,j; 1220 1221 maxDist = -1.0; 1222 1223 double distance; 1224 1225 for( i = 0; i < 3; i++ ) 1226 { 1227 if( haveCross[i] == 1 ) 1228 { 1229 for( j = i + 1; j < 4; j++ ) 1230 { 1231 if( haveCross[j] == 1) 1232 {/* Compute dist */ 1233 icvGetPieceLength(cross[i],cross[j],&distance); 1234 if( distance > maxDist ) 1235 { 1236 maxI = i; 1237 maxJ = j; 1238 maxDist = distance; 1239 } 1240 } 1241 } 1242 } 1243 } 1244 1245 if( maxDist >= 0 ) 1246 {/* We have cross */ 1247 *start = cross[maxI]; 1248 *result = 1; 1249 if( maxDist > 0 ) 1250 { 1251 *end = cross[maxJ]; 1252 *result = 2; 1253 } 1254 } 1255 else 1256 { 1257 *result = 0; 1258 } 1259 1260 return; 1261}/* GetCrossRectDirect */ 1262 1263/*---------------------------------------------------------------------------------------*/ 1264void icvProjectPointToImage( CvPoint3D64d point, 1265 CvMatr64d camMatr,CvMatr64d rotMatr,CvVect64d transVect, 1266 CvPoint2D64d* projPoint) 1267{ 1268 1269 double tmpVect1[3]; 1270 double tmpVect2[3]; 1271 1272 icvMulMatrix_64d ( rotMatr, 1273 3,3, 1274 (double*)&point, 1275 1,3, 1276 tmpVect1); 1277 1278 icvAddVector_64d ( tmpVect1, transVect,tmpVect2, 3); 1279 1280 icvMulMatrix_64d ( camMatr, 1281 3,3, 1282 tmpVect2, 1283 1,3, 1284 tmpVect1); 1285 1286 projPoint->x = tmpVect1[0] / tmpVect1[2]; 1287 projPoint->y = tmpVect1[1] / tmpVect1[2]; 1288 1289 return; 1290} 1291 1292/*---------------------------------------------------------------------------------------*/ 1293/* Get quads for transform images */ 1294void icvGetQuadsTransform( 1295 CvSize imageSize, 1296 CvMatr64d camMatr1, 1297 CvMatr64d rotMatr1, 1298 CvVect64d transVect1, 1299 CvMatr64d camMatr2, 1300 CvMatr64d rotMatr2, 1301 CvVect64d transVect2, 1302 CvSize* warpSize, 1303 double quad1[4][2], 1304 double quad2[4][2], 1305 CvMatr64d fundMatr, 1306 CvPoint3D64d* epipole1, 1307 CvPoint3D64d* epipole2 1308 ) 1309{ 1310 /* First compute fundamental matrix and epipoles */ 1311 int res; 1312 1313 1314 /* Compute epipoles and fundamental matrix using new functions */ 1315 { 1316 double convRotMatr[9]; 1317 double convTransVect[3]; 1318 1319 icvCreateConvertMatrVect( rotMatr1, 1320 transVect1, 1321 rotMatr2, 1322 transVect2, 1323 convRotMatr, 1324 convTransVect); 1325 float convRotMatr_32f[9]; 1326 float convTransVect_32f[3]; 
1327 1328 icvCvt_64d_32f(convRotMatr,convRotMatr_32f,9); 1329 icvCvt_64d_32f(convTransVect,convTransVect_32f,3); 1330 1331 /* We know R and t */ 1332 /* Compute essential matrix */ 1333 float essMatr[9]; 1334 float fundMatr_32f[9]; 1335 1336 float camMatr1_32f[9]; 1337 float camMatr2_32f[9]; 1338 1339 icvCvt_64d_32f(camMatr1,camMatr1_32f,9); 1340 icvCvt_64d_32f(camMatr2,camMatr2_32f,9); 1341 1342 cvComputeEssentialMatrix( convRotMatr_32f, 1343 convTransVect_32f, 1344 essMatr); 1345 1346 cvConvertEssential2Fundamental( essMatr, 1347 fundMatr_32f, 1348 camMatr1_32f, 1349 camMatr2_32f); 1350 1351 CvPoint3D32f epipole1_32f; 1352 CvPoint3D32f epipole2_32f; 1353 1354 cvComputeEpipolesFromFundMatrix( fundMatr_32f, 1355 &epipole1_32f, 1356 &epipole2_32f); 1357 /* copy to 64d epipoles */ 1358 epipole1->x = epipole1_32f.x; 1359 epipole1->y = epipole1_32f.y; 1360 epipole1->z = epipole1_32f.z; 1361 1362 epipole2->x = epipole2_32f.x; 1363 epipole2->y = epipole2_32f.y; 1364 epipole2->z = epipole2_32f.z; 1365 1366 /* Convert fundamental matrix */ 1367 icvCvt_32f_64d(fundMatr_32f,fundMatr,9); 1368 } 1369 1370 double coeff11[3]; 1371 double coeff12[3]; 1372 double coeff21[3]; 1373 double coeff22[3]; 1374 1375 icvGetCommonArea( imageSize, 1376 *epipole1,*epipole2, 1377 fundMatr, 1378 coeff11,coeff12, 1379 coeff21,coeff22, 1380 &res); 1381 1382 CvPoint2D64d point11, point12,point21, point22; 1383 double width1,width2; 1384 double height1,height2; 1385 double tmpHeight1,tmpHeight2; 1386 1387 CvPoint2D64d epipole1_2d; 1388 CvPoint2D64d epipole2_2d; 1389 1390 /* ----- Image 1 ----- */ 1391 if( fabs(epipole1->z) < 1e-8 ) 1392 { 1393 return; 1394 } 1395 epipole1_2d.x = epipole1->x / epipole1->z; 1396 epipole1_2d.y = epipole1->y / epipole1->z; 1397 1398 icvGetCutPiece( coeff11,coeff12, 1399 epipole1_2d, 1400 imageSize, 1401 &point11,&point12, 1402 &point21,&point22, 1403 &res); 1404 1405 /* Compute distance */ 1406 icvGetPieceLength(point11,point21,&width1); 1407 icvGetPieceLength(point11,point12,&tmpHeight1); 1408 icvGetPieceLength(point21,point22,&tmpHeight2); 1409 height1 = MAX(tmpHeight1,tmpHeight2); 1410 1411 quad1[0][0] = point11.x; 1412 quad1[0][1] = point11.y; 1413 1414 quad1[1][0] = point21.x; 1415 quad1[1][1] = point21.y; 1416 1417 quad1[2][0] = point22.x; 1418 quad1[2][1] = point22.y; 1419 1420 quad1[3][0] = point12.x; 1421 quad1[3][1] = point12.y; 1422 1423 /* ----- Image 2 ----- */ 1424 if( fabs(epipole2->z) < 1e-8 ) 1425 { 1426 return; 1427 } 1428 epipole2_2d.x = epipole2->x / epipole2->z; 1429 epipole2_2d.y = epipole2->y / epipole2->z; 1430 1431 icvGetCutPiece( coeff21,coeff22, 1432 epipole2_2d, 1433 imageSize, 1434 &point11,&point12, 1435 &point21,&point22, 1436 &res); 1437 1438 /* Compute distance */ 1439 icvGetPieceLength(point11,point21,&width2); 1440 icvGetPieceLength(point11,point12,&tmpHeight1); 1441 icvGetPieceLength(point21,point22,&tmpHeight2); 1442 height2 = MAX(tmpHeight1,tmpHeight2); 1443 1444 quad2[0][0] = point11.x; 1445 quad2[0][1] = point11.y; 1446 1447 quad2[1][0] = point21.x; 1448 quad2[1][1] = point21.y; 1449 1450 quad2[2][0] = point22.x; 1451 quad2[2][1] = point22.y; 1452 1453 quad2[3][0] = point12.x; 1454 quad2[3][1] = point12.y; 1455 1456 1457 /*=======================================================*/ 1458 /* This is a new additional way to compute quads. 
*/ 1459 /* We must correct quads */ 1460 { 1461 double convRotMatr[9]; 1462 double convTransVect[3]; 1463 1464 double newQuad1[4][2]; 1465 double newQuad2[4][2]; 1466 1467 1468 icvCreateConvertMatrVect( rotMatr1, 1469 transVect1, 1470 rotMatr2, 1471 transVect2, 1472 convRotMatr, 1473 convTransVect); 1474 1475 /* -------------Compute for first image-------------- */ 1476 CvPoint2D32f pointb1; 1477 CvPoint2D32f pointe1; 1478 1479 CvPoint2D32f pointb2; 1480 CvPoint2D32f pointe2; 1481 1482 pointb1.x = (float)quad1[0][0]; 1483 pointb1.y = (float)quad1[0][1]; 1484 1485 pointe1.x = (float)quad1[3][0]; 1486 pointe1.y = (float)quad1[3][1]; 1487 1488 icvComputeeInfiniteProject1(convRotMatr, 1489 camMatr1, 1490 camMatr2, 1491 pointb1, 1492 &pointb2); 1493 1494 icvComputeeInfiniteProject1(convRotMatr, 1495 camMatr1, 1496 camMatr2, 1497 pointe1, 1498 &pointe2); 1499 1500 /* JUST TEST FOR POINT */ 1501 1502 /* Compute distances */ 1503 double dxOld,dyOld; 1504 double dxNew,dyNew; 1505 double distOld,distNew; 1506 1507 dxOld = quad2[1][0] - quad2[0][0]; 1508 dyOld = quad2[1][1] - quad2[0][1]; 1509 distOld = dxOld*dxOld + dyOld*dyOld; 1510 1511 dxNew = quad2[1][0] - pointb2.x; 1512 dyNew = quad2[1][1] - pointb2.y; 1513 distNew = dxNew*dxNew + dyNew*dyNew; 1514 1515 if( distNew > distOld ) 1516 {/* Get new points for second quad */ 1517 newQuad2[0][0] = pointb2.x; 1518 newQuad2[0][1] = pointb2.y; 1519 newQuad2[3][0] = pointe2.x; 1520 newQuad2[3][1] = pointe2.y; 1521 newQuad1[0][0] = quad1[0][0]; 1522 newQuad1[0][1] = quad1[0][1]; 1523 newQuad1[3][0] = quad1[3][0]; 1524 newQuad1[3][1] = quad1[3][1]; 1525 } 1526 else 1527 {/* Get new points for first quad */ 1528 1529 pointb2.x = (float)quad2[0][0]; 1530 pointb2.y = (float)quad2[0][1]; 1531 1532 pointe2.x = (float)quad2[3][0]; 1533 pointe2.y = (float)quad2[3][1]; 1534 1535 icvComputeeInfiniteProject2(convRotMatr, 1536 camMatr1, 1537 camMatr2, 1538 &pointb1, 1539 pointb2); 1540 1541 icvComputeeInfiniteProject2(convRotMatr, 1542 camMatr1, 1543 camMatr2, 1544 &pointe1, 1545 pointe2); 1546 1547 1548 /* JUST TEST FOR POINT */ 1549 1550 newQuad2[0][0] = quad2[0][0]; 1551 newQuad2[0][1] = quad2[0][1]; 1552 newQuad2[3][0] = quad2[3][0]; 1553 newQuad2[3][1] = quad2[3][1]; 1554 1555 newQuad1[0][0] = pointb1.x; 1556 newQuad1[0][1] = pointb1.y; 1557 newQuad1[3][0] = pointe1.x; 1558 newQuad1[3][1] = pointe1.y; 1559 } 1560 1561 /* -------------Compute for second image-------------- */ 1562 pointb1.x = (float)quad1[1][0]; 1563 pointb1.y = (float)quad1[1][1]; 1564 1565 pointe1.x = (float)quad1[2][0]; 1566 pointe1.y = (float)quad1[2][1]; 1567 1568 icvComputeeInfiniteProject1(convRotMatr, 1569 camMatr1, 1570 camMatr2, 1571 pointb1, 1572 &pointb2); 1573 1574 icvComputeeInfiniteProject1(convRotMatr, 1575 camMatr1, 1576 camMatr2, 1577 pointe1, 1578 &pointe2); 1579 1580 /* Compute distances */ 1581 1582 dxOld = quad2[0][0] - quad2[1][0]; 1583 dyOld = quad2[0][1] - quad2[1][1]; 1584 distOld = dxOld*dxOld + dyOld*dyOld; 1585 1586 dxNew = quad2[0][0] - pointb2.x; 1587 dyNew = quad2[0][1] - pointb2.y; 1588 distNew = dxNew*dxNew + dyNew*dyNew; 1589 1590 if( distNew > distOld ) 1591 {/* Get new points for second quad */ 1592 newQuad2[1][0] = pointb2.x; 1593 newQuad2[1][1] = pointb2.y; 1594 newQuad2[2][0] = pointe2.x; 1595 newQuad2[2][1] = pointe2.y; 1596 newQuad1[1][0] = quad1[1][0]; 1597 newQuad1[1][1] = quad1[1][1]; 1598 newQuad1[2][0] = quad1[2][0]; 1599 newQuad1[2][1] = quad1[2][1]; 1600 } 1601 else 1602 {/* Get new points for first quad */ 1603 1604 pointb2.x = (float)quad2[1][0]; 
1605 pointb2.y = (float)quad2[1][1]; 1606 1607 pointe2.x = (float)quad2[2][0]; 1608 pointe2.y = (float)quad2[2][1]; 1609 1610 icvComputeeInfiniteProject2(convRotMatr, 1611 camMatr1, 1612 camMatr2, 1613 &pointb1, 1614 pointb2); 1615 1616 icvComputeeInfiniteProject2(convRotMatr, 1617 camMatr1, 1618 camMatr2, 1619 &pointe1, 1620 pointe2); 1621 1622 newQuad2[1][0] = quad2[1][0]; 1623 newQuad2[1][1] = quad2[1][1]; 1624 newQuad2[2][0] = quad2[2][0]; 1625 newQuad2[2][1] = quad2[2][1]; 1626 1627 newQuad1[1][0] = pointb1.x; 1628 newQuad1[1][1] = pointb1.y; 1629 newQuad1[2][0] = pointe1.x; 1630 newQuad1[2][1] = pointe1.y; 1631 } 1632 1633 1634 1635/*-------------------------------------------------------------------------------*/ 1636 1637 /* Copy new quads to old quad */ 1638 int i; 1639 for( i = 0; i < 4; i++ ) 1640 { 1641 { 1642 quad1[i][0] = newQuad1[i][0]; 1643 quad1[i][1] = newQuad1[i][1]; 1644 quad2[i][0] = newQuad2[i][0]; 1645 quad2[i][1] = newQuad2[i][1]; 1646 } 1647 } 1648 } 1649 /*=======================================================*/ 1650 1651 double warpWidth,warpHeight; 1652 1653 warpWidth = MAX(width1,width2); 1654 warpHeight = MAX(height1,height2); 1655 1656 warpSize->width = (int)warpWidth; 1657 warpSize->height = (int)warpHeight; 1658 1659 warpSize->width = cvRound(warpWidth-1); 1660 warpSize->height = cvRound(warpHeight-1); 1661 1662/* !!! by Valery Mosyagin. this lines added just for test no warp */ 1663 warpSize->width = imageSize.width; 1664 warpSize->height = imageSize.height; 1665 1666 return; 1667} 1668 1669 1670/*---------------------------------------------------------------------------------------*/ 1671 1672void icvGetQuadsTransformNew( CvSize imageSize, 1673 CvMatr32f camMatr1, 1674 CvMatr32f camMatr2, 1675 CvMatr32f rotMatr1, 1676 CvVect32f transVect1, 1677 CvSize* warpSize, 1678 double quad1[4][2], 1679 double quad2[4][2], 1680 CvMatr32f fundMatr, 1681 CvPoint3D32f* epipole1, 1682 CvPoint3D32f* epipole2 1683 ) 1684{ 1685 /* Convert data */ 1686 /* Convert camera matrix */ 1687 double camMatr1_64d[9]; 1688 double camMatr2_64d[9]; 1689 double rotMatr1_64d[9]; 1690 double transVect1_64d[3]; 1691 double rotMatr2_64d[9]; 1692 double transVect2_64d[3]; 1693 double fundMatr_64d[9]; 1694 CvPoint3D64d epipole1_64d; 1695 CvPoint3D64d epipole2_64d; 1696 1697 icvCvt_32f_64d(camMatr1,camMatr1_64d,9); 1698 icvCvt_32f_64d(camMatr2,camMatr2_64d,9); 1699 icvCvt_32f_64d(rotMatr1,rotMatr1_64d,9); 1700 icvCvt_32f_64d(transVect1,transVect1_64d,3); 1701 1702 /* Create vector and matrix */ 1703 1704 rotMatr2_64d[0] = 1; 1705 rotMatr2_64d[1] = 0; 1706 rotMatr2_64d[2] = 0; 1707 rotMatr2_64d[3] = 0; 1708 rotMatr2_64d[4] = 1; 1709 rotMatr2_64d[5] = 0; 1710 rotMatr2_64d[6] = 0; 1711 rotMatr2_64d[7] = 0; 1712 rotMatr2_64d[8] = 1; 1713 1714 transVect2_64d[0] = 0; 1715 transVect2_64d[1] = 0; 1716 transVect2_64d[2] = 0; 1717 1718 icvGetQuadsTransform( imageSize, 1719 camMatr1_64d, 1720 rotMatr1_64d, 1721 transVect1_64d, 1722 camMatr2_64d, 1723 rotMatr2_64d, 1724 transVect2_64d, 1725 warpSize, 1726 quad1, 1727 quad2, 1728 fundMatr_64d, 1729 &epipole1_64d, 1730 &epipole2_64d 1731 ); 1732 1733 /* Convert epipoles */ 1734 epipole1->x = (float)(epipole1_64d.x); 1735 epipole1->y = (float)(epipole1_64d.y); 1736 epipole1->z = (float)(epipole1_64d.z); 1737 1738 epipole2->x = (float)(epipole2_64d.x); 1739 epipole2->y = (float)(epipole2_64d.y); 1740 epipole2->z = (float)(epipole2_64d.z); 1741 1742 /* Convert fundamental matrix */ 1743 icvCvt_64d_32f(fundMatr_64d,fundMatr,9); 1744 1745 return; 1746} 1747 
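
/* Usage sketch (excluded from the build): icvGetQuadsTransformNew treats camera 2
   as the reference frame (identity rotation, zero translation), so rotMatr1 and
   transVect1 must already describe camera 1 relative to camera 2, mirroring
   icvComputeCoeffForStereoNew above. All numeric values here are hypothetical. */
#if 0
{
    float camMatr1[9]   = { 800.f,0.f,320.f, 0.f,800.f,240.f, 0.f,0.f,1.f };
    float camMatr2[9]   = { 800.f,0.f,320.f, 0.f,800.f,240.f, 0.f,0.f,1.f };
    float rotMatr1[9]   = { 1.f,0.f,0.f, 0.f,1.f,0.f, 0.f,0.f,1.f };
    float transVect1[3] = { -100.f, 0.f, 0.f };  /* camera 1 relative to camera 2 */

    CvSize warpSize;
    double quad1[4][2], quad2[4][2];
    float fundMatr[9];
    CvPoint3D32f epipole1, epipole2;

    icvGetQuadsTransformNew( cvSize(640,480),
                             camMatr1, camMatr2, rotMatr1, transVect1,
                             &warpSize, quad1, quad2, fundMatr,
                             &epipole1, &epipole2 );
}
#endif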
1748/*---------------------------------------------------------------------------------------*/ 1749void icvGetQuadsTransformStruct( CvStereoCamera* stereoCamera) 1750{ 1751 /* Wrapper for icvGetQuadsTransformNew */ 1752 1753 1754 double quad1[4][2]; 1755 double quad2[4][2]; 1756 1757 icvGetQuadsTransformNew( cvSize(cvRound(stereoCamera->camera[0]->imgSize[0]),cvRound(stereoCamera->camera[0]->imgSize[1])), 1758 stereoCamera->camera[0]->matrix, 1759 stereoCamera->camera[1]->matrix, 1760 stereoCamera->rotMatrix, 1761 stereoCamera->transVector, 1762 &(stereoCamera->warpSize), 1763 quad1, 1764 quad2, 1765 stereoCamera->fundMatr, 1766 &(stereoCamera->epipole[0]), 1767 &(stereoCamera->epipole[1]) 1768 ); 1769 1770 int i; 1771 for( i = 0; i < 4; i++ ) 1772 { 1773 stereoCamera->quad[0][i] = cvPoint2D32f(quad1[i][0],quad1[i][1]); 1774 stereoCamera->quad[1][i] = cvPoint2D32f(quad2[i][0],quad2[i][1]); 1775 } 1776 1777 return; 1778} 1779 1780/*---------------------------------------------------------------------------------------*/ 1781void icvComputeStereoParamsForCameras(CvStereoCamera* stereoCamera) 1782{ 1783 /* For given intrinsic and extrinsic parameters computes rest parameters 1784 ** such as fundamental matrix. warping coeffs, epipoles, ... 1785 */ 1786 1787 1788 /* compute rotate matrix and translate vector */ 1789 double rotMatr1[9]; 1790 double rotMatr2[9]; 1791 1792 double transVect1[3]; 1793 double transVect2[3]; 1794 1795 double convRotMatr[9]; 1796 double convTransVect[3]; 1797 1798 /* fill matrices */ 1799 icvCvt_32f_64d(stereoCamera->camera[0]->rotMatr,rotMatr1,9); 1800 icvCvt_32f_64d(stereoCamera->camera[1]->rotMatr,rotMatr2,9); 1801 1802 icvCvt_32f_64d(stereoCamera->camera[0]->transVect,transVect1,3); 1803 icvCvt_32f_64d(stereoCamera->camera[1]->transVect,transVect2,3); 1804 1805 icvCreateConvertMatrVect( rotMatr1, 1806 transVect1, 1807 rotMatr2, 1808 transVect2, 1809 convRotMatr, 1810 convTransVect); 1811 1812 /* copy to stereo camera params */ 1813 icvCvt_64d_32f(convRotMatr,stereoCamera->rotMatrix,9); 1814 icvCvt_64d_32f(convTransVect,stereoCamera->transVector,3); 1815 1816 1817 icvGetQuadsTransformStruct(stereoCamera); 1818 icvComputeRestStereoParams(stereoCamera); 1819} 1820 1821 1822 1823/*---------------------------------------------------------------------------------------*/ 1824 1825/* Get cut line for one image */ 1826void icvGetCutPiece( CvVect64d areaLineCoef1,CvVect64d areaLineCoef2, 1827 CvPoint2D64d epipole, 1828 CvSize imageSize, 1829 CvPoint2D64d* point11,CvPoint2D64d* point12, 1830 CvPoint2D64d* point21,CvPoint2D64d* point22, 1831 int* result) 1832{ 1833 /* Compute nearest cut line to epipole */ 1834 /* Get corners inside sector */ 1835 /* Collect all candidate point */ 1836 1837 CvPoint2D64d candPoints[8]; 1838 CvPoint2D64d midPoint; 1839 int numPoints = 0; 1840 int res; 1841 int i; 1842 1843 double cutLine1[3]; 1844 double cutLine2[3]; 1845 1846 /* Find middle line of sector */ 1847 double midLine[3]; 1848 1849 1850 /* Different way */ 1851 CvPoint2D64d pointOnLine1; pointOnLine1.x = pointOnLine1.y = 0; 1852 CvPoint2D64d pointOnLine2; pointOnLine2.x = pointOnLine2.y = 0; 1853 1854 CvPoint2D64d start1,end1; 1855 1856 icvGetCrossRectDirect( imageSize, 1857 areaLineCoef1[0],areaLineCoef1[1],areaLineCoef1[2], 1858 &start1,&end1,&res); 1859 if( res > 0 ) 1860 { 1861 pointOnLine1 = start1; 1862 } 1863 1864 icvGetCrossRectDirect( imageSize, 1865 areaLineCoef2[0],areaLineCoef2[1],areaLineCoef2[2], 1866 &start1,&end1,&res); 1867 if( res > 0 ) 1868 { 1869 pointOnLine2 = 
start1; 1870 } 1871 1872 icvGetMiddleAnglePoint(epipole,pointOnLine1,pointOnLine2,&midPoint); 1873 1874 icvGetCoefForPiece(epipole,midPoint,&midLine[0],&midLine[1],&midLine[2],&res); 1875 1876 /* Test corner points */ 1877 CvPoint2D64d cornerPoint; 1878 CvPoint2D64d tmpPoints[2]; 1879 1880 cornerPoint.x = 0; 1881 cornerPoint.y = 0; 1882 icvTestPoint( cornerPoint, areaLineCoef1, areaLineCoef2, epipole, &res); 1883 if( res == 1 ) 1884 {/* Add point */ 1885 candPoints[numPoints] = cornerPoint; 1886 numPoints++; 1887 } 1888 1889 cornerPoint.x = imageSize.width; 1890 cornerPoint.y = 0; 1891 icvTestPoint( cornerPoint, areaLineCoef1, areaLineCoef2, epipole, &res); 1892 if( res == 1 ) 1893 {/* Add point */ 1894 candPoints[numPoints] = cornerPoint; 1895 numPoints++; 1896 } 1897 1898 cornerPoint.x = imageSize.width; 1899 cornerPoint.y = imageSize.height; 1900 icvTestPoint( cornerPoint, areaLineCoef1, areaLineCoef2, epipole, &res); 1901 if( res == 1 ) 1902 {/* Add point */ 1903 candPoints[numPoints] = cornerPoint; 1904 numPoints++; 1905 } 1906 1907 cornerPoint.x = 0; 1908 cornerPoint.y = imageSize.height; 1909 icvTestPoint( cornerPoint, areaLineCoef1, areaLineCoef2, epipole, &res); 1910 if( res == 1 ) 1911 {/* Add point */ 1912 candPoints[numPoints] = cornerPoint; 1913 numPoints++; 1914 } 1915 1916 /* Find cross line 1 with image border */ 1917 icvGetCrossRectDirect( imageSize, 1918 areaLineCoef1[0],areaLineCoef1[1],areaLineCoef1[2], 1919 &tmpPoints[0], &tmpPoints[1], 1920 &res); 1921 for( i = 0; i < res; i++ ) 1922 { 1923 candPoints[numPoints++] = tmpPoints[i]; 1924 } 1925 1926 /* Find cross line 2 with image border */ 1927 icvGetCrossRectDirect( imageSize, 1928 areaLineCoef2[0],areaLineCoef2[1],areaLineCoef2[2], 1929 &tmpPoints[0], &tmpPoints[1], 1930 &res); 1931 1932 for( i = 0; i < res; i++ ) 1933 { 1934 candPoints[numPoints++] = tmpPoints[i]; 1935 } 1936 1937 if( numPoints < 2 ) 1938 { 1939 *result = 0; 1940 return;/* Error. Not enought points */ 1941 } 1942 /* Project all points to middle line and get max and min */ 1943 1944 CvPoint2D64d projPoint; 1945 CvPoint2D64d minPoint; minPoint.x = minPoint.y = FLT_MAX; 1946 CvPoint2D64d maxPoint; maxPoint.x = maxPoint.y = -FLT_MAX; 1947 1948 1949 double dist; 1950 double maxDist = 0; 1951 double minDist = 10000000; 1952 1953 1954 for( i = 0; i < numPoints; i++ ) 1955 { 1956 icvProjectPointToDirect(candPoints[i], midLine, &projPoint); 1957 icvGetPieceLength(epipole,projPoint,&dist); 1958 if( dist < minDist) 1959 { 1960 minDist = dist; 1961 minPoint = projPoint; 1962 } 1963 1964 if( dist > maxDist) 1965 { 1966 maxDist = dist; 1967 maxPoint = projPoint; 1968 } 1969 } 1970 1971 /* We know maximum and minimum points. Now we can compute cut lines */ 1972 1973 icvGetNormalDirect(midLine,minPoint,cutLine1); 1974 icvGetNormalDirect(midLine,maxPoint,cutLine2); 1975 1976 /* Test for begin of line. 
*/ 1977 CvPoint2D64d tmpPoint2; 1978 1979 /* Get cross with */ 1980 icvGetCrossDirectDirect(areaLineCoef1,cutLine1,point11,&res); 1981 icvGetCrossDirectDirect(areaLineCoef2,cutLine1,point12,&res); 1982 1983 icvGetCrossDirectDirect(areaLineCoef1,cutLine2,point21,&res); 1984 icvGetCrossDirectDirect(areaLineCoef2,cutLine2,point22,&res); 1985 1986 if( epipole.x > imageSize.width * 0.5 ) 1987 {/* Need to change points */ 1988 tmpPoint2 = *point11; 1989 *point11 = *point21; 1990 *point21 = tmpPoint2; 1991 1992 tmpPoint2 = *point12; 1993 *point12 = *point22; 1994 *point22 = tmpPoint2; 1995 } 1996 1997 return; 1998} 1999/*---------------------------------------------------------------------------------------*/ 2000/* Get middle angle */ 2001void icvGetMiddleAnglePoint( CvPoint2D64d basePoint, 2002 CvPoint2D64d point1,CvPoint2D64d point2, 2003 CvPoint2D64d* midPoint) 2004{/* !!! May be need to return error */ 2005 2006 double dist1; 2007 double dist2; 2008 icvGetPieceLength(basePoint,point1,&dist1); 2009 icvGetPieceLength(basePoint,point2,&dist2); 2010 CvPoint2D64d pointNew1; 2011 CvPoint2D64d pointNew2; 2012 double alpha = dist2/dist1; 2013 2014 pointNew1.x = basePoint.x + (1.0/alpha) * ( point2.x - basePoint.x ); 2015 pointNew1.y = basePoint.y + (1.0/alpha) * ( point2.y - basePoint.y ); 2016 2017 pointNew2.x = basePoint.x + alpha * ( point1.x - basePoint.x ); 2018 pointNew2.y = basePoint.y + alpha * ( point1.y - basePoint.y ); 2019 2020 int res; 2021 icvGetCrossPiecePiece(point1,point2,pointNew1,pointNew2,midPoint,&res); 2022 2023 return; 2024} 2025 2026/*---------------------------------------------------------------------------------------*/ 2027/* Get normal direct to direct in line */ 2028void icvGetNormalDirect(CvVect64d direct,CvPoint2D64d point,CvVect64d normDirect) 2029{ 2030 normDirect[0] = direct[1]; 2031 normDirect[1] = - direct[0]; 2032 normDirect[2] = -(normDirect[0]*point.x + normDirect[1]*point.y); 2033 return; 2034} 2035 2036/*---------------------------------------------------------------------------------------*/ 2037CV_IMPL double icvGetVect(CvPoint2D64d basePoint,CvPoint2D64d point1,CvPoint2D64d point2) 2038{ 2039 return (point1.x - basePoint.x)*(point2.y - basePoint.y) - 2040 (point2.x - basePoint.x)*(point1.y - basePoint.y); 2041} 2042/*---------------------------------------------------------------------------------------*/ 2043/* Test for point in sector */ 2044/* Return 0 - point not inside sector */ 2045/* Return 1 - point inside sector */ 2046void icvTestPoint( CvPoint2D64d testPoint, 2047 CvVect64d line1,CvVect64d line2, 2048 CvPoint2D64d basePoint, 2049 int* result) 2050{ 2051 CvPoint2D64d point1,point2; 2052 2053 icvProjectPointToDirect(testPoint,line1,&point1); 2054 icvProjectPointToDirect(testPoint,line2,&point2); 2055 2056 double sign1 = icvGetVect(basePoint,point1,point2); 2057 double sign2 = icvGetVect(basePoint,point1,testPoint); 2058 if( sign1 * sign2 > 0 ) 2059 {/* Correct for first line */ 2060 sign1 = - sign1; 2061 sign2 = icvGetVect(basePoint,point2,testPoint); 2062 if( sign1 * sign2 > 0 ) 2063 {/* Correct for both lines */ 2064 *result = 1; 2065 } 2066 else 2067 { 2068 *result = 0; 2069 } 2070 } 2071 else 2072 { 2073 *result = 0; 2074 } 2075 2076 return; 2077} 2078 2079/*---------------------------------------------------------------------------------------*/ 2080/* Project point to line */ 2081void icvProjectPointToDirect( CvPoint2D64d point,CvVect64d lineCoeff, 2082 CvPoint2D64d* projectPoint) 2083{ 2084 double a = lineCoeff[0]; 2085 double b = 
lineCoeff[1];

    double det = 1.0 / ( a*a + b*b );
    double delta = a*point.y - b*point.x;

    projectPoint->x = ( -a*lineCoeff[2] - b * delta ) * det;
    projectPoint->y = ( -b*lineCoeff[2] + a * delta ) * det ;

    return;
}

/*---------------------------------------------------------------------------------------*/
/* Get distance from point to direction */
void icvGetDistanceFromPointToDirect( CvPoint2D64d point,CvVect64d lineCoef,double*dist)
{
    CvPoint2D64d tmpPoint;
    icvProjectPointToDirect(point,lineCoef,&tmpPoint);
    double dx = point.x - tmpPoint.x;
    double dy = point.y - tmpPoint.y;
    *dist = sqrt(dx*dx+dy*dy);
    return;
}
/*---------------------------------------------------------------------------------------*/

CV_IMPL IplImage* icvCreateIsometricImage( IplImage* src, IplImage* dst,
                                           int desired_depth, int desired_num_channels )
{
    CvSize src_size;
    src_size.width = src->width;
    src_size.height = src->height;

    CvSize dst_size = src_size;

    if( dst )
    {
        dst_size.width = dst->width;
        dst_size.height = dst->height;
    }

    if( !dst || dst->depth != desired_depth ||
        dst->nChannels != desired_num_channels ||
        dst_size.width != src_size.width ||
        dst_size.height != src_size.height )
    {
        cvReleaseImage( &dst );
        dst = cvCreateImage( src_size, desired_depth, desired_num_channels );
        CvRect rect = cvRect(0,0,src_size.width,src_size.height);
        cvSetImageROI( dst, rect );

    }

    return dst;
}

int
icvCvt_32f_64d( float *src, double *dst, int size )
{
    int t;

    if( !src || !dst )
        return CV_NULLPTR_ERR;
    if( size <= 0 )
        return CV_BADRANGE_ERR;

    for( t = 0; t < size; t++ )
    {
        dst[t] = (double) (src[t]);
    }

    return CV_OK;
}

/*======================================================================================*/
/* Type conversion double -> float */
int
icvCvt_64d_32f( double *src, float *dst, int size )
{
    int t;

    if( !src || !dst )
        return CV_NULLPTR_ERR;
    if( size <= 0 )
        return CV_BADRANGE_ERR;

    for( t = 0; t < size; t++ )
    {
        dst[t] = (float) (src[t]);
    }

    return CV_OK;
}

/*----------------------------------------------------------------------------------*/


/* Find line which cross frame by line(a,b,c) */
void FindLineForEpiline( CvSize imageSize,
                         float a,float b,float c,
                         CvPoint2D32f *start,CvPoint2D32f *end,
                         int* result)
{
    result = result;
    CvPoint2D32f frameBeg;

    CvPoint2D32f frameEnd;
    CvPoint2D32f cross[4];
    int haveCross[4];
    float dist;

    haveCross[0] = 0;
    haveCross[1] = 0;
    haveCross[2] = 0;
    haveCross[3] = 0;

    frameBeg.x = 0;
    frameBeg.y = 0;
    frameEnd.x = (float)(imageSize.width);
    frameEnd.y = 0;
    haveCross[0] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[0]);

    frameBeg.x = (float)(imageSize.width);
    frameBeg.y = 0;
    frameEnd.x = (float)(imageSize.width);
    frameEnd.y = (float)(imageSize.height);
    haveCross[1] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[1]);

    frameBeg.x = (float)(imageSize.width);
    frameBeg.y = (float)(imageSize.height);
    frameEnd.x = 0;
    frameEnd.y = (float)(imageSize.height);
    haveCross[2] =
icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[2]); 2216 2217 frameBeg.x = 0; 2218 frameBeg.y = (float)(imageSize.height); 2219 frameEnd.x = 0; 2220 frameEnd.y = 0; 2221 haveCross[3] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[3]); 2222 2223 int n; 2224 float minDist = (float)(INT_MAX); 2225 float maxDist = (float)(INT_MIN); 2226 2227 int maxN = -1; 2228 int minN = -1; 2229 2230 double midPointX = imageSize.width / 2.0; 2231 double midPointY = imageSize.height / 2.0; 2232 2233 for( n = 0; n < 4; n++ ) 2234 { 2235 if( haveCross[n] > 0 ) 2236 { 2237 dist = (float)((midPointX - cross[n].x)*(midPointX - cross[n].x) + 2238 (midPointY - cross[n].y)*(midPointY - cross[n].y)); 2239 2240 if( dist < minDist ) 2241 { 2242 minDist = dist; 2243 minN = n; 2244 } 2245 2246 if( dist > maxDist ) 2247 { 2248 maxDist = dist; 2249 maxN = n; 2250 } 2251 } 2252 } 2253 2254 if( minN >= 0 && maxN >= 0 && (minN != maxN) ) 2255 { 2256 *start = cross[minN]; 2257 *end = cross[maxN]; 2258 } 2259 else 2260 { 2261 start->x = 0; 2262 start->y = 0; 2263 end->x = 0; 2264 end->y = 0; 2265 } 2266 2267 return; 2268 2269} 2270 2271 2272/*----------------------------------------------------------------------------------*/ 2273 2274int GetAngleLinee( CvPoint2D32f epipole, CvSize imageSize,CvPoint2D32f point1,CvPoint2D32f point2) 2275{ 2276 float width = (float)(imageSize.width); 2277 float height = (float)(imageSize.height); 2278 2279 /* Get crosslines with image corners */ 2280 2281 /* Find four lines */ 2282 2283 CvPoint2D32f pa,pb,pc,pd; 2284 2285 pa.x = 0; 2286 pa.y = 0; 2287 2288 pb.x = width; 2289 pb.y = 0; 2290 2291 pd.x = width; 2292 pd.y = height; 2293 2294 pc.x = 0; 2295 pc.y = height; 2296 2297 /* We can compute points for angle */ 2298 /* Test for place section */ 2299 2300 float x,y; 2301 x = epipole.x; 2302 y = epipole.y; 2303 2304 if( x < 0 ) 2305 {/* 1,4,7 */ 2306 if( y < 0) 2307 {/* 1 */ 2308 point1 = pb; 2309 point2 = pc; 2310 } 2311 else if( y > height ) 2312 {/* 7 */ 2313 point1 = pa; 2314 point2 = pd; 2315 } 2316 else 2317 {/* 4 */ 2318 point1 = pa; 2319 point2 = pc; 2320 } 2321 } 2322 else if ( x > width ) 2323 {/* 3,6,9 */ 2324 if( y < 0 ) 2325 {/* 3 */ 2326 point1 = pa; 2327 point2 = pd; 2328 } 2329 else if ( y > height ) 2330 {/* 9 */ 2331 point1 = pc; 2332 point2 = pb; 2333 } 2334 else 2335 {/* 6 */ 2336 point1 = pb; 2337 point2 = pd; 2338 } 2339 } 2340 else 2341 {/* 2,5,8 */ 2342 if( y < 0 ) 2343 {/* 2 */ 2344 point1 = pa; 2345 point2 = pb; 2346 } 2347 else if( y > height ) 2348 {/* 8 */ 2349 point1 = pc; 2350 point2 = pd; 2351 } 2352 else 2353 {/* 5 - point in the image */ 2354 return 2; 2355 } 2356 2357 2358 } 2359 2360 2361 return 0; 2362} 2363 2364/*--------------------------------------------------------------------------------------*/ 2365void icvComputePerspectiveCoeffs(const CvPoint2D32f srcQuad[4],const CvPoint2D32f dstQuad[4],double coeffs[3][3]) 2366{/* Computes perspective coeffs for transformation from src to dst quad */ 2367 2368 2369 CV_FUNCNAME( "icvComputePerspectiveCoeffs" ); 2370 2371 __BEGIN__; 2372 2373 double A[64]; 2374 double b[8]; 2375 double c[8]; 2376 CvPoint2D32f pt[4]; 2377 int i; 2378 2379 pt[0] = srcQuad[0]; 2380 pt[1] = srcQuad[1]; 2381 pt[2] = srcQuad[2]; 2382 pt[3] = srcQuad[3]; 2383 2384 for( i = 0; i < 4; i++ ) 2385 { 2386#if 0 2387 double x = dstQuad[i].x; 2388 double y = dstQuad[i].y; 2389 double X = pt[i].x; 2390 double Y = pt[i].y; 2391#else 2392 double x = pt[i].x; 2393 double y = pt[i].y; 2394 double X = dstQuad[i].x; 2395 double Y = 
dstQuad[i].y; 2396#endif 2397 double* a = A + i*16; 2398 2399 a[0] = x; 2400 a[1] = y; 2401 a[2] = 1; 2402 a[3] = 0; 2403 a[4] = 0; 2404 a[5] = 0; 2405 a[6] = -X*x; 2406 a[7] = -X*y; 2407 2408 a += 8; 2409 2410 a[0] = 0; 2411 a[1] = 0; 2412 a[2] = 0; 2413 a[3] = x; 2414 a[4] = y; 2415 a[5] = 1; 2416 a[6] = -Y*x; 2417 a[7] = -Y*y; 2418 2419 b[i*2] = X; 2420 b[i*2 + 1] = Y; 2421 } 2422 2423 { 2424 double invA[64]; 2425 CvMat matA = cvMat( 8, 8, CV_64F, A ); 2426 CvMat matInvA = cvMat( 8, 8, CV_64F, invA ); 2427 CvMat matB = cvMat( 8, 1, CV_64F, b ); 2428 CvMat matX = cvMat( 8, 1, CV_64F, c ); 2429 2430 CV_CALL( cvPseudoInverse( &matA, &matInvA )); 2431 CV_CALL( cvMatMulAdd( &matInvA, &matB, 0, &matX )); 2432 } 2433 2434 coeffs[0][0] = c[0]; 2435 coeffs[0][1] = c[1]; 2436 coeffs[0][2] = c[2]; 2437 coeffs[1][0] = c[3]; 2438 coeffs[1][1] = c[4]; 2439 coeffs[1][2] = c[5]; 2440 coeffs[2][0] = c[6]; 2441 coeffs[2][1] = c[7]; 2442 coeffs[2][2] = 1.0; 2443 2444 __END__; 2445 2446 return; 2447} 2448 2449/*--------------------------------------------------------------------------------------*/ 2450 2451CV_IMPL void cvComputePerspectiveMap(const double c[3][3], CvArr* rectMapX, CvArr* rectMapY ) 2452{ 2453 CV_FUNCNAME( "cvComputePerspectiveMap" ); 2454 2455 __BEGIN__; 2456 2457 CvSize size; 2458 CvMat stubx, *mapx = (CvMat*)rectMapX; 2459 CvMat stuby, *mapy = (CvMat*)rectMapY; 2460 int i, j; 2461 2462 CV_CALL( mapx = cvGetMat( mapx, &stubx )); 2463 CV_CALL( mapy = cvGetMat( mapy, &stuby )); 2464 2465 if( CV_MAT_TYPE( mapx->type ) != CV_32FC1 || CV_MAT_TYPE( mapy->type ) != CV_32FC1 ) 2466 CV_ERROR( CV_StsUnsupportedFormat, "" ); 2467 2468 size = cvGetMatSize(mapx); 2469 assert( fabs(c[2][2] - 1.) < FLT_EPSILON ); 2470 2471 for( i = 0; i < size.height; i++ ) 2472 { 2473 float* mx = (float*)(mapx->data.ptr + mapx->step*i); 2474 float* my = (float*)(mapy->data.ptr + mapy->step*i); 2475 2476 for( j = 0; j < size.width; j++ ) 2477 { 2478 double w = 1./(c[2][0]*j + c[2][1]*i + 1.); 2479 double x = (c[0][0]*j + c[0][1]*i + c[0][2])*w; 2480 double y = (c[1][0]*j + c[1][1]*i + c[1][2])*w; 2481 2482 mx[j] = (float)x; 2483 my[j] = (float)y; 2484 } 2485 } 2486 2487 __END__; 2488} 2489 2490/*--------------------------------------------------------------------------------------*/ 2491 2492CV_IMPL void cvInitPerspectiveTransform( CvSize size, const CvPoint2D32f quad[4], double matrix[3][3], 2493 CvArr* rectMap ) 2494{ 2495 /* Computes Perspective Transform coeffs and map if need 2496 for given image size and given result quad */ 2497 CV_FUNCNAME( "cvInitPerspectiveTransform" ); 2498 2499 __BEGIN__; 2500 2501 double A[64]; 2502 double b[8]; 2503 double c[8]; 2504 CvPoint2D32f pt[4]; 2505 CvMat mapstub, *map = (CvMat*)rectMap; 2506 int i, j; 2507 2508 if( map ) 2509 { 2510 CV_CALL( map = cvGetMat( map, &mapstub )); 2511 2512 if( CV_MAT_TYPE( map->type ) != CV_32FC2 ) 2513 CV_ERROR( CV_StsUnsupportedFormat, "" ); 2514 2515 if( map->width != size.width || map->height != size.height ) 2516 CV_ERROR( CV_StsUnmatchedSizes, "" ); 2517 } 2518 2519 pt[0] = cvPoint2D32f( 0, 0 ); 2520 pt[1] = cvPoint2D32f( size.width, 0 ); 2521 pt[2] = cvPoint2D32f( size.width, size.height ); 2522 pt[3] = cvPoint2D32f( 0, size.height ); 2523 2524 for( i = 0; i < 4; i++ ) 2525 { 2526#if 0 2527 double x = quad[i].x; 2528 double y = quad[i].y; 2529 double X = pt[i].x; 2530 double Y = pt[i].y; 2531#else 2532 double x = pt[i].x; 2533 double y = pt[i].y; 2534 double X = quad[i].x; 2535 double Y = quad[i].y; 2536#endif 2537 double* a = A + i*16; 2538 
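        /* Each correspondence (x,y) -> (X,Y) of the homography being estimated,
               X = (c0*x + c1*y + c2) / (c6*x + c7*y + 1)
               Y = (c3*x + c4*y + c5) / (c6*x + c7*y + 1),
           is linearized into two rows of the 8x8 system A*c = b filled below:
               [ x  y  1  0  0  0  -X*x  -X*y ] * c = X
               [ 0  0  0  x  y  1  -Y*x  -Y*y ] * c = Y
           (the ninth coefficient of the 3x3 matrix is fixed to 1). */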
2539 a[0] = x; 2540 a[1] = y; 2541 a[2] = 1; 2542 a[3] = 0; 2543 a[4] = 0; 2544 a[5] = 0; 2545 a[6] = -X*x; 2546 a[7] = -X*y; 2547 2548 a += 8; 2549 2550 a[0] = 0; 2551 a[1] = 0; 2552 a[2] = 0; 2553 a[3] = x; 2554 a[4] = y; 2555 a[5] = 1; 2556 a[6] = -Y*x; 2557 a[7] = -Y*y; 2558 2559 b[i*2] = X; 2560 b[i*2 + 1] = Y; 2561 } 2562 2563 { 2564 double invA[64]; 2565 CvMat matA = cvMat( 8, 8, CV_64F, A ); 2566 CvMat matInvA = cvMat( 8, 8, CV_64F, invA ); 2567 CvMat matB = cvMat( 8, 1, CV_64F, b ); 2568 CvMat matX = cvMat( 8, 1, CV_64F, c ); 2569 2570 CV_CALL( cvPseudoInverse( &matA, &matInvA )); 2571 CV_CALL( cvMatMulAdd( &matInvA, &matB, 0, &matX )); 2572 } 2573 2574 matrix[0][0] = c[0]; 2575 matrix[0][1] = c[1]; 2576 matrix[0][2] = c[2]; 2577 matrix[1][0] = c[3]; 2578 matrix[1][1] = c[4]; 2579 matrix[1][2] = c[5]; 2580 matrix[2][0] = c[6]; 2581 matrix[2][1] = c[7]; 2582 matrix[2][2] = 1.0; 2583 2584 if( map ) 2585 { 2586 for( i = 0; i < size.height; i++ ) 2587 { 2588 CvPoint2D32f* maprow = (CvPoint2D32f*)(map->data.ptr + map->step*i); 2589 for( j = 0; j < size.width; j++ ) 2590 { 2591 double w = 1./(c[6]*j + c[7]*i + 1.); 2592 double x = (c[0]*j + c[1]*i + c[2])*w; 2593 double y = (c[3]*j + c[4]*i + c[5])*w; 2594 2595 maprow[j].x = (float)x; 2596 maprow[j].y = (float)y; 2597 } 2598 } 2599 } 2600 2601 __END__; 2602 2603 return; 2604} 2605 2606 2607/*-----------------------------------------------------------------------*/ 2608/* Compute projected infinite point for second image if first image point is known */ 2609void icvComputeeInfiniteProject1( CvMatr64d rotMatr, 2610 CvMatr64d camMatr1, 2611 CvMatr64d camMatr2, 2612 CvPoint2D32f point1, 2613 CvPoint2D32f* point2) 2614{ 2615 double invMatr1[9]; 2616 icvInvertMatrix_64d(camMatr1,3,invMatr1); 2617 double P1[3]; 2618 double p1[3]; 2619 p1[0] = (double)(point1.x); 2620 p1[1] = (double)(point1.y); 2621 p1[2] = 1; 2622 2623 icvMulMatrix_64d( invMatr1, 2624 3,3, 2625 p1, 2626 1,3, 2627 P1); 2628 2629 double invR[9]; 2630 icvTransposeMatrix_64d( rotMatr, 3, 3, invR ); 2631 2632 /* Change system 1 to system 2 */ 2633 double P2[3]; 2634 icvMulMatrix_64d( invR, 2635 3,3, 2636 P1, 2637 1,3, 2638 P2); 2639 2640 /* Now we can project this point to image 2 */ 2641 double projP[3]; 2642 2643 icvMulMatrix_64d( camMatr2, 2644 3,3, 2645 P2, 2646 1,3, 2647 projP); 2648 2649 point2->x = (float)(projP[0] / projP[2]); 2650 point2->y = (float)(projP[1] / projP[2]); 2651 2652 return; 2653} 2654 2655/*-----------------------------------------------------------------------*/ 2656/* Compute projected infinite point for second image if first image point is known */ 2657void icvComputeeInfiniteProject2( CvMatr64d rotMatr, 2658 CvMatr64d camMatr1, 2659 CvMatr64d camMatr2, 2660 CvPoint2D32f* point1, 2661 CvPoint2D32f point2) 2662{ 2663 double invMatr2[9]; 2664 icvInvertMatrix_64d(camMatr2,3,invMatr2); 2665 double P2[3]; 2666 double p2[3]; 2667 p2[0] = (double)(point2.x); 2668 p2[1] = (double)(point2.y); 2669 p2[2] = 1; 2670 2671 icvMulMatrix_64d( invMatr2, 2672 3,3, 2673 p2, 2674 1,3, 2675 P2); 2676 2677 /* Change system 1 to system 2 */ 2678 2679 double P1[3]; 2680 icvMulMatrix_64d( rotMatr, 2681 3,3, 2682 P2, 2683 1,3, 2684 P1); 2685 2686 /* Now we can project this point to image 2 */ 2687 double projP[3]; 2688 2689 icvMulMatrix_64d( camMatr1, 2690 3,3, 2691 P1, 2692 1,3, 2693 projP); 2694 2695 point1->x = (float)(projP[0] / projP[2]); 2696 point1->y = (float)(projP[1] / projP[2]); 2697 2698 return; 2699} 2700 2701/* Select best R and t for given cameras, points, ... 
*/ 2702/* For both cameras */ 2703int icvSelectBestRt( int numImages, 2704 int* numPoints, 2705 CvPoint2D32f* imagePoints1, 2706 CvPoint2D32f* imagePoints2, 2707 CvPoint3D32f* objectPoints, 2708 2709 CvMatr32f cameraMatrix1, 2710 CvVect32f distortion1, 2711 CvMatr32f rotMatrs1, 2712 CvVect32f transVects1, 2713 2714 CvMatr32f cameraMatrix2, 2715 CvVect32f distortion2, 2716 CvMatr32f rotMatrs2, 2717 CvVect32f transVects2, 2718 2719 CvMatr32f bestRotMatr, 2720 CvVect32f bestTransVect 2721 ) 2722{ 2723 2724 /* Need to convert input data 32 -> 64 */ 2725 CvPoint3D64d* objectPoints_64d; 2726 2727 double* rotMatrs1_64d; 2728 double* rotMatrs2_64d; 2729 2730 double* transVects1_64d; 2731 double* transVects2_64d; 2732 2733 double cameraMatrix1_64d[9]; 2734 double cameraMatrix2_64d[9]; 2735 2736 double distortion1_64d[4]; 2737 double distortion2_64d[4]; 2738 2739 /* allocate memory for 64d data */ 2740 int totalNum = 0; 2741 2742 int i; 2743 for( i = 0; i < numImages; i++ ) 2744 { 2745 totalNum += numPoints[i]; 2746 } 2747 2748 objectPoints_64d = (CvPoint3D64d*)calloc(totalNum,sizeof(CvPoint3D64d)); 2749 2750 rotMatrs1_64d = (double*)calloc(numImages,sizeof(double)*9); 2751 rotMatrs2_64d = (double*)calloc(numImages,sizeof(double)*9); 2752 2753 transVects1_64d = (double*)calloc(numImages,sizeof(double)*3); 2754 transVects2_64d = (double*)calloc(numImages,sizeof(double)*3); 2755 2756 /* Convert input data to 64d */ 2757 2758 icvCvt_32f_64d((float*)objectPoints, (double*)objectPoints_64d, totalNum*3); 2759 2760 icvCvt_32f_64d(rotMatrs1, rotMatrs1_64d, numImages*9); 2761 icvCvt_32f_64d(rotMatrs2, rotMatrs2_64d, numImages*9); 2762 2763 icvCvt_32f_64d(transVects1, transVects1_64d, numImages*3); 2764 icvCvt_32f_64d(transVects2, transVects2_64d, numImages*3); 2765 2766 /* Convert to arrays */ 2767 icvCvt_32f_64d(cameraMatrix1, cameraMatrix1_64d, 9); 2768 icvCvt_32f_64d(cameraMatrix2, cameraMatrix2_64d, 9); 2769 2770 icvCvt_32f_64d(distortion1, distortion1_64d, 4); 2771 icvCvt_32f_64d(distortion2, distortion2_64d, 4); 2772 2773 2774 /* for each R and t compute error for image pair */ 2775 float* errors; 2776 2777 errors = (float*)calloc(numImages*numImages,sizeof(float)); 2778 if( errors == 0 ) 2779 { 2780 return CV_OUTOFMEM_ERR; 2781 } 2782 2783 int currImagePair; 2784 int currRt; 2785 for( currRt = 0; currRt < numImages; currRt++ ) 2786 { 2787 int begPoint = 0; 2788 for(currImagePair = 0; currImagePair < numImages; currImagePair++ ) 2789 { 2790 /* For current R,t R,t compute relative position of cameras */ 2791 2792 double convRotMatr[9]; 2793 double convTransVect[3]; 2794 2795 icvCreateConvertMatrVect( rotMatrs1_64d + currRt*9, 2796 transVects1_64d + currRt*3, 2797 rotMatrs2_64d + currRt*9, 2798 transVects2_64d + currRt*3, 2799 convRotMatr, 2800 convTransVect); 2801 2802 /* Project points using relative position of cameras */ 2803 2804 double convRotMatr2[9]; 2805 double convTransVect2[3]; 2806 2807 convRotMatr2[0] = 1; 2808 convRotMatr2[1] = 0; 2809 convRotMatr2[2] = 0; 2810 2811 convRotMatr2[3] = 0; 2812 convRotMatr2[4] = 1; 2813 convRotMatr2[5] = 0; 2814 2815 convRotMatr2[6] = 0; 2816 convRotMatr2[7] = 0; 2817 convRotMatr2[8] = 1; 2818 2819 convTransVect2[0] = 0; 2820 convTransVect2[1] = 0; 2821 convTransVect2[2] = 0; 2822 2823 /* Compute error for given pair and Rt */ 2824 /* We must project points to image and compute error */ 2825 2826 CvPoint2D64d* projImagePoints1; 2827 CvPoint2D64d* projImagePoints2; 2828 2829 CvPoint3D64d* points1; 2830 CvPoint3D64d* points2; 2831 2832 int numberPnt; 2833 
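            /* For this image pair the object points are first moved into the
               second camera frame with that pair's own extrinsics, then into
               the first camera frame through the candidate relative pose
               (convRotMatr, convTransVect).  Both point sets are then
               reprojected with identity rotation and zero translation
               (convRotMatr2, convTransVect2) and compared with the measured
               image points; the mean pixel error is stored for this
               (pair, Rt) combination. */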
numberPnt = numPoints[currImagePair]; 2834 projImagePoints1 = (CvPoint2D64d*)calloc(numberPnt,sizeof(CvPoint2D64d)); 2835 projImagePoints2 = (CvPoint2D64d*)calloc(numberPnt,sizeof(CvPoint2D64d)); 2836 2837 points1 = (CvPoint3D64d*)calloc(numberPnt,sizeof(CvPoint3D64d)); 2838 points2 = (CvPoint3D64d*)calloc(numberPnt,sizeof(CvPoint3D64d)); 2839 2840 /* Transform object points to first camera position */ 2841 int i; 2842 for( i = 0; i < numberPnt; i++ ) 2843 { 2844 /* Create second camera point */ 2845 CvPoint3D64d tmpPoint; 2846 tmpPoint.x = (double)(objectPoints[i].x); 2847 tmpPoint.y = (double)(objectPoints[i].y); 2848 tmpPoint.z = (double)(objectPoints[i].z); 2849 2850 icvConvertPointSystem( tmpPoint, 2851 points2+i, 2852 rotMatrs2_64d + currImagePair*9, 2853 transVects2_64d + currImagePair*3); 2854 2855 /* Create first camera point using R, t */ 2856 icvConvertPointSystem( points2[i], 2857 points1+i, 2858 convRotMatr, 2859 convTransVect); 2860 2861 CvPoint3D64d tmpPoint2 = { 0, 0, 0 }; 2862 icvConvertPointSystem( tmpPoint, 2863 &tmpPoint2, 2864 rotMatrs1_64d + currImagePair*9, 2865 transVects1_64d + currImagePair*3); 2866 double err; 2867 double dx,dy,dz; 2868 dx = tmpPoint2.x - points1[i].x; 2869 dy = tmpPoint2.y - points1[i].y; 2870 dz = tmpPoint2.z - points1[i].z; 2871 err = sqrt(dx*dx + dy*dy + dz*dz); 2872 2873 2874 } 2875 2876#if 0 2877 cvProjectPointsSimple( numPoints[currImagePair], 2878 objectPoints_64d + begPoint, 2879 rotMatrs1_64d + currRt*9, 2880 transVects1_64d + currRt*3, 2881 cameraMatrix1_64d, 2882 distortion1_64d, 2883 projImagePoints1); 2884 2885 cvProjectPointsSimple( numPoints[currImagePair], 2886 objectPoints_64d + begPoint, 2887 rotMatrs2_64d + currRt*9, 2888 transVects2_64d + currRt*3, 2889 cameraMatrix2_64d, 2890 distortion2_64d, 2891 projImagePoints2); 2892#endif 2893 2894 /* Project with no translate and no rotation */ 2895 2896#if 0 2897 { 2898 double nodist[4] = {0,0,0,0}; 2899 cvProjectPointsSimple( numPoints[currImagePair], 2900 points1, 2901 convRotMatr2, 2902 convTransVect2, 2903 cameraMatrix1_64d, 2904 nodist, 2905 projImagePoints1); 2906 2907 cvProjectPointsSimple( numPoints[currImagePair], 2908 points2, 2909 convRotMatr2, 2910 convTransVect2, 2911 cameraMatrix2_64d, 2912 nodist, 2913 projImagePoints2); 2914 2915 } 2916#endif 2917 2918 cvProjectPointsSimple( numPoints[currImagePair], 2919 points1, 2920 convRotMatr2, 2921 convTransVect2, 2922 cameraMatrix1_64d, 2923 distortion1_64d, 2924 projImagePoints1); 2925 2926 cvProjectPointsSimple( numPoints[currImagePair], 2927 points2, 2928 convRotMatr2, 2929 convTransVect2, 2930 cameraMatrix2_64d, 2931 distortion2_64d, 2932 projImagePoints2); 2933 2934 /* points are projected. 
Compute error */

            int currPoint;
            double err1 = 0;
            double err2 = 0;
            double err;
            for( currPoint = 0; currPoint < numberPnt; currPoint++ )
            {
                double len1,len2;
                double dx1,dy1;
                dx1 = imagePoints1[begPoint+currPoint].x - projImagePoints1[currPoint].x;
                dy1 = imagePoints1[begPoint+currPoint].y - projImagePoints1[currPoint].y;
                len1 = sqrt(dx1*dx1 + dy1*dy1);
                err1 += len1;

                double dx2,dy2;
                dx2 = imagePoints2[begPoint+currPoint].x - projImagePoints2[currPoint].x;
                dy2 = imagePoints2[begPoint+currPoint].y - projImagePoints2[currPoint].y;
                len2 = sqrt(dx2*dx2 + dy2*dy2);
                err2 += len2;
            }

            err1 /= (float)(numberPnt);
            err2 /= (float)(numberPnt);

            err = (err1+err2) * 0.5;
            begPoint += numberPnt;

            /* Store the average reprojection error for this (pair, Rt) combination */
            errors[numImages*currImagePair+currRt] = (float)err;

            free(points1);
            free(points2);
            free(projImagePoints1);
            free(projImagePoints2);
        }
    }

    /* Just select R and t with minimal average error */

    int bestnumRt = 0;
    float minError = 0;/* Just for no warnings. Uses 'first' flag. */
    int first = 1;
    for( currRt = 0; currRt < numImages; currRt++ )
    {
        float avErr = 0;
        for(currImagePair = 0; currImagePair < numImages; currImagePair++ )
        {
            avErr += errors[numImages*currImagePair+currRt];
        }
        avErr /= (float)(numImages);

        if( first )
        {
            bestnumRt = 0;
            minError = avErr;
            first = 0;
        }
        else
        {
            if( avErr < minError )
            {
                bestnumRt = currRt;
                minError = avErr;
            }
        }

    }

    double bestRotMatr_64d[9];
    double bestTransVect_64d[3];

    icvCreateConvertMatrVect( rotMatrs1_64d + bestnumRt * 9,
                              transVects1_64d + bestnumRt * 3,
                              rotMatrs2_64d + bestnumRt * 9,
                              transVects2_64d + bestnumRt * 3,
                              bestRotMatr_64d,
                              bestTransVect_64d);

    icvCvt_64d_32f(bestRotMatr_64d,bestRotMatr,9);
    icvCvt_64d_32f(bestTransVect_64d,bestTransVect,3);

    /* Release the 64-bit working copies of the input data */
    free(objectPoints_64d);
    free(rotMatrs1_64d);
    free(rotMatrs2_64d);
    free(transVects1_64d);
    free(transVects2_64d);

    free(errors);

    return CV_OK;
}


/* ----------------- Stereo calibration functions --------------------- */

float icvDefinePointPosition(CvPoint2D32f point1,CvPoint2D32f point2,CvPoint2D32f point)
{
    float ax = point2.x - point1.x;
    float ay = point2.y - point1.y;

    float bx = point.x - point1.x;
    float by = point.y - point1.y;

    return (ax*by - ay*bx);
}

/* Convert function for stereo warping */
int icvConvertWarpCoordinates(double coeffs[3][3],
                              CvPoint2D32f* cameraPoint,
                              CvPoint2D32f* warpPoint,
                              int direction)
{
    double x,y;
    double det;
    if( direction == CV_WARP_TO_CAMERA )
    {/* convert from warped image to camera image coordinates */
        x = warpPoint->x;
        y = warpPoint->y;

        det = (coeffs[2][0] * x + coeffs[2][1] * y + coeffs[2][2]);
        if( fabs(det) > 1e-8 )
        {
            cameraPoint->x = (float)((coeffs[0][0] * x + coeffs[0][1] * y + coeffs[0][2]) / det);
            cameraPoint->y = (float)((coeffs[1][0] * x + coeffs[1][1] * y + coeffs[1][2]) / det);
            return CV_OK;
        }
    }
    else if( direction == CV_CAMERA_TO_WARP )
    {/* convert from camera image to warped image coordinates */
        x = cameraPoint->x;
        y = cameraPoint->y;

        det = (coeffs[2][0]*x-coeffs[0][0])*(coeffs[2][1]*y-coeffs[1][1])-(coeffs[2][1]*x-coeffs[0][1])*(coeffs[2][0]*y-coeffs[1][0]);

        if(
fabs(det) > 1e-8 ) 3065 { 3066 warpPoint->x = (float)(((coeffs[0][2]-coeffs[2][2]*x)*(coeffs[2][1]*y-coeffs[1][1])-(coeffs[2][1]*x-coeffs[0][1])*(coeffs[1][2]-coeffs[2][2]*y))/det); 3067 warpPoint->y = (float)(((coeffs[2][0]*x-coeffs[0][0])*(coeffs[1][2]-coeffs[2][2]*y)-(coeffs[0][2]-coeffs[2][2]*x)*(coeffs[2][0]*y-coeffs[1][0]))/det); 3068 return CV_OK; 3069 } 3070 } 3071 3072 return CV_BADFACTOR_ERR; 3073} 3074 3075/* Compute stereo params using some camera params */ 3076/* by Valery Mosyagin. int ComputeRestStereoParams(StereoParams *stereoparams) */ 3077int icvComputeRestStereoParams(CvStereoCamera *stereoparams) 3078{ 3079 3080 3081 icvGetQuadsTransformStruct(stereoparams); 3082 3083 cvInitPerspectiveTransform( stereoparams->warpSize, 3084 stereoparams->quad[0], 3085 stereoparams->coeffs[0], 3086 0); 3087 3088 cvInitPerspectiveTransform( stereoparams->warpSize, 3089 stereoparams->quad[1], 3090 stereoparams->coeffs[1], 3091 0); 3092 3093 /* Create border for warped images */ 3094 CvPoint2D32f corns[4]; 3095 corns[0].x = 0; 3096 corns[0].y = 0; 3097 3098 corns[1].x = (float)(stereoparams->camera[0]->imgSize[0]-1); 3099 corns[1].y = 0; 3100 3101 corns[2].x = (float)(stereoparams->camera[0]->imgSize[0]-1); 3102 corns[2].y = (float)(stereoparams->camera[0]->imgSize[1]-1); 3103 3104 corns[3].x = 0; 3105 corns[3].y = (float)(stereoparams->camera[0]->imgSize[1]-1); 3106 3107 int i; 3108 for( i = 0; i < 4; i++ ) 3109 { 3110 /* For first camera */ 3111 icvConvertWarpCoordinates( stereoparams->coeffs[0], 3112 corns+i, 3113 stereoparams->border[0]+i, 3114 CV_CAMERA_TO_WARP); 3115 3116 /* For second camera */ 3117 icvConvertWarpCoordinates( stereoparams->coeffs[1], 3118 corns+i, 3119 stereoparams->border[1]+i, 3120 CV_CAMERA_TO_WARP); 3121 } 3122 3123 /* Test compute */ 3124 { 3125 CvPoint2D32f warpPoints[4]; 3126 warpPoints[0] = cvPoint2D32f(0,0); 3127 warpPoints[1] = cvPoint2D32f(stereoparams->warpSize.width-1,0); 3128 warpPoints[2] = cvPoint2D32f(stereoparams->warpSize.width-1,stereoparams->warpSize.height-1); 3129 warpPoints[3] = cvPoint2D32f(0,stereoparams->warpSize.height-1); 3130 3131 CvPoint2D32f camPoints1[4]; 3132 CvPoint2D32f camPoints2[4]; 3133 3134 for( int i = 0; i < 4; i++ ) 3135 { 3136 icvConvertWarpCoordinates(stereoparams->coeffs[0], 3137 camPoints1+i, 3138 warpPoints+i, 3139 CV_WARP_TO_CAMERA); 3140 3141 icvConvertWarpCoordinates(stereoparams->coeffs[1], 3142 camPoints2+i, 3143 warpPoints+i, 3144 CV_WARP_TO_CAMERA); 3145 } 3146 } 3147 3148 3149 /* Allocate memory for scanlines coeffs */ 3150 3151 stereoparams->lineCoeffs = (CvStereoLineCoeff*)calloc(stereoparams->warpSize.height,sizeof(CvStereoLineCoeff)); 3152 3153 /* Compute coeffs for epilines */ 3154 3155 icvComputeCoeffForStereo( stereoparams); 3156 3157 /* all coeffs are known */ 3158 return CV_OK; 3159} 3160 3161/*-------------------------------------------------------------------------------------------*/ 3162 3163int icvStereoCalibration( int numImages, 3164 int* nums, 3165 CvSize imageSize, 3166 CvPoint2D32f* imagePoints1, 3167 CvPoint2D32f* imagePoints2, 3168 CvPoint3D32f* objectPoints, 3169 CvStereoCamera* stereoparams 3170 ) 3171{ 3172 /* Firstly we must calibrate both cameras */ 3173 /* Alocate memory for data */ 3174 /* Allocate for translate vectors */ 3175 float* transVects1; 3176 float* transVects2; 3177 float* rotMatrs1; 3178 float* rotMatrs2; 3179 3180 transVects1 = (float*)calloc(numImages,sizeof(float)*3); 3181 transVects2 = (float*)calloc(numImages,sizeof(float)*3); 3182 3183 rotMatrs1 = 
(float*)calloc(numImages,sizeof(float)*9); 3184 rotMatrs2 = (float*)calloc(numImages,sizeof(float)*9); 3185 3186 /* Calibrate first camera */ 3187 cvCalibrateCamera( numImages, 3188 nums, 3189 imageSize, 3190 imagePoints1, 3191 objectPoints, 3192 stereoparams->camera[0]->distortion, 3193 stereoparams->camera[0]->matrix, 3194 transVects1, 3195 rotMatrs1, 3196 1); 3197 3198 /* Calibrate second camera */ 3199 cvCalibrateCamera( numImages, 3200 nums, 3201 imageSize, 3202 imagePoints2, 3203 objectPoints, 3204 stereoparams->camera[1]->distortion, 3205 stereoparams->camera[1]->matrix, 3206 transVects2, 3207 rotMatrs2, 3208 1); 3209 3210 /* Cameras are calibrated */ 3211 3212 stereoparams->camera[0]->imgSize[0] = (float)imageSize.width; 3213 stereoparams->camera[0]->imgSize[1] = (float)imageSize.height; 3214 3215 stereoparams->camera[1]->imgSize[0] = (float)imageSize.width; 3216 stereoparams->camera[1]->imgSize[1] = (float)imageSize.height; 3217 3218 icvSelectBestRt( numImages, 3219 nums, 3220 imagePoints1, 3221 imagePoints2, 3222 objectPoints, 3223 stereoparams->camera[0]->matrix, 3224 stereoparams->camera[0]->distortion, 3225 rotMatrs1, 3226 transVects1, 3227 stereoparams->camera[1]->matrix, 3228 stereoparams->camera[1]->distortion, 3229 rotMatrs2, 3230 transVects2, 3231 stereoparams->rotMatrix, 3232 stereoparams->transVector 3233 ); 3234 3235 /* Free memory */ 3236 free(transVects1); 3237 free(transVects2); 3238 free(rotMatrs1); 3239 free(rotMatrs2); 3240 3241 icvComputeRestStereoParams(stereoparams); 3242 3243 return CV_NO_ERR; 3244} 3245 3246/* Find line from epipole */ 3247void FindLine(CvPoint2D32f epipole,CvSize imageSize,CvPoint2D32f point,CvPoint2D32f *start,CvPoint2D32f *end) 3248{ 3249 CvPoint2D32f frameBeg; 3250 CvPoint2D32f frameEnd; 3251 CvPoint2D32f cross[4]; 3252 int haveCross[4]; 3253 float dist; 3254 3255 haveCross[0] = 0; 3256 haveCross[1] = 0; 3257 haveCross[2] = 0; 3258 haveCross[3] = 0; 3259 3260 frameBeg.x = 0; 3261 frameBeg.y = 0; 3262 frameEnd.x = (float)(imageSize.width); 3263 frameEnd.y = 0; 3264 haveCross[0] = icvGetCrossPieceVector(frameBeg,frameEnd,epipole,point,&cross[0]); 3265 3266 frameBeg.x = (float)(imageSize.width); 3267 frameBeg.y = 0; 3268 frameEnd.x = (float)(imageSize.width); 3269 frameEnd.y = (float)(imageSize.height); 3270 haveCross[1] = icvGetCrossPieceVector(frameBeg,frameEnd,epipole,point,&cross[1]); 3271 3272 frameBeg.x = (float)(imageSize.width); 3273 frameBeg.y = (float)(imageSize.height); 3274 frameEnd.x = 0; 3275 frameEnd.y = (float)(imageSize.height); 3276 haveCross[2] = icvGetCrossPieceVector(frameBeg,frameEnd,epipole,point,&cross[2]); 3277 3278 frameBeg.x = 0; 3279 frameBeg.y = (float)(imageSize.height); 3280 frameEnd.x = 0; 3281 frameEnd.y = 0; 3282 haveCross[3] = icvGetCrossPieceVector(frameBeg,frameEnd,epipole,point,&cross[3]); 3283 3284 int n; 3285 float minDist = (float)(INT_MAX); 3286 float maxDist = (float)(INT_MIN); 3287 3288 int maxN = -1; 3289 int minN = -1; 3290 3291 for( n = 0; n < 4; n++ ) 3292 { 3293 if( haveCross[n] > 0 ) 3294 { 3295 dist = (epipole.x - cross[n].x)*(epipole.x - cross[n].x) + 3296 (epipole.y - cross[n].y)*(epipole.y - cross[n].y); 3297 3298 if( dist < minDist ) 3299 { 3300 minDist = dist; 3301 minN = n; 3302 } 3303 3304 if( dist > maxDist ) 3305 { 3306 maxDist = dist; 3307 maxN = n; 3308 } 3309 } 3310 } 3311 3312 if( minN >= 0 && maxN >= 0 && (minN != maxN) ) 3313 { 3314 *start = cross[minN]; 3315 *end = cross[maxN]; 3316 } 3317 else 3318 { 3319 start->x = 0; 3320 start->y = 0; 3321 end->x = 0; 3322 end->y = 0; 
3323 } 3324 3325 return; 3326} 3327 3328 3329/* Find line which cross frame by line(a,b,c) */ 3330void FindLineForEpiline(CvSize imageSize,float a,float b,float c,CvPoint2D32f *start,CvPoint2D32f *end) 3331{ 3332 CvPoint2D32f frameBeg; 3333 CvPoint2D32f frameEnd; 3334 CvPoint2D32f cross[4]; 3335 int haveCross[4]; 3336 float dist; 3337 3338 haveCross[0] = 0; 3339 haveCross[1] = 0; 3340 haveCross[2] = 0; 3341 haveCross[3] = 0; 3342 3343 frameBeg.x = 0; 3344 frameBeg.y = 0; 3345 frameEnd.x = (float)(imageSize.width); 3346 frameEnd.y = 0; 3347 haveCross[0] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[0]); 3348 3349 frameBeg.x = (float)(imageSize.width); 3350 frameBeg.y = 0; 3351 frameEnd.x = (float)(imageSize.width); 3352 frameEnd.y = (float)(imageSize.height); 3353 haveCross[1] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[1]); 3354 3355 frameBeg.x = (float)(imageSize.width); 3356 frameBeg.y = (float)(imageSize.height); 3357 frameEnd.x = 0; 3358 frameEnd.y = (float)(imageSize.height); 3359 haveCross[2] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[2]); 3360 3361 frameBeg.x = 0; 3362 frameBeg.y = (float)(imageSize.height); 3363 frameEnd.x = 0; 3364 frameEnd.y = 0; 3365 haveCross[3] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[3]); 3366 3367 int n; 3368 float minDist = (float)(INT_MAX); 3369 float maxDist = (float)(INT_MIN); 3370 3371 int maxN = -1; 3372 int minN = -1; 3373 3374 double midPointX = imageSize.width / 2.0; 3375 double midPointY = imageSize.height / 2.0; 3376 3377 for( n = 0; n < 4; n++ ) 3378 { 3379 if( haveCross[n] > 0 ) 3380 { 3381 dist = (float)((midPointX - cross[n].x)*(midPointX - cross[n].x) + 3382 (midPointY - cross[n].y)*(midPointY - cross[n].y)); 3383 3384 if( dist < minDist ) 3385 { 3386 minDist = dist; 3387 minN = n; 3388 } 3389 3390 if( dist > maxDist ) 3391 { 3392 maxDist = dist; 3393 maxN = n; 3394 } 3395 } 3396 } 3397 3398 if( minN >= 0 && maxN >= 0 && (minN != maxN) ) 3399 { 3400 *start = cross[minN]; 3401 *end = cross[maxN]; 3402 } 3403 else 3404 { 3405 start->x = 0; 3406 start->y = 0; 3407 end->x = 0; 3408 end->y = 0; 3409 } 3410 3411 return; 3412 3413} 3414 3415/* Cross lines */ 3416int GetCrossLines(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f p2_start,CvPoint2D32f p2_end,CvPoint2D32f *cross) 3417{ 3418 double ex1,ey1,ex2,ey2; 3419 double px1,py1,px2,py2; 3420 double del; 3421 double delA,delB,delX,delY; 3422 double alpha,betta; 3423 3424 ex1 = p1_start.x; 3425 ey1 = p1_start.y; 3426 ex2 = p1_end.x; 3427 ey2 = p1_end.y; 3428 3429 px1 = p2_start.x; 3430 py1 = p2_start.y; 3431 px2 = p2_end.x; 3432 py2 = p2_end.y; 3433 3434 del = (ex1-ex2)*(py2-py1)+(ey2-ey1)*(px2-px1); 3435 if( del == 0) 3436 { 3437 return -1; 3438 } 3439 3440 delA = (px1-ex1)*(py1-py2) + (ey1-py1)*(px1-px2); 3441 delB = (ex1-px1)*(ey1-ey2) + (py1-ey1)*(ex1-ex2); 3442 3443 alpha = delA / del; 3444 betta = -delB / del; 3445 3446 if( alpha < 0 || alpha > 1.0 || betta < 0 || betta > 1.0) 3447 { 3448 return -1; 3449 } 3450 3451 delX = (ex1-ex2)*(py1*(px1-px2)-px1*(py1-py2))+ 3452 (px1-px2)*(ex1*(ey1-ey2)-ey1*(ex1-ex2)); 3453 3454 delY = (ey1-ey2)*(px1*(py1-py2)-py1*(px1-px2))+ 3455 (py1-py2)*(ey1*(ex1-ex2)-ex1*(ey1-ey2)); 3456 3457 cross->x = (float)( delX / del); 3458 cross->y = (float)(-delY / del); 3459 return 1; 3460} 3461 3462 3463int icvGetCrossPieceVector(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f v2_start,CvPoint2D32f v2_end,CvPoint2D32f *cross) 3464{ 3465 double ex1,ey1,ex2,ey2; 3466 double px1,py1,px2,py2; 3467 double del; 3468 
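    /* Intersect the segment [p1_start, p1_end] with the infinite line through
       v2_start and v2_end.  Both are written in parametric form and the 2x2
       linear system is solved by Cramer's rule; unlike GetCrossLines, only
       alpha (the parameter along the first segment) has to stay inside [0,1],
       so the crossing may lie anywhere along the second direction. */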
double delA,delB,delX,delY; 3469 double alpha,betta; 3470 3471 ex1 = p1_start.x; 3472 ey1 = p1_start.y; 3473 ex2 = p1_end.x; 3474 ey2 = p1_end.y; 3475 3476 px1 = v2_start.x; 3477 py1 = v2_start.y; 3478 px2 = v2_end.x; 3479 py2 = v2_end.y; 3480 3481 del = (ex1-ex2)*(py2-py1)+(ey2-ey1)*(px2-px1); 3482 if( del == 0) 3483 { 3484 return -1; 3485 } 3486 3487 delA = (px1-ex1)*(py1-py2) + (ey1-py1)*(px1-px2); 3488 delB = (ex1-px1)*(ey1-ey2) + (py1-ey1)*(ex1-ex2); 3489 3490 alpha = delA / del; 3491 betta = -delB / del; 3492 3493 if( alpha < 0 || alpha > 1.0 ) 3494 { 3495 return -1; 3496 } 3497 3498 delX = (ex1-ex2)*(py1*(px1-px2)-px1*(py1-py2))+ 3499 (px1-px2)*(ex1*(ey1-ey2)-ey1*(ex1-ex2)); 3500 3501 delY = (ey1-ey2)*(px1*(py1-py2)-py1*(px1-px2))+ 3502 (py1-py2)*(ey1*(ex1-ex2)-ex1*(ey1-ey2)); 3503 3504 cross->x = (float)( delX / del); 3505 cross->y = (float)(-delY / del); 3506 return 1; 3507} 3508 3509 3510int icvGetCrossLineDirect(CvPoint2D32f p1,CvPoint2D32f p2,float a,float b,float c,CvPoint2D32f* cross) 3511{ 3512 double del; 3513 double delX,delY,delA; 3514 3515 double px1,px2,py1,py2; 3516 double X,Y,alpha; 3517 3518 px1 = p1.x; 3519 py1 = p1.y; 3520 3521 px2 = p2.x; 3522 py2 = p2.y; 3523 3524 del = a * (px2 - px1) + b * (py2-py1); 3525 if( del == 0 ) 3526 { 3527 return -1; 3528 } 3529 3530 delA = - c - a*px1 - b*py1; 3531 alpha = delA / del; 3532 3533 if( alpha < 0 || alpha > 1.0 ) 3534 { 3535 return -1;/* no cross */ 3536 } 3537 3538 delX = b * (py1*(px1-px2) - px1*(py1-py2)) + c * (px1-px2); 3539 delY = a * (px1*(py1-py2) - py1*(px1-px2)) + c * (py1-py2); 3540 3541 X = delX / del; 3542 Y = delY / del; 3543 3544 cross->x = (float)X; 3545 cross->y = (float)Y; 3546 3547 return 1; 3548} 3549 3550int cvComputeEpipoles( CvMatr32f camMatr1, CvMatr32f camMatr2, 3551 CvMatr32f rotMatr1, CvMatr32f rotMatr2, 3552 CvVect32f transVect1,CvVect32f transVect2, 3553 CvVect32f epipole1, 3554 CvVect32f epipole2) 3555{ 3556 3557 /* Copy matrix */ 3558 3559 CvMat ccamMatr1 = cvMat(3,3,CV_MAT32F,camMatr1); 3560 CvMat ccamMatr2 = cvMat(3,3,CV_MAT32F,camMatr2); 3561 CvMat crotMatr1 = cvMat(3,3,CV_MAT32F,rotMatr1); 3562 CvMat crotMatr2 = cvMat(3,3,CV_MAT32F,rotMatr2); 3563 CvMat ctransVect1 = cvMat(3,1,CV_MAT32F,transVect1); 3564 CvMat ctransVect2 = cvMat(3,1,CV_MAT32F,transVect2); 3565 CvMat cepipole1 = cvMat(3,1,CV_MAT32F,epipole1); 3566 CvMat cepipole2 = cvMat(3,1,CV_MAT32F,epipole2); 3567 3568 3569 CvMat cmatrP1 = cvMat(3,3,CV_MAT32F,0); cvmAlloc(&cmatrP1); 3570 CvMat cmatrP2 = cvMat(3,3,CV_MAT32F,0); cvmAlloc(&cmatrP2); 3571 CvMat cvectp1 = cvMat(3,1,CV_MAT32F,0); cvmAlloc(&cvectp1); 3572 CvMat cvectp2 = cvMat(3,1,CV_MAT32F,0); cvmAlloc(&cvectp2); 3573 CvMat ctmpF1 = cvMat(3,1,CV_MAT32F,0); cvmAlloc(&ctmpF1); 3574 CvMat ctmpM1 = cvMat(3,3,CV_MAT32F,0); cvmAlloc(&ctmpM1); 3575 CvMat ctmpM2 = cvMat(3,3,CV_MAT32F,0); cvmAlloc(&ctmpM2); 3576 CvMat cinvP1 = cvMat(3,3,CV_MAT32F,0); cvmAlloc(&cinvP1); 3577 CvMat cinvP2 = cvMat(3,3,CV_MAT32F,0); cvmAlloc(&cinvP2); 3578 CvMat ctmpMatr = cvMat(3,3,CV_MAT32F,0); cvmAlloc(&ctmpMatr); 3579 CvMat ctmpVect1 = cvMat(3,1,CV_MAT32F,0); cvmAlloc(&ctmpVect1); 3580 CvMat ctmpVect2 = cvMat(3,1,CV_MAT32F,0); cvmAlloc(&ctmpVect2); 3581 CvMat cmatrF1 = cvMat(3,3,CV_MAT32F,0); cvmAlloc(&cmatrF1); 3582 CvMat ctmpF = cvMat(3,3,CV_MAT32F,0); cvmAlloc(&ctmpF); 3583 CvMat ctmpE1 = cvMat(3,1,CV_MAT32F,0); cvmAlloc(&ctmpE1); 3584 CvMat ctmpE2 = cvMat(3,1,CV_MAT32F,0); cvmAlloc(&ctmpE2); 3585 3586 /* Compute first */ 3587 cvmMul( &ccamMatr1, &crotMatr1, &cmatrP1); 3588 cvmInvert( 
&cmatrP1,&cinvP1 );
    cvmMul( &ccamMatr1, &ctransVect1, &cvectp1 );

    /* Compute second */
    cvmMul( &ccamMatr2, &crotMatr2, &cmatrP2 );
    cvmInvert( &cmatrP2,&cinvP2 );
    cvmMul( &ccamMatr2, &ctransVect2, &cvectp2 );

    cvmMul( &cmatrP1, &cinvP2, &ctmpM1);
    cvmMul( &ctmpM1, &cvectp2, &ctmpVect1);
    cvmSub( &cvectp1,&ctmpVect1,&ctmpE1);

    cvmMul( &cmatrP2, &cinvP1, &ctmpM2);
    cvmMul( &ctmpM2, &cvectp1, &ctmpVect2);
    cvmSub( &cvectp2, &ctmpVect2, &ctmpE2);


    /* Copy to the output epipoles (scale factor 1.0) */

    cvmScale(&ctmpE1,&cepipole1,1.0);
    cvmScale(&ctmpE2,&cepipole2,1.0);

    cvmFree(&cmatrP1);
    cvmFree(&cmatrP2);
    cvmFree(&cvectp1);
    cvmFree(&cvectp2);
    cvmFree(&ctmpF1);
    cvmFree(&ctmpM1);
    cvmFree(&ctmpM2);
    cvmFree(&cinvP1);
    cvmFree(&cinvP2);
    cvmFree(&ctmpMatr);
    cvmFree(&ctmpVect1);
    cvmFree(&ctmpVect2);
    cvmFree(&cmatrF1);
    cvmFree(&ctmpF);
    cvmFree(&ctmpE1);
    cvmFree(&ctmpE2);

    return CV_NO_ERR;
}/* cvComputeEpipoles */


/* Compute epipoles from the fundamental matrix */
int cvComputeEpipolesFromFundMatrix(CvMatr32f fundMatr,
                                         CvPoint3D32f* epipole1,
                                         CvPoint3D32f* epipole2)
{
    /* Decompose fundamental matrix using SVD ( A = U W V') */
    CvMat fundMatrC = cvMat(3,3,CV_MAT32F,fundMatr);

    CvMat* matrW = cvCreateMat(3,3,CV_MAT32F);
    CvMat* matrU = cvCreateMat(3,3,CV_MAT32F);
    CvMat* matrV = cvCreateMat(3,3,CV_MAT32F);

    /* From the SVD we need just the last vector of U and V, i.e. the last row of U' and V' */
    /* We get the transposed matrices U and V */
    cvSVD(&fundMatrC,matrW,matrU,matrV,CV_SVD_V_T|CV_SVD_U_T);

    /* Get last row from U' and compute epipole1 */
    epipole1->x = matrU->data.fl[6];
    epipole1->y = matrU->data.fl[7];
    epipole1->z = matrU->data.fl[8];

    /* Get last row from V' and compute epipole2 */
    epipole2->x = matrV->data.fl[6];
    epipole2->y = matrV->data.fl[7];
    epipole2->z = matrV->data.fl[8];

    cvReleaseMat(&matrW);
    cvReleaseMat(&matrU);
    cvReleaseMat(&matrV);
    return CV_OK;
}

int cvConvertEssential2Fundamental( CvMatr32f essMatr,
                                    CvMatr32f fundMatr,
                                    CvMatr32f cameraMatr1,
                                    CvMatr32f cameraMatr2)
{/* Fund = inv(CM1') * Ess * inv(CM2) */

    CvMat essMatrC     = cvMat(3,3,CV_MAT32F,essMatr);
    CvMat fundMatrC    = cvMat(3,3,CV_MAT32F,fundMatr);
    CvMat cameraMatr1C = cvMat(3,3,CV_MAT32F,cameraMatr1);
    CvMat cameraMatr2C = cvMat(3,3,CV_MAT32F,cameraMatr2);

    CvMat* invCM2  = cvCreateMat(3,3,CV_MAT32F);
    CvMat* tmpMatr = cvCreateMat(3,3,CV_MAT32F);
    CvMat* invCM1T = cvCreateMat(3,3,CV_MAT32F);

    cvTranspose(&cameraMatr1C,tmpMatr);
    cvInvert(tmpMatr,invCM1T);
    cvmMul(invCM1T,&essMatrC,tmpMatr);
    cvInvert(&cameraMatr2C,invCM2);
    cvmMul(tmpMatr,invCM2,&fundMatrC);

    /* Scale the fundamental matrix so that its last element is 1 */
    double scale;
    scale = 1.0/fundMatrC.data.fl[8];
    cvConvertScale(&fundMatrC,&fundMatrC,scale);

    cvReleaseMat(&invCM2);
    cvReleaseMat(&tmpMatr);
    cvReleaseMat(&invCM1T);

    return CV_OK;
}

/* Compute essential matrix */

int cvComputeEssentialMatrix(  CvMatr32f rotMatr,
                               CvMatr32f transVect,
                               CvMatr32f essMatr)
{
    float transMatr[9];

    /* Make the antisymmetric matrix from the translation vector */
    transMatr[0] =   0;
    transMatr[1] = - transVect[2];
    transMatr[2] =   transVect[1];

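    /* [t]x is the skew-symmetric (antisymmetric) matrix of the translation
       t = transVect:

           [   0   -t.z   t.y ]
           [  t.z    0   -t.x ]
           [ -t.y   t.x    0  ]

       so that [t]x * v equals the cross product t x v for any vector v.
       The essential matrix is then E = [t]x * R, computed by
       icvMulMatrix_32f at the end of this function. */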
    transMatr[3] =   transVect[2];
    transMatr[4] =   0;
    transMatr[5] = - transVect[0];

    transMatr[6] = - transVect[1];
    transMatr[7] =   transVect[0];
    transMatr[8] =   0;

    icvMulMatrix_32f(transMatr,3,3,rotMatr,3,3,essMatr);

    return CV_OK;
}

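/* Illustrative usage sketch (kept disabled, not part of the original code):
   chain the helpers above to go from a relative pose (R, t) and the two
   camera matrices to an epipolar line clipped against the image frame.
   The wrapper name exampleEpilineFromPose and the p2' * F * p1 = 0
   convention assumed for the epiline are illustrative assumptions only. */
#if 0
static void exampleEpilineFromPose( CvMatr32f rotMatr,    /* 3x3 relative rotation */
                                    CvMatr32f transVect,   /* 3x1 relative translation */
                                    CvMatr32f camMatr1,
                                    CvMatr32f camMatr2,
                                    CvPoint2D32f point1,   /* point in the first image */
                                    CvSize imageSize,
                                    CvPoint2D32f* lineStart,
                                    CvPoint2D32f* lineEnd )
{
    float essMatr[9];
    float fundMatr[9];
    CvPoint3D32f epipole1;
    CvPoint3D32f epipole2;

    /* E = [t]x * R, then F from E and the camera matrices */
    cvComputeEssentialMatrix( rotMatr, transVect, essMatr );
    cvConvertEssential2Fundamental( essMatr, fundMatr, camMatr1, camMatr2 );

    /* The epipoles are available here if the caller needs them */
    cvComputeEpipolesFromFundMatrix( fundMatr, &epipole1, &epipole2 );

    /* Epipolar line in the second image for point1 (l = F * p1) */
    float a = fundMatr[0]*point1.x + fundMatr[1]*point1.y + fundMatr[2];
    float b = fundMatr[3]*point1.x + fundMatr[4]*point1.y + fundMatr[5];
    float c = fundMatr[6]*point1.x + fundMatr[7]*point1.y + fundMatr[8];

    /* Clip the line a*x + b*y + c = 0 against the image rectangle */
    FindLineForEpiline( imageSize, a, b, c, lineStart, lineEnd );
}
#endif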