/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        Intel License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "_cxcore.h"

/****************************************************************************************\
*                        [scaled] Identity matrix initialization                         *
\****************************************************************************************/

CV_IMPL void
cvSetIdentity( CvArr* array, CvScalar value )
{
    CV_FUNCNAME( "cvSetIdentity" );

    __BEGIN__;

    CvMat stub, *mat = (CvMat*)array;
    CvSize size;
    int i, k, len, step;
    int type, pix_size;
    uchar* data = 0;
    double buf[4];

    if( !CV_IS_MAT( mat ))
    {
        int coi = 0;
        CV_CALL( mat = cvGetMat( mat, &stub, &coi ));
        if( coi != 0 )
            CV_ERROR( CV_BadCOI, "coi is not supported" );
    }

    size = cvGetMatSize( mat );
    len = CV_IMIN( size.width, size.height );

    type = CV_MAT_TYPE(mat->type);
    pix_size = CV_ELEM_SIZE(type);
    size.width *= pix_size;

    if( CV_IS_MAT_CONT( mat->type ))
    {
        size.width *= size.height;
        size.height = 1;
    }

    data = mat->data.ptr;
    step = mat->step;
    if( step == 0 )
        step = CV_STUB_STEP;
    IPPI_CALL( icvSetZero_8u_C1R( data, step, size ));
    step += pix_size;

    if( type == CV_32FC1 )
    {
        float val = (float)value.val[0];
        float* _data = (float*)data;
        step /= sizeof(_data[0]);
        len *= step;

        for( i = 0; i < len; i += step )
            _data[i] = val;
    }
    else if( type == CV_64FC1 )
    {
        double val = value.val[0];
        double* _data = (double*)data;
        step /= sizeof(_data[0]);
        len *= step;

        for( i = 0; i < len; i += step )
            _data[i] = val;
    }
    else
    {
        uchar* val_ptr = (uchar*)buf;
        cvScalarToRawData( &value, buf, type, 0 );
        len *= step;

        for( i = 0; i < len; i += step )
            for( k = 0; k < pix_size; k++ )
                data[i+k] = val_ptr[k];
    }

    __END__;
}


/****************************************************************************************\
*                                   Trace of the matrix                                  *
\****************************************************************************************/

CV_IMPL CvScalar
cvTrace( const CvArr* array )
{
    CvScalar sum = {{0,0,0,0}};

    CV_FUNCNAME( "cvTrace" );

    __BEGIN__;

    CvMat stub, *mat = 0;

    if( CV_IS_MAT( array ))
    {
        mat = (CvMat*)array;
        int type = CV_MAT_TYPE(mat->type);
        int size = MIN(mat->rows,mat->cols);
        uchar* data = mat->data.ptr;

        if( type == CV_32FC1 )
        {
            int step = mat->step + sizeof(float);

            for( ; size--; data += step )
                sum.val[0] += *(float*)data;
            EXIT;
        }

        if( type == CV_64FC1 )
        {
            int step = mat->step + sizeof(double);

            for( ; size--; data += step )
                sum.val[0] += *(double*)data;
            EXIT;
        }
    }

    CV_CALL( mat = cvGetDiag( array, &stub ));
    CV_CALL( sum = cvSum( mat ));

    __END__;

    return sum;
}


/****************************************************************************************\
*                                    Matrix transpose                                    *
\****************************************************************************************/

/////////////////// macros for inplace transposition of square matrix ////////////////////

#define ICV_DEF_TRANSP_INP_CASE_C1( arrtype, len )  \
{                                                   \
    arrtype* arr1 = arr;                            \
    step /= sizeof(arr[0]);                         \
                                                    \
    while( --len )                                  \
    {                                               \
        arr += step, arr1++;                        \
        arrtype* arr2 = arr;                        \
        arrtype* arr3 = arr1;                       \
                                                    \
        do                                          \
        {                                           \
            arrtype t0 = arr2[0];                   \
            arrtype t1 = arr3[0];                   \
            arr2[0] = t1;                           \
            arr3[0] = t0;                           \
                                                    \
            arr2++;                                 \
            arr3 += step;                           \
        }                                           \
        while( arr2 != arr3 );                      \
    }                                               \
}


#define ICV_DEF_TRANSP_INP_CASE_C3( arrtype, len )  \
{                                                   \
    arrtype* arr1 = arr;                            \
    int y;                                          \
    step /= sizeof(arr[0]);                         \
                                                    \
    for( y = 1; y < len; y++ )                      \
    {                                               \
        arr += step, arr1 += 3;                     \
        arrtype* arr2 = arr;                        \
        arrtype* arr3 = arr1;                       \
                                                    \
        for( ; arr2 != arr3; arr2 += 3, arr3 += step ) \
        {                                           \
            arrtype t0 = arr2[0];                   \
            arrtype t1 = arr3[0];                   \
            arr2[0] = t1;                           \
            arr3[0] = t0;                           \
            t0 = arr2[1];                           \
            t1 = arr3[1];                           \
            arr2[1] = t1;                           \
            arr3[1] = t0;                           \
            t0 = arr2[2];                           \
            t1 = arr3[2];                           \
            arr2[2] = t1;                           \
            arr3[2] = t0;                           \
        }                                           \
    }                                               \
}


#define ICV_DEF_TRANSP_INP_CASE_C4( arrtype, len )  \
{                                                   \
    arrtype* arr1 = arr;                            \
    int y;                                          \
    step /= sizeof(arr[0]);                         \
                                                    \
    for( y = 1; y < len; y++ )                      \
    {                                               \
        arr += step, arr1 += 4;                     \
        arrtype* arr2 = arr;                        \
        arrtype* arr3 = arr1;                       \
                                                    \
        for( ; arr2 != arr3; arr2 += 4, arr3 += step ) \
        {                                           \
            arrtype t0 = arr2[0];                   \
            arrtype t1 = arr3[0];                   \
            arr2[0] = t1;                           \
            arr3[0] = t0;                           \
            t0 = arr2[1];                           \
            t1 = arr3[1];                           \
            arr2[1] = t1;                           \
            arr3[1] = t0;                           \
            t0 = arr2[2];                           \
            t1 = arr3[2];                           \
            arr2[2] = t1;                           \
            arr3[2] = t0;                           \
            t0 = arr2[3];                           \
            t1 = arr3[3];                           \
            arr2[3] = t1;                           \
            arr3[3] = t0;                           \
        }                                           \
    }                                               \
}


//////////////// macros for non-inplace transposition of rectangular matrix //////////////

#define ICV_DEF_TRANSP_CASE_C1( arrtype )           \
{                                                   \
    int x, y;                                       \
    srcstep /= sizeof(src[0]);                      \
    dststep /= sizeof(dst[0]);                      \
                                                    \
    for( y = 0; y <= size.height - 2; y += 2,       \
                src += 2*srcstep, dst += 2 )        \
    {                                               \
        const arrtype* src1 = src + srcstep;        \
        arrtype* dst1 = dst;                        \
                                                    \
        for( x = 0; x <= size.width - 2; x += 2, dst1 += dststep ) \
        {                                           \
            arrtype t0 = src[x];                    \
            arrtype t1 = src1[x];                   \
            dst1[0] = t0;                           \
            dst1[1] = t1;                           \
            dst1 += dststep;                        \
                                                    \
            t0 = src[x + 1];                        \
            t1 = src1[x + 1];                       \
            dst1[0] = t0;                           \
            dst1[1] = t1;                           \
        }                                           \
                                                    \
        if( x < size.width )                        \
        {                                           \
            arrtype t0 = src[x];                    \
            arrtype t1 = src1[x];                   \
            dst1[0] = t0;                           \
            dst1[1] = t1;                           \
        }                                           \
    }                                               \
                                                    \
    if( y < size.height )                           \
    {                                               \
        arrtype* dst1 = dst;                        \
        for( x = 0; x <= size.width - 2; x += 2, dst1 += 2*dststep ) \
        {                                           \
            arrtype t0 = src[x];                    \
            arrtype t1 = src[x + 1];                \
            dst1[0] = t0;                           \
            dst1[dststep] = t1;                     \
        }                                           \
                                                    \
        if( x < size.width )                        \
        {                                           \
            arrtype t0 = src[x];                    \
            dst1[0] = t0;                           \
        }                                           \
    }                                               \
}


#define ICV_DEF_TRANSP_CASE_C3( arrtype )           \
{                                                   \
    size.width *= 3;                                \
    srcstep /= sizeof(src[0]);                      \
    dststep /= sizeof(dst[0]);                      \
                                                    \
    for( ; size.height--; src += srcstep, dst += 3 )\
    {                                               \
        int x;                                      \
        arrtype* dst1 = dst;                        \
                                                    \
        for( x = 0; x < size.width; x += 3, dst1 += dststep ) \
        {                                           \
            arrtype t0 = src[x];                    \
            arrtype t1 = src[x + 1];                \
            arrtype t2 = src[x + 2];                \
                                                    \
            dst1[0] = t0;                           \
            dst1[1] = t1;                           \
            dst1[2] = t2;                           \
        }                                           \
    }                                               \
}


#define ICV_DEF_TRANSP_CASE_C4( arrtype )           \
{                                                   \
    size.width *= 4;                                \
    srcstep /= sizeof(src[0]);                      \
    dststep /= sizeof(dst[0]);                      \
                                                    \
    for( ; size.height--; src += srcstep, dst += 4 )\
    {                                               \
        int x;                                      \
        arrtype* dst1 = dst;                        \
                                                    \
        for( x = 0; x < size.width; x += 4, dst1 += dststep ) \
        {                                           \
            arrtype t0 = src[x];                    \
            arrtype t1 = src[x + 1];                \
                                                    \
            dst1[0] = t0;                           \
            dst1[1] = t1;                           \
                                                    \
            t0 = src[x + 2];                        \
            t1 = src[x + 3];                        \
                                                    \
            dst1[2] = t0;                           \
            dst1[3] = t1;                           \
        }                                           \
    }                                               \
}


#define ICV_DEF_TRANSP_INP_FUNC( flavor, arrtype, cn )      \
static CvStatus CV_STDCALL                                  \
icvTranspose_##flavor( arrtype* arr, int step, CvSize size )\
{                                                           \
    assert( size.width == size.height );                    \
                                                            \
    ICV_DEF_TRANSP_INP_CASE_C##cn( arrtype, size.width )    \
    return CV_OK;                                           \
}


#define ICV_DEF_TRANSP_FUNC( flavor, arrtype, cn )          \
static CvStatus CV_STDCALL                                  \
icvTranspose_##flavor( const arrtype* src, int srcstep,     \
                       arrtype* dst, int dststep, CvSize size )\
{                                                           \
    ICV_DEF_TRANSP_CASE_C##cn( arrtype )                    \
    return CV_OK;                                           \
}


ICV_DEF_TRANSP_INP_FUNC( 8u_C1IR, uchar, 1 )
ICV_DEF_TRANSP_INP_FUNC( 8u_C2IR, ushort, 1 )
ICV_DEF_TRANSP_INP_FUNC( 8u_C3IR, uchar, 3 )
ICV_DEF_TRANSP_INP_FUNC( 16u_C2IR, int, 1 )
ICV_DEF_TRANSP_INP_FUNC( 16u_C3IR, ushort, 3 )
ICV_DEF_TRANSP_INP_FUNC( 32s_C2IR, int64, 1 )
ICV_DEF_TRANSP_INP_FUNC( 32s_C3IR, int, 3 )
ICV_DEF_TRANSP_INP_FUNC( 64s_C2IR, int, 4 )
ICV_DEF_TRANSP_INP_FUNC( 64s_C3IR, int64, 3 )
ICV_DEF_TRANSP_INP_FUNC( 64s_C4IR, int64, 4 )


ICV_DEF_TRANSP_FUNC( 8u_C1R, uchar, 1 )
ICV_DEF_TRANSP_FUNC( 8u_C2R, ushort, 1 )
ICV_DEF_TRANSP_FUNC( 8u_C3R, uchar, 3 )
ICV_DEF_TRANSP_FUNC( 16u_C2R, int, 1 )
ICV_DEF_TRANSP_FUNC( 16u_C3R, ushort, 3 )
ICV_DEF_TRANSP_FUNC( 32s_C2R, int64, 1 )
ICV_DEF_TRANSP_FUNC( 32s_C3R, int, 3 )
ICV_DEF_TRANSP_FUNC( 64s_C2R, int, 4 )
ICV_DEF_TRANSP_FUNC( 64s_C3R, int64, 3 )
ICV_DEF_TRANSP_FUNC( 64s_C4R, int64, 4 )

CV_DEF_INIT_PIXSIZE_TAB_2D( Transpose, R )
CV_DEF_INIT_PIXSIZE_TAB_2D( Transpose, IR )

CV_IMPL void
cvTranspose( const CvArr* srcarr, CvArr* dstarr )
{
    static CvBtFuncTable tab, inp_tab;
    static int inittab = 0;

    CV_FUNCNAME( "cvTranspose" );

    __BEGIN__;

    CvMat sstub, *src = (CvMat*)srcarr;
    CvMat dstub, *dst = (CvMat*)dstarr;
    CvSize size;
    int type, pix_size;

    if( !inittab )
    {
        icvInitTransposeIRTable( &inp_tab );
        icvInitTransposeRTable( &tab );
        inittab = 1;
    }

    if( !CV_IS_MAT( src ))
    {
        int coi = 0;
        CV_CALL( src = cvGetMat( src, &sstub, &coi ));
        if( coi != 0 )
            CV_ERROR( CV_BadCOI, "coi is not supported" );
    }

    type = CV_MAT_TYPE( src->type );
    pix_size = CV_ELEM_SIZE(type);
    size = cvGetMatSize( src );

    if( dstarr == srcarr )
    {
        dst = src;
    }
    else
    {
        if( !CV_IS_MAT( dst ))
        {
            int coi = 0;
            CV_CALL( dst = cvGetMat( dst, &dstub, &coi ));

            if( coi != 0 )
                CV_ERROR( CV_BadCOI, "coi is not supported" );
        }

        if( !CV_ARE_TYPES_EQ( src, dst ))
            CV_ERROR( CV_StsUnmatchedFormats, "" );

        if( size.width != dst->height || size.height != dst->width )
            CV_ERROR( CV_StsUnmatchedSizes, "" );
    }

    if( src->data.ptr == dst->data.ptr )
    {
        if( size.width == size.height )
        {
            CvFunc2D_1A func = (CvFunc2D_1A)(inp_tab.fn_2d[pix_size]);

            if( !func )
                CV_ERROR( CV_StsUnsupportedFormat, "" );

            IPPI_CALL( func( src->data.ptr, src->step, size ));
        }
        else
        {
            if( size.width != 1 && size.height != 1 )
                CV_ERROR( CV_StsBadSize,
                    "Rectangular matrix can not be transposed inplace" );

            if( !CV_IS_MAT_CONT( src->type & dst->type ))
                CV_ERROR( CV_StsBadFlag, "In case of inplace column/row transposition "
                                         "both source and destination must be continuous" );

            if( dst == src )
            {
                int t;
                CV_SWAP( dst->width, dst->height, t );
                dst->step = dst->height == 1 ? 0 : pix_size;
            }
        }
    }
    else
    {
        CvFunc2D_2A func = (CvFunc2D_2A)(tab.fn_2d[pix_size]);

        if( !func )
            CV_ERROR( CV_StsUnsupportedFormat, "" );

        IPPI_CALL( func( src->data.ptr, src->step,
                         dst->data.ptr, dst->step, size ));
    }

    __END__;
}


/****************************************************************************************\
*                               Symmetric matrix completion                              *
\****************************************************************************************/

CV_IMPL void
cvCompleteSymm( CvMat* matrix, int LtoR )
{
    CV_FUNCNAME( "cvCompleteSymm" );

    __BEGIN__;

    int i, j, nrows;

    CV_ASSERT( CV_IS_MAT(matrix) && matrix->rows == matrix->cols );

    nrows = matrix->rows;

    if( CV_MAT_TYPE(matrix->type) == CV_32FC1 || CV_MAT_TYPE(matrix->type) == CV_32SC1 )
    {
        int* data = matrix->data.i;
        int step = matrix->step/sizeof(data[0]);
        int j0 = 0, j1 = nrows;
        for( i = 0; i < nrows; i++ )
        {
            if( !LtoR ) j1 = i; else j0 = i+1;
            for( j = j0; j < j1; j++ )
                data[i*step + j] = data[j*step + i];
        }
    }
    else if( CV_MAT_TYPE(matrix->type) == CV_64FC1 )
    {
        double* data = matrix->data.db;
        int step = matrix->step/sizeof(data[0]);
        int j0 = 0, j1 = nrows;
        for( i = 0; i < nrows; i++ )
        {
            if( !LtoR ) j1 = i; else j0 = i+1;
            for( j = j0; j < j1; j++ )
                data[i*step + j] = data[j*step + i];
        }
    }
    else
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    __END__;
}


/****************************************************************************************\
*                           LU decomposition/back substitution                           *
\****************************************************************************************/

#define arrtype float
#define temptype double

typedef CvStatus (CV_STDCALL * CvLUDecompFunc)( double* A, int stepA, CvSize sizeA,
                                                void* B, int stepB, CvSize sizeB,
                                                double* det );

typedef CvStatus (CV_STDCALL * CvLUBackFunc)( double* A, int stepA, CvSize sizeA,
                                              void* B, int stepB, CvSize sizeB );


#define ICV_DEF_LU_DECOMP_FUNC( flavor, arrtype )                           \
static CvStatus CV_STDCALL                                                  \
icvLUDecomp_##flavor( double* A, int stepA, CvSize sizeA,                   \
                      arrtype* B, int stepB, CvSize sizeB, double* _det )   \
{                                                                           \
    int n = sizeA.width;                                                    \
    int m = 0, i;                                                           \
    double det = 1;                                                         \
                                                                            \
    assert( sizeA.width == sizeA.height );                                  \
                                                                            \
    if( B )                                                                 \
    {                                                                       \
        assert( sizeA.height == sizeB.height );                             \
        m = sizeB.width;                                                    \
    }                                                                       \
    stepA /= sizeof(A[0]);                                                  \
    stepB /= sizeof(B[0]);                                                  \
                                                                            \
    for( i = 0; i < n; i++, A += stepA, B += stepB )                        \
    {                                                                       \
        int j, k = i;                                                       \
        double* tA = A;                                                     \
        arrtype* tB = 0;                                                    \
        double kval = fabs(A[i]), tval;                                     \
                                                                            \
        /* find the pivot element */                                        \
        for( j = i + 1; j < n; j++ )                                        \
        {                                                                   \
            tA += stepA;                                                    \
            tval = fabs(tA[i]);                                             \
                                                                            \
            if( tval > kval )                                               \
            {                                                               \
                kval = tval;                                                \
                k = j;                                                      \
            }                                                               \
        }                                                                   \
                                                                            \
        if( kval == 0 )                                                     \
        {                                                                   \
            det = 0;                                                        \
            break;                                                          \
        }                                                                   \
                                                                            \
        /* swap rows */                                                     \
        if( k != i )                                                        \
        {                                                                   \
            tA = A + stepA*(k - i);                                         \
            det = -det;                                                     \
                                                                            \
            for( j = i; j < n; j++ )                                        \
            {                                                               \
                double t;                                                   \
                CV_SWAP( A[j], tA[j], t );                                  \
            }                                                               \
                                                                            \
            if( m > 0 )                                                     \
            {                                                               \
                tB = B + stepB*(k - i);                                     \
                                                                            \
                for( j = 0; j < m; j++ )                                    \
                {                                                           \
                    arrtype t = B[j];                                       \
                    CV_SWAP( B[j], tB[j], t );                              \
                }                                                           \
            }                                                               \
        }                                                                   \
                                                                            \
        tval = 1./A[i];                                                     \
        det *= A[i];                                                        \
        tA = A;                                                             \
        tB = B;                                                             \
        A[i] = tval; /* to replace division with multiplication in LUBack */ \
                                                                            \
        /* update matrix and the right side of the system */                \
        for( j = i + 1; j < n; j++ )                                        \
        {                                                                   \
            tA += stepA;                                                    \
            tB += stepB;                                                    \
            double alpha = -tA[i]*tval;                                     \
                                                                            \
            for( k = i + 1; k < n; k++ )                                    \
                tA[k] = tA[k] + alpha*A[k];                                 \
                                                                            \
            if( m > 0 )                                                     \
                for( k = 0; k < m; k++ )                                    \
                    tB[k] = (arrtype)(tB[k] + alpha*B[k]);                  \
        }                                                                   \
    }                                                                       \
                                                                            \
    if( _det )                                                              \
        *_det = det;                                                        \
                                                                            \
    return CV_OK;                                                           \
}


ICV_DEF_LU_DECOMP_FUNC( 32f, float )
ICV_DEF_LU_DECOMP_FUNC( 64f, double )


#define ICV_DEF_LU_BACK_FUNC( flavor, arrtype )                             \
static CvStatus CV_STDCALL                                                  \
icvLUBack_##flavor( double* A, int stepA, CvSize sizeA,                     \
                    arrtype* B, int stepB, CvSize sizeB )                   \
{                                                                           \
    int n = sizeA.width;                                                    \
    int m = sizeB.width, i;                                                 \
                                                                            \
    assert( m > 0 && sizeA.width == sizeA.height &&                         \
            sizeA.height == sizeB.height );                                 \
    stepA /= sizeof(A[0]);                                                  \
    stepB /= sizeof(B[0]);                                                  \
                                                                            \
    A += stepA*(n - 1);                                                     \
    B += stepB*(n - 1);                                                     \
                                                                            \
    for( i = n - 1; i >= 0; i--, A -= stepA )                               \
    {                                                                       \
        int j, k;                                                           \
        for( j = 0; j < m; j++ )                                            \
        {                                                                   \
            arrtype* tB = B + j;                                            \
            double x = 0;                                                   \
                                                                            \
            for( k = n - 1; k > i; k--, tB -= stepB )                       \
                x += A[k]*tB[0];                                            \
                                                                            \
            tB[0] = (arrtype)((tB[0] - x)*A[i]);                            \
        }                                                                   \
    }                                                                       \
                                                                            \
    return CV_OK;                                                           \
}


ICV_DEF_LU_BACK_FUNC( 32f, float )
ICV_DEF_LU_BACK_FUNC( 64f, double )

static CvFuncTable lu_decomp_tab, lu_back_tab;
static int lu_inittab = 0;

static void icvInitLUTable( CvFuncTable* decomp_tab,
                            CvFuncTable* back_tab )
{
    decomp_tab->fn_2d[0] = (void*)icvLUDecomp_32f;
    decomp_tab->fn_2d[1] = (void*)icvLUDecomp_64f;
    back_tab->fn_2d[0] = (void*)icvLUBack_32f;
    back_tab->fn_2d[1] = (void*)icvLUBack_64f;
}



/****************************************************************************************\
*                                Determinant of the matrix                               *
\****************************************************************************************/

#define det2(m)   (m(0,0)*m(1,1) - m(0,1)*m(1,0))
#define det3(m)   (m(0,0)*(m(1,1)*m(2,2) - m(1,2)*m(2,1)) -  \
                   m(0,1)*(m(1,0)*m(2,2) - m(1,2)*m(2,0)) +  \
                   m(0,2)*(m(1,0)*m(2,1) - m(1,1)*m(2,0)))

CV_IMPL double
cvDet( const CvArr* arr )
{
    double result = 0;
    uchar* buffer = 0;
    int local_alloc = 0;

    CV_FUNCNAME( "cvDet" );

    __BEGIN__;

    CvMat stub, *mat = (CvMat*)arr;
    int type;

    if( !CV_IS_MAT( mat ))
    {
        CV_CALL( mat = cvGetMat( mat, &stub ));
    }

    type = CV_MAT_TYPE( mat->type );

    if( mat->width != mat->height )
        CV_ERROR( CV_StsBadSize, "The matrix must be square" );

    #define Mf( y, x ) ((float*)(m + y*step))[x]
    #define Md( y, x ) ((double*)(m + y*step))[x]

    if( mat->width == 2 )
    {
        uchar* m = mat->data.ptr;
        int step = mat->step;

        if( type == CV_32FC1 )
        {
            result = det2(Mf);
        }
        else if( type == CV_64FC1 )
        {
            result = det2(Md);
        }
        else
        {
            CV_ERROR( CV_StsUnsupportedFormat, "" );
        }
    }
    else if( mat->width == 3 )
    {
        uchar* m = mat->data.ptr;
        int step = mat->step;

        if( type == CV_32FC1 )
        {
            result = det3(Mf);
        }
        else if( type == CV_64FC1 )
        {
            result = det3(Md);
        }
        else
        {
            CV_ERROR( CV_StsUnsupportedFormat, "" );
        }
    }
    else if( mat->width == 1 )
    {
        if( type == CV_32FC1 )
        {
            result = mat->data.fl[0];
        }
        else if( type == CV_64FC1 )
        {
            result = mat->data.db[0];
        }
        else
        {
            CV_ERROR( CV_StsUnsupportedFormat, "" );
        }
    }
    else
    {
        CvLUDecompFunc decomp_func;
        CvSize size = cvGetMatSize( mat );
        const int worktype = CV_64FC1;
        int buf_size = size.width*size.height*CV_ELEM_SIZE(worktype);
        CvMat tmat;

        if( !lu_inittab )
        {
            icvInitLUTable( &lu_decomp_tab, &lu_back_tab );
            lu_inittab = 1;
        }

        if( CV_MAT_CN( type ) != 1 || CV_MAT_DEPTH( type ) < CV_32F )
            CV_ERROR( CV_StsUnsupportedFormat, "" );

        if( size.width <= CV_MAX_LOCAL_MAT_SIZE )
        {
            buffer = (uchar*)cvStackAlloc( buf_size );
            local_alloc = 1;
        }
        else
        {
            CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
        }

        CV_CALL( cvInitMatHeader( &tmat, size.height, size.width, worktype, buffer ));
        if( type == worktype )
        {
            CV_CALL( cvCopy( mat, &tmat ));
        }
        else
            CV_CALL( cvConvert( mat, &tmat ));

        decomp_func = (CvLUDecompFunc)(lu_decomp_tab.fn_2d[CV_MAT_DEPTH(worktype)-CV_32F]);
        assert( decomp_func );

        IPPI_CALL( decomp_func( tmat.data.db, tmat.step, size, 0, 0, size, &result ));
    }

    #undef Mf
    #undef Md

    /*icvCheckVector_64f( &result, 1 );*/

    __END__;

    if( buffer && !local_alloc )
        cvFree( &buffer );

    return result;
}



/****************************************************************************************\
*                         Inverse (or pseudo-inverse) of the matrix                      *
\****************************************************************************************/

#define Sf( y, x ) ((float*)(srcdata + y*srcstep))[x]
#define Sd( y, x ) ((double*)(srcdata + y*srcstep))[x]
#define Df( y, x ) ((float*)(dstdata + y*dststep))[x]
#define Dd( y, x ) ((double*)(dstdata + y*dststep))[x]

CV_IMPL double
cvInvert( const CvArr* srcarr, CvArr* dstarr, int method )
{
    CvMat* u = 0;
    CvMat* v = 0;
    CvMat* w = 0;

    uchar* buffer = 0;
    int local_alloc = 0;
    double result = 0;

    CV_FUNCNAME( "cvInvert" );

    __BEGIN__;

    CvMat sstub, *src = (CvMat*)srcarr;
    CvMat dstub, *dst = (CvMat*)dstarr;
    int type;

    if( !CV_IS_MAT( src ))
        CV_CALL( src = cvGetMat( src, &sstub ));

    if( !CV_IS_MAT( dst ))
        CV_CALL( dst = cvGetMat( dst, &dstub ));

    type = CV_MAT_TYPE( src->type );

    if( method == CV_SVD || method == CV_SVD_SYM )
    {
        int n = MIN(src->rows,src->cols);
        if( method == CV_SVD_SYM && src->rows != src->cols )
            CV_ERROR( CV_StsBadSize, "CV_SVD_SYM method is used for non-square matrix" );

        CV_CALL( u = cvCreateMat( n, src->rows, src->type ));
        if( method != CV_SVD_SYM )
            CV_CALL( v = cvCreateMat( n, src->cols, src->type ));
        CV_CALL( w = cvCreateMat( n, 1, src->type ));
        CV_CALL( cvSVD( src, w, u, v, CV_SVD_U_T + CV_SVD_V_T ));

        if( type == CV_32FC1 )
            result = w->data.fl[0] >= FLT_EPSILON ?
                     w->data.fl[w->rows-1]/w->data.fl[0] : 0;
        else
            result = w->data.db[0] >= FLT_EPSILON ?
                     w->data.db[w->rows-1]/w->data.db[0] : 0;

        CV_CALL( cvSVBkSb( w, u, v ? v : u, 0, dst, CV_SVD_U_T + CV_SVD_V_T ));
        EXIT;
    }
    else if( method != CV_LU )
        CV_ERROR( CV_StsBadArg, "Unknown inversion method" );

    if( !CV_ARE_TYPES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    if( src->width != src->height )
        CV_ERROR( CV_StsBadSize, "The matrix must be square" );

    if( !CV_ARE_SIZES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    if( type != CV_32FC1 && type != CV_64FC1 )
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    if( src->width <= 3 )
    {
        uchar* srcdata = src->data.ptr;
        uchar* dstdata = dst->data.ptr;
        int srcstep = src->step;
        int dststep = dst->step;

        if( src->width == 2 )
        {
            if( type == CV_32FC1 )
            {
                double d = det2(Sf);
                if( d != 0. )
                {
                    double t0, t1;
                    result = d;
                    d = 1./d;
                    t0 = Sf(0,0)*d;
                    t1 = Sf(1,1)*d;
                    Df(1,1) = (float)t0;
                    Df(0,0) = (float)t1;
                    t0 = -Sf(0,1)*d;
                    t1 = -Sf(1,0)*d;
                    Df(0,1) = (float)t0;
                    Df(1,0) = (float)t1;
                }
            }
            else
            {
                double d = det2(Sd);
                if( d != 0. )
                {
                    double t0, t1;
                    result = d;
                    d = 1./d;
                    t0 = Sd(0,0)*d;
                    t1 = Sd(1,1)*d;
                    Dd(1,1) = t0;
                    Dd(0,0) = t1;
                    t0 = -Sd(0,1)*d;
                    t1 = -Sd(1,0)*d;
                    Dd(0,1) = t0;
                    Dd(1,0) = t1;
                }
            }
        }
        else if( src->width == 3 )
        {
            if( type == CV_32FC1 )
            {
                double d = det3(Sf);
                if( d != 0. )
                {
                    float t[9];
                    result = d;
                    d = 1./d;

                    t[0] = (float)((Sf(1,1) * Sf(2,2) - Sf(1,2) * Sf(2,1)) * d);
                    t[1] = (float)((Sf(0,2) * Sf(2,1) - Sf(0,1) * Sf(2,2)) * d);
                    t[2] = (float)((Sf(0,1) * Sf(1,2) - Sf(0,2) * Sf(1,1)) * d);

                    t[3] = (float)((Sf(1,2) * Sf(2,0) - Sf(1,0) * Sf(2,2)) * d);
                    t[4] = (float)((Sf(0,0) * Sf(2,2) - Sf(0,2) * Sf(2,0)) * d);
                    t[5] = (float)((Sf(0,2) * Sf(1,0) - Sf(0,0) * Sf(1,2)) * d);

                    t[6] = (float)((Sf(1,0) * Sf(2,1) - Sf(1,1) * Sf(2,0)) * d);
                    t[7] = (float)((Sf(0,1) * Sf(2,0) - Sf(0,0) * Sf(2,1)) * d);
                    t[8] = (float)((Sf(0,0) * Sf(1,1) - Sf(0,1) * Sf(1,0)) * d);

                    Df(0,0) = t[0]; Df(0,1) = t[1]; Df(0,2) = t[2];
                    Df(1,0) = t[3]; Df(1,1) = t[4]; Df(1,2) = t[5];
                    Df(2,0) = t[6]; Df(2,1) = t[7]; Df(2,2) = t[8];
                }
            }
            else
            {
                double d = det3(Sd);
                if( d != 0. )
                {
                    double t[9];
                    result = d;
                    d = 1./d;

                    t[0] = (Sd(1,1) * Sd(2,2) - Sd(1,2) * Sd(2,1)) * d;
                    t[1] = (Sd(0,2) * Sd(2,1) - Sd(0,1) * Sd(2,2)) * d;
                    t[2] = (Sd(0,1) * Sd(1,2) - Sd(0,2) * Sd(1,1)) * d;

                    t[3] = (Sd(1,2) * Sd(2,0) - Sd(1,0) * Sd(2,2)) * d;
                    t[4] = (Sd(0,0) * Sd(2,2) - Sd(0,2) * Sd(2,0)) * d;
                    t[5] = (Sd(0,2) * Sd(1,0) - Sd(0,0) * Sd(1,2)) * d;

                    t[6] = (Sd(1,0) * Sd(2,1) - Sd(1,1) * Sd(2,0)) * d;
                    t[7] = (Sd(0,1) * Sd(2,0) - Sd(0,0) * Sd(2,1)) * d;
                    t[8] = (Sd(0,0) * Sd(1,1) - Sd(0,1) * Sd(1,0)) * d;

                    Dd(0,0) = t[0]; Dd(0,1) = t[1]; Dd(0,2) = t[2];
                    Dd(1,0) = t[3]; Dd(1,1) = t[4]; Dd(1,2) = t[5];
                    Dd(2,0) = t[6]; Dd(2,1) = t[7]; Dd(2,2) = t[8];
                }
            }
        }
        else
        {
            assert( src->width == 1 );

            if( type == CV_32FC1 )
            {
                double d = Sf(0,0);
                if( d != 0. )
                {
                    result = d;
                    Df(0,0) = (float)(1./d);
                }
            }
            else
            {
                double d = Sd(0,0);
                if( d != 0. )
                {
                    result = d;
                    Dd(0,0) = 1./d;
                }
            }
        }
    }
    else
    {
        CvLUDecompFunc decomp_func;
        CvLUBackFunc back_func;
        CvSize size = cvGetMatSize( src );
        const int worktype = CV_64FC1;
        int buf_size = size.width*size.height*CV_ELEM_SIZE(worktype);
        CvMat tmat;

        if( !lu_inittab )
        {
            icvInitLUTable( &lu_decomp_tab, &lu_back_tab );
            lu_inittab = 1;
        }

        if( size.width <= CV_MAX_LOCAL_MAT_SIZE )
        {
            buffer = (uchar*)cvStackAlloc( buf_size );
            local_alloc = 1;
        }
        else
        {
            CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
        }

        CV_CALL( cvInitMatHeader( &tmat, size.height, size.width, worktype, buffer ));
        if( type == worktype )
        {
            CV_CALL( cvCopy( src, &tmat ));
        }
        else
            CV_CALL( cvConvert( src, &tmat ));
        CV_CALL( cvSetIdentity( dst ));

        decomp_func = (CvLUDecompFunc)(lu_decomp_tab.fn_2d[CV_MAT_DEPTH(type)-CV_32F]);
        back_func = (CvLUBackFunc)(lu_back_tab.fn_2d[CV_MAT_DEPTH(type)-CV_32F]);
        assert( decomp_func && back_func );

        IPPI_CALL( decomp_func( tmat.data.db, tmat.step, size,
                                dst->data.ptr, dst->step, size, &result ));

        if( result != 0 )
        {
            IPPI_CALL( back_func( tmat.data.db, tmat.step, size,
                                  dst->data.ptr, dst->step, size ));
        }
    }

    if( !result )
        CV_CALL( cvSetZero( dst ));

    __END__;

    if( buffer && !local_alloc )
        cvFree( &buffer );

    if( u || v || w )
    {
        cvReleaseMat( &u );
        cvReleaseMat( &v );
        cvReleaseMat( &w );
    }

    return result;
}


/****************************************************************************************\
*                           Linear system [least-squares] solution                       *
\****************************************************************************************/

static void
icvLSQ( const CvMat* A, const CvMat* B, CvMat* X )
{
    CvMat* AtA = 0;
    CvMat* AtB = 0;
    CvMat* W = 0;
    CvMat* V = 0;

    CV_FUNCNAME( "icvLSQ" );

    __BEGIN__;

    if( !CV_IS_MAT(A) || !CV_IS_MAT(B) || !CV_IS_MAT(X) )
        CV_ERROR( CV_StsBadArg, "Some of the required arguments are not valid matrices" );

    AtA = cvCreateMat( A->cols, A->cols, A->type );
    AtB = cvCreateMat( A->cols, 1, A->type );
    W = cvCreateMat( A->cols, 1, A->type );
    V = cvCreateMat( A->cols, A->cols, A->type );

    cvMulTransposed( A, AtA, 1 );
    cvGEMM( A, B, 1, 0, 0, AtB, CV_GEMM_A_T );
    cvSVD( AtA, W, 0, V, CV_SVD_MODIFY_A + CV_SVD_V_T );
    cvSVBkSb( W, V, V, AtB, X, CV_SVD_U_T + CV_SVD_V_T );

    __END__;

    cvReleaseMat( &AtA );
    cvReleaseMat( &AtB );
    cvReleaseMat( &W );
    cvReleaseMat( &V );
}

CV_IMPL int
cvSolve( const CvArr* A, const CvArr* b, CvArr* x, int method )
{
    CvMat* u = 0;
    CvMat* v = 0;
    CvMat* w = 0;

    uchar* buffer = 0;
    int local_alloc = 0;
    int result = 1;

    CV_FUNCNAME( "cvSolve" );

    __BEGIN__;

    CvMat sstub, *src = (CvMat*)A;
    CvMat dstub, *dst = (CvMat*)x;
    CvMat bstub, *src2 = (CvMat*)b;
    int type;

    if( !CV_IS_MAT( src ))
        CV_CALL( src = cvGetMat( src, &sstub ));

    if( !CV_IS_MAT( src2 ))
        CV_CALL( src2 = cvGetMat( src2, &bstub ));

    if( !CV_IS_MAT( dst ))
        CV_CALL( dst = cvGetMat( dst, &dstub ));

    if( method & CV_LSQ )
    {
        icvLSQ( src, src2, dst );
        EXIT;
    }

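    /* Note (descriptive comment added for clarity): the CV_LSQ path above forms the
       normal equations (A^T*A)*x = A^T*b and solves them via SVD inside icvLSQ();
       the branches below handle the square-system methods: CV_SVD/CV_SVD_SYM
       (singular value decomposition) and CV_LU (LU decomposition with partial
       pivoting). */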
    if( method == CV_SVD || method == CV_SVD_SYM )
    {
        int n = MIN(src->rows,src->cols);

        if( method == CV_SVD_SYM && src->rows != src->cols )
            CV_ERROR( CV_StsBadSize, "CV_SVD_SYM method is used for non-square matrix" );

        CV_CALL( u = cvCreateMat( n, src->rows, src->type ));
        if( method != CV_SVD_SYM )
            CV_CALL( v = cvCreateMat( n, src->cols, src->type ));
        CV_CALL( w = cvCreateMat( n, 1, src->type ));
        CV_CALL( cvSVD( src, w, u, v, CV_SVD_U_T + CV_SVD_V_T ));
        CV_CALL( cvSVBkSb( w, u, v ? v : u, src2, dst, CV_SVD_U_T + CV_SVD_V_T ));
        EXIT;
    }
    else if( method != CV_LU )
        CV_ERROR( CV_StsBadArg, "Unknown inversion method" );

    type = CV_MAT_TYPE( src->type );

    if( !CV_ARE_TYPES_EQ( src, dst ) || !CV_ARE_TYPES_EQ( src, src2 ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    if( src->width != src->height )
        CV_ERROR( CV_StsBadSize, "The matrix must be square" );

    if( !CV_ARE_SIZES_EQ( src2, dst ) || src->width != src2->height )
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    if( type != CV_32FC1 && type != CV_64FC1 )
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    // check case of a single equation and small matrix
    if( src->width <= 3 && src2->width == 1 )
    {
        #define bf(y) ((float*)(bdata + y*src2step))[0]
        #define bd(y) ((double*)(bdata + y*src2step))[0]

        uchar* srcdata = src->data.ptr;
        uchar* bdata = src2->data.ptr;
        uchar* dstdata = dst->data.ptr;
        int srcstep = src->step;
        int src2step = src2->step;
        int dststep = dst->step;

        if( src->width == 2 )
        {
            if( type == CV_32FC1 )
            {
                double d = det2(Sf);
                if( d != 0. )
                {
                    float t;
                    d = 1./d;
                    t = (float)((bf(0)*Sf(1,1) - bf(1)*Sf(0,1))*d);
                    Df(1,0) = (float)((bf(1)*Sf(0,0) - bf(0)*Sf(1,0))*d);
                    Df(0,0) = t;
                }
                else
                    result = 0;
            }
            else
            {
                double d = det2(Sd);
                if( d != 0. )
                {
                    double t;
                    d = 1./d;
                    t = (bd(0)*Sd(1,1) - bd(1)*Sd(0,1))*d;
                    Dd(1,0) = (bd(1)*Sd(0,0) - bd(0)*Sd(1,0))*d;
                    Dd(0,0) = t;
                }
                else
                    result = 0;
            }
        }
        else if( src->width == 3 )
        {
            if( type == CV_32FC1 )
            {
                double d = det3(Sf);
                if( d != 0. )
                {
                    float t[3];
                    d = 1./d;

                    t[0] = (float)(d*
                           (bf(0)*(Sf(1,1)*Sf(2,2) - Sf(1,2)*Sf(2,1)) -
                            Sf(0,1)*(bf(1)*Sf(2,2) - Sf(1,2)*bf(2)) +
                            Sf(0,2)*(bf(1)*Sf(2,1) - Sf(1,1)*bf(2))));

                    t[1] = (float)(d*
                           (Sf(0,0)*(bf(1)*Sf(2,2) - Sf(1,2)*bf(2)) -
                            bf(0)*(Sf(1,0)*Sf(2,2) - Sf(1,2)*Sf(2,0)) +
                            Sf(0,2)*(Sf(1,0)*bf(2) - bf(1)*Sf(2,0))));

                    t[2] = (float)(d*
                           (Sf(0,0)*(Sf(1,1)*bf(2) - bf(1)*Sf(2,1)) -
                            Sf(0,1)*(Sf(1,0)*bf(2) - bf(1)*Sf(2,0)) +
                            bf(0)*(Sf(1,0)*Sf(2,1) - Sf(1,1)*Sf(2,0))));

                    Df(0,0) = t[0];
                    Df(1,0) = t[1];
                    Df(2,0) = t[2];
                }
                else
                    result = 0;
            }
            else
            {
                double d = det3(Sd);
                if( d != 0. )
                {
                    double t[9];

                    d = 1./d;

                    t[0] = ((Sd(1,1) * Sd(2,2) - Sd(1,2) * Sd(2,1))*bd(0) +
                            (Sd(0,2) * Sd(2,1) - Sd(0,1) * Sd(2,2))*bd(1) +
                            (Sd(0,1) * Sd(1,2) - Sd(0,2) * Sd(1,1))*bd(2))*d;

                    t[1] = ((Sd(1,2) * Sd(2,0) - Sd(1,0) * Sd(2,2))*bd(0) +
                            (Sd(0,0) * Sd(2,2) - Sd(0,2) * Sd(2,0))*bd(1) +
                            (Sd(0,2) * Sd(1,0) - Sd(0,0) * Sd(1,2))*bd(2))*d;

                    t[2] = ((Sd(1,0) * Sd(2,1) - Sd(1,1) * Sd(2,0))*bd(0) +
                            (Sd(0,1) * Sd(2,0) - Sd(0,0) * Sd(2,1))*bd(1) +
                            (Sd(0,0) * Sd(1,1) - Sd(0,1) * Sd(1,0))*bd(2))*d;

                    Dd(0,0) = t[0];
                    Dd(1,0) = t[1];
                    Dd(2,0) = t[2];
                }
                else
                    result = 0;
            }
        }
        else
        {
            assert( src->width == 1 );

            if( type == CV_32FC1 )
            {
                double d = Sf(0,0);
                if( d != 0. )
                    Df(0,0) = (float)(bf(0)/d);
                else
                    result = 0;
            }
            else
            {
                double d = Sd(0,0);
                if( d != 0. )
                    Dd(0,0) = (bd(0)/d);
                else
                    result = 0;
            }
        }
    }
    else
    {
        CvLUDecompFunc decomp_func;
        CvLUBackFunc back_func;
        CvSize size = cvGetMatSize( src );
        CvSize dstsize = cvGetMatSize( dst );
        int worktype = CV_64FC1;
        int buf_size = size.width*size.height*CV_ELEM_SIZE(worktype);
        double d = 0;
        CvMat tmat;

        if( !lu_inittab )
        {
            icvInitLUTable( &lu_decomp_tab, &lu_back_tab );
            lu_inittab = 1;
        }

        if( size.width <= CV_MAX_LOCAL_MAT_SIZE )
        {
            buffer = (uchar*)cvStackAlloc( buf_size );
            local_alloc = 1;
        }
        else
        {
            CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
        }

        CV_CALL( cvInitMatHeader( &tmat, size.height, size.width, worktype, buffer ));
        if( type == worktype )
        {
            CV_CALL( cvCopy( src, &tmat ));
        }
        else
            CV_CALL( cvConvert( src, &tmat ));

        if( src2->data.ptr != dst->data.ptr )
        {
            CV_CALL( cvCopy( src2, dst ));
        }

        decomp_func = (CvLUDecompFunc)(lu_decomp_tab.fn_2d[CV_MAT_DEPTH(type)-CV_32F]);
        back_func = (CvLUBackFunc)(lu_back_tab.fn_2d[CV_MAT_DEPTH(type)-CV_32F]);
        assert( decomp_func && back_func );

        IPPI_CALL( decomp_func( tmat.data.db, tmat.step, size,
                                dst->data.ptr, dst->step, dstsize, &d ));

        if( d != 0 )
        {
            IPPI_CALL( back_func( tmat.data.db, tmat.step, size,
                                  dst->data.ptr, dst->step, dstsize ));
        }
        else
            result = 0;
    }

    if( !result )
        CV_CALL( cvSetZero( dst ));

    __END__;

    if( buffer && !local_alloc )
        cvFree( &buffer );

    if( u || v || w )
    {
        cvReleaseMat( &u );
        cvReleaseMat( &v );
        cvReleaseMat( &w );
    }

    return result;
}



/****************************************************************************************\
*                                3D vector cross-product                                 *
\****************************************************************************************/

CV_IMPL void
cvCrossProduct( const CvArr* srcAarr, const CvArr* srcBarr, CvArr* dstarr )
{
    CV_FUNCNAME( "cvCrossProduct" );

    __BEGIN__;

    CvMat stubA, *srcA = (CvMat*)srcAarr;
    CvMat stubB, *srcB = (CvMat*)srcBarr;
    CvMat dstub, *dst = (CvMat*)dstarr;
    int type;

    if( !CV_IS_MAT(srcA))
        CV_CALL( srcA = cvGetMat( srcA, &stubA ));

    type = CV_MAT_TYPE( srcA->type );

    if( srcA->width*srcA->height*CV_MAT_CN(type) != 3 )
        CV_ERROR( CV_StsBadArg, "All the input arrays must be continuous 3-vectors" );

    if( !srcB || !dst )
        CV_ERROR( CV_StsNullPtr, "" );

    if( (srcA->type & ~CV_MAT_CONT_FLAG) == (srcB->type & ~CV_MAT_CONT_FLAG) &&
        (srcA->type & ~CV_MAT_CONT_FLAG) == (dst->type & ~CV_MAT_CONT_FLAG) )
    {
        if( !srcB->data.ptr || !dst->data.ptr )
            CV_ERROR( CV_StsNullPtr, "" );
    }
    else
    {
        if( !CV_IS_MAT(srcB))
            CV_CALL( srcB = cvGetMat( srcB, &stubB ));

        if( !CV_IS_MAT(dst))
            CV_CALL( dst = cvGetMat( dst, &dstub ));

        if( !CV_ARE_TYPES_EQ( srcA, srcB ) ||
            !CV_ARE_TYPES_EQ( srcB, dst ))
            CV_ERROR( CV_StsUnmatchedFormats, "" );
    }

    if( !CV_ARE_SIZES_EQ( srcA, srcB ) || !CV_ARE_SIZES_EQ( srcB, dst ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    if( CV_MAT_DEPTH(type) == CV_32F )
    {
        float* dstdata = (float*)(dst->data.ptr);
        const float* src1data = (float*)(srcA->data.ptr);
        const float* src2data = (float*)(srcB->data.ptr);

        if( CV_IS_MAT_CONT(srcA->type & srcB->type & dst->type) )
        {
            dstdata[2] = src1data[0] * src2data[1] - src1data[1] * src2data[0];
            dstdata[0] = src1data[1] * src2data[2] - src1data[2] * src2data[1];
            dstdata[1] = src1data[2] * src2data[0] - src1data[0] * src2data[2];
        }
        else
        {
            int step1 = srcA->step ? srcA->step/sizeof(src1data[0]) : 1;
            int step2 = srcB->step ? srcB->step/sizeof(src1data[0]) : 1;
            int step = dst->step ? dst->step/sizeof(src1data[0]) : 1;

            dstdata[2*step] = src1data[0] * src2data[step2] - src1data[step1] * src2data[0];
            dstdata[0] = src1data[step1] * src2data[step2*2] - src1data[step1*2] * src2data[step2];
            dstdata[step] = src1data[step1*2] * src2data[0] - src1data[0] * src2data[step2*2];
        }
    }
    else if( CV_MAT_DEPTH(type) == CV_64F )
    {
        double* dstdata = (double*)(dst->data.ptr);
        const double* src1data = (double*)(srcA->data.ptr);
        const double* src2data = (double*)(srcB->data.ptr);

        if( CV_IS_MAT_CONT(srcA->type & srcB->type & dst->type) )
        {
            dstdata[2] = src1data[0] * src2data[1] - src1data[1] * src2data[0];
            dstdata[0] = src1data[1] * src2data[2] - src1data[2] * src2data[1];
            dstdata[1] = src1data[2] * src2data[0] - src1data[0] * src2data[2];
        }
        else
        {
            int step1 = srcA->step ? srcA->step/sizeof(src1data[0]) : 1;
            int step2 = srcB->step ? srcB->step/sizeof(src1data[0]) : 1;
            int step = dst->step ? dst->step/sizeof(src1data[0]) : 1;

            dstdata[2*step] = src1data[0] * src2data[step2] - src1data[step1] * src2data[0];
            dstdata[0] = src1data[step1] * src2data[step2*2] - src1data[step1*2] * src2data[step2];
            dstdata[step] = src1data[step1*2] * src2data[0] - src1data[0] * src2data[step2*2];
        }
    }
    else
    {
        CV_ERROR( CV_StsUnsupportedFormat, "" );
    }

    __END__;
}


CV_IMPL void
cvCalcPCA( const CvArr* data_arr, CvArr* avg_arr, CvArr* eigenvals, CvArr* eigenvects, int flags )
{
    CvMat* tmp_avg = 0;
    CvMat* tmp_avg_r = 0;
    CvMat* tmp_cov = 0;
    CvMat* tmp_evals = 0;
    CvMat* tmp_evects = 0;
    CvMat* tmp_evects2 = 0;
    CvMat* tmp_data = 0;

    CV_FUNCNAME( "cvCalcPCA" );

    __BEGIN__;

    CvMat stub, *data = (CvMat*)data_arr;
    CvMat astub, *avg = (CvMat*)avg_arr;
    CvMat evalstub, *evals = (CvMat*)eigenvals;
    CvMat evectstub, *evects = (CvMat*)eigenvects;
    int covar_flags = CV_COVAR_SCALE;
    int i, len, in_count, count, out_count;

    if( !CV_IS_MAT(data) )
        CV_CALL( data = cvGetMat( data, &stub ));

    if( !CV_IS_MAT(avg) )
        CV_CALL( avg = cvGetMat( avg, &astub ));

    if( !CV_IS_MAT(evals) )
        CV_CALL( evals = cvGetMat( evals, &evalstub ));

    if( !CV_IS_MAT(evects) )
        CV_CALL( evects = cvGetMat( evects, &evectstub ));

    if( CV_MAT_CN(data->type) != 1 || CV_MAT_CN(avg->type) != 1 ||
        CV_MAT_CN(evals->type) != 1 || CV_MAT_CN(evects->type) != 1 )
        CV_ERROR( CV_StsUnsupportedFormat, "All the input and output arrays must be 1-channel" );

    if( CV_MAT_DEPTH(avg->type) < CV_32F || !CV_ARE_DEPTHS_EQ(avg, evals) ||
        !CV_ARE_DEPTHS_EQ(avg, evects) )
        CV_ERROR( CV_StsUnsupportedFormat, "All the output arrays must have the same type, 32fC1 or 64fC1" );

    if( flags & CV_PCA_DATA_AS_COL )
    {
        len = data->rows;
        in_count = data->cols;
        covar_flags |= CV_COVAR_COLS;

        if( avg->cols != 1 || avg->rows != len )
            CV_ERROR( CV_StsBadSize,
            "The mean (average) vector should be data->rows x 1 when CV_PCA_DATA_AS_COL is used" );

        CV_CALL( tmp_avg = cvCreateMat( len, 1, CV_64F ));
    }
    else
    {
        len = data->cols;
        in_count = data->rows;
        covar_flags |= CV_COVAR_ROWS;

        if( avg->rows != 1 || avg->cols != len )
            CV_ERROR( CV_StsBadSize,
            "The mean (average) vector should be 1 x data->cols when CV_PCA_DATA_AS_ROW is used" );

        CV_CALL( tmp_avg = cvCreateMat( 1, len, CV_64F ));
    }

    count = MIN(len, in_count);
    out_count = evals->cols + evals->rows - 1;

    if( (evals->cols != 1 && evals->rows != 1) || out_count > count )
        CV_ERROR( CV_StsBadSize,
        "The array of eigenvalues must be 1d vector containing "
        "no more than min(data->rows,data->cols) elements" );

    if( evects->cols != len || evects->rows != out_count )
        CV_ERROR( CV_StsBadSize,
        "The matrix of eigenvalues must have the same number of columns as the input vector length "
        "and the same number of rows as the number of eigenvalues" );

    // "scrambled" way to compute PCA (when cols(A)>rows(A)):
    // B = A'A; B*x=b*x; C = AA'; C*y=c*y -> AA'*y=c*y -> A'A*(A'*y)=c*(A'*y) -> c = b, x=A'*y
    if( len <= in_count )
        covar_flags |= CV_COVAR_NORMAL;

    if( flags & CV_PCA_USE_AVG )
    {
        covar_flags |= CV_COVAR_USE_AVG;
        CV_CALL( cvConvert( avg, tmp_avg ) );
    }

    CV_CALL( tmp_cov = cvCreateMat( count, count, CV_64F ));
    CV_CALL( tmp_evals = cvCreateMat( 1, count, CV_64F ));
    CV_CALL( tmp_evects = cvCreateMat( count, count, CV_64F ));

    CV_CALL( cvCalcCovarMatrix( &data_arr, 0, tmp_cov, tmp_avg, covar_flags ));
    CV_CALL( cvSVD( tmp_cov, tmp_evals, tmp_evects, 0, CV_SVD_MODIFY_A + CV_SVD_U_T ));
    tmp_evects->rows = out_count;
    tmp_evals->cols = out_count;
    cvZero( evects );
    cvZero( evals );

    if( covar_flags & CV_COVAR_NORMAL )
    {
        CV_CALL( cvConvert( tmp_evects, evects ));
    }
    else
    {
        // CV_PCA_DATA_AS_ROW: cols(A)>rows(A). x=A'*y -> x'=y'*A
        // CV_PCA_DATA_AS_COL: rows(A)>cols(A). x=A''*y -> x'=y'*A'
        int block_count = 0;

        CV_CALL( tmp_data = cvCreateMat( count, count, CV_64F ));
        CV_CALL( tmp_avg_r = cvCreateMat( count, count, CV_64F ));
        CV_CALL( tmp_evects2 = cvCreateMat( out_count, count, CV_64F ));

        for( i = 0; i < len; i += block_count )
        {
            CvMat data_part, tdata_part, part, dst_part, avg_part, tmp_avg_part;
            int gemm_flags;

            block_count = MIN( count, len - i );

            if( flags & CV_PCA_DATA_AS_COL )
            {
                cvGetRows( data, &data_part, i, i + block_count );
                cvGetRows( tmp_data, &tdata_part, 0, block_count );
                cvGetRows( tmp_avg, &avg_part, i, i + block_count );
                cvGetRows( tmp_avg_r, &tmp_avg_part, 0, block_count );
                gemm_flags = CV_GEMM_B_T;
            }
            else
            {
                cvGetCols( data, &data_part, i, i + block_count );
                cvGetCols( tmp_data, &tdata_part, 0, block_count );
                cvGetCols( tmp_avg, &avg_part, i, i + block_count );
                cvGetCols( tmp_avg_r, &tmp_avg_part, 0, block_count );
                gemm_flags = 0;
            }

            cvGetCols( tmp_evects2, &part, 0, block_count );
            cvGetCols( evects, &dst_part, i, i + block_count );

            cvConvert( &data_part, &tdata_part );
            cvRepeat( &avg_part, &tmp_avg_part );
            cvSub( &tdata_part, &tmp_avg_part, &tdata_part );
            cvGEMM( tmp_evects, &tdata_part, 1, 0, 0, &part, gemm_flags );
            cvConvert( &part, &dst_part );
        }

        // normalize eigenvectors
        for( i = 0; i < out_count; i++ )
        {
            CvMat ei;
            cvGetRow( evects, &ei, i );
            cvNormalize( &ei, &ei );
        }
    }

    if( tmp_evals->rows != evals->rows )
        cvReshape( tmp_evals, tmp_evals, 1, evals->rows );
    cvConvert( tmp_evals, evals );
    cvConvert( tmp_avg, avg );

    __END__;

    cvReleaseMat( &tmp_avg );
    cvReleaseMat( &tmp_avg_r );
    cvReleaseMat( &tmp_cov );
    cvReleaseMat( &tmp_evals );
    cvReleaseMat( &tmp_evects );
    cvReleaseMat( &tmp_evects2 );
    cvReleaseMat( &tmp_data );
}


CV_IMPL void
cvProjectPCA( const CvArr* data_arr, const CvArr* avg_arr,
              const CvArr* eigenvects, CvArr* result_arr )
{
    uchar* buffer = 0;
    int local_alloc = 0;

    CV_FUNCNAME( "cvProjectPCA" );

    __BEGIN__;

    CvMat stub, *data = (CvMat*)data_arr;
    CvMat astub, *avg = (CvMat*)avg_arr;
    CvMat evectstub, *evects = (CvMat*)eigenvects;
    CvMat rstub, *result = (CvMat*)result_arr;
    CvMat avg_repeated;
    int i, len, in_count;
    int gemm_flags, as_cols, convert_data;
    int block_count0, block_count, buf_size, elem_size;
    uchar* tmp_data_ptr;

    if( !CV_IS_MAT(data) )
        CV_CALL( data = cvGetMat( data, &stub ));

    if( !CV_IS_MAT(avg) )
        CV_CALL( avg = cvGetMat( avg, &astub ));

    if( !CV_IS_MAT(evects) )
        CV_CALL( evects = cvGetMat( evects, &evectstub ));

    if( !CV_IS_MAT(result) )
        CV_CALL( result = cvGetMat( result, &rstub ));

    if( CV_MAT_CN(data->type) != 1 || CV_MAT_CN(avg->type) != 1 )
        CV_ERROR( CV_StsUnsupportedFormat, "All the input and output arrays must be 1-channel" );

    if( (CV_MAT_TYPE(avg->type) != CV_32FC1 && CV_MAT_TYPE(avg->type) != CV_64FC1) ||
        !CV_ARE_TYPES_EQ(avg, evects) || !CV_ARE_TYPES_EQ(avg, result) )
        CV_ERROR( CV_StsUnsupportedFormat,
        "All the input and output arrays (except for data) must have the same type, 32fC1 or 64fC1" );

    if( (avg->cols != 1 || avg->rows != data->rows) &&
        (avg->rows != 1 || avg->cols != data->cols) )
        CV_ERROR( CV_StsBadSize,
        "The mean (average) vector should be either 1 x data->cols or data->rows x 1" );

    if( avg->cols == 1 )
    {
        len = data->rows;
        in_count = data->cols;

        gemm_flags = CV_GEMM_A_T + CV_GEMM_B_T;
        as_cols = 1;
    }
    else
    {
        len = data->cols;
        in_count = data->rows;

        gemm_flags = CV_GEMM_B_T;
        as_cols = 0;
    }

    if( evects->cols != len )
        CV_ERROR( CV_StsUnmatchedSizes,
        "Eigenvectors must be stored as rows and be of the same size as input vectors" );

    if( result->cols > evects->rows )
        CV_ERROR( CV_StsOutOfRange,
        "The output matrix of coefficients must have the number of columns "
        "less than or equal to the number of eigenvectors (number of rows in eigenvectors matrix)" );

    evects = cvGetRows( evects, &evectstub, 0, result->cols );

    block_count0 = (1 << 16)/len;
    block_count0 = MAX( block_count0, 4 );
    block_count0 = MIN( block_count0, in_count );
    elem_size = CV_ELEM_SIZE(avg->type);
    convert_data = CV_MAT_DEPTH(data->type) < CV_MAT_DEPTH(avg->type);

    buf_size = block_count0*len*((block_count0 > 1) + 1)*elem_size;

    if( buf_size < CV_MAX_LOCAL_SIZE )
    {
        buffer = (uchar*)cvStackAlloc( buf_size );
        local_alloc = 1;
    }
    else
        CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));

    tmp_data_ptr = buffer;
    if( block_count0 > 1 )
    {
        avg_repeated = cvMat( as_cols ? len : block_count0,
                              as_cols ? block_count0 : len, avg->type, buffer );
        cvRepeat( avg, &avg_repeated );
        tmp_data_ptr += block_count0*len*elem_size;
    }
    else
        avg_repeated = *avg;

    for( i = 0; i < in_count; i += block_count )
    {
        CvMat data_part, norm_data, avg_part, *src = &data_part, out_part;

        block_count = MIN( block_count0, in_count - i );
        if( as_cols )
        {
            cvGetCols( data, &data_part, i, i + block_count );
            cvGetCols( &avg_repeated, &avg_part, 0, block_count );
            norm_data = cvMat( len, block_count, avg->type, tmp_data_ptr );
        }
        else
        {
            cvGetRows( data, &data_part, i, i + block_count );
            cvGetRows( &avg_repeated, &avg_part, 0, block_count );
            norm_data = cvMat( block_count, len, avg->type, tmp_data_ptr );
        }

        if( convert_data )
        {
            cvConvert( src, &norm_data );
            src = &norm_data;
        }

        cvSub( src, &avg_part, &norm_data );

        cvGetRows( result, &out_part, i, i + block_count );
        cvGEMM( &norm_data, evects, 1, 0, 0, &out_part, gemm_flags );
    }

    __END__;

    if( !local_alloc )
        cvFree( &buffer );
}


CV_IMPL void
cvBackProjectPCA( const CvArr* proj_arr, const CvArr* avg_arr,
                  const CvArr* eigenvects, CvArr* result_arr )
{
    uchar* buffer = 0;
    int local_alloc = 0;

    CV_FUNCNAME( "cvBackProjectPCA" );

    __BEGIN__;

    CvMat pstub, *data = (CvMat*)proj_arr;
    CvMat astub, *avg = (CvMat*)avg_arr;
    CvMat evectstub, *evects = (CvMat*)eigenvects;
    CvMat rstub, *result = (CvMat*)result_arr;
    CvMat avg_repeated;
    int i, len, in_count, as_cols;
    int block_count0, block_count, buf_size, elem_size;

    if( !CV_IS_MAT(data) )
        CV_CALL( data = cvGetMat( data, &pstub ));

    if( !CV_IS_MAT(avg) )
        CV_CALL( avg = cvGetMat( avg, &astub ));

    if( !CV_IS_MAT(evects) )
        CV_CALL( evects = cvGetMat( evects, &evectstub ));

    if( !CV_IS_MAT(result) )
        CV_CALL( result = cvGetMat( result, &rstub ));

    if( (CV_MAT_TYPE(avg->type) != CV_32FC1 && CV_MAT_TYPE(avg->type) != CV_64FC1) ||
        !CV_ARE_TYPES_EQ(avg, data) || !CV_ARE_TYPES_EQ(avg, evects) || !CV_ARE_TYPES_EQ(avg, result) )
        CV_ERROR( CV_StsUnsupportedFormat,
        "All the input and output arrays must have the same type, 32fC1 or 64fC1" );

    if( (avg->cols != 1 || avg->rows != result->rows) &&
        (avg->rows != 1 || avg->cols != result->cols) )
        CV_ERROR( CV_StsBadSize,
        "The mean (average) vector should be either 1 x result->cols or result->rows x 1" );

    if( avg->cols == 1 )
    {
        len = result->rows;
        in_count = result->cols;
        as_cols = 1;
    }
    else
    {
        len = result->cols;
        in_count = result->rows;
        as_cols = 0;
    }

    if( evects->cols != len )
        CV_ERROR( CV_StsUnmatchedSizes,
        "Eigenvectors must be stored as rows and be of the same size as the output vectors" );

    if( data->cols > evects->rows )
        CV_ERROR( CV_StsOutOfRange,
        "The input matrix of coefficients must have the number of columns "
        "less than or equal to the number of eigenvectors (number of rows in eigenvectors matrix)" );

    evects = cvGetRows( evects, &evectstub, 0, data->cols );

    block_count0 = (1 << 16)/len;
    block_count0 = MAX( block_count0, 4 );
    block_count0 = MIN( block_count0, in_count );
    elem_size = CV_ELEM_SIZE(avg->type);

    buf_size = block_count0*len*(block_count0 > 1)*elem_size;

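    /* Descriptive comment added for clarity: the reconstruction below is blocked.
       Each block of projection coefficients y is mapped back as
       x = y*evects + avg (row-sample layout) or x = evects^T*y + avg
       (column-sample layout), which is what the cvGEMM calls in the loop compute. */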
    if( buf_size < CV_MAX_LOCAL_SIZE )
    {
        buffer = (uchar*)cvStackAlloc( MAX(buf_size,16) );
        local_alloc = 1;
    }
    else
        CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));

    if( block_count0 > 1 )
    {
        avg_repeated = cvMat( as_cols ? len : block_count0,
                              as_cols ? block_count0 : len, avg->type, buffer );
        cvRepeat( avg, &avg_repeated );
    }
    else
        avg_repeated = *avg;

    for( i = 0; i < in_count; i += block_count )
    {
        CvMat data_part, avg_part, out_part;

        block_count = MIN( block_count0, in_count - i );
        cvGetRows( data, &data_part, i, i + block_count );

        if( as_cols )
        {
            cvGetCols( result, &out_part, i, i + block_count );
            cvGetCols( &avg_repeated, &avg_part, 0, block_count );
            cvGEMM( evects, &data_part, 1, &avg_part, 1, &out_part, CV_GEMM_A_T + CV_GEMM_B_T );
        }
        else
        {
            cvGetRows( result, &out_part, i, i + block_count );
            cvGetRows( &avg_repeated, &avg_part, 0, block_count );
            cvGEMM( &data_part, evects, 1, &avg_part, 1, &out_part, 0 );
        }
    }

    __END__;

    if( !local_alloc )
        cvFree( &buffer );
}


/* End of file. */
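
/* Illustrative usage sketch (editorial addition, not part of the library; the
   matrices A, Ainv, b, x below are hypothetical user-side arrays):

       CvMat* A    = cvCreateMat( 3, 3, CV_64FC1 );
       CvMat* Ainv = cvCreateMat( 3, 3, CV_64FC1 );
       CvMat* b    = cvCreateMat( 3, 1, CV_64FC1 );
       CvMat* x    = cvCreateMat( 3, 1, CV_64FC1 );

       cvSetIdentity( A, cvRealScalar(2) );   // A = 2*I
       double d = cvDet( A );                 // small sizes use the direct formulas above
       cvInvert( A, Ainv, CV_LU );            // LU-based inverse (CV_SVD for pseudo-inverse)
       if( !cvSolve( A, b, x, CV_LU ) )       // returns 0 if A is singular
           ;                                  // handle the singular case here

       cvReleaseMat( &A ); cvReleaseMat( &Ainv );
       cvReleaseMat( &b ); cvReleaseMat( &x );
*/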