// bench_gemm.cpp, revision 2b8756b6f1de65d3f8bffab45be6c44ceb7411fc

// g++-4.4 bench_gemm.cpp -I .. -O2 -DNDEBUG -lrt -fopenmp && OMP_NUM_THREADS=2  ./a.out
// icpc bench_gemm.cpp -I .. -O3 -DNDEBUG -lrt -openmp  && OMP_NUM_THREADS=2  ./a.out

// Compilation options:
//
// -DSCALAR=std::complex<double>
// -DSCALARA=double or -DSCALARB=double
// -DHAVE_BLAS
// -DDECOUPLED
//
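// Example (hypothetical invocation; assumes an OpenBLAS install provides the BLAS
// symbols, adjust the link flag to your setup):
//
//   g++ bench_gemm.cpp -I .. -O2 -DNDEBUG -DSCALAR=double -DHAVE_BLAS -lopenblas -lrt
//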

#include <iostream>
#include <Eigen/Core>
#include <bench/BenchTimer.h>

using namespace std;
using namespace Eigen;

#ifndef SCALAR
// #define SCALAR std::complex<float>
#define SCALAR float
#endif

#ifndef SCALARA
#define SCALARA SCALAR
#endif

#ifndef SCALARB
#define SCALARB SCALAR
#endif

typedef SCALAR Scalar;
typedef NumTraits<Scalar>::Real RealScalar;
typedef Matrix<SCALARA,Dynamic,Dynamic> A;
typedef Matrix<SCALARB,Dynamic,Dynamic> B;
typedef Matrix<Scalar,Dynamic,Dynamic> C;
typedef Matrix<RealScalar,Dynamic,Dynamic> M;

#ifdef HAVE_BLAS

extern "C" {
  #include <Eigen/src/misc/blas.h>
}

static float fone = 1;
static float fzero = 0;
static double done = 1;
static double dzero = 0;
static std::complex<float> cfone = 1;
static std::complex<float> cfzero = 0;
static std::complex<double> cdone = 1;
static std::complex<double> cdzero = 0;
static char notrans = 'N';
static char trans = 'T';
static char nonunit = 'N';
static char lower = 'L';
static char right = 'R';
static int intone = 1;

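// Thin wrappers around the Fortran BLAS xGEMM routines. Eigen matrices are
// column-major by default, so the leading dimensions lda/ldb/ldc are simply the
// number of rows of each operand, and both alpha and beta are passed as one, so
// each call accumulates c += a*b, matching the Eigen benchmarks below.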
EIGEN_DONT_INLINE void blas_gemm(const MatrixXf& a, const MatrixXf& b, MatrixXf& c)
{
  int M = c.rows(); int N = c.cols(); int K = a.cols();
  int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();

  sgemm_(&notrans,&notrans,&M,&N,&K,&fone,
         const_cast<float*>(a.data()),&lda,
         const_cast<float*>(b.data()),&ldb,&fone,
         c.data(),&ldc);
}

EIGEN_DONT_INLINE void blas_gemm(const MatrixXd& a, const MatrixXd& b, MatrixXd& c)
{
  int M = c.rows(); int N = c.cols(); int K = a.cols();
  int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();

  dgemm_(&notrans,&notrans,&M,&N,&K,&done,
         const_cast<double*>(a.data()),&lda,
         const_cast<double*>(b.data()),&ldb,&done,
         c.data(),&ldc);
}

EIGEN_DONT_INLINE void blas_gemm(const MatrixXcf& a, const MatrixXcf& b, MatrixXcf& c)
{
  int M = c.rows(); int N = c.cols(); int K = a.cols();
  int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();

  cgemm_(&notrans,&notrans,&M,&N,&K,(float*)&cfone,
         const_cast<float*>((const float*)a.data()),&lda,
         const_cast<float*>((const float*)b.data()),&ldb,(float*)&cfone,
         (float*)c.data(),&ldc);
}

EIGEN_DONT_INLINE void blas_gemm(const MatrixXcd& a, const MatrixXcd& b, MatrixXcd& c)
{
  int M = c.rows(); int N = c.cols(); int K = a.cols();
  int lda = a.rows(); int ldb = b.rows(); int ldc = c.rows();

  zgemm_(&notrans,&notrans,&M,&N,&K,(double*)&cdone,
         const_cast<double*>((const double*)a.data()),&lda,
         const_cast<double*>((const double*)b.data()),&ldb,(double*)&cdone,
         (double*)c.data(),&ldc);
}

#endif

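// The "matlab"-style kernels below compute a complex matrix product on matrices
// stored as separate real and imaginary parts, i.e. (cr + i*ci) += (ar + i*ai)*(br + i*bi),
// expanded into real-valued products:
//   cr += ar*br - ai*bi
//   ci += ar*bi + ai*br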
void matlab_cplx_cplx(const M& ar, const M& ai, const M& br, const M& bi, M& cr, M& ci)
{
  cr.noalias() += ar * br;
  cr.noalias() -= ai * bi;
  ci.noalias() += ar * bi;
  ci.noalias() += ai * br;
}

void matlab_real_cplx(const M& a, const M& br, const M& bi, M& cr, M& ci)
{
  cr.noalias() += a * br;
  ci.noalias() += a * bi;
}

void matlab_cplx_real(const M& ar, const M& ai, const M& b, M& cr, M& ci)
{
  cr.noalias() += ar * b;
  ci.noalias() += ai * b;
}

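// Generic Eigen GEMM used for the benchmarks: noalias() guarantees that c does not
// alias a or b, so the product is accumulated directly into c without a temporary,
// and EIGEN_DONT_INLINE keeps the benchmarked call from being folded into the caller.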
template<typename A, typename B, typename C>
EIGEN_DONT_INLINE void gemm(const A& a, const B& b, C& c)
{
  c.noalias() += a * b;
}

int main(int argc, char ** argv)
{
  std::ptrdiff_t l1 = internal::queryL1CacheSize();
  std::ptrdiff_t l2 = internal::queryTopLevelCacheSize();
  std::cout << "L1 cache size     = " << (l1>0 ? l1/1024 : -1) << " KB\n";
  std::cout << "L2/L3 cache size  = " << (l2>0 ? l2/1024 : -1) << " KB\n";
  typedef internal::gebp_traits<Scalar,Scalar> Traits;
  std::cout << "Register blocking = " << Traits::mr << " x " << Traits::nr << "\n";

  int rep = 1;    // number of repetitions per try
  int tries = 2;  // number of tries, we keep the best

  int s = 2048;
  int m = s;
  int n = s;
  int p = s;
  int cache_size1 = -1, cache_size2 = l2, cache_size3 = 0;

  bool need_help = false;
  for (int i=1; i<argc;)
  {
    if(argv[i][0]=='-')
    {
      if(argv[i][1]=='s')
      {
        ++i;
        s = atoi(argv[i++]);
        m = n = p = s;
        if(i<argc && argv[i][0]!='-')
        {
          n = atoi(argv[i++]);
          p = atoi(argv[i++]);
        }
      }
      else if(argv[i][1]=='c')
      {
        ++i;
        cache_size1 = atoi(argv[i++]);
        if(i<argc && argv[i][0]!='-')
        {
          cache_size2 = atoi(argv[i++]);
          if(i<argc && argv[i][0]!='-')
            cache_size3 = atoi(argv[i++]);
        }
      }
      else if(argv[i][1]=='t')
      {
        ++i;
        tries = atoi(argv[i++]);
      }
      else if(argv[i][1]=='p')
      {
        ++i;
        rep = atoi(argv[i++]);
      }
      else
      {
        // unknown option: fall back to the usage message
        need_help = true;
        break;
      }
    }
    else
    {
      need_help = true;
      break;
    }
  }

  if(need_help)
  {
    std::cout << argv[0] << " -s <matrix sizes> -c <cache sizes> -t <nb tries> -p <nb repeats>\n";
    std::cout << "   <matrix sizes> : a single size (square matrices), or: rows columns depth\n";
    std::cout << "   <cache sizes>  : L1 [L2 [L3]] cache sizes, in bytes\n";
    return 1;
  }
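
  // Example (hypothetical invocation): benchmark a 1024x1024x1024 product,
  // keeping the best of 5 tries of 2 repetitions each:
  //   ./a.out -s 1024 -t 5 -p 2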

#if EIGEN_VERSION_AT_LEAST(3,2,90)
  if(cache_size1>0)
    setCpuCacheSizes(cache_size1,cache_size2,cache_size3);
#endif

  A a(m,p); a.setRandom();
  B b(p,n); b.setRandom();
  C c(m,n); c.setOnes();
  C rc = c;

  std::cout << "Matrix sizes = " << m << "x" << p << " * " << p << "x" << n << "\n";
  std::ptrdiff_t mc(m), nc(n), kc(p);
  internal::computeProductBlockingSizes<Scalar,Scalar>(kc, mc, nc);
  std::cout << "blocking size (mc x kc) = " << mc << " x " << kc << "\n";

  C r = c;

  // check the parallel product is correct
  #if defined EIGEN_HAS_OPENMP
  Eigen::initParallel();
  int procs = omp_get_max_threads();
  if(procs>1)
  {
    #ifdef HAVE_BLAS
    blas_gemm(a,b,r);
    #else
    omp_set_num_threads(1);
    r.noalias() += a * b;
    omp_set_num_threads(procs);
    #endif
    c.noalias() += a * b;
    if(!r.isApprox(c)) std::cerr << "Warning, your parallel product is crap!\n\n";
  }
  #elif defined HAVE_BLAS
    blas_gemm(a,b,r);
    c.noalias() += a * b;
    if(!r.isApprox(c)) {
      std::cout << r - c << "\n";
      std::cerr << "Warning, your product is crap!\n\n";
    }
  #else
    if(1.*m*n*p<2000.*2000*2000)
    {
      gemm(a,b,c);
      r.noalias() += a.cast<Scalar>().lazyProduct(b.cast<Scalar>());
      if(!r.isApprox(c)) {
        std::cout << r - c << "\n";
        std::cerr << "Warning, your product is crap!\n\n";
      }
    }
  #endif

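  // Each m x p by p x n product performs roughly 2*m*n*p floating-point operations
  // (one multiply and one add per accumulated term); the GFLOPS figures below divide
  // that count by the best timing over 'tries' runs of 'rep' products each.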
  #ifdef HAVE_BLAS
  BenchTimer tblas;
  c = rc;
  BENCH(tblas, tries, rep, blas_gemm(a,b,c));
  std::cout << "blas  cpu         " << tblas.best(CPU_TIMER)/rep  << "s  \t" << (double(m)*n*p*rep*2/tblas.best(CPU_TIMER))*1e-9  <<  " GFLOPS \t(" << tblas.total(CPU_TIMER)  << "s)\n";
  std::cout << "blas  real        " << tblas.best(REAL_TIMER)/rep << "s  \t" << (double(m)*n*p*rep*2/tblas.best(REAL_TIMER))*1e-9 <<  " GFLOPS \t(" << tblas.total(REAL_TIMER) << "s)\n";
  #endif

  BenchTimer tmt;
  c = rc;
  BENCH(tmt, tries, rep, gemm(a,b,c));
  std::cout << "eigen cpu         " << tmt.best(CPU_TIMER)/rep  << "s  \t" << (double(m)*n*p*rep*2/tmt.best(CPU_TIMER))*1e-9  <<  " GFLOPS \t(" << tmt.total(CPU_TIMER)  << "s)\n";
  std::cout << "eigen real        " << tmt.best(REAL_TIMER)/rep << "s  \t" << (double(m)*n*p*rep*2/tmt.best(REAL_TIMER))*1e-9 <<  " GFLOPS \t(" << tmt.total(REAL_TIMER) << "s)\n";

  #ifdef EIGEN_HAS_OPENMP
  if(procs>1)
  {
    BenchTimer tmono;
    omp_set_num_threads(1);
    Eigen::setNbThreads(1);
    c = rc;
    BENCH(tmono, tries, rep, gemm(a,b,c));
    std::cout << "eigen mono cpu    " << tmono.best(CPU_TIMER)/rep  << "s  \t" << (double(m)*n*p*rep*2/tmono.best(CPU_TIMER))*1e-9  <<  " GFLOPS \t(" << tmono.total(CPU_TIMER)  << "s)\n";
    std::cout << "eigen mono real   " << tmono.best(REAL_TIMER)/rep << "s  \t" << (double(m)*n*p*rep*2/tmono.best(REAL_TIMER))*1e-9 <<  " GFLOPS \t(" << tmono.total(REAL_TIMER) << "s)\n";
    std::cout << "mt speed up x" << tmono.best(CPU_TIMER) / tmt.best(REAL_TIMER)  << " => " << (100.0*tmono.best(CPU_TIMER) / tmt.best(REAL_TIMER))/procs << "%\n";
  }
  #endif

  if(1.*m*n*p<30*30*30)
  {
    BenchTimer tlazy;
    c = rc;
    BENCH(tlazy, tries, rep, c.noalias()+=a.lazyProduct(b));
    std::cout << "lazy cpu          " << tlazy.best(CPU_TIMER)/rep  << "s  \t" << (double(m)*n*p*rep*2/tlazy.best(CPU_TIMER))*1e-9  <<  " GFLOPS \t(" << tlazy.total(CPU_TIMER)  << "s)\n";
    std::cout << "lazy real         " << tlazy.best(REAL_TIMER)/rep << "s  \t" << (double(m)*n*p*rep*2/tlazy.best(REAL_TIMER))*1e-9 <<  " GFLOPS \t(" << tlazy.total(REAL_TIMER) << "s)\n";
  }

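  // When compiled with -DDECOUPLED, also time the split real/imaginary "matlab"
  // kernels defined above, as a rough comparison point for the interleaved
  // std::complex path benchmarked above.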
  #ifdef DECOUPLED
  if((NumTraits<A::Scalar>::IsComplex) && (NumTraits<B::Scalar>::IsComplex))
  {
    M ar(m,p); ar.setRandom();
    M ai(m,p); ai.setRandom();
    M br(p,n); br.setRandom();
    M bi(p,n); bi.setRandom();
    M cr(m,n); cr.setRandom();
    M ci(m,n); ci.setRandom();

    BenchTimer t;
    BENCH(t, tries, rep, matlab_cplx_cplx(ar,ai,br,bi,cr,ci));
    std::cout << "\"matlab\" cpu    " << t.best(CPU_TIMER)/rep  << "s  \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9  <<  " GFLOPS \t(" << t.total(CPU_TIMER)  << "s)\n";
    std::cout << "\"matlab\" real   " << t.best(REAL_TIMER)/rep << "s  \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 <<  " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
  }
  if((!NumTraits<A::Scalar>::IsComplex) && (NumTraits<B::Scalar>::IsComplex))
  {
    M a(m,p);  a.setRandom();
    M br(p,n); br.setRandom();
    M bi(p,n); bi.setRandom();
    M cr(m,n); cr.setRandom();
    M ci(m,n); ci.setRandom();

    BenchTimer t;
    BENCH(t, tries, rep, matlab_real_cplx(a,br,bi,cr,ci));
    std::cout << "\"matlab\" cpu    " << t.best(CPU_TIMER)/rep  << "s  \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9  <<  " GFLOPS \t(" << t.total(CPU_TIMER)  << "s)\n";
    std::cout << "\"matlab\" real   " << t.best(REAL_TIMER)/rep << "s  \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 <<  " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
  }
  if((NumTraits<A::Scalar>::IsComplex) && (!NumTraits<B::Scalar>::IsComplex))
  {
    M ar(m,p); ar.setRandom();
    M ai(m,p); ai.setRandom();
    M b(p,n);  b.setRandom();
    M cr(m,n); cr.setRandom();
    M ci(m,n); ci.setRandom();

    BenchTimer t;
    BENCH(t, tries, rep, matlab_cplx_real(ar,ai,b,cr,ci));
    std::cout << "\"matlab\" cpu    " << t.best(CPU_TIMER)/rep  << "s  \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9  <<  " GFLOPS \t(" << t.total(CPU_TIMER)  << "s)\n";
    std::cout << "\"matlab\" real   " << t.best(REAL_TIMER)/rep << "s  \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 <<  " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
  }
  #endif

  return 0;
}