// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Giacomo Po <gpo@ucla.edu>
// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.


#ifndef EIGEN_MINRES_H_
#define EIGEN_MINRES_H_


namespace Eigen {

    namespace internal {

        /** \internal Low-level MINRES algorithm
         * \param mat The matrix A
         * \param rhs The right hand side vector b
         * \param x On input an initial solution, on output the computed solution.
         * \param precond A right preconditioner able to efficiently solve for an
         *                approximation of Ax=b (regardless of b)
         * \param iters On input the maximum number of iterations, on output the number of performed iterations.
         * \param tol_error On input the tolerance error, on output an estimation of the relative error.
         *
         * \note The preconditioner must be symmetric positive definite; the implementation
         *       asserts (in debug builds) that v.dot(precond.solve(v)) remains nonnegative.
         */
        template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
        EIGEN_DONT_INLINE
        void minres(const MatrixType& mat, const Rhs& rhs, Dest& x,
                    const Preconditioner& precond, Index& iters,
                    typename Dest::RealScalar& tol_error)
        {
            using std::sqrt;
            typedef typename Dest::RealScalar RealScalar;
            typedef typename Dest::Scalar Scalar;
            typedef Matrix<Scalar,Dynamic,1> VectorType;

            // Check for zero rhs
            const RealScalar rhsNorm2(rhs.squaredNorm());
            if(rhsNorm2 == 0)
            {
                x.setZero();
                iters = 0;
                tol_error = 0;
                return;
            }

            // initialize
            const Index maxIters(iters);  // initialize maxIters to iters
            const Index N(mat.cols());    // the size of the matrix
            const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold (compared to residualNorm2)

            // Initialize preconditioned Lanczos
            VectorType v_old(N); // will be initialized inside loop
            VectorType v( VectorType::Zero(N) ); // initialize v
            VectorType v_new(rhs-mat*x); // initialize v_new
            RealScalar residualNorm2(v_new.squaredNorm());
            VectorType w(N); // will be initialized inside loop
            VectorType w_new(precond.solve(v_new)); // initialize w_new
            RealScalar beta_new2(v_new.dot(w_new));
            eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
            RealScalar beta_new(sqrt(beta_new2));
            const RealScalar beta_one(beta_new);
            v_new /= beta_new;
            w_new /= beta_new;
            // Initialize other variables
            RealScalar c(1.0); // the cosine of the Givens rotation
            RealScalar c_old(1.0);
            RealScalar s(0.0); // the sine of the Givens rotation
            RealScalar s_old(0.0); // the sine of the Givens rotation
            VectorType p_oold(N); // will be initialized in loop
            VectorType p_old(VectorType::Zero(N)); // initialize p_old=0
            VectorType p(p_old); // initialize p=0
            RealScalar eta(1.0);

            iters = 0; // reset iters
            while ( iters < maxIters )
            {
                // Preconditioned Lanczos
                /* Note that there are 4 variants on the Lanczos algorithm. These are
                 * described in Paige, C. C. (1972). Computational variants of
                 * the Lanczos method for the eigenproblem. IMA Journal of Applied
                 * Mathematics, 10(3), 373–381. The current implementation corresponds
                 * to the case A(2,7) in the paper. It also corresponds to
                 * algorithm 6.14 in Y. Saad, Iterative Methods for Sparse Linear
                 * Systems, 2003 p.173. For the preconditioned version see
                 * A. Greenbaum, Iterative Methods for Solving Linear Systems, SIAM (1987).
                 */
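                /* With w_k = precond.solve(v_k), i.e. w_k = M^{-1} v_k, the updates below
                 * implement the preconditioned Lanczos three-term recurrence
                 *     v_{k+1} = A w_k - alpha_k v_k - beta_k v_{k-1},
                 * where v_{k+1} and w_{k+1} are then scaled by beta_{k+1} so that
                 * dot(v_{k+1}, w_{k+1}) = 1.
                 */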
                const RealScalar beta(beta_new);
                v_old = v; // update: at first time step, this makes v_old = 0 so value of beta doesn't matter
                v = v_new; // update
                w = w_new; // update
                v_new.noalias() = mat*w - beta*v_old; // compute v_new
                const RealScalar alpha = v_new.dot(w);
                v_new -= alpha*v; // overwrite v_new
                w_new = precond.solve(v_new); // overwrite w_new
                beta_new2 = v_new.dot(w_new); // compute beta_new
                eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
                beta_new = sqrt(beta_new2); // compute beta_new
                v_new /= beta_new; // overwrite v_new for next iteration
                w_new /= beta_new; // overwrite w_new for next iteration

                // Givens rotation
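                /* Apply the two previous Givens rotations to the new column
                 * [beta, alpha, beta_new]^T of the Lanczos tridiagonal matrix,
                 * giving r3, r2 and r1_hat, then build a new rotation (c, s) that
                 * annihilates beta_new. This updates the QR factorization of the
                 * tridiagonal matrix one column at a time.
                 */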
                const RealScalar r2 = s*alpha + c*c_old*beta; // s, s_old, c and c_old are still from previous iteration
                const RealScalar r3 = s_old*beta;             // s, s_old, c and c_old are still from previous iteration
                const RealScalar r1_hat = c*alpha - c_old*s*beta;
                const RealScalar r1 = sqrt( r1_hat*r1_hat + beta_new*beta_new );
                c_old = c; // store for next iteration
                s_old = s; // store for next iteration
                c = r1_hat/r1; // new cosine
                s = beta_new/r1; // new sine

                // Update solution
                p_oold = p_old;
                p_old = p;
                p.noalias() = (w - r2*p_old - r3*p_oold) / r1; // noalias is safe: p does not appear on the right hand side
                x += beta_one*c*eta*p;

                /* Update the squared residual. Note that this is the estimated residual.
                The real residual |Ax-b|^2 may be slightly larger */
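                /* In exact arithmetic the MINRES residual norm (measured in the norm
                 * induced by the preconditioner) shrinks by a factor |s| per iteration,
                 * so it can be tracked without forming Ax-b explicitly. */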
                residualNorm2 *= s*s;

                if ( residualNorm2 < threshold2)
                {
                    break;
                }

                eta = -s*eta; // update eta
                iters++; // increment iteration number (for output purposes)
            }

            /* Compute error. Note that this is the estimated error. The real
             error |Ax-b|/|b| may be slightly larger */
            tol_error = std::sqrt(residualNorm2 / rhsNorm2);
        }

    }

    template< typename _MatrixType, int _UpLo=Lower,
    typename _Preconditioner = IdentityPreconditioner>
    class MINRES;

    namespace internal {

        template< typename _MatrixType, int _UpLo, typename _Preconditioner>
        struct traits<MINRES<_MatrixType,_UpLo,_Preconditioner> >
        {
            typedef _MatrixType MatrixType;
            typedef _Preconditioner Preconditioner;
        };

    }

    /** \ingroup IterativeLinearSolvers_Module
     * \brief A minimal residual solver for sparse symmetric problems
     *
     * This class allows solving the sparse linear problem A.x = b using the MINRES algorithm
     * of Paige and Saunders (1975). The matrix A must be symmetric (possibly indefinite).
     * The vectors x and b can be either dense or sparse.
     *
     * \tparam _MatrixType the type of the matrix A, which can be a dense or a sparse matrix.
     * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower,
     *               Upper, or Lower|Upper in which case the full matrix entries will be considered. Default is Lower.
     * \tparam _Preconditioner the type of the preconditioner. Default is IdentityPreconditioner
     *
     * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
     * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
     * and NumTraits<Scalar>::epsilon() for the tolerance.
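     *
     * For instance, assuming a solver object \c mr as in the example below, one way to
     * tighten the stopping criterion could be:
     * \code
     * mr.setTolerance(1e-10);
     * mr.setMaxIterations(500);
     * \endcode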
     *
     * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
     * \code
     * int n = 10000;
     * VectorXd x(n), b(n);
     * SparseMatrix<double> A(n,n);
     * // fill A and b
     * MINRES<SparseMatrix<double> > mr;
     * mr.compute(A);
     * x = mr.solve(b);
     * std::cout << "#iterations:     " << mr.iterations() << std::endl;
     * std::cout << "estimated error: " << mr.error()      << std::endl;
     * // update b, and solve again
     * x = mr.solve(b);
     * \endcode
     *
     * By default the iterations start with x=0 as an initial guess of the solution.
     * One can control the start using the solveWithGuess() method.
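     *
     * For instance, one could restart from a previously computed solution (a small sketch,
     * assuming \c x already holds a reasonable initial guess):
     * \code
     * x = mr.solveWithGuess(b, x);
     * \endcode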
     *
     * MINRES can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
     *
     * \sa class ConjugateGradient, BiCGSTAB, SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
     */
    template< typename _MatrixType, int _UpLo, typename _Preconditioner>
    class MINRES : public IterativeSolverBase<MINRES<_MatrixType,_UpLo,_Preconditioner> >
    {

        typedef IterativeSolverBase<MINRES> Base;
        using Base::matrix;
        using Base::m_error;
        using Base::m_iterations;
        using Base::m_info;
        using Base::m_isInitialized;
    public:
        using Base::_solve_impl;
        typedef _MatrixType MatrixType;
        typedef typename MatrixType::Scalar Scalar;
        typedef typename MatrixType::RealScalar RealScalar;
        typedef _Preconditioner Preconditioner;

        enum {UpLo = _UpLo};

    public:

        /** Default constructor. */
        MINRES() : Base() {}

        /** Initialize the solver with matrix \a A for further \c Ax=b solving.
         *
         * This constructor is a shortcut for the default constructor followed
         * by a call to compute().
         *
         * \warning this class stores a reference to the matrix A as well as some
         * precomputed values that depend on it. Therefore, if \a A is changed
         * this class becomes invalid. Call compute() to update it with the new
         * matrix A, or modify a copy of A.
         */
        template<typename MatrixDerived>
        explicit MINRES(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}

        /** Destructor. */
        ~MINRES(){}

        /** \internal */
        template<typename Rhs,typename Dest>
        void _solve_with_guess_impl(const Rhs& b, Dest& x) const
        {
            typedef typename Base::MatrixWrapper MatrixWrapper;
            typedef typename Base::ActualMatrixType ActualMatrixType;
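            /* When the full matrix is used (UpLo==Lower|Upper) and it is real, column-major
             * and not matrix-free, it is wrapped in a Transpose expression below. Since the
             * matrix is symmetric and real this does not change the operator, but it exposes
             * a row-major view, which typically yields faster matrix-vector products
             * (and allows multi-threading when OpenMP is enabled).
             */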
            enum {
              TransposeInput  =   (!MatrixWrapper::MatrixFree)
                              &&  (UpLo==(Lower|Upper))
                              &&  (!MatrixType::IsRowMajor)
                              &&  (!NumTraits<Scalar>::IsComplex)
            };
            typedef typename internal::conditional<TransposeInput,Transpose<const ActualMatrixType>, ActualMatrixType const&>::type RowMajorWrapper;
            EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(MatrixWrapper::MatrixFree,UpLo==(Lower|Upper)),MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY);
            typedef typename internal::conditional<UpLo==(Lower|Upper),
                                                  RowMajorWrapper,
                                                  typename MatrixWrapper::template ConstSelfAdjointViewReturnType<UpLo>::Type
                                            >::type SelfAdjointWrapper;

            m_iterations = Base::maxIterations();
            m_error = Base::m_tolerance;
            RowMajorWrapper row_mat(matrix());
            for(Index j=0; j<b.cols(); ++j)
            {
                m_iterations = Base::maxIterations();
                m_error = Base::m_tolerance;

                typename Dest::ColXpr xj(x,j);
                internal::minres(SelfAdjointWrapper(row_mat), b.col(j), xj,
                                 Base::m_preconditioner, m_iterations, m_error);
            }

            m_isInitialized = true;
            m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
        }

        /** \internal */
        template<typename Rhs,typename Dest>
        void _solve_impl(const Rhs& b, MatrixBase<Dest> &x) const
        {
            x.setZero();
            _solve_with_guess_impl(b,x.derived());
        }

    protected:

    };

} // end namespace Eigen

#endif // EIGEN_MINRES_H_