19static Matrix dense_expm(
const Matrix &A) {
23 static constexpr double c[7] = {1.0, 0.5, 5.0 / 44.0,
24 1.0 / 66.0, 1.0 / 792.0, 1.0 / 15840.0,
28 double norm_inf = 0.0;
29 for (
idx i = 0; i < m; i++) {
31 for (
idx j = 0; j < m; j++)
32 row_sum += std::abs(A(i, j));
33 norm_inf = std::max(norm_inf, row_sum);
39 s = (int)std::max(0.0, std::ceil(std::log2(norm_inf / 0.5)));
43 double scale = std::ldexp(1.0, -s);
45 for (
idx i = 0; i < m; i++)
46 for (
idx j = 0; j < m; j++)
47 As(i, j) = A(i, j) *
scale;
60 Matrix V_mat(m, m, 0.0);
61 for (
idx i = 0; i < m; i++) {
62 for (
idx j = 0; j < m; j++) {
63 V_mat(i, j) = c[6] * B3(i, j) + c[4] * B2(i, j) + c[2] * B(i, j);
70 for (
idx i = 0; i < m; i++) {
71 for (
idx j = 0; j < m; j++) {
72 W(i, j) = c[5] * B2(i, j) + c[3] * B(i, j);
82 Matrix VpU(m, m, 0.0);
83 Matrix VmU(m, m, 0.0);
84 for (
idx i = 0; i < m; i++) {
85 for (
idx j = 0; j < m; j++) {
86 VpU(i, j) = V_mat(i, j) + U(i, j);
87 VmU(i, j) = V_mat(i, j) - U(i, j);
92 LUResult fac =
lu(VmU);
97 for (
int i = 0; i < s; i++) {
115 std::vector<Vector> V;
116 V.reserve(m_max + 1);
120 for (
idx i = 0; i < n; i++)
122 V.push_back(std::move(v0));
125 Matrix H(m_max + 1, m_max, 0.0);
127 int m_actual = m_max;
130 std::vector<real> h_col(m_max + 1, 0.0);
133 for (
int j = 0; j < m_max; j++) {
140 for (
int i = 0; i <= j; i++) { H(i, j) = h_col[i]; }
141 H(j + 1, j) = h_next;
149 V.push_back(std::move(w));
153 Matrix Hm(m_actual, m_actual, 0.0);
154 for (
int i = 0; i < m_actual; i++) {
155 for (
int j = 0; j < m_actual; j++) {
156 Hm(i, j) = t * H(i, j);
161 Matrix E = dense_expm(Hm);
165 for (
int j = 0; j < m_actual; j++) {
167 axpy(coeff, V[j], result);
176 return expv(t, fn, A.
n_rows(), v, m_max, tol);
Dense row-major matrix with optional GPU storage.
constexpr idx rows() const noexcept
Sparse matrix in Compressed Sparse Row (CSR) format.
Krylov subspace matrix exponential-vector product: compute exp(t*A)*v.
LU factorization with partial pivoting.
real mgs_orthogonalize(const std::vector< Vector > &basis, Vector &v, std::vector< real > &h, idx k)
Modified Gram-Schmidt: orthogonalize v against basis[0..k-1].
real beta(real a, real b)
B(a, b) – beta function.
std::function< void(const Vector &, Vector &)> MatVecFn
Callable type for matrix-free matvec: computes y = A*x.
void matvec(const Matrix &A, const Vector &x, Vector &y, Backend b=default_backend)
y = A * x
void scale(Vector &v, real alpha, Backend b=default_backend)
v *= alpha
void sparse_matvec(const SparseMatrix &A, const Vector &x, Vector &y)
y = A * x
BasicVector< real > Vector
Real-valued dense vector with full backend dispatch (CPU + GPU)
real norm(const Vector &x, Backend b=default_backend)
Euclidean norm.
void axpy(real alpha, const Vector &x, Vector &y, Backend b=default_backend)
y += alpha * x
void matmul(const Matrix &A, const Matrix &B, Matrix &C, Backend b=default_backend)
C = A * B.
Vector expv(real t, const MatVecFn &matvec, idx n, const Vector &v, int m_max=30, real tol=1e-8)
Compute exp(t*A)*v via Krylov–Padé approximation (matrix-free)
void lu_solve(const LUResult &f, const Vector &b, Vector &x)
Solve A*x = b using a precomputed LU factorization.
LUResult lu(const Matrix &A, Backend backend=lapack_backend)
LU factorization of a square matrix A with partial pivoting.
Subspace construction and orthogonalization kernels. (namespace num::kernel::subspace)