18static Matrix dense_expm(
const Matrix&
A) {
22 static constexpr double c[7] = {
43 s = (
int)std::max(0.0, std::ceil(std::log2(
norm_inf / 0.5)));
47 double scale = std::ldexp(1.0, -s);
76 W(
i,
j) = c[5] *
B2(
i,
j) + c[3] *
B(
i,
j);
86 Matrix
VpU(
m,
m, 0.0);
87 Matrix
VmU(
m,
m, 0.0);
101 for (
int i = 0;
i < s;
i++) {
102 Matrix
E2(
m,
m, 0.0);
120 std::vector<Vector> V;
121 V.reserve(
m_max + 1);
126 V.push_back(std::move(
v0));
140 for (
int i = 0;
i <=
j;
i++) {
158 V.push_back(std::move(
vj1));
Dense row-major matrix with optional GPU storage.
constexpr idx rows() const noexcept
Sparse matrix in Compressed Sparse Row (CSR) format.
Krylov subspace matrix exponential-vector product: compute exp(t*A)*v.
LU factorization with partial pivoting.
LUResult lu(const Matrix &A)
LU factorization of a square matrix A with partial pivoting.
real beta(real a, real b)
B(a, b) – beta function.
constexpr T ipow(T x) noexcept
Compute x^N at compile time via repeated squaring.
void matvec(const Matrix &A, const Vector &x, Vector &y, Backend b=default_backend)
y = A * x
void scale(Vector &v, real alpha, Backend b=default_backend)
v *= alpha
void sparse_matvec(const SparseMatrix &A, const Vector &x, Vector &y)
y = A * x
real dot(const Vector &x, const Vector &y, Backend b=default_backend)
Dot product of x and y.
BasicVector< real > Vector
Real-valued dense vector with full backend dispatch (CPU + GPU)
real norm(const Vector &x, Backend b=default_backend)
Euclidean norm.
void axpy(real alpha, const Vector &x, Vector &y, Backend b=default_backend)
y += alpha * x
void matmul(const Matrix &A, const Matrix &B, Matrix &C, Backend b=default_backend)
C = A * B
Vector expv(real t, const MatVecFn &matvec, idx n, const Vector &v, int m_max=30, real tol=1e-8)
Compute exp(t*A)*v via a Krylov–Padé approximation (matrix-free).
void lu_solve(const LUResult &f, const Vector &b, Vector &x)
Solve A*x = b using a precomputed LU factorization.
std::function< void(const Vector &, Vector &)> MatVecFn
Callable type for matrix-free matvec: computes y = A*x.