Linear Algebra
Master matrix operations, linear system solving, and advanced linear algebra techniques
Overview
Linear algebra is the mathematical foundation of modern algorithms, with wide applications in graphics, machine learning, network analysis, and more. This chapter explores matrix operations, Gaussian elimination, eigenvalue computation, and other core techniques for solving complex linear system problems.
Core Content
1. Basic Matrix Operations
Matrix operations are the foundation of linear algebra, including addition, multiplication, transpose, and other basic operations.
// Basic matrix operations implementation
#include <iostream>
#include <vector>
#include <iomanip>
using namespace std;
// Dense matrix of doubles supporting addition, multiplication, transpose,
// fast exponentiation (for linear recurrences), and formatted printing.
class Matrix {
private:
    vector<vector<double>> data;  // row-major storage
    int rows, cols;
public:
    // Construct an r x c matrix filled with zeros.
    Matrix(int r, int c) : rows(r), cols(c) {
        data.resize(r, vector<double>(c, 0));
    }
    // Construct from a 2D vector.
    // Takes a const reference (the previous non-const reference rejected
    // temporaries) and guards against an empty input instead of reading
    // mat[0] unconditionally.
    Matrix(const vector<vector<double>>& mat) : data(mat) {
        rows = mat.size();
        cols = mat.empty() ? 0 : mat[0].size();
    }
    // Element-wise sum; throws invalid_argument on shape mismatch.
    Matrix operator+(const Matrix& other) const {
        if (rows != other.rows || cols != other.cols) {
            throw invalid_argument("Matrix dimensions mismatch");
        }
        Matrix result(rows, cols);
        for (int i = 0; i < rows; i++) {
            for (int j = 0; j < cols; j++) {
                result.data[i][j] = data[i][j] + other.data[i][j];
            }
        }
        return result;
    }
    // Standard O(n^3) matrix product; throws on inner-dimension mismatch.
    Matrix operator*(const Matrix& other) const {
        if (cols != other.rows) {
            throw invalid_argument("Matrix dimensions mismatch");
        }
        Matrix result(rows, other.cols);
        for (int i = 0; i < rows; i++) {
            // i-k-j loop order walks both operands row-wise (cache
            // friendly); per result cell the terms still accumulate in
            // ascending k order, so the floating-point result is identical
            // to the classic i-j-k nesting.
            for (int k = 0; k < cols; k++) {
                double aik = data[i][k];
                for (int j = 0; j < other.cols; j++) {
                    result.data[i][j] += aik * other.data[k][j];
                }
            }
        }
        return result;
    }
    // Return the transpose (cols x rows).
    Matrix transpose() const {
        Matrix result(cols, rows);
        for (int i = 0; i < rows; i++) {
            for (int j = 0; j < cols; j++) {
                result.data[j][i] = data[i][j];
            }
        }
        return result;
    }
    // Fast exponentiation by repeated squaring, O(rows^3 * log n).
    // Useful for linear recurrences (e.g. Fibonacci). For n <= 0 the
    // identity matrix is returned. Throws for non-square matrices.
    Matrix power(long long n) const {
        if (rows != cols) {
            throw invalid_argument("Non-square matrix cannot be powered");
        }
        Matrix result = identity(rows);
        Matrix base = *this;
        while (n > 0) {
            if (n % 2 == 1) {
                result = result * base;  // fold in this bit of the exponent
            }
            base = base * base;
            n /= 2;
        }
        return result;
    }
    // Create the n x n identity matrix.
    static Matrix identity(int n) {
        Matrix result(n, n);
        for (int i = 0; i < n; i++) {
            result.data[i][i] = 1;
        }
        return result;
    }
    // Print the matrix with fixed-point formatting, one row per line.
    void print() const {
        for (int i = 0; i < rows; i++) {
            for (int j = 0; j < cols; j++) {
                cout << setw(8) << fixed << setprecision(2) << data[i][j] << " ";
            }
            cout << endl;
        }
    }
    // Unchecked element access.
    double& operator()(int i, int j) { return data[i][j]; }
    const double& operator()(int i, int j) const { return data[i][j]; }
    int getRows() const { return rows; }
    int getCols() const { return cols; }
};
// Application: Fast Fibonacci calculation using matrix
// Fast Fibonacci via 2x2 matrix exponentiation of [[1,1],[1,0]] in O(log n).
// Uses exact 64-bit integer arithmetic rather than the double-based Matrix
// class: doubles lose integer exactness beyond 2^53 (around F(78)), while
// this version is exact for every Fibonacci number that fits in a signed
// 64-bit integer (up to F(92)). Returns n itself for n <= 1.
long long fibonacciMatrix(int n) {
    if (n <= 1) return n;
    // 2x2 matrix stored flat as {a, b, c, d} = [[a, b], [c, d]].
    // Temporaries make the product safe even when out aliases x or y.
    auto multiply = [](const long long x[4], const long long y[4], long long out[4]) {
        long long r0 = x[0] * y[0] + x[1] * y[2];
        long long r1 = x[0] * y[1] + x[1] * y[3];
        long long r2 = x[2] * y[0] + x[3] * y[2];
        long long r3 = x[2] * y[1] + x[3] * y[3];
        out[0] = r0; out[1] = r1; out[2] = r2; out[3] = r3;
    };
    long long result[4] = {1, 0, 0, 1};  // identity
    long long base[4] = {1, 1, 1, 0};    // Fibonacci transition matrix
    int e = n - 1;                       // trans^(n-1) has F(n) at (0,0)
    while (e > 0) {
        if (e & 1) multiply(result, base, result);
        multiply(base, base, base);
        e >>= 1;
    }
    return result[0];
}
int main() {
    // Demonstrate basic matrix arithmetic on two 2x2 matrices.
    vector<vector<double>> lhs = {{1, 2}, {3, 4}};
    vector<vector<double>> rhs = {{5, 6}, {7, 8}};
    Matrix A(lhs), B(rhs);
    cout << "Matrix A:" << endl;
    A.print();
    cout << "\nMatrix B:" << endl;
    B.print();
    cout << "\nA + B:" << endl;
    (A + B).print();
    cout << "\nA * B:" << endl;
    (A * B).print();
    cout << "\nA transpose:" << endl;
    A.transpose().print();
    // Matrix fast exponentiation demo: the first Fibonacci numbers.
    cout << "\nFirst 10 Fibonacci numbers:" << endl;
    for (int idx = 0; idx <= 10; idx++) {
        cout << "F(" << idx << ") = " << fibonacciMatrix(idx) << endl;
    }
    return 0;
}
2. Gaussian Elimination
Gaussian elimination is the classic algorithm for solving linear systems, also fundamental for computing matrix inverse and determinant.
// Gaussian elimination implementation
#include <iostream>
#include <vector>
#include <cmath>
#include <iomanip>
using namespace std;
// Tolerance below which a floating-point value is treated as zero
// (pivot selection and consistency checks).
const double EPS = 1e-9;
// Solver for linear systems A*x = b using Gaussian (Gauss-Jordan)
// elimination with partial pivoting on the augmented matrix [A | b];
// also exposes the rank of the coefficient part.
class LinearSystem {
private:
    static constexpr double kEps = 1e-9;  // zero/pivot tolerance (self-contained)
    vector<vector<double>> augmented;     // n x (m+1) augmented matrix [A | b]
    int n, m;                             // n equations, m unknowns
public:
    // Build the augmented matrix.
    // Takes const references (inputs are not modified; temporaries are
    // accepted) and validates that b matches A's row count and that A is
    // rectangular, instead of silently reading out of bounds.
    // Throws runtime_error on inconsistent dimensions.
    LinearSystem(const vector<vector<double>>& A, const vector<double>& b) {
        n = (int)A.size();
        if (n == 0 || b.size() != A.size()) {
            throw runtime_error("Inconsistent system dimensions");
        }
        m = (int)A[0].size();
        augmented.resize(n, vector<double>(m + 1));
        for (int i = 0; i < n; i++) {
            if ((int)A[i].size() != m) {
                throw runtime_error("Inconsistent system dimensions");
            }
            for (int j = 0; j < m; j++) {
                augmented[i][j] = A[i][j];
            }
            augmented[i][m] = b[i];
        }
    }
    // Reduce [A | b] and return one solution vector of length m.
    // Free variables (columns that end up without a pivot) are set to 0,
    // so for an underdetermined system this returns a particular solution.
    // Throws runtime_error if the system is inconsistent.
    vector<double> solve() {
        int rank = 0;
        vector<int> pivot_col(n, -1);  // pivot_col[r] = pivot column of row r
        for (int col = 0; col < m && rank < n; col++) {
            // Partial pivoting: pick the row with the largest absolute
            // value in this column for numerical stability.
            int pivot_row = -1;
            for (int row = rank; row < n; row++) {
                if (abs(augmented[row][col]) > kEps &&
                    (pivot_row == -1 ||
                     abs(augmented[row][col]) > abs(augmented[pivot_row][col]))) {
                    pivot_row = row;
                }
            }
            if (pivot_row == -1) continue;  // column is numerically all zeros
            if (pivot_row != rank) {
                swap(augmented[pivot_row], augmented[rank]);
            }
            pivot_col[rank] = col;
            // Eliminate this column from every other row (Gauss-Jordan).
            for (int row = 0; row < n; row++) {
                if (row != rank && abs(augmented[row][col]) > kEps) {
                    double factor = augmented[row][col] / augmented[rank][col];
                    for (int j = col; j <= m; j++) {
                        augmented[row][j] -= factor * augmented[rank][j];
                    }
                }
            }
            rank++;
        }
        // A zero row with a nonzero right-hand side means no solution.
        for (int i = rank; i < n; i++) {
            if (abs(augmented[i][m]) > kEps) {
                throw runtime_error("No solution exists");
            }
        }
        vector<double> solution(m, 0);
        for (int i = 0; i < rank; i++) {
            if (pivot_col[i] != -1) {
                solution[pivot_col[i]] = augmented[i][m] / augmented[i][pivot_col[i]];
            }
        }
        return solution;
    }
    // Rank of the coefficient part of the augmented matrix (forward
    // elimination only). NOTE: mutates the stored augmented matrix.
    int getRank() {
        int rank = 0;
        for (int col = 0; col < m && rank < n; col++) {
            // First nonzero entry at or below the current rank row.
            int pivot_row = -1;
            for (int row = rank; row < n; row++) {
                if (abs(augmented[row][col]) > kEps) {
                    pivot_row = row;
                    break;
                }
            }
            if (pivot_row == -1) continue;
            if (pivot_row != rank) {
                swap(augmented[pivot_row], augmented[rank]);
            }
            // Clear the column below the pivot.
            for (int row = rank + 1; row < n; row++) {
                if (abs(augmented[row][col]) > kEps) {
                    double factor = augmented[row][col] / augmented[rank][col];
                    for (int j = col; j <= m; j++) {
                        augmented[row][j] -= factor * augmented[rank][j];
                    }
                }
            }
            rank++;
        }
        return rank;
    }
};
// Calculate determinant
double determinant(vector<vector<double>> matrix) {
int n = matrix.size();
double det = 1;
for (int i = 0; i < n; i++) {
// Find pivot
int pivot = i;
for (int j = i + 1; j < n; j++) {
if (abs(matrix[j][i]) > abs(matrix[pivot][i])) {
pivot = j;
}
}
if (abs(matrix[pivot][i]) < EPS) return 0; // Matrix is singular
if (pivot != i) {
swap(matrix[i], matrix[pivot]);
det = -det; // Row swap changes sign
}
det *= matrix[i][i];
// Elimination
for (int j = i + 1; j < n; j++) {
if (abs(matrix[j][i]) > EPS) {
double factor = matrix[j][i] / matrix[i][i];
for (int k = i; k < n; k++) {
matrix[j][k] -= factor * matrix[i][k];
}
}
}
}
return det;
}
// Matrix inversion
vector<vector<double>> inverse(vector<vector<double>> A) {
int n = A.size();
vector<vector<double>> inv(n, vector<double>(n, 0));
// Create augmented matrix [A|I]
for (int i = 0; i < n; i++) {
inv[i][i] = 1;
}
// Gauss-Jordan elimination
for (int i = 0; i < n; i++) {
// Find pivot
int pivot = i;
for (int j = i + 1; j < n; j++) {
if (abs(A[j][i]) > abs(A[pivot][i])) {
pivot = j;
}
}
if (abs(A[pivot][i]) < EPS) {
throw runtime_error("Matrix is not invertible");
}
if (pivot != i) {
swap(A[i], A[pivot]);
swap(inv[i], inv[pivot]);
}
// Normalize pivot row
double pivot_val = A[i][i];
for (int j = 0; j < n; j++) {
A[i][j] /= pivot_val;
inv[i][j] /= pivot_val;
}
// Elimination
for (int j = 0; j < n; j++) {
if (j != i && abs(A[j][i]) > EPS) {
double factor = A[j][i];
for (int k = 0; k < n; k++) {
A[j][k] -= factor * A[i][k];
inv[j][k] -= factor * inv[i][k];
}
}
}
}
return inv;
}
int main() {
// Test linear system solving
vector<vector<double>> A = {
{2, 1, -1},
{-3, -1, 2},
{-2, 1, 2}
};
vector<double> b = {8, -11, -3};
LinearSystem system(A, b);
try {
vector<double> solution = system.solve();
cout << "System solution:" << endl;
for (int i = 0; i < solution.size(); i++) {
cout << "x" << i + 1 << " = " << fixed << setprecision(4) << solution[i] << endl;
}
} catch (const exception& e) {
cout << "Error: " << e.what() << endl;
}
// Test determinant calculation
vector<vector<double>> B = {{1, 2, 3}, {4, 5, 6}, {7, 8, 10}};
cout << "\nDeterminant: " << determinant(B) << endl;
// Test matrix inversion
try {
vector<vector<double>> C = {{2, 1}, {1, 1}};
vector<vector<double>> inv_C = inverse(C);
cout << "\nInverse matrix:" << endl;
for (auto& row : inv_C) {
for (double val : row) {
cout << setw(8) << fixed << setprecision(4) << val << " ";
}
cout << endl;
}
} catch (const exception& e) {
cout << "Error: " << e.what() << endl;
}
return 0;
}
3. Advanced Applications
Eigenvalues and Eigenvectors
Eigenvalue decomposition has important applications in graph theory, principal component analysis, Markov chains, and more.
// Power iteration for largest eigenvalue
#include <iostream>
#include <vector>
#include <cmath>
#include <random>
using namespace std;
// Vector operations
// Inner (dot) product of two equal-length vectors.
double dotProduct(const vector<double>& a, const vector<double>& b) {
    double acc = 0.0;
    for (size_t k = 0; k < a.size(); k++) {
        acc += a[k] * b[k];
    }
    return acc;
}
// Euclidean (L2) norm of v: sqrt of the sum of squared components.
// (Computes the sum of squares inline rather than via dotProduct.)
double vectorNorm(const vector<double>& v) {
    double sumSq = 0.0;
    for (double x : v) sumSq += x * x;
    return sqrt(sumSq);
}
// Return v scaled to unit Euclidean length.
// (Norm is computed inline rather than via vectorNorm; a zero vector
// divides by zero, exactly as the original did.)
vector<double> normalize(const vector<double>& v) {
    double sumSq = 0.0;
    for (double x : v) sumSq += x * x;
    const double len = sqrt(sumSq);
    vector<double> unit(v.size());
    for (size_t k = 0; k < v.size(); k++) {
        unit[k] = v[k] / len;
    }
    return unit;
}
// Matrix-vector product y = A * v for a square n x n matrix A and a
// vector v of length n.
vector<double> matrixVectorMultiply(const vector<vector<double>>& A, const vector<double>& v) {
    const int n = A.size();
    vector<double> y(n, 0.0);
    for (int row = 0; row < n; row++) {
        double acc = 0.0;
        for (int col = 0; col < n; col++) {
            acc += A[row][col] * v[col];
        }
        y[row] = acc;
    }
    return y;
}
// Power iteration method
pair<double, vector<double>> powerIteration(const vector<vector<double>>& A, int maxIter = 1000, double tol = 1e-9) {
int n = A.size();
// Initialize random vector
random_device rd;
mt19937 gen(rd());
uniform_real_distribution<> dis(-1.0, 1.0);
vector<double> v(n);
for (int i = 0; i < n; i++) {
v[i] = dis(gen);
}
v = normalize(v);
double eigenvalue = 0;
for (int iter = 0; iter < maxIter; iter++) {
vector<double> Av = matrixVectorMultiply(A, v);
double newEigenvalue = dotProduct(v, Av);
if (abs(newEigenvalue - eigenvalue) < tol) {
return {newEigenvalue, normalize(Av)};
}
eigenvalue = newEigenvalue;
v = normalize(Av);
}
return {eigenvalue, v};
}
// Graph adjacency matrix analysis
class GraphAnalysis {
private:
vector<vector<double>> adj;
int n;
public:
GraphAnalysis(vector<vector<int>>& edges, int nodes) : n(nodes) {
adj.resize(n, vector<double>(n, 0));
// Build adjacency matrix
for (auto& edge : edges) {
adj[edge[0]][edge[1]] = 1;
adj[edge[1]][edge[0]] = 1; // Undirected graph
}
}
// Calculate spectral radius (largest eigenvalue)
double spectralRadius() {
auto result = powerIteration(adj);
return result.first;
}
// PageRank algorithm (modified power iteration)
vector<double> pageRank(double damping = 0.85, int maxIter = 1000, double tol = 1e-9) {
// Build transition matrix
vector<vector<double>> P(n, vector<double>(n, 0));
vector<int> outDegree(n, 0);
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
if (adj[i][j] > 0) outDegree[i]++;
}
}
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
if (outDegree[i] > 0) {
P[j][i] = adj[i][j] / outDegree[i];
} else {
P[j][i] = 1.0 / n; // Handle dangling nodes
}
}
}
// PageRank iteration
vector<double> rank(n, 1.0 / n);
for (int iter = 0; iter < maxIter; iter++) {
vector<double> newRank(n, (1 - damping) / n);
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
newRank[i] += damping * P[i][j] * rank[j];
}
}
// Check convergence
double diff = 0;
for (int i = 0; i < n; i++) {
diff += abs(newRank[i] - rank[i]);
}
if (diff < tol) break;
rank = newRank;
}
return rank;
}
};
int main() {
    // Dominant eigenvalue/eigenvector of a symmetric 3x3 matrix.
    vector<vector<double>> matrix = {
        {4, 1, 0},
        {1, 3, 1},
        {0, 1, 2}
    };
    auto eig = powerIteration(matrix);
    cout << "Largest eigenvalue: " << eig.first << endl;
    cout << "Corresponding eigenvector:" << endl;
    for (double component : eig.second) {
        cout << component << " ";
    }
    cout << endl;
    // Spectral radius and PageRank of a small undirected graph.
    vector<vector<int>> edges = {{0, 1}, {1, 2}, {2, 0}, {1, 3}, {3, 4}, {4, 1}};
    GraphAnalysis graph(edges, 5);
    cout << "\nGraph spectral radius: " << graph.spectralRadius() << endl;
    vector<double> pagerank = graph.pageRank();
    cout << "\nPageRank values:" << endl;
    for (size_t node = 0; node < pagerank.size(); node++) {
        cout << "Node " << node << ": " << pagerank[node] << endl;
    }
    return 0;
}
Problem-Solving Tips
⚡ Numerical Stability
Use partial pivoting to avoid numerical instability, set appropriate precision thresholds, and be careful with floating-point comparisons.
🎯 Algorithm Selection
Choose algorithms based on problem characteristics: Gaussian elimination for dense matrices, iterative methods for sparse matrices, approximation algorithms for large-scale problems.
💡 Optimization Techniques
Leverage special matrix properties (symmetric, tridiagonal, etc.) to optimize algorithms, use block matrix decomposition for large-scale problems.
🔍 Application Recognition
Recognize linear algebra applications in different domains: adjacency matrices in graph theory, PCA in machine learning, PageRank in network analysis.