#
# CHAPTER 27: Linear algebra
#
# The linear algebra functions in R consist of operators on matrix
# objects
#
# A matrix is a collection, including possible repetitions, of real
# numbers arranged in a rectangular array
#
# Uppercase letters are typically used to denote matrices, with subscripted
# lowercase letters used for their elements, for example, matrix A has
# elements a_{ij}
#
# The first subscript of an element denotes the row in which the element
# resides, and the second subscript denotes the column in which it resides
#
# matrix() fills column-by-column by default, so A's columns are
# (1, 2), (3, 4), (5, 6)
A <- matrix(1:6, ncol = 3) # a 2 x 3 matrix
A # display A
A[1, 3] # the [1, 3] element of A (here 5)
B <- A[ , 2] # the second column; dimensions are dropped, giving a plain vector
B # display B
C <- A[ , 2, drop = FALSE] # drop = FALSE keeps the 2 x 1 matrix structure
C # display C
#
# The dimensions of a matrix are the number of rows and the number of columns:
# e.g., 2 x 3
#
nrow(A) # number of rows
ncol(A) # number of columns
dim(A) # dimensions
dim(B) # NULL: B is a plain vector and vectors carry no dim attribute
dim(C) # c(2, 1): C kept its matrix structure via drop = FALSE
length(A) # number of elements
length(B) # 2: the extracted column has two entries
length(C) # 2: length counts elements, ignoring the matrix shape
mode(A) # mode of elements ("numeric")
mode(B) # same storage mode as the parent matrix
mode(C) # the matrix structure does not change the mode
#
# The row and col functions give a matrix of the same dimensions but with
# row numbers and column numbers as entries
#
row(A) # matrix of row numbers
col(A) # matrix of column numbers
#
# Summary measures on a matrix
#
rowSums(A) # row sums of A
rowMeans(A) # row means of A
colSums(A) # column sums of A
colMeans(A) # column means of A
apply(A, 1, max) # row maximums (MARGIN = 1 applies the function over rows)
apply(A, 2, sd) # column standard deviations (MARGIN = 2: over columns)
#
# Two matrices can only be added (subtracted) when their dimensions match.
# The resulting matrix has the same dimensions as the original matrices and
# its elements are the sums (differences) of the corresponding elements of
# the two original matrices.
#
x <- rpois(2, 1) # two Poisson(1) draws
y <- rpois(2, 2) # two Poisson(2) draws
z <- rpois(2, 3) # two Poisson(3) draws
B <- cbind(x, y, z) # bind the vectors as columns: a 2 x 3 matrix
B # column names are taken from the variable names
rownames(B) <- c("firstrow", "secondrow") # label the rows
B
rownames(B) # retrieve the row labels
A
B
A + B # element-wise sums (dimensions match: both are 2 x 3)
A - B # element-wise differences
A - 2 # the scalar 2 is recycled across every element of A
#
# The result of a matrix multiplied (divided) by a scalar is a matrix with
# each element multiplied (divided) by the scalar.
#
3 * A # multiplying by a scalar
A / 3 # dividing by a scalar
1 / A # element-wise reciprocals
A * B # element-wise (Hadamard) multiplication -- NOT matrix multiplication
#
# If A is an m x n matrix, then the transpose of A, A ^ T or A ^ ',
# is an n x m matrix with the rows of A as the columns of A transpose.
#
t(A) # the transpose of A (3 x 2)
t(B) # the transpose of B (3 x 2)
#
# Two matrices can be multiplied when the number of columns of the first
# matrix matches the number of rows of the second matrix. The resulting
# matrix has the same number of rows as the first matrix and the same
# number of columns as the second matrix.
#
A %*% t(B) # matrix multiplication: (2 x 3) %*% (3 x 2) gives a 2 x 2 result
#
# A square matrix has the same number of rows and columns.
#
# byrow = TRUE fills across rows: row 1 is (1, 2), row 2 is (3, 4)
P <- matrix(1:4, 2, 2, byrow = TRUE) # 2 x 2 square matrix
P
#
# The diagonal elements of a square matrix are those elements in which the
# row and column indexes are equal. All other elements are off-diagonal
# elements.
#
diag(P) # create a vector with the diagonal elements of P
diag(A) # diagonal of a non-square matrix: elements [1, 1] and [2, 2]
diag(1:5) # create a diagonal matrix with diagonal elements 1:5
diag(diag(A)) # 2 x 2 diagonal matrix built from A's diagonal elements
#
# The identity matrix I is a square matrix with diagonal elements equal to 1 and
# off-diagonal elements equal to 0
#
diag(4) # diag(n) with a single integer returns the n x n identity matrix
#
# The cross product of A is A'A. The cross product of A and B is A'B.
#
crossprod(A) # cross product t(A) %*% A, computed without explicitly forming t(A)
crossprod(A, B) # cross product t(A) %*% B
#
# The outer product of A and B is the array C with dimensions
# c(dim(A), dim(B)) in which element C[c(aindex, bindex)] is
# FUN(A[aindex], B[bindex]). For two vectors with the default FUN = "*"
# this is the matrix of all pairwise products; the idea extends to
# higher dimensions.
#
1:9 %o% 1:9 # multiplication table (%o% is shorthand for outer())
x <- 1:9
names(x) <- x # naming the entries labels the table's rows and columns
x %o% x
outer(1:9, 1:9, FUN = "^") # outer is more general: any FUN, here a power table
#
# A square matrix is lower (upper) triangular if all of the elements above
# (below) the diagonal are zero.
#
lower.tri(P, diag = TRUE) # Boolean mask: TRUE on and below the diagonal
upper.tri(P, diag = TRUE) # Boolean mask: TRUE on and above the diagonal
P * lower.tri(P, diag = TRUE) # zero the strictly upper triangle of P
#
# The inverse of a square matrix A is a matrix A ^ {-1} such that
# A A ^ {-1} = I
#
solve(P) # inverse of P (solve with no right-hand side solves P X = I)
P %*% solve(P) # identity matrix, up to floating-point rounding
zapsmall(P %*% solve(P)) # round the tiny rounding errors to zero for display
#
# Solving matrix equations:
# 3 x = 6
# (1 / 3) 3 x = (1 / 3) 6
# 1 x = 2
# x = 2
#
# A x = b
# A ^ {-1} A x = A ^ {-1} b
# I x = A ^ {-1} b
# x = A ^ {-1} b
#
solve(P, 5:6) # solve the linear system P x = c(5, 6) for x
#
# The determinant of a square matrix is a single number associated with the
# matrix. The determinant of a 2 x 2 matrix with rows (a, b) and (c, d) is
# a d - b c. Notation: det(A) is written |A| (vertical bars).
#
det(P) # calculate the determinant of P: 1 * 4 - 2 * 3 = -2
#
# A square matrix A has an inverse if and only if det(A) != 0. A matrix with
# an inverse is called a nonsingular or invertible matrix.
#
Q = matrix(c(1, 2, 2, 4), nrow = 2) # column 2 is twice column 1
det(Q) # 0: the columns are linearly dependent
solve(Q) # ERROR (intentional): a singular matrix has no inverse
#
#
# The trace of a square matrix is the sum of the diagonal elements
#
# NOTE(review): this definition masks base::trace() (the debugging utility)
# for the rest of the session; kept as-is for the chapter's exposition.
trace <- function(S) sum(diag(S)) # trace(S) = sum of S's diagonal elements
trace(Q) # trace of Q: 1 + 4 = 5
#
# An eigenvector of a square matrix A is a non-zero vector v satisfying
# A %*% v = lambda * v
# where lambda is the corresponding eigenvalue.
# Characteristic polynomial: det(A - lambda * diag(nrow(A))) = 0
#
A <- matrix(c(1, 2, 4, 3), 2, 2, byrow = TRUE)
e <- eigen(A) # compute the decomposition once instead of five times
e # eigenvalues and eigenvectors of A
e$values # eigenvalues of A (full name; avoid partial matching via $val)
e$vectors # eigenvectors of A (one per column)
A %*% e$vectors[ , 1] # check the definition: A v ...
e$values[1] * e$vectors[ , 1] # ... equals lambda v
#
# decompositions
#
R <- matrix(c(5, 1, 1, 3), 2, 2) # symmetric positive definite, from example(chol)
chol(R) # Cholesky factor: upper triangular U with t(U) %*% U = R
svd(R) # singular value decomposition
qr(R) # QR decomposition
kappa(R) # condition number (an estimate by default)
#
# If the elements of a 3 x 3 matrix are independent U(0, 1) random
# variables with positive diagonal elements and negative off-diagonal
# elements, use Monte Carlo simulation to estimate the probability
# that the matrix has a positive determinant.
#
nrep <- 100000 # number of Monte Carlo replications
count <- 0 # how many simulated matrices have a positive determinant
for (j in seq_len(nrep)) {
  m <- matrix(-runif(9), 3, 3) # all nine entries negative U(0, 1) draws ...
  diag(m) <- abs(diag(m)) # ... then flip the three diagonal entries positive
  if (det(m) > 0) count <- count + 1
}
print(count / nrep) # estimated probability of a positive determinant
#
# after set.seed(4), 0.04897 0.04992 0.05034 0.05041 0.05004
#