Matrix definitions

Creating a matrix

There are various ways to create a matrix. One of the most explicit ways is to use matrix()

## matrix(x, ncol=n, nrow=m, byrow=FALSE)   # by default, the elements of x are arranged by columns (column-wise)

# use TRUE/FALSE rather than the reassignable aliases T/F
amat <- matrix(c(2, 3, 5,
                 1, -6, 3), ncol=3, byrow=TRUE)   # arrange elements row-wise
amat     # 2x3 matrix
##      [,1] [,2] [,3]
## [1,]    2    3    5
## [2,]    1   -6    3
bmat <- matrix(c(5, 0, -2, 
                 3, 7, 1), ncol=3, byrow=TRUE)
bmat     # 2x3 matrix
##      [,1] [,2] [,3]
## [1,]    5    0   -2
## [2,]    3    7    1

Diagonal/identity matrix

diag() creates a diagonal matrix or extracts/replaces the diagonal of a matrix

## diag(x, nrow=n)  

(imat <- diag(1, nrow=5))      # creates a diagonal matrix using a single value (1) -> identity matrix
##      [,1] [,2] [,3] [,4] [,5]
## [1,]    1    0    0    0    0
## [2,]    0    1    0    0    0
## [3,]    0    0    1    0    0
## [4,]    0    0    0    1    0
## [5,]    0    0    0    0    1
(diagmat <- diag(1:5, nrow=5))   # creates a diagonal matrix using a vector
##      [,1] [,2] [,3] [,4] [,5]
## [1,]    1    0    0    0    0
## [2,]    0    2    0    0    0
## [3,]    0    0    3    0    0
## [4,]    0    0    0    4    0
## [5,]    0    0    0    0    5
diag(diagmat)         # extracts the diagonal of matrix 'diagmat'
## [1] 1 2 3 4 5
diag(diagmat) <- 1    # replaces the diagonal elements of matrix 'diagmat' with 1s
diagmat
##      [,1] [,2] [,3] [,4] [,5]
## [1,]    1    0    0    0    0
## [2,]    0    1    0    0    0
## [3,]    0    0    1    0    0
## [4,]    0    0    0    1    0
## [5,]    0    0    0    0    1
# compare two matrices (all.equal tolerates small floating-point differences)
all.equal(imat, diagmat)  # TRUE if two matrices are equal 
## [1] TRUE

Transpose

t() function returns the transpose of a matrix

amat     # original matrix 'amat'
##      [,1] [,2] [,3]
## [1,]    2    3    5
## [2,]    1   -6    3
(amat_t <- t(amat))   # transpose of matrix 'amat' (rows become columns)
##      [,1] [,2]
## [1,]    2    1
## [2,]    3   -6
## [3,]    5    3
## a symmetric matrix equals its own transpose
smat <- matrix(c(1, 2, 3, 
                 2, 5, 7, 
                 3, 7, 6), ncol=3, byrow=TRUE)  # symmetric
smat
##      [,1] [,2] [,3]
## [1,]    1    2    3
## [2,]    2    5    7
## [3,]    3    7    6
t(smat)
##      [,1] [,2] [,3]
## [1,]    1    2    3
## [2,]    2    5    7
## [3,]    3    7    6
all.equal(smat, t(smat))  # TRUE if it equals its transpose
## [1] TRUE

Matrix operations

Addition/Subtraction

# addition/subtraction is element-wise; both matrices must have the same dimensions
(cmat <- amat + bmat)  # element-wise addition
##      [,1] [,2] [,3]
## [1,]    7    3    3
## [2,]    4    1    4
(dmat <- amat - bmat)  # element-wise subtraction
##      [,1] [,2] [,3]
## [1,]   -3    3    7
## [2,]   -2  -13    2

Multiplication

Multiplication with a scalar

amat     # original matrix
##      [,1] [,2] [,3]
## [1,]    2    3    5
## [2,]    1   -6    3
2*amat   # scalar multiplication: every element of amat is multiplied by 2
##      [,1] [,2] [,3]
## [1,]    4    6   10
## [2,]    2  -12    6

Inner product of two vectors

(a <- c(1, 2, 3, 4))
## [1] 1 2 3 4
(b <- c(1, 0, 2, 5))
## [1] 1 0 2 5
## * does an element-wise multiplication
a*b
## [1]  1  0  6 20
## use %*% for the inner (dot) product
a %*% b    # inner product of two vectors gives a scalar (returned as a 1x1 matrix)
##      [,1]
## [1,]   27
# the two vectors are not orthogonal because the product is not 0

## orthogonal vectors (note: using 'c' as a variable name shadows base c() only for non-function uses)
(c <- c(1, 0))
## [1] 1 0
(d <- c(0, 1))
## [1] 0 1
c %*% d  # vectors are orthogonal because the inner product is 0
##      [,1]
## [1,]    0
(c <- c(2, -1))
## [1]  2 -1
(d <- c(.5, 1))
## [1] 0.5 1.0
c %*% d    # vectors are orthogonal because the inner product is 0 (2*0.5 + (-1)*1 = 0)
##      [,1]
## [1,]    0
## inner product of a vector with itself
a %*% a   # equals the sum of squares of the elements
##      [,1]
## [1,]   30
sum(a^2)
## [1] 30

Matrix multiplication

smat <- matrix(c(2, 3, 5,
                 1, -6, 3), ncol=3, byrow=TRUE)
smat     # 2 by 3
##      [,1] [,2] [,3]
## [1,]    2    3    5
## [2,]    1   -6    3
tmat <- matrix(c(1, 2,
                 -1, 4,
                 4, 0), ncol=2, byrow=TRUE)
tmat     # 3 by 2
##      [,1] [,2]
## [1,]    1    2
## [2,]   -1    4
## [3,]    4    0
ttmat <- t(tmat) 
ttmat    # 2 by 3
##      [,1] [,2] [,3]
## [1,]    1   -1    4
## [2,]    2    4    0
## * does an element-wise multiplication (dimensions should match)
smat * ttmat
##      [,1] [,2] [,3]
## [1,]    2   -3   20
## [2,]    2  -24    0
## use %*% for true matrix multiplication
(umat <- smat %*% tmat)    # 2 by 2
##      [,1] [,2]
## [1,]   19   16
## [2,]   19  -22
try(smat %*% ttmat)   # wrapped in try() so the script continues past the error
## Error in smat %*% ttmat: non-conformable arguments
# for A %*% B, ncol(A) must equal nrow(B); here ncol(smat)=3 but nrow(ttmat)=2

(tmat %*% smat) # 3 by 3
##      [,1] [,2] [,3]
## [1,]    4   -9   11
## [2,]    2  -27    7
## [3,]    8   12   20
# matrix multiplication is not commutative: tmat %*% smat differs from smat %*% tmat (order matters!)

## multiplication with an identity matrix
(imat2 <- diag(1, nrow=3))  # identity matrix
##      [,1] [,2] [,3]
## [1,]    1    0    0
## [2,]    0    1    0
## [3,]    0    0    1
smat  # original matrix
##      [,1] [,2] [,3]
## [1,]    2    3    5
## [2,]    1   -6    3
smat %*% imat2   # multiplying by the identity leaves the matrix unchanged
##      [,1] [,2] [,3]
## [1,]    2    3    5
## [2,]    1   -6    3
## multiplication of a matrix with its own transpose
t(smat) %*% smat   # A'A is always symmetric (3 by 3 here)
##      [,1] [,2] [,3]
## [1,]    5    0   13
## [2,]    0   45   -3
## [3,]   13   -3   34
smat %*% t(smat)   # AA' is also symmetric, but a different matrix (2 by 2 here)
##      [,1] [,2]
## [1,]   38   -1
## [2,]   -1   46

Matrix inverse

## only square matrices have inverses
umat <- matrix(c(6, 3, 1, 
                 3, 2, 2, 
                 1, 2, 5), ncol=3, byrow=TRUE)
umat 
##      [,1] [,2] [,3]
## [1,]    6    3    1
## [2,]    3    2    2
## [3,]    1    2    5
## use solve() function to get the inverse of a matrix
solve(umat)            # square matrix inverse 
##      [,1] [,2] [,3]
## [1,]    6  -13    4
## [2,]  -13   29   -9
## [3,]    4   -9    3
umat %*% solve(umat)   # equals the identity matrix (up to floating-point rounding)
##               [,1]          [,2]          [,3]
## [1,]  1.000000e+00  0.000000e+00 -2.664535e-15
## [2,] -1.776357e-15  1.000000e+00 -1.776357e-15
## [3,]  0.000000e+00 -7.105427e-15  1.000000e+00
## if columns are linearly dependent, no inverse (singular matrix)
nmat <- cbind(c(1, 3, 5), 2*c(1, 3, 5), c(2, 2, 1))   # column 2 = 2 * column 1
nmat
##      [,1] [,2] [,3]
## [1,]    1    2    2
## [2,]    3    6    2
## [3,]    5   10    1
det(nmat)     # determinant equals 0 (up to floating-point error)
## [1] 7.993606e-15
try(solve(nmat))   # no inverse (singular); try() lets the script continue past the error
## Error in solve.default(nmat): system is computationally singular: reciprocal condition number = 1.64477e-17

Orthogonal matrix

omat <- matrix(c(0, 1, 0, 0, 
                 0, 0, 1, 0, 
                 1, 0, 0, 0,
                 0, 0, 0, 1), ncol=4, byrow=TRUE)   # a permutation matrix
omat
##      [,1] [,2] [,3] [,4]
## [1,]    0    1    0    0
## [2,]    0    0    1    0
## [3,]    1    0    0    0
## [4,]    0    0    0    1
# orthogonal if t(omat) == solve(omat)
# (exact == comparison is safe here only because all entries are exact 0s and 1s;
#  in general, compare floating-point matrices with all.equal())
t(omat) == solve(omat) 
##      [,1] [,2] [,3] [,4]
## [1,] TRUE TRUE TRUE TRUE
## [2,] TRUE TRUE TRUE TRUE
## [3,] TRUE TRUE TRUE TRUE
## [4,] TRUE TRUE TRUE TRUE
# orthogonal if omat %*% t(omat) == identity matrix
omat %*% t(omat)   
##      [,1] [,2] [,3] [,4]
## [1,]    1    0    0    0
## [2,]    0    1    0    0
## [3,]    0    0    1    0
## [4,]    0    0    0    1
# this means that the column vectors are orthonormal
t(omat[,1]) %*% omat[,2]  # equals 0 (orthogonal)
##      [,1]
## [1,]    0
t(omat[,1]) %*% omat[,1]; t(omat[,2]) %*% omat[,2]  # both equal 1 (normalized)
##      [,1]
## [1,]    1
##      [,1]
## [1,]    1

Spectral decomposition and SVD

Let’s read in some data.

valsci <- read.table('valsci.dat')  # 1000 respondents, 10 items (assumes file is in the working directory)
head(valsci)
##   V1 V2 V3 V4 V5 V6 V7 V8 V9 V10
## 1  4  3  3  4  3  3  3  3  3   3
## 2  4  4  4  4  4  4  4  4  4   4
## 3  3  4  2  3  2  3  2  3  3   2
## 4  3  3  3  3  2  3  2  2  2   2
## 5  3  3  3  3  3  3  3  2  3   3
## 6  3  3  3  3  1  3  2  3  3   2
# variances on the diagonal, covariances off the diagonal
cov(valsci)     # covariance matrix
##            V1        V2        V3        V4        V5        V6        V7
## V1  0.4532492 0.2390090 0.1919419 0.2231852 0.1719219 0.2333233 0.2016617
## V2  0.2390090 0.3921672 0.1888138 0.1955105 0.1838088 0.2163914 0.1881381
## V3  0.1919419 0.1888138 0.5178929 0.2073323 0.2406156 0.1908158 0.2575075
## V4  0.2231852 0.1955105 0.2073323 0.4428819 0.1943193 0.1975526 0.1720420
## V5  0.1719219 0.1838088 0.2406156 0.1943193 0.6280030 0.2628879 0.3996496
## V6  0.2333233 0.2163914 0.1908158 0.1975526 0.2628879 0.4354104 0.2644144
## V7  0.2016617 0.1881381 0.2575075 0.1720420 0.3996496 0.2644144 0.6357357
## V8  0.1884705 0.2039189 0.2383634 0.1946737 0.2934184 0.2594144 0.3464164
## V9  0.2037197 0.1843544 0.2389890 0.2210230 0.2449950 0.2521421 0.2569169
## V10 0.1722442 0.1958358 0.2022022 0.1841682 0.3933934 0.2701502 0.3740941
##            V8        V9       V10
## V1  0.1884705 0.2037197 0.1722442
## V2  0.2039189 0.1843544 0.1958358
## V3  0.2383634 0.2389890 0.2022022
## V4  0.1946737 0.2210230 0.1841682
## V5  0.2934184 0.2449950 0.3933934
## V6  0.2594144 0.2521421 0.2701502
## V7  0.3464164 0.2569169 0.3740941
## V8  0.5294484 0.2587808 0.3095335
## V9  0.2587808 0.5411772 0.2803924
## V10 0.3095335 0.2803924 0.6421461
# correlations are covariances rescaled by the variables' standard deviations (range -1 to 1)
cor(valsci)     # correlation matrix
##            V1        V2        V3        V4        V5        V6        V7
## V1  1.0000000 0.5669048 0.3961698 0.4981417 0.3222418 0.5252187 0.3756786
## V2  0.5669048 1.0000000 0.4189654 0.4691271 0.3703820 0.5236667 0.3767931
## V3  0.3961698 0.4189654 1.0000000 0.4329155 0.4219131 0.4018324 0.4487781
## V4  0.4981417 0.4691271 0.4329155 1.0000000 0.3684607 0.4498723 0.3242293
## V5  0.3222418 0.3703820 0.4219131 0.3684607 1.0000000 0.5027364 0.6324996
## V6  0.5252187 0.5236667 0.4018324 0.4498723 0.5027364 1.0000000 0.5025710
## V7  0.3756786 0.3767931 0.4487781 0.3242293 0.6324996 0.5025710 1.0000000
## V8  0.3847361 0.4475175 0.4552057 0.4020235 0.5088557 0.5402974 0.5971017
## V9  0.4113342 0.4001732 0.4514277 0.4514647 0.4202489 0.5194291 0.4380106
## V10 0.3192708 0.3902470 0.3506299 0.3453454 0.6194828 0.5109035 0.5854982
##            V8        V9       V10
## V1  0.3847361 0.4113342 0.3192708
## V2  0.4475175 0.4001732 0.3902470
## V3  0.4552057 0.4514277 0.3506299
## V4  0.4020235 0.4514647 0.3453454
## V5  0.5088557 0.4202489 0.6194828
## V6  0.5402974 0.5194291 0.5109035
## V7  0.5971017 0.4380106 0.5854982
## V8  1.0000000 0.4834488 0.5308587
## V9  0.4834488 1.0000000 0.4756413
## V10 0.5308587 0.4756413 1.0000000
cov(valsci[,1],valsci[,2]+1.4)  # adding a constant to a variable doesn't change the covariance
## [1] 0.239009

For simpler illustration, we will extract a subscale from the data.

valscisub <- valsci[,1:4]   # keep only items V1-V4 (all 1000 rows)
head(valscisub)
##   V1 V2 V3 V4
## 1  4  3  3  4
## 2  4  4  4  4
## 3  3  4  2  3
## 4  3  3  3  3
## 5  3  3  3  3
## 6  3  3  3  3

Covariance matrix

Covariance matrix \(\mathbf{S}\) is computed as \[\mathbf{S=} \frac{1}{n-1} \mathbf{X'X}\] where \(\mathbf{X}\) is the data matrix with mean-centered variables (i.e., column means were subtracted from the original data matrix).

## use 'scale()' to create a mean-centered data matrix X
## scale(x, center=TRUE, scale=TRUE)   # defaults; set scale=FALSE to center without rescaling
X <- scale(valscisub, scale=FALSE)   # mean-centered data (column means subtracted)
head(X)
##          V1     V2     V3     V4
## [1,]  0.698 -0.365  0.125  0.831
## [2,]  0.698  0.635  1.125  0.831
## [3,] -0.302  0.635 -0.875 -0.169
## [4,] -0.302 -0.365  0.125 -0.169
## [5,] -0.302 -0.365  0.125 -0.169
## [6,] -0.302 -0.365  0.125 -0.169
# S = X'X / (n-1); crossprod(X) is a faster equivalent of t(X) %*% X
S <- t(X) %*% X * (1/(nrow(X)-1))
S
##           V1        V2        V3        V4
## V1 0.4532492 0.2390090 0.1919419 0.2231852
## V2 0.2390090 0.3921672 0.1888138 0.1955105
## V3 0.1919419 0.1888138 0.5178929 0.2073323
## V4 0.2231852 0.1955105 0.2073323 0.4428819
covmat <- cov(valscisub)   # built-in equivalent gives the same matrix
head(covmat)
##           V1        V2        V3        V4
## V1 0.4532492 0.2390090 0.1919419 0.2231852
## V2 0.2390090 0.3921672 0.1888138 0.1955105
## V3 0.1919419 0.1888138 0.5178929 0.2073323
## V4 0.2231852 0.1955105 0.2073323 0.4428819

Correlation matrix

Correlation matrix \(\mathbf{R}\) is computed as \[\mathbf{R=} \frac{1}{n-1} \mathbf{Z'Z}\] where \(\mathbf{Z}\) is the standardized data matrix.

## create a std. data matrix Z
Z <- scale(valscisub)  # standardized data (centered, unit variance)
head(Z)
##              V1         V2         V3         V4
## [1,]  1.0367806 -0.5828506  0.1736961  1.2486970
## [2,]  1.0367806  1.0140004  1.5632648  1.2486970
## [3,] -0.4485784  1.0140004 -1.2158726 -0.2539468
## [4,] -0.4485784 -0.5828506  0.1736961 -0.2539468
## [5,] -0.4485784 -0.5828506  0.1736961 -0.2539468
## [6,] -0.4485784 -0.5828506  0.1736961 -0.2539468
# store as R (matching the \mathbf{R} notation for the correlation matrix)
# rather than S, so the covariance matrix S computed above is not overwritten
R <- t(Z) %*% Z * (1/(nrow(Z)-1))
R
##           V1        V2        V3        V4
## V1 1.0000000 0.5669048 0.3961698 0.4981417
## V2 0.5669048 1.0000000 0.4189654 0.4691271
## V3 0.3961698 0.4189654 1.0000000 0.4329155
## V4 0.4981417 0.4691271 0.4329155 1.0000000
cormat <- cor(valscisub)   # built-in equivalent gives the same matrix
head(cormat)
##           V1        V2        V3        V4
## V1 1.0000000 0.5669048 0.3961698 0.4981417
## V2 0.5669048 1.0000000 0.4189654 0.4691271
## V3 0.3961698 0.4189654 1.0000000 0.4329155
## V4 0.4981417 0.4691271 0.4329155 1.0000000

Spectral decomposition (eigendecomposition)

Any symmetric matrix \(\mathbf{A}_{n\times n}\) can be decomposed as \[\mathbf{A=P\Lambda P'}\] where \(\mathbf{P}\) is a matrix whose columns are eigenvectors of \(\mathbf{A}\), and \(\mathbf{\Lambda}\) is a diagonal matrix with eigenvalues of \(\mathbf{A}\).

eigen() returns the spectral decomposition of a square matrix; that is, it computes the eigenvalues and corresponding eigenvectors of the matrix.

(eigsol <- eigen(cormat))     # A = cormat
## eigen() decomposition
## $values
## [1] 2.3953334 0.6421578 0.5351480 0.4273608
## 
## $vectors
##           [,1]        [,2]       [,3]       [,4]
## [1,] 0.5196290  0.42758075 -0.1358105  0.7271285
## [2,] 0.5175370  0.34031673 -0.4377173 -0.6517235
## [3,] 0.4586217 -0.83697641 -0.2764287  0.1127998
## [4,] 0.5018040 -0.02880475  0.8447166 -0.1838935
# 'values' component has a vector of eigenvalues (lambda elements), in decreasing order
(eigenval <- eigsol$values)
## [1] 2.3953334 0.6421578 0.5351480 0.4273608
# 'vectors' component has a matrix of eigenvectors (P), one eigenvector per column
(P <- eigsol$vectors)
##           [,1]        [,2]       [,3]       [,4]
## [1,] 0.5196290  0.42758075 -0.1358105  0.7271285
## [2,] 0.5175370  0.34031673 -0.4377173 -0.6517235
## [3,] 0.4586217 -0.83697641 -0.2764287  0.1127998
## [4,] 0.5018040 -0.02880475  0.8447166 -0.1838935
# Check that A = P %*% Lambda %*% t(P) holds
(Lambda <- diag(eigenval))   # create Lambda, the diagonal matrix of eigenvalues
##          [,1]      [,2]     [,3]      [,4]
## [1,] 2.395333 0.0000000 0.000000 0.0000000
## [2,] 0.000000 0.6421578 0.000000 0.0000000
## [3,] 0.000000 0.0000000 0.535148 0.0000000
## [4,] 0.000000 0.0000000 0.000000 0.4273608
P %*% Lambda %*% t(P)        # equals A (cormat)
##           [,1]      [,2]      [,3]      [,4]
## [1,] 1.0000000 0.5669048 0.3961698 0.4981417
## [2,] 0.5669048 1.0000000 0.4189654 0.4691271
## [3,] 0.3961698 0.4189654 1.0000000 0.4329155
## [4,] 0.4981417 0.4691271 0.4329155 1.0000000
cormat
##           V1        V2        V3        V4
## V1 1.0000000 0.5669048 0.3961698 0.4981417
## V2 0.5669048 1.0000000 0.4189654 0.4691271
## V3 0.3961698 0.4189654 1.0000000 0.4329155
## V4 0.4981417 0.4691271 0.4329155 1.0000000
# Check if P is orthogonal (columns of P are orthonormal)
P %*% t(P)   # identity, up to floating-point rounding
##              [,1]         [,2]         [,3]         [,4]
## [1,] 1.000000e+00 0.000000e+00 0.000000e+00 2.775558e-17
## [2,] 0.000000e+00 1.000000e+00 3.747003e-16 0.000000e+00
## [3,] 0.000000e+00 3.747003e-16 1.000000e+00 3.122502e-17
## [4,] 2.775558e-17 0.000000e+00 3.122502e-17 1.000000e+00

Singular value decomposition (SVD)

Any matrix \(\mathbf{A}_{n\times p}\) can be decomposed as \[\mathbf{A=U\Sigma V'}\] where \(\mathbf{U}_{n\times n}\) and \(\mathbf{V}_{p\times p}\) are orthogonal matrices and \(\mathbf{\Sigma}_{n\times p}\) is a rectangular diagonal matrix of singular values.

We can then write \(\mathbf{A'A=V\Lambda V'}\) where \(\mathbf{V}\) is a matrix whose columns are eigenvectors of \(\mathbf{A'A}\), and \(\mathbf{\Lambda}\) is a diagonal matrix with eigenvalues of \(\mathbf{A'A}\). Note that \(\mathbf{A'A}\) is a symmetric matrix.

svd() computes the singular-value decomposition of a matrix.

## compute the singular-value decomposition of the standardized score matrix Z
svdsol <- svd(Z)

## 'd' component has singular values of Z (i.e., square roots of eigenvalues of Z'Z)
svdsol$d
## [1] 48.91767 25.32816 23.12170 20.66237
(lambda <- svdsol$d^2)    # squared singular values = eigenvalues of Z'Z
## [1] 2392.9381  641.5156  534.6129  426.9334
# This closely relates to the eigenvalues of R (correlation matrix)
# Note that Z'Z/(n-1) = R
# divide by nrow(Z)-1: Z is the matrix that was decomposed
(eigenval <- lambda/(nrow(Z)-1))        # eigenvalues of R (correlation matrix)
## [1] 2.3953334 0.6421578 0.5351480 0.4273608
eigsol$values      # identical to the eigenvalues derived from spectral decomposition
## [1] 2.3953334 0.6421578 0.5351480 0.4273608
## 'v' is a matrix whose columns are eigenvectors of Z'Z (these are orthonormal)
(V <- svdsol$v)
##            [,1]        [,2]       [,3]       [,4]
## [1,] -0.5196290  0.42758075 -0.1358105  0.7271285
## [2,] -0.5175370  0.34031673 -0.4377173 -0.6517235
## [3,] -0.4586217 -0.83697641 -0.2764287  0.1127998
## [4,] -0.5018040 -0.02880475  0.8447166 -0.1838935
eigsol$vectors     # identical to the eigenvectors derived from spectral decomposition 
##           [,1]        [,2]       [,3]       [,4]
## [1,] 0.5196290  0.42758075 -0.1358105  0.7271285
## [2,] 0.5175370  0.34031673 -0.4377173 -0.6517235
## [3,] 0.4586217 -0.83697641 -0.2764287  0.1127998
## [4,] 0.5018040 -0.02880475  0.8447166 -0.1838935
# Note that sign difference doesn't matter: eigenvectors are unique only up to sign