# I'm very sorry to have taken so long to post this transcript. I neglected to save # it when I closed the R session, and had to go through a lengthy process to # reconstruct what we had done. # First, we reviewed our first way of constructing a matrix, which is to bind # columns together. This approach will be useful if you are creating a matrix # from already existing variables. > x1 <- c(1,2,3) > x2 <- c(3,4,5) > x3 <- c(1,5,4) > x <- cbind(x1,x2,x3) > x x1 x2 x3 [1,] 1 3 1 [2,] 2 4 5 [3,] 3 5 4 # Sometimes, though, it will be more natural to combine the rows into a matrix, # rather than the columns: > x1 <- c(1,3,1) > x2 <- c(2,4,5) > x3 <- c(3,5,4) > rbind(x1,x2,x3) [,1] [,2] [,3] x1 1 3 1 x2 2 4 5 x3 3 5 4 # A third way of creating a matrix is to use the matrix() function, including # the numbers themselves, the number of rows the matrix should have, and the # number of columns it should have: > x <- matrix(c(1,3,1,2,4,5,3,5,4),3,3) > x [,1] [,2] [,3] [1,] 1 2 3 [2,] 3 4 5 [3,] 1 5 4 # Note, however, that R worked down columns rather than across rows. It does # that by default; to make it organize the numbers across rows before going # down the columns, we must change that default: > x <- matrix(c(1,3,1,2,4,5,3,5,4),3,3, byrow=T) > x [,1] [,2] [,3] [1,] 1 3 1 [2,] 2 4 5 [3,] 3 5 4 # It is particularly easy to create a diagonal matrix: > I <- diag(rep(1,3)) # Here's a demonstration that shows you what the "rep" function in that # example did: > rep(1,3) [1] 1 1 1 # And here's the diagonal matrix we created with the "diag" function: > I [,1] [,2] [,3] [1,] 1 0 0 [2,] 0 1 0 [3,] 0 0 1 # Note that I chose the variable name "I" because this is an identity # matrix. 
That is, if a conforming matrix is pre- or post-multiplied by # I, the result is the original matrix: > x%*%I [,1] [,2] [,3] [1,] 1 3 1 [2,] 2 4 5 [3,] 3 5 4 > I%*%x [,1] [,2] [,3] [1,] 1 3 1 [2,] 2 4 5 [3,] 3 5 4 # Here's an example of a diagonal matrix that is not an identity matrix: > z <- diag(c(1,2,3)) > z [,1] [,2] [,3] [1,] 1 0 0 [2,] 0 2 0 [3,] 0 0 3 # Note that the inverse of a diagonal matrix is particularly easy: It's # just the diagonal matrix containing the reciprocals of the original values. > zinv <- diag(c(1,1/2,1/3)) # Here is confirmation that zinv is the inverse of z: > z%*%zinv [,1] [,2] [,3] [1,] 1 0 0 [2,] 0 1 0 [3,] 0 0 1 > zinv%*%z [,1] [,2] [,3] [1,] 1 0 0 [2,] 0 1 0 [3,] 0 0 1 # Matrices that are larger than 2-by-2 and that are not diagonal have # more elusive inverses. However, it is easy to make R calculate the # inverse using the "solve" function: > x [,1] [,2] [,3] [1,] 1 3 1 [2,] 2 4 5 [3,] 3 5 4 > xinv <- solve(x) > xinv [,1] [,2] [,3] [1,] -0.9 -0.7 1.1 [2,] 0.7 0.1 -0.3 [3,] -0.2 0.4 -0.2 # Here is confirmation that xinv is the inverse of x. Note that all # of the off-diagonal elements are numerically almost exactly zero, and # all of the on-diagonal elements are 1.0; so this is an identity matrix. > x%*%xinv [,1] [,2] [,3] [1,] 1.000000e+00 -1.665335e-16 8.326673e-17 [2,] -1.942890e-16 1.000000e+00 8.326673e-17 [3,] -1.110223e-16 -2.220446e-16 1.000000e+00 # Here is another example of matrix inversion: > x <- matrix(c(1,2,3,0,5,9,2,4,6),3,3) > x [,1] [,2] [,3] [1,] 1 0 2 [2,] 2 5 4 [3,] 3 9 6 > solve(x) Error in solve.default(x) : system is computationally singular: reciprocal condition number = 4.40565e-18 # R encountered an error because it is not possible to invert this matrix; that # is to say, x is "singular." 
# A matrix is singular if and only if its determinant is zero: > det(x) [1] -1.665335e-15 # Recall that we were able to invert z: > z [,1] [,2] [,3] [1,] 1 0 0 [2,] 0 2 0 [3,] 0 0 3 # Now observe that z's determinant is not zero: > det(z) [1] 6 # We were also able to invert y (a 2-by-2 matrix defined earlier in our session): > y [,1] [,2] [1,] 1 2 [2,] 3 4 > solve(y) [,1] [,2] [1,] -2.0 1.0 [2,] 1.5 -0.5 # So y's determinant must be non-zero: > det(y) [1] -2 # Next, we considered the definitions of eigenvalues and eigenvectors. # (I have altered this somewhat for clarity; the matrix that was originally # called x has been changed to A in order to match the conventions used # in the Powerpoint presentation.) # Here's the matrix we'll work with: > A [,1] [,2] [,3] [1,] 1 0 2 [2,] 2 5 4 [3,] 3 9 6 # To do an eigen analysis in R, use the "eigen" function. R will # return an object with two components: the values and the # vectors: > eigen(A) $values [1] 1.208276e+01 -8.276253e-02 3.826279e-15 $vectors [,1] [,2] [,3] [1,] 0.1518855 -0.87903665 8.944272e-01 [2,] 0.5182149 -0.02862668 -1.350664e-15 [3,] 0.8416556 0.47589398 -4.472136e-01 # To refer to a particular one of those objects, we use "$" notation. If we # then want to refer to a particular element of the sub-object, we can # specify that using [] notation. So, for example, here we set a variable # that we have called "lambda" equal to the value of the first member of # the $values vector: > lambda <- eigen(A)$values[1] > lambda [1] 12.08276 # According to our definition, lambda is an eigenvalue of A if the determinant # of A - lambda I = 0 (which implies that A - lambda I is singular). # Let's confirm that. First, recall that I has already been set to be the # 3-by-3 identity matrix: > I [,1] [,2] [,3] [1,] 1 0 0 [2,] 0 1 0 [3,] 0 0 1 > A - lambda*I [,1] [,2] [,3] [1,] -11.08276 0.000000 2.000000 [2,] 2.00000 -7.082763 4.000000 [3,] 3.00000 9.000000 -6.082763 > det(A - lambda*I) [1] 6.934167e-13 # Note that the determinant is numerically almost exactly zero, as expected. # Now let's think about the eigenvectors. 
Recall from our definition that # each eigenvalue (lambda) of a matrix A has a corresponding eigenvector (x), # such that Ax = lambda x. # To demonstrate this, we extract the first eigenvector. Using bracket notation, # we set x equal to the [,1] element of the eigenvectors. Bracket notation for # a matrix includes two components: the row and the column. By leaving the row # blank, we have told R that we want ALL rows for the first column: > x <- eigen(A)$vectors[ ,1] > x [1] 0.1518855 0.5182149 0.8416556 # Here's Ax: > A%*%x [,1] [1,] 1.835197 [2,] 6.261468 [3,] 10.169524 # Here's lambda x: > lambda*x [1] 1.835197 6.261468 10.169524 # Note that the values are the same, confirming that x is the eigenvector corresponding # to the eigenvalue lambda. # Let's do another example. Here's our matrix A again: > A [,1] [,2] [,3] [1,] 1 0 2 [2,] 2 5 4 [3,] 3 9 6 # And here is the eigenanalysis: > eigen(A) $values [1] 1.208276e+01 -8.276253e-02 3.826279e-15 $vectors [,1] [,2] [,3] [1,] 0.1518855 -0.87903665 8.944272e-01 [2,] 0.5182149 -0.02862668 -1.350664e-15 [3,] 0.8416556 0.47589398 -4.472136e-01 # This time, we'll confirm that the SECOND eigenvalue is really an eigenvalue: > lambda <- eigen(A)$values[2] > lambda [1] -0.08276253 > det(A-lambda*I) [1] 7.454551e-16 # And now we will confirm that the corresponding vector really is the eigenvector: > x <- eigen(A)$vectors[ ,2] > x [1] -0.87903665 -0.02862668 0.47589398 > A%*%x [,1] [1,] 0.072751298 [2,] 0.002369216 [3,] -0.039386190 > lambda*x [1] 0.072751298 0.002369216 -0.039386190