# We verified that the (X'X)^-1 matrix multiplied by the MSe provides the variances # (the square roots of which are the standard errors) of regression coefficients. # Here's the element of the matrix that's relevant to the second slope (copied from SAS): > 0.0025897097 [1] 0.002589710 # We multiply it by the MSe (also from SAS)... > 0.0025897097 * 50.290426 [1] 0.1302376 # ...and take the square root: > sqrt(0.0025897097 * 50.290426) [1] 0.3608845 # Note that the result corresponded to the SE for the second slope in the SAS output. # Next, we demonstrated that the result of the "/inverse" command in SAS is what we # said it was. # We read in the regression data... > inverse <- read.table("c:/documents and settings/jvevea/desktop/inverse.txt") > inverse V1 V2 V3 1 78 13 2 2 79 14 6 3 79 13 1 . . . 98 68 15 4 99 87 16 6 100 74 12 4 # ...and create a matrix consisting of the predictors preceded by a column of 1's: > X <- cbind( rep(1,100), inverse[,2], inverse[,3]) > X [,1] [,2] [,3] [1,] 1 13 2 [2,] 1 14 6 [3,] 1 13 1 . . . [98,] 1 15 4 [99,] 1 16 6 [100,] 1 12 4 # We do the matrix calculation: > > output <- solve(t(X) %*% X) > output [,1] [,2] [,3] [1,] 0.545935670 -0.0373021109 -0.0024728986 [2,] -0.037302111 0.0029356502 -0.0007632733 [3,] -0.002472899 -0.0007632733 0.0025897097 # Note that the results correspond to the matrix we already saw in SAS. # For no reason other than the fact that it was an already available # matrix, we used that matrix to demonstrate the Cholesky decomposition. # Note that the result is an upper triangular matrix. > temp <- chol(output) > temp [,1] [,2] [,3] [1,] 0.7388746 -0.05048504 -0.003346845 [2,] 0.0000000 0.01967006 -0.047393797 [3,] 0.0000000 0.00000000 0.018230094 # The Cholesky decomposition functions as a matrix equivalent of # the square root. 
If we multiply the output by itself (properly # transposing and using the correct order of matrix multiplication), # we get back the original matrix: > t(temp) %*% temp [,1] [,2] [,3] [1,] 0.545935670 -0.0373021109 -0.0024728986 [2,] -0.037302111 0.0029356502 -0.0007632733 [3,] -0.002472899 -0.0007632733 0.0025897097 > output [,1] [,2] [,3] [1,] 0.545935670 -0.0373021109 -0.0024728986 [2,] -0.037302111 0.0029356502 -0.0007632733 [3,] -0.002472899 -0.0007632733 0.0025897097 > # This will prove useful for simulating multivariate data.