%% This document created by Scientific Notebook (R) Version 3.0 \documentclass[12pt,thmsa]{article} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \usepackage{sw20jart} %TCIDATA{TCIstyle=article/art4.lat,jart,sw20jart} %TCIDATA{} %TCIDATA{Created=Mon Aug 19 14:52:24 1996} %TCIDATA{LastRevised=Mon Apr 22 16:05:34 2002} %TCIDATA{Language=American English} %TCIDATA{CSTFile=Lab Report.cst} %TCIDATA{PageSetup=72,72,72,72,0} %TCIDATA{AllPages= %F=36,\PARA{038
\hfill \thepage}
%}
\input{tcilatex}
\begin{document}
\subsection{The Inverse of a Square Matrix}
\vspace{1pt}
Suppose $A$ is an $n\times n$ matrix. The purpose of these notes is to prove
that if $L$ is a left inverse of $A$, i.e., $L$ is an $n\times n$ matrix with
$LA=I_{n}$, then $L$ is also the unique inverse of $A$. This means that $%
L=A^{-1}$ and $A^{-1}A=AA^{-1}=I_{n}$, where $I_{n}$ is the $n\times n$
identity matrix with $\left( I_{n}\right) _{i,\text{ }j}=\delta _{ij}$. A
more detailed discussion, which includes left and right inverses of
non-square matrices, can be found in Ben Noble's text, \emph{Applied Linear
Algebra}, Prentice-Hall 1969, pp.~132--136.
\vspace{1pt}
\paragraph{Preliminaries}
If $A^{-1}$ exists the matrix $A$ is called invertible or non-singular.
\QTP{Body Math}
$\vspace{1pt}$
\QTP{Body Math}
Lemma 1: If $A$ has a left inverse $L$ and a right inverse exists, then $%
A^{-1}=L$ .
\QTP{Body Math}
$\vspace{1pt}$
Proof: Let $R$ be any right inverse, so that $AR=I_{n}$ .
$R=\left( LA\right) R=L\left( AR\right) =LI_{n}=L$; hence $AL=AR=I_{n}$ .
Furthermore, suppose $B$ is any left inverse, then $B=B\left( AL\right)
=\left( BA\right) L=I_{n}L=L$ . Thus, $L$ is the unique inverse of $A$ .
\vspace{1pt}
\QTP{Body Math}
Lemma 2: If $A$ is a non-singular $n\times n$ matrix, then $\left(
A^{-1}\right) ^{-1}=A$ .
\vspace{1pt}
Proof: $A^{-1}A=AA^{-1}=I_{n}$ , so $A^{-1}$ has $A$ for both a left and a
right inverse. So by Lemma 1 $A$ is the unique inverse of $A^{-1}$.
\vspace{1pt}
$A^{T}$ , the transpose of the $n\times m$ matrix $A$ , is the $m\times n$
matrix with
$\left( A^{T}\right) _{i,\text{ }j}=A_{j,\text{ }i}$ .
\vspace{1pt}
\QTP{Body Math}
Lemma 3: If $A$ is an $n\times m$ matrix and $B$ is an $m\times l$ matrix,
then $\left( AB\right) ^{T}=B^{T}A^{T}$ .
\QTP{Body Math}
$\vspace{1pt}$
Proof: $\left( AB\right) _{i,\text{ }j}=\sum_{k=1}^{m}A_{i,\text{ }k}$ $B_{k,%
\text{ }j}$,
so $\left( AB\right) _{i,\text{ }j}^{T}=\left( AB\right) _{j,\text{ }%
i}=\sum_{k=1}^{m}A_{j,\text{ }k}$ $B_{k,\text{ }i}=\sum_{k=1}^{m}\left(
B\right) _{i,\text{ }k}^{T}$ $\left( A\right) _{k,\text{ }j}^{T}=\left(
B^{T}A^{T}\right) _{i,\text{ }j}$ .
\vspace{1pt}
\QTP{Body Math}
Lemma 4: If $A$ is non-singular, then $\left( A^{T}\right) ^{-1}=\left(
A^{-1}\right) ^{T}$ .
\QTP{Body Math}
$\vspace{1pt}$
Proof: $I_{n}=I_{n}^{T}=\left( A^{-1}A\right) ^{T}=A^{T}\left( A^{-1}\right)
^{T}$ and \ $I_{n}=I_{n}^{T}=\left( AA^{-1}\right) ^{T}=\left( A^{-1}\right)
^{T}A^{T}$ .
Thus, $A^{T}$ has $\left( A^{-1}\right) ^{T}$ for both a left and a right
inverse, so by Lemma 1, $\left( A^{-1}\right) ^{T}$ is the unique inverse of
$A^{T}$ .
\vspace{1pt}
\QTP{Body Math}
Lemma 5: If $\{A_{l}\}_{l=1}^{p}$ is a sequence of non-singular $n\times n$
matrices, then
\QTP{Body Math}
$B\left( p\right) =A_{p}A_{p-1}\cdots A_{2}A_{1}=\prod_{k=1}^{p}A_{k}$ is
non-singular with $\left[ B\left( p\right) \right]
^{-1}=A_{1}^{-1}A_{2}^{-1}\cdots A_{p-1}^{-1}A_{p}^{-1}=\prod
_{k=1}^{p}A_{p+1-k}^{-1}$ .
\QTP{Body Math}
$\vspace{1pt}$
Proof: $B\left( 1\right) =A_{1}$ and $\left[ B\left( 1\right) \right]
^{-1}=A_{1}^{-1}$. Assume $\left[ B\left( p\right) \right] ^{-1}=\prod
_{k=1}^{p}A_{p+1-k}^{-1}$ .
Consider $B\left( p+1\right) =A_{p+1}\prod _{k=1}^{p}A_{k}$ $=A_{p+1}B(p)$ ,
where $A_{p+1}$ is non-singular. Then
$\left[ B(p)\right] ^{-1}A_{p+1}^{-1}A_{p+1}B(p)=\left[ B(p)\right]
^{-1}B(p)=I_{n}$ and $A_{p+1}B(p)\left[ B(p)\right]
^{-1}A_{p+1}^{-1}=A_{p+1}A_{p+1}^{-1}=I_{n}$, so by Lemma 1 $\left[
B(p)\right] ^{-1}A_{p+1}^{-1}$ is the unique inverse of $B(p+1)$ . Thus, $%
\left[ B\left( p+1\right) \right] ^{-1}=\left[ B\left( p\right) \right]
^{-1}A_{p+1}^{-1}=A_{1}^{-1}A_{2}^{-1}\cdots A_{p-1}^{-1}A_{p}^{-1}A_{p+1}^{-1}=%
\prod _{k=1}^{p+1}A_{\left( p+1\right) +1-k}^{-1}$ .
\vspace{1pt}
\paragraph{Elementary Matrices}
\vspace{1pt}
The three elementary row operations on an $n\times n$ matrix $A$ can be
achieved by matrix multiplication with an `elementary matrix'. The three
types of elementary matrices and their inverses are displayed below. The
inverses are stated by considering the inverse of the stated elementary row
operation.
I. Switching rows $r$ and $s$ in the matrix $A$ .
$\vspace{1pt}$
$\left[ E^{I}\left( r,s\right) \right] _{i,\text{ }j}=\{
\begin{array}{l}
\delta _{ij}\text{ if }i\neq r\text{ and }i\neq s \\
\delta _{rj}\text{ if }i=s \\
\delta _{sj}\text{ if }i=r\text{ }
\end{array}
=\delta _{ij}+\delta _{ir}\left( \delta _{sj}-\delta _{ij}\right) +\delta
_{is}\left( \delta _{rj}-\delta _{ij}\right) $
\QTP{Body Math}
$\vspace{1pt}$Note: $E^{I}\left( r,r\right) $ $=I_{n}$
$\left[ E^{I}\left( r,s\right) \right] ^{-1}=E^{I}\left( r,s\right) $
\vspace{1pt}
II. Multiplying through the $r$th row by the non-zero constant $c$.$\vspace{1pt}$
$\left[ E^{II}\left( c,r\right) \right] _{i,\text{ }j}=\{
\begin{array}{l}
\delta _{ij}\text{ if }i\neq r\text{ } \\
c\delta _{rj}\text{ if }i=r\text{ }
\end{array}
=\delta _{ij}+\delta _{ir}\delta _{rj}\left( c-1\right) $
\QTP{Body Math}
$\vspace{1pt}$Note: $E^{II}\left( 1,r\right) $ $=I_{n}$
$\left[ E^{II}\left( c,r\right) \right] ^{-1}=E^{II}\left( c^{-1},r\right) $
\vspace{1pt}
III. Adding $c$ times row $r$ to row $s$, where $r\neq s$ .
$\left[ E^{III}\left( c,r,s\right) \right] _{i,\text{ }j}=\{
\begin{array}{l}
\delta _{ij}\text{ if }i\neq s\text{ } \\
c\delta _{rj}+\delta _{sj}\text{ if }i=s\text{ }
\end{array}
=\delta _{ij}+c\delta _{is}\delta _{rj}$
\QTP{Body Math}
$\vspace{1pt}$Note: $E^{III}\left( 0,r,s\right) $ $=I_{n}$
$\left[ E^{III}\left( c,r,s\right) \right] ^{-1}=E^{III}\left( -c,r,s\right)
$
\vspace{1pt}
The following matrix products verify that these are the correct elementary
matrices and matrix inverses.
\vspace{1pt}I. $\left[ E^{I}\left( r,s\right) A\right]
_{i,m}=\sum_{j=1}^{n}\left( \delta _{ij}+\delta _{ir}\left( \delta
_{sj}-\delta _{ij}\right) +\delta _{is}\left( \delta _{rj}-\delta
_{ij}\right) \right) A_{j,m}$
\qquad \qquad \qquad \qquad $=A_{i,m}+\delta _{ir}\left(
A_{s,m}-A_{i,m}\right) +\delta _{is}\left( A_{r,m}-A_{i,m}\right) $
\qquad \qquad \qquad \qquad $=\{
\begin{array}{l}
A_{i,m}\text{ if }i\neq r\text{ and }i\neq s \\
A_{r,m}\text{ if }i=s\text{ } \\
A_{s,m}\text{ if }i=r\text{ }
\end{array}
$
\vspace{1pt}
$\left[ E^{I}\left( r,s\right) E^{I}\left( r,s\right) \right] _{i,m}$
$=\sum_{j=1}^{n}\left( \delta _{ij}+\delta _{ir}\left( \delta _{sj}-\delta
_{ij}\right) +\delta _{is}\left( \delta _{rj}-\delta _{ij}\right) \right)
\left( \delta _{jm}+\delta _{jr}\left( \delta _{sm}-\delta _{jm}\right)
+\delta _{js}\left( \delta _{rm}-\delta _{jm}\right) \right) $
$=\{
\begin{array}{l}
\delta _{im}\text{ if }i\neq r\text{ and }i\neq s \\
\delta _{sm}\left( 1+2\delta _{rs}\delta _{mr}-2\delta _{rs}\right) \text{
if }i=s\text{ } \\
\delta _{rm}\left( 1+2\delta _{rs}\delta _{ms}-2\delta _{rs}\right) \text{
if }i=r\text{ }
\end{array}
=
\begin{array}{l}
\delta _{im}\text{ if }i\neq r\text{ and }i\neq s \\
\delta _{sm}\text{ if }i=s\text{ } \\
\delta _{rm}\text{ if }i=r\text{ }
\end{array}
=\delta _{im}$
\vspace{1pt}
II. $\left[ E^{II}\left( c,r\right) A\right] _{i,m}=\sum_{j=1}^{n}\left(
\delta _{ij}+\delta _{ir}\delta _{rj}\left( c-1\right) \right) A_{j,m}$
\qquad \qquad \qquad \qquad $=A_{i,m}+\delta _{ir}\left( c-1\right) A_{r,m}$
\qquad \qquad \qquad \qquad $=\{
\begin{array}{l}
A_{i,m}\text{ if }i\neq r\text{ } \\
cA_{r,m}\text{ if }i=r\text{ }
\end{array}
$\vspace{1pt}
$\left[ E^{II}\left( c,r\right) E^{II}\left( c^{-1},r\right) \right] _{i,m}$
$=\sum_{j=1}^{n}\left( \delta _{ij}+\delta _{ir}\delta _{rj}\left(
c-1\right) \right) \left( \delta _{jm}+\delta _{jr}\delta _{rm}\left( \frac{1%
}{c}-1\right) \right) $
$=\delta _{im}+\left( \frac{1}{c}-1\right) \delta _{rm}\delta _{ri}+\left(
c-1\right) \delta _{ri}\delta _{rm}+\left( \frac{1}{c}-1\right) \left(
c-1\right) \delta _{ri}\delta _{rm}=\delta _{im}$
Similarly, $\left[ E^{II}\left( c^{-1},r\right) E^{II}\left( c,r\right)
\right] _{i,m}=\delta _{im}$ .
\vspace{1pt}
III. $\left[ E^{III}\left( c,r,s\right) A\right] _{i,m}=\sum_{j=1}^{n}\left(
\delta _{ij}+c\delta _{is}\delta _{rj}\right) A_{j,m}$
\qquad \qquad \qquad \qquad $=A_{i,m}+c\delta _{is}A_{r,m}$
\qquad \qquad \qquad \qquad $=\{
\begin{array}{l}
A_{i,m}\text{ if }i\neq s\text{ } \\
A_{s,m}+cA_{r,m}\text{ if }i=s\text{ }
\end{array}
$\vspace{1pt}
$\left[ E^{III}\left( c,r,s\right) E^{III}\left( -c,r,s\right) \right]
_{i,m} $
$=\sum_{j=1}^{n}\left( \delta _{ij}+c\delta _{is}\delta _{rj}\right) \left(
\delta _{jm}-c\delta _{js}\delta _{rm}\right) $
$=\delta _{im}-c\delta _{is}\delta _{rm}+c\delta _{is}\delta
_{rm}-c^{2}\delta _{is}\delta _{rs}\delta _{rm}=\delta _{im}$ , since $r\neq
s$ for the third elementary row operation. Similarly, $\left[ E^{III}\left(
-c,r,s\right) E^{III}\left( c,r,s\right) \right] _{i,m}=\delta _{im}$ .
\vspace{1pt}
\paragraph{The Fundamental Theorem of Square Matrix Inversion}
\vspace{1pt}
Given any square $n\times n$ matrix $A$ it can be transformed via a sequence
of $p$ elementary row operations into a matrix $U$ which is in reduced row
echelon form. If the $n$ rows of $A$ are linearly independent, then $U=I_{n%
\text{ }}$. If only $l$ ($l