\chapter*{Errors}

\section*{Propagation of Random Errors}
Suppose one measures essentially the same quantity twice. This might be the
number of $\gamma$-rays detected in 10 minutes with a scintillation detector.
Let $n_1$ be the number detected the first time and $n_2$ the number detected
the second time. Assume that the average over many such measurements is
$\overline{n}$. We may then consider a variety of averages, denoted by
$\langle\;\rangle$:
\begin{eqnarray*}
\overline{n}&=&\langle n\rangle\\
\overline{n_1}&=&\langle n\rangle=\overline{n}\\
\langle n_1-\overline{n}\rangle&=&0\\
\overline{n_2}&=&\langle n\rangle\\
\sigma_n&=&\sqrt{\langle(n-\overline{n})^2\rangle}
\end{eqnarray*}

The root-mean-square (rms) deviation from the mean, $\sigma$, is what is
often called the error in a measurement.
We now determine the ``variance'' ($\sigma^2$) expected for various
combinations of measurements; one only needs to take the square root of
$\sigma^2$ to obtain the error. Consider first the sum of the two
measurements, $n_1+n_2$, whose mean is $2\overline{n}$:
\begin{eqnarray*}
\sigma^2&=&\langle(n_1-\overline{n}+n_2-\overline{n})^2\rangle\\
&=&\langle(n_1-\overline{n})^2+(n_2-\overline{n})^2+2(n_1-\overline{n})(n_2-\overline{n})\rangle\\
&=&\langle(n_1-\overline{n})^2\rangle+\langle(n_2-\overline{n})^2\rangle+2\langle(n_1-\overline{n})(n_2-\overline{n})\rangle\\
&=&\langle(n_1-\overline{n})^2\rangle+\langle(n_2-\overline{n})^2\rangle+2\langle n_1-\overline{n}\rangle\langle n_2-\overline{n}\rangle\\
\sigma^2&=&\sigma_1^2+\sigma_2^2+0
\end{eqnarray*}
The last term averages to zero: because the two measurements are
independent, the average of the product factors into the product of the
averages, each of which vanishes.

With this result it is easy to get the variance of a linear combination of
$n_1$ and $n_2$. If

\begin{displaymath}
f=a\cdot n_1 +b\cdot n_2
\end{displaymath}

then:
\begin{displaymath}
\sigma_f^2=a^2\sigma_1^2+b^2\sigma_2^2
\end{displaymath}
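
As an illustration (the counts here are invented purely for the example),
take the background subtraction $f=n_1-n_2$, i.e., $a=1$ and $b=-1$, with
Poisson counts $n_1=1000$ and $n_2=400$ so that $\sigma_1^2=1000$ and
$\sigma_2^2=400$. Then
\begin{displaymath}
\sigma_f^2=\sigma_1^2+\sigma_2^2=1400,\qquad \sigma_f\approx 37,
\end{displaymath}
even though the subtracted result itself is only $f=600$.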

If the errors are small and $f$ is a function of $n_1$ and $n_2$,
$f(n_1,n_2)$, then:
\begin{equation}\label{ssgen}
\sigma_f^2=\left(\frac{\partial f}{\partial n_1}\right)^2\sigma_1^2+\left(\frac{\partial f}{\partial n_2}\right)^2\sigma_2^2
\end{equation}
It should be clear that one can extend Eq. \ref{ssgen} to an arbitrary
number of parameters.

As an example of this last form, suppose $f=n_1\cdot n_2$; then:
\begin{displaymath}
\sigma_f^2=n_2^2\sigma_1^2+n_1^2\sigma_2^2
\end{displaymath}
or 
\begin{displaymath}
\frac{\sigma_f^2}{f^2}=\frac{\sigma_1^2}{n_1^2}+\frac{\sigma_2^2}{n_2^2}
\end{displaymath}

Thus in this case the fractional variances add.

Note: $\sigma_m$, the error in the mean of $n$ measurements of the same
quantity, is $\sigma_m=\sigma/\sqrt{n}$.
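A quick sketch of where this comes from, using Eq.~\ref{ssgen}: the mean of
$n$ repeated measurements is $m=\frac{1}{n}(n_1+n_2+\cdots)$, so every
partial derivative $\partial m/\partial n_i$ equals $1/n$ and every
measurement carries the same variance $\sigma^2$, giving
\begin{displaymath}
\sigma_m^2=n\cdot\frac{\sigma^2}{n^2}=\frac{\sigma^2}{n},
\qquad\mbox{i.e.,}\qquad \sigma_m=\frac{\sigma}{\sqrt{n}}.
\end{displaymath}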
\subsection*{Probability Distribution Functions}
\subsubsection*{Binomial}
If the probability of {\it success} in a trial is $p$ then
the probability of  $n$ {\it successes}   in $N$ trials is:
\begin{displaymath}
P(n)=\frac{N!}{(N-n)!n!}p^n(1-p)^{N-n}
\end{displaymath}
This distribution has a mean $\mu=Np$ and variance $\sigma^2=Np(1-p)$.
This is the starting point for figuring the odds in card games, for example.
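As a small worked example, the probability of exactly $n=5$ heads in
$N=10$ tosses of a fair coin ($p=1/2$) is
\begin{displaymath}
P(5)=\frac{10!}{5!\,5!}\left(\frac{1}{2}\right)^{10}=\frac{252}{1024}\approx 0.25,
\end{displaymath}
with mean $\mu=5$ and standard deviation $\sigma=\sqrt{2.5}\approx 1.6$.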
\subsubsection*{Poisson}
The probability of $n$ events is:
\begin{displaymath}
P(n)=\frac{e^{-\mu}\mu^n}{n!}
\end{displaymath}
where $\mu$ is the mean value and the variance is $\sigma^2=\mu$.
This is the distribution one gets, e.g., for the number of radioactive
decays detected in a fixed, finite amount of time. It can be derived from
the binomial distribution in the limit of large $N$ and small $p$ with
$Np=\mu$ held fixed.
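For instance (the numbers here are only illustrative), if a source yields on
average $\mu=100$ counts in a given counting interval, the expected
fluctuation is $\sigma=\sqrt{100}=10$, a 10\% relative error; counting four
times as long ($\mu=400$, $\sigma=20$) cuts the relative error to 5\%.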
\subsubsection*{Normal or Gaussian Distribution}
This is the first continuous probability distribution considered here.
\begin{displaymath}
P(x)=\frac{1}{\sqrt{2\pi\sigma^2}}e^{\frac{-(x-\mu)^2}{2\sigma^2}}
\end{displaymath}
This function, as you might guess, has mean $\mu$ and variance $\sigma^2$.
If one averages almost any quantity, the result is almost always well
described by a Normal distribution (the central limit theorem). Both the
binomial and Poisson distributions approach this distribution in
appropriate limits, as does the $\chi^2$ described below.
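Two numbers worth remembering for this distribution are the fractions of the
probability contained within one and two standard deviations of the mean:
\begin{displaymath}
\int_{\mu-\sigma}^{\mu+\sigma}P(x)\,dx\approx 0.68,\qquad
\int_{\mu-2\sigma}^{\mu+2\sigma}P(x)\,dx\approx 0.95.
\end{displaymath}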
\subsubsection*{Chi-square distribution: $\chi^2$} 
This probability density function (pdf) has one parameter, $N_f$, the number
of degrees of freedom. For $x\ge 0$ it is:
\begin{displaymath}
P(x)=\frac{\frac{1}{2}\left(\frac{x}{2}\right)^{(N_f/2)-1}e^{-x/2}}{\Gamma\left(
\frac{N_f}{2}\right)}
\end{displaymath}
The mean of this pdf is: $\mu=N_f$ and the variance is: $\sigma^2=2N_f$.
The pdf is of considerable use in physics. It is used extensively in the 
fitting of histogrammed data.  
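A rule of thumb that follows directly from these moments: a good fit should
give $\chi^2\approx N_f$, so the ``reduced chi-square'' is expected to be
\begin{displaymath}
\frac{\chi^2}{N_f}\approx 1\pm\sqrt{\frac{2}{N_f}}.
\end{displaymath}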
\newpage 

\appendix
\chapter*{Linear Least Squares}


Consider a set of experimental results measured as a function of some
parameter $x$, i.e., $E(x_i)$. Suppose that these results are expected to
be represented by a theoretical function $T(x_i)$ and that $T(x_i)$ is
in turn linearly expandable in terms of independent functions $f_j(x_i)$:
\begin{displaymath}
T(x_i)=\sum_ja_jf_j(x_i)
\end{displaymath}
Suppose now one wants to find the coefficients $a_j$ by minimizing $\chi^2$,
the sum of squared differences between the experimental results and the
theoretical function, i.e., by minimizing:
\begin{displaymath}
\chi^2=\sum_i\left(\sum_ja_jf_j(x_i)-E(x_i)\right)^2
\end{displaymath}
The minimum is found by requiring, for each $k$:
\begin{displaymath}
0=\frac{\partial}{\partial a_k}\chi^2=
2\cdot \sum_i\left(\sum_ja_jf_j(x_i)-E(x_i)\right)\cdot f_k(x_i)
\end{displaymath}
This may be rewritten as:
\begin{equation}\label{meq}
\sum_i\left(\sum_ja_jf_j(x_i)f_k(x_i)\right)=\sum_iE(x_i)f_k(x_i)
\end{equation}
The rest is algebra. The formal solution, which can in fact be 
easily implemented, is to first define: 
\begin{eqnarray}
M_{j,k}&=&\sum_if_j(x_i)f_k(x_i)\\
V_k&=&\sum_iE(x_i)f_k(x_i)
\end{eqnarray}
so that Eq. \ref{meq} becomes:
\begin{displaymath}
\sum_j a_jM_{j,k}=V_k
\end{displaymath}
The $a_j$ may then be found by inverting the matrix $M_{j,k}$:
\begin{displaymath}
a_j=\sum_kV_k\cdot M^{-1}_{k,j}
\end{displaymath}
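
As a concrete instance of this machinery, consider the straight-line case
shown in Fig.~\ref{lsqf}: $T(x)=a_1+a_2x$, i.e., $f_1(x)=1$ and $f_2(x)=x$.
The matrix and vector are then
\begin{displaymath}
M=\left(\begin{array}{cc}
\sum_i 1 & \sum_i x_i\\
\sum_i x_i & \sum_i x_i^2
\end{array}\right),\qquad
V=\left(\begin{array}{c}
\sum_i E(x_i)\\
\sum_i x_iE(x_i)
\end{array}\right),
\end{displaymath}
and inverting the $2\times 2$ matrix $M$ gives the usual closed forms for
the intercept $a_1$ and slope $a_2$.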
Question: How does this procedure change if:
\begin{displaymath}
\chi^2=\sum_i\frac{(T(x_i)-E(x_i))^2}{\sigma(x_i)^2}
\end{displaymath}
where $\sigma(x_i)$ is the error in the measurement of $E(x_i)$?

\begin{figure}
\centerline{\epsfig{width=\linewidth,angle=-90, file=datafg.eps}}
\caption{\label{lsqf} Data Fit to a Straight Line.}
\end{figure}