author     Eugeniy Mikhailov <evgmik@gmail.com>  2013-08-30 17:38:34 -0400
committer  Eugeniy Mikhailov <evgmik@gmail.com>  2013-08-30 17:42:57 -0400
commit     b3c921f6e78472fcbd69ae248ca636686cd5d4cc (patch)
tree       047333fc81ffd6c08e54e1593eb86f13307e5a4d /manual/chapters/appendices.tex
parent     8a985f9892c45ceda9de2fccfb42d4bf5122b0a9 (diff)
download   manual_for_Experimental_Atomic_Physics-b3c921f6e78472fcbd69ae248ca636686cd5d4cc.tar.gz
           manual_for_Experimental_Atomic_Physics-b3c921f6e78472fcbd69ae248ca636686cd5d4cc.zip
manual source dir properly named
Diffstat (limited to 'manual/chapters/appendices.tex')
-rw-r--r--  manual/chapters/appendices.tex  154
1 file changed, 0 insertions, 154 deletions
diff --git a/manual/chapters/appendices.tex b/manual/chapters/appendices.tex
deleted file mode 100644
index d310873..0000000
--- a/manual/chapters/appendices.tex
+++ /dev/null
@@ -1,154 +0,0 @@
-\chapter*{Errors}
-
-\section*{Propagation of Random Errors}
-Suppose one measures basically the same quantity twice. This might be the
-number of $\gamma$-rays detected in 10 minutes with a scintillation detector.
-Let $n_1$ be the number detected the first time and $n_2$ the number the
-second time. Assume that the average number for many such measurements is
-$\overline{n}$. We may then consider a variety of averages denoted by $<>$:
-\begin{eqnarray*}
-\overline{n}&=&<n>\\
-\overline{n_1}&=&<n>=\overline{n}\\
-<n_1-\overline{n}>&=&0\\
-\overline{n_2}&=&<n>\\
-\sigma_n&=&\sqrt{<(n-\overline{n})^2>}
-\end{eqnarray*}
-
-The root-mean-square (rms) deviation from the mean, $\sigma$, is what is
-often called the error in a measurement.
-We now determine the ``variance'' ($\sigma^2$) expected for various combinations of
-measurements. One only needs to take the square root of $\sigma^2$ to obtain
-the error. Consider first the sum $n_1+n_2$, whose deviation from its
-mean $2\overline{n}$ is $(n_1-\overline{n})+(n_2-\overline{n})$:
-\begin{eqnarray*}
-\sigma^2&=&<(n_1-\overline{n}+n_2-\overline{n})^2>\\
-&=&<(n_1-\overline{n})^2+(n_2-\overline{n})^2+2(n_1-\overline{n})(n_2-\overline{n})>\\
-&=&<(n_1-\overline{n})^2>+<(n_2-\overline{n})^2>+2<(n_1-\overline{n})(n_2-\overline{n})>\\
-&=&<(n_1-\overline{n})^2>+<(n_2-\overline{n})^2>+2<(n_1-\overline{n})><(n_2-\overline{n})>\\
-\sigma^2&=&\sigma_1^2+\sigma_2^2+0
-\end{eqnarray*}
-The last term averages to zero: since the two measurements are independent,
-the average of the product factors into the product of the averages, each of
-which is of the form $<n-\overline{n}>=0$.
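-
-For example, if the two measurements happen to have equal errors,
-$\sigma_1=\sigma_2$, the error in their sum is
-$\sqrt{\sigma_1^2+\sigma_2^2}=\sqrt{2}\,\sigma_1$, not $2\sigma_1$:
-independent errors add in quadrature.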
-
-With this result it is easy to get the variance in a linear combination of
-$n_1$ and $n_2$. If
-
-\begin{displaymath}
-f=a\cdot n_1 +b\cdot n_2
-\end{displaymath}
-
-then:
-\begin{displaymath}
-\sigma_f^2=a^2\sigma_1^2+b^2\sigma_2^2
-\end{displaymath}
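-
-A common application is background subtraction. For a net count
-$f=n_1-n_2$ (signal plus background minus background alone, i.e. $a=1$,
-$b=-1$) the variances still add,
-\begin{displaymath}
-\sigma_f^2=\sigma_1^2+\sigma_2^2 ,
-\end{displaymath}
-so subtracting a background never reduces the error, even though it reduces
-$f$ itself.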
-
-If the errors are small and $f$ is a function of $n_1$ and $n_2$,
-$f(n_1,n_2)$, then:
-\begin{equation}\label{ssgen}
-\sigma_f^2=\left(\frac{\partial f}{\partial n_1}\right)^2\sigma_1^2+\left(\frac{\partial f}{\partial n_2}\right)^2\sigma_2^2
-\end{equation}
-It should be clear that one can extend Eq. \ref{ssgen} to arbitrary numbers of
-parameters.
-
-As an example of this last form, suppose $f=n_1\cdot n_2$; then:
-\begin{displaymath}
-\sigma_f^2=n_2^2\sigma_1^2+n_1^2\sigma_2^2
-\end{displaymath}
-or
-\begin{displaymath}
-\frac{\sigma_f^2}{f^2}=\frac{\sigma_1^2}{n_1^2}+\frac{\sigma_2^2}{n_2^2}
-\end{displaymath}
-
-Thus in this case the fractional variances add.
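-
-The same fractional form holds for a quotient: if $f=n_1/n_2$,
-Eq. \ref{ssgen} gives
-\begin{displaymath}
-\sigma_f^2=\frac{\sigma_1^2}{n_2^2}+\frac{n_1^2\sigma_2^2}{n_2^4},
-\qquad\mbox{i.e.}\qquad
-\frac{\sigma_f^2}{f^2}=\frac{\sigma_1^2}{n_1^2}+\frac{\sigma_2^2}{n_2^2} .
-\end{displaymath}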
-
-Note: $\sigma_m$, the error in the mean of $n$ measurements of the
-same quantity, is $\sigma_m=\sigma /\sqrt{n}$.
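-For instance, averaging $n=100$ repeated readings that each scatter with
-$\sigma=10$ (illustrative numbers) gives $\sigma_m=10/\sqrt{100}=1$, a
-tenfold reduction in the error.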
-\subsection*{Probability Distribution Functions}
-\subsubsection*{Binomial}
-If the probability of {\it success} in a trial is $p$ then
-the probability of $n$ {\it successes} in $N$ trials is:
-\begin{displaymath}
-P(n)=\frac{N!}{(N-n)!n!}p^n(1-p)^{N-n}
-\end{displaymath}
-This distribution has a mean $\mu=Np$ and variance $\sigma^2=Np(1-p)$.
-This is the starting point for figuring the odds in card games, for example.
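-To illustrate, for $N=100$ tosses of a fair coin ($p=1/2$) one expects
-$\mu=Np=50$ heads with $\sigma=\sqrt{Np(1-p)}=5$, so a run of 65 heads would
-lie three standard deviations from the mean.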
-\subsubsection*{Poisson}
-The probability of $n$ events is:
-\begin{displaymath}
-P(n)=\frac{e^{-\mu}\mu^n}{n!}
-\end{displaymath}
-where $\mu$ is the mean value and the variance is $\sigma^2=\mu$.
-This is the distribution one gets, e.g., with the number of radioactive
-decays detected in a fixed finite amount of time. It can be derived from
-the binomial distribution in an appropriate limit.
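-To illustrate the size of counting errors, a run that yields on average
-$\mu=100$ counts has $\sigma=\sqrt{100}=10$, a 10\% relative error;
-counting four times as long ($\mu=400$) reduces this to 5\%.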
-\subsubsection*{Normal or Gaussian Distribution}
-This is the first continuous probability distribution considered here.
-\begin{displaymath}
-P(x)=\frac{1}{\sqrt{2\pi\sigma^2}}e^{\frac{-(x-\mu)^2}{2\sigma^2}}
-\end{displaymath}
-This function, as you might guess, has mean $\mu$ and variance $\sigma^2$.
-If one makes averages of almost anything one finds that the result is
-almost always well described by a Normal distribution; this is the content
-of the central limit theorem. Both the binomial and
-Poisson distributions approach this distribution in appropriate limits, as
-does the $\chi^2$ described below.
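-As a practical rule of thumb, about 68\% of measurements drawn from a Normal
-distribution fall within $\pm\sigma$ of the mean, about 95\% within
-$\pm 2\sigma$, and about 99.7\% within $\pm 3\sigma$.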
-\subsubsection*{Chi-square distribution: $\chi^2$}
-This probability density function (pdf) has the parameter: $N_f$, the number of
-degrees of freedom. It is:
-\begin{displaymath}
-P(x)=\frac{\frac{1}{2}\left(\frac{x}{2}\right)^{(N_f/2)-1}e^{-x/2}}{\Gamma\left(
-\frac{N_f}{2}\right)}
-\end{displaymath}
-The mean of this pdf is: $\mu=N_f$ and the variance is: $\sigma^2=2N_f$.
-The pdf is of considerable use in physics. It is used extensively in the
-fitting of histogrammed data.
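-As a rough guide, if a model describes the data well the fitted $\chi^2$
-should come out near $N_f$, with a typical spread of order $\sqrt{2N_f}$;
-a value much larger than $N_f$ usually signals a poor fit or underestimated
-errors.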
-\newpage
-
-\appendix
-\chapter*{Linear Least Squares}
-
-
-Consider a set of experimental results measured as a function of some
-parameter $x$, i.e., $E(x_i)$. Suppose that these results are expected to
-be represented by a theoretical function $T(x_i)$ and that $T(x_i)$ is
-in turn linearly expandable in terms of independent functions $f_j(x_i)$:
-\begin{displaymath}
-T(x_i)=\sum_ja_jf_j(x_i)
-\end{displaymath}
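-For example, a polynomial fit corresponds to the choice $f_j(x_i)=x_i^{j-1}$.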
-Suppose now one wants to find the coefficients $a_j$ by minimizing $\chi^2$,
-the sum of the squared differences between the experimental results and the
-theoretical function, i.e., minimize:
-\begin{displaymath}
-\chi^2=\sum_i\left(\sum_ja_jf_j(x_i)-E(x_i)\right)^2
-\end{displaymath}
-The minimum is found by requiring:
-\begin{displaymath}
-0=\frac{\partial}{\partial a_k}\chi^2=
-2\cdot \sum_i\left(\sum_ja_jf_j(x_i)-E(x_i)\right)\cdot f_k(x_i)
-\end{displaymath}
-This may be rewritten as:
-\begin{equation}\label{meq}
-\sum_i\left(\sum_ja_jf_j(x_i)f_k(x_i)\right)=\sum_iE(x_i)f_k(x_i)
-\end{equation}
-The rest is algebra. The formal solution, which can in fact be
-easily implemented, is to first define:
-\begin{eqnarray}
-M_{j,k}&=&\sum_if_j(x_i)f_k(x_i)\\
-V_k&=&\sum_iE(x_i)f_k(x_i)
-\end{eqnarray}
-So that Eq. \ref{meq} becomes:
-\begin{displaymath}
-\sum_j a_jM_{j,k}=V_k
-\end{displaymath}
-The $a_j$ may then be found by finding the inverse of $M_{j,k}$:
-\begin{displaymath}
-a_j=\sum_kV_k\cdot M^{-1}_{k,j}
-\end{displaymath}
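-
-As a concrete instance, for the straight-line fit of Fig. \ref{lsqf},
-$T(x_i)=a_1+a_2x_i$, one takes $f_1(x_i)=1$ and $f_2(x_i)=x_i$, so that
-\begin{displaymath}
-M=\left(\begin{array}{cc}
-N & \sum_ix_i\\
-\sum_ix_i & \sum_ix_i^2
-\end{array}\right),\qquad
-V=\left(\begin{array}{c}
-\sum_iE(x_i)\\
-\sum_ix_iE(x_i)
-\end{array}\right),
-\end{displaymath}
-where $N$ here denotes the number of data points; inverting this $2\times 2$
-matrix gives the familiar expressions for the intercept $a_1$ and slope $a_2$.
-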
-Question: How does this procedure change if:
-\begin{displaymath}
-\chi^2=\sum_i\frac{(T(x_i)-E(x_i))^2}{\sigma(x_i)^2}
-\end{displaymath}
-where $\sigma(x_i)$ is the error in the measurement of $E(x_i)$?
-
-\begin{figure}
-\centerline{\epsfig{width=\linewidth,angle=-90, file=datafg.eps}}
-\caption{\label{lsqf} Data Fit to a Straight Line.}
-\end{figure}