Commit d1f1b86b authored by uvkjt's avatar uvkjt
Browse files

Initial

parents
\relax
\providecommand\hyper@newdestlabel[2]{}
\providecommand\zref@newlabel[2]{}
\@writefile{toc}{\contentsline {chapter}{\numberline {2}Wahrscheinlichkeit und Statistik}{8}{chapter.2}}
\@writefile{lof}{\addvspace {10\p@ }}
\@writefile{lot}{\addvspace {10\p@ }}
\@writefile{lol}{\addvspace {10\p@ }}
\@writefile{toc}{\contentsline {section}{\numberline {2.1}Deskriptive Statistik}{8}{section.2.1}}
\@setckpt{gws}{
\setcounter{page}{9}
\setcounter{equation}{0}
\setcounter{enumi}{5}
\setcounter{enumii}{0}
\setcounter{enumiii}{0}
\setcounter{enumiv}{0}
\setcounter{footnote}{0}
\setcounter{mpfootnote}{0}
\setcounter{part}{0}
\setcounter{chapter}{2}
\setcounter{section}{1}
\setcounter{subsection}{0}
\setcounter{subsubsection}{0}
\setcounter{paragraph}{0}
\setcounter{subparagraph}{0}
\setcounter{figure}{0}
\setcounter{table}{0}
\setcounter{Item}{5}
\setcounter{Hfootnote}{0}
\setcounter{bookmark@seq@number}{12}
\setcounter{mdf@globalstyle@cnt}{1}
\setcounter{mdfcountframes}{0}
\setcounter{mdf@env@i}{0}
\setcounter{mdf@env@ii}{0}
\setcounter{mdf@zref@counter}{0}
\setcounter{lstnumber}{1}
\setcounter{section@level}{1}
\setcounter{lstlisting}{0}
}
% gws.tex — chapter stub for "Wahrscheinlichkeit und Statistik"
% (probability and statistics); section 2.1 has no body text yet.
\chapter{Wahrscheinlichkeit und Statistik}
\section{Deskriptive Statistik}
\ No newline at end of file
\relax
\providecommand\hyper@newdestlabel[2]{}
\providecommand\zref@newlabel[2]{}
\providecommand*\new@tpo@label[2]{}
\providecommand\HyperFirstAtBeginDocument{\AtBeginDocument}
\HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined
\global\let\oldcontentsline\contentsline
\gdef\contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}}
\global\let\oldnewlabel\newlabel
\gdef\newlabel#1#2{\newlabelxx{#1}#2}
\gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}}
\AtEndDocument{\ifx\hyper@anchor\@undefined
\let\contentsline\oldcontentsline
\let\newlabel\oldnewlabel
\fi}
\fi}
\global\let\hyper@last\relax
\gdef\HyperFirstAtBeginDocument#1{#1}
\providecommand\HyField@AuxAddToFields[1]{}
\providecommand\HyField@AuxAddToCoFields[2]{}
\@input{telematik.aux}
\pgfsyspdfmark {pgfid1}{3729359}{15656453}
\pgfsyspdfmark {pgfid2}{3729359}{35173144}
\pgfsyspdfmark {pgfid3}{3729359}{27773425}
\pgfsyspdfmark {pgfid4}{3729359}{10916917}
\pgfsyspdfmark {pgfid5}{3729359}{16755699}
\@input{gws.aux}
\global\@namedef{scr@dte@chapter@lastmaxnumwidth}{10.84047pt}
\global\@namedef{scr@dte@section@lastmaxnumwidth}{18.37163pt}
\global\@namedef{scr@dte@subsection@lastmaxnumwidth}{26.8883pt}
File added
This diff is collapsed.
\BOOKMARK [0][-]{chapter.1}{Telematik}{}% 1
\BOOKMARK [1][-]{section.1.1}{Erinnerung aus Rechnernetze}{chapter.1}% 2
\BOOKMARK [1][-]{section.1.2}{Routers}{chapter.1}% 3
\BOOKMARK [2][-]{subsection.1.2.1}{Overview}{section.1.2}% 4
\BOOKMARK [2][-]{subsection.1.2.2}{The "Longest Prefix Matching" - LPM}{section.1.2}% 5
\BOOKMARK [2][-]{subsection.1.2.4}{Tries}{section.1.2}% 6
\BOOKMARK [2][-]{subsection.1.2.6}{Hash Tables}{section.1.2}% 7
\BOOKMARK [2][-]{subsection.1.2.7}{LPM in Hardware}{section.1.2}% 8
\BOOKMARK [1][-]{section.1.3}{Router Architecture}{chapter.1}% 9
\BOOKMARK [2][-]{subsection.1.3.1}{Packet Blocking}{section.1.3}% 10
\BOOKMARK [0][-]{chapter.2}{Wahrscheinlichkeit und Statistik}{}% 11
\BOOKMARK [1][-]{section.2.1}{Deskriptive Statistik}{chapter.2}% 12
File added
File added
\documentclass{scrreprt}
% --- Encoding & fonts --------------------------------------------------
% The sources contain German umlauts; T1 font encoding gives correct
% hyphenation and copyable PDF text for them.
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
% --- Page layout -------------------------------------------------------
\usepackage[left=20mm, right=20mm]{geometry}
% --- Graphics, tables, colour ------------------------------------------
\usepackage{graphicx}
\usepackage{tabularx}
\usepackage{xcolor}
\usepackage[framemethod=TikZ]{mdframed}
\usepackage{listings}
\usepackage{soul}
% --- Math / theorem support --------------------------------------------
\usepackage{amsthm}
% hyperref must be loaded LAST (standard load-order rule); hidelinks
% suppresses the default red boxes around links.
\usepackage[hidelinks]{hyperref}
\theoremstyle{definition}
% Numbered "Example" theorem environment sharing the subsection counter.
\newtheorem{exmp}[subsection]{\textbf{Example}}
% Grey, borderless rounded box wrapping a numbered example.
\newenvironment{example}{\begin{mdframed}[backgroundcolor=gray!20, topline=false, bottomline=false, leftline=false, rightline=false, innertopmargin=0pt, roundcorner=5pt]\begin{exmp}$ $\newline}
{\end{exmp}\end{mdframed}}
% Rounded framed box for prerequisite notes.
\newenvironment{prereq}{\begin{mdframed}[roundcorner=5pt]\textbf{Prerequisites}}
{\end{mdframed}}
% Highlighted monospace reference to a lecture slide, e.g. \folie{F6S2}.
\newcommand*{\folie}[1]{\begin{ttfamily}\sethlcolor{lightgray}\hl{#1}\end{ttfamily}}
\begin{document}
\title{Vorlesungszusammenfassungen}% fixed typo: missing "s" (Vorlesungs-)
\author{Julind Mara}
\maketitle
\tableofcontents
\include{telematik}
\include{gws}
\end{document}
\ No newline at end of file
\contentsline {chapter}{\numberline {1}Telematik}{3}{chapter.1}
\contentsline {section}{\numberline {1.1}Erinnerung aus Rechnernetze}{3}{section.1.1}
\contentsline {section}{\numberline {1.2}Routers}{3}{section.1.2}
\contentsline {subsection}{\numberline {1.2.1}Overview}{3}{subsection.1.2.1}
\contentsline {subsection}{\numberline {1.2.2}The "Longest Prefix Matching" - \textit {LPM}}{4}{subsection.1.2.2}
\contentsline {subsubsection}{\nonumberline Efficent data structures for LPM}{4}{section*.2}
\contentsline {subsection}{\numberline {1.2.4}Tries}{4}{subsection.1.2.4}
\contentsline {subsubsection}{\nonumberline Binary Trie and Path Compression}{5}{section*.3}
\contentsline {subsubsection}{\nonumberline Fixed Stride Multibit Trie}{5}{section*.4}
\contentsline {subsubsection}{\nonumberline An overall Evaluation of all Tries}{5}{section*.5}
\contentsline {subsection}{\numberline {1.2.6}Hash Tables}{6}{subsection.1.2.6}
\contentsline {subsection}{\numberline {1.2.7}LPM in Hardware}{6}{subsection.1.2.7}
\contentsline {subsubsection}{\nonumberline RAM}{6}{section*.6}
\contentsline {subsubsection}{\nonumberline Binary CAM}{6}{section*.7}
\contentsline {subsubsection}{\nonumberline Ternary CAM}{6}{section*.8}
\contentsline {section}{\numberline {1.3}Router Architecture}{6}{section.1.3}
\contentsline {subsection}{\numberline {1.3.1}Packet Blocking}{7}{subsection.1.3.1}
\contentsline {chapter}{\numberline {2}Wahrscheinlichkeit und Statistik}{8}{chapter.2}
\contentsline {section}{\numberline {2.1}Deskriptive Statistik}{8}{section.2.1}
\relax
\providecommand\hyper@newdestlabel[2]{}
\providecommand\zref@newlabel[2]{}
\@writefile{toc}{\contentsline {chapter}{\numberline {1}Telematik}{3}{chapter.1}}
\@writefile{lof}{\addvspace {10\p@ }}
\@writefile{lot}{\addvspace {10\p@ }}
\@writefile{lol}{\addvspace {10\p@ }}
\@writefile{toc}{\contentsline {section}{\numberline {1.1}Erinnerung aus Rechnernetze}{3}{section.1.1}}
\@writefile{lot}{\contentsline {table}{\numberline {1.1}{\ignorespaces {\"U}bersetzung der Schichten}}{3}{table.1.1}}
\@writefile{toc}{\contentsline {section}{\numberline {1.2}Routers}{3}{section.1.2}}
\@writefile{toc}{\contentsline {subsection}{\numberline {1.2.1}Overview}{3}{subsection.1.2.1}}
\@writefile{toc}{\contentsline {subsection}{\numberline {1.2.2}The "Longest Prefix Matching" - \textit {LPM}}{4}{subsection.1.2.2}}
\@writefile{toc}{\contentsline {subsubsection}{\nonumberline Efficent data structures for LPM}{4}{section*.2}}
\@writefile{toc}{\contentsline {subsection}{\numberline {1.2.4}Tries}{4}{subsection.1.2.4}}
\@writefile{toc}{\contentsline {subsubsection}{\nonumberline Binary Trie and Path Compression}{5}{section*.3}}
\@writefile{toc}{\contentsline {subsubsection}{\nonumberline Fixed Stride Multibit Trie}{5}{section*.4}}
\@writefile{toc}{\contentsline {subsubsection}{\nonumberline An overall Evaluation of all Tries}{5}{section*.5}}
\@writefile{toc}{\contentsline {subsection}{\numberline {1.2.6}Hash Tables}{6}{subsection.1.2.6}}
\@writefile{toc}{\contentsline {subsection}{\numberline {1.2.7}LPM in Hardware}{6}{subsection.1.2.7}}
\@writefile{toc}{\contentsline {subsubsection}{\nonumberline RAM}{6}{section*.6}}
\@writefile{toc}{\contentsline {subsubsection}{\nonumberline Binary CAM}{6}{section*.7}}
\@writefile{toc}{\contentsline {subsubsection}{\nonumberline Ternary CAM}{6}{section*.8}}
\@writefile{toc}{\contentsline {section}{\numberline {1.3}Router Architecture}{6}{section.1.3}}
\@writefile{toc}{\contentsline {subsection}{\numberline {1.3.1}Packet Blocking}{7}{subsection.1.3.1}}
\@setckpt{telematik}{
\setcounter{page}{8}
\setcounter{equation}{0}
\setcounter{enumi}{5}
\setcounter{enumii}{0}
\setcounter{enumiii}{0}
\setcounter{enumiv}{0}
\setcounter{footnote}{0}
\setcounter{mpfootnote}{0}
\setcounter{part}{0}
\setcounter{chapter}{1}
\setcounter{section}{3}
\setcounter{subsection}{1}
\setcounter{subsubsection}{0}
\setcounter{paragraph}{0}
\setcounter{subparagraph}{0}
\setcounter{figure}{0}
\setcounter{table}{1}
\setcounter{Item}{5}
\setcounter{Hfootnote}{0}
\setcounter{bookmark@seq@number}{10}
\setcounter{mdf@globalstyle@cnt}{1}
\setcounter{mdfcountframes}{0}
\setcounter{mdf@env@i}{0}
\setcounter{mdf@env@ii}{0}
\setcounter{mdf@zref@counter}{0}
\setcounter{lstnumber}{1}
\setcounter{section@level}{2}
\setcounter{lstlisting}{0}
}
This diff is collapsed.
\chapter{Telematik}
Da die Vorlesungsfolien auf Englisch sind, ist dieser Teil der Zusammenfassung auf Englisch.
\section{Erinnerung aus Rechnernetze}
{\"U}bersetzung einige wichtige Begriffe. Einzige Teil auf Deutsch.
\begin{table}[!h]
\centering
\begin{tabular}{l | r l}
\hline
Layer No. & DE & EN \\
\hline
Layer 1 & Physikalische Schicht & Physical Layer \\
Layer 2 & Sicherungsschicht & Data Link Layer \\
Layer 3 & Vermittlungsschicht & Network Layer \\
Layer 4 & Transportschicht & Transport Layer \\
Layer 5 & Anwendungsschicht & Application Layer \\
\hline
\end{tabular}
\caption{{\"U}bersetzung der Schichten}
\end{table}
\section{Routers}
\subsection{Overview}
A router's job is primarily an address lookup in a lookup table, and the correct forwarding of data from an input port to an output port. \par
There are 3 main types of routers: \\
\begin{mdframed}
{\centering
\newcolumntype{C}{>{\centering\arraybackslash}X}
\begin{tabularx}{\textwidth}{C c C c C}
Edge Router & $\Leftrightarrow$ & Enterprise Router & $\Leftrightarrow$ & Core Router \\
\textit{The typical home router} & &\textit{Local switches in neighbourhoods by connecting end systems} & & \textit{used by service providers and need to handle large amounts of data at very fast lookup speeds} \\
\end{tabularx}
\par}
\end{mdframed}
\par
An IP Router contains 2 main parts:
\begin{itemize}
\item The Data Part - Main job is \textbf{Forwarding} and may operate on 3 levels: Hubs (L1), Bridges (L2), Routers (L3).
\item The Control Part - Main job is Path determination aka \textbf{Routing} which utilizes Algorithms
\end{itemize}\par
The basic idea is that a Routing Algorithm (Protocol) defines the forwarding table for every router in the network, and this way, a route (Path) that a packet would take, is predefined. A nice example of this is on \folie{F6S2}.
Therefore a set of challenges needs to be fulfilled:
\begin{itemize}
\item Forwarding at line speed (independent of Packet size)
\item Short queues (therefore faster forwarding)
\item Small forwarding tables
\end{itemize}
\subsection{The "Longest Prefix Matching" - \textit{LPM}}
\begin{prereq}
A lookup in the Fwd. Table consists of identifying the matching block in the prefix part of the table (that is, checking which prefix matches the prefix of the incoming IP address) then forwarding the data to the appropriate port. Continuous blocks of addresses per output are beneficial as they enable scalability.
\end{prereq}
By that definition a problem arises: \textit{If there are 2 prefixes that match a given IP address, which one do I pick?} The given solution to the problem is selecting the most specific prefix that matches the IP, and this means selecting the longest prefix, which is what the name ``Longest Prefix Matching'' implies. \par
\begin{example}
An IP comes in the forwarding table (156.255.7.17; note an IPv4 octet can be at most 255). A lookup is made and 2 prefixes match (156.255... and 156...).
According to the LPM method, (156.255...) is selected.
\end{example}
\subsubsection{Efficient data structures for LPM}
A good data structure should satisfy 3 requirements:
\begin{itemize}
\item Fast Lookup
\item Low Memory
\item Fast updates
\end{itemize}
\subsection{Tries}
\begin{prereq}
With these requirements a good data structure are \textit{Tries}. Some variables to enable comparisons later:
\begin{itemize}
\item N (Number of prefixes)
\item W (Length of prefix)
\item k (Length of stride - used in a type of trie, covered later)
\end{itemize}
\end{prereq}
\subsubsection{Binary Trie and Path Compression}
The main idea is when an IP Lookup takes place, there is a tree-like structure that enables a bit by bit step down in the tree to find the right prefix. The chain of read bits that leads to a node defines the prefix (you can say that the prefix is contained in the node), and the tree branch contains the read bit. A good example is on \folie{F26S2}.
This is not ideal as there could be (either long or short) chains of one-child nodes. This wastes memory and is not efficient. A solution to this problem would be compressing these paths using \textit{Path Compression}. \par
In comparison to the binary trie without compression tree, a branch contains 1 bit that is used for the comparison, and underneath the node, the bit position to be compared is written. Example on \folie{F30S2} and \folie{F31S2}.
\subsubsection{Fixed Stride Multibit Trie}
This is sort of like a binary trie, but instead of comparing only one bit at a time, a set number of bits (fixed stride) is simultaneously compared. It is a tree with $2^k$ nodes and branches, where the branches are all numbered from 0 to $2^k - 1$ in binary. Example on \folie{F33S2}. But there is a catch. Typically prefixes are not of the same length, but strides are; therefore you can have a situation where a stride matches $>1$ prefixes. To solve this, \textbf{Prefix Expansion} is used.
This Prefix Expansion is basically expanding the tree with more multibit tries at certain nodes, to cover all prefixes. This is done in this manner:
\begin{example}
Let's say you have a prefix table with the longest prefix of length $l$.
\begin{enumerate}
\item You generate a Multibit Trie Tree with a stride of $k_{1} \leq l$.
\item You then assign each node the \textbf{loosest Prefix} that matches the stride. Careful: if a stride matches \textbf{exactly} with a prefix, use this instead, and therefore this node needs no more expansion. If none matches, leave empty.
\item You then create a Multibit Trie Tree with a stride of $k_{2} \leq l - k_{1}$ at \textbf{each loosely ``prefixed'' node}, and repeat step 2.
\item You repeat step 3 recursively at all loosely ``prefixed'' nodes until there are no more left.
\item In the end the sum of all strides $k_{1}, \dots, k_{i}$ should equal $l$.
\end{enumerate}
\end{enumerate}
\end{example}
\subsubsection{An overall Evaluation of all Tries}
All speeds are measured worst case scenario. \par
{\centering
\newcolumntype{C}{>{\centering\arraybackslash}X}
\begin{tabularx}{\textwidth}{|C|C|C|C|}
\hline
& Lookup Speed & Memory Req. & Update Speed \\
\hline
Binary trie & O(W) & O(N*W) & O(W)\\
\hline
Binary trie with path compression & O(W) & O(N) & O(W) \\
\hline
Multibit Trie with fixed stride & O($\frac{W}{k}$) & O($\frac{2^kNW}{k}$) & O($\frac{W}{k} + 2^k$) \\
\hline
\end{tabularx}
\par}
\subsection{Hash Tables}
Here we try to improve the lookup speed of tries with hash tables as they can perform in O(1). But LPM does not work with Hash Tables itself so we use a combination of tries and hash tables. Basically when a hashtable lookup is made, and when a match is found, all good. If no match is found, then you perform a trie lookup, and store this lookup in the table so when its repeated, no trie lookup is needed. This is good when lookups show locality characteristics, but not useful in internet backbones.
\subsection{LPM in Hardware}
\subsubsection{RAM}
Basic idea is like the real usage of RAM, so a lookup is instant - with a single memory access. Uses the IP address, as RAM address which in turn points to a port stored in the RAM memory cell. But its pretty bad for scalability as the memory size grows exponentially with the increase in address size.
\subsubsection{Binary CAM}
Instead of using an address to find the port as in RAM, CAM uses a piece of data (an IP in bits) to find an address. Example \folie{F43S2}. It floods all the lines of the CAM registers with the data, and the first matching address that is found (because there could be multiple matches, but the lines are ordered in such a way that the more precise match is the first one) is used to find the corresponding port, using an adjacency table (not part of CAM). Inner workings on \folie{F45S2}
\subsubsection{Ternary CAM}
This allows LPM because the CAM registers can use Don't Care States, to fulfill the needed maximum length of a prefix. This practically means a sort of a multibit trie. The order of the elements in the TCAM is, longest prefix at the top and the shortest at the bottom. This way when a matched prefix is found, the first matching one in the list is the more precise, as it is longer. \par
TCAMs are very fast at a 1 lookup per clock cycle, but it has high energy demand and updates are really slow, as all registers need to be correctly reordered, otherwise the first match might not be the most precise. All in all, not scalable.
\section{Router Architecture}
A router has 3 basic components:
\begin{itemize}
\item Network Interfaces (Realizes functions of L1 and L2)
\item Routing processor (With an appropriate routing protocol)
\item Switch Fabric (Realizes internal forwarding of packets from an input to a desired output)
\end{itemize}
An example schematic on \folie{F50S2}
\subsection{Packet Blocking}
The way a router is designed, a packet blocking situation can occur, where $>1$ Packet can arrive at the same time in different input ports, and use the same output port. To solve this, here are some methods:
\begin{itemize}
\item \textbf{Overprovisioning} \\ Switch Fabric operates at a faster speed than the individual input ports.
\item \textbf{Buffering} \\ Packets are buffered somewhere in the Router. Typically in:
\begin{itemize}
\item Input. Easy to make. A FIFO Strategy or a Scheduler is used to serve the packets waiting in line. A typical problem of this buffer placement is that a packet buffered behind a blocking packet in front could be serviced but can't, because it is waiting for the blocked one to leave. Throughput 60--70\%.
\item Output. FIFO or Non-Blocking. Throughput nearly 80-85\%. But fast memory and additional input buffer needed for packet acceptance.
\item Distributed - Idea is to buffer inside the switch Matrix. Optimal throughput but high memory requirements.
\item Central. One Big Buffer for everything, therefore low memory requirements but really fast memory.
\end{itemize}
\item \textbf{Backpressure} \\ Signal overload to input to reduce load
\item \textbf{Parallel switch fabric} \\ Multiple packets served at the same time to the output port. But requires higher access speed to cope with the feed size resulting from parallelism.
\end{itemize}
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment