some changes

master
Frederik Maaßen 2 years ago
parent ad96652243
commit cf122da4dc
  1. 6
      implementation/topologies/4r4h_topo.py
  2. 4
      implementation/topologies/6r4h_topo.py
  3. 11
      thesis/content/evaluation/minimal_network.tex

@ -322,10 +322,10 @@ class FourRoutersFourHosts(CustomTopo):
"use_pre_defined_function": True,
"separate_definitions": True,
"command_pre": ("measure_packet_flow", (
'h1', 'h4', '10.4.0.101', ["r1", "r2", "r3", "r4"], 30, 1, "udp_before_failure", [0, 20000],
'h1', 'h4', '10.4.0.101', ["r1", "r2", "r3", "r4"], 30, 1, "udp_before_failure", [0, 22000],
"UDP Packet flow on all routers before failure", "udp", 100)),
"command_post": ("measure_packet_flow", (
'h1', 'h4', '10.4.0.101', ["r1", "r2", "r3", "r4"], 30, 1, "udp_after_failure", [0, 20000],
'h1', 'h4', '10.4.0.101', ["r1", "r2", "r3", "r4"], 30, 1, "udp_after_failure", [0, 22000],
"UDP Packet flow on all routers after failure", "udp", 100)),
},
@ -351,7 +351,7 @@ class FourRoutersFourHosts(CustomTopo):
"execute": {
"use_pre_defined_function": True,
"command": ("measure_packet_flow", (
'h1', 'h4', '10.4.0.101', ["r1", "r2", "r3", "r4"], 30, 1, "udp_concurrent_failure", [0, 20000],
'h1', 'h4', '10.4.0.101', ["r1", "r2", "r3", "r4"], 30, 1, "udp_concurrent_failure", [0, 22000],
"UDP Packet flow on all routers before failure", "udp", 100)),
},

@ -408,7 +408,7 @@ class SixRoutersFourHosts(CustomTopo):
'h1', 'h6', '10.6.0.101', ["r1", "r3", "r5", "r6"], 30, 1, "udp_before_failure", [0, 20000],
"UDP Packet flow on routers before failure", "udp", 100)),
"command_post": ("measure_packet_flow", (
'h1', 'h6', '10.6.0.101', ["r1", "r3", "r5", "r6"], 30, 1, "udp_after_failure", [0, 20000],
'h1', 'h6', '10.6.0.101', ["r1", "r3", "r5", "r6"], 30, 1, "udp_after_failure", [0, 22000],
"UDP Packet flow on routers after failure", "udp", 100)),
},
@ -434,7 +434,7 @@ class SixRoutersFourHosts(CustomTopo):
"execute": {
"use_pre_defined_function": True,
"command": ("measure_packet_flow", (
'h1', 'h6', '10.6.0.101', ["r1", "r3", "r5", "r6"], 30, 1, "udp_concurrent_failure", [0, 20000],
'h1', 'h6', '10.6.0.101', ["r1", "r3", "r5", "r6"], 30, 1, "udp_concurrent_failure", [0, 22000],
"UDP Packet flow on routers before failure", "udp", 100)),
},

@ -6,7 +6,7 @@
\caption{Minimal network}
\label{fig:evaluation_minimal_network}
\end{figure}
\subsection{Bandwidth}
\subsection{TCP Bandwidth}
We performed multiple tests measuring the influence of occurring failures on the bandwidth. These were run using \textit{iperf} and a logging interval of \SI{0.5}{\second}. All data was collected from the output of the \textit{iperf} server.
\subsubsection{With FRR}
@ -30,7 +30,7 @@ We performed multiple tests of influences to the bandwidth with occurring failur
\label{fig:evaluation_minimal_bandwidth_wo_sc}
\end{figure}
We performed a bandwidth test on the minimal network, a topology with 4 routers and 3 hosts. The failure occurred after the first run and before the second run of the test in \cref{fig:evaluation_minimal_bandwidth_wo_sc}. The bandwidth does not change between runs. This is to be expected as additional hops on the path of the packet do not influence the total throughput that can be achieved, and while the looped path is passed by the traffic in both directions, the duplex nature of ethernet connections does not impose any limitations in this regard.
We performed a TCP bandwidth test on the minimal network, a topology with 4 routers and 3 hosts. The failure occurred after the first run and before the second run of the test in \cref{fig:evaluation_minimal_bandwidth_wo_sc}. The bandwidth does not change between runs. This is to be expected as additional hops on the path of the packet do not influence the total throughput that can be achieved, and while the looped path is passed by the traffic in both directions, the duplex nature of ethernet connections does not impose any limitations in this regard.
\begin{figure}
\centering
@ -83,6 +83,8 @@ In our further tests we observed that the bandwidth alone does not change heavil
As can be seen in \cref{fig:evaluation_minimal_bandwidth_sc} and \cref{fig:evaluation_minimal_bandwidth_concurrent_sc}, using ShortCut had no further influence on the achieved throughput. This is to be expected, as longer or shorter paths will only influence throughput if e.g. a link with a lower bandwidth is contained in an additional path.
\subsection{UDP Bandwidth}
\subsection{Two concurrent data transfers}
In this test we evaluated the bandwidth between H1 and H4 with a concurrent data transfer from H2 to H1. Both transfers were run with a limitation of \SI{100}{Mbps}, which constitutes the maximum allowed bandwidth in this test.
\subsubsection{With FRR}
@ -256,7 +258,7 @@ The results in the network before a failure are as to be expected and can be see
After a failure all four routers receive packets as can be seen in \cref{fig:evaluation_minimal_packet_flow_wo_sc_b}, but router R1 now receives the most packets, with an average of around 1500 packets, while routers R3 and R4 receive roughly the same amount of packets as before the failure, at an average of around 1000 packets. Router R2 receives the fewest packets, with an average of around 500 packets.
This is most likely caused by the looped path and the implications for packet travel this has. Router R1 receives all packets that are sent to H4 from H1 twice, once sending them to R2 and the second time when receiving the packets back from R2 to send them to R3. But while all packets \textbf{sent} from H1 pass R1 twice, acknowledgements sent back by the \textit{iperf} server on H4 will only pass R1 once, as R1 would not send packets with H1 as destination to R2. Router R2 on the other hand only receives packets sent to H4 but none of the ACKs sent back. This is why, when compared to the average packet count of all routers in \cref{fig:evaluation_minimal_packet_flow_wo_sc_a}, R2 receives roughly half of all packets a router would normally receive as TCP specifies that for each received packet TCP will send an ACK as answer. This also explains why router R1 forwards an average of around 1500 packets per second, forwarding data packets with around 500 packets per second twice and forwarding acknowledgement packets once with also 500 packets per second, producing an additional 50\% load on the router.
This is most likely caused by the looped path and the implications for packet travel this has. Router R1 receives all packets that are sent to H4 from H1 twice, once sending them to R2 and the second time when receiving the packets back from R2 to send them to R3. But while all packets sent from H1 pass R1 twice, acknowledgements sent back by the \textit{iperf} server on H4 will only pass R1 once, as R1 would not send packets with H1 as destination to R2. Router R2 on the other hand only receives packets sent to H4 but none of the ACKs sent back. This is why, when compared to the average packet count of all routers in \cref{fig:evaluation_minimal_packet_flow_wo_sc_a}, R2 receives roughly half of all packets a router would normally receive as TCP specifies that for each received packet TCP will send an ACK as answer. This also explains why router R1 forwards an average of around 1500 packets per second, forwarding data packets with around 500 packets per second twice and forwarding acknowledgement packets once with also 500 packets per second, producing an additional 50\% load on the router.
Aside from the changed path and therefore the inclusion of router R3 in this path, routers R3 and R4 are unaffected by the failure, forwarding each packet once.
@ -297,7 +299,7 @@ Reconfiguration of routers in Mininet does not reset the \textit{nftables} count
\label{fig:evaluation_minimal_packet_flow_concurrent_sc}
\end{figure}
When running the TCP packet flow measurements with an implementation of ShortCut running on the network however, the results change drastically, and as expected all packets sent by the \textit{iperf} transfer are forwarded by router R2 on the original route, but after the failure was introduced the router does not forward any packets. ShortCut has effectively cut out router R2 from the route, forwarding packets from R1 to R3 directly. All remaining routers R1, R3 and R4 now receive all packets and no router forwards any packets twice.
When running the TCP packet flow measurements with an implementation of ShortCut running on the network however, the results change drastically, and as expected all packets sent by the \textit{iperf} transfer are forwarded by router R2 on the original route. After the failure was introduced the router R2 does not forward any packets. ShortCut has effectively cut out router R2 from the route, forwarding packets from R1 to R3 directly. All remaining routers R1, R3 and R4 now receive all packets and no router forwards any packets twice.
\subsection{Packet flow - UDP}
@ -328,6 +330,7 @@ We repeated the packet flow test in \cref{tcp_packet_flow} using UDP to inspect
\label{fig:evaluation_minimal_packet_flow_udp_concurrent_wo_sc}
\end{figure}
When running the packet flow test measuring UDP packets, the amount of packets changed drastically when compared to TCP packets. This is caused by the different window sizes \textit{iperf} uses.
\subsubsection{With FRR and ShortCut}
\begin{figure}
\centering