some small changes

master
Frederik Maaßen 2 years ago
parent 515b6c60c3
commit 1f21ef3a74
  1. implementation/mininet_controller.py (1 line changed)
  2. implementation/topologies/4r4h_topo.py (6 lines changed)
  3. implementation/topologies/6r4h_topo.py (6 lines changed)
  4. implementation/topologies/8r4h_topo.py (8 lines changed)
  5. thesis/content/evaluation/evaluation.tex (3 lines changed)
  6. thesis/content/evaluation/minimal_network.tex (26 lines changed)
  7. thesis/content/implementation/test_network.tex (34 lines changed)

@@ -364,7 +364,6 @@ def measure_latency(net, sender, dest_ip, length, interval, unique_test_name, y_
shortcut_flag = "sc"
tmp_file_name = f"/tmp/{sender}_to_{dest_ip}_ping_{unique_test_name}_{shortcut_flag}"
packet_count = int(length / interval)
sleep(1)
ping_command = f"ping -c {packet_count} -i {interval} {dest_ip} > {tmp_file_name}.out &"
net[sender].cmd(ping_command)
info(f"\nUsing ping command: {ping_command}")

@@ -130,8 +130,8 @@ class FourRoutersFourHosts(CustomTopo):
"execute": {
"use_pre_defined_function": True,
"separate_definitions": True,
"command_pre": ("measure_bandwidth", (['h1', 'h4'], '10.4.0.101', 15, 1, "tcp_pre_failure", "TCP Bandwidth before failure", "tcp", 100, [0, self.limit+self.limit*0.1])),
"command_post": ("measure_bandwidth", (['h1', 'h4'], '10.4.0.101', 15, 1, "tcp_post_failure", "TCP Bandwidth after failure", "tcp", 100, [0, self.limit+self.limit*0.1]))
"command_pre": ("measure_bandwidth", (['h1', 'h4'], '10.4.0.101', 30, 0.5, "tcp_pre_failure", "TCP Bandwidth before failure", "tcp", 100, [0, self.limit+self.limit*0.1])),
"command_post": ("measure_bandwidth", (['h1', 'h4'], '10.4.0.101', 30, 0.5, "tcp_post_failure", "TCP Bandwidth after failure", "tcp", 100, [0, self.limit+self.limit*0.1]))
},
"failures": [
{
@@ -245,7 +245,7 @@ class FourRoutersFourHosts(CustomTopo):
"failures": [
{
"type": "timer",
"timing": 16,
"timing": 15,
"execute": {
"use_pre_defined_function": True,
"command": (

@@ -187,8 +187,8 @@ class SixRoutersFourHosts(CustomTopo):
"execute": {
"use_pre_defined_function": True,
"separate_definitions": True,
"command_pre": ("measure_bandwidth", (['h1', 'h6'], '10.6.0.101', 15, 1, "tcp_pre_failure", "TCP Bandwidth before failure", "tcp", 100, [0, self.limit+self.limit*0.1])),
"command_post": ("measure_bandwidth", (['h1', 'h6'], '10.6.0.101', 15, 1, "tcp_post_failure", "TCP Bandwidth after failure", "tcp", 100, [0, self.limit+self.limit*0.1]))
"command_pre": ("measure_bandwidth", (['h1', 'h6'], '10.6.0.101', 30, 0.5, "tcp_pre_failure", "TCP Bandwidth before failure", "tcp", 100, [0, self.limit+self.limit*0.1])),
"command_post": ("measure_bandwidth", (['h1', 'h6'], '10.6.0.101', 30, 0.5, "tcp_post_failure", "TCP Bandwidth after failure", "tcp", 100, [0, self.limit+self.limit*0.1]))
},
"failures": [
{
@@ -302,7 +302,7 @@ class SixRoutersFourHosts(CustomTopo):
"failures": [
{
"type": "timer",
"timing": 16,
"timing": 15,
"execute": {
"use_pre_defined_function": True,
"command": (

@@ -323,7 +323,7 @@ class EightRoutersFourHosts(CustomTopo):
}
]
},
"h1_to_h8_failure_tcp_performance_test_graph": {
"tcp_bandwidth_intermediate": {
"pre_execution": {
"use_pre_defined_function": False,
"command": lambda net: net.iperf((net['h1'], net['h8'])),
@@ -334,8 +334,8 @@ class EightRoutersFourHosts(CustomTopo):
"execute": {
"use_pre_defined_function": True,
"separate_definitions": True,
"command_pre": ("measure_bandwidth", (['h1', 'h8'], '10.8.0.101', 15, 1, "tcp_pre_failure", "TCP Bandwidth before failure", "tcp", 100, [0, self.limit+self.limit*0.1])),
"command_post": ("measure_bandwidth", (['h1', 'h8'], '10.8.0.101', 15, 1, "tcp_post_failure", "TCP Bandwidth after failure", "tcp", 100, [0, self.limit+self.limit*0.1]))
"command_pre": ("measure_bandwidth", (['h1', 'h8'], '10.8.0.101', 30, 0.5, "tcp_pre_failure", "TCP Bandwidth before failure", "tcp", 100, [0, self.limit+self.limit*0.1])),
"command_post": ("measure_bandwidth", (['h1', 'h8'], '10.8.0.101', 30, 0.5, "tcp_post_failure", "TCP Bandwidth after failure", "tcp", 100, [0, self.limit+self.limit*0.1]))
},
"failures": [
{
@@ -449,7 +449,7 @@ class EightRoutersFourHosts(CustomTopo):
"failures": [
{
"type": "timer",
"timing": 16,
"timing": 15,
"execute": {
"use_pre_defined_function": True,
"command": (

@@ -9,3 +9,6 @@ We start with our minimal network in section \ref{eva_minimal_network}, followed
\input{content/evaluation/minimal_network}
\input{content/evaluation/failure_path_networks}
\section{Discussion of results}

@@ -30,7 +30,7 @@ We performed multiple tests of influences to the bandwidth with occurring failur
\label{fig:evaluation_minimal_bandwidth_wo_sc}
\end{figure}
We performed a bandwidth test on the minimal network, a topology with 4 routers and 3 hosts. The failure occurred after the first run and before the second run of the test in \ref{fig:evaluation_minimal_bandwidth_wo_sc}. As can be seen the bandwidth does not change between runs. This is to expected as additional hops on the path of the packet do not influence the total throughput that can be achieved, and while the looped path is passed by the traffic in both directions, the duplex nature of ethernet connections does not impose any limitations in this regard.
We performed a bandwidth test on the minimal network, a topology with 4 routers and 3 hosts. The failure occurred after the first run and before the second run of the test in \cref{fig:evaluation_minimal_bandwidth_wo_sc}. The bandwidth does not change between runs. This is to be expected, as additional hops on the path of a packet do not influence the total throughput that can be achieved, and while the looped path is traversed by traffic in both directions, the full-duplex nature of Ethernet links does not impose any limitations in this regard.
\begin{figure}
\centering
@@ -39,9 +39,9 @@ We performed a bandwidth test on the minimal network, a topology with 4 routers
\label{fig:evaluation_minimal_bandwidth_concurrent_wo_sc}
\end{figure}
In \ref{fig:evaluation_minimal_bandwidth_concurrent_wo_sc} however we introduced the failure while the bandwidth test was running. The test was run for \SI{30}{\second} and the failure was introduced at around 15 seconds, which caused a drop in performance. The log output of the sending client reported the need to resend 22 packets in this time period; in all transfers before no packet loss occurred.
In \cref{fig:evaluation_minimal_bandwidth_concurrent_wo_sc}, however, we introduced the failure while the bandwidth test was running. The test ran for \SI{30}{\second} and the failure was introduced at around 15 seconds, which caused a drop in performance. The log output of the sending client reported the need to resend 22 packets in this time period; no packet loss had occurred in any of the preceding transfers.
In addition to the already deployed transfer limit on the links between routers and hosts, we also added the bandwidth parameter -b to the execution of the \textit{iperf} client and limited the throughput to \SI{100}{Mbps}. This was done because we experienced bursts in the bandwidth test after we introduced a failure concurrent to the bandwidth test as can be seen in figure \ref{fig:evaluation_minimal_bandwidth_concurrent_wo_sc}, exceeding the limit of the network by more than 50\%. Unfortunately the additional limit did not change the behaviour. Upon further investigation we found one possible reason for this burst.
In addition to the already deployed transfer limit on the links between routers and hosts, we also added the bandwidth parameter -b to the execution of the \textit{iperf} client and limited the throughput to \SI{100}{Mbps}. This was done because, as can be seen in \cref{fig:evaluation_minimal_bandwidth_concurrent_wo_sc}, we experienced bursts in the bandwidth test after introducing a failure concurrently with the measurement, exceeding the limit of the network by more than 50\%. Unfortunately the additional limit did not change the behaviour. Upon further investigation we found one possible reason for this burst.
When the connection between routers is cut, our test framework uses the Python API to deactivate the corresponding interfaces on both affected routers. This is done in sequence. In this example the interface on router R2 was deactivated first and the interface on router R4 second. We implemented this behaviour after observing the default behaviour of the Mininet network. For example, if the connection between router R2 and router R4 were only cut by deactivating the interface on router R4, router R2 would not recognize the failure and would lose all packets sent to the link. Because we deactivate the interfaces in sequence and the Mininet Python API introduces delay to the operation, the interface on R2 is deactivated while the interface on R4 still receives packets already on the link and keeps sending packets to the deactivated interface on R2 for a short period of time. All packets sent to R2 in this time period are lost. But because the \textit{iperf} server itself does not send any actual data, only acknowledgements (ACK) for already received data, only ACKs are lost this way.
@@ -251,14 +251,22 @@ To show the amount of TCP packets being forwarded on each router, we measured th
\label{fig:evaluation_minimal_packet_flow_concurrent_wo_sc}
\end{figure}
The results in the network before a failure are as to be expected and can be seen in \cref{minimal_packet_flow_wo_sc_a}. Each router on the route from H1 to H4, which includes R1, R2 and R4, report the same amount of packets at each point of measurement. While the packet count fluctuates during the measurement no packet loss was reported and the bandwidth was at an average of \SI{95}{Mbps} during the whole run of the test. This is why we assume that the fluctuations can be attributed to the mechanisms used in \textit{iperf}.
The results in the network before a failure are as expected and can be seen in \cref{fig:evaluation_minimal_packet_flow_wo_sc_a}. Each router on the route from H1 to H4, which includes R1, R2 and R4, reports the same number of packets at each point of measurement. While the packet count fluctuates during the measurement, no packet loss was reported and the bandwidth averaged \SI{95}{Mbps} during the whole run of the test. This is why we assume that the fluctuations can be attributed to the mechanisms used in \textit{iperf}.
After a failure all four routers receive packets as can be seen in \cref{minimal_packet_flow_wo_sc_b}, but router R1 now receives most packets with an average of around 1500 packets while router R3 and R4 receive roughly the same amount of packets as before the failure at an average of around 1000 packets. Router R2 receives the least packets with an average of around 500 packets.
After a failure all four routers receive packets, as can be seen in \cref{fig:evaluation_minimal_packet_flow_wo_sc_b}, but router R1 now receives the most packets with an average of around 1500 packets, while routers R3 and R4 receive roughly the same number of packets as before the failure at an average of around 1000 packets. Router R2 receives the fewest packets with an average of around 500 packets.
This is most likely caused by the looped path and the implications for packet travel this has. Router R1 receives all packets that are sent to H4 from H1 twice, once sending them to R2 and the second time when receiving the packets back from R2 to send them to R3. But while all packets \textbf{sent} from H1 pass R1 twice, acknowledgements sent back by the \textit{iperf} server on H4 will only pass R1 once, as R1 would not send packets with H1 as destination to R2. Router R2 on the other hand only receives packets sent to H4 but none of the ACKs sent back. This is why, when compared to the average packet count of all routers in \cref{minimal_packet_flow_wo_sc_a}, R2 receives roughly half of all packets a router would normally receive as TCP specifies that for each received packet TCP will send an ACK as answer. This also explains why router R1 forwards an average of around 1500 packets per second, forwarding data packets with around 500 packets per second twice and forwarding acknowledgement packets once with also 500 packets per second, producing an additional 50\% load on the router.
This is most likely caused by the looped path and its implications for packet travel. Router R1 receives every packet sent from H1 to H4 twice, once when sending it to R2 and a second time when receiving it back from R2 to send it on to R3. But while all packets \textbf{sent} from H1 pass R1 twice, acknowledgements sent back by the \textit{iperf} server on H4 only pass R1 once, as R1 would not send packets destined for H1 to R2. Router R2 on the other hand only receives packets sent to H4 but none of the ACKs sent back. This is why, compared to the average packet count of all routers in \cref{fig:evaluation_minimal_packet_flow_wo_sc_a}, R2 receives roughly half of the packets a router would normally receive, as TCP acknowledges each received packet with an ACK. This also explains why router R1 forwards an average of around 1500 packets per second: it forwards the data packets, around 500 packets per second, twice, and the acknowledgement packets, also around 500 packets per second, once, producing an additional 50\% load on the router.
Aside from the changed path and therefore the inclusion of router R3 in this path, routers R3 and R4 are unaffected by the failure, forwarding each packet once.
When causing a failure while the bandwidth measurement is running, the failure itself causes a sudden drop to 0 forwarded packets for a short amount of time. This can be attributed to the time the routers take to change their configuration. The \textit{nftables} counter uses the "forward" netfilter hook, which is called in a pre-defined phase of the Linux network stack. Packets logged in the forwarding state have already received a routing decision, but because Mininet needs some time to reconfigure the interface to shut down a connection, imitating a failure, the packets have to wait for the router to be ready again.
This behaviour has also been observed when measuring the latency and introducing a failure concurrently in \cref{fig:evaluation_minimal_latency_concurrent_wo_sc}, which delayed packets that were to be delivered at the moment of failure.
Reconfiguration of routers in Mininet does not reset the \textit{nftables} counters either, which was confirmed in a quick test counting packets of an \textit{iperf} transfer and shutting down an interface on the same router. The packet count did not change after shutting down the interface.
\subsubsection{With FRR and ShortCut}
\label{minimal_packet_flow_with_frr_and_shortcut}
@@ -288,7 +296,7 @@ Aside from the changed path and therefore the inclusion of router R3 in this pat
\label{fig:evaluation_minimal_packet_flow_concurrent_sc}
\end{figure}
When running the TCP packet flow measurements with an implementation of ShortCut running on the network
When running the TCP packet flow measurements with an implementation of ShortCut running on the network however, the results change drastically and as expected: all packets sent by the \textit{iperf} transfer are forwarded by router R2 on the original route, but after the failure was introduced the router does not forward any packets. ShortCut has effectively cut out router R2 from the route, forwarding packets from R1 to R3 directly. All remaining routers R1, R3 and R4 now receive all packets and no router forwards any packets twice.
\subsection{Packet flow - UDP}
@@ -317,4 +325,6 @@ When running the TCP packet flow measurements with an implementation of ShortCut
\includegraphics[width=10cm]{tests/minimal_packet_flow_udp/packet_flow_udp_concurrent_wo_sc}
\caption{Packet flow on all routers with failure after 15 seconds}
\label{fig:evaluation_minimal_packet_flow_udp_concurrent_wo_sc}
\end{figure}
\end{figure}
\subsubsection{With FRR and ShortCut}

@@ -37,31 +37,38 @@ The execution phase contains the actual testing. A command can be executed and i
Additionally, failures can be introduced into the network. The key \textit{failures} contains a list of failures, each defined by a type and a list of commands. The implemented failure types are "intermediate" and "timer". An intermediate failure is executed after a first run of the execute command, which is then repeated after the failure function has been called. A failure of the type "timer" starts a timer at the beginning of the measurement and executes the defined command after a delay, provided through an attribute named "timing".
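The following sketch illustrates how such a failure definition might look inside a topology's test configuration; the keys follow the description above, while the concrete node, name and interface values are purely illustrative and the argument layout of \textit{connection\_shutdown} is assumed from its description below.

failures = [
    {
        # "timer" failures fire after "timing" seconds from the start of the
        # measurement; "intermediate" failures instead run between the pre- and
        # post-failure executions of the measurement command
        "type": "timer",
        "timing": 15,
        "execute": {
            "use_pre_defined_function": True,
            # (function name, argument tuple), resolved as described in the next section
            "command": ("connection_shutdown",
                        (["r2", "r4"], ["R2", "R4"], ["r2-eth2", "r4-eth1"])),
        },
    }
]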
\subsection{Implemented commands}
\subsection{Implemented commands for measurements}
\label{implementation_commands}
A command can either be a lambda that only depends on the net, which is passed into the lambda by default, or a function definition, i.e. a tuple of the name of the function and a second, nested tuple containing all arguments that need to be passed to the function, its length depending on the function to call.
The functions are defined in the \textit{mininet\_controller} and cannot be called directly, because the topologies are loaded dynamically and do not know the existing function definitions until they are loaded. Because of this the \textit{mininet\_controller} contains a dictionary called "functions", which uses the function name as key and holds an attribute called "callable" containing a lambda that calls the function with the provided arguments. In the following we list the existing functions and explain their functionality, which files are created and which output can be used for further testing and evaluation.
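A minimal sketch of this dispatch mechanism is shown below; the stub function and the argument layout are only illustrative, the real "functions" dictionary in the \textit{mininet\_controller} wraps the measurement functions described in the following subsections.

# Illustrative stub standing in for a function defined in the mininet_controller
def connection_shutdown(net, connection, names, interfaces):
    print(f"shutting down {interfaces} on {names}")

functions = {
    "connection_shutdown": {
        "callable": lambda net, args: connection_shutdown(net, *args),
    },
}

def execute_command(net, command):
    """A command is either a lambda taking the net, or a (name, arguments) tuple."""
    if callable(command):
        return command(net)
    name, args = command
    return functions[name]["callable"](net, args)

# usage: execute_command(net, ("connection_shutdown",
#            (["r2", "r4"], ["R2", "R4"], ["r2-eth2", "r4-eth1"])))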
\subsubsection{connection\_shutdown}
The function \textit{connection\_shutdown} deactivates one interface each on two specified Mininet hosts, effectively removing a connection between those two hosts. It can, however, be used to deactivate any interface on any device in Mininet and is oblivious to the network context and existing connections.
This function takes a connection, which is by definition a list with 2 elements containing the names of the linked hosts/nodes, a list with 2 elements containing the names of both components for printing, and a list with 2 elements containing the interfaces which should be shut down. It then accesses the components in the network and uses the cmd function provided by Mininet to execute "ifconfig *interface* down" on the component, which causes the interface to be deactivated. This is done on both sides of the connection to make sure that each router recognizes the missing connection instantly.
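A condensed sketch of this behaviour, assuming the argument layout described above; the actual implementation in the \textit{mininet\_controller} may differ in details such as logging.

from mininet.log import info

def connection_shutdown(net, connection, names, interfaces):
    # connection: the two linked node names, e.g. ["r2", "r4"]
    # names: the two printable component names, e.g. ["R2", "R4"]
    # interfaces: the interfaces to deactivate, e.g. ["r2-eth2", "r4-eth1"]
    for node, name, interface in zip(connection, names, interfaces):
        info(f"\nDeactivating {interface} on {name}")
        # "ifconfig <interface> down" deactivates the interface on this node
        net[node].cmd(f"ifconfig {interface} down")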
\subsubsection{measure\_bandwidth}
\label{measure_bandwidth}
The function \textit{measure\_bandwidth} is used to measure the bandwidth between two Mininet hosts in a Mininet network using \textit{iperf}, logging and parsing results after the measurement. Results are then sent to one of the plotting functions described in \cref{plotting}.
This function takes a 2-element list of hosts, the IP of the \textit{iperf} server host, a length parameter that defines how long the test will run in seconds, an interval parameter defining the interval between each log entry of \textit{iperf}, a unique test name for naming a created graph or log entry, a graph title in case a graph should be created, a flag that defines whether \textit{iperf} should use TCP or UDP as transfer protocol and a bandwidth to limit the transfer rate of \textit{iperf}.
The command starts an \textit{iperf} server. While experimenting we sometimes experienced unexpected behaviour causing tests to fail. There seemed to be an issue with the timing of the \textit{iperf} server and client commands, which were executed on the corresponding devices. Because the \textit{iperf} server and client were started detached and the Python script executed both commands directly one after another, the client seemed to try to connect to the server while the server was still in its startup process, denying the connection. This is why we added an additional delay between server and client command execution.
Both the client and the server log their output. The function then parses the output of the server, as results from the server seemed to be more consistent, into a format suitable for \textit{gnuplot}, which in turn creates a plot in the "/tmp/" directory of the virtual machine as described in section \ref{plotting}.
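A condensed sketch of this measurement, assuming the \textit{iperf3} command line tool; the parameter names mirror the description above and the parsing and plotting steps are omitted.

from time import sleep

def measure_bandwidth(net, hosts, server_ip, length, interval,
                      test_name, graph_title, flag="tcp", bandwidth=100, y_range=None):
    client, server = hosts
    out = f"/tmp/{client}_to_{server_ip}_iperf_{test_name}"
    udp_flag = "-u" if flag == "udp" else ""
    # start the iperf server detached, then wait so the client does not
    # try to connect while the server is still starting up
    net[server].cmd(f"iperf3 -s -i {interval} > {out}_server.out &")
    sleep(1)
    net[client].cmd(f"iperf3 -c {server_ip} {udp_flag} -b {bandwidth}M "
                    f"-t {length} -i {interval} > {out}_client.out &")
    sleep(length + 2)  # wait for the transfer to finish
    net[server].cmd("pkill -f 'iperf3 -s'")
    # the server log in f"{out}_server.out" would now be parsed for gnuplot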
\subsubsection{measure\_link\_usage\_bandwidth}
This function will use an two \textit{iperf} server-client pairs to start two separate bandwidth tests. The second \textit{iperf} measurement will use the port 5202 instead of the default port 5201 in case two servers are started on the same device.
The function reuses the \textit{measure\_bandwidth} function described in section \ref{measure_bandwidth}. We call the measurement done by the \textit{measure\_bandwidth} function the "main" measurement, the additional transfer used to evaluate the influence of another file transfer on the network the "additional" measurement.
The function \textit{measure\_link\_usage\_bandwidth} is used to start two separate \textit{iperf} measurements between two host-pairs and will log and parse results of both measurements. The results are then passed to the multi-plotting function described in \cref{plotting}.
The second \textit{iperf} measurement will use the port 5202 instead of the default port 5201 in case two servers are started on the same device.
The function reuses the \textit{measure\_bandwidth} function described in section \ref{measure_bandwidth}. We call the measurement done by the \textit{measure\_bandwidth} function the "main" measurement, the additional measurement used to evaluate the influence of another file transfer on the network the "additional" measurement.
The measurements are configurable by providing an interval in which \textit{iperf} will log results, as well as a length parameter to specify how long an \textit{iperf} measurement should be run.
While the main measurement is run for exactly the specified time, the additional measurement is run for a slightly longer time due to timing issues.
Because we reuse the \textit{measure\_bandwidth} function, the additional measurement is started with an offset. The \textit{measure\_bandwidth} function introduces a sleep time of one second between the execution of the \textit{iperf} server command and the client command, which would cause the additional measurement to start a second earlier than the main measurement. This is why we account for this additional second in the execution and the parsing of the \textit{iperf} output, executing the additional measurement for a longer period of time, omitting the entry for the first second and shifting all time values by one second. Doing this we create a log output that is nearly synced, only adding the delay created by the Mininet and Python overhead.
Both results are then passed to the \textit{multiplotting} function referenced in section \ref{plotting}, to create a plot containing both bandwidth measurements. The original graph created by the \textit{measure\_bandwidth} function is also saved. The combined graph receives the subtitle "\_combined".
Both results are then passed to the multi-plotting function referenced in \cref{plotting}, to create a plot containing both bandwidth measurements. The original graph created by the \textit{measure\_bandwidth} function is also saved. The combined graph receives the subtitle "\_combined".
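A sketch of the alignment step described above; the tuple format of the parsed log entries is an assumption and only serves to illustrate how the first second is dropped and the remaining timestamps are shifted onto the main measurement's timeline.

def align_additional_log(entries):
    # entries: (time_in_seconds, value) pairs parsed from the iperf log of the
    # additional measurement, which starts roughly one second before the main one
    aligned = []
    for time_s, value in entries:
        if time_s < 1:
            continue  # omit the entry for the first second
        aligned.append((time_s - 1, value))  # shift the timestamp by one second
    return aligned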
\subsubsection{measure\_latency}
The function \textit{measure\_latency} is used to start a \textit{ping} latency test between two hosts in a Mininet network. It will log the results, parse them and then pass them to the plotting function described in \cref{plotting}.
This function takes a sender element in the network, a destination IP, a length parameter from which, together with the interval, the number of ping packets to send is derived, an interval parameter defining the delay between each ping, a unique test name, a list containing the range for the y-axis of a created graph and a graph title which is used in naming the test files.
A test name is then created out of the components of the measurement, including sender and destination, the latency measurement function used (currently only ping is used) and the defined unique test name, which e.g. contains information on whether the test was started before or after introducing a failure, as well as whether or not ShortCut was used.
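The core of this measurement closely follows the excerpt of \textit{measure\_latency} shown in the first hunk of this commit; the sketch below restates it with an illustrative signature, again omitting parsing and plotting.

from time import sleep
from mininet.log import info

def measure_latency(net, sender, dest_ip, length, interval,
                    unique_test_name, y_range=None, graph_title=""):
    shortcut_flag = "sc"  # in the real code this reflects whether ShortCut is active
    tmp_file_name = f"/tmp/{sender}_to_{dest_ip}_ping_{unique_test_name}_{shortcut_flag}"
    packet_count = int(length / interval)  # number of pings derived from length and interval
    ping_command = f"ping -c {packet_count} -i {interval} {dest_ip} > {tmp_file_name}.out &"
    net[sender].cmd(ping_command)
    info(f"\nUsing ping command: {ping_command}")
    sleep(length + 1)  # wait for the ping run to finish before parsing the output file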
@@ -73,20 +80,21 @@ After successfully parsing the output files the information about the test, incl
\subsubsection{measure\_packet\_flow}
This function will use a client and server parameter to start an \textit{iperf} transfer and will implement packet counters using the filtering capabilities of \textit{nftables} on all devices referenced in the flow measurement targets provided in the parameters of this function. Depending on the "flag" parameter they will count all packets entering the device which belong to the specified protocol. As \textit{nftables} is normally used to create firewall rulings and is used in many professional linux networks it is safe to assume that the counters will have a minimal impact on performance.
This function will use a client and server parameter to start an \textit{iperf} transfer and will implement packet counters using the filtering capabilities of \textit{nftables}. All devices referenced in the flow measurement targets provided in the parameters of this function will receive a separate counter. Depending on the "flag" parameter they will count all packets entering the device which belong to the specified protocol.
When the packet counters are created information about them will be stored in a global packet counter memory, which will later be used to access information about the counters. After initialization this status is saved in a global variable in case the script is run again during the lifetime of the network to avoid the multiple implementation of the same counter and thus errors that could occur. If this variable is already true during execution, the existing counters are reset to a value of zero instead.
When the packet counters are created, information about them is stored in a global packet counter memory. This memory is later used to access and read the values of the counters. The script saves the current state of the packet counters globally in case it is run again during the lifetime of the network, to avoid creating the same counter multiple times and the errors this could cause. The test uses this state to act depending on whether or not the counters were already initialized: if no counters exist yet, they are created; if they were already created during runtime, they are reset to a value of zero instead.
There are python libraries for reading \textit{nftables} entries but an implementation would take additional time because of the usage of network namespaces of Mininet. Each device lives in its own network namespace, which would have to be specifically accessed by the python library. Because of this concern we decided to manually check and parse the output of the command line tool of \textit{nftables}.
There are Python libraries for reading \textit{nftables} entries. Because integrating them would take additional time and the use of network namespaces in Mininet adds some complexity to the configuration, we decided to instead manually check and parse the output of the \textit{nftables} counters using Python.
After starting a bandwidth test using \textit{iperf} on the client device, the periodic reading of the packet counters is started, which starts a Python thread for each of the measurement targets. In each of these threads the command line tool for displaying counters is used to access the current count. The output is saved in Python, parsed and then written to a log file which is named after the device that is being logged, including the current time of execution in a format suitable for \textit{gnuplot}.
After stopping the \textit{iperf} server on the server device the created log files are passed as a dictionary with the corresponding label for the data to the \textit{multiplotting} function explained in section \ref{plotting}.
After stopping the \textit{iperf} server the created log files are passed as a dictionary with the corresponding label for the data to the multi-plotting function explained in section \ref{plotting}.
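A sketch of how such a counter could be created and read with the \textit{nftables} command line tool from within the framework; the table, chain and counter names are illustrative and do not necessarily match the ones used in the implementation.

import re

def add_packet_counter(net, router, protocol="tcp"):
    node = net[router]
    # the counter hooks into the "forward" chain, so only forwarded packets are counted
    node.cmd("nft add table inet counters")
    node.cmd("nft 'add chain inet counters forward "
             "{ type filter hook forward priority 0; policy accept; }'")
    node.cmd(f"nft add counter inet counters {router}_{protocol}")
    node.cmd(f"nft add rule inet counters forward meta l4proto {protocol} "
             f"counter name {router}_{protocol}")

def read_packet_counter(net, router, protocol="tcp"):
    # parse the "packets <n> bytes <m>" line printed by nft
    output = net[router].cmd(f"nft list counter inet counters {router}_{protocol}")
    match = re.search(r"packets (\d+)", output)
    return int(match.group(1)) if match else 0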
\subsubsection{Plotting}
\subsection{Plotting functions}
\label{plotting}
Plotting results helps with visualizing differences. But performing many tests and creating graphs can become tedious and will take a lot of time. This is why we used \textit{gnuplot} to automatically create graphs. Each function that produces a log output containing results will parse its own results to only contain a time-value and a value, separated by a space, with additional values listed line-by-line.
This data is then passed to \textit{gnuplot}, which will produce an eps file containing a simple plot of our data. A test run with \textit{iperf} would look like seen in figure \ref{fig:example_plotting}. We use the "with linespoints" option, as well as the passed graph title for additional information on the plot.
Plotting results helps with visualizing differences, but performing a high number of tests and creating graphs manually quickly becomes tedious and takes a lot of time. This is why we use \textit{gnuplot} to create graphs automatically. Each function that produces a log output containing results parses its own results down to a time value and a measured value, separated by a space, with additional value pairs listed line by line.
This data is then passed to \textit{gnuplot}, which produces an EPS file containing a simple plot of our data. A test run with \textit{iperf} is shown in figure \ref{fig:example_plotting}. We use the "with linespoints" option, as well as the passed graph title for additional information on the plot. A call of the plotting function also allows specifying the labels for the x- and y-axis, as well as a range for the y values. If no range was provided, the test framework uses the minimum and maximum values of the results instead.
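The \textit{gnuplot} invocation can be pictured roughly as follows; the script is generated from Python, the terminal and the label defaults are assumptions, and the exact options used by the framework may differ.

import subprocess

def plot_results(data_file, output_file, graph_title,
                 x_label="time in seconds", y_label="bandwidth in Mbps", y_range=None):
    script = [
        "set terminal postscript eps enhanced",
        f"set output '{output_file}.eps'",
        f"set title '{graph_title}'",
        f"set xlabel '{x_label}'",
        f"set ylabel '{y_label}'",
    ]
    if y_range is not None:
        script.append(f"set yrange [{y_range[0]}:{y_range[1]}]")
    # without an explicit yrange gnuplot autoscales to the data's minimum and maximum
    script.append(f"plot '{data_file}' using 1:2 with linespoints notitle")
    subprocess.run(["gnuplot"], input="\n".join(script).encode(), check=True)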
\begin{figure}
\centering
@@ -95,7 +103,7 @@ This data is then passed to \textit{gnuplot}, which will produce an eps file con
\label{fig:example_plotting}
\end{figure}
In addition to plotting a single line in a graph, we also implemented a function to plot multiple data files in \textit{gnuplot} automatically. The function uses a greyscale as line colors and different dash styles for differentiating plots. It adds a defined label to each dataset. This can be used to e.g. plot the packet flow of multiple devices. A plot created with this method will look something like can be seen in figure \ref{fig:example_multiplotting}.
In addition to plotting a single line in a graph, we also implemented a function to plot multiple data files with \textit{gnuplot} automatically. The function uses greyscale line colors and different dash styles to differentiate the plots and adds a defined label to each dataset. This can be used, e.g., to plot the packet flow of multiple devices. A plot created with this method can be seen in figure \ref{fig:example_multiplotting}.
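The multi-plot variant can be sketched in the same way; mapping labels to data files as well as the concrete greyscale values and dash types are illustrative choices that mimic the described behaviour.

import subprocess

def multi_plot_results(data_files, output_file, graph_title):
    # data_files maps a label (e.g. a router name) to its "time value" log file
    plots = []
    for index, (label, data_file) in enumerate(data_files.items()):
        grey = min(index * 60, 200)  # darker to lighter grey per dataset
        plots.append(f"'{data_file}' using 1:2 with linespoints "
                     f"linecolor rgb '#{grey:02x}{grey:02x}{grey:02x}' "
                     f"dashtype {index + 1} title '{label}'")
    script = [
        "set terminal postscript eps enhanced",
        f"set output '{output_file}.eps'",
        f"set title '{graph_title}'",
        "plot " + ", ".join(plots),
    ]
    subprocess.run(["gnuplot"], input="\n".join(script).encode(), check=True)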
\begin{figure}
\centering