diff --git a/Benchmark.py b/Benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3d3233fa599f2cd1d4588d247b6b539fc0404fe
--- /dev/null
+++ b/Benchmark.py
@@ -0,0 +1,79 @@
+
+import utilities as util
+
+class Benchmark:
+    """Aggregates per-model LogContainers and renders a LaTeX results table."""
+
+    def __init__(self):
+        # model name -> LogContainer for that model
+        self.log_container_per_model = {}
+        # approach name -> list of method names (union over all models)
+        self.methods_per_approach = {}
+
+    def set_log_containers(self, log_container_per_model):
+        """Store the containers and rebuild the approach -> methods index."""
+        self.log_container_per_model = log_container_per_model
+        self.__extract_methods_per_approach()
+
+    def __extract_methods_per_approach(self):
+        """Union the per-model method lists, preserving first-seen order."""
+        for container in self.log_container_per_model.values():
+            for approach, methods in container.methods_per_approach.items():
+                known = self.methods_per_approach.setdefault(approach, [])
+                for method in methods:
+                    if method not in known:
+                        known.append(method)
+
+    def generate_tex_table(self):
+        """Write performance_table.tex: one row per method, four columns per model."""
+        # Context manager guarantees the file is closed even if a lookup below raises.
+        with open("performance_table.tex", "w") as tex_file:
+            tex_file.write("\n\\begin{table*}\n")
+            tex_file.write("\\begin{tabular}{|c| c|")
+            tex_file.write(" c c c c|" * len(self.log_container_per_model))
+            tex_file.write("}\n")
+            tex_file.write("\\hline\n")
+
+            # Header - model names
+            tex_file.write("\\multicolumn{2}{|c|}{}")
+
+            # Put models into a list to ensure the order is always maintained.
+            models = list(self.log_container_per_model)
+            for model in models:
+                tex_file.write(" & \\multicolumn{{4}}{{|c|}}{{{}}}".format(model))
+            tex_file.write("\\\\\n")
+
+            # Header - column names
+            tex_file.write("\\hline\n")
+            tex_file.write("Approach & Method")
+            for _ in models:
+                tex_file.write(" & \\#VPC & \\makecell{\\#VPC\\\\used} & \\#OVP & \\%")
+            tex_file.write("\\\\\n")
+
+            for approach, methods in self.methods_per_approach.items():
+                tex_file.write("\n\\hline\n")
+                tex_file.write("\\multirow{{{}}}{{*}}{{\\makecell{{{}}}}}".format(len(methods), approach))
+
+                for method in methods:
+                    # "\\makecell" (was "\makecell"): avoids the invalid "\m"
+                    # escape warning on Python >= 3.12; emitted LaTeX unchanged.
+                    tex_file.write("\n& \\makecell{{{}}}".format(method))
+
+                    for model in models:
+                        logs = self.log_container_per_model[model].get_logs_by_method(method)
+                        if not logs:
+                            # No run of this method for this model: blank cells.
+                            tex_file.write(" & - & - & - & -")
+                            continue
+
+                        # Only the first matching log per (model, method) is reported.
+                        log = logs[0]
+                        VPC_count = log.VPC["count"] + log.VPC["discarded_count"]
+                        VPC_used = log.VPC["count"]
+                        OVP = len(log.optimization["OVP"])
+                        coverage = util.set_precision(log.coverage["percent_fraction"] * 100, 2)
+                        tex_file.write(" & {} & {} & {} & {}".format(VPC_count, VPC_used, OVP, coverage))
+                    tex_file.write("\\\\")
+
+            tex_file.write("\n\\end{tabular}")
+            tex_file.write("\n\\end{table*}\n")
diff --git a/LogContainer.py b/LogContainer.py
index cf6e97a9f7d5d42b5b9c0fe3b481bb1385225460..00acc9ce75f4649e1fa5a3230e0fad9e11151557 100644
--- a/LogContainer.py
+++ b/LogContainer.py
@@ -39,3 +39,16 @@ class LogContainer:
         pp = pprint.PrettyPrinter(indent=4)
         print("Methods per approach:")
         pp.pprint (self.methods_per_approach)
+
+    def get_logs_by_method(self, method):
+        """Return this container's logs whose VPC method equals *method*.
+
+        Raises KeyError when *method* is absent from approaches_per_method;
+        returns an empty list when the method's approach has no logs.
+        """
+        approach = self.approaches_per_method[method]
+        if approach not in self.logs_per_approach:
+            return []
+
+        return [log for log in self.logs_per_approach[approach]
+                if log.VPC["method"] == method]
diff --git a/utilities.py b/utilities.py
index 9890f77eb69ee033fb1cfcc89c348343268e0780..8b871afad5d32d6e722b1e8c12c0645e3810b6cd 100644
--- a/utilities.py
+++ b/utilities.py
@@ -1,3 +1,11 @@
 
 def convert_to_percentage(fraction_array):
     return [element * 100 for element in fraction_array]
+
+def set_precision(value, decimal_count):
+    """Truncate *value* to *decimal_count* decimal places.
+
+    Truncates toward zero (no rounding), e.g. set_precision(1.239, 2) == 1.23.
+    """
+    precision_value = 10 ** decimal_count
+    return int(value * precision_value) / precision_value
diff --git a/vob.py b/vob.py
index 75734c0cf1588e08d69848df04f5f1177541b165..e139410c230d0ece234fe51a7374d8cb54058ec6 100644
--- a/vob.py
+++ b/vob.py
@@ -1,6 +1,8 @@
+
+from Benchmark import Benchmark
 from Log import Log
-from Vis import Vis
 from LogContainer import LogContainer
+from Vis import Vis
 
 import sys
 import json
@@ -22,7 +24,7 @@ def main():
         methods_per_approach = json.load(methods_per_approach_file)
 
     # Load log files and sort them per model.
-    log_containers_per_model = {}
+    log_container_per_model = {}
     logs = []
     for filename in ovp_paths:
         log = Log(filename)
@@ -33,12 +35,12 @@ def main():
             continue
 
         model_name = log.model["name"]
-        if model_name not in log_containers_per_model:
+        if model_name not in log_container_per_model:
             print(model_name)
-            log_containers_per_model[model_name] = LogContainer(methods_per_approach)
+            log_container_per_model[model_name] = LogContainer(methods_per_approach)
 
         try:
-            log_containers_per_model[model_name].add_log(log)
+            log_container_per_model[model_name].add_log(log)
         except Exception as e:
             print("Error: {}\nSkipping file".format(e))
             continue
@@ -46,14 +48,18 @@ def main():
 
     # Generate per-approach coverage graphs for each model
     vis = Vis()
-    for model in log_containers_per_model:
+    for model in log_container_per_model:
         print("Model name: {}".format(model))
-        log_containers_per_model[model].print_status()
-        vis.set_logs(log_containers_per_model[model])
+        log_container_per_model[model].print_status()
+        vis.set_logs(log_container_per_model[model])
         vis.generate_graphs()
         vis.save_graphs(prefix=model, output_path="./data/")
         # vis.show_graphs()
 
+    benchmark = Benchmark()
+    benchmark.set_log_containers(log_container_per_model)
+    benchmark.generate_tex_table()
+
 
 if __name__ == "__main__":
     main()