diff --git a/Benchmark.py b/Benchmark.py
index c13b510f554bac6a150a6e22ae169098de563948..16085a29394be0c4b33e2ac434e0d8edc31519ca 100644
--- a/Benchmark.py
+++ b/Benchmark.py
@@ -1,4 +1,3 @@
-
 import utilities as util
 from LogContainer import LogContainer
 
@@ -23,13 +22,22 @@ class Benchmark:
                     else:
                         self.methods_per_approach[approach] = [method]
 
-    def generate_performance_tex_table(self):
-        tex_file = open("performance_table.tex", "w")
+    def generate_performance_tex_table(self, output_path="./", coverage_threshold=0.99, with_discarded=True):
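+        # with_discarded toggles the extra "#VPC used" column; coverage_threshold is forwarded to get_best_log.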
+        filename = output_path
+        if with_discarded:
+            filename += "performance_table_discarded.tex"
+        else:
+            filename += "performance_table.tex"
+        tex_file = open(filename, "w")
 
         tex_file.write("\n\\begin{table*}\n")
+        tex_file.write("\\centering\n")
         tex_file.write("\\begin{tabular}{|c| c|")
         for model in self.log_container_per_model:
-            tex_file.write(" c c c c|")
+            if with_discarded:
+                tex_file.write(" c c c c|")
+            else:
+                tex_file.write(" c c c|")
         tex_file.write("}\n")
         tex_file.write("\\hline\n")
 
@@ -38,48 +46,84 @@ class Benchmark:
 
         # Put models into array to ensure the order is always maintained.
         models = []
-        for model in self.log_container_per_model:
-            tex_file.write(" & \\multicolumn{{4}}{{|c|}}{{{}}}".format(model))
-            models.append(model)
-        tex_file.write("\\\\\n")
-        
-        # Header - column names
-        tex_file.write("\\hline\n")
-        tex_file.write("Approach & Method")
-        for model in models:
-            tex_file.write(" & \\#VPC & \\makecell{\\#VPC\\\\used} & \\#OVP & \\%")
-        tex_file.write("\\\\\n")
+        if with_discarded:
+            for model in self.log_container_per_model:
+                tex_file.write(" & \\multicolumn{{4}}{{|c|}}{{{}}}".format(model))
+                models.append(model)
+            tex_file.write("\\\\\n")
+
+            # Header - column names
+            tex_file.write("\\hline\n")
+            tex_file.write("Approach & Method")
+            for model in models:
+                tex_file.write(" & \\#VPC & \\makecell{\\#VPC\\\\used} & \\#OVP & \\%")
+            tex_file.write("\\\\\n")
 
 
-        for approach in self.methods_per_approach:
-            method_count = len(self.methods_per_approach[approach])
-            tex_file.write("\n\\hline\n")
-            tex_file.write("\\multirow{{{}}}{{*}}{{\\makecell{{{}}}}}".format(method_count, approach))
+            for approach in self.methods_per_approach:
+                method_count = len(self.methods_per_approach[approach])
+                tex_file.write("\n\\hline\n")
+                tex_file.write("\\multirow{{{}}}{{*}}{{\\makecell{{{}}}}}".format(method_count, approach))
 
-            for method in self.methods_per_approach[approach]:
-                tex_file.write("\n& \makecell{{{}}}".format(method))
+                for method in self.methods_per_approach[approach]:
+                    tex_file.write("\n& \makecell{{{}}}".format(method))
+
+                    for model in models:
+                        try:
+                            best_log = self.log_container_per_model[model].get_best_log(method, coverage_threshold)
+                        except Exception:
+                            tex_file.write(" & - & - & - & -")
+                            continue
+
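+                        # Total candidates = used + discarded viewpoint candidates.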
+                        VPC_count = best_log.VPC["count"] + best_log.VPC["discarded_count"]
+                        VPC_used = best_log.VPC["count"]
+                        OVP = len(best_log.optimization["OVP"])
+                        coverage = util.set_precision(best_log.coverage["percent_fraction"] * 100, 2)
+                        tex_file.write(" & {} & {} & {} & {}".format(VPC_count, VPC_used, OVP, coverage))
+                    tex_file.write("\\\\")
+        else:
+            for model in self.log_container_per_model:
+                tex_file.write(" & \\multicolumn{{3}}{{|c|}}{{{}}}".format(model))
+                models.append(model)
+            tex_file.write("\\\\\n")
+
+            # Header - column names
+            tex_file.write("\\hline\n")
+            tex_file.write("Approach & Method")
+            for model in models:
+                tex_file.write(" & \\#VPC & \\#OVP & \\%")
+            tex_file.write("\\\\\n")
 
-                for model in models:
-                    try:
-                        best_log = self.log_container_per_model[model].get_best_log(method)
-                    except:
-                        tex_file.write(" & - & - & - & -")
-                        continue
 
-                    VPC_count = best_log.VPC["count"] + best_log.VPC["discarded_count"]
-                    VPC_used = best_log.VPC["count"]
-                    OVP = len(best_log.optimization["OVP"])
-                    coverage = util.set_precision(best_log.coverage["percent_fraction"] * 100, 2)
-                    tex_file.write(" & {} & {} & {} & {}".format(VPC_count, VPC_used, OVP, coverage))
-                tex_file.write("\\\\")
+            for approach in self.methods_per_approach:
+                method_count = len(self.methods_per_approach[approach])
+                tex_file.write("\n\\hline\n")
+                tex_file.write("\\multirow{{{}}}{{*}}{{\\makecell{{{}}}}}".format(method_count, approach))
 
-        tex_file.write("\n\\end{tabular}")
+                for method in self.methods_per_approach[approach]:
+                    tex_file.write("\n& \makecell{{{}}}".format(method))
+
+                    for model in models:
+                        try:
+                            best_log = self.log_container_per_model[model].get_best_log(method, coverage_threshold)
+                        except Exception:
+                            tex_file.write(" & - & - & -")
+                            continue
+
+                        VPC_count = best_log.VPC["count"] + best_log.VPC["discarded_count"]
+                        OVP = len(best_log.optimization["OVP"])
+                        coverage = util.set_precision(best_log.coverage["percent_fraction"] * 100, 2)
+                        tex_file.write(" & {} & {} & {}".format(VPC_count, OVP, coverage))
+                    tex_file.write("\\\\")
+
+        tex_file.write("\n\\hline\n")
+        tex_file.write("\\end{tabular}")
         tex_file.write("\n\\end{table*}\n")
         tex_file.close()
 
     # \usepackage{longtable} needed.
-    def generate_complete_tex_table(self):
-        tex_file = open("complete_table.tex", "w")
+    def generate_complete_tex_table(self, output_path="./"):
+        tex_file = open(output_path + "complete_table.tex", "w")
 
         for model in self.log_container_per_model:
             tex_file.write("\n\\begin{longtable}{|c c c c c c c c|}\n")
@@ -138,6 +182,28 @@ class Benchmark:
         print("discarded_per_approach: {}".format(discarded_per_approach))
         return discarded_per_approach
 
+    def get_average_discarded_per_method(self):
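+        # For each method, collect the per-model average discarded count, then average across models.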
+        discarded_per_method_list = {}
+        for approach in self.methods_per_approach:
+            for method in self.methods_per_approach[approach]:
+                for model in self.log_container_per_model:
+                    log_container = LogContainer(self.log_container_per_model[model].get_methods_per_approach())
+                    log_container.add_logs(self.log_container_per_model[model].get_logs_by_method(method))
+                    if log_container.size() == 0:
+                        continue
+
+                    container_avg = log_container.get_avg_discarded()
+                    if method in discarded_per_method_list:
+                        discarded_per_method_list[method].append(container_avg)
+                    else:
+                        discarded_per_method_list[method] = [container_avg]
+        discarded_per_method = {}
+        for method in discarded_per_method_list:
+            discarded_per_method[method] = sum(discarded_per_method_list[method]) / len(discarded_per_method_list[method])
+
+        print ("discarded_per_method: {}".format(discarded_per_method))
+        return discarded_per_method
+
     def get_average_discarded_per_model(self):
         discarded_per_model = {}
         for model in self.log_container_per_model:
@@ -146,6 +212,24 @@ class Benchmark:
         print("discarded_per_model {}".format(discarded_per_model))
         return discarded_per_model
 
+    def get_average_discarded_per_model_per_method(self):
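+        # Nested result: model -> method -> average number of discarded viewpoint candidates.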
+        discarded_per_model_method = {}
+        for model in self.log_container_per_model:
+            for approach in self.methods_per_approach:
+                for method in self.methods_per_approach[approach]:
+                    log_container = LogContainer(self.log_container_per_model[model].get_methods_per_approach())
+                    log_container.add_logs(self.log_container_per_model[model].get_logs_by_method(method))
+                    if log_container.size() == 0:
+                        continue
+                    if model in discarded_per_model_method:
+                        discarded_per_model_method[model][method] = log_container.get_avg_discarded()
+                    else:
+                        discarded_per_model_method[model] = {}
+                        discarded_per_model_method[model][method] = log_container.get_avg_discarded()
+
+        print("discarded_per_model_method {}".format(discarded_per_model_method))
+        return discarded_per_model_method
+
     def get_average_discarded_per_model_per_approach(self):
         discarded_per_model_approach = {}
         for model in self.log_container_per_model:
diff --git a/LogContainer.py b/LogContainer.py
index b9e835f45bd0a1d1fa65ac76778d9b8fe06e10a3..43a88e531bbc53fd9811e3757ecfbb73d41ac926 100644
--- a/LogContainer.py
+++ b/LogContainer.py
@@ -77,16 +77,15 @@ class LogContainer:
 
     # Log which obtains coverage over 99% with minimal number of viewpoint candidates
     # If no log obtains coverage over 99%, the log with the greatest coverage is considered.
-    def get_best_log(self, method):
+    def get_best_log(self, method, coverage_threshold=0.99):
         method_logs = self.get_logs_by_method(method)
         if len(method_logs) == 0:
             raise Exception("Error: No logs available for given method ({})".format(method))
 
         # Find logs with coverage >99%.
-        high_coverage_logs = self.__filter_coverage_threshold(method_logs, 0.99, ComparisonType.GEQ)
-
+        high_coverage_logs = self.__filter_coverage_threshold(method_logs, coverage_threshold, ComparisonType.GEQ)
         if len(high_coverage_logs) > 0:
-            return self.max_coverage_log(high_coverage_logs)
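+            # Among logs that reach the threshold, prefer the one that used the fewest viewpoint candidates.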
+            return self.min_used_vpc_log(high_coverage_logs)
         else:
             return self.max_coverage_log(method_logs)
 
@@ -118,8 +117,23 @@ class LogContainer:
         for log in input_logs:
             if log.coverage["percent_fraction"] > max_coverage:
                 max_log = log
+                max_coverage = log.coverage["percent_fraction"]
         return max_log
 
+    def min_used_vpc_log(self, input_logs=None):
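+        # Return the log that used the fewest viewpoint candidates.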
+        if not input_logs:
+            input_logs = self.logs
+        if len(input_logs) == 0:
+            raise Exception("Error: no logs available.")
+
+        min_VPC = input_logs[0].VPC["count"]
+        min_log = input_logs[0]
+        for log in input_logs:
+            if log.VPC["count"] < min_VPC:
+                min_log = log
+                min_VPC = log.VPC["count"]
+        return min_log
+
     def __parse_methods_per_approach(self):
         self.approaches_per_method = {}
         for approach in self.methods_per_approach:
diff --git a/object_space_exploration_per_approach_methods.json b/object_space_exploration_per_approach_methods.json
index 0a1b8923487864512bbe835969cd0ce1200e7022..745c226e72f10401d6a5036940c300d4288df23a 100644
--- a/object_space_exploration_per_approach_methods.json
+++ b/object_space_exploration_per_approach_methods.json
@@ -1,16 +1,13 @@
 {
     "SphereSampling": [
-        "UniformSphereAvgBbox",
-        "UniformSphereConvexHull",
-        "UniformSphereFocusRepositioned",
-        "UniformSphereMaxBbox",
-        "UniformSphereMinBbox",
-        "UniformSphereMixedRepositioned",
-        "UniformSphereNoDisplacement",
-        "UniformSphere",
-        "UniformSphereCentroid"],
+        "AvgBbox",
+        "ConvexHull",
+        "FocusRepositioned",
+        "MaxBbox",
+        "MinBbox",
+        "MixedRepositioned",
+        "NoDisplacement"],
     "VertexBased": [
-        "VertexCentroid",
         "VertexBBoxCenter",
         "VertexNormal"],
     "GeometryBased": [
diff --git a/ovp_paths.json b/ovp_paths.json
index 4bf18b5e524d91896a04bd4b90505b021f610c17..790c98524113e9cd50a02cac1068d007055a3cb5 100644
--- a/ovp_paths.json
+++ b/ovp_paths.json
@@ -1,3 +1,6 @@
 [
-    "/home/pastrva/Projects/VirtualImageProcessing/Fileserver/Papers/2020_Generate_and_Test_Comparison/Results/WithFiltering/face_shield"
+    "/home/pastrva/Projects/VirtualImageProcessing/Fileserver/Papers/2020_Generate_and_Test_Comparison/Results/WithoutFiltering/gear",
+    "/home/pastrva/Projects/VirtualImageProcessing/Fileserver/Papers/2020_Generate_and_Test_Comparison/Results/WithoutFiltering/hirth",
+    "/home/pastrva/Projects/VirtualImageProcessing/Fileserver/Papers/2020_Generate_and_Test_Comparison/Results/WithoutFiltering/time_shaft",
+    "/home/pastrva/Projects/VirtualImageProcessing/Fileserver/Papers/2020_Generate_and_Test_Comparison/Results/WithoutFiltering/face_shield"
 ]
diff --git a/vob.py b/vob.py
index e82e23ec8e737a855e66bee47e91e828131d11a5..50fdbcdd55df50a59ed4a7c26ba96bbc51752936 100644
--- a/vob.py
+++ b/vob.py
@@ -82,12 +82,15 @@ def main():
 
     benchmark = Benchmark()
     benchmark.set_log_containers(log_container_per_model)
-    benchmark.generate_performance_tex_table()
-    benchmark.generate_complete_tex_table()
+    benchmark.generate_performance_tex_table(output_path="./data/", coverage_threshold=0.98, with_discarded=True)
+    benchmark.generate_performance_tex_table(output_path="./data/", coverage_threshold=0.98, with_discarded=False)
+    benchmark.generate_complete_tex_table(output_path="./data/")
     benchmark.get_average_RT_duration_per_model()
     benchmark.get_average_discarded_per_approach()
+    benchmark.get_average_discarded_per_method()
     benchmark.get_average_discarded_per_model()
     benchmark.get_average_discarded_per_model_per_approach()
+    benchmark.get_average_discarded_per_model_per_method()
 
 
 if __name__ == "__main__":