Commit 28a6a795 authored by gospodnetic's avatar gospodnetic
Get discarded average per method and model_method, generate performance tables with or without discarded viewpoints, fix max_coverage and min_VPC functions
parent 8b42064c
 import utilities as util
 from LogContainer import LogContainer
@@ -23,13 +22,22 @@ class Benchmark:
         else:
             self.methods_per_approach[approach] = [method]

-    def generate_performance_tex_table(self):
-        tex_file = open("performance_table.tex", "w")
+    def generate_performance_tex_table(self, output_path = "./", coverage_threshold=0.99, with_discarded=True):
+        filename = output_path
+        if with_discarded:
+            filename += "performance_table_discarded.tex"
+        else:
+            filename += "performance_table.tex"
+        tex_file = open(filename, "w")

         tex_file.write("\n\\begin{table*}\n")
+        tex_file.write("\\centering\n")
         tex_file.write("\\begin{tabular}{|c| c|")
         for model in self.log_container_per_model:
-            tex_file.write(" c c c c|")
+            if with_discarded:
+                tex_file.write(" c c c c|")
+            else:
+                tex_file.write(" c c c|")
         tex_file.write("}\n")
         tex_file.write("\\hline\n")
@@ -38,48 +46,84 @@ class Benchmark:
         # Put models into array to ensure the order is always maintained.
         models = []
-        for model in self.log_container_per_model:
-            tex_file.write(" & \\multicolumn{{4}}{{|c|}}{{{}}}".format(model))
-            models.append(model)
-        tex_file.write("\\\\\n")
-
-        # Header - column names
-        tex_file.write("\\hline\n")
-        tex_file.write("Approach & Method")
-        for model in models:
-            tex_file.write(" & \\#VPC & \\makecell{\\#VPC\\\\used} & \\#OVP & \\%")
-        tex_file.write("\\\\\n")
-
-        for approach in self.methods_per_approach:
-            method_count = len(self.methods_per_approach[approach])
-            tex_file.write("\n\\hline\n")
-            tex_file.write("\\multirow{{{}}}{{*}}{{\\makecell{{{}}}}}".format(method_count, approach))
-
-            for method in self.methods_per_approach[approach]:
-                tex_file.write("\n& \makecell{{{}}}".format(method))
-                for model in models:
-                    try:
-                        best_log = self.log_container_per_model[model].get_best_log(method)
-                    except:
-                        tex_file.write(" & - & - & - & -")
-                        continue
-                    VPC_count = best_log.VPC["count"] + best_log.VPC["discarded_count"]
-                    VPC_used = best_log.VPC["count"]
-                    OVP = len(best_log.optimization["OVP"])
-                    coverage = util.set_precision(best_log.coverage["percent_fraction"] * 100, 2)
-                    tex_file.write(" & {} & {} & {} & {}".format(VPC_count, VPC_used, OVP, coverage))
-                tex_file.write("\\\\")
-        tex_file.write("\n\\end{tabular}")
+        if with_discarded:
+            for model in self.log_container_per_model:
+                tex_file.write(" & \\multicolumn{{4}}{{|c|}}{{{}}}".format(model))
+                models.append(model)
+            tex_file.write("\\\\\n")
+
+            # Header - column names
+            tex_file.write("\\hline\n")
+            tex_file.write("Approach & Method")
+            for model in models:
+                tex_file.write(" & \\#VPC & \\makecell{\\#VPC\\\\used} & \\#OVP & \\%")
+            tex_file.write("\\\\\n")
+
+            for approach in self.methods_per_approach:
+                method_count = len(self.methods_per_approach[approach])
+                tex_file.write("\n\\hline\n")
+                tex_file.write("\\multirow{{{}}}{{*}}{{\\makecell{{{}}}}}".format(method_count, approach))
+
+                for method in self.methods_per_approach[approach]:
+                    tex_file.write("\n& \makecell{{{}}}".format(method))
+                    for model in models:
+                        try:
+                            best_log = self.log_container_per_model[model].get_best_log(method, coverage_threshold)
+                        except Exception as e:
+                            tex_file.write(" & - & - & - & -")
+                            continue
+                        VPC_count = best_log.VPC["count"] + best_log.VPC["discarded_count"]
+                        VPC_used = best_log.VPC["count"]
+                        OVP = len(best_log.optimization["OVP"])
+                        coverage = util.set_precision(best_log.coverage["percent_fraction"] * 100, 2)
+                        tex_file.write(" & {} & {} & {} & {}".format(VPC_count, VPC_used, OVP, coverage))
+                    tex_file.write("\\\\")
+        else:
+            for model in self.log_container_per_model:
+                tex_file.write(" & \\multicolumn{{3}}{{|c|}}{{{}}}".format(model))
+                models.append(model)
+            tex_file.write("\\\\\n")
+
+            # Header - column names
+            tex_file.write("\\hline\n")
+            tex_file.write("Approach & Method")
+            for model in models:
+                tex_file.write(" & \\#VPC & \\#OVP & \\%")
+            tex_file.write("\\\\\n")
+
+            for approach in self.methods_per_approach:
+                method_count = len(self.methods_per_approach[approach])
+                tex_file.write("\n\\hline\n")
+                tex_file.write("\\multirow{{{}}}{{*}}{{\\makecell{{{}}}}}".format(method_count, approach))
+
+                for method in self.methods_per_approach[approach]:
+                    tex_file.write("\n& \makecell{{{}}}".format(method))
+                    for model in models:
+                        try:
+                            best_log = self.log_container_per_model[model].get_best_log(method, coverage_threshold)
+                        except Exception as e:
+                            tex_file.write(" & - & - & -")
+                            continue
+                        VPC_count = best_log.VPC["count"] + best_log.VPC["discarded_count"]
+                        OVP = len(best_log.optimization["OVP"])
+                        coverage = util.set_precision(best_log.coverage["percent_fraction"] * 100, 2)
+                        tex_file.write(" & {} & {} & {}".format(VPC_count, OVP, coverage))
+                    tex_file.write("\\\\")
+        tex_file.write("\n\\hline\n")
+        tex_file.write("\\end{tabular}")
         tex_file.write("\n\\end{table*}\n")
         tex_file.close()
     # \usepackage{longtable} needed.
-    def generate_complete_tex_table(self):
-        tex_file = open("complete_table.tex", "w")
+    def generate_complete_tex_table(self, output_path = "./"):
+        tex_file = open(output_path + "complete_table.tex", "w")
         for model in self.log_container_per_model:
             tex_file.write("\n\\begin{longtable}{|c c c c c c c c|}\n")
@@ -138,6 +182,28 @@ class Benchmark:
         print("discarded_per_approach: {}".format(discarded_per_approach))
         return discarded_per_approach
+    def get_average_discarded_per_method(self):
+        discarded_per_method_list = {}
+        for approach in self.methods_per_approach:
+            for method in self.methods_per_approach[approach]:
+                for model in self.log_container_per_model:
+                    log_container = LogContainer(self.log_container_per_model[model].get_methods_per_approach())
+                    log_container.add_logs(self.log_container_per_model[model].get_logs_by_method(method))
+                    if log_container.size() == 0:
+                        continue
+                    container_avg = log_container.get_avg_discarded()
+                    if method in discarded_per_method_list:
+                        discarded_per_method_list[method].append(container_avg)
+                    else:
+                        discarded_per_method_list[method] = [container_avg]
+
+        discarded_per_method = {}
+        for method in discarded_per_method_list:
+            discarded_per_method[method] = np.sum(discarded_per_method_list[method]) / len(discarded_per_method_list[method])
+        print("discarded_per_method: {}".format(discarded_per_method))
+        return discarded_per_method
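Note: each entry in discarded_per_method_list holds one per-model average, so the final value is an unweighted mean of per-model means, not a mean over all logs pooled together; models with few logs weigh the same as models with many. Note also that np.sum assumes numpy is imported as np elsewhere in the file, outside this hunk. A self-contained sketch of the same reduction with invented values:

    # Sketch only: one average per (method, model), then an unweighted
    # mean per method; the numbers are made up for illustration.
    per_method_averages = {"VertexNormal": [0.25, 0.5, 0.75]}
    discarded_per_method = {
        method: sum(values) / len(values)
        for method, values in per_method_averages.items()
    }
    print(discarded_per_method)  # {'VertexNormal': 0.5}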
     def get_average_discarded_per_model(self):
         discarded_per_model = {}
         for model in self.log_container_per_model:
@@ -146,6 +212,24 @@ class Benchmark:
         print("discarded_per_model {}".format(discarded_per_model))
         return discarded_per_model
+    def get_average_discarded_per_model_per_method(self):
+        discarded_per_model_method = {}
+        for model in self.log_container_per_model:
+            for approach in self.methods_per_approach:
+                for method in self.methods_per_approach[approach]:
+                    log_container = LogContainer(self.log_container_per_model[model].get_methods_per_approach())
+                    log_container.add_logs(self.log_container_per_model[model].get_logs_by_method(method))
+                    if log_container.size() == 0:
+                        continue
+                    if model in discarded_per_model_method:
+                        discarded_per_model_method[model][method] = log_container.get_avg_discarded()
+                    else:
+                        discarded_per_model_method[model] = {}
+                        discarded_per_model_method[model][method] = log_container.get_avg_discarded()
+        print("discarded_per_model_method {}".format(discarded_per_model_method))
+        return discarded_per_model_method
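Note: the if/else above exists only to create discarded_per_model_method[model] on first touch. dict.setdefault expresses the same "create on first access" pattern in one line; a behavior-identical sketch, not part of this commit:

    # Create the per-model dict on first access, then assign the method's average.
    discarded_per_model_method.setdefault(model, {})[method] = log_container.get_avg_discarded()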
     def get_average_discarded_per_model_per_approach(self):
         discarded_per_model_approach = {}
         for model in self.log_container_per_model:
...
@@ -77,16 +77,15 @@ class LogContainer:
     # Log which obtains coverage over the threshold with minimal number of used viewpoint candidates.
     # If no log obtains coverage over the threshold, the log with the greatest coverage is considered.
-    def get_best_log(self, method):
+    def get_best_log(self, method, coverage_threshold=0.99):
         method_logs = self.get_logs_by_method(method)
         if len(method_logs) == 0:
             raise Exception("Error: No logs available for given method ({})".format(method))

-        # Find logs with coverage >99%.
-        high_coverage_logs = self.__filter_coverage_threshold(method_logs, 0.99, ComparisonType.GEQ)
+        # Find logs with coverage above the threshold.
+        high_coverage_logs = self.__filter_coverage_threshold(method_logs, coverage_threshold, ComparisonType.GEQ)
         if len(high_coverage_logs) > 0:
-            return self.max_coverage_log(high_coverage_logs)
+            return self.min_used_vpc_log(high_coverage_logs)
         else:
             return self.max_coverage_log(method_logs)
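Note: this hunk changes the selection policy to match the comment above it. Previously the log with maximum coverage was returned even among logs already clearing the bar; now, once logs clear coverage_threshold, the cheapest one wins, i.e. the one with the fewest used viewpoint candidates. A condensed sketch of the resulting policy (logs is a hypothetical list of log objects with the same fields used in this diff):

    # Prefer the cheapest log that clears the coverage threshold;
    # otherwise fall back to the highest coverage available.
    eligible = [log for log in logs if log.coverage["percent_fraction"] >= coverage_threshold]
    if eligible:
        best = min(eligible, key=lambda log: log.VPC["count"])
    else:
        best = max(logs, key=lambda log: log.coverage["percent_fraction"])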
@@ -118,8 +117,23 @@ class LogContainer:
         for log in input_logs:
             if log.coverage["percent_fraction"] > max_coverage:
                 max_log = log
+                max_coverage = log.coverage["percent_fraction"]
         return max_log

+    def min_used_vpc_log(self, input_logs=None):
+        if not input_logs:
+            input_logs = self.logs
+        if len(input_logs) == 0:
+            raise Exception("Error: no logs available.")
+
+        min_VPC = input_logs[0].VPC["count"]
+        min_log = input_logs[0]
+        for log in input_logs:
+            if log.VPC["count"] < min_VPC:
+                min_log = log
+                min_VPC = log.VPC["count"]
+        return min_log
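Note: the added max_coverage update is the max_coverage fix named in the commit message. Without it, every log is compared against the initial value only, so max_log ends up as the last log that beats the starting value rather than the one with the highest coverage. A minimal illustration with invented numbers:

    # Invented values: 0.99 is the true maximum.
    coverages = [0.95, 0.99, 0.97]
    best = max_coverage = 0.0
    for c in coverages:
        if c > max_coverage:
            best = c          # without the next line, 0.97 also passes
            max_coverage = c  # the test and overwrites 0.99
    # with the update best == 0.99; without it best == 0.97

The new min_used_vpc_log follows the same running-extremum pattern correctly from the start.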
     def __parse_methods_per_approach(self):
         self.approaches_per_method = {}
         for approach in self.methods_per_approach:
...
 {
     "SphereSampling": [
-        "UniformSphereAvgBbox",
-        "UniformSphereConvexHull",
-        "UniformSphereFocusRepositioned",
-        "UniformSphereMaxBbox",
-        "UniformSphereMinBbox",
-        "UniformSphereMixedRepositioned",
-        "UniformSphereNoDisplacement",
-        "UniformSphere",
-        "UniformSphereCentroid"],
+        "AvgBbox",
+        "ConvexHull",
+        "FocusRepositioned",
+        "MaxBbox",
+        "MinBbox",
+        "MixedRepositioned",
+        "NoDisplacement"],
     "VertexBased": [
-        "VertexCentroid",
         "VertexBBoxCenter",
         "VertexNormal"],
     "GeometryBased": [
...
 [
-    "/home/pastrva/Projects/VirtualImageProcessing/Fileserver/Papers/2020_Generate_and_Test_Comparison/Results/WithFiltering/face_shield"
+    "/home/pastrva/Projects/VirtualImageProcessing/Fileserver/Papers/2020_Generate_and_Test_Comparison/Results/WithoutFiltering/gear",
+    "/home/pastrva/Projects/VirtualImageProcessing/Fileserver/Papers/2020_Generate_and_Test_Comparison/Results/WithoutFiltering/hirth",
+    "/home/pastrva/Projects/VirtualImageProcessing/Fileserver/Papers/2020_Generate_and_Test_Comparison/Results/WithoutFiltering/time_shaft",
+    "/home/pastrva/Projects/VirtualImageProcessing/Fileserver/Papers/2020_Generate_and_Test_Comparison/Results/WithoutFiltering/face_shield"
 ]
@@ -82,12 +82,15 @@ def main():
     benchmark = Benchmark()
     benchmark.set_log_containers(log_container_per_model)
-    benchmark.generate_performance_tex_table()
-    benchmark.generate_complete_tex_table()
+    benchmark.generate_performance_tex_table(output_path="./data/", coverage_threshold=0.98, with_discarded=True)
+    benchmark.generate_performance_tex_table(output_path="./data/", coverage_threshold=0.98, with_discarded=False)
+    benchmark.generate_complete_tex_table(output_path="./data/")

     benchmark.get_average_RT_duration_per_model()
     benchmark.get_average_discarded_per_approach()
+    benchmark.get_average_discarded_per_method()
     benchmark.get_average_discarded_per_model()
     benchmark.get_average_discarded_per_model_per_approach()
+    benchmark.get_average_discarded_per_model_per_method()

 if __name__ == "__main__":
...