diff --git a/python/dune/perftool/sumfact/autotune.py b/python/dune/perftool/sumfact/autotune.py
index 9933e5852e13cc9182a358e6799094ceeedf64fc..24f85dc0bfc4644c2ac83c6b6f2296c6455d9e68 100644
--- a/python/dune/perftool/sumfact/autotune.py
+++ b/python/dune/perftool/sumfact/autotune.py
@@ -164,9 +164,15 @@ def generate_standalone_code(sf, filename, logname):
 
 
 def autotune_realization(sf):
-    name = "autotune_sumfact_{}".format(sf.function_name)
-    filename = "{}.cc".format(name)
-    logname = "{}.log".format(name)
+    # Make sure that the benchmark directory exists
+    bench_dir = os.path.join(get_option("project_basedir"), "autotune-benchmarks")
+    if not os.path.exists(bench_dir):
+        os.mkdir(bench_dir)
+
+    basename = "autotune_sumfact_{}".format(sf.function_name)
+    name = os.path.join(bench_dir, basename)
+    filename = os.path.join(bench_dir, "{}.cc".format(basename))
+    logname = os.path.join(bench_dir, "{}.log".format(basename))
 
     # If the log file already exists, we can reuse the benchmark results
     # and do not need to rerun it.
@@ -180,7 +186,7 @@ def autotune_realization(sf):
 
         # Run the benchmark program
         devnull = open(os.devnull, 'w')
-        ret = subprocess.call(["./{}".format(name)], stdout=devnull, stderr=subprocess.STDOUT)
+        ret = subprocess.call([name], stdout=devnull, stderr=subprocess.STDOUT)
         assert ret == 0
 
     # Extract the result from the log file
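
For reference, the run-or-reuse pattern this hunk feeds into (skip the benchmark when its log already exists, otherwise execute the binary by its full path) can be sketched standalone as below. This is a minimal illustration, not the dune-perftool API: the name `run_cached_benchmark` and its arguments are invented here, and it assumes the benchmark binary writes its results to `logname`, as the generated standalone code does in this module.

    import os
    import subprocess

    def run_cached_benchmark(executable, logname):
        # The log file acts as an on-disk cache: if a previous invocation
        # already produced it, skip the (expensive) benchmark run entirely.
        if not os.path.exists(logname):
            with open(os.devnull, 'w') as devnull:
                # Pass the full path instead of "./name" so the call no
                # longer depends on the current working directory.
                ret = subprocess.call([executable],
                                      stdout=devnull,
                                      stderr=subprocess.STDOUT)
                assert ret == 0
        # Hand the raw log back to the caller for result extraction.
        with open(logname) as f:
            return f.read()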