From 5baf3ae54bcf518650df8a5594f260642adc140c Mon Sep 17 00:00:00 2001
From: René Heß <rene.hess@iwr.uni-heidelberg.de>
Date: Tue, 2 Apr 2019 08:03:17 +0200
Subject: [PATCH] Fix PEP 8

Add the missing second blank line before top-level function definitions,
normalize whitespace around operators, colons, commas, and brackets,
align continuation lines with the opening bracket, drop superfluous
blank lines, and compare against None with "is"/"is not" instead of
"=="/"!=".

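An illustrative way to re-check the touched files after applying the
patch (pycodestyle and the max_line_length value are assumptions made
for this sketch, not settings taken from the project's configuration):

    # Illustrative only: scan the touched modules with pycodestyle.
    import pycodestyle

    files = [
        "python/dune/codegen/loopy/transformations/performance.py",
        "python/dune/codegen/loopy/transformations/remove_reductions.py",
        "python/dune/codegen/sumfact/autotune.py",
        "python/dune/codegen/sumfact/realization.py",
        "python/dune/codegen/sumfact/transformations.py",
    ]
    # Assumed line-length limit; adjust to the project's own setting.
    style = pycodestyle.StyleGuide(max_line_length=120)
    report = style.check_files(files)
    print("remaining violations:", report.total_errors)
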
---
 .../loopy/transformations/performance.py      |  1 +
 .../transformations/remove_reductions.py      |  4 ++--
 python/dune/codegen/sumfact/autotune.py       |  5 +++--
 python/dune/codegen/sumfact/realization.py    |  3 ++-
 .../dune/codegen/sumfact/transformations.py   | 22 +++++++++----------
 5 files changed, 19 insertions(+), 16 deletions(-)

diff --git a/python/dune/codegen/loopy/transformations/performance.py b/python/dune/codegen/loopy/transformations/performance.py
index b8995e32..02644e80 100644
--- a/python/dune/codegen/loopy/transformations/performance.py
+++ b/python/dune/codegen/loopy/transformations/performance.py
@@ -1,6 +1,7 @@
 from dune.codegen.options import get_form_option
 from dune.codegen.sumfact.transformations import sumfact_performance_transformations
 
+
 def performance_transformations(kernel, signature):
     if get_form_option("sumfact"):
         kernel = sumfact_performance_transformations(kernel, signature)
diff --git a/python/dune/codegen/loopy/transformations/remove_reductions.py b/python/dune/codegen/loopy/transformations/remove_reductions.py
index f5381612..dd083951 100644
--- a/python/dune/codegen/loopy/transformations/remove_reductions.py
+++ b/python/dune/codegen/loopy/transformations/remove_reductions.py
@@ -2,6 +2,7 @@ import loopy as lp
 
 import pymbolic.primitives as prim
 
+
 def remove_reduction(knl, match):
     """Removes all matching reductions and do direct accumulation in assignee instead"""
 
@@ -21,7 +22,7 @@ def remove_reduction(knl, match):
             knl = lp.remove_instructions(knl, set([instr.id]))
 
             # Add instruction that sets assignee to zero
-            id_zero = instr.id  + '_set_zero'
+            id_zero = instr.id + '_set_zero'
             instructions.append(lp.Assignment(instr.assignee,
                                               0,
                                               within_inames=instr.within_inames,
@@ -41,7 +42,6 @@ def remove_reduction(knl, match):
                                               depends_on=frozenset((id_zero,) + tuple(depends_on)),
                                               tags=('assignment',)))
 
-
             knl = knl.copy(instructions=knl.instructions + instructions)
 
             for dep in depending:
diff --git a/python/dune/codegen/sumfact/autotune.py b/python/dune/codegen/sumfact/autotune.py
index e89f13c2..a8dfd4ac 100644
--- a/python/dune/codegen/sumfact/autotune.py
+++ b/python/dune/codegen/sumfact/autotune.py
@@ -19,6 +19,7 @@ from dune.codegen.sumfact.realization import realize_sumfact_kernel_function
 from dune.codegen.options import get_option, set_option
 from dune.codegen.error import CodegenAutotuneError
 
+
 def get_cmake_cache_entry(entry):
     for line in open(os.path.join(get_option("project_basedir"), "CMakeCache.txt"), "r"):
         match = re.match("{}:[INTERNAL|FILEPATH|BOOL|STRING|PATH|UNINITIALIZED|STATIC]+=(.*)".format(entry), line)
@@ -319,7 +320,7 @@ def generate_standalone_kernel_code(kernel, signature, filename):
         # Get a list of the function argument names
         assert len(signature) == 1
         sig = signature[0]
-        arguments = sig[sig.find('(') +1:sig.find(')')].split(',')
+        arguments = sig[sig.find('(') + 1:sig.find(')')].split(',')
         arguments = [a.split(' ')[-1] for a in arguments]
 
         global_args = [a for a in kernel.args if a.name not in arguments]
@@ -338,7 +339,7 @@ def generate_standalone_kernel_code(kernel, signature, filename):
 
         # Generate function we want to benchmark
         f.write('\n')
-        f.write(sig[0:sig.find(')')+1])
+        f.write(sig[0:sig.find(')') + 1])
         f.writelines(lp.generate_body(kernel))
         f.write('\n\n')
 
diff --git a/python/dune/codegen/sumfact/realization.py b/python/dune/codegen/sumfact/realization.py
index 38171bdb..461f5b0a 100644
--- a/python/dune/codegen/sumfact/realization.py
+++ b/python/dune/codegen/sumfact/realization.py
@@ -65,6 +65,7 @@ def _max_sum_factorization_buffer_size(sf):
                product(m.basis_size for m in sf.matrix_sequence_cost_permuted) * sf.vector_width)
     return size
 
+
 @kernel_cached
 def _realize_sum_factorization_kernel(sf):
     insn_dep = sf.insn_dep
@@ -145,7 +146,7 @@ class BufferSwitcher(object):
         # sure it is big enough.
         assert sf
         size = _max_sum_factorization_buffer_size(sf)
-        globalarg(bs, shape=(size,), alignment=alignment, dim_tags=['f',])
+        globalarg(bs, shape=(size,), alignment=alignment, dim_tags=['f', ])
 
         temporary_variable(name,
                            managed=True,
diff --git a/python/dune/codegen/sumfact/transformations.py b/python/dune/codegen/sumfact/transformations.py
index ffe5ca41..bbc6c4f7 100644
--- a/python/dune/codegen/sumfact/transformations.py
+++ b/python/dune/codegen/sumfact/transformations.py
@@ -10,6 +10,7 @@ from dune.codegen.options import get_form_option, get_option
 from dune.codegen.pdelab.geometry import world_dimension
 from dune.codegen.error import CodegenAutotuneError
 
+
 def move_zero_assignment_up(kernel, move_up_inames):
     if len(move_up_inames) == 0:
         return kernel
@@ -23,9 +24,9 @@ def move_zero_assignment_up(kernel, move_up_inames):
         instr_iname_set = set(i.assignee.index_tuple)
         if move_iname_set.issubset(instr_iname_set):
             # There should be only one matching instruction
-            assert (instr==None)
+            assert instr is None
             instr = i
-    assert (instr!=None)
+    assert instr is not None
 
     # Remove it
     kernel = lp.remove_instructions(kernel, set([instr.id]))
@@ -70,7 +71,7 @@ def move_zero_assignment_up(kernel, move_up_inames):
     instructions.append(instr.copy(assignee=assignee,
                                    within_inames=frozenset(within_inames)))
     kernel = kernel.copy(instructions=kernel.instructions + instructions,
-                   domains=domains)
+                         domains=domains)
 
     # Add dependency to inner assignment instructions
     cond = lp.match.Tagged('assignment')
@@ -80,7 +81,7 @@ def move_zero_assignment_up(kernel, move_up_inames):
         instr_iname_set = set(i.assignee.index_tuple)
         if move_iname_set.issubset(instr_iname_set):
             # There should be only one matching instruction
-            assert (instr==None)
+            assert instr is None
             instr = i
 
     id_zero = instructions[0].id
@@ -120,18 +121,18 @@ def reorder_loops_in_tensor_contraction(kernel, iname_order):
     # TODO: In principle there is no need to be dimension dependent. I'm just
     # not sure how to pass the iname_order in the general case. This probably
     # needs a rework anyway so I just do the 3D case first.
-    assert dim==3
+    assert dim == 3
 
     kernel = remove_all_reductions(kernel)
 
     # TODO: Doc after rewrite
     reduction_iname = 'j'
-    iname_dict = { 'l' : 'sf_out_inames_2',
-                   'k' : 'sf_out_inames_1',
-                   'i' : 'sf_out_inames_0',
-                   'j' : 'sf_red'}
+    iname_dict = {'l': 'sf_out_inames_2',
+                  'k': 'sf_out_inames_1',
+                  'i': 'sf_out_inames_0',
+                  'j': 'sf_red'}
     reduction_index = iname_order.index(reduction_iname)
-    move_up_inames = list(map(lambda x: iname_dict[x], iname_order[reduction_index+1:]))
+    move_up_inames = list(map(lambda x: iname_dict[x], iname_order[reduction_index + 1:]))
 
     # cond = lp.match.Tagged('set_zero')
     cond = lp.match.Tagged('assignment')
@@ -155,7 +156,6 @@ def reorder_loops_in_tensor_contraction(kernel, iname_order):
         reduction_index = reduction_index.pop()
         reduction_iname = 'sf_red_{}'.format(reduction_index)
 
-
         prefered_iname_order = []
         for i in inames:
             if i not in current_move_up_inames and i.find('vec') == -1:
-- 
GitLab