Commit 5baf3ae5 authored by René Heß

Fix PEP 8

parent 0aeefd6d
from dune.codegen.options import get_form_option
from dune.codegen.sumfact.transformations import sumfact_performance_transformations


def performance_transformations(kernel, signature):
    if get_form_option("sumfact"):
        kernel = sumfact_performance_transformations(kernel, signature)
...
@@ -2,6 +2,7 @@ import loopy as lp
 import pymbolic.primitives as prim
 
+
 def remove_reduction(knl, match):
     """Removes all matching reductions and do direct accumulation in assignee instead"""
@@ -21,7 +22,7 @@ def remove_reduction(knl, match):
             knl = lp.remove_instructions(knl, set([instr.id]))
 
             # Add instruction that sets assignee to zero
             id_zero = instr.id + '_set_zero'
             instructions.append(lp.Assignment(instr.assignee,
                                               0,
                                               within_inames=instr.within_inames,
@@ -41,7 +42,6 @@ def remove_reduction(knl, match):
                                               depends_on=frozenset((id_zero,) + tuple(depends_on)),
                                               tags=('assignment',)))
     knl = knl.copy(instructions=knl.instructions + instructions)
-
 
     for dep in depending:
...
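A side note on what this transformation does: remove_reduction swaps a loopy Reduction for an explicit zero-initialization of the assignee plus direct accumulation inside the former reduction loop. The snippet below is a minimal pure-Python sketch of that idea only (made-up array names), not the loopy transformation itself.

# Sketch: effect of removing a sum reduction.
# before: a[i] = sum over j of b[i][j] * c[j]   (a loopy Reduction)
# after:  a[i] = 0, then accumulate into a[i] inside an explicit j loop
def accumulate_directly(a, b, c):
    for i in range(len(a)):
        a[i] = 0                              # the added 'set_zero' assignment
        for j in range(len(c)):               # the reduction iname becomes a real loop
            a[i] = a[i] + b[i][j] * c[j]      # direct accumulation into the assignee
    return a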
@@ -19,6 +19,7 @@ from dune.codegen.sumfact.realization import realize_sumfact_kernel_function
 from dune.codegen.options import get_option, set_option
 from dune.codegen.error import CodegenAutotuneError
 
+
 def get_cmake_cache_entry(entry):
     for line in open(os.path.join(get_option("project_basedir"), "CMakeCache.txt"), "r"):
         match = re.match("{}:[INTERNAL|FILEPATH|BOOL|STRING|PATH|UNINITIALIZED|STATIC]+=(.*)".format(entry), line)
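For context on get_cmake_cache_entry: a CMakeCache.txt stores one NAME:TYPE=VALUE entry per line (e.g. CMAKE_CXX_COMPILER:FILEPATH=/usr/bin/g++). Below is a self-contained sketch of such a lookup; the helper name and the simplified [A-Z_]+ type pattern are illustrative assumptions, not the project's code.

import re

def read_cache_entry(cache_path, entry):
    # Scan the cache line by line and return the value of the first matching entry.
    with open(cache_path, "r") as cache:
        for line in cache:
            match = re.match(r"{}:[A-Z_]+=(.*)".format(re.escape(entry)), line)
            if match:
                return match.group(1).strip()
    return None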
@@ -319,7 +320,7 @@ def generate_standalone_kernel_code(kernel, signature, filename):
     # Get a list of the function argument names
     assert len(signature) == 1
     sig = signature[0]
-    arguments = sig[sig.find('(') +1:sig.find(')')].split(',')
+    arguments = sig[sig.find('(') + 1:sig.find(')')].split(',')
     arguments = [a.split(' ')[-1] for a in arguments]
     global_args = [a for a in kernel.args if a.name not in arguments]
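The whitespace fix above touches the slice that pulls argument names out of the generated function signature. A small worked example with a made-up signature string:

sig = "void sumfact_kernel(double* out, const double* in, int n)"  # hypothetical signature
arguments = sig[sig.find('(') + 1:sig.find(')')].split(',')         # ['double* out', ' const double* in', ' int n']
arguments = [a.split(' ')[-1] for a in arguments]                   # keep only the trailing identifier
print(arguments)                                                    # ['out', 'in', 'n']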
@@ -338,7 +339,7 @@ def generate_standalone_kernel_code(kernel, signature, filename):
         # Generate function we want to benchmark
         f.write('\n')
-        f.write(sig[0:sig.find(')')+1])
+        f.write(sig[0:sig.find(')') + 1])
         f.writelines(lp.generate_body(kernel))
         f.write('\n\n')
...
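The second hunk in this file fixes the spacing in the slice that writes the benchmark function's header. With the same kind of hypothetical signature, the slice keeps everything up to and including the closing parenthesis, i.e. the declaration without a trailing semicolon:

sig = "void sumfact_kernel(double* out, const double* in, int n);"  # hypothetical signature
header = sig[0:sig.find(')') + 1]
print(header)  # void sumfact_kernel(double* out, const double* in, int n)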
@@ -65,6 +65,7 @@ def _max_sum_factorization_buffer_size(sf):
                product(m.basis_size for m in sf.matrix_sequence_cost_permuted) * sf.vector_width)
     return size
 
+
 @kernel_cached
 def _realize_sum_factorization_kernel(sf):
     insn_dep = sf.insn_dep
@@ -145,7 +146,7 @@ class BufferSwitcher(object):
         # sure it is big enough.
         assert sf
         size = _max_sum_factorization_buffer_size(sf)
-        globalarg(bs, shape=(size,), alignment=alignment, dim_tags=['f',])
+        globalarg(bs, shape=(size,), alignment=alignment, dim_tags=['f', ])
         temporary_variable(name,
                            managed=True,
...
@@ -10,6 +10,7 @@ from dune.codegen.options import get_form_option, get_option
 from dune.codegen.pdelab.geometry import world_dimension
 from dune.codegen.error import CodegenAutotuneError
 
+
 def move_zero_assignment_up(kernel, move_up_inames):
     if len(move_up_inames) == 0:
         return kernel
@@ -23,9 +24,9 @@ def move_zero_assignment_up(kernel, move_up_inames):
         instr_iname_set = set(i.assignee.index_tuple)
         if move_iname_set.issubset(instr_iname_set):
             # There should be only one matching instruction
-            assert (instr==None)
+            assert instr is None
             instr = i
-    assert (instr!=None)
+    assert instr is not None
 
     # Remove it
     kernel = lp.remove_instructions(kernel, set([instr.id]))
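The two assert changes in this hunk are the pycodestyle E711 fix: comparisons against the singleton None should use identity, not equality. Minimal illustration:

instr = None
assert instr is None        # preferred: identity check against the singleton
# assert (instr == None)    # works, but flagged as E711 by pycodestyle/flake8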
@@ -70,7 +71,7 @@ def move_zero_assignment_up(kernel, move_up_inames):
         instructions.append(instr.copy(assignee=assignee,
                                        within_inames=frozenset(within_inames)))
     kernel = kernel.copy(instructions=kernel.instructions + instructions,
                          domains=domains)
 
     # Add dependency to inner assignment instructions
     cond = lp.match.Tagged('assignment')
@@ -80,7 +81,7 @@ def move_zero_assignment_up(kernel, move_up_inames):
         instr_iname_set = set(i.assignee.index_tuple)
         if move_iname_set.issubset(instr_iname_set):
             # There should be only one matching instruction
-            assert (instr==None)
+            assert instr is None
             instr = i
 
     id_zero = instructions[0].id
@@ -120,18 +121,18 @@ def reorder_loops_in_tensor_contraction(kernel, iname_order):
     # TODO: In principle there is no need to be dimension dependent. I'm just
     # not sure how to pass the iname_order in the general case. This probably
     # needs a rework anyway so I just do the 3D case first.
-    assert dim==3
+    assert dim == 3
 
     kernel = remove_all_reductions(kernel)
 
     # TODO: Doc after rewrite
     reduction_iname = 'j'
-    iname_dict = { 'l' : 'sf_out_inames_2',
-                   'k' : 'sf_out_inames_1',
-                   'i' : 'sf_out_inames_0',
-                   'j' : 'sf_red'}
+    iname_dict = {'l': 'sf_out_inames_2',
+                  'k': 'sf_out_inames_1',
+                  'i': 'sf_out_inames_0',
+                  'j': 'sf_red'}
     reduction_index = iname_order.index(reduction_iname)
-    move_up_inames = list(map(lambda x: iname_dict[x], iname_order[reduction_index+1:]))
+    move_up_inames = list(map(lambda x: iname_dict[x], iname_order[reduction_index + 1:]))
 
     # cond = lp.match.Tagged('set_zero')
     cond = lp.match.Tagged('assignment')
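To see what the reformatted dictionary computes: iname_order is a permutation of the letters 'l', 'k', 'i' plus the reduction index 'j', and move_up_inames collects the loopy inames that come after the reduction loop in the requested order. A worked example with an assumed iname_order:

iname_order = ['l', 'k', 'j', 'i']   # assumed input; 'j' marks the reduction loop
iname_dict = {'l': 'sf_out_inames_2',
              'k': 'sf_out_inames_1',
              'i': 'sf_out_inames_0',
              'j': 'sf_red'}
reduction_index = iname_order.index('j')   # 2
move_up_inames = list(map(lambda x: iname_dict[x], iname_order[reduction_index + 1:]))
print(move_up_inames)                      # ['sf_out_inames_0']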
@@ -155,7 +156,6 @@ def reorder_loops_in_tensor_contraction(kernel, iname_order):
         reduction_index = reduction_index.pop()
         reduction_iname = 'sf_red_{}'.format(reduction_index)
 
-
     prefered_iname_order = []
     for i in inames:
         if i not in current_move_up_inames and i.find('vec') == -1:
...