diff --git a/b_asic/architecture.py b/b_asic/architecture.py
index 7f0722a40f81ddcf37f292fb49a95819f975f8aa..9ceaacc72db404c0f998c44651ea79ddfef1aa08 100644
--- a/b_asic/architecture.py
+++ b/b_asic/architecture.py
@@ -561,7 +561,12 @@ class Memory(Resource):
                 pass
         return ""
 
-    def assign(self, strategy: str = "left_edge") -> None:
+    def assign(
+        self,
+        strategy: Literal[
+            "left_edge", "greedy_graph_color", "ilp_graph_color"
+        ] = "left_edge",
+    ) -> None:
         """
         Perform assignment of the memory variables.
 
@@ -573,7 +578,8 @@ class Memory(Resource):
 
             * 'RAM'
                 * 'left_edge': Left-edge algorithm.
-                * 'graph_color': Graph-coloring based on exclusion graph.
+                * 'greedy_graph_color': Greedy coloring of the exclusion graph.
+                * 'ilp_graph_color': Optimal (ILP) coloring of the exclusion graph.
             * 'register'
                 * ...
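+
+        A minimal usage sketch (assuming ``memory`` is a RAM-type
+        :class:`Memory` built from a :class:`~b_asic.resources.ProcessCollection`)::
+
+            memory.assign("greedy_graph_color")  # fast heuristic
+            memory.assign("ilp_graph_color")  # optimal, needs an ILP solver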
         """
diff --git a/b_asic/resources.py b/b_asic/resources.py
index e09af3adc476df75af6c128e016554b10bd6a284..95b42d821ae4ac5f7fe713f21aba27d054bd0444 100644
--- a/b_asic/resources.py
+++ b/b_asic/resources.py
@@ -913,21 +913,27 @@ class ProcessCollection:
 
     def split_on_execution_time(
         self,
-        strategy: Literal["graph_color", "left_edge"] = "left_edge",
+        strategy: Literal[
+            "left_edge",
+            "greedy_graph_color",
+            "ilp_graph_color",
+        ] = "left_edge",
         coloring_strategy: str = "saturation_largest_first",
+        max_colors: int | None = None,
+        solver: PULP_CBC_CMD | GUROBI | None = None,
     ) -> list["ProcessCollection"]:
         """
         Split based on overlapping execution time.
 
         Parameters
         ----------
-        strategy : {'graph_color', 'left_edge'}, default: 'left_edge'
+        strategy : {'left_edge', 'greedy_graph_color', 'ilp_graph_color'}, default: 'left_edge'
             The strategy used when splitting based on execution times.
 
         coloring_strategy : str, default: 'saturation_largest_first'
             Node ordering strategy passed to
             :func:`networkx.algorithms.coloring.greedy_color`.
-            This parameter is only considered if *strategy* is set to 'graph_color'.
+            This parameter is only considered if *strategy* is set to 'greedy_graph_color'.
             One of
 
             * 'largest_first'
@@ -938,12 +944,24 @@ class ProcessCollection:
             * 'connected_sequential_dfs' or 'connected_sequential'
             * 'saturation_largest_first' or 'DSATUR'
 
+        max_colors : int, optional
+            The maximum number of colors (resulting collections) allowed.
+            Only used if *strategy* is 'ilp_graph_color'; if not provided, an
+            upper bound is obtained from a greedy graph coloring.
+
+        solver : PuLP MIP solver object, optional
+            Only used if *strategy* is 'ilp_graph_color'. Valid options are
+
+            * PULP_CBC_CMD() - the default CBC solver bundled with PuLP
+            * GUROBI() - requires a license, but is typically faster
+
         Returns
         -------
         A list of new ProcessCollection objects with the process splitting.
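+
+        A minimal usage sketch of the ILP strategy (assuming ``collection`` is a
+        :class:`ProcessCollection` and that an ILP solver is available)::
+
+            from pulp import PULP_CBC_CMD
+
+            split = collection.split_on_execution_time(
+                strategy="ilp_graph_color",
+                max_colors=2,
+                solver=PULP_CBC_CMD(msg=False),
+            )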
         """
-        if strategy == "graph_color":
-            return self._graph_color_assignment(coloring_strategy)
+        if strategy == "ilp_graph_color":
+            return self._ilp_graph_color_assignment(max_colors, solver)
+        elif strategy == "greedy_graph_color":
+            return self._greedy_graph_color_assignment(coloring_strategy)
         elif strategy == "left_edge":
             return self._left_edge_assignment()
         else:
@@ -1841,7 +1859,92 @@ class ProcessCollection:
     def __iter__(self):
         return iter(self._collection)
 
-    def _graph_color_assignment(
+    def _ilp_graph_color_assignment(
+        self,
+        max_colors: int | None = None,
+        solver: PULP_CBC_CMD | GUROBI | None = None,
+    ) -> list["ProcessCollection"]:
+        for process in self:
+            if process.execution_time > self.schedule_time:
+                raise ValueError(
+                    f"{process} has execution time greater than the schedule time"
+                )
+
+        cell_assignment: dict[int, ProcessCollection] = {}
+        exclusion_graph = self.create_exclusion_graph_from_execution_time()
+
+        nodes = list(exclusion_graph.nodes())
+        edges = list(exclusion_graph.edges())
+
+        if max_colors is None:
+            # get an initial estimate using NetworkX greedy graph coloring
+            coloring = nx.coloring.greedy_color(
+                exclusion_graph, strategy="saturation_largest_first"
+            )
+            max_colors = len(set(coloring.values()))
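+            # the greedy coloring is feasible, so its color count is a safe
+            # upper bound on the number of memories needed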
+        colors = range(max_colors)
+
+        # find the minimum number of colors (memories) needed
+
+        # binary variables:
+        #   x[node, color] - whether node is colored in a certain color
+        #   c[color] - whether color is used
+        x = LpVariable.dicts("x", (nodes, colors), cat=LpBinary)
+        c = LpVariable.dicts("c", colors, cat=LpBinary)
+        problem = LpProblem()
+        problem += lpSum(c[i] for i in colors)
+
+        # constraints:
+        #   (1) - nodes have exactly one color
+        #   (2) - adjacent nodes cannot have the same color
+        #   (3) - only permit assignments if the color is used
+        #   (4) - reduce the solution space by pre-coloring a maximal clique
+        #   (5) - only count a color as used if some node is assigned to it
+        #   (6) - reduce the solution space by ignoring the symmetry caused
+        #       by permuting the colors
+        for node in nodes:
+            problem += lpSum(x[node][i] for i in colors) == 1
+        for u, v in edges:
+            for color in colors:
+                problem += x[u][color] + x[v][color] <= 1
+        for node in nodes:
+            for color in colors:
+                problem += x[node][color] <= c[color]
+        max_clique = next(nx.find_cliques(exclusion_graph))
+        for color, node in enumerate(max_clique):
+            problem += x[node][color] == 1
+            problem += c[color] == 1
+        for color in colors:
+            problem += c[color] <= lpSum(x[node][color] for node in nodes)
+        for color in colors[:-1]:
+            problem += c[color + 1] <= c[color]
+
+        if solver is None:
+            solver = PULP_CBC_CMD()
+
+        status = problem.solve(solver)
+
+        if status != LpStatusOptimal:
+            raise ValueError(
+                "Optimal solution could not be found via ILP, use another method."
+            )
+
+        node_colors = {}
+        for node in nodes:
+            for i in colors:
+                if value(x[node][i]) == 1:
+                    node_colors[node] = i
+
+        # reduce the solution by removing unused colors
+        sorted_unique_values = sorted(set(node_colors.values()))
+        coloring_mapping = {val: i for i, val in enumerate(sorted_unique_values)}
+        coloring = {key: coloring_mapping[node_colors[key]] for key in node_colors}
+
+        for process, cell in coloring.items():
+            if cell not in cell_assignment:
+                cell_assignment[cell] = ProcessCollection([], self._schedule_time)
+            cell_assignment[cell].add_process(process)
+        return list(cell_assignment.values())
+
+    def _greedy_graph_color_assignment(
         self,
         coloring_strategy: str = "saturation_largest_first",
         *,
@@ -1869,7 +1972,6 @@ class ProcessCollection:
         """
         for process in self:
             if process.execution_time > self.schedule_time:
-                # Can not assign process to any cell
                 raise ValueError(
                     f"{process} has execution time greater than the schedule time"
                 )
diff --git a/b_asic/scheduler.py b/b_asic/scheduler.py
index 05fe476311c956dec56d84770db666dc5ab61ae5..88ce2c8465be62c5d4f9a1764768f76fe23bbf19 100644
--- a/b_asic/scheduler.py
+++ b/b_asic/scheduler.py
@@ -348,6 +348,11 @@ class ListScheduler(Scheduler):
     List-based scheduler that schedules the operations while complying to the given
     constraints.
 
+    .. admonition:: Important
+
+       This scheduler only works on non-recursive SFGs.
+       For recursive SFGs, use :class:`RecursiveListScheduler` instead.
+
     Parameters
     ----------
     sort_order : tuple[tuple[int, bool]]
@@ -411,12 +416,6 @@ class ListScheduler(Scheduler):
         log.debug("Scheduler initializing")
         self._initialize_scheduler(schedule)
 
-        if self._sfg.loops and self._schedule.cyclic:
-            raise ValueError(
-                "ListScheduler does not support cyclic scheduling of "
-                "recursive algorithms. Use RecursiveListScheduler instead."
-            )
-
         if self._input_times:
             self._place_inputs_on_given_times()
             self._remaining_ops = [
@@ -504,7 +503,8 @@ class ListScheduler(Scheduler):
         for op_id in ready_ops:
             reads = 0
             for op_input in self._sfg.find_by_id(op_id).inputs:
-                source_op = op_input.signals[0].source.operation
+                source_port = op_input.signals[0].source
+                source_op = source_port.operation
                 if isinstance(source_op, DontCare):
                     continue
                 if isinstance(source_op, Delay):
@@ -512,7 +512,8 @@ class ListScheduler(Scheduler):
                     continue
                 if (
                     self._schedule.start_times[source_op.graph_id]
-                    != self._current_time - 1
+                    + source_port.latency_offset
+                    != self._current_time + op_input.latency_offset
                 ):
                     reads += 1
             op_reads[op_id] = reads
@@ -584,14 +585,16 @@ class ListScheduler(Scheduler):
         if self._max_concurrent_reads:
             tmp_used_reads = {}
             for op_input in op.inputs:
-                source_op = op_input.signals[0].source.operation
+                source_port = op_input.signals[0].source
+                source_op = source_port.operation
                 if isinstance(source_op, (Delay, DontCare)):
                     continue
+                input_read_time = self._current_time + op_input.latency_offset
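+                # a value read at the same time as it is produced goes over a
+                # direct interconnect and does not count as a memory read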
                 if (
                     self._schedule.start_times[source_op.graph_id]
-                    != self._current_time - 1
+                    + source_port.latency_offset
+                    != input_read_time
                 ):
-                    input_read_time = self._current_time + op_input.latency_offset
                     if self._schedule._schedule_time:
                         input_read_time %= self._schedule._schedule_time
 
@@ -821,14 +824,15 @@ class ListScheduler(Scheduler):
 
     def _update_port_reads(self, next_op: "Operation") -> None:
         for input_port in next_op.inputs:
-            source_op = input_port.signals[0].source.operation
+            source_port = input_port.signals[0].source
+            source_op = source_port.operation
+            time = self._current_time + input_port.latency_offset
             if (
-                not isinstance(source_op, DontCare)
-                and not isinstance(source_op, Delay)
+                not isinstance(source_op, (DontCare, Delay))
                 and self._schedule.start_times[source_op.graph_id]
-                != self._current_time - 1
+                + source_port.latency_offset
+                != time
             ):
-                time = self._current_time + input_port.latency_offset
                 if self._schedule._schedule_time:
                     time %= self._schedule._schedule_time
 
diff --git a/examples/memory_constrained_scheduling.py b/examples/memory_constrained_scheduling.py
index fe87b123c87243c4562846dd0c7958ef0cc4e454..d7fc95e83702e50fb1d3734e6d43b2cdb366923c 100644
--- a/examples/memory_constrained_scheduling.py
+++ b/examples/memory_constrained_scheduling.py
@@ -73,7 +73,7 @@ for i, mem in enumerate(mem_vars_set):
     memory = Memory(mem, memory_type="RAM", entity_name=f"memory{i}")
     memories.append(memory)
     mem.show(title=f"{memory.entity_name}")
-    memory.assign("graph_color")
+    memory.assign("greedy_graph_color")
     memory.show_content(title=f"Assigned {memory.entity_name}")
 
 direct.show(title="Direct interconnects")
@@ -130,7 +130,7 @@ for i, mem in enumerate(mem_vars_set):
     memory = Memory(mem, memory_type="RAM", entity_name=f"memory{i}")
     memories.append(memory)
     mem.show(title=f"{memory.entity_name}")
-    memory.assign("graph_color")
+    memory.assign("greedy_graph_color")
     memory.show_content(title=f"Assigned {memory.entity_name}")
 
 direct.show(title="Direct interconnects")
diff --git a/test/integration/test_sfg_to_architecture.py b/test/integration/test_sfg_to_architecture.py
index 7ac225505c6c37bb450adb550676e678617f91a8..821cdcfdf3e2f5e31b9ac426a466a41ffde7256a 100644
--- a/test/integration/test_sfg_to_architecture.py
+++ b/test/integration/test_sfg_to_architecture.py
@@ -72,7 +72,7 @@ def test_pe_constrained_schedule():
     # for i, mem in enumerate(mem_vars_set):
     #     memory = Memory(mem, memory_type="RAM", entity_name=f"memory{i}")
     #     memories.append(memory)
-    #     memory.assign("graph_color")
+    #     memory.assign("greedy_graph_color")
 
     # arch = Architecture(
     #     {mads0, mads1, reciprocal_pe, pe_in, pe_out},
@@ -137,7 +137,7 @@ def test_pe_and_memory_constrained_schedule():
     for i, mem in enumerate(mem_vars_set):
         memory = Memory(mem, memory_type="RAM", entity_name=f"memory{i}")
         memories.append(memory)
-        memory.assign("graph_color")
+        memory.assign("greedy_graph_color")
 
     arch = Architecture(
         {bf_pe, mul_pe, pe_in, pe_out},
@@ -172,7 +172,7 @@ def test_left_edge(mem_variables_fft32):
     for i, mem in enumerate(mem_vars_set):
         memory = Memory(mem, memory_type="RAM", entity_name=f"memory{i}")
         memories.append(memory)
-        memory.assign("graph_color")
+        memory.assign("greedy_graph_color")
 
     arch = Architecture(
         processing_elements,
@@ -199,7 +199,7 @@ def test_min_pe_to_mem(mem_variables_fft32):
     for i, mem in enumerate(mem_vars_set):
         memory = Memory(mem, memory_type="RAM", entity_name=f"memory{i}")
         memories.append(memory)
-        memory.assign("graph_color")
+        memory.assign("greedy_graph_color")
 
     arch = Architecture(
         processing_elements,
@@ -226,7 +226,7 @@ def test_min_mem_to_pe(mem_variables_fft32):
     for i, mem in enumerate(mem_vars_set):
         memory = Memory(mem, memory_type="RAM", entity_name=f"memory{i}")
         memories.append(memory)
-        memory.assign("graph_color")
+        memory.assign("greedy_graph_color")
 
     arch = Architecture(
         processing_elements,
@@ -253,7 +253,7 @@ def test_greedy_graph_coloring(mem_variables_fft32):
     for i, mem in enumerate(mem_vars_set):
         memory = Memory(mem, memory_type="RAM", entity_name=f"memory{i}")
         memories.append(memory)
-        memory.assign("graph_color")
+        memory.assign("greedy_graph_color")
 
     arch = Architecture(
         processing_elements,
@@ -280,7 +280,7 @@ def test_equitable_color(mem_variables_fft32):
     for i, mem in enumerate(mem_vars_set):
         memory = Memory(mem, memory_type="RAM", entity_name=f"memory{i}")
         memories.append(memory)
-        memory.assign("graph_color")
+        memory.assign("greedy_graph_color")
 
     arch = Architecture(
         processing_elements,
@@ -306,7 +306,7 @@ def test_ilp_color(mem_variables_fft16):
     for i, mem in enumerate(mem_vars_set):
         memory = Memory(mem, memory_type="RAM", entity_name=f"memory{i}")
         memories.append(memory)
-        memory.assign("graph_color")
+        memory.assign("ilp_graph_color")
 
     arch = Architecture(
         processing_elements,
@@ -333,7 +333,7 @@ def test_ilp_color_with_colors_given(mem_variables_fft16):
     for i, mem in enumerate(mem_vars_set):
         memory = Memory(mem, memory_type="RAM", entity_name=f"memory{i}")
         memories.append(memory)
-        memory.assign("graph_color")
+        memory.assign("ilp_graph_color")
 
     arch = Architecture(
         processing_elements,
@@ -360,7 +360,7 @@ def test_ilp_color_input_mux(mem_variables_fft16):
     for i, mem in enumerate(mem_vars_set):
         memory = Memory(mem, memory_type="RAM", entity_name=f"memory{i}")
         memories.append(memory)
-        memory.assign("graph_color")
+        memory.assign("ilp_graph_color")
 
     arch = Architecture(
         processing_elements,
@@ -387,7 +387,7 @@ def test_ilp_color_output_mux(mem_variables_fft16):
     for i, mem in enumerate(mem_vars_set):
         memory = Memory(mem, memory_type="RAM", entity_name=f"memory{i}")
         memories.append(memory)
-        memory.assign("graph_color")
+        memory.assign("ilp_graph_color")
 
     arch = Architecture(
         processing_elements,
@@ -415,7 +415,7 @@ def test_ilp_color_total_mux(mem_variables_fft16):
     for i, mem in enumerate(mem_vars_set):
         memory = Memory(mem, memory_type="RAM", entity_name=f"memory{i}")
         memories.append(memory)
-        memory.assign("graph_color")
+        memory.assign("ilp_graph_color")
 
     arch = Architecture(
         processing_elements,
@@ -480,7 +480,7 @@ def test_ilp_resource_algorithm_custom_solver():
     for i, mem in enumerate(mem_vars_set):
         memory = Memory(mem, memory_type="RAM", entity_name=f"memory{i}")
         memories.append(memory)
-        memory.assign("graph_color")
+        memory.assign("ilp_graph_color")
 
     arch = Architecture(
         processing_elements,
diff --git a/test/unit/test_list_schedulers.py b/test/unit/test_list_schedulers.py
index 1d4c1bd5ff177710afe66bd59eceefa9402a8466..8a0a5f0115ab42675fd58121a16bb4816ef9da2a 100644
--- a/test/unit/test_list_schedulers.py
+++ b/test/unit/test_list_schedulers.py
@@ -4,6 +4,7 @@ import numpy as np
 import pytest
 from scipy import signal
 
+from b_asic.architecture import Architecture, Memory, ProcessingElement
 from b_asic.core_operations import (
     MADS,
     Addition,
@@ -1531,6 +1532,8 @@ class TestHybridScheduler:
         }
         assert schedule.schedule_time == 6
 
+        _validate_recreated_sfg_fft(schedule, points=4, delays=[0, 0, 1, 1])
+
         _, mem_vars = schedule.get_memory_variables().split_on_length()
         assert mem_vars.read_ports_bound() <= 2
         assert mem_vars.write_ports_bound() <= 3
@@ -1784,37 +1787,167 @@ class TestListScheduler:
             ),
         )
 
-    def test_cyclic_and_recursive_loops(self):
-        N = 3
-        Wc = 0.2
-        b, a = signal.butter(N, Wc, btype="lowpass", output="ba")
-        sfg = direct_form_1_iir(b, a)
+    def test_execution_time_not_one_port_constrained(self):
+        sfg = radix_2_dif_fft(points=16)
 
-        sfg.set_latency_of_type_name(ConstantMultiplication.type_name(), 2)
-        sfg.set_execution_time_of_type_name(ConstantMultiplication.type_name(), 1)
-        sfg.set_latency_of_type_name(Addition.type_name(), 3)
-        sfg.set_execution_time_of_type_name(Addition.type_name(), 1)
+        sfg.set_latency_of_type(Butterfly, 3)
+        sfg.set_latency_of_type(ConstantMultiplication, 10)
+        sfg.set_execution_time_of_type(Butterfly, 2)
+        sfg.set_execution_time_of_type(ConstantMultiplication, 10)
 
-        resources = {
-            Addition.type_name(): 1,
-            ConstantMultiplication.type_name(): 1,
-            Input.type_name(): 1,
-            Output.type_name(): 1,
-        }
+        resources = {Butterfly.type_name(): 1, ConstantMultiplication.type_name(): 1}
 
-        with pytest.raises(
-            ValueError,
-            match="ListScheduler does not support cyclic scheduling of recursive algorithms. Use RecursiveListScheduler instead.",
-        ):
-            Schedule(
-                sfg,
-                scheduler=ListScheduler(
-                    sort_order=((1, True), (3, False), (4, False)),
-                    max_resources=resources,
-                ),
-                cyclic=True,
-                schedule_time=sfg.iteration_period_bound(),
-            )
+        schedule = Schedule(
+            sfg,
+            scheduler=ListScheduler(
+                sort_order=((2, True), (3, True)),
+                max_resources=resources,
+                max_concurrent_reads=2,
+                max_concurrent_writes=2,
+            ),
+        )
+
+        direct, mem_vars = schedule.get_memory_variables().split_on_length()
+        assert mem_vars.read_ports_bound() == 2
+        assert mem_vars.write_ports_bound() == 2
+        _validate_recreated_sfg_fft(schedule, points=16)
+
+        schedule = Schedule(
+            sfg,
+            scheduler=ListScheduler(
+                sort_order=((1, True), (3, False)),
+                max_resources=resources,
+                max_concurrent_reads=2,
+                max_concurrent_writes=2,
+            ),
+        )
+
+        direct, mem_vars = schedule.get_memory_variables().split_on_length()
+        assert mem_vars.read_ports_bound() == 2
+        assert mem_vars.write_ports_bound() == 2
+        _validate_recreated_sfg_fft(schedule, points=16)
+
+        operations = schedule.get_operations()
+        bfs = operations.get_by_type_name(Butterfly.type_name())
+        const_muls = operations.get_by_type_name(ConstantMultiplication.type_name())
+        inputs = operations.get_by_type_name(Input.type_name())
+        outputs = operations.get_by_type_name(Output.type_name())
+
+        bf_pe = ProcessingElement(bfs, entity_name="bf1")
+        mul_pe = ProcessingElement(const_muls, entity_name="mul1")
+
+        pe_in = ProcessingElement(inputs, entity_name="input")
+        pe_out = ProcessingElement(outputs, entity_name="output")
+
+        processing_elements = [bf_pe, mul_pe, pe_in, pe_out]
+
+        mem_vars = schedule.get_memory_variables()
+        direct, mem_vars = mem_vars.split_on_length()
+
+        mem_vars_set = mem_vars.split_on_ports(
+            read_ports=1,
+            write_ports=1,
+            total_ports=2,
+            strategy="ilp_graph_color",
+            processing_elements=processing_elements,
+            max_colors=2,
+        )
+
+        memories = []
+        for i, mem in enumerate(mem_vars_set):
+            memory = Memory(mem, memory_type="RAM", entity_name=f"memory{i}")
+            memories.append(memory)
+            memory.assign("greedy_graph_color")
+
+        arch = Architecture(
+            processing_elements,
+            memories,
+            direct_interconnects=direct,
+        )
+        assert len(arch.processing_elements) == 4
+        assert len(arch.memories) == 2
+
+    def test_execution_time_not_one_and_latency_offsets_port_constrained(self):
+        sfg = radix_2_dif_fft(points=16)
+
+        sfg.set_latency_offsets_of_type(
+            Butterfly, {"in0": 0, "in1": 1, "out0": 2, "out1": 3}
+        )
+        sfg.set_latency_of_type(ConstantMultiplication, 7)
+        sfg.set_execution_time_of_type(Butterfly, 2)
+        sfg.set_execution_time_of_type(ConstantMultiplication, 5)
+
+        resources = {Butterfly.type_name(): 1, ConstantMultiplication.type_name(): 1}
+
+        schedule = Schedule(
+            sfg,
+            scheduler=ListScheduler(
+                sort_order=((2, True), (3, True)),
+                max_resources=resources,
+                max_concurrent_reads=2,
+                max_concurrent_writes=2,
+            ),
+        )
+
+        direct, mem_vars = schedule.get_memory_variables().split_on_length()
+        assert mem_vars.read_ports_bound() == 2
+        assert mem_vars.write_ports_bound() == 2
+        _validate_recreated_sfg_fft(schedule, points=16)
+
+        schedule = Schedule(
+            sfg,
+            scheduler=ListScheduler(
+                sort_order=((1, True), (3, False)),
+                max_resources=resources,
+                max_concurrent_reads=2,
+                max_concurrent_writes=2,
+            ),
+        )
+
+        direct, mem_vars = schedule.get_memory_variables().split_on_length()
+        assert mem_vars.read_ports_bound() == 2
+        assert mem_vars.write_ports_bound() == 2
+        _validate_recreated_sfg_fft(schedule, points=16)
+
+        operations = schedule.get_operations()
+        bfs = operations.get_by_type_name(Butterfly.type_name())
+        const_muls = operations.get_by_type_name(ConstantMultiplication.type_name())
+        inputs = operations.get_by_type_name(Input.type_name())
+        outputs = operations.get_by_type_name(Output.type_name())
+
+        bf_pe = ProcessingElement(bfs, entity_name="bf1")
+        mul_pe = ProcessingElement(const_muls, entity_name="mul1")
+
+        pe_in = ProcessingElement(inputs, entity_name="input")
+        pe_out = ProcessingElement(outputs, entity_name="output")
+
+        processing_elements = [bf_pe, mul_pe, pe_in, pe_out]
+
+        mem_vars = schedule.get_memory_variables()
+        direct, mem_vars = mem_vars.split_on_length()
+
+        mem_vars_set = mem_vars.split_on_ports(
+            read_ports=1,
+            write_ports=1,
+            total_ports=2,
+            strategy="ilp_graph_color",
+            processing_elements=processing_elements,
+            max_colors=2,
+        )
+
+        memories = []
+        for i, mem in enumerate(mem_vars_set):
+            memory = Memory(mem, memory_type="RAM", entity_name=f"memory{i}")
+            memories.append(memory)
+            memory.assign("greedy_graph_color")
+
+        arch = Architecture(
+            processing_elements,
+            memories,
+            direct_interconnects=direct,
+        )
+        assert len(arch.processing_elements) == 4
+        assert len(arch.memories) == 2
 
 
 class TestRecursiveListScheduler:
@@ -1982,7 +2115,7 @@ def _validate_recreated_sfg_fft(
     # constant input -> impulse (with weight=points) output
     sim = Simulation(schedule.sfg, [Constant() for i in range(points)])
     sim.run_for(128)
-    assert np.allclose(sim.results["0"], points)
+    assert np.allclose(sim.results["0"][delays[0] :], points)
     for i in range(1, points):
         assert np.all(np.isclose(sim.results[str(i)][delays[i] :], 0))
 
diff --git a/test/unit/test_resources.py b/test/unit/test_resources.py
index e7a93ba96b53e4ca659cf333d053980f681f8722..e64f4cea2ee541d93a28884a779b92c46461c826 100644
--- a/test/unit/test_resources.py
+++ b/test/unit/test_resources.py
@@ -160,7 +160,7 @@ class TestProcessCollectionPlainMemoryVariable:
         collection = generate_matrix_transposer(4, min_lifetime=5)
         assignment_left_edge = collection._left_edge_assignment()
         assignment_graph_color = collection.split_on_execution_time(
-            strategy="graph_color", coloring_strategy="saturation_largest_first"
+            strategy="greedy_graph_color", coloring_strategy="saturation_largest_first"
         )
         assert len(assignment_left_edge) == 18
         assert len(assignment_graph_color) == 16
@@ -182,7 +182,9 @@ class TestProcessCollectionPlainMemoryVariable:
             collection = generate_matrix_transposer(
                 rows=rows, cols=cols, min_lifetime=0
             )
-            assignment = collection.split_on_execution_time(strategy="graph_color")
+            assignment = collection.split_on_execution_time(
+                strategy="greedy_graph_color"
+            )
             collection.generate_memory_based_storage_vhdl(
                 filename=(
                     "b_asic/codegen/testbench/"
@@ -334,12 +336,12 @@ class TestProcessCollectionPlainMemoryVariable:
         assert exclusion_graph.degree(p2) == 1
         assert exclusion_graph.degree(p3) == 3
 
-    def test_left_edge_maximum_lifetime(self):
+    def test_split_on_execution_time_maximum_lifetime(self):
         a = PlainMemoryVariable(2, 0, {0: 1}, "cmul1.0")
         b = PlainMemoryVariable(4, 0, {0: 7}, "cmul4.0")
         c = PlainMemoryVariable(5, 0, {0: 4}, "cmul5.0")
         collection = ProcessCollection([a, b, c], schedule_time=7, cyclic=True)
-        for strategy in ("graph_color", "left_edge"):
+        for strategy in ("greedy_graph_color", "left_edge", "ilp_graph_color"):
             assignment = collection.split_on_execution_time(strategy)
             assert len(assignment) == 2
             a_idx = 0 if a in assignment[0] else 1
@@ -349,7 +351,7 @@ class TestProcessCollectionPlainMemoryVariable:
     def test_split_on_execution_lifetime_assert(self):
         a = PlainMemoryVariable(3, 0, {0: 10}, "MV0")
         collection = ProcessCollection([a], schedule_time=9, cyclic=True)
-        for strategy in ("graph_color", "left_edge"):
+        for strategy in ("greedy_graph_color", "left_edge", "ilp_graph_color"):
             with pytest.raises(
                 ValueError,
                 match="MV0 has execution time greater than the schedule time",