diff --git a/b_asic/resources.py b/b_asic/resources.py
index 95495672aeffa07abc7c767d4c08ab3e117fd269..cf74a1ea1d6b1e65ecbfe863bd461543d99978a1 100644
--- a/b_asic/resources.py
+++ b/b_asic/resources.py
@@ -754,7 +754,7 @@ class ProcessCollection:
                     exclusion_graph.add_edge(process1, process2)
         return exclusion_graph

-    def split_execution_time(
+    def split_on_execution_time(
         self,
         heuristic: str = "graph_color",
         coloring_strategy: str = "saturation_largest_first",
@@ -795,7 +795,7 @@ class ProcessCollection:
         else:
             raise ValueError(f"Invalid heuristic '{heuristic}'")

-    def split_ports(
+    def split_on_ports(
         self,
         heuristic: str = "graph_color",
         read_ports: Optional[int] = None,
@@ -803,7 +803,9 @@ class ProcessCollection:
         total_ports: Optional[int] = None,
     ) -> Set["ProcessCollection"]:
         """
-        Split this process storage based on concurrent read/write times according to some heuristic.
+        Split this process storage based on concurrent read/write times.
+
+        Different heuristic methods can be used.

         Parameters
         ----------
@@ -963,6 +965,7 @@ class ProcessCollection:
     def left_edge_cell_assignment(self) -> Dict[int, "ProcessCollection"]:
         """
         Perform cell assignment of the processes in this collection using the left-edge algorithm.
+
         Two or more processes can share a single cell if, and only if, they have no overlaping time alive.
         Returns
         -------
@@ -1112,18 +1115,22 @@ class ProcessCollection:
             input_sync=input_sync,
         )

-    def split_on_length(self, length: int = 0):
+    def split_on_length(
+        self, length: int = 0
+    ) -> Tuple["ProcessCollection", "ProcessCollection"]:
         """
-        Split the current ProcessCollection into two new ProcessCollection based on exectuion time length.
+        Split into two new ProcessCollections based on execution time length.

         Parameters
         ----------
         length : int, default: 0
-            The execution time length to split on. Length is inclusive for the smaller collection.
+            The execution time length to split on. Length is inclusive for the smaller
+            collection.

         Returns
         -------
-        A tuple of two ProcessCollections, one with short than or equal execution times and one with greater execution times.
+        A tuple of two ProcessCollections, one with shorter than or equal execution
+        times and one with longer execution times.
""" short = set() long = set() diff --git a/test/test_architecture.py b/test/test_architecture.py index ba9f910f6d590850f1626ce5e7352a8379d43153..4ff42db11f381c722a1cae96e1e9a2772b70a6fd 100644 --- a/test/test_architecture.py +++ b/test/test_architecture.py @@ -1,7 +1,6 @@ from itertools import chain -from typing import List, Set, cast +from typing import List, cast -import matplotlib.pyplot as plt import pytest from b_asic.architecture import Architecture, Memory, ProcessingElement @@ -9,7 +8,6 @@ from b_asic.core_operations import Addition, ConstantMultiplication from b_asic.process import MemoryVariable, OperatorProcess from b_asic.resources import ProcessCollection from b_asic.schedule import Schedule -from b_asic.signal_flow_graph import SFG from b_asic.special_operations import Input, Output @@ -32,10 +30,10 @@ def test_extract_processing_elements(schedule_direct_form_iir_lp_filter: Schedul operations = schedule_direct_form_iir_lp_filter.get_operations() # Split into new process collections on overlapping execution time - adders = operations.get_by_type_name(Addition.type_name()).split_execution_time() + adders = operations.get_by_type_name(Addition.type_name()).split_on_execution_time() const_mults = operations.get_by_type_name( ConstantMultiplication.type_name() - ).split_execution_time() + ).split_on_execution_time() # List of ProcessingElements processing_elements: List[ProcessingElement] = [] @@ -69,15 +67,15 @@ def test_architecture(schedule_direct_form_iir_lp_filter: Schedule): operations = schedule_direct_form_iir_lp_filter.get_operations() # Split operations further into chunks - adders = operations.get_by_type_name(Addition.type_name()).split_execution_time() + adders = operations.get_by_type_name(Addition.type_name()).split_on_execution_time() assert len(adders) == 1 const_mults = operations.get_by_type_name( ConstantMultiplication.type_name() - ).split_execution_time() + ).split_on_execution_time() assert len(const_mults) == 1 - inputs = operations.get_by_type_name(Input.type_name()).split_execution_time() + inputs = operations.get_by_type_name(Input.type_name()).split_on_execution_time() assert len(inputs) == 1 - outputs = operations.get_by_type_name(Output.type_name()).split_execution_time() + outputs = operations.get_by_type_name(Output.type_name()).split_on_execution_time() assert len(outputs) == 1 # Create necessary processing elements @@ -93,7 +91,7 @@ def test_architecture(schedule_direct_form_iir_lp_filter: Schedule): # Create Memories from the memory variables memories: List[Memory] = [ - Memory(pc) for pc in mvs.split_ports(read_ports=1, write_ports=1) + Memory(pc) for pc in mvs.split_on_ports(read_ports=1, write_ports=1) ] assert len(memories) == 1 for i, memory in enumerate(memories): diff --git a/test/test_resources.py b/test/test_resources.py index 8d84607bd08218eed2e7f174d798dc0173794b47..1d8e597b1ed3770803a68be5b870fc992f2ef1c5 100644 --- a/test/test_resources.py +++ b/test/test_resources.py @@ -27,7 +27,7 @@ class TestProcessCollectionPlainMemoryVariable: return fig def test_split_memory_variable(self, simple_collection: ProcessCollection): - collection_split = simple_collection.split_ports( + collection_split = simple_collection.split_on_ports( heuristic="graph_color", read_ports=1, write_ports=1, total_ports=2 ) assert len(collection_split) == 3 @@ -113,15 +113,15 @@ class TestProcessCollectionPlainMemoryVariable: def test_interleaver_issue175(self): with open('test/fixtures/interleaver-two-port-issue175.p', 'rb') as f: interleaver_collection: 
-        assert len(interleaver_collection.split_ports(total_ports=1)) == 2
+        assert len(interleaver_collection.split_on_ports(total_ports=1)) == 2

     def test_generate_random_interleaver(self):
         for _ in range(10):
             for size in range(5, 20, 5):
                 collection = generate_random_interleaver(size)
-                assert len(collection.split_ports(read_ports=1, write_ports=1)) == 1
+                assert len(collection.split_on_ports(read_ports=1, write_ports=1)) == 1
                 if any(var.execution_time for var in collection.collection):
-                    assert len(collection.split_ports(total_ports=1)) == 2
+                    assert len(collection.split_on_ports(total_ports=1)) == 2

     def test_len_process_collection(self, simple_collection: ProcessCollection):
         assert len(simple_collection) == 7
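
Reviewer note: a minimal usage sketch of the renamed ProcessCollection API after this patch (split_on_execution_time, split_on_ports, split_on_length). The generate_random_interleaver helper and its import path are assumptions taken from the test suite, not part of this diff; the variable names and split results below are illustrative only.

    from b_asic.resources import ProcessCollection
    from b_asic.research.interleaver import generate_random_interleaver  # assumed import path

    collection: ProcessCollection = generate_random_interleaver(10)

    # Previously split_ports(): splits into sub-collections, each of which can be
    # mapped to a memory with the given read/write port limits.
    memories = collection.split_on_ports(read_ports=1, write_ports=1)

    # Previously split_execution_time(): separates processes whose execution times
    # overlap, using graph coloring by default.
    concurrent = collection.split_on_execution_time(heuristic="graph_color")

    # New return annotation: a (short, long) tuple split at the given execution-time
    # length, inclusive for the shorter collection.
    short, long = collection.split_on_length(length=0)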