diff --git a/b_asic/core_operations.py b/b_asic/core_operations.py
index c822fe8f4843c26f5dfabc33a48f3cbe92d74eef..6459948ca58a879a7853ea2e5846509d567ee79e 100644
--- a/b_asic/core_operations.py
+++ b/b_asic/core_operations.py
@@ -22,7 +22,7 @@ class Constant(AbstractOperation):
 
     def __init__(self, value: Number = 0, name: Name = ""):
         """Construct a Constant operation with the given value."""
-        super().__init__(input_count=0, output_count=1, name=name)
+        super().__init__(input_count=0, output_count=1, name=name, latency_offsets={'out0': 0})
         self.set_param("value", value)
 
     @classmethod
diff --git a/b_asic/operation.py b/b_asic/operation.py
index 2ce783963d883f65c1ec29d3dddb7bf0eeccbd6f..c0985f9f6db1bd0c02ee143ad56070f1831310b1 100644
--- a/b_asic/operation.py
+++ b/b_asic/operation.py
@@ -12,7 +12,7 @@
 import collections
 from abc import abstractmethod
 from numbers import Number
-from typing import NewType, List, Dict, Sequence, Iterable, Mapping, MutableMapping, Optional, Any, Set, Union
+from typing import NewType, List, Dict, Sequence, Iterable, Mapping, MutableMapping, Optional, Any, Set, Union, Tuple
 
 ResultKey = NewType("ResultKey", str)
@@ -248,6 +248,38 @@ class Operation(GraphComponent, SignalSourceProvider):
         """
         raise NotImplementedError
 
+    @property
+    @abstractmethod
+    def execution_time(self) -> int:
+        """Get the execution time of the operation, which is the time it takes before the
+        processing element implementing the operation can be reused for starting another operation.
+        """
+        raise NotImplementedError
+
+    @execution_time.setter
+    @abstractmethod
+    def execution_time(self, execution_time: int) -> None:
+        """Sets the execution time of the operation to the specified integer
+        value. The execution time cannot be a negative integer.
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def get_plot_coordinates(self) -> Tuple[List[List[Number]], List[List[Number]]]:
+        """Get a tuple containing coordinates for the two polygons outlining
+        the latency and execution time of the operation.
+        The polygons correspond to a start time of 0 and have a height of 1.
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def get_io_coordinates(self) -> Tuple[List[List[Number]], List[List[Number]]]:
+        """Get a tuple containing coordinates for inputs and outputs, respectively.
+        These map to the polygons and correspond to a start time of 0
+        and a height of 1.
+        """
+        raise NotImplementedError
+
 
 class AbstractOperation(Operation, AbstractGraphComponent):
     """Generic abstract operation base class.
@@ -258,6 +290,7 @@ class AbstractOperation(Operation, AbstractGraphComponent):
 
     _input_ports: List[InputPort]
     _output_ports: List[OutputPort]
+    _execution_time: Union[int, None]
 
     def __init__(self, input_count: int, output_count: int, name: Name = "",
                  input_sources: Optional[Sequence[Optional[SignalSourceProvider]]] = None,
                  latency: Optional[int] = None, latency_offsets: Optional[Dict[str, int]] = None):
         """Construct an operation with the given input/output count.
@@ -272,6 +305,7 @@ class AbstractOperation(Operation, AbstractGraphComponent):
 
         self._input_ports = [InputPort(self, i) for i in range(input_count)]
         self._output_ports = [OutputPort(self, i) for i in range(output_count)]
+        self._execution_time = None
 
         # Connect given input sources, if any.
         if input_sources is not None:
@@ -523,6 +557,7 @@ class AbstractOperation(Operation, AbstractGraphComponent):
             new_component.input(i).latency_offset = inp.latency_offset
         for i, outp in enumerate(self.outputs):
             new_component.output(i).latency_offset = outp.latency_offset
+        new_component.execution_time = self._execution_time
         return new_component
 
     def inputs_required_for_output(self, output_index: int) -> Iterable[int]:
@@ -614,3 +649,49 @@ class AbstractOperation(Operation, AbstractGraphComponent):
         else:
             raise ValueError(
                 "Incorrectly formatted string, expected 'in' + index or 'out' + index")
+
+    @property
+    def execution_time(self) -> int:
+        if self._execution_time is None:
+            raise ValueError("No execution time specified.")
+        return self._execution_time
+
+    @execution_time.setter
+    def execution_time(self, execution_time: int) -> None:
+        assert execution_time is None or execution_time >= 0, "Negative execution time entered."
+        self._execution_time = execution_time
+
+    def get_plot_coordinates(self) -> Tuple[List[List[Number]], List[List[Number]]]:
+        return (self._get_plot_coordinates_for_latency(), self._get_plot_coordinates_for_execution_time())
+
+    def _get_plot_coordinates_for_execution_time(self) -> List[List[Number]]:
+        # Always a rectangle, but easier if coordinates are returned
+        return [[0, 0], [0, 1], [self.execution_time, 1], [self.execution_time, 0], [0, 0]]
+
+    def _get_plot_coordinates_for_latency(self) -> List[List[Number]]:
+        # Points for latency polygon
+        latency = []
+        # Remember starting point
+        start_point = [self.inputs[0].latency_offset, 0]
+        num_in = len(self.inputs)
+        latency.append(start_point)
+        for k in range(1, num_in):
+            latency.append([self.inputs[k-1].latency_offset, k/num_in])
+            latency.append([self.inputs[k].latency_offset, k/num_in])
+        latency.append([self.inputs[num_in-1].latency_offset, 1])
+
+        num_out = len(self.outputs)
+        latency.append([self.outputs[num_out-1].latency_offset, 1])
+        for k in reversed(range(1, num_out)):
+            latency.append([self.outputs[k].latency_offset, k/num_out])
+            latency.append([self.outputs[k-1].latency_offset, k/num_out])
+        latency.append([self.outputs[0].latency_offset, 0])
+        # Close the polygon
+        latency.append(start_point)
+
+        return latency
+
+    def get_io_coordinates(self) -> Tuple[List[List[Number]], List[List[Number]]]:
+        input_coords = [[self.inputs[k].latency_offset, (1+2*k)/(2*len(self.inputs))] for k in range(len(self.inputs))]
+        output_coords = [[self.outputs[k].latency_offset, (1+2*k)/(2*len(self.outputs))] for k in range(len(self.outputs))]
+        return (input_coords, output_coords)
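Reviewer note (not part of the patch): the new `execution_time` property and the `get_plot_coordinates()` / `get_io_coordinates()` hooks above are exercised by the tests added further down. The following is a minimal usage sketch based on those tests; the `from b_asic import Butterfly` import path is an assumption (the test files import operations from the top-level `b_asic` package).

    # Sketch only -- mirrors TestPlotCoordinates/TestIOCoordinates below.
    from b_asic import Butterfly  # assumed import path

    bfly = Butterfly(latency_offsets={'in0': 2, 'in1': 3, 'out0': 5, 'out1': 10})
    bfly.execution_time = 7  # time before the processing element can start a new operation

    latency_polygon, execution_polygon = bfly.get_plot_coordinates()
    input_coords, output_coords = bfly.get_io_coordinates()
    # input_coords  == [[2, 0.25], [3, 0.75]]
    # output_coords == [[5, 0.25], [10, 0.75]]
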
""" -from typing import Dict, List, Optional +from collections import defaultdict +from typing import Dict, List, Optional, Tuple +import matplotlib.pyplot as plt +from matplotlib.ticker import MaxNLocator +import numpy as np +from scipy import interpolate +import sys +import io from b_asic.signal_flow_graph import SFG from b_asic.graph_component import GraphID -from b_asic.operation import Operation +from b_asic.special_operations import Delay, Output class Schema: @@ -24,7 +31,7 @@ class Schema: """Construct a Schema from an SFG.""" self._sfg = sfg self._start_times = dict() - self._laps = dict() + self._laps = defaultdict(lambda: 0) self._cyclic = cyclic self._resolution = resolution @@ -34,12 +41,7 @@ class Schema: raise NotImplementedError( f"No algorithm with name: {scheduling_alg} defined.") - max_end_time = 0 - for op_id, op_start_time in self._start_times.items(): - op = self._sfg.find_by_id(op_id) - for outport in op.outputs: - max_end_time = max( - max_end_time, op_start_time + outport.latency_offset) + max_end_time = self._get_max_end_time() if not self._cyclic: if schedule_time is None: @@ -55,15 +57,153 @@ class Schema: assert op_id in self._start_times, "No operation with the specified op_id in this schema." return self._start_times[op_id] + def _get_max_end_time(self) -> int: + max_end_time = 0 + for op_id, op_start_time in self._start_times.items(): + op = self._sfg.find_by_id(op_id) + for outport in op.outputs: + max_end_time = max( + max_end_time, op_start_time + outport.latency_offset) + return max_end_time + def forward_slack(self, op_id: GraphID) -> int: - raise NotImplementedError + assert op_id in self._start_times, "No operation with the specified op_id in this schema." + slack = sys.maxsize + output_slacks = self._forward_slacks(op_id) + # Make more pythonic + for signal_slacks in output_slacks.values(): + for signal_slack in signal_slacks.values(): + slack = min(slack, signal_slack) + return slack + + def _forward_slacks(self, op_id: GraphID) -> Dict["OutputPort", Dict["Signal", int]]: + ret = dict() + start_time = self._start_times[op_id] + op = self._sfg.find_by_id(op_id) + for output_port in op.outputs: + output_slacks = dict() + available_time = start_time + output_port.latency_offset + + for signal in output_port.signals: + usage_time = (signal.destination.latency_offset + + self._start_times[signal.destination.operation.graph_id] + + self._schedule_time*self._laps[signal.graph_id]) + output_slacks[signal] = usage_time - available_time + ret[output_port] = output_slacks + return ret def backward_slack(self, op_id: GraphID) -> int: - raise NotImplementedError + assert op_id in self._start_times, "No operation with the specified op_id in this schema." 
+ slack = sys.maxsize + input_slacks = self._backward_slacks(op_id) + # Make more pythonic + for signal_slacks in input_slacks.values(): + for signal_slack in signal_slacks.values(): + slack = min(slack, signal_slack) + return slack + + def _backward_slacks(self, op_id: GraphID) -> Dict["OutputPort", Dict["Signal", int]]: + ret = dict() + start_time = self._start_times[op_id] + op = self._sfg.find_by_id(op_id) + for input_port in op.inputs: + input_slacks = dict() + usage_time = start_time + input_port.latency_offset + + for signal in input_port.signals: + available_time = (signal.source.latency_offset + + self._start_times[signal.source.operation.graph_id] - + self._schedule_time*self._laps[signal.graph_id]) + input_slacks[signal] = usage_time - available_time + ret[input_port] = input_slacks + return ret + + + def slacks(self, op_id: GraphID) -> Tuple[int, int]: + assert op_id in self._start_times, "No operation with the specified op_id in this schema." + return (self.backward_slack(op_id), self.forward_slack(op_id)) def print_slacks(self) -> None: raise NotImplementedError + def set_schedule_time(self, time: int) -> "Schema": + assert self._get_max_end_time() < time, "New schedule time to short." + self._schedule_time = time + return self + + @property + def schedule_time(self) -> int: + return self._schedule_time + + def increase_time_resolution(self, factor: int) -> "Schema": + raise NotImplementedError + + def decrease_time_resolution(self, factor: int) -> "Schema": + raise NotImplementedError + + def move_operation(self, op_id: GraphID, time: int) -> "Schema": + assert op_id in self._start_times, "No operation with the specified op_id in this schema." + + (backward_slack, forward_slack) = self.slacks(op_id) + if time < 0: + if -time > backward_slack: + raise ValueError + else: + if time > forward_slack: + raise ValueError + + tmp_start = self._start_times[op_id] + time + new_start = tmp_start % self._schedule_time + + # Update input laps + input_slacks = self._backward_slacks(op_id) + for in_port, signal_slacks in input_slacks.items(): + tmp_usage = tmp_start + in_port.latency_offset + new_usage = tmp_usage % self._schedule_time + for signal, signal_slack in signal_slacks.items(): + new_slack = signal_slack + time + old_laps = self._laps[signal.graph_id] + tmp_prev_available = tmp_usage - new_slack + prev_available = tmp_prev_available % self._schedule_time + laps = new_slack // self._schedule_time + if new_usage < prev_available: + laps += 1 + print([signal_slack, new_slack, old_laps, laps, new_usage, prev_available, tmp_usage, tmp_prev_available]) + self._laps[signal.graph_id] = laps + + # Update output laps + output_slacks = self._forward_slacks(op_id) + for out_port, signal_slacks in output_slacks.items(): + tmp_available = tmp_start + out_port.latency_offset + new_available = tmp_available % self._schedule_time + for signal, signal_slack in signal_slacks.items(): + new_slack = signal_slack - time + tmp_next_usage = tmp_available + new_slack + next_usage = tmp_next_usage % self._schedule_time + laps = new_slack // self._schedule_time + if next_usage < new_available: + laps += 1 + if new_available == 0 and new_slack > 0: + laps += 1 + self._laps[signal.graph_id] = laps + + + # Set new start time + self._start_times[op_id] = new_start + return self + + def _remove_delays(self) -> None: + delay_list = self._sfg.find_by_type_name(Delay.type_name()) + while delay_list: + delay_op = delay_list[0] + delay_input_id = delay_op.input(0).signals[0].graph_id + delay_output_ids = 
[sig.graph_id for sig in delay_op.output(0).signals] + self._sfg = self._sfg.remove_operation(delay_op.graph_id) + for output_id in delay_output_ids: + self._laps[output_id] += 1 + self._laps[delay_input_id] + del self._laps[delay_input_id] + delay_list = self._sfg.find_by_type_name(Delay.type_name()) + def _schedule_asap(self) -> None: pl = self._sfg.get_precedence_list() @@ -71,11 +211,19 @@ class Schema: print("Empty signal flow graph cannot be scheduled.") return - non_schedulable_ops = set((outp.operation.graph_id for outp in pl[0])) + non_schedulable_ops = set() + for outport in pl[0]: + op = outport.operation + if op.type_name() not in [Delay.type_name()]: + if op.graph_id not in self._start_times: + # Set start time of all operations in the first iter to 0 + self._start_times[op.graph_id] = 0 + else: + non_schedulable_ops.add(op.graph_id) for outport in pl[1]: op = outport.operation - if op not in self._start_times: + if op.graph_id not in self._start_times: # Set start time of all operations in the first iter to 0 self._start_times[op.graph_id] = 0 @@ -86,7 +234,6 @@ class Schema: # Schedule the operation if it doesn't have a start time yet. op_start_time = 0 for inport in op.inputs: - print(inport.operation.graph_id) assert len( inport.signals) == 1, "Error in scheduling, dangling input port detected." assert inport.signals[0].source is not None, "Error in scheduling, signal with no source detected." @@ -110,3 +257,106 @@ class Schema: op_start_time, op_start_time_from_in) self._start_times[op.graph_id] = op_start_time + for output in self._sfg.find_by_type_name(Output.type_name()): + source_port = output.inputs[0].signals[0].source + if source_port.operation.graph_id in non_schedulable_ops: + self._start_times[output.graph_id] = 0 + else: + self._start_times[output.graph_id] = self._start_times[source_port.operation.graph_id] + source_port.latency_offset + self._remove_delays() + + def _plot_schedule(self): + def _draw_arrow2(start, end): + if end[0] < start[0]: # Wrap around + plt.plot([start[0], self._schedule_time], [start[1], start[1]]) + plt.plot([0, end[0]], [end[1], end[1]]) + elif end[0] == start[0]: + plt.plot([start[0], start[0] + 0.2, start[0] + 0.2, start[0] - 0.2, start[0] - 0.2, start[0]], + [start[1], start[1], (start[1] + end[1])/2, (start[1] + end[1])/2, end[1], end[1]]) + else: + plt.plot([start[0], (start[0] + end[0])/2, (start[0] + end[0])/2, end[0]], + [start[1], start[1], end[1], end[1]]) + + def _draw_spline(x, y): + l = len(x) + t = np.linspace(0, 1, l-2, endpoint=True) + t = np.append([0, 0, 0], t) + t = np.append(t, [1, 1, 1]) + tck = [t, [x, y], 3] + u3 = np.linspace(0, 1, 50, endpoint=True) + out = interpolate.splev(u3, tck) + plt.plot(out[0], out[1], color='black') + + def _draw_arrow(start, end, name="", laps=0): + if end[0] < start[0] or laps > 0: # Wrap around + plt.plot([start[0], self._schedule_time + 0.2], [start[1], start[1]], color='black') + plt.plot([-0.2, end[0]], [end[1], end[1]], color='black') + plt.text(self._schedule_time + 0.2, start[1], name, verticalalignment='center') + plt.text(-0.2, end[1], "{}: {}".format(name, laps), verticalalignment='center', horizontalalignment='right') + + elif end[0] == start[0]: + _draw_spline([start[0], start[0] + 0.2, start[0] + 0.2, start[0] - 0.2, start[0] - 0.2, start[0]], + [start[1], start[1], (start[1] + end[1])/2, (start[1] + end[1])/2, end[1], end[1]]) + else: + _draw_spline([start[0], (start[0] + end[0])/2, (start[0] + end[0])/2, end[0]], + [start[1], start[1], end[1], end[1]]) + + def 
_draw_offset_arrow(start, end, start_offset, end_offset, name="", laps=0): + _draw_arrow([start[0] + start_offset[0], start[1] + start_offset[1]], + [end[0] + end_offset[0], end[1] + end_offset[1]], name=name, laps=laps) + + ypos = 0.5 + ytickpositions = [] + yticklabels = [] + plt.grid(zorder=0.5) + ypositions = dict() + for op_id, op_start_time in self._start_times.items(): + op = self._sfg.find_by_id(op_id) + latency_coords, execution_time_coords = op.get_plot_coordinates() + _x, _y = zip(*latency_coords) + x = np.array(_x) + y = np.array(_y) + plt.fill(x + op_start_time, y + ypos) + _x, _y = zip(*execution_time_coords) + x = np.array(_x) + y = np.array(_y) + plt.plot(x + op_start_time, y + ypos, color='black', linewidth=3, alpha=0.5) + ytickpositions.append(ypos + 0.5) + yticklabels.append(self._sfg.find_by_id(op_id).name) + ypositions[op_id] = ypos + ypos += 1.5 + + for op_id, op_start_time in self._start_times.items(): + op = self._sfg.find_by_id(op_id) + _, out_coords = op.get_io_coordinates() + source_ypos = ypositions[op_id] + for output_port in op.outputs: + for output_signal in output_port.signals: + dest_op = output_signal.destination.operation + dest_start_time = self._start_times[dest_op.graph_id] + dest_ypos = ypositions[dest_op.graph_id] + dest_in_coords, _ = output_signal.destination.operation.get_io_coordinates() + _draw_offset_arrow(out_coords[output_port.index], + dest_in_coords[output_signal.destination.index], + [op_start_time, source_ypos], + [dest_start_time, dest_ypos], name=op_id, + laps=self._laps[output_signal.graph_id]) + + plt.yticks(ytickpositions, yticklabels) + plt.axis([-1, self._schedule_time+1, 0, ypos]) + plt.gca().xaxis.set_major_locator(MaxNLocator(integer=True)) + plt.plot([0, 0], [0, ypos], linestyle='--', color='black') + plt.plot([self._schedule_time, self._schedule_time], [0, ypos], linestyle='--', color='black') + + def plot_schedule(self) -> None: + plt.figure() + self._plot_schedule() + plt.show() + + def _repr_svg_(self): + plt.figure() + self._plot_schedule() + f = io.StringIO() + plt.savefig(f, format='svg') + + return f.getvalue() diff --git a/b_asic/signal_flow_graph.py b/b_asic/signal_flow_graph.py index 3e6254d48bd239f1d749b9e88e66503308c19b59..15e624d33944269089d02b73adc45930d7147afb 100644 --- a/b_asic/signal_flow_graph.py +++ b/b_asic/signal_flow_graph.py @@ -10,6 +10,7 @@ from io import StringIO from queue import PriorityQueue import itertools as it from graphviz import Digraph +import re from graphviz.backend import FORMATS as GRAPHVIZ_FORMATS, ENGINES as GRAPHVIZ_ENGINES from b_asic.port import SignalSourceProvider, OutputPort @@ -31,10 +32,14 @@ class GraphIDGenerator: """Construct a GraphIDGenerator.""" self._next_id_number = defaultdict(lambda: id_number_offset) - def next_id(self, type_name: TypeName) -> GraphID: + def next_id(self, type_name: TypeName, used_ids: MutableSet = set()) -> GraphID: """Get the next graph id for a certain graph id type.""" self._next_id_number[type_name] += 1 - return type_name + str(self._next_id_number[type_name]) + new_id = type_name + str(self._next_id_number[type_name]) + while (new_id in used_ids): + self._next_id_number[type_name] += 1 + new_id = type_name + str(self._next_id_number[type_name]) + return new_id @property def id_number_offset(self) -> GraphIDNumber: @@ -89,6 +94,7 @@ class SFG(AbstractOperation): name=name, input_sources=input_sources) self._components_by_id = dict() + self._used_ids = set() self._components_by_name = defaultdict(list) self._components_dfs_order = [] 
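Reviewer note (not part of the patch): a short sketch of how the extended Schema API above is meant to be used, mirroring the TestSlacks/TestRescheduling cases added below. Here `sfg` is a hypothetical placeholder for an SFG with latencies assigned (the tests use the `precedence_sfg_delays` fixture), and "ADD3" is an operation name taken from that fixture.

    # Sketch only -- `sfg` is a placeholder SFG instance.
    from b_asic import Schema, Addition, ConstantMultiplication

    sfg.set_latency_of_type(Addition.type_name(), 1)
    sfg.set_latency_of_type(ConstantMultiplication.type_name(), 3)

    schema = Schema(sfg, scheduling_alg="ASAP")
    op_id = sfg.find_by_name("ADD3")[0].graph_id
    backward, forward = schema.slacks(op_id)       # room to move in either direction
    schema.move_operation(op_id, min(forward, 2))  # stay within the forward slack
    schema.plot_schedule()                         # matplotlib figure; laps annotated on wrapped signals
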
diff --git a/b_asic/signal_flow_graph.py b/b_asic/signal_flow_graph.py
index 3e6254d48bd239f1d749b9e88e66503308c19b59..15e624d33944269089d02b73adc45930d7147afb 100644
--- a/b_asic/signal_flow_graph.py
+++ b/b_asic/signal_flow_graph.py
@@ -10,6 +10,7 @@
 from io import StringIO
 from queue import PriorityQueue
 import itertools as it
 from graphviz import Digraph
+import re
 from graphviz.backend import FORMATS as GRAPHVIZ_FORMATS, ENGINES as GRAPHVIZ_ENGINES
 from b_asic.port import SignalSourceProvider, OutputPort
@@ -31,10 +32,14 @@ class GraphIDGenerator:
         """Construct a GraphIDGenerator."""
         self._next_id_number = defaultdict(lambda: id_number_offset)
 
-    def next_id(self, type_name: TypeName) -> GraphID:
+    def next_id(self, type_name: TypeName, used_ids: MutableSet = set()) -> GraphID:
         """Get the next graph id for a certain graph id type."""
         self._next_id_number[type_name] += 1
-        return type_name + str(self._next_id_number[type_name])
+        new_id = type_name + str(self._next_id_number[type_name])
+        while new_id in used_ids:
+            self._next_id_number[type_name] += 1
+            new_id = type_name + str(self._next_id_number[type_name])
+        return new_id
 
     @property
     def id_number_offset(self) -> GraphIDNumber:
@@ -89,6 +94,7 @@ class SFG(AbstractOperation):
                          name=name, input_sources=input_sources)
 
         self._components_by_id = dict()
+        self._used_ids = set()
        self._components_by_name = defaultdict(list)
         self._components_dfs_order = []
         self._operations_dfs_order = []
@@ -101,6 +107,7 @@ class SFG(AbstractOperation):
         self._original_output_signals_to_indices = {}
         self._precedence_list = None
+
         # Setup input signals.
         if input_signals is not None:
             for input_index, signal in enumerate(input_signals):
@@ -346,14 +353,9 @@ class SFG(AbstractOperation):
         Keyword arguments:
         type_name: The type_name of the desired components.
         """
-        i = self.id_number_offset + 1
-        components = []
-        found_comp = self.find_by_id(type_name + str(i))
-        while found_comp is not None:
-            components.append(found_comp)
-            i += 1
-            found_comp = self.find_by_id(type_name + str(i))
-
+        reg = "{}[0-9]+".format(type_name)
+        p = re.compile(reg)
+        components = [val for key, val in self._components_by_id.items() if p.match(key)]
         return components
 
     def find_by_id(self, graph_id: GraphID) -> Optional[GraphComponent]:
@@ -665,6 +667,11 @@ class SFG(AbstractOperation):
         for op in self.find_by_type_name(type_name):
             op.set_latency(latency)
 
+    def set_execution_time_of_type(self, type_name: TypeName, execution_time: int) -> None:
+        """Set the execution time of all components with the given type name."""
+        for op in self.find_by_type_name(type_name):
+            op.execution_time = execution_time
+
     def set_latency_offsets_of_type(self, type_name: TypeName, latency_offsets: Dict[str, int]) -> None:
         """Set the latency offset of all components with the given type name."""
         for op in self.find_by_type_name(type_name):
@@ -703,9 +710,11 @@ class SFG(AbstractOperation):
         assert original_component not in self._original_components_to_new, "Tried to add duplicate SFG component"
         new_component = original_component.copy_component()
         self._original_components_to_new[original_component] = new_component
-        new_id = self._graph_id_generator.next_id(new_component.type_name())
-        new_component.graph_id = new_id
-        self._components_by_id[new_id] = new_component
+        if not new_component.graph_id or new_component.graph_id in self._used_ids:
+            new_id = self._graph_id_generator.next_id(new_component.type_name(), self._used_ids)
+            new_component.graph_id = new_id
+        self._used_ids.add(new_component.graph_id)
+        self._components_by_id[new_component.graph_id] = new_component
         self._components_by_name[new_component.name].append(new_component)
         return new_component
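Reviewer note (not part of the patch): the new `set_execution_time_of_type` above follows the same pattern as the existing `set_latency_of_type`; a sketch, again with `sfg` as a hypothetical SFG instance.

    # Sketch only -- assigns latency and execution time per operation type.
    from b_asic import Addition, ConstantMultiplication

    sfg.set_latency_of_type(Addition.type_name(), 1)
    sfg.set_execution_time_of_type(Addition.type_name(), 1)
    sfg.set_execution_time_of_type(ConstantMultiplication.type_name(), 2)
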
""" from numbers import Number -from typing import Optional, Sequence +from typing import Optional, Sequence, Tuple, List from b_asic.operation import AbstractOperation, ResultKey, DelayMap, MutableResultMap, MutableDelayMap from b_asic.graph_component import Name, TypeName @@ -21,7 +21,7 @@ class Input(AbstractOperation): def __init__(self, name: Name = ""): """Construct an Input operation.""" - super().__init__(input_count=0, output_count=1, name=name) + super().__init__(input_count=0, output_count=1, name=name, latency_offsets={'out0' : 0}) self.set_param("value", 0) @classmethod @@ -41,6 +41,13 @@ class Input(AbstractOperation): """Set the current value of this input.""" self.set_param("value", value) + def get_plot_coordinates(self) -> Tuple[List[List[Number]], List[List[Number]]]: + return ([[-0.5, 0], [-0.5, 1], [-0.25, 1], [0, 0.5], [-0.25, 0], [-0.5, 0]], + [[-0.5, 0], [-0.5, 1], [-0.25, 1], [0, 0.5], [-0.25, 0], [-0.5, 0]]) + + def get_io_coordinates(self) -> Tuple[List[List[Number]], List[List[Number]]]: + return ([], [[0, 0.5]]) + class Output(AbstractOperation): """Output operation. @@ -53,7 +60,7 @@ class Output(AbstractOperation): def __init__(self, src0: Optional[SignalSourceProvider] = None, name: Name = ""): """Construct an Output operation.""" super().__init__(input_count=1, output_count=0, - name=name, input_sources=[src0]) + name=name, input_sources=[src0], latency_offsets={'in0' : 0}) @classmethod def type_name(cls) -> TypeName: @@ -62,6 +69,13 @@ class Output(AbstractOperation): def evaluate(self, _): return None + def get_plot_coordinates(self) -> Tuple[List[List[Number]], List[List[Number]]]: + return ([[0, 0], [0, 1], [0.25, 1], [0.5, 0.5], [0.25, 0], [0, 0]], + [[0, 0], [0, 1], [0.25, 1], [0.5, 0.5], [0.25, 0], [0, 0]]) + + def get_io_coordinates(self) -> Tuple[List[List[Number]], List[List[Number]]]: + return ([[0, 0.5]], []) + class Delay(AbstractOperation): """Unit delay operation. 
diff --git a/setup.py b/setup.py
index 42adeb79afbd80b1fda1db4cdc36cc911cb9379d..41376eb294fdee980533063ac4e207e67750b20b 100644
--- a/setup.py
+++ b/setup.py
@@ -81,7 +81,8 @@ setuptools.setup(
         "pyside2",
         "qtpy",
         "graphviz",
-        "matplotlib"
+        "matplotlib",
+        "scipy"
     ],
     packages=["b_asic", "b_asic/GUI"],
     ext_modules=[CMakeExtension("b_asic")],
diff --git a/test/test_operation.py b/test/test_operation.py
index f4af81b57b8d30fe71025ab82f75f57f195ce350..33f51bd1a2378600ee460635eb3deeacc58e9fc9 100644
--- a/test/test_operation.py
+++ b/test/test_operation.py
@@ -177,10 +177,67 @@ class TestLatency:
         assert bfly.latency_offsets == {'in0': 3, "in1": None, "out0": None, 'out1': 5}
 
 
+class TestExecutionTime:
+    def test_execution_time_constructor(self):
+        pass
+
+    def test_set_execution_time(self):
+        bfly = Butterfly()
+        bfly.execution_time = 3
+
+        assert bfly.execution_time == 3
+
+
 class TestCopyOperation:
-    def test_copy_buttefly_latency_offsets(self):
+    def test_copy_butterfly_latency_offsets(self):
         bfly = Butterfly(latency_offsets={'in0': 4, 'in1': 2, 'out0': 10, 'out1': 9})
 
         bfly_copy = bfly.copy_component()
 
         assert bfly_copy.latency_offsets == {'in0': 4, 'in1': 2, 'out0': 10, 'out1': 9}
+
+    def test_copy_execution_time(self):
+        add = Addition()
+        add.execution_time = 2
+
+        add_copy = add.copy_component()
+
+        assert add_copy.execution_time == 2
+
+
+class TestPlotCoordinates:
+    def test_simple_case(self):
+        cmult = ConstantMultiplication(0.5)
+        cmult.execution_time = 1
+        cmult.set_latency(3)
+
+        lat, exe = cmult.get_plot_coordinates()
+        assert lat == [[0, 0], [0, 1], [3, 1], [3, 0], [0, 0]]
+        assert exe == [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]
+
+    def test_complicated_case(self):
+        bfly = Butterfly(latency_offsets={'in0': 2, 'in1': 3, 'out0': 5, 'out1': 10})
+        bfly.execution_time = 7
+
+        lat, exe = bfly.get_plot_coordinates()
+        assert lat == [[2, 0], [2, 0.5], [3, 0.5], [3, 1], [10, 1], [10, 0.5], [5, 0.5], [5, 0], [2, 0]]
+        assert exe == [[0, 0], [0, 1], [7, 1], [7, 0], [0, 0]]
+
+
+class TestIOCoordinates:
+    def test_simple_case(self):
+        cmult = ConstantMultiplication(0.5)
+        cmult.execution_time = 1
+        cmult.set_latency(3)
+
+        i_c, o_c = cmult.get_io_coordinates()
+        assert i_c == [[0, 0.5]]
+        assert o_c == [[3, 0.5]]
+
+    def test_complicated_case(self):
+        bfly = Butterfly(latency_offsets={'in0': 2, 'in1': 3, 'out0': 5, 'out1': 10})
+        bfly.execution_time = 7
+
+        i_c, o_c = bfly.get_io_coordinates()
+        assert i_c == [[2, 0.25], [3, 0.75]]
+        assert o_c == [[5, 0.25], [10, 0.75]]
diff --git a/test/test_schema.py b/test/test_schema.py
index 78a713a9ceda574feede72f48bacf02e6a9c4025..0527e7bb40160ac9b562ceeccf5accd07f5be47f 100644
--- a/test/test_schema.py
+++ b/test/test_schema.py
@@ -1,6 +1,7 @@
 """
 B-ASIC test suite for the schema module and Schema class.
""" +import pytest from b_asic import Schema, Addition, ConstantMultiplication @@ -12,7 +13,7 @@ class TestInit: schema = Schema(sfg_simple_filter) - assert schema._start_times == {"add1": 4, "cmul1": 0} + assert schema._start_times == {"in1": 0, "add1": 4, "cmul1": 0, "out1": 0} def test_complicated_single_outputs_normal_latency(self, precedence_sfg_delays): precedence_sfg_delays.set_latency_of_type(Addition.type_name(), 4) @@ -28,8 +29,8 @@ class TestInit: op_name = precedence_sfg_delays.find_by_id(op_id).name start_times_names[op_name] = start_time - assert start_times_names == {"C0": 0, "B1": 0, "B2": 0, "ADD2": 3, "ADD1": 7, "Q1": 11, - "A0": 14, "A1": 0, "A2": 0, "ADD3": 3, "ADD4": 17} + assert start_times_names == {"IN1": 0, "C0": 0, "B1": 0, "B2": 0, "ADD2": 3, "ADD1": 7, "Q1": 11, + "A0": 14, "A1": 0, "A2": 0, "ADD3": 3, "ADD4": 17, "OUT1": 21} def test_complicated_single_outputs_complex_latencies(self, precedence_sfg_delays): precedence_sfg_delays.set_latency_offsets_of_type(ConstantMultiplication.type_name(), {'in0': 3, 'out0': 5}) @@ -53,8 +54,8 @@ class TestInit: op_name = precedence_sfg_delays.find_by_id(op_id).name start_times_names[op_name] = start_time - assert start_times_names == {'C0': 0, 'B1': 0, 'B2': 0, 'ADD2': 3, 'ADD1': 5, 'Q1': 6, 'A0': 12, - 'A1': 0, 'A2': 0, 'ADD3': 3, 'ADD4': 8} + assert start_times_names == {'IN1': 0, 'C0': 0, 'B1': 0, 'B2': 0, 'ADD2': 3, 'ADD1': 5, 'Q1': 6, 'A0': 12, + 'A1': 0, 'A2': 0, 'ADD3': 3, 'ADD4': 8, 'OUT1': 17} def test_independent_sfg(self, sfg_two_inputs_two_outputs_independent_with_cmul): schema = Schema(sfg_two_inputs_two_outputs_independent_with_cmul, scheduling_alg="ASAP") @@ -64,4 +65,83 @@ class TestInit: op_name = sfg_two_inputs_two_outputs_independent_with_cmul.find_by_id(op_id).name start_times_names[op_name] = start_time - assert start_times_names == {'CMUL1': 0, 'CMUL2': 5, "ADD1": 0, "CMUL3": 7} + assert start_times_names == {'C1': 0, 'IN1': 0, 'IN2': 0, 'CMUL1': 0, 'CMUL2': 5, + "ADD1": 0, "CMUL3": 7, 'OUT1': 9, 'OUT2': 10} + + +class TestSlacks: + def test_forward_backward_slack_normal_latency(self, precedence_sfg_delays): + precedence_sfg_delays.set_latency_of_type(Addition.type_name(), 1) + precedence_sfg_delays.set_latency_of_type(ConstantMultiplication.type_name(), 3) + + schema = Schema(precedence_sfg_delays, scheduling_alg="ASAP") + assert schema.forward_slack(precedence_sfg_delays.find_by_name("ADD3")[0].graph_id) == 7 + assert schema.backward_slack(precedence_sfg_delays.find_by_name("ADD3")[0].graph_id) == 0 + + assert schema.forward_slack(precedence_sfg_delays.find_by_name("A2")[0].graph_id) == 0 + assert schema.backward_slack(precedence_sfg_delays.find_by_name("A2")[0].graph_id) == 16 + + def test_slacks_normal_latency(self, precedence_sfg_delays): + precedence_sfg_delays.set_latency_of_type(Addition.type_name(), 1) + precedence_sfg_delays.set_latency_of_type(ConstantMultiplication.type_name(), 3) + + schema = Schema(precedence_sfg_delays, scheduling_alg="ASAP") + assert schema.slacks(precedence_sfg_delays.find_by_name("ADD3")[0].graph_id) == (0, 7) + assert schema.slacks(precedence_sfg_delays.find_by_name("A2")[0].graph_id) == (16, 0) + + +class TestRescheduling: + def test_move_operation(self, precedence_sfg_delays): + precedence_sfg_delays.set_latency_of_type(Addition.type_name(), 4) + precedence_sfg_delays.set_latency_of_type(ConstantMultiplication.type_name(), 3) + + schema = Schema(precedence_sfg_delays, scheduling_alg="ASAP") + + 
schema.move_operation(precedence_sfg_delays.find_by_name("ADD3")[0].graph_id, 4) + schema.move_operation(precedence_sfg_delays.find_by_name("A2")[0].graph_id, 2) + + start_times_names = dict() + for op_id, start_time in schema._start_times.items(): + op_name = precedence_sfg_delays.find_by_id(op_id).name + start_times_names[op_name] = start_time + + assert start_times_names == {"IN1": 0, "C0": 0, "B1": 0, "B2": 0, "ADD2": 3, "ADD1": 7, "Q1": 11, + "A0": 14, "A1": 0, "A2": 2, "ADD3": 7, "ADD4": 17, "OUT1": 21} + + def test_move_operation_slack_after_rescheduling(self, precedence_sfg_delays): + precedence_sfg_delays.set_latency_of_type(Addition.type_name(), 1) + precedence_sfg_delays.set_latency_of_type(ConstantMultiplication.type_name(), 3) + + schema = Schema(precedence_sfg_delays, scheduling_alg="ASAP") + add3_id = precedence_sfg_delays.find_by_name("ADD3")[0].graph_id + schema.move_operation(add3_id, 4) + assert schema.forward_slack(add3_id) == 3 + assert schema.backward_slack(add3_id) == 4 + + a2_id = precedence_sfg_delays.find_by_name("A2")[0].graph_id + assert schema.forward_slack(a2_id) == 4 + assert schema.backward_slack(a2_id) == 16 + + schema.move_operation(a2_id, 2) + + assert schema.forward_slack(add3_id) == 3 + assert schema.backward_slack(add3_id) == 2 + + assert schema.forward_slack(a2_id) == 2 + assert schema.backward_slack(a2_id) == 18 + + def test_move_operation_incorrect_move_backward(self, precedence_sfg_delays): + precedence_sfg_delays.set_latency_of_type(Addition.type_name(), 1) + precedence_sfg_delays.set_latency_of_type(ConstantMultiplication.type_name(), 3) + + schema = Schema(precedence_sfg_delays, scheduling_alg="ASAP") + with pytest.raises(ValueError): + schema.move_operation(precedence_sfg_delays.find_by_name("ADD3")[0].graph_id, -4) + + def test_move_operation_incorrect_move_forward(self, precedence_sfg_delays): + precedence_sfg_delays.set_latency_of_type(Addition.type_name(), 1) + precedence_sfg_delays.set_latency_of_type(ConstantMultiplication.type_name(), 3) + + schema = Schema(precedence_sfg_delays, scheduling_alg="ASAP") + with pytest.raises(ValueError): + schema.move_operation(precedence_sfg_delays.find_by_name("ADD3")[0].graph_id, 10)