Commit bd9caefb authored by Oscar Gustafsson

Various cleanups, performance improvements, and new method

parent 669e44a3
Merge request !495: Various cleanups, performance improvements, and new method
Pipeline #158981 passed
@@ -265,7 +265,7 @@ class Resource(HardwareBlock):
         self.plot_content(ax, **kwargs)
         height = 0.4
         if title:
-            height += 0.4
+            height = 0.8
             fig.suptitle(title)
         fig.set_figheight(math.floor(max(ax.get_ylim())) * 0.3 + height)
         fig.show()  # type: ignore
......
 import copy
-import sys
 from abc import ABC, abstractmethod
 from typing import TYPE_CHECKING, cast
@@ -123,9 +122,9 @@ class ASAPScheduler(Scheduler):
         max_end_time = schedule.get_max_end_time()
-        if schedule.schedule_time is None:
+        if schedule._schedule_time is None:
             schedule.set_schedule_time(max_end_time)
-        elif schedule.schedule_time < max_end_time:
+        elif schedule._schedule_time < max_end_time:
             raise ValueError(f"Too short schedule time. Minimum is {max_end_time}.")
         schedule.sort_y_locations_on_start_times()
@@ -159,7 +158,7 @@ class ALAPScheduler(Scheduler):
                     outport.operation.graph_id
                 ] + schedule.forward_slack(outport.operation.graph_id)
                 self.op_laps[outport.operation.graph_id] = (
-                    new_unwrapped_start_time // schedule.schedule_time
+                    new_unwrapped_start_time // schedule._schedule_time
                 )
                 schedule.move_operation_alap(outport.operation.graph_id)
@@ -167,7 +166,7 @@ class ALAPScheduler(Scheduler):
         slack = min(schedule.start_times.values())
         for op_id in schedule.start_times.keys():
             schedule.move_operation(op_id, -slack)
-        schedule.set_schedule_time(schedule.schedule_time - slack)
+        schedule.set_schedule_time(schedule._schedule_time - slack)
         schedule.sort_y_locations_on_start_times()
@@ -223,7 +222,7 @@ class ListScheduler(Scheduler):
                 raise ValueError("Provided max_concurrent_reads must be an integer.")
             if max_concurrent_reads <= 0:
                 raise ValueError("Provided max_concurrent_reads must be larger than 0.")
-        self._max_concurrent_reads = max_concurrent_reads or sys.maxsize
+        self._max_concurrent_reads = max_concurrent_reads or 0
         if max_concurrent_writes is not None:
             if not isinstance(max_concurrent_writes, int):
@@ -232,7 +231,7 @@ class ListScheduler(Scheduler):
                 raise ValueError(
                     "Provided max_concurrent_writes must be larger than 0."
                 )
-        self._max_concurrent_writes = max_concurrent_writes or sys.maxsize
+        self._max_concurrent_writes = max_concurrent_writes or 0
         if input_times is not None:
             if not isinstance(input_times, dict):
@@ -289,7 +288,7 @@ class ListScheduler(Scheduler):
         if self._output_delta_times:
             self._handle_outputs()
-        if self._schedule.schedule_time is None:
+        if self._schedule._schedule_time is None:
             self._schedule.set_schedule_time(self._schedule.get_max_end_time())
         self._schedule.remove_delays()
         self._handle_dont_cares()
@@ -377,95 +376,100 @@ class ListScheduler(Scheduler):
     def _execution_times_in_time(self, op: "Operation", time: int) -> int:
         count = 0
         for other_op_id, start_time in self._schedule.start_times.items():
-            if self._schedule.schedule_time is not None:
-                start_time = start_time % self._schedule.schedule_time
-            if time >= start_time:
-                if time < start_time + max(
-                    self._cached_execution_times[other_op_id], 1
-                ):
-                    if isinstance(self._sfg.find_by_id(other_op_id), type(op)):
-                        if other_op_id != op.graph_id:
+            if other_op_id != op._graph_id:
+                if self._schedule._schedule_time is not None:
+                    start_time = start_time % self._schedule._schedule_time
+                if time >= start_time:
+                    if time < start_time + max(
+                        self._cached_execution_times[other_op_id], 1
+                    ):
+                        if isinstance(self._sfg.find_by_id(other_op_id), type(op)):
                             count += 1
         return count
     def _op_satisfies_resource_constraints(self, op: "Operation") -> bool:
-        if self._schedule.schedule_time is not None:
-            time_slot = self._current_time % self._schedule.schedule_time
+        if self._schedule._schedule_time is not None:
+            time_slot = self._current_time % self._schedule._schedule_time
         else:
             time_slot = self._current_time
         count = self._execution_times_in_time(op, time_slot)
         return count < self._remaining_resources[op.type_name()]
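The resource check above folds the candidate time into the schedule period before counting colliding executions. A minimal, standalone sketch of that wrap-around counting with hypothetical start and execution times (these helper names are not part of B-ASIC):

# Hypothetical illustration of counting executions that overlap a time slot
# modulo the schedule period, as the resource-constraint check above does.
def executions_in_slot(
    start_times: dict[str, int],
    execution_times: dict[str, int],
    time_slot: int,
    schedule_time: int | None,
) -> int:
    count = 0
    for op_id, start in start_times.items():
        if schedule_time is not None:
            start %= schedule_time  # wrap cyclic schedules around the period
        if start <= time_slot < start + max(execution_times[op_id], 1):
            count += 1
    return count

# Example: both operations overlap slot 2 in a period of 4.
print(executions_in_slot({"a": 0, "b": 6}, {"a": 3, "b": 2}, 2, 4))  # -> 2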
     def _op_satisfies_concurrent_writes(self, op: "Operation") -> bool:
-        tmp_used_writes = {}
-        if not isinstance(op, Output):
-            for i in range(len(op.outputs)):
-                output_ready_time = (
-                    self._current_time
-                    + self._cached_latency_offsets[op.graph_id][f"out{i}"]
-                )
-                if self._schedule.schedule_time:
-                    output_ready_time %= self._schedule.schedule_time
-                writes_in_time = 0
-                for item in self._schedule.start_times.items():
-                    offsets = [
-                        offset
-                        for port_id, offset in self._cached_latency_offsets[
-                            item[0]
-                        ].items()
-                        if port_id.startswith("out")
-                    ]
-                    write_times = [item[1] + offset for offset in offsets]
-                    writes_in_time += write_times.count(output_ready_time)
-                write_time = (
-                    self._current_time
-                    + self._cached_latency_offsets[op.graph_id][f"out{i}"]
-                )
-                if self._schedule.schedule_time:
-                    write_time %= self._schedule.schedule_time
-                if tmp_used_writes.get(write_time):
-                    tmp_used_writes[write_time] += 1
-                else:
-                    tmp_used_writes[write_time] = 1
-                if (
-                    self._max_concurrent_writes
-                    - writes_in_time
-                    - tmp_used_writes[write_time]
-                    < 0
-                ):
-                    return False
+        if self._max_concurrent_writes:
+            tmp_used_writes = {}
+            if not isinstance(op, Output):
+                for i in range(len(op.outputs)):
+                    output_ready_time = (
+                        self._current_time
+                        + self._cached_latency_offsets[op.graph_id][f"out{i}"]
+                    )
+                    if self._schedule._schedule_time:
+                        output_ready_time %= self._schedule._schedule_time
+                    writes_in_time = 0
+                    for item in self._schedule.start_times.items():
+                        offsets = [
+                            offset
+                            for port_id, offset in self._cached_latency_offsets[
+                                item[0]
+                            ].items()
+                            if port_id.startswith("out")
+                        ]
+                        write_times = [item[1] + offset for offset in offsets]
+                        writes_in_time += write_times.count(output_ready_time)
+                    write_time = (
+                        self._current_time
+                        + self._cached_latency_offsets[op.graph_id][f"out{i}"]
+                    )
+                    if self._schedule._schedule_time:
+                        write_time %= self._schedule._schedule_time
+                    if tmp_used_writes.get(write_time):
+                        tmp_used_writes[write_time] += 1
+                    else:
+                        tmp_used_writes[write_time] = 1
+                    if (
+                        self._max_concurrent_writes
+                        - writes_in_time
+                        - tmp_used_writes[write_time]
+                        < 0
+                    ):
+                        return False
         return True
     def _op_satisfies_concurrent_reads(self, op: "Operation") -> bool:
-        tmp_used_reads = {}
-        for i, op_input in enumerate(op.inputs):
-            source_op = op_input.signals[0].source.operation
-            if isinstance(source_op, Delay) or isinstance(source_op, DontCare):
-                continue
-            if self._schedule.start_times[source_op.graph_id] != self._current_time - 1:
-                input_read_time = (
-                    self._current_time
-                    + self._cached_latency_offsets[op.graph_id][f"in{i}"]
-                )
-                if self._schedule.schedule_time:
-                    input_read_time %= self._schedule.schedule_time
-                if tmp_used_reads.get(input_read_time):
-                    tmp_used_reads[input_read_time] += 1
-                else:
-                    tmp_used_reads[input_read_time] = 1
-                prev_used = self._used_reads.get(input_read_time) or 0
-                if (
-                    self._max_concurrent_reads
-                    < prev_used + tmp_used_reads[input_read_time]
-                ):
-                    return False
+        if self._max_concurrent_reads:
+            tmp_used_reads = {}
+            for i, op_input in enumerate(op.inputs):
+                source_op = op_input.signals[0].source.operation
+                if isinstance(source_op, Delay) or isinstance(source_op, DontCare):
+                    continue
+                if (
+                    self._schedule.start_times[source_op.graph_id]
+                    != self._current_time - 1
+                ):
+                    input_read_time = (
+                        self._current_time
+                        + self._cached_latency_offsets[op.graph_id][f"in{i}"]
+                    )
+                    if self._schedule._schedule_time:
+                        input_read_time %= self._schedule._schedule_time
+                    if tmp_used_reads.get(input_read_time):
+                        tmp_used_reads[input_read_time] += 1
+                    else:
+                        tmp_used_reads[input_read_time] = 1
+                    prev_used = self._used_reads.get(input_read_time) or 0
+                    if (
+                        self._max_concurrent_reads
+                        < prev_used + tmp_used_reads[input_read_time]
+                    ):
+                        return False
         return True
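With this change an unset read/write limit is stored as 0 instead of sys.maxsize, so the truthiness guard at the top of each method skips the whole counting loop when no limit was requested. A tiny sketch of that convention (illustrative names, not the scheduler's API):

# Illustrative sketch: 0 (or None) means "no limit", so the expensive
# concurrency check is skipped instead of comparing against sys.maxsize.
def satisfies_limit(used_in_slot: int, max_concurrent: int) -> bool:
    if not max_concurrent:  # 0 -> unlimited, short-circuit the check
        return True
    return used_in_slot < max_concurrent

print(satisfies_limit(7, 0))  # True, no limit configured
print(satisfies_limit(3, 2))  # False, limit of 2 exceeded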
     def _op_satisfies_data_dependencies(self, op: "Operation") -> bool:
@@ -476,22 +480,20 @@ class ListScheduler(Scheduler):
             if isinstance(source_op, Delay) or isinstance(source_op, DontCare):
                 continue
-            source_op_graph_id = source_op.graph_id
-            if source_op_graph_id in self._remaining_ops:
+            if source_op.graph_id in self._remaining_ops:
                 return False
-            if self._schedule.schedule_time is not None:
+            if self._schedule._schedule_time is not None:
                 available_time = (
-                    self._schedule.start_times.get(source_op_graph_id)
-                    + self._op_laps[source_op.graph_id] * self._schedule.schedule_time
+                    self._schedule.start_times[source_op.graph_id]
+                    + self._op_laps[source_op.graph_id] * self._schedule._schedule_time
                     + self._cached_latency_offsets[source_op.graph_id][
                         f"out{source_port.index}"
                     ]
                 )
             else:
                 available_time = (
-                    self._schedule.start_times.get(source_op_graph_id)
+                    self._schedule.start_times[source_op.graph_id]
                     + self._cached_latency_offsets[source_op.graph_id][
                         f"out{source_port.index}"
                     ]
@@ -546,22 +548,22 @@ class ListScheduler(Scheduler):
                     f"Provided output delta time with GraphID {key} cannot be found in the provided SFG."
                 )
-        if self._schedule._cyclic and self._schedule.schedule_time is not None:
+        if self._schedule._cyclic and self._schedule._schedule_time is not None:
             iteration_period_bound = self._sfg.iteration_period_bound()
-            if self._schedule.schedule_time < iteration_period_bound:
+            if self._schedule._schedule_time < iteration_period_bound:
                 raise ValueError(
-                    f"Provided scheduling time {self._schedule.schedule_time} must be larger or equal to the"
+                    f"Provided scheduling time {self._schedule._schedule_time} must be larger or equal to the"
                     f" iteration period bound: {iteration_period_bound}."
                 )
-        if self._schedule.schedule_time is not None:
+        if self._schedule._schedule_time is not None:
             for resource_type, resource_amount in self._max_resources.items():
                 if resource_amount < self._sfg.resource_lower_bound(
-                    resource_type, self._schedule.schedule_time
+                    resource_type, self._schedule._schedule_time
                 ):
                     raise ValueError(
                         f"Amount of resource: {resource_type} is not enough to "
-                        f"realize schedule for scheduling time: {self._schedule.schedule_time}."
+                        f"realize schedule for scheduling time: {self._schedule._schedule_time}."
                     )
         alap_schedule = copy.copy(self._schedule)
@@ -570,17 +572,17 @@ class ListScheduler(Scheduler):
         alap_scheduler.apply_scheduling(alap_schedule)
         self._alap_start_times = alap_schedule.start_times
         self._alap_op_laps = alap_scheduler.op_laps
-        self._alap_schedule_time = alap_schedule.schedule_time
+        self._alap_schedule_time = alap_schedule._schedule_time
         self._schedule.start_times = {}
         for key in self._schedule._laps.keys():
             self._schedule._laps[key] = 0
-        if not self._schedule._cyclic and self._schedule.schedule_time:
-            if alap_schedule.schedule_time > self._schedule.schedule_time:
+        if not self._schedule._cyclic and self._schedule._schedule_time:
+            if alap_schedule._schedule_time > self._schedule._schedule_time:
                 raise ValueError(
-                    f"Provided scheduling time {schedule.schedule_time} cannot be reached, "
+                    f"Provided scheduling time {schedule._schedule_time} cannot be reached, "
                     "try to enable the cyclic property or increase the time to at least "
-                    f"{alap_schedule.schedule_time}."
+                    f"{alap_schedule._schedule_time}."
                 )
         self._remaining_resources = self._max_resources.copy()
@@ -647,8 +649,8 @@ class ListScheduler(Scheduler):
                 next_op, self._current_time, self._op_laps
             )
             self._op_laps[next_op.graph_id] = (
-                (self._current_time) // self._schedule.schedule_time
-                if self._schedule.schedule_time
+                (self._current_time) // self._schedule._schedule_time
+                if self._schedule._schedule_time
                 else 0
             )
@@ -661,7 +663,7 @@ class ListScheduler(Scheduler):
         self._logger.debug("--- Non-Recursive Operation scheduling completed ---")

     def _log_scheduled_op(self, next_op: "Operation") -> None:
-        if self._schedule.schedule_time is not None:
+        if self._schedule._schedule_time is not None:
             self._logger.debug(f" Op: {next_op.graph_id}, time: {self._current_time}")
         else:
             self._logger.debug(f" Op: {next_op.graph_id}, time: {self._current_time}")
@@ -679,8 +681,8 @@ class ListScheduler(Scheduler):
                 self._current_time
                 + self._cached_latency_offsets[next_op.graph_id][f"in{i}"]
             )
-            if self._schedule.schedule_time:
-                time %= self._schedule.schedule_time
+            if self._schedule._schedule_time:
+                time %= self._schedule._schedule_time
             if self._used_reads.get(time):
                 self._used_reads[time] += 1
             else:
@@ -704,7 +706,7 @@ class ListScheduler(Scheduler):
     def _handle_outputs(self) -> None:
         self._logger.debug("--- Output placement starting ---")
         if self._schedule._cyclic:
-            end = self._schedule.schedule_time
+            end = self._schedule._schedule_time
         else:
             end = self._schedule.get_max_end_time()
         for output in self._sfg.find_by_type_name(Output.type_name()):
@@ -714,7 +716,7 @@ class ListScheduler(Scheduler):
             new_time = end + delta_time
-            if self._schedule._cyclic and self._schedule.schedule_time is not None:
+            if self._schedule._cyclic and self._schedule._schedule_time is not None:
                 self._schedule.place_operation(output, new_time, self._op_laps)
             else:
                 self._schedule.start_times[output.graph_id] = new_time
@@ -727,8 +729,8 @@ class ListScheduler(Scheduler):
             count += 1
             modulo_time = (
-                new_time % self._schedule.schedule_time
-                if self._schedule.schedule_time
+                new_time % self._schedule._schedule_time
+                if self._schedule._schedule_time
                 else new_time
             )
             self._logger.debug(f" {output.graph_id} time: {modulo_time}")
@@ -741,7 +743,7 @@ class ListScheduler(Scheduler):
         )
         if min_slack != 0:
             for output in self._sfg.find_by_type_name(Output.type_name()):
-                if self._schedule._cyclic and self._schedule.schedule_time is not None:
+                if self._schedule._cyclic and self._schedule._schedule_time is not None:
                     self._schedule.move_operation(output.graph_id, -min_slack)
                 else:
                     self._schedule.start_times[output.graph_id] = (
@@ -750,12 +752,12 @@ class ListScheduler(Scheduler):
                 new_time = self._schedule.start_times[output.graph_id]
                 if (
                     not self._schedule._cyclic
-                    and self._schedule.schedule_time is not None
+                    and self._schedule._schedule_time is not None
                 ):
-                    if new_time > self._schedule.schedule_time:
+                    if new_time > self._schedule._schedule_time:
                         raise ValueError(
                             f"Cannot place output {output.graph_id} at time {new_time} "
-                            f"for scheduling time {self._schedule.schedule_time}. "
+                            f"for scheduling time {self._schedule._schedule_time}. "
                             "Try to relax the scheduling time, change the output delta times or enable cyclic."
                         )
                 self._logger.debug(
@@ -807,7 +809,7 @@ class RecursiveListScheduler(ListScheduler):
         if self._output_delta_times:
             self._handle_outputs()
-        if self._schedule.schedule_time is None:
+        if self._schedule._schedule_time is None:
             self._schedule.set_schedule_time(self._schedule.get_max_end_time())
         self._schedule.remove_delays()
         self._handle_dont_cares()
@@ -849,7 +851,7 @@ class RecursiveListScheduler(ListScheduler):
         return [(op_id, self._deadlines[op_id]) for op_id in ready_ops]

     def _schedule_recursive_ops(self, loops: list[list["GraphID"]]) -> None:
-        saved_sched_time = self._schedule.schedule_time
+        saved_sched_time = self._schedule._schedule_time
         self._schedule._schedule_time = None
         self._logger.debug("--- Scheduling of recursive loops starting ---")
@@ -887,10 +889,10 @@ class RecursiveListScheduler(ListScheduler):
         self._schedule._schedule_time = self._schedule.get_max_end_time()
         if (
             saved_sched_time is not None
-            and saved_sched_time < self._schedule.schedule_time
+            and saved_sched_time < self._schedule._schedule_time
         ):
             raise ValueError(
-                f"Requested schedule time {saved_sched_time} cannot be reached, increase to {self._schedule.schedule_time} or assign more resources."
+                f"Requested schedule time {saved_sched_time} cannot be reached, increase to {self._schedule._schedule_time} or assign more resources."
             )
         self._logger.debug("--- Scheduling of recursive loops completed ---")
@@ -924,7 +926,7 @@ class RecursiveListScheduler(ListScheduler):
             )
             usage_time = (
                 self._schedule.start_times[destination_op.graph_id]
-                + self._schedule.schedule_time
+                + self._schedule._schedule_time
                 * self._schedule.laps[output_port.signals[0].graph_id]
             )
             if op_available_time > usage_time:
@@ -939,10 +941,10 @@ class RecursiveListScheduler(ListScheduler):
                 continue
             if source_op.graph_id in self._remaining_ops:
                 return False
-            if self._schedule.schedule_time is not None:
+            if self._schedule._schedule_time is not None:
                 available_time = (
                     self._schedule.start_times.get(source_op.graph_id)
-                    + self._op_laps[source_op.graph_id] * self._schedule.schedule_time
+                    + self._op_laps[source_op.graph_id] * self._schedule._schedule_time
                     + self._cached_latency_offsets[source_op.graph_id][
                         f"out{source_port.index}"
                     ]
......
@@ -7,7 +7,7 @@ Contains the signal flow graph operation.
 import itertools
 import re
 import warnings
-from collections import defaultdict, deque
+from collections import Counter, defaultdict, deque
 from collections.abc import Iterable, MutableSet, Sequence
 from fractions import Fraction
 from io import StringIO
@@ -2066,6 +2066,10 @@ class SFG(AbstractOperation):
             paths.append(newpath)
         return paths

+    def operation_counter(self) -> Counter:
+        """Return a Counter with the number of instances for each type."""
+        return Counter(op.type_name() for op in self.operations)
+
     def edit(self) -> dict[str, "SFG"]:
         """Edit SFG in GUI."""
         from b_asic.GUI.main_window import start_editor
......
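The new SFG.operation_counter() method returns a collections.Counter keyed on each operation's type_name(). A minimal usage sketch; the small SFG built here and the printed counts are illustrative, not taken from the commit:

from b_asic.core_operations import Addition, ConstantMultiplication
from b_asic.signal_flow_graph import SFG
from b_asic.special_operations import Input, Output

# Build a small example SFG: y = 0.5 * (x + x)
in0 = Input()
add = Addition(in0, in0)
cmul = ConstantMultiplication(0.5, add)
out0 = Output(cmul)
sfg = SFG(inputs=[in0], outputs=[out0])

counts = sfg.operation_counter()
print(counts)  # e.g. Counter({'in': 1, 'add': 1, 'cmul': 1, 'out': 1})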
""" """
========================================= =========================================
Auto Scheduling With Custom IO times Automatic scheduling with custom IO times
========================================= =========================================
It is possible to specify the IO times and provide those to the scheduling.
""" """
from b_asic.core_operations import Butterfly, ConstantMultiplication from b_asic.core_operations import Butterfly, ConstantMultiplication
...@@ -31,7 +32,8 @@ schedule1 = Schedule(sfg, scheduler=ASAPScheduler()) ...@@ -31,7 +32,8 @@ schedule1 = Schedule(sfg, scheduler=ASAPScheduler())
schedule1.show() schedule1.show()
# %% # %%
# Generate a non-cyclic Schedule from HybridScheduler with custom IO times. # Generate a non-cyclic Schedule from HybridScheduler with custom IO times,
# one input and output per time unit
resources = {Butterfly.type_name(): 1, ConstantMultiplication.type_name(): 1} resources = {Butterfly.type_name(): 1, ConstantMultiplication.type_name(): 1}
input_times = {f"in{i}": i for i in range(points)} input_times = {f"in{i}": i for i in range(points)}
output_delta_times = {f"out{i}": i for i in range(points)} output_delta_times = {f"out{i}": i for i in range(points)}
......
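For reference, a condensed sketch of how these dictionaries are typically handed to the scheduler. It reuses the example's existing imports and names (sfg, resources, input_times, output_delta_times); the keyword names input_times and output_delta_times follow the ListScheduler constructor seen in the diff above, and HybridScheduler is assumed to accept the same arguments:

# Condensed sketch, assuming the surrounding example's variables are defined.
schedule2 = Schedule(
    sfg,
    scheduler=HybridScheduler(
        resources,
        input_times=input_times,
        output_delta_times=output_delta_times,
    ),
)
schedule2.show()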
""" """
================================ ==================================================
Automatic Scheduling for different latency-offsets. Automatic scheduling for different latency-offsets
================================ ==================================================
This example showcases how one can generate a schedule where the This example showcases how one can generate a schedule where the
operations have different latency offsets for the different inputs/outputs. operations have different latency offsets for the different inputs/outputs.
......
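A minimal sketch of what "different latency offsets" means in practice. The latency_offsets and execution_time constructor arguments and the port names used here are assumptions based on the B-ASIC operation interface, not something shown in this diff:

# Hypothetical example: a Butterfly whose two outputs become available at
# different times relative to the operation's start.
from b_asic.core_operations import Butterfly

bfly = Butterfly(
    latency_offsets={"in0": 0, "in1": 0, "out0": 1, "out1": 3},
    execution_time=1,
)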
""" """
========================================= ===============================
LDLT Matrix Inversion Algorithm LDLT matrix inversion algorithm
========================================= ===============================
This provides some examples of the different list-based schedulers that are
available in B-ASIC.
""" """
from b_asic.architecture import Memory, ProcessingElement from b_asic.architecture import Memory, ProcessingElement
...@@ -44,26 +46,27 @@ print("Scheduling time:", schedule.schedule_time) ...@@ -44,26 +46,27 @@ print("Scheduling time:", schedule.schedule_time)
schedule.show() schedule.show()
# %% # %%
# Create an EarliestDeadline schedule that satisfies the resource constraints. # Create an earliest deadline schedule that uses one MADS and one Reciprocal PE.
resources = {MADS.type_name(): 1, Reciprocal.type_name(): 1} resources = {MADS.type_name(): 1, Reciprocal.type_name(): 1}
schedule = Schedule(sfg, scheduler=EarliestDeadlineScheduler(resources)) schedule = Schedule(sfg, scheduler=EarliestDeadlineScheduler(resources))
print("Scheduling time:", schedule.schedule_time) print("Scheduling time:", schedule.schedule_time)
schedule.show() schedule.show()
# %% # %%
# Create a LeastSlackTime schedule that satisfies the resource constraints. # Create a least slack-time schedule that uses one MADS and one Reciprocal PE.
schedule = Schedule(sfg, scheduler=LeastSlackTimeScheduler(resources)) schedule = Schedule(sfg, scheduler=LeastSlackTimeScheduler(resources))
print("Scheduling time:", schedule.schedule_time) print("Scheduling time:", schedule.schedule_time)
schedule.show() schedule.show()
# %% # %%
# Create a MaxFanOutScheduler schedule that satisfies the resource constraints. # Create a max fan-out schedule that uses one MADS and one Reciprocal PE.
schedule = Schedule(sfg, scheduler=MaxFanOutScheduler(resources)) schedule = Schedule(sfg, scheduler=MaxFanOutScheduler(resources))
print("Scheduling time:", schedule.schedule_time) print("Scheduling time:", schedule.schedule_time)
schedule.show() schedule.show()
# %% # %%
# Create a HybridScheduler schedule that satisfies the resource constraints with custom IO times. # Create a HybridScheduler schedule that one MADS and one Reciprocal PE with
# custom IO times.
# This is the schedule we will synthesize an architecture for. # This is the schedule we will synthesize an architecture for.
input_times = { input_times = {
"in0": 0, "in0": 0,
......
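Since the three list-based schedulers above share the same resource dictionary, they can also be compared in a loop. A small sketch reusing only names from the example itself:

# Compare the list-based schedulers from this example on the same SFG and
# resource constraints, printing the schedule time each one achieves.
for scheduler_cls in (
    EarliestDeadlineScheduler,
    LeastSlackTimeScheduler,
    MaxFanOutScheduler,
):
    schedule = Schedule(sfg, scheduler=scheduler_cls(resources))
    print(scheduler_cls.__name__, "->", schedule.schedule_time)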