Skip to content

Commit

Permalink
Gather port latency info to prepare for latency inference
Browse files Browse the repository at this point in the history
  • Loading branch information
VonTum committed Feb 1, 2025
1 parent 56a4dd3 commit a4e84e6
Show file tree
Hide file tree
Showing 3 changed files with 216 additions and 0 deletions.
7 changes: 7 additions & 0 deletions src/flattening/flatten.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
use crate::alloc::{ArenaAllocator, UUIDAllocator, UUIDRange, UUID};
use crate::flattening::port_latency_inference::make_port_latency_inference_info;
use crate::typing::abstract_type::{AbstractType, DomainType};
use crate::{alloc::UUIDRangeIter, prelude::*};

Expand Down Expand Up @@ -1784,6 +1785,12 @@ fn flatten_global(linker: &mut Linker, global_obj: GlobalUUID, cursor: &mut Curs
decl.typ.domain = DomainType::Physical(port.domain);
}

md.latency_inference_info = make_port_latency_inference_info(
&md.ports,
&instructions,
md.link_info.template_parameters.len(),
);

&mut md.link_info
}
GlobalUUID::Type(type_uuid) => {
Expand Down
5 changes: 5 additions & 0 deletions src/flattening/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ mod initialization;
mod lints;
mod name_context;
mod parser;
mod port_latency_inference;
mod typechecking;
mod walk;

Expand All @@ -17,6 +18,7 @@ use std::ops::Deref;
pub use flatten::flatten_all_globals;
pub use initialization::gather_initial_file_data;
pub use lints::perform_lints;
use port_latency_inference::PortLatencyInferenceInfo;
pub use typechecking::typecheck_all_modules;

use crate::linker::{Documentation, LinkInfo};
Expand Down Expand Up @@ -51,6 +53,9 @@ pub struct Module {
/// [Port::declaration_instruction] are set in Stage 2: Flattening
pub ports: FlatAlloc<Port, PortIDMarker>,

/// Created in Stage 2: Flattening
pub latency_inference_info: PortLatencyInferenceInfo,

/// Created in Stage 1: Initialization
pub domains: FlatAlloc<DomainInfo, DomainIDMarker>,
pub implicit_clk_domain: bool,
Expand Down
204 changes: 204 additions & 0 deletions src/flattening/port_latency_inference.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,204 @@
use crate::{
alloc::zip_eq,
flattening::{DeclarationKind, ExpressionSource},
prelude::*,
typing::template::TVec,
value::Value,
};

use super::{BinaryOperator, Instruction, Port, UnaryOperator, WireReference, WireReferenceRoot};

/*/// ports whose latency annotations require them to be at fixed predefined offsets
///
/// For example: portA'3, portB'7, portC'2
///
/// But also: portX'L+2, portY'L-2
///
/// These are in separate groups
///
/// See [PortGroupConnection]
#[derive(Debug, Clone)]
struct PortGroup {
port: PortID,
relative_latency_offset: i64,
}*/

/// A candidate from which a template variable could be inferred
///
/// Basically, this struct is formed for a pair of ports like this:
///
/// portA'4 -> portB'L-3
///
/// That would create an inference candidate for template var 'L' with an offset of 7. (L == 7 iff portA' - portB' == 0)
#[derive(Debug, Clone)]
struct LatencyInferenceCandidate {
    /// The template variable this candidate can infer a value for (the 'L above)
    template_id: TemplateID,
    /// Source port of the pair
    from: PortID,
    /// Destination port of the pair
    to: PortID,
    /// Constant part of the relation: `to`'s annotation minus `from`'s annotation,
    /// with the 1x template variable factored out (see doc example above)
    offset: i64,
}

/// A latency annotation expressed as an affine function of the template
/// arguments: `offset + Σ arg_linear_factor[arg] * arg`.
struct PortLatencyLinearity {
    /// Constant term of the latency expression
    offset: i64,
    /// Per-template-argument linear coefficients
    arg_linear_factor: TVec<i64>,
}
impl PortLatencyLinearity {
    /// True when the latency does not depend on any template variable
    fn is_const(&self) -> bool {
        self.arg_linear_factor.iter().all(|(_, v)| *v == 0)
    }
    /// Checks if the two latency annotations are offset by a constant and exactly 1x a template variable
    ///
    /// Returns that variable and the constant offset (`to.offset - from.offset`).
    fn is_pair_latency_candidate(from: &Self, to: &Self) -> Option<(TemplateID, i64)> {
        let mut found_var = None;

        for (template_var_id, a, b) in zip_eq(&from.arg_linear_factor, &to.arg_linear_factor) {
            match *b - *a {
                // This variable contributes equally to both sides; it cancels out
                0 => {}
                1 if found_var.is_none() => found_var = Some(template_var_id),
                // Anything else (a second differing variable, a factor != 1, or a
                // negative factor) means `to - from` is not `const + 1*var`, so
                // this pair cannot be used for inference.
                _ => return None,
            }
        }

        found_var.map(|v| (v, to.offset - from.offset))
    }
}
/// Tries to express the (generative) expression at `cur_instr` as an affine
/// function of the template arguments (a [PortLatencyLinearity]).
///
/// Returns [None] for anything that is not affine: products of two
/// variable-dependent terms, division/modulo involving variables (or by zero),
/// unsupported operators, non-integer constants, or wire references that are
/// not plain generative template inputs.
fn recurse_down_expression(
    instructions: &FlatAlloc<Instruction, FlatIDMarker>,
    cur_instr: FlatID,
    num_template_args: usize,
) -> Option<PortLatencyLinearity> {
    match &instructions[cur_instr].unwrap_expression().source {
        // -(expr): negate the constant term and every linear coefficient
        ExpressionSource::UnaryOp {
            op: UnaryOperator::Negate,
            right,
        } => {
            let mut right_v = recurse_down_expression(instructions, *right, num_template_args)?;
            right_v.offset = -right_v.offset;
            for (_, v) in &mut right_v.arg_linear_factor {
                *v = -*v;
            }
            Some(right_v)
        }
        ExpressionSource::BinaryOp { op, left, right } => {
            let mut left_v = recurse_down_expression(instructions, *left, num_template_args)?;
            let mut right_v = recurse_down_expression(instructions, *right, num_template_args)?;
            match op {
                // Addition/subtraction combine term-wise
                BinaryOperator::Add => {
                    left_v.offset += right_v.offset;
                    for ((_, a), (_, b)) in left_v
                        .arg_linear_factor
                        .iter_mut()
                        .zip(right_v.arg_linear_factor.iter())
                    {
                        *a += *b;
                    }
                    Some(left_v)
                }
                BinaryOperator::Subtract => {
                    left_v.offset -= right_v.offset;
                    for ((_, a), (_, b)) in left_v
                        .arg_linear_factor
                        .iter_mut()
                        .zip(right_v.arg_linear_factor.iter())
                    {
                        *a -= *b;
                    }
                    Some(left_v)
                }
                BinaryOperator::Multiply => {
                    // A product stays affine only when at least one side is constant
                    if !left_v.is_const() && !right_v.is_const() {
                        None
                    } else {
                        // Normalize so the constant factor is in right_v
                        if left_v.is_const() {
                            std::mem::swap(&mut left_v, &mut right_v);
                        }
                        left_v.offset *= right_v.offset;
                        for (_, a) in &mut left_v.arg_linear_factor {
                            *a *= right_v.offset;
                        }
                        Some(left_v)
                    }
                }
                // Division/modulo only between constants; a zero divisor yields
                // None instead of panicking on a bad latency annotation.
                BinaryOperator::Divide => (left_v.is_const()
                    && right_v.is_const()
                    && right_v.offset != 0)
                    .then(|| {
                        left_v.offset /= right_v.offset;
                        left_v
                    }),
                BinaryOperator::Modulo => (left_v.is_const()
                    && right_v.is_const()
                    && right_v.offset != 0)
                    .then(|| {
                        left_v.offset %= right_v.offset;
                        left_v
                    }),
                _other => None,
            }
        }
        // Integer literal: pure constant term (must fit in i64)
        ExpressionSource::Constant(Value::Integer(i)) => {
            let offset: i64 = i.try_into().ok()?;
            Some(PortLatencyLinearity {
                offset,
                arg_linear_factor: TVec::with_size(num_template_args, 0),
            })
        }
        // A bare reference to a generative template input: coefficient 1 for that arg
        ExpressionSource::WireRef(WireReference {
            root: WireReferenceRoot::LocalDecl(decl_id, _span),
            path,
            is_generative,
        }) => {
            // Latency specifiers are generative expressions, so any wire we reach
            // here must itself be generative.
            assert!(is_generative);
            // Indexing/slicing into the declaration is not supported
            if !path.is_empty() {
                return None;
            }
            let DeclarationKind::GenerativeInput(decl_template_id) =
                instructions[*decl_id].unwrap_declaration().decl_kind
            else {
                return None;
            };
            let mut result = PortLatencyLinearity {
                offset: 0,
                arg_linear_factor: TVec::with_size(num_template_args, 0),
            };
            result.arg_linear_factor[decl_template_id] = 1;
            Some(result)
        }
        _other => None,
    }
}

/// Per-module information gathered during flattening, from which values for
/// latency template variables can later be inferred.
#[derive(Default, Debug)]
pub struct PortLatencyInferenceInfo {
    //port_latency_groups: Vec<Vec<PortGroup>>,
    /// All port pairs whose latency annotations differ by a constant plus
    /// exactly one template variable (see [LatencyInferenceCandidate])
    inference_candidates: Vec<LatencyInferenceCandidate>,
}

/// Builds the [PortLatencyInferenceInfo] for a module.
///
/// For every port with a latency specifier, the specifier expression is
/// reduced to an affine function of the template arguments (when possible),
/// and every ordered pair of such ports that differs by a constant plus
/// exactly one template variable becomes a [LatencyInferenceCandidate].
pub fn make_port_latency_inference_info(
    ports: &FlatAlloc<Port, PortIDMarker>,
    instructions: &FlatAlloc<Instruction, FlatIDMarker>,
    num_template_args: usize,
) -> PortLatencyInferenceInfo {
    // Affine form of each port's latency annotation; None when the port has no
    // specifier or the specifier isn't affine in the template args.
    let port_infos = ports.map(|(_port_id, port)| {
        let decl = instructions[port.declaration_instruction].unwrap_declaration();
        let latency_spec = decl.latency_specifier?;
        recurse_down_expression(instructions, latency_spec, num_template_args)
    });

    // Every ordered (from, to) pair of annotated ports that forms a valid
    // candidate. Self-pairs fall out naturally: their coefficients all cancel.
    let inference_candidates = port_infos
        .iter_valids()
        .flat_map(|(from, from_info)| {
            port_infos.iter_valids().filter_map(move |(to, to_info)| {
                let (template_id, offset) =
                    PortLatencyLinearity::is_pair_latency_candidate(from_info, to_info)?;
                Some(LatencyInferenceCandidate {
                    template_id,
                    from,
                    to,
                    offset,
                })
            })
        })
        .collect();

    PortLatencyInferenceInfo {
        inference_candidates,
    }
}

0 comments on commit a4e84e6

Please sign in to comment.