From cb2f9a36f4d1114039886bffd6662cd08f4253c8 Mon Sep 17 00:00:00 2001 From: alokkumardalei-wq Date: Sat, 18 Apr 2026 13:58:11 +0530 Subject: [PATCH] ram: add support for latch-based memories Detect latch vs flip-flop storage cells using OpenSTA's Sequential API and generate master-slave latch arrangements when a latch cell is provided. The write data latch uses an inverted gated clock (shared per slice) to create negative transparency, while the bitcell latch uses the regular gated clock for positive transparency. Key changes: - Add StorageType enum and detectStorageType() using Sequential::isLatch() - Add write_latch_cell_ auto-selection (reuses storage cell master) - Add latch path in makeBit() with shared inverted clock from makeSlice() - Use upstream's buildPortMap() for dynamic pin name discovery Resolves: The-OpenROAD-Project/OpenROAD#10153 Signed-off-by: alokkumardalei-wq --- src/ram/include/ram/ram.h | 11 +++ src/ram/src/ram.cpp | 104 +++++++++++++++++++++++-- src/ram/test/make_8x8_sky130_latch.tcl | 32 ++++++++ 3 files changed, 140 insertions(+), 7 deletions(-) create mode 100644 src/ram/test/make_8x8_sky130_latch.tcl diff --git a/src/ram/include/ram/ram.h b/src/ram/include/ram/ram.h index 17d83c38914..b419a19bf71 100644 --- a/src/ram/include/ram/ram.h +++ b/src/ram/include/ram/ram.h @@ -51,6 +51,12 @@ class TritonRoute; namespace ram { +enum class StorageType +{ + FLIP_FLOP, + LATCH +}; + enum class PortRoleType { Clock, @@ -119,6 +125,7 @@ class RamGen int read_ports); private: + StorageType detectStorageType(odb::dbMaster* master) const; void findMasters(); std::map buildPortMap(odb::dbMaster*); odb::dbMaster* findMaster(const std::function& match, @@ -133,6 +140,7 @@ class RamGen std::unique_ptr makeBit(const std::string& prefix, int read_ports, odb::dbNet* clock, + odb::dbNet* write_clock, std::vector& select, odb::dbNet* data_input, std::vector& data_output); @@ -179,7 +187,9 @@ class RamGen grt::GlobalRouter*
global_router_{nullptr}; drt::TritonRoute* detailed_router_{nullptr}; + StorageType storage_type_{StorageType::FLIP_FLOP}; odb::dbMaster* storage_cell_{nullptr}; + odb::dbMaster* write_latch_cell_{nullptr}; odb::dbMaster* tristate_cell_{nullptr}; odb::dbMaster* inv_cell_{nullptr}; odb::dbMaster* and2_cell_{nullptr}; @@ -188,6 +198,7 @@ class RamGen odb::dbMaster* tapcell_{nullptr}; std::map storage_ports_; + std::map write_latch_ports_; std::map tristate_ports_; std::map inv_ports_; std::map and2_ports_; diff --git a/src/ram/src/ram.cpp b/src/ram/src/ram.cpp index 969c166977c..b411a9b15a0 100644 --- a/src/ram/src/ram.cpp +++ b/src/ram/src/ram.cpp @@ -29,6 +29,7 @@ #include "sta/FuncExpr.hh" #include "sta/Liberty.hh" #include "sta/PortDirection.hh" +#include "sta/Sequential.hh" #include "utl/Logger.h" namespace ram { @@ -68,6 +69,24 @@ RamGen::RamGen(sta::dbNetwork* network, { } +StorageType RamGen::detectStorageType(dbMaster* master) const +{ + auto cell = network_->dbToSta(master); + if (!cell) { + return StorageType::FLIP_FLOP; + } + auto liberty = network_->libertyCell(cell); + if (!liberty || !liberty->hasSequentials()) { + return StorageType::FLIP_FLOP; + } + for (const auto& seq : liberty->sequentials()) { + if (seq.isLatch()) { + return StorageType::LATCH; + } + } + return StorageType::FLIP_FLOP; +} + dbInst* RamGen::makeInst( Cell* cell, const std::string& prefix, @@ -109,6 +128,7 @@ dbBTerm* RamGen::makeBTerm(const std::string& name, dbIoType io_type) std::unique_ptr RamGen::makeBit(const std::string& prefix, const int read_ports, dbNet* clock, + dbNet* write_clock, vector& select, dbNet* data_input, vector& data_output) @@ -117,13 +137,41 @@ std::unique_ptr RamGen::makeBit(const std::string& prefix, auto storage_net = makeNet(prefix, "storage"); - makeInst(bit_cell.get(), - prefix, - "bit", - storage_cell_, - {{storage_ports_[{PortRoleType::Clock, 0}], clock}, - {storage_ports_[{PortRoleType::DataIn, 0}], data_input}, - 
{storage_ports_[{PortRoleType::DataOut, 0}], storage_net}}); + if (storage_type_ == StorageType::LATCH) { + // Latch-based: data routes through a negative (active-low) write latch + // before reaching the positive bitcell latch. + // write_clock is the inverted gated clock (shared at slice level), + // clock is the regular gated clock for the bitcell. + auto write_latch_net = makeNet(prefix, "write_latch"); + + // Negative latch: transparent when gated clock is LOW + makeInst( + bit_cell.get(), + prefix, + "wlatch", + write_latch_cell_, + {{write_latch_ports_[{PortRoleType::Clock, 0}], write_clock}, + {write_latch_ports_[{PortRoleType::DataIn, 0}], data_input}, + {write_latch_ports_[{PortRoleType::DataOut, 0}], write_latch_net}}); + + // Positive latch (bitcell): transparent when gated clock is HIGH + makeInst(bit_cell.get(), + prefix, + "bit", + storage_cell_, + {{storage_ports_[{PortRoleType::Clock, 0}], clock}, + {storage_ports_[{PortRoleType::DataIn, 0}], write_latch_net}, + {storage_ports_[{PortRoleType::DataOut, 0}], storage_net}}); + } else { + // Flip-flop-based: data connects directly to storage cell + makeInst(bit_cell.get(), + prefix, + "bit", + storage_cell_, + {{storage_ports_[{PortRoleType::Clock, 0}], clock}, + {storage_ports_[{PortRoleType::DataIn, 0}], data_input}, + {storage_ports_[{PortRoleType::DataOut, 0}], storage_net}}); + } for (int read_port = 0; read_port < read_ports; ++read_port) { makeInst( @@ -160,6 +208,13 @@ void RamGen::makeSlice(const int slice_idx, auto gclock_net = makeNet(prefix, "gclock"); auto we0_net = makeNet(prefix, "we0"); + // For latch-based memories, create a single shared inverted clock at slice + // level instead of per-bit inverters. 
+ dbNet* inv_gclock_net = nullptr; + if (storage_type_ == StorageType::LATCH) { + inv_gclock_net = makeNet(prefix, "inv_gclock"); + } + for (int local_bit = 0; local_bit < mask_size; ++local_bit) { auto name = fmt::format("{}.bit{}", prefix, start_bit_idx + local_bit); vector outs(read_ports); @@ -169,6 +224,7 @@ void RamGen::makeSlice(const int slice_idx, ram_grid_.addCell(makeBit(name, read_ports, gclock_net, + inv_gclock_net, select_b_nets, data_input[local_bit], outs), @@ -185,6 +241,16 @@ void RamGen::makeSlice(const int slice_idx, {clock_gate_ports_[{PortRoleType::DataIn, 0}], we0_net}, {clock_gate_ports_[{PortRoleType::DataOut, 0}], gclock_net}}); + // For latch mode, create one shared clock inverter per slice + if (storage_type_ == StorageType::LATCH) { + makeInst(sel_cell.get(), + prefix, + "gclock_inv", + inv_cell_, + {{inv_ports_[{PortRoleType::DataIn, 0}], gclock_net}, + {inv_ports_[{PortRoleType::DataOut, 0}], inv_gclock_net}}); + } + // Make clock and // this AND gate needs to be fed a net created by a decoder // adding any net will automatically connect with any port @@ -553,6 +619,19 @@ void RamGen::findMasters() } storage_ports_ = buildPortMap(storage_cell_); + // For latch-based memories, auto-select a write data latch cell. + // Uses the same cell master since both the write latch and bitcell are + // positive-transparent latches — the write latch is made negative by + // inverting the clock fed to its enable pin. 
+ if (storage_type_ == StorageType::LATCH && !write_latch_cell_) { + write_latch_cell_ = storage_cell_; + write_latch_ports_ = storage_ports_; + logger_->info(RAM, + 23, + "Latch-based memory: using {} as write data latch", + write_latch_cell_->getName()); + } + if (!clock_gate_cell_) { clock_gate_cell_ = findMaster( [](sta::LibertyPort* port) { @@ -774,6 +853,17 @@ void RamGen::generate(const int mask_size, and2_cell_ = nullptr; clock_gate_cell_ = nullptr; buffer_cell_ = nullptr; + write_latch_cell_ = nullptr; + + // Detect storage type before findMasters so that write_latch_cell_ + // selection can happen in a single pass. + storage_type_ = detectStorageType(storage_cell_); + if (storage_type_ == StorageType::LATCH) { + logger_->info(RAM, + 24, + "Detected latch-based storage cell: {}", + storage_cell_->getName()); + } findMasters(); auto chip = db_->getChip(); diff --git a/src/ram/test/make_8x8_sky130_latch.tcl b/src/ram/test/make_8x8_sky130_latch.tcl new file mode 100644 index 00000000000..1a69880960f --- /dev/null +++ b/src/ram/test/make_8x8_sky130_latch.tcl @@ -0,0 +1,32 @@ +source "helpers.tcl" + +set_thread_count [expr [cpu_count] / 4] + +read_liberty sky130hd/sky130_fd_sc_hd__tt_025C_1v80.lib + +read_lef sky130hd/sky130hd.tlef +read_lef sky130hd/sky130_fd_sc_hd_merged.lef + +generate_ram \ + -mask_size 8 \ + -word_size 8 \ + -num_words 8 \ + -read_ports 1 \ + -storage_cell sky130_fd_sc_hd__dlxtp_1 \ + -power_pin VPWR \ + -ground_pin VGND \ + -routing_layer {met1 0.48} \ + -ver_layer {met2 0.48 40} \ + -hor_layer {met3 0.48 20} \ + -filler_cells {sky130_fd_sc_hd__fill_1 sky130_fd_sc_hd__fill_2 \ + sky130_fd_sc_hd__fill_4 sky130_fd_sc_hd__fill_8} \ + -tapcell sky130_fd_sc_hd__tap_1 \ + -max_tap_dist 15 + +set lef_file [make_result_file make_8x8_sky130_latch.lef] +write_abstract_lef -bloat_occupied_layers $lef_file +diff_files make_8x8_sky130_latch.lefok $lef_file + +set def_file [make_result_file make_8x8_sky130_latch.def] +write_def $def_file +diff_files 
make_8x8_sky130_latch.defok $def_file