Merge pull request #5381 from The-OpenROAD-Project-staging/secure-tns-repair

Secure tns repair
maliberty authored Jul 16, 2024
2 parents 21dd165 + 28f2d6c commit 86e37aa
Showing 15 changed files with 655 additions and 103 deletions.
2 changes: 2 additions & 0 deletions src/rsz/README.md
@@ -275,6 +275,7 @@ repair_timing
[-allow_setup_violations]
[-skip_pin_swap]
[-skip_gate_cloning]
[-skip_buffering]
[-enable_buffer_removal]
[-repair_tns tns_end_percent]
[-max_passes passes]
@@ -295,6 +296,7 @@ repair_timing
| `-allow_setup_violations` | While repairing hold violations, buffers that would cause setup violations are not inserted unless `-allow_setup_violations` is specified. |
| `-skip_pin_swap` | Flag to skip pin swap. The default value is `False`, and the allowed values are bools. |
| `-skip_gate_cloning` | Flag to skip gate cloning. The default value is `False`, and the allowed values are bools. |
| `-skip_buffering` | Flag to skip rebuffering and load splitting. The default value is `False`, and the allowed values are bools. |
| `-enable_buffer_removal` | Flag to enable buffer removal during setup fixing. The default value is `False`, and the allowed values are bools. |
| `-repair_tns` | Percentage of violating endpoints to repair (0-100). When `tns_end_percent` is zero (the default), only the worst endpoint is repaired. When `tns_end_percent` is 100, all violating endpoints are repaired. |
| `-max_utilization` | Defines the percentage of core area used. |
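
As an aside, a minimal sketch of how the `-repair_tns` percentage maps to the number of endpoints the setup-repair loop visits; the function and argument names here are hypothetical and only mirror the `max_end_count` computation in `RepairSetup.cc` shown further below:

```cpp
#include <algorithm>
#include <cstddef>

// Hypothetical helper for illustration only: given the number of violating
// endpoints and the -repair_tns percentage (0-100), return how many endpoints
// the repair loop visits. The worst endpoint is always repaired, even when
// the percentage is zero.
std::size_t endpointsToRepair(std::size_t num_violating_ends,
                              double tns_end_percent)
{
  const auto count
      = static_cast<std::size_t>(num_violating_ends * tns_end_percent / 100.0);
  return std::max<std::size_t>(count, 1);
}

// Example: 120 violating endpoints with -repair_tns 30 -> 36 endpoints;
// with the default of 0, only the single worst endpoint is repaired.
```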
1 change: 1 addition & 0 deletions src/rsz/include/rsz/Resizer.hh
@@ -267,6 +267,7 @@ class Resizer : public dbStaState
bool verbose,
bool skip_pin_swap,
bool skip_gate_cloning,
bool skip_buffering,
bool skip_buffer_removal);
// For testing.
void repairSetup(const Pin* end_pin);
277 changes: 259 additions & 18 deletions src/rsz/src/RepairSetup.cc
@@ -66,6 +66,7 @@ using utl::RSZ;
using sta::Edge;
using sta::fuzzyEqual;
using sta::fuzzyGreater;
using sta::fuzzyGreaterEqual;
using sta::fuzzyLess;
using sta::GraphDelayCalc;
using sta::InstancePinIterator;
@@ -90,6 +91,7 @@ void RepairSetup::repairSetup(const float setup_slack_margin,
const bool verbose,
const bool skip_pin_swap,
const bool skip_gate_cloning,
const bool skip_buffering,
const bool skip_buffer_removal)
{
init();
@@ -137,11 +139,14 @@ void RepairSetup::repairSetup(const float setup_slack_margin,
} else {
// nothing to repair
logger_->metric("design__instance__count__setup_buffer", 0);
logger_->info(RSZ, 98, "No setup violations found");
return;
}

int end_index = 0;
int max_end_count = violating_ends.size() * repair_tns_end_percent;
float initial_tns = sta_->totalNegativeSlack(max_);
float prev_tns = initial_tns;
// Always repair the worst endpoint, even if tns percent is zero.
max_end_count = max(max_end_count, 1);
swap_pin_inst_set_.clear(); // Make sure we do not swap the same pin twice.
@@ -151,10 +156,13 @@ void RepairSetup::repairSetup(const float setup_slack_margin,
sta_->checkFanoutLimitPreamble();

resizer_->incrementalParasiticsBegin();
int print_iteration = 0;
int opto_iteration = 0;
bool prev_termination = false;
bool two_cons_terminations = false;
if (verbose) {
printProgress(print_iteration, false, false);
printProgress(opto_iteration, false, false);
}
float fix_rate_threshold = inc_fix_rate_threshold_;
for (const auto& end_original_slack : violating_ends) {
Vertex* end = end_original_slack.first;
Slack end_slack = sta_->vertexSlack(end, max_);
@@ -191,9 +199,26 @@ void RepairSetup::repairSetup(const float setup_slack_margin,
int decreasing_slack_passes = 0;
resizer_->journalBegin();
while (pass <= max_passes) {
print_iteration++;
opto_iteration++;
if (verbose) {
printProgress(print_iteration, false, false);
printProgress(opto_iteration, false, false);
}
if (terminateProgress(opto_iteration,
initial_tns,
prev_tns,
fix_rate_threshold,
end_index,
max_end_count)) {
if (prev_termination) {
// Abort entire fixing if no progress for 200 iterations
two_cons_terminations = true;
} else {
prev_termination = true;
}
break;
}
if (opto_iteration % opto_small_interval_ == 0) {
prev_termination = false;
}

if (end_slack > setup_slack_margin) {
@@ -222,6 +247,7 @@ void RepairSetup::repairSetup(const float setup_slack_margin,
end_slack,
skip_pin_swap,
skip_gate_cloning,
skip_buffering,
skip_buffer_removal,
setup_slack_margin);
if (!changed) {
@@ -317,13 +343,27 @@ void RepairSetup::repairSetup(const float setup_slack_margin,
end = worst_vertex;
}
pass++;
}
} // while pass <= max_passes
if (verbose) {
printProgress(print_iteration, true, false);
printProgress(opto_iteration, true, false);
}
}
if (two_cons_terminations) {
// clang-format off
debugPrint(logger_, RSZ, "repair_setup", 1, "bailing out of setup fixing "
"due to no TNS progress for two opto cycles");
// clang-format on
break;
}
} // for each violating endpoint

// do some last gasp setup fixing before we give up
OptoParams params(setup_slack_margin, verbose);
params.iteration = opto_iteration;
params.initial_tns = initial_tns;
repairSetupLastGasp(params);

if (verbose) {
printProgress(print_iteration, true, true);
printProgress(opto_iteration, true, true);
}
// Leave the parasitics up to date.
resizer_->updateParasitics();
@@ -375,7 +415,7 @@ void RepairSetup::repairSetup(const Pin* end_pin)
const Slack slack = sta_->vertexSlack(vertex, max_);
PathRef path = sta_->vertexWorstSlackPath(vertex, max_);
resizer_->incrementalParasiticsBegin();
repairPath(path, slack, false, false, false, 0.0);
repairPath(path, slack, false, false, false, false, 0.0);
// Leave the parasitics up to date.
resizer_->updateParasitics();
resizer_->incrementalParasiticsEnd();
@@ -416,6 +456,7 @@ bool RepairSetup::repairPath(PathRef& path,
const Slack path_slack,
const bool skip_pin_swap,
const bool skip_gate_cloning,
const bool skip_buffering,
const bool skip_buffer_removal,
const float setup_slack_margin)
{
@@ -507,7 +548,8 @@ bool RepairSetup::repairPath(PathRef& path,
// For tristate nets all we can do is resize the driver.
const bool tristate_drvr = resizer_->isTristateDriver(drvr_pin);
dbNet* db_net = db_network_->staToDb(net);
if (fanout > 1
if (!skip_buffering
&& fanout > 1
// Rebuffer blows up on large fanout nets.
&& fanout < rebuffer_max_fanout_ && !tristate_drvr
&& !resizer_->dontTouch(net) && !db_net->isConnectedByAbutment()) {
@@ -537,14 +579,16 @@ bool RepairSetup::repairPath(PathRef& path,
break;
}

// Don't split loads on low fanout nets.
if (fanout > split_load_min_fanout_ && !tristate_drvr
&& !resizer_->dontTouch(net) && !db_net->isConnectedByAbutment()) {
const int init_buffer_count = inserted_buffer_count_;
splitLoads(drvr_path, drvr_index, path_slack, &expanded);
split_load_buffer_count_ = inserted_buffer_count_ - init_buffer_count;
changed = true;
break;
if (!skip_buffering) {
// Don't split loads on low fanout nets.
if (fanout > split_load_min_fanout_ && !tristate_drvr
&& !resizer_->dontTouch(net) && !db_net->isConnectedByAbutment()) {
const int init_buffer_count = inserted_buffer_count_;
splitLoads(drvr_path, drvr_index, path_slack, &expanded);
split_load_buffer_count_ = inserted_buffer_count_ - init_buffer_count;
changed = true;
break;
}
}
}
}
@@ -1588,4 +1632,201 @@ void RepairSetup::printProgress(const int iteration,
}
}

// Terminate progress if incremental fix rate within an opto interval falls
// below the threshold. Bump up the threshold after each large opto interval.
bool RepairSetup::terminateProgress(const int iteration,
const float initial_tns,
float& prev_tns,
float& fix_rate_threshold,
// for debug only
const int endpt_index,
const int num_endpts)
{
if (iteration % opto_large_interval_ == 0) {
fix_rate_threshold *= 2.0;
}
if (iteration % opto_small_interval_ == 0) {
float curr_tns = sta_->totalNegativeSlack(max_);
float inc_fix_rate = (prev_tns - curr_tns) / initial_tns;
prev_tns = curr_tns;
if (iteration > 1000 // allow for some initial fixing for 1000 iterations
&& inc_fix_rate < fix_rate_threshold) {
// clang-format off
debugPrint(logger_, RSZ, "repair_setup", 1, "bailing out at iter {}"
" because incr fix rate {:0.2f}% is < {:0.2f}% [endpt {}/{}]",
iteration, inc_fix_rate*100, fix_rate_threshold*100,
endpt_index, num_endpts);
// clang-format on
return true;
}
}
return false;
}
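
A worked illustration of this termination schedule follows. It is a standalone sketch with assumed interval and threshold constants; the real `opto_small_interval_`, `opto_large_interval_`, and `inc_fix_rate_threshold_` are members of `RepairSetup` and may differ.

```cpp
#include <cstdio>

int main()
{
  // Assumed values, for illustration only.
  const int small_interval = 50;     // check the fix rate this often
  const int large_interval = 200;    // double the threshold this often
  double threshold = 0.0005;         // starting incremental fix-rate threshold

  const double initial_tns = -100.0; // total negative slack at start (ns)
  double prev_tns = initial_tns;
  double curr_tns = initial_tns;

  for (int iter = small_interval; iter <= 1200; iter += small_interval) {
    if (iter % large_interval == 0) {
      threshold *= 2.0;              // demand a higher fix rate over time
    }
    curr_tns += 0.02;                // pretend 0.02 ns of TNS is recovered
    const double inc_fix_rate = (prev_tns - curr_tns) / initial_tns;
    prev_tns = curr_tns;
    if (iter > 1000 && inc_fix_rate < threshold) {
      std::printf("terminate at iteration %d: %.4f%% < %.4f%%\n",
                  iter, inc_fix_rate * 100, threshold * 100);
      break;
    }
  }
  return 0;
}
```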

// Perform some last fixing based on sizing only.
// This is a greedy opto that does not degrade WNS or TNS.
// TODO: add VT swap
void RepairSetup::repairSetupLastGasp(const OptoParams& params)
{
float curr_tns = sta_->totalNegativeSlack(max_);
if (fuzzyGreaterEqual(curr_tns, 0)) {
// clang-format off
debugPrint(logger_, RSZ, "repair_setup", 1, "last gasp is bailing out "
"because TNS is {:0.2f}", curr_tns);
// clang-format on
return;
}

// Don't do anything unless there was some progress from previous fixing
if ((params.initial_tns - curr_tns) / params.initial_tns < 0.05) {
// clang-format off
debugPrint(logger_, RSZ, "repair_setup", 1, "last gasp is bailing out "
"because TNS was reduced by < 5% from previous fixing");
// clang-format on
return;
}

// Sort remaining failing endpoints
const VertexSet* endpoints = sta_->endpoints();
vector<pair<Vertex*, Slack>> violating_ends;
for (Vertex* end : *endpoints) {
const Slack end_slack = sta_->vertexSlack(end, max_);
if (end_slack < params.setup_slack_margin) {
violating_ends.emplace_back(end, end_slack);
}
}
std::stable_sort(violating_ends.begin(),
violating_ends.end(),
[](const auto& end_slack1, const auto& end_slack2) {
return end_slack1.second < end_slack2.second;
});

int end_index = 0;
int max_end_count = violating_ends.size();
if (max_end_count == 0) {
// clang-format off
debugPrint(logger_, RSZ, "repair_setup", 1, "last gasp is bailing out "
"because there are no violating endpoints");
// clang-format on
return;
}
// clang-format off
debugPrint(logger_, RSZ, "repair_setup", 1, "{} violating endpoints remain",
max_end_count);
// clang-format on
swap_pin_inst_set_.clear(); // Make sure we do not swap the same pin twice.
int opto_iteration = params.iteration;
if (params.verbose) {
printProgress(opto_iteration, false, false);
}

float prev_tns = curr_tns;
Slack curr_worst_slack = violating_ends[0].second;
Slack prev_worst_slack = curr_worst_slack;
bool prev_termination = false;
bool two_cons_terminations = false;
float fix_rate_threshold = inc_fix_rate_threshold_;

for (const auto& end_original_slack : violating_ends) {
Vertex* end = end_original_slack.first;
Slack end_slack = sta_->vertexSlack(end, max_);
Slack worst_slack;
Vertex* worst_vertex;
sta_->worstSlack(max_, worst_slack, worst_vertex);
end_index++;
if (end_index > max_end_count) {
break;
}
int pass = 1;
resizer_->journalBegin();
while (pass <= max_last_gasp_passes_) {
opto_iteration++;
if (terminateProgress(opto_iteration,
params.initial_tns,
prev_tns,
fix_rate_threshold,
end_index,
max_end_count)) {
if (prev_termination) {
// Abort entire fixing if no progress for 200 iterations
two_cons_terminations = true;
} else {
prev_termination = true;
}
break;
}
if (opto_iteration % opto_small_interval_ == 0) {
prev_termination = false;
}
if (params.verbose) {
printProgress(opto_iteration, false, false);
}
if (end_slack > params.setup_slack_margin) {
break;
}
PathRef end_path = sta_->vertexWorstSlackPath(end, max_);
const bool changed = repairPath(end_path,
end_slack,
true /* skip_pin_swap */,
true /* skip_gate_cloning */,
true /* skip_buffering */,
true /* skip_buffer_removal */,
params.setup_slack_margin);

if (!changed) {
if (pass != 1) {
resizer_->journalRestore(
resize_count_, inserted_buffer_count_, cloned_gate_count_);
resizer_->updateParasitics();
sta_->findRequireds();
}
break;
}
resizer_->updateParasitics();
sta_->findRequireds();
end_slack = sta_->vertexSlack(end, max_);
sta_->worstSlack(max_, curr_worst_slack, worst_vertex);
curr_tns = sta_->totalNegativeSlack(max_);

// Accept only moves that improve both WNS and TNS
if (fuzzyGreaterEqual(curr_worst_slack, prev_worst_slack)
&& fuzzyGreaterEqual(curr_tns, prev_tns)) {
// clang-format off
debugPrint(logger_, RSZ, "repair_setup", 1, "sizing move accepted for "
"endpoint {} pass {} because WNS improved to {:0.3f} and "
"TNS improved to {:0.3f}",
end_index, pass, curr_worst_slack, curr_tns);
// clang-format on
prev_worst_slack = curr_worst_slack;
prev_tns = curr_tns;
resizer_->journalBegin();
} else {
resizer_->journalRestore(
resize_count_, inserted_buffer_count_, cloned_gate_count_);
resizer_->updateParasitics();
sta_->findRequireds();
break;
}

if (resizer_->overMaxArea()) {
break;
}
if (end_index == 1) {
end = worst_vertex;
}
pass++;
} // while pass <= max_last_gasp_passes_
if (params.verbose) {
printProgress(opto_iteration, true, false);
}
if (two_cons_terminations) {
// clang-format off
debugPrint(logger_, RSZ, "repair_setup", 1, "bailing out of last gasp fixing "
"due to no TNS progress for two opto cycles");
// clang-format on
break;
}
} // for each violating endpoint
}

} // namespace rsz
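
The last-gasp pass above commits a sizing move only when neither WNS nor TNS degrades, otherwise it rolls the design back through the journal. A condensed sketch of that accept/reject discipline is shown below; `tryResize`, `restore`, and `measure` are hypothetical callbacks standing in for the resizer journal and STA queries, not the production API.

```cpp
#include <functional>

struct Metrics {
  double wns;  // worst negative slack
  double tns;  // total negative slack
};

// Greedy accept/reject loop: apply a candidate move, keep it only if both
// WNS and TNS are at least as good as before, otherwise undo it and stop.
inline void greedySizingPass(int max_passes,
                             const std::function<bool()>& tryResize,
                             const std::function<void()>& restore,
                             const std::function<Metrics()>& measure)
{
  Metrics best = measure();
  for (int pass = 1; pass <= max_passes; pass++) {
    if (!tryResize()) {
      break;  // no further change possible on this endpoint
    }
    const Metrics curr = measure();
    if (curr.wns >= best.wns && curr.tns >= best.tns) {
      best = curr;  // accept: checkpoint the improved state
    } else {
      restore();    // reject: roll back to the last accepted state
      break;
    }
  }
}
```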