if (profile->peak.rate && min_rate > profile->peak.rate)
min_rate = profile->peak.rate;
- /* Each packet accomulate single count, whereas HW
+ /* Each packet accumulates a single count, whereas HW
* considers each unit as Byte, so we need convert
* user pps to bps
*/
profile->pkt_mode_adj += adjust;
profile->commit.rate += (adjust * profile->commit.rate);
profile->peak.rate += (adjust * profile->peak.rate);
+ /* The number of tokens freed after scheduling is proportional
+ * to the adjust value
+ */
+ profile->commit.size *= adjust;
+ profile->peak.size *= adjust;
}
return 0;
return NIX_ERR_TM_INVALID_PEAK_RATE;
}
+ /* If PIR and CIR are requested, PIR should always be larger than CIR */
+ if (peak_rate && commit_rate && (commit_rate > peak_rate))
+ return NIX_ERR_TM_INVALID_PEAK_RATE;
+
if (!skip_ins)
TAILQ_INSERT_TAIL(&nix->shaper_profile_list, profile, shaper);
/* Disable backpressure, it will be enabled back if needed on
* hierarchy enable
*/
- rc = nix_tm_bp_config_set(roc_nix, false);
- if (rc) {
- plt_err("Failed to disable backpressure for flush, rc=%d", rc);
- goto cleanup;
+ for (i = 0; i < sq_cnt; i++) {
+ sq = nix->sqs[i];
+ if (!sq)
+ continue;
+
+ rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+ if (rc) {
+ plt_err("Failed to disable backpressure, rc=%d", rc);
+ goto cleanup;
+ }
}
/* Flush all tx queues */