- /*enabling per rx queue congestion control */
- taildrop.threshold = CONG_THRESHOLD_RX_Q;
- taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
- taildrop.oal = CONG_RX_OAL;
- DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d",
- rx_queue_id);
- ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
+
+ /* Queues with a private CGR use frame-count based tail drop, with
+ * nb_rx_desc as the threshold. All other queues fall back to the
+ * standard byte-based tail drop. There is no HW restriction, but
+ * the number of CGRs is limited, hence this restriction.
+ */
+ if (dpaa2_q->cgid != 0xff) {
+ /* Enable frame-count based tail drop on the private CGR */
+ taildrop.threshold = nb_rx_desc;
+ taildrop.units = DPNI_CONGESTION_UNIT_FRAMES;
+ taildrop.oal = 0;
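+ /* oal (overhead accounting length) only matters for
+ * byte-based accounting, so leave it at 0 here.
+ */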
+ DPAA2_PMD_DEBUG("Enabling CG Tail Drop on queue = %d",
+ rx_queue_id);
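+ /* Apply at the congestion-group control point (per-CGR) */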
+ ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_CP_CONGESTION_GROUP,
+ DPNI_QUEUE_RX,
+ dpaa2_q->tc_index,
+ flow_id, &taildrop);
+ } else {
+ /* Enable byte-based tail drop on the Rx queue */
+ taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q;
+ taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
+ taildrop.oal = CONG_RX_OAL;
+ DPAA2_PMD_DEBUG("Enabling Byte based Drop on queue= %d",
+ rx_queue_id);
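+ /* Apply at the per-queue control point (no CGR available) */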
+ ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_CP_QUEUE, DPNI_QUEUE_RX,
+ dpaa2_q->tc_index, flow_id,
+ &taildrop);
+ }
+ if (ret) {
+ DPAA2_PMD_ERR("Error in setting taildrop: err=%d",
+ ret);
+ return -1;
+ }
+ } else { /* Disable tail drop */
+ struct dpni_taildrop taildrop = {0};
+
+ DPAA2_PMD_INFO("Tail drop is disabled on queue = %d", rx_queue_id);
+
+ taildrop.enable = 0;
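+ /* A zeroed config with enable = 0 clears the tail drop
+ * configured above, at the same control point.
+ */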
+ if (dpaa2_q->cgid != 0xff) {
+ ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
+ dpaa2_q->tc_index,
+ flow_id, &taildrop);
+ } else {
+ ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,