Update workarounds for errata that are fixed in 96xx A1.

This patch also enables CQ drop for all silicon passes to maintain
performance, and updates the default Rx ring size reported in
dev_info.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
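
For reference, a minimal standalone sketch of how the two drop levels used
in the CQ setup path below are derived, assuming the hardware drop/bp fields
are expressed in 1/256 units of the CQ size (as the 5 * 256 / 100 constant
suggests). cq_drop_level() is a hypothetical helper for illustration only,
not a driver function:

/*
 * Standalone sketch, not part of the patch: derive the CQ drop level for
 * the errata-limited and default cases. Assumes drop/bp levels are in
 * 1/256 units of the CQ size.
 */
#include <math.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NIX_CQ_THRESH_LEVEL	(5 * 256 / 100)	/* drop/BP at ~95% full */
#define NIX_CQ_FULL_ERRATA_SKID	(1024ull * 256)	/* reserve a 1024-entry skid */

/* Hypothetical helper for illustration only */
static uint16_t
cq_drop_level(uint32_t cq_size, bool limit_cq_full_errata)
{
	if (limit_cq_full_errata)
		return (uint16_t)ceil(NIX_CQ_FULL_ERRATA_SKID / (float)cq_size);
	return NIX_CQ_THRESH_LEVEL;
}

int
main(void)
{
	/* 4096-entry CQ with the errata: ceil(262144 / 4096) = 64, i.e. 25% headroom */
	printf("errata drop level:  %u\n", (unsigned int)cq_drop_level(4096, true));
	/* Without the errata: fixed level 12, i.e. ~5% headroom */
	printf("default drop level: %u\n", (unsigned int)cq_drop_level(4096, false));
	return 0;
}

Under that reading, the errata path reserves a fixed 1024-entry skid
regardless of CQ size, while the default path keeps a constant ~5% of the
CQ free; the same level is reused for backpressure in the flow-control hunk
at the end of the patch.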
#define otx2_dev_is_Ax(dev) \
((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0))
+#define otx2_dev_is_95xx_A0(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MINOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x1))
+#define otx2_dev_is_95xx_Ax(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x1))
+
+#define otx2_dev_is_96xx_A0(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MINOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x0))
+#define otx2_dev_is_96xx_Ax(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x0))
+
struct otx2_dev;
/* Link status callback */
aq->cq.cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
aq->cq.cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
- /* TX pause frames enable flowctrl on RX side */
- if (dev->fc_info.tx_pause) {
- /* Single bpid is allocated for all rx channels for now */
- aq->cq.bpid = dev->fc_info.bpid[0];
- aq->cq.bp = NIX_CQ_BP_LEVEL;
- aq->cq.bp_ena = 1;
- }
-
/* Many to one reduction */
aq->cq.qint_idx = qid % dev->qints;
/* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
aq->cq.cint_idx = qid;
if (otx2_ethdev_fixup_is_limit_cq_full(dev)) {
+ const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
uint16_t min_rx_drop;
- const float rx_cq_skid = 1024 * 256;
min_rx_drop = ceil(rx_cq_skid / (float)cq_size);
aq->cq.drop = min_rx_drop;
aq->cq.drop_ena = 1;
+ rxq->cq_drop = min_rx_drop;
+ } else {
+ rxq->cq_drop = NIX_CQ_THRESH_LEVEL;
+ aq->cq.drop = rxq->cq_drop;
+ aq->cq.drop_ena = 1;
+ }
+
+ /* TX pause frames enable flowctrl on RX side */
+ if (dev->fc_info.tx_pause) {
+ /* Single bpid is allocated for all rx channels for now */
+ aq->cq.bpid = dev->fc_info.bpid[0];
+ aq->cq.bp = rxq->cq_drop;
+ aq->cq.bp_ena = 1;
}
rc = otx2_mbox_process(mbox);
/* Many to one reduction */
aq->rq.qint_idx = qid % dev->qints;
- if (otx2_ethdev_fixup_is_limit_cq_full(dev))
- aq->rq.xqe_drop_ena = 1;
+ aq->rq.xqe_drop_ena = 1;
rc = otx2_mbox_process(mbox);
if (rc) {
dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
- if (otx2_dev_is_Ax(dev)) {
+ if (otx2_dev_is_96xx_A0(dev) ||
+ otx2_dev_is_95xx_Ax(dev)) {
dev->hwcap |= OTX2_FIXUP_F_MIN_4K_Q;
dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL;
}
#define NIX_CQ_ALIGN 512
#define NIX_SQB_LOWER_THRESH 90
#define LMT_SLOT_MASK 0x7f
+#define NIX_RX_DEFAULT_RING_SZ 4096
/* If PTP is enabled additional SEND MEM DESC is required which
* takes 2 words, hence max 7 iova address are possible
((RTE_ALIGN_MUL_CEIL(NIX_TX_NB_SEG_MAX, 3) / 3) \
+ NIX_TX_NB_SEG_MAX)
-/* Apply BP when CQ is 75% full */
-#define NIX_CQ_BP_LEVEL (25 * 256 / 100)
+/* Apply BP/DROP when CQ is 95% full */
+#define NIX_CQ_THRESH_LEVEL (5 * 256 / 100)
+#define NIX_CQ_FULL_ERRATA_SKID (1024ull * 256)
#define CQ_OP_STAT_OP_ERR 63
#define CQ_OP_STAT_CQ_ERR 46
enum nix_q_size_e qsize;
struct rte_eth_dev *eth_dev;
struct otx2_eth_qconf qconf;
+ uint16_t cq_drop;
} __rte_cache_aligned;
static inline struct otx2_eth_dev *
.offloads = 0,
};
+ devinfo->default_rxportconf = (struct rte_eth_dev_portconf) {
+ .ring_size = NIX_RX_DEFAULT_RING_SZ,
+ };
+
devinfo->rx_desc_lim = (struct rte_eth_desc_lim) {
.nb_max = UINT16_MAX,
.nb_min = NIX_RX_MIN_DESC,
if (enb) {
aq->cq.bpid = fc->bpid[0];
aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
- aq->cq.bp = NIX_CQ_BP_LEVEL;
+ aq->cq.bp = rxq->cq_drop;
aq->cq_mask.bp = ~(aq->cq_mask.bp);
}
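
As a usage note, a small application-side sketch of how the default Rx ring
size advertised via dev_info can be picked up after this change.
preferred_rx_ring_size() and the 1024 fallback are illustrative choices by
this sketch, not part of DPDK or this patch:

/* Illustrative application-side sketch, not part of the patch */
#include <stdint.h>
#include <rte_ethdev.h>

static uint16_t
preferred_rx_ring_size(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	/* ring_size == 0 means the PMD expresses no preference */
	if (dev_info.default_rxportconf.ring_size != 0)
		return dev_info.default_rxportconf.ring_size;
	return 1024; /* application-chosen fallback */
}

With this patch the otx2 PMD reports NIX_RX_DEFAULT_RING_SZ (4096) in
default_rxportconf.ring_size.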