net/cnxk: fix RSS RETA table update
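
In the code below, cnxk_nix_reta_update() reads the current RETA table via
roc_nix_rss_reta_get() and rewrites only the entries whose bit is set in
reta_conf[].mask, so entries outside the mask keep their existing queue
mapping. As a non-authoritative illustration (not part of the patch; the
helper name reta_redirect_all() is invented here), the sketch below shows how
this path is reached from an application through the generic ethdev call
rte_eth_dev_rss_reta_update():

    #include <errno.h>
    #include <string.h>
    #include <rte_ethdev.h>

    /* Illustration only: point every RETA entry of a port at the same Rx
     * queue. All mask bits are set, so every entry is rewritten; clearing
     * mask bits would leave those entries at their current value.
     */
    static int
    reta_redirect_all(uint16_t port_id, uint16_t queue)
    {
            struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_256 /
                                                      RTE_ETH_RETA_GROUP_SIZE];
            struct rte_eth_dev_info dev_info;
            uint16_t i, reta_size;
            int rc;

            rc = rte_eth_dev_info_get(port_id, &dev_info);
            if (rc != 0)
                    return rc;

            reta_size = dev_info.reta_size;
            if (reta_size == 0 || reta_size > RTE_ETH_RSS_RETA_SIZE_256)
                    return -ENOTSUP;

            memset(reta_conf, 0, sizeof(reta_conf));
            for (i = 0; i < reta_size; i++) {
                    reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
                            1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);
                    reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] = queue;
            }

            return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
    }
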
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 91de6b7..f4a0562 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -67,7 +67,8 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 
        devinfo->speed_capa = dev->speed_capa;
        devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
-                           RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+                           RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP |
+                           RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
        return 0;
 }
 
@@ -81,25 +82,24 @@ cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                uint64_t flags;
                const char *output;
        } rx_offload_map[] = {
-               {DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
-               {DEV_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
-               {DEV_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
-               {DEV_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
-               {DEV_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
-               {DEV_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
-               {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
-               {DEV_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
-               {DEV_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
-               {DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
-               {DEV_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
-               {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo Frame,"},
-               {DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
-               {DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
-               {DEV_RX_OFFLOAD_SECURITY, " Security,"},
-               {DEV_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
-               {DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
-               {DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
-               {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+               {RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
+               {RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
+               {RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
+               {RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
+               {RTE_ETH_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
+               {RTE_ETH_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
+               {RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
+               {RTE_ETH_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
+               {RTE_ETH_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
+               {RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
+               {RTE_ETH_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
+               {RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
+               {RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+               {RTE_ETH_RX_OFFLOAD_SECURITY, " Security,"},
+               {RTE_ETH_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
+               {RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
+               {RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
+               {RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
        };
        static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
                                                 "Scalar, Rx Offloads:"
@@ -143,28 +143,28 @@ cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                uint64_t flags;
                const char *output;
        } tx_offload_map[] = {
-               {DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
-               {DEV_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
-               {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
-               {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
-               {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
-               {DEV_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
-               {DEV_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
-               {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
-               {DEV_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
-               {DEV_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
-               {DEV_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
-               {DEV_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
-               {DEV_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
-               {DEV_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
-               {DEV_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
-               {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
-               {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
-               {DEV_TX_OFFLOAD_SECURITY, " Security,"},
-               {DEV_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
-               {DEV_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
-               {DEV_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
-               {DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
+               {RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+               {RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
+               {RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
+               {RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
+               {RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
+               {RTE_ETH_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
+               {RTE_ETH_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
+               {RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
+               {RTE_ETH_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
+               {RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
+               {RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
+               {RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
+               {RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
+               {RTE_ETH_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
+               {RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
+               {RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
+               {RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
+               {RTE_ETH_TX_OFFLOAD_SECURITY, " Security,"},
+               {RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
+               {RTE_ETH_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
+               {RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
+               {RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
        };
        static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
                                                 "Scalar, Tx Offloads:"
@@ -204,8 +204,8 @@ cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 {
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        enum rte_eth_fc_mode mode_map[] = {
-                                          RTE_FC_NONE, RTE_FC_RX_PAUSE,
-                                          RTE_FC_TX_PAUSE, RTE_FC_FULL
+                                          RTE_ETH_FC_NONE, RTE_ETH_FC_RX_PAUSE,
+                                          RTE_ETH_FC_TX_PAUSE, RTE_ETH_FC_FULL
                                          };
        struct roc_nix *nix = &dev->nix;
        int mode;
@@ -228,8 +228,10 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
 
        memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
        cq = &dev->cqs[qid];
-       fc_cfg.cq_cfg_valid = true;
+       fc_cfg.type = ROC_NIX_FC_CQ_CFG;
        fc_cfg.cq_cfg.enable = enable;
+       /* Map all CQs to last channel */
+       fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
        fc_cfg.cq_cfg.rq = qid;
        fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
 
@@ -248,10 +250,12 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
        struct rte_eth_dev_data *data = eth_dev->data;
        struct cnxk_fc_cfg *fc = &dev->fc_cfg;
        struct roc_nix *nix = &dev->nix;
+       struct cnxk_eth_rxq_sp *rxq;
+       struct cnxk_eth_txq_sp *txq;
        uint8_t rx_pause, tx_pause;
        int rc, i;
 
-       if (roc_nix_is_vf_or_sdp(nix)) {
+       if (roc_nix_is_vf_or_sdp(nix) && !roc_nix_is_lbk(nix)) {
                plt_err("Flow control configuration is not allowed on VFs");
                return -ENOTSUP;
        }
@@ -265,10 +269,10 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
        if (fc_conf->mode == fc->mode)
                return 0;
 
-       rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-                   (fc_conf->mode == RTE_FC_RX_PAUSE);
-       tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
-                   (fc_conf->mode == RTE_FC_TX_PAUSE);
+       rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+                   (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+       tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
+                   (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
        /* Check if TX pause frame is already enabled or not */
        if (fc->tx_pause ^ tx_pause) {
@@ -282,7 +286,29 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
                }
 
                for (i = 0; i < data->nb_rx_queues; i++) {
-                       rc = nix_fc_cq_config_set(dev, i, tx_pause);
+                       struct roc_nix_fc_cfg fc_cfg;
+
+                       memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+                       rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) -
+                             1;
+                       rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
+                       if (rc)
+                               return rc;
+               }
+       }
+
+       /* Check if RX pause frame is enabled or not */
+       if (fc->rx_pause ^ rx_pause) {
+               for (i = 0; i < data->nb_tx_queues; i++) {
+                       struct roc_nix_fc_cfg fc_cfg;
+
+                       memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+                       txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[i]) -
+                             1;
+                       fc_cfg.type = ROC_NIX_FC_TM_CFG;
+                       fc_cfg.tm_cfg.sq = txq->qid;
+                       fc_cfg.tm_cfg.enable = !!rx_pause;
+                       rc = roc_nix_fc_config_set(nix, &fc_cfg);
                        if (rc)
                                return rc;
                }
@@ -299,6 +325,42 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
        return rc;
 }
 
+int
+cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
+                                        struct rte_eth_pfc_queue_info *pfc_info)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+       pfc_info->tc_max = roc_nix_chan_count_get(&dev->nix);
+       pfc_info->mode_capa = RTE_ETH_FC_FULL;
+       return 0;
+}
+
+int
+cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
+                                        struct rte_eth_pfc_queue_conf *pfc_conf)
+{
+       struct cnxk_pfc_cfg conf;
+       int rc;
+
+       memset(&conf, 0, sizeof(struct cnxk_pfc_cfg));
+
+       conf.fc_cfg.mode = pfc_conf->mode;
+
+       conf.pause_time = pfc_conf->tx_pause.pause_time;
+       conf.rx_tc = pfc_conf->tx_pause.tc;
+       conf.rx_qid = pfc_conf->tx_pause.rx_qid;
+
+       conf.tx_tc = pfc_conf->rx_pause.tc;
+       conf.tx_qid = pfc_conf->rx_pause.tx_qid;
+
+       rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
+       if (rc)
+               return rc;
+
+       return rc;
+}
+
 int
 cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
                      const struct rte_flow_ops **ops)
@@ -359,6 +421,7 @@ cnxk_nix_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr,
        roc_nix_npc_promisc_ena_dis(nix, true);
        dev->dmac_filter_enable = true;
        eth_dev->data->promiscuous = false;
+       dev->dmac_filter_count++;
 
        return 0;
 }
@@ -373,6 +436,8 @@ cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index)
        rc = roc_nix_mac_addr_del(nix, index);
        if (rc)
                plt_err("Failed to delete mac address, rc=%d", rc);
+
+       dev->dmac_filter_count--;
 }
 
 int
@@ -406,13 +471,13 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
         * when this feature has not been enabled before.
         */
        if (data->dev_started && frame_size > buffsz &&
-           !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+           !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
                plt_err("Scatter offload is not enabled for mtu");
                goto exit;
        }
 
        /* Check <seg size> * <max_seg>  >= max_frame */
-       if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER) &&
+       if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) &&
            frame_size > (buffsz * CNXK_NIX_RX_NB_SEG_MAX)) {
                plt_err("Greater than maximum supported packet length");
                goto exit;
@@ -436,17 +501,6 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
                plt_err("Failed to max Rx frame length, rc=%d", rc);
                goto exit;
        }
-
-       frame_size += RTE_ETHER_CRC_LEN;
-
-       if (frame_size > RTE_ETHER_MAX_LEN)
-               dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-       else
-               dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
-       /* Update max_rx_pkt_len */
-       data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
 exit:
        return rc;
 }
@@ -513,7 +567,8 @@ cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev)
 {
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 
-       return roc_nix_npc_mcast_config(&dev->nix, true, false);
+       return roc_nix_npc_mcast_config(&dev->nix, true,
+                                       eth_dev->data->promiscuous);
 }
 
 int
@@ -690,6 +745,66 @@ cnxk_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
        memcpy(&qinfo->conf, &txq_sp->qconf.conf.tx, sizeof(qinfo->conf));
 }
 
+uint32_t
+cnxk_nix_rx_queue_count(void *rxq)
+{
+       struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+       struct roc_nix *nix = &rxq_sp->dev->nix;
+       uint32_t head, tail;
+
+       roc_nix_cq_head_tail_get(nix, rxq_sp->qid, &head, &tail);
+       return (tail - head) % (rxq_sp->qconf.nb_desc);
+}
+
+static inline int
+nix_offset_has_packet(uint32_t head, uint32_t tail, uint16_t offset, bool is_rx)
+{
+       /* Check whether the descriptor at the given offset (queue index)
+        * has been filled (Rx) or transmitted (Tx) by the hardware.
+        * Also handles the wrap-around case.
+        */
+       return ((tail > head && offset <= tail && offset >= head) ||
+               (head > tail && (offset >= head || offset <= tail))) ?
+                      is_rx :
+                      !is_rx;
+}
+
+int
+cnxk_nix_rx_descriptor_status(void *rxq, uint16_t offset)
+{
+       struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+       struct roc_nix *nix = &rxq_sp->dev->nix;
+       uint32_t head, tail;
+
+       if (rxq_sp->qconf.nb_desc <= offset)
+               return -EINVAL;
+
+       roc_nix_cq_head_tail_get(nix, rxq_sp->qid, &head, &tail);
+
+       if (nix_offset_has_packet(head, tail, offset, 1))
+               return RTE_ETH_RX_DESC_DONE;
+       else
+               return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+cnxk_nix_tx_descriptor_status(void *txq, uint16_t offset)
+{
+       struct cnxk_eth_txq_sp *txq_sp = cnxk_eth_txq_to_sp(txq);
+       struct roc_nix *nix = &txq_sp->dev->nix;
+       uint32_t head = 0, tail = 0;
+
+       if (txq_sp->qconf.nb_desc <= offset)
+               return -EINVAL;
+
+       roc_nix_sq_head_tail_get(nix, txq_sp->qid, &head, &tail);
+
+       if (nix_offset_has_packet(head, tail, offset, 0))
+               return RTE_ETH_TX_DESC_DONE;
+       else
+               return RTE_ETH_TX_DESC_FULL;
+}
+
 /* It is a NOP for cnxk as HW frees the buffer on xmit */
 int
 cnxk_nix_tx_done_cleanup(void *txq, uint32_t free_cnt)
@@ -724,3 +839,308 @@ cnxk_nix_dev_get_reg(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
 
        return rc;
 }
+
+int
+cnxk_nix_reta_update(struct rte_eth_dev *eth_dev,
+                    struct rte_eth_rss_reta_entry64 *reta_conf,
+                    uint16_t reta_size)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       uint16_t reta[ROC_NIX_RSS_RETA_MAX];
+       struct roc_nix *nix = &dev->nix;
+       int i, j, rc = -EINVAL, idx = 0;
+
+       if (reta_size != dev->nix.reta_sz) {
+               plt_err("Size of hash lookup table configured (%d) does not "
+                       "match the size supported by hardware (%d)",
+                       reta_size, dev->nix.reta_sz);
+               goto fail;
+       }
+
+       roc_nix_rss_reta_get(nix, 0, reta);
+
+       /* Copy RETA table */
+       for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
+               for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
+                       if ((reta_conf[i].mask >> j) & 0x01)
+                               reta[idx] = reta_conf[i].reta[j];
+                       idx++;
+               }
+       }
+
+       return roc_nix_rss_reta_set(nix, 0, reta);
+
+fail:
+       return rc;
+}
+
+int
+cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
+                   struct rte_eth_rss_reta_entry64 *reta_conf,
+                   uint16_t reta_size)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       uint16_t reta[ROC_NIX_RSS_RETA_MAX];
+       struct roc_nix *nix = &dev->nix;
+       int rc = -EINVAL, i, j, idx = 0;
+
+       if (reta_size != dev->nix.reta_sz) {
+               plt_err("Size of hash lookup table configured (%d) does not "
+                       "match the size supported by hardware (%d)",
+                       reta_size, dev->nix.reta_sz);
+               goto fail;
+       }
+
+       rc = roc_nix_rss_reta_get(nix, 0, reta);
+       if (rc)
+               goto fail;
+
+       /* Copy RETA table */
+       for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
+               for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
+                       if ((reta_conf[i].mask >> j) & 0x01)
+                               reta_conf[i].reta[j] = reta[idx];
+                       idx++;
+               }
+       }
+
+       return 0;
+
+fail:
+       return rc;
+}
+
+int
+cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
+                        struct rte_eth_rss_conf *rss_conf)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct roc_nix *nix = &dev->nix;
+       uint8_t rss_hash_level;
+       uint32_t flowkey_cfg;
+       int rc = -EINVAL;
+       uint8_t alg_idx;
+
+       if (rss_conf->rss_key && rss_conf->rss_key_len != ROC_NIX_RSS_KEY_LEN) {
+               plt_err("Hash key size mismatch %d vs %d",
+                       rss_conf->rss_key_len, ROC_NIX_RSS_KEY_LEN);
+               goto fail;
+       }
+
+       if (rss_conf->rss_key)
+               roc_nix_rss_key_set(nix, rss_conf->rss_key);
+
+       rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
+       if (rss_hash_level)
+               rss_hash_level -= 1;
+       flowkey_cfg =
+               cnxk_rss_ethdev_to_nix(dev, rss_conf->rss_hf, rss_hash_level);
+
+       rc = roc_nix_rss_flowkey_set(nix, &alg_idx, flowkey_cfg,
+                                    ROC_NIX_RSS_GROUP_DEFAULT,
+                                    ROC_NIX_RSS_MCAM_IDX_DEFAULT);
+       if (rc) {
+               plt_err("Failed to set RSS hash function rc=%d", rc);
+               return rc;
+       }
+
+fail:
+       return rc;
+}
+
+int
+cnxk_nix_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+                          struct rte_eth_rss_conf *rss_conf)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+       if (rss_conf->rss_key)
+               roc_nix_rss_key_get(&dev->nix, rss_conf->rss_key);
+
+       rss_conf->rss_key_len = ROC_NIX_RSS_KEY_LEN;
+       rss_conf->rss_hf = dev->ethdev_rss_hf;
+
+       return 0;
+}
+
+int
+cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
+                               struct rte_ether_addr *mc_addr_set,
+                               uint32_t nb_mc_addr)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct rte_ether_addr null_mac_addr;
+       struct roc_nix *nix = &dev->nix;
+       int rc, index;
+       uint32_t i;
+
+       memset(&null_mac_addr, 0, sizeof(null_mac_addr));
+
+       /* All configured multicast filters should be flushed first */
+       for (i = 0; i < dev->max_mac_entries; i++) {
+               if (rte_is_multicast_ether_addr(&data->mac_addrs[i])) {
+                       rc = roc_nix_mac_addr_del(nix, i);
+                       if (rc) {
+                               plt_err("Failed to flush mcast address, rc=%d",
+                                       rc);
+                               return rc;
+                       }
+
+                       dev->dmac_filter_count--;
+                       /* Update address in NIC data structure */
+                       rte_ether_addr_copy(&null_mac_addr,
+                                           &data->mac_addrs[i]);
+               }
+       }
+
+       if (!mc_addr_set || !nb_mc_addr)
+               return 0;
+
+       /* Check for available space */
+       if (nb_mc_addr >
+           ((uint32_t)(dev->max_mac_entries - dev->dmac_filter_count))) {
+               plt_err("No space is available to add multicast filters");
+               return -ENOSPC;
+       }
+
+       /* Multicast addresses are to be installed */
+       for (i = 0; i < nb_mc_addr; i++) {
+               index = roc_nix_mac_addr_add(nix, mc_addr_set[i].addr_bytes);
+               if (index < 0) {
+                       plt_err("Failed to add mcast mac address, rc=%d",
+                               index);
+                       return index;
+               }
+
+               dev->dmac_filter_count++;
+               /* Update address in NIC data structure */
+               rte_ether_addr_copy(&mc_addr_set[i], &data->mac_addrs[index]);
+       }
+
+       roc_nix_npc_promisc_ena_dis(nix, true);
+       dev->dmac_filter_enable = true;
+       eth_dev->data->promiscuous = false;
+
+       return 0;
+}
+
+int
+nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+                                struct cnxk_pfc_cfg *conf)
+{
+       enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
+                                          ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
+       struct roc_nix *nix = &dev->nix;
+       struct roc_nix_pfc_cfg pfc_cfg;
+       struct roc_nix_fc_cfg fc_cfg;
+       struct cnxk_eth_rxq_sp *rxq;
+       struct cnxk_eth_txq_sp *txq;
+       uint8_t rx_pause, tx_pause;
+       enum rte_eth_fc_mode mode;
+       struct roc_nix_cq *cq;
+       struct roc_nix_sq *sq;
+       int rc;
+
+       if (roc_nix_is_vf_or_sdp(nix)) {
+               plt_err("Prio flow ctrl config is not allowed on VF and SDP");
+               return -ENOTSUP;
+       }
+
+       if (roc_model_is_cn96_ax() && data->dev_started) {
+               /* On Ax, CQ should be in disabled state
+                * while setting flow control configuration.
+                */
+               plt_info("Stop the port=%d for setting flow control",
+                        data->port_id);
+               return 0;
+       }
+
+       if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
+           dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
+               plt_err("Same TC can not be configured on multiple SQs");
+               return -ENOTSUP;
+       }
+
+       mode = conf->fc_cfg.mode;
+       rx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_RX_PAUSE);
+       tx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_TX_PAUSE);
+
+       /* Configure CQs */
+       memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+       rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
+       cq = &dev->cqs[rxq->qid];
+       fc_cfg.type = ROC_NIX_FC_CQ_CFG;
+       fc_cfg.cq_cfg.tc = conf->rx_tc;
+       fc_cfg.cq_cfg.enable = !!tx_pause;
+       fc_cfg.cq_cfg.rq = cq->qid;
+       fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+       rc = roc_nix_fc_config_set(nix, &fc_cfg);
+       if (rc)
+               goto exit;
+
+       /* Check if RX pause frame is enabled or not */
+       if (pfc->fc_cfg.rx_pause ^ rx_pause) {
+               if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
+                       goto exit;
+
+               if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) &&
+                   eth_dev->data->nb_tx_queues > 1) {
+                       /*
+                        * Xmit is disabled here and re-enabled once
+                        * the new PFC topology is in place.
+                        */
+                       rc = roc_nix_tm_hierarchy_disable(nix);
+                       if (rc)
+                               goto exit;
+
+                       rc = roc_nix_tm_pfc_prepare_tree(nix);
+                       if (rc)
+                               goto exit;
+
+                       rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC,
+                                                        true);
+                       if (rc)
+                               goto exit;
+               }
+       }
+
+       txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
+       sq = &dev->sqs[txq->qid];
+       memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+       fc_cfg.type = ROC_NIX_FC_TM_CFG;
+       fc_cfg.tm_cfg.sq = sq->qid;
+       fc_cfg.tm_cfg.tc = conf->tx_tc;
+       fc_cfg.tm_cfg.enable = !!rx_pause;
+       rc = roc_nix_fc_config_set(nix, &fc_cfg);
+       if (rc)
+               return rc;
+
+       dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;
+
+       /* Configure MAC block */
+       if (tx_pause)
+               pfc->class_en |= BIT(conf->rx_tc);
+       else
+               pfc->class_en &= ~BIT(conf->rx_tc);
+
+       if (pfc->class_en)
+               mode = RTE_ETH_FC_FULL;
+
+       memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+       pfc_cfg.mode = mode_map[mode];
+       pfc_cfg.tc = pfc->class_en;
+       rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
+       if (rc)
+               return rc;
+
+       pfc->fc_cfg.rx_pause = rx_pause;
+       pfc->fc_cfg.tx_pause = tx_pause;
+       pfc->fc_cfg.mode = mode;
+
+exit:
+       return rc;
+}
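
For context, a minimal sketch of how the queue-level PFC handlers added above
(cnxk_nix_priority_flow_ctrl_queue_info_get() and
cnxk_nix_priority_flow_ctrl_queue_config()) could be driven from an
application. It assumes the experimental ethdev wrappers
rte_eth_dev_priority_flow_ctrl_queue_info_get() and
rte_eth_dev_priority_flow_ctrl_queue_configure() introduced alongside
struct rte_eth_pfc_queue_conf (build with ALLOW_EXPERIMENTAL_API); the helper
name pfc_enable_tc() is invented for the example.

    #include <errno.h>
    #include <string.h>
    #include <rte_ethdev.h>

    /* Illustration only: enable PFC for one traffic class on a given
     * Rx/Tx queue pair of a port.
     */
    static int
    pfc_enable_tc(uint16_t port_id, uint8_t tc, uint16_t rx_qid, uint16_t tx_qid)
    {
            struct rte_eth_pfc_queue_info pfc_info;
            struct rte_eth_pfc_queue_conf pfc_conf;
            int rc;

            rc = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info);
            if (rc != 0)
                    return rc;
            if (tc >= pfc_info.tc_max)
                    return -EINVAL;

            memset(&pfc_conf, 0, sizeof(pfc_conf));
            pfc_conf.mode = RTE_ETH_FC_FULL;
            /* Pause frames we transmit are generated from this Rx queue/TC */
            pfc_conf.tx_pause.tc = tc;
            pfc_conf.tx_pause.rx_qid = rx_qid;
            pfc_conf.tx_pause.pause_time = 0x100;
            /* Pause frames we receive throttle this Tx queue/TC */
            pfc_conf.rx_pause.tc = tc;
            pfc_conf.rx_pause.tx_qid = tx_qid;

            return rte_eth_dev_priority_flow_ctrl_queue_configure(port_id, &pfc_conf);
    }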