net/octeontx2: support 96xx A1 silicon revision
[dpdk.git] drivers/net/octeontx2/otx2_ethdev_ops.c
index 301a597..8d0a3dc 100644
@@ -2,8 +2,96 @@
  * Copyright(C) 2019 Marvell International Ltd.
  */
 
+#include <rte_mbuf_pool_ops.h>
+
 #include "otx2_ethdev.h"
 
+int
+otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+       uint32_t buffsz, frame_size = mtu + NIX_L2_OVERHEAD;
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct otx2_mbox *mbox = dev->mbox;
+       struct nix_frs_cfg *req;
+       int rc;
+
+       /* Check if MTU is within the allowed range */
+       if (frame_size < NIX_MIN_FRS || frame_size > NIX_MAX_FRS)
+               return -EINVAL;
+
+       buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
+
+       /* Refuse an MTU that requires scattered Rx support when that
+        * feature has not been enabled beforehand.
+        */
+       if (data->dev_started && frame_size > buffsz &&
+           !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER))
+               return -EINVAL;
+
+       /* Check <seg size> * <max_seg> >= max_frame */
+       if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER) &&
+           (frame_size > buffsz * NIX_RX_NB_SEG_MAX))
+               return -EINVAL;
+
+       req = otx2_mbox_alloc_msg_nix_set_hw_frs(mbox);
+       req->update_smq = true;
+       /* FRS HW config should exclude FCS but include NPC VTAG insert size */
+       req->maxlen = frame_size - RTE_ETHER_CRC_LEN + NIX_MAX_VTAG_ACT_SIZE;
+
+       rc = otx2_mbox_process(mbox);
+       if (rc)
+               return rc;
+
+       /* Now just update Rx MAXLEN */
+       req = otx2_mbox_alloc_msg_nix_set_hw_frs(mbox);
+       req->maxlen = frame_size - RTE_ETHER_CRC_LEN;
+
+       rc = otx2_mbox_process(mbox);
+       if (rc)
+               return rc;
+
+       if (frame_size > RTE_ETHER_MAX_LEN)
+               dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+       else
+               dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+       /* Update max_rx_pkt_len */
+       data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+       return rc;
+}
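
Reviewer note: this op is reached through rte_eth_dev_set_mtu(). A minimal caller sketch against the public API (the port id and MTU value are illustrative, not from this patch):

    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_errno.h>

    /* Bump an initialized port to a jumbo MTU. The op above rejects the
     * request if the resulting frame exceeds NIX_MAX_FRS, or if it would
     * need scatter on a started port without DEV_RX_OFFLOAD_SCATTER.
     */
    static int set_jumbo_mtu(uint16_t port_id)
    {
            int rc = rte_eth_dev_set_mtu(port_id, 9000);

            if (rc != 0)
                    printf("set_mtu failed: %s\n", rte_strerror(-rc));
            return rc;
    }
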
+
+int
+otx2_nix_recalc_mtu(struct rte_eth_dev *eth_dev)
+{
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct rte_pktmbuf_pool_private *mbp_priv;
+       struct otx2_eth_rxq *rxq;
+       uint32_t buffsz;
+       uint16_t mtu;
+       int rc;
+
+       /* Get rx buffer size */
+       rxq = data->rx_queues[0];
+       mbp_priv = rte_mempool_get_priv(rxq->pool);
+       buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+
+       /* Enable scatter mode if the max Rx frame does not fit in one buffer */
+       if (data->dev_conf.rxmode.max_rx_pkt_len > buffsz)
+               dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+
+       /* Setup MTU based on max_rx_pkt_len */
+       mtu = data->dev_conf.rxmode.max_rx_pkt_len - NIX_L2_OVERHEAD;
+
+       rc = otx2_nix_mtu_set(eth_dev, mtu);
+       if (rc)
+               otx2_err("Failed to set default MTU size %d", rc);
+
+       return rc;
+}
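
The buffer size used above comes from the Rx pool's private data room. A hedged sketch of the same arithmetic from the application side (pool name and sizing are illustrative):

    #include <rte_mbuf.h>

    /* The per-buffer Rx capacity seen by otx2_nix_recalc_mtu() is the
     * pool's data room minus the mbuf headroom.
     */
    static uint32_t rx_buf_capacity(void)
    {
            struct rte_mempool *mp;

            mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
                                         RTE_MBUF_DEFAULT_BUF_SIZE,
                                         SOCKET_ID_ANY);
            if (mp == NULL)
                    return 0;

            return rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
    }
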
+
 static void
 nix_cgx_promisc_config(struct rte_eth_dev *eth_dev, int en)
 {
@@ -38,6 +126,7 @@ otx2_nix_promisc_config(struct rte_eth_dev *eth_dev, int en)
 
        otx2_mbox_process(mbox);
        eth_dev->data->promiscuous = en;
+       otx2_nix_vlan_update_promisc(eth_dev, en);
 }
 
 void
@@ -86,6 +175,229 @@ otx2_nix_allmulticast_disable(struct rte_eth_dev *eth_dev)
        nix_allmulticast_config(eth_dev, 0);
 }
 
+void
+otx2_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
+                     struct rte_eth_rxq_info *qinfo)
+{
+       struct otx2_eth_rxq *rxq;
+
+       rxq = eth_dev->data->rx_queues[queue_id];
+
+       qinfo->mp = rxq->pool;
+       qinfo->scattered_rx = eth_dev->data->scattered_rx;
+       qinfo->nb_desc = rxq->qconf.nb_desc;
+
+       qinfo->conf.rx_free_thresh = 0;
+       qinfo->conf.rx_drop_en = 0;
+       qinfo->conf.rx_deferred_start = 0;
+       qinfo->conf.offloads = rxq->offloads;
+}
+
+void
+otx2_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
+                     struct rte_eth_txq_info *qinfo)
+{
+       struct otx2_eth_txq *txq;
+
+       txq = eth_dev->data->tx_queues[queue_id];
+
+       qinfo->nb_desc = txq->qconf.nb_desc;
+
+       qinfo->conf.tx_thresh.pthresh = 0;
+       qinfo->conf.tx_thresh.hthresh = 0;
+       qinfo->conf.tx_thresh.wthresh = 0;
+
+       qinfo->conf.tx_free_thresh = 0;
+       qinfo->conf.tx_rs_thresh = 0;
+       qinfo->conf.offloads = txq->offloads;
+       qinfo->conf.tx_deferred_start = 0;
+}
+
+static void
+nix_rx_head_tail_get(struct otx2_eth_dev *dev,
+                    uint32_t *head, uint32_t *tail, uint16_t queue_idx)
+{
+       uint64_t reg, val;
+
+       if (head == NULL || tail == NULL)
+               return;
+
+       reg = (((uint64_t)queue_idx) << 32);
+       val = otx2_atomic64_add_nosync(reg, (int64_t *)
+                                      (dev->base + NIX_LF_CQ_OP_STATUS));
+       if (val & (OP_ERR | CQ_ERR))
+               val = 0;
+
+       *tail = (uint32_t)(val & 0xFFFFF);
+       *head = (uint32_t)((val >> 20) & 0xFFFFF);
+}
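
The helper above reads both ring pointers in one shot: an atomic add of zero to NIX_LF_CQ_OP_STATUS, with the CQ index in bits <63:32> of the operand, returns the CQ status word. The field layout below is inferred from the masks in this patch:

    /* Decode of the CQ_OP_STATUS word as used above:
     *   tail = bits <19:0>, head = bits <39:20>,
     *   used entries = (tail - head) mod qlen.
     */
    static inline void cq_status_decode(uint64_t val, uint32_t *head,
                                        uint32_t *tail)
    {
            *tail = (uint32_t)(val & 0xFFFFF);
            *head = (uint32_t)((val >> 20) & 0xFFFFF);
    }
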
+
+uint32_t
+otx2_nix_rx_queue_count(struct rte_eth_dev *eth_dev, uint16_t queue_idx)
+{
+       struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[queue_idx];
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       uint32_t head, tail;
+
+       nix_rx_head_tail_get(dev, &head, &tail, queue_idx);
+       return (tail - head) % rxq->qlen;
+}
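
Applications reach this through rte_eth_rx_queue_count(). A usage sketch (port/queue ids illustrative):

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Report how many CQ entries HW has filled on port 0, queue 0. */
    static void report_rxq_fill(void)
    {
            int used = rte_eth_rx_queue_count(0, 0);

            if (used >= 0)
                    printf("rxq0: %d descriptors ready\n", used);
    }
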
+
+static inline int
+nix_offset_has_packet(uint32_t head, uint32_t tail, uint16_t offset)
+{
+       /* Check given offset(queue index) has packet filled by HW */
+       if (tail > head && offset <= tail && offset >= head)
+               return 1;
+       /* Wrap around case */
+       if (head > tail && (offset >= head || offset <= tail))
+               return 1;
+
+       return 0;
+}
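
A worked check of the predicate above, covering both branches (values are illustrative; assumes the static helper above is in scope):

    #include <assert.h>

    static void offset_has_packet_examples(void)
    {
            /* Linear case: head = 100, tail = 200 */
            assert(nix_offset_has_packet(100, 200, 150) == 1);
            assert(nix_offset_has_packet(100, 200, 50) == 0);

            /* Wrapped case: head = 1000, tail = 10 */
            assert(nix_offset_has_packet(1000, 10, 1005) == 1);
            assert(nix_offset_has_packet(1000, 10, 3) == 1);
            assert(nix_offset_has_packet(1000, 10, 500) == 0);
    }
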
+
+int
+otx2_nix_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+       struct otx2_eth_rxq *rxq = rx_queue;
+       uint32_t head, tail;
+
+       nix_rx_head_tail_get(otx2_eth_pmd_priv(rxq->eth_dev),
+                            &head, &tail, rxq->rq);
+
+       return nix_offset_has_packet(head, tail, offset);
+}
+
+int
+otx2_nix_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+       struct otx2_eth_rxq *rxq = rx_queue;
+       uint32_t head, tail;
+
+       if (offset >= rxq->qlen)
+               return -EINVAL;
+
+       nix_rx_head_tail_get(otx2_eth_pmd_priv(rxq->eth_dev),
+                            &head, &tail, rxq->rq);
+
+       if (nix_offset_has_packet(head, tail, offset))
+               return RTE_ETH_RX_DESC_DONE;
+       else
+               return RTE_ETH_RX_DESC_AVAIL;
+}
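
The generic entry point is rte_eth_rx_descriptor_status(); the offset is a ring offset validated against qlen above. A caller sketch (ids and offset illustrative):

    #include <rte_ethdev.h>

    /* Query the descriptor state at offset 32 on port 0, queue 0. */
    static int desc_ready(void)
    {
            int st = rte_eth_rx_descriptor_status(0, 0, 32);

            return st == RTE_ETH_RX_DESC_DONE; /* 1 if HW filled it */
    }
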
+
+/* It is a NOP for octeontx2 as HW frees the buffer on xmit */
+int
+otx2_nix_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+       RTE_SET_USED(txq);
+       RTE_SET_USED(free_cnt);
+
+       return 0;
+}
+
+int
+otx2_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+                       size_t fw_size)
+{
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       int rc = (int)fw_size;
+
+       if (fw_size > sizeof(dev->mkex_pfl_name))
+               rc = sizeof(dev->mkex_pfl_name);
+
+       rc = strlcpy(fw_version, (char *)dev->mkex_pfl_name, rc);
+
+       rc += 1; /* Add the size of '\0' */
+       if (fw_size < (uint32_t)rc)
+               return rc;
+
+       return 0;
+}
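
The op reports the MKEX profile name as the "firmware version". Generic callers follow the grow-and-retry contract: a positive return is the buffer size actually needed, including the '\0'. A sketch (buffer size illustrative):

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void print_fw_version(uint16_t port_id)
    {
            char buf[64];
            int rc = rte_eth_dev_fw_version_get(port_id, buf, sizeof(buf));

            if (rc == 0)
                    printf("fw/profile: %s\n", buf);
            else if (rc > 0)
                    printf("need a %d-byte buffer\n", rc);
    }
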
+
+int
+otx2_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool)
+{
+       RTE_SET_USED(eth_dev);
+
+       if (!strcmp(pool, rte_mbuf_platform_mempool_ops()))
+               return 0;
+
+       return -ENOTSUP;
+}
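
Only the platform mempool ops (the octeontx2 NPA pool on this SoC) are accepted. An application can confirm this before allocating its pools; a hedged sketch:

    #include <errno.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf_pool_ops.h>

    static int check_pool_ops(uint16_t port_id)
    {
            const char *ops = rte_mbuf_platform_mempool_ops();

            /* A negative return means the ops name is not usable here */
            if (rte_eth_dev_pool_ops_supported(port_id, ops) < 0)
                    return -ENOTSUP;
            return 0;
    }
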
+
+int
+otx2_nix_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
+                        enum rte_filter_type filter_type,
+                        enum rte_filter_op filter_op, void *arg)
+{
+       RTE_SET_USED(eth_dev);
+
+       if (filter_type != RTE_ETH_FILTER_GENERIC) {
+               otx2_err("Unsupported filter type %d", filter_type);
+               return -ENOTSUP;
+       }
+
+       if (filter_op == RTE_ETH_FILTER_GET) {
+               *(const void **)arg = &otx2_flow_ops;
+               return 0;
+       }
+
+       otx2_err("Invalid filter_op %d", filter_op);
+       return -EINVAL;
+}
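
This is the standard hook through which librte_ethdev discovers a PMD's rte_flow ops: a GENERIC/GET filter_ctrl call that hands back &otx2_flow_ops. A sketch of that lookup pattern:

    #include <rte_ethdev.h>
    #include <rte_flow.h>

    static const struct rte_flow_ops *flow_ops_of(uint16_t port_id)
    {
            const struct rte_flow_ops *ops = NULL;

            if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
                                        RTE_ETH_FILTER_GET, &ops) != 0)
                    return NULL;
            return ops;
    }
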
+
+static struct cgx_fw_data *
+nix_get_fwdata(struct otx2_eth_dev *dev)
+{
+       struct otx2_mbox *mbox = dev->mbox;
+       struct cgx_fw_data *rsp = NULL;
+
+       otx2_mbox_alloc_msg_cgx_get_aux_link_info(mbox);
+
+       otx2_mbox_process_msg(mbox, (void *)&rsp);
+
+       return rsp;
+}
+
+int
+otx2_nix_get_module_info(struct rte_eth_dev *eth_dev,
+                        struct rte_eth_dev_module_info *modinfo)
+{
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       struct cgx_fw_data *rsp;
+
+       rsp = nix_get_fwdata(dev);
+       if (rsp == NULL)
+               return -EIO;
+
+       modinfo->type = rsp->fwdata.sfp_eeprom.sff_id;
+       modinfo->eeprom_len = SFP_EEPROM_SIZE;
+
+       return 0;
+}
+
+int
+otx2_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
+                          struct rte_dev_eeprom_info *info)
+{
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       struct cgx_fw_data *rsp;
+
+       if (!info->data || !info->length ||
+           (info->offset + info->length > SFP_EEPROM_SIZE))
+               return -EINVAL;
+
+       rsp = nix_get_fwdata(dev);
+       if (rsp == NULL)
+               return -EIO;
+
+       otx2_mbox_memcpy(info->data, rsp->fwdata.sfp_eeprom.buf + info->offset,
+                        info->length);
+
+       return 0;
+}
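
Both module ops map onto the generic EEPROM API. A dump sketch honouring the SFP_EEPROM_SIZE bound checked above (buffer size illustrative):

    #include <string.h>
    #include <rte_ethdev.h>

    static int dump_sfp_eeprom(uint16_t port_id)
    {
            struct rte_eth_dev_module_info minfo;
            struct rte_dev_eeprom_info einfo;
            static uint8_t buf[256];
            int rc;

            rc = rte_eth_dev_get_module_info(port_id, &minfo);
            if (rc != 0)
                    return rc;

            memset(&einfo, 0, sizeof(einfo));
            einfo.data = buf;
            einfo.offset = 0;
            einfo.length = RTE_MIN(minfo.eeprom_len, (uint32_t)sizeof(buf));

            return rte_eth_dev_get_module_eeprom(port_id, &einfo);
    }
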
+
 void
 otx2_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 {
@@ -119,6 +431,10 @@ otx2_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
                .offloads = 0,
        };
 
+       devinfo->default_rxportconf = (struct rte_eth_dev_portconf) {
+               .ring_size = NIX_RX_DEFAULT_RING_SZ,
+       };
+
        devinfo->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = UINT16_MAX,
                .nb_min = NIX_RX_MIN_DESC,
@@ -143,4 +459,7 @@ otx2_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
        devinfo->speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
                                ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
                                ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+
+       devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+                               RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 }
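
Advertising the runtime queue-setup capabilities lets applications add queues after rte_eth_dev_start(). A hedged sketch (port/queue ids and ring size illustrative):

    #include <errno.h>
    #include <rte_ethdev.h>

    /* Set up Rx queue 1 on an already-started port 0. */
    static int late_rxq_setup(struct rte_mempool *mp)
    {
            struct rte_eth_dev_info info;

            rte_eth_dev_info_get(0, &info);
            if (!(info.dev_capa & RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
                    return -ENOTSUP;

            return rte_eth_rx_queue_setup(0, 1, 1024, SOCKET_ID_ANY,
                                          NULL, mp);
    }
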