This patch implements the mtu_set dev op for the cn9k and cn10k platforms.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
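
For reference, a minimal usage sketch, assuming an already configured port and a
hypothetical set_port_mtu() helper with an illustrative portid argument; the
application goes through the generic ethdev API, and rte_eth_dev_set_mtu()
dispatches to the mtu_set dev op added by this patch:

#include <errno.h>
#include <rte_ethdev.h>

/* Hypothetical helper: validate and apply an MTU on a configured port.
 * rte_eth_dev_set_mtu() dispatches to the PMD's mtu_set dev op
 * (cnxk_nix_mtu_set on cn9k/cn10k).
 */
static int
set_port_mtu(uint16_t portid, uint16_t mtu)
{
    struct rte_eth_dev_info dev_info;
    int rc;

    rc = rte_eth_dev_info_get(portid, &dev_info);
    if (rc != 0)
        return rc;

    /* Stay within the MTU range advertised by the driver */
    if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
        return -EINVAL;

    return rte_eth_dev_set_mtu(portid, mtu);
}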
- Receiver Side Scaling (RSS)
- Inner and Outer Checksum offload
- Link state information
+- MTU update
- Scatter-Gather IO support
- Vector Poll mode driver
Fast mbuf free = Y
Free Tx mbuf on demand = Y
Queue start/stop = Y
+MTU update = Y
TSO = Y
RSS hash = Y
Inner RSS = Y
Fast mbuf free = Y
Free Tx mbuf on demand = Y
Queue start/stop = Y
+MTU update = Y
RSS hash = Y
Inner RSS = Y
Jumbo frame = Y
Fast mbuf free = Y
Free Tx mbuf on demand = Y
Queue start/stop = Y
+MTU update = Y
TSO = Y
RSS hash = Y
Inner RSS = Y
return speed_capa;
}
+static void
+nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
+{
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ struct rte_eth_dev *eth_dev;
+ struct cnxk_eth_dev *dev;
+ uint32_t buffsz;
+
+ dev = rxq->dev;
+ eth_dev = dev->eth_dev;
+
+ /* Get rx buffer size */
+ mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
+ buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+
+ if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
+ dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+ dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ }
+}
+
+static int
+nix_recalc_mtu(struct rte_eth_dev *eth_dev)
+{
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct cnxk_eth_rxq_sp *rxq;
+ uint16_t mtu;
+ int rc;
+
+ rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
+ /* Setup scatter mode if needed by jumbo */
+ nix_enable_mseg_on_jumbo(rxq);
+
+ /* Setup MTU based on max_rx_pkt_len */
+ mtu = data->dev_conf.rxmode.max_rx_pkt_len - CNXK_NIX_L2_OVERHEAD +
+ CNXK_NIX_MAX_VTAG_ACT_SIZE;
+
+ rc = cnxk_nix_mtu_set(eth_dev, mtu);
+ if (rc)
+ plt_err("Failed to set default MTU size, rc=%d", rc);
+
+ return rc;
+}
+
uint64_t
cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
int rc, i;
+ if (eth_dev->data->nb_rx_queues != 0) {
+ rc = nix_recalc_mtu(eth_dev);
+ if (rc)
+ return rc;
+ }
+
/* Start rx queues */
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rc = cnxk_nix_rx_queue_start(eth_dev, i);
/* CNXK platform independent eth dev ops */
struct eth_dev_ops cnxk_eth_dev_ops = {
+ .mtu_set = cnxk_nix_mtu_set,
.mac_addr_set = cnxk_nix_mac_addr_set,
.dev_infos_get = cnxk_nix_info_get,
.link_update = cnxk_nix_link_update,
#define CNXK_NIX_MAX_VTAG_ACT_SIZE (4 * CNXK_NIX_MAX_VTAG_INS)
/* ETH_HLEN+ETH_FCS+2*VLAN_HLEN */
-#define CNXK_NIX_L2_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 8)
+#define CNXK_NIX_L2_OVERHEAD (RTE_ETHER_HDR_LEN + \
+ RTE_ETHER_CRC_LEN + \
+ CNXK_NIX_MAX_VTAG_ACT_SIZE)
#define CNXK_NIX_RX_MIN_DESC 16
#define CNXK_NIX_RX_MIN_DESC_ALIGN 16
int cnxk_nix_probe(struct rte_pci_driver *pci_drv,
struct rte_pci_device *pci_dev);
int cnxk_nix_remove(struct rte_pci_device *pci_dev);
+int cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu);
int cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev,
struct rte_ether_addr *addr);
int cnxk_nix_info_get(struct rte_eth_dev *eth_dev,
devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
devinfo->max_mac_addrs = dev->max_mac_entries;
devinfo->max_vfs = pci_dev->max_vfs;
- devinfo->max_mtu = devinfo->max_rx_pktlen - CNXK_NIX_L2_OVERHEAD;
+ devinfo->max_mtu = devinfo->max_rx_pktlen -
+ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
devinfo->min_mtu = devinfo->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD;
devinfo->rx_offload_capa = dev->rx_offload_capa;
exit:
return rc;
}
+
+int
+cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ uint32_t old_frame_size, frame_size = mtu + CNXK_NIX_L2_OVERHEAD;
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct roc_nix *nix = &dev->nix;
+ int rc = -EINVAL;
+ uint32_t buffsz;
+
+ /* Check if MTU is within the allowed range */
+ if ((frame_size - RTE_ETHER_CRC_LEN) < NIX_MIN_HW_FRS) {
+ plt_err("MTU is lesser than minimum");
+ goto exit;
+ }
+
+ if ((frame_size - RTE_ETHER_CRC_LEN) >
+ ((uint32_t)roc_nix_max_pkt_len(nix))) {
+ plt_err("MTU is greater than maximum");
+ goto exit;
+ }
+
+ buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
+ old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD;
+
+ /* Refuse MTU that requires the support of scattered packets
+ * when this feature has not been enabled before.
+ */
+ if (data->dev_started && frame_size > buffsz &&
+ !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+ plt_err("Scatter offload is not enabled for mtu");
+ goto exit;
+ }
+
+ /* Check <seg size> * <max_seg> >= max_frame */
+ if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER) &&
+ frame_size > (buffsz * CNXK_NIX_RX_NB_SEG_MAX)) {
+ plt_err("Greater than maximum supported packet length");
+ goto exit;
+ }
+
+ frame_size -= RTE_ETHER_CRC_LEN;
+
+ /* Update mtu on Tx */
+ rc = roc_nix_mac_mtu_set(nix, frame_size);
+ if (rc) {
+ plt_err("Failed to set MTU, rc=%d", rc);
+ goto exit;
+ }
+
+ /* Sync same frame size on Rx */
+ rc = roc_nix_mac_max_rx_len_set(nix, frame_size);
+ if (rc) {
+ /* Rollback to older mtu */
+ roc_nix_mac_mtu_set(nix,
+ old_frame_size - RTE_ETHER_CRC_LEN);
+ plt_err("Failed to max Rx frame length, rc=%d", rc);
+ goto exit;
+ }
+
+ frame_size += RTE_ETHER_CRC_LEN;
+
+ if (frame_size > RTE_ETHER_MAX_LEN)
+ dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ /* Update max_rx_pkt_len */
+ data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+exit:
+ return rc;
+}