static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
- struct rte_eth_dev_info dev_info;
struct axgbe_port *pdata = dev->data->dev_private;
- uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
- unsigned int val = 0;
- axgbe_dev_info_get(dev, &dev_info);
- /* check that mtu is within the allowed range */
- if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
- return -EINVAL;
+ unsigned int val;
+
/* mtu setting is forbidden if port is started */
if (dev->data->dev_started) {
PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
dev->data->port_id);
return -EBUSY;
}
- if (mtu > RTE_ETHER_MTU)
- val = 1;
- else
- val = 0;
+ val = mtu > RTE_ETHER_MTU ? 1 : 0;
AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+
return 0;
}
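The pattern above repeats throughout the hunks that follow: each PMD drops its private MTU/frame-size range check and keeps only the guard that the port must be stopped, because rte_eth_dev_set_mtu() now performs the bounds check once in the ethdev layer (see the library hunk at the end of this section). A caller-side sketch under that assumption (the wrapper name is hypothetical):

#include <rte_ethdev.h>

/* Hypothetical helper: stop the port, then let the ethdev layer range-check
 * and apply the new MTU before the PMD callback is invoked. */
static int
example_set_port_mtu(uint16_t port_id, uint16_t mtu)
{
	int ret;

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0)
		return ret;

	return rte_eth_dev_set_mtu(port_id, mtu);
}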
uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU;
struct bnxt *bp = eth_dev->data->dev_private;
uint32_t new_pkt_size;
- uint32_t rc = 0;
+ uint32_t rc;
uint32_t i;
rc = is_bnxt_in_error(bp);
{
struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
- struct rte_eth_dev_info dev_info;
- int err;
uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
- err = cxgbe_dev_info_get(eth_dev, &dev_info);
- if (err != 0)
- return err;
-
- /* Must accommodate at least RTE_ETHER_MIN_MTU */
- if (mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
- return -EINVAL;
-
- err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
+ return t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
-1, -1, true);
- return err;
}
/*
PMD_INIT_FUNC_TRACE();
- if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
- return -EINVAL;
/*
* Refuse mtu that requires the support of scattered packets
* when this feature has not been enabled before.
return -EINVAL;
}
- /* check that mtu is within the allowed range */
- if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN)
- return -EINVAL;
-
/* Set the Max Rx frame length as 'mtu' +
* Maximum Ethernet header length
*/
static int
eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
- struct rte_eth_dev_info dev_info;
struct e1000_hw *hw;
uint32_t frame_size;
uint32_t rctl;
- int ret;
-
- ret = eth_em_infos_get(dev, &dev_info);
- if (ret != 0)
- return ret;
frame_size = mtu + E1000_ETH_OVERHEAD;
- /* check that mtu is within the allowed range */
- if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
- return -EINVAL;
-
/*
* If device is started, refuse mtu that requires the support of
* scattered packets when this feature has not been enabled before.
{
uint32_t rctl;
struct e1000_hw *hw;
- struct rte_eth_dev_info dev_info;
uint32_t frame_size = mtu + E1000_ETH_OVERHEAD;
- int ret;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (hw->mac.type == e1000_82571)
return -ENOTSUP;
#endif
- ret = eth_igb_infos_get(dev, &dev_info);
- if (ret != 0)
- return ret;
-
- /* check that mtu is within the allowed range */
- if (mtu < RTE_ETHER_MIN_MTU ||
- frame_size > dev_info.max_rx_pktlen)
- return -EINVAL;
-
/*
* If device is started, refuse mtu that requires the support of
* scattered packets when this feature has not been enabled before.
struct enetc_hw *enetc_hw = &hw->hw;
uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
- /* check that mtu is within the allowed range */
- if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
- return -EINVAL;
-
/*
* Refuse mtu that requires the support of scattered packets
* when this feature has not been enabled before.
static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
- int ret = 0;
+ int ret;
PMD_DRV_LOG(INFO, "Set port mtu, port_id: %d, mtu: %d, max_pkt_len: %d",
dev->data->port_id, mtu, HINIC_MTU_TO_PKTLEN(mtu));
- if (mtu < HINIC_MIN_MTU_SIZE || mtu > HINIC_MAX_MTU_SIZE) {
- PMD_DRV_LOG(ERR, "Invalid mtu: %d, must between %d and %d",
- mtu, HINIC_MIN_MTU_SIZE, HINIC_MAX_MTU_SIZE);
- return -EINVAL;
- }
-
ret = hinic_set_port_mtu(nic_dev->hwdev, mtu);
if (ret) {
PMD_DRV_LOG(ERR, "Set port mtu failed, ret: %d", ret);
}
static int
-i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
{
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct rte_eth_dev_data *dev_data = pf->dev_data;
- uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
- int ret = 0;
-
- /* check if mtu is within the allowed range */
- if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
- return -EINVAL;
-
/* mtu setting is forbidden if port is started */
- if (dev_data->dev_started) {
+ if (dev->data->dev_started != 0) {
PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
- dev_data->port_id);
+ dev->data->port_id);
return -EBUSY;
}
- return ret;
+ return 0;
}
/* Restore ethertype filter */
}
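With the per-driver checks removed, the bounds that the library enforces come from what each PMD reports through its dev_infos_get callback. A minimal sketch of that contract (the driver name and the 9600-byte frame limit are invented; the rte_eth_dev_info fields are the real ones consulted by rte_eth_dev_set_mtu()):

#include <rte_ethdev.h>

#define EXAMPLE_MAX_RX_PKTLEN 9600 /* made-up hardware frame limit */

static int
example_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
		      struct rte_eth_dev_info *dev_info)
{
	/* rte_eth_dev_set_mtu() rejects any MTU outside [min_mtu, max_mtu]. */
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = EXAMPLE_MAX_RX_PKTLEN -
			    RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN;
	dev_info->max_rx_pktlen = EXAMPLE_MAX_RX_PKTLEN;

	return 0;
}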
static int
-iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
{
- uint32_t frame_size = mtu + IAVF_ETH_OVERHEAD;
- int ret = 0;
-
- if (mtu < RTE_ETHER_MIN_MTU || frame_size > IAVF_FRAME_SIZE_MAX)
- return -EINVAL;
-
/* mtu setting is forbidden if port is started */
if (dev->data->dev_started) {
PMD_DRV_LOG(ERR, "port must be stopped before configuration");
return -EBUSY;
}
- return ret;
+ return 0;
}
static int
}
static int
-ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
{
- struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct rte_eth_dev_data *dev_data = pf->dev_data;
- uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
-
- /* check if mtu is within the allowed range */
- if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
- return -EINVAL;
-
/* mtu setting is forbidden if port is started */
- if (dev_data->dev_started) {
+ if (dev->data->dev_started != 0) {
PMD_DRV_LOG(ERR,
"port %d must be stopped before configuration",
- dev_data->port_id);
+ dev->data->port_id);
return -EBUSY;
}
if (IGC_READ_REG(hw, IGC_CTRL_EXT) & IGC_CTRL_EXT_EXT_VLAN)
frame_size += VLAN_TAG_SIZE;
- /* check that mtu is within the allowed range */
- if (mtu < RTE_ETHER_MIN_MTU ||
- frame_size > MAX_RX_JUMBO_FRAME_SIZE)
- return -EINVAL;
-
/*
* If device is started, refuse mtu that requires the support of
* scattered packets when this feature has not been enabled before.
int ret = 0;
struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
struct rte_eth_dev_data *dev_data = ethdev->data;
- uint32_t frame_size = mtu + IPN3KE_ETH_OVERHEAD;
-
- /* check if mtu is within the allowed range */
- if (mtu < RTE_ETHER_MIN_MTU ||
- frame_size > IPN3KE_MAC_FRAME_SIZE_MAX)
- return -EINVAL;
/* mtu setting is forbidden if port is started */
/* make sure NIC port is stopped */
lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
- uint16_t pf_mtu = lio_dev->linfo.link.s.mtu;
struct lio_dev_ctrl_cmd ctrl_cmd;
struct lio_ctrl_pkt ctrl_pkt;
return -EINVAL;
}
- /* check if VF MTU is within allowed range.
- * New value should not exceed PF MTU.
- */
- if (mtu < RTE_ETHER_MIN_MTU || mtu > pf_mtu) {
- lio_dev_err(lio_dev, "VF MTU should be >= %d and <= %d\n",
- RTE_ETHER_MIN_MTU, pf_mtu);
- return -EINVAL;
- }
-
/* flush added to prevent cmd failure
* in case the queue is full
*/
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- /* check that mtu is within the allowed range */
- if (mtu < RTE_ETHER_MIN_MTU || (uint32_t)mtu > hw->max_mtu)
- return -EINVAL;
-
/* mtu setting is forbidden if port is started */
if (dev->data->dev_started) {
PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
struct rte_eth_dev_data *data = eth_dev->data;
int rc = 0;
- /* Check if MTU is within the allowed range */
- if (frame_size < OCCTX_MIN_FRS || frame_size > OCCTX_MAX_FRS)
- return -EINVAL;
-
buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
/* Refuse MTU that requires the support of scattered packets
if (dev->configured && otx2_ethdev_is_ptp_en(dev))
frame_size += NIX_TIMESYNC_RX_OFFSET;
- /* Check if MTU is within the allowed range */
- if (frame_size < NIX_MIN_FRS || frame_size > NIX_MAX_FRS)
- return -EINVAL;
-
buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
/* Refuse MTU that requires the support of scattered packets
{
struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_eth_dev_info dev_info = {0};
struct qede_fastpath *fp;
uint32_t frame_size;
uint16_t bufsz;
int i, rc;
PMD_INIT_FUNC_TRACE(edev);
- rc = qede_dev_info_get(dev, &dev_info);
- if (rc != 0) {
- DP_ERR(edev, "Error during getting ethernet device info\n");
- return rc;
- }
frame_size = mtu + QEDE_MAX_ETHER_HDR_LEN;
- if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {
- DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
- mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN -
- QEDE_ETH_OVERHEAD);
- return -EINVAL;
- }
if (!dev->data->scattered_rx &&
frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
PMD_INIT_FUNC_TRACE();
- if (frame_size > NIC_HW_MAX_FRS)
- return -EINVAL;
-
- if (frame_size < NIC_HW_MIN_FRS)
- return -EINVAL;
-
buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
/*
txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
- struct rte_eth_dev_info dev_info;
uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
struct rte_eth_dev_data *dev_data = dev->data;
- int ret;
-
- ret = txgbe_dev_info_get(dev, &dev_info);
- if (ret != 0)
- return ret;
-
- /* check that mtu is within the allowed range */
- if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
- return -EINVAL;
/* If device is started, refuse mtu that requires the support of
* scattered packets when this feature has not been enabled before.
* which relies on dev->dev_ops->dev_infos_get.
*/
if (*dev->dev_ops->dev_infos_get != NULL) {
+ uint16_t overhead_len;
+ uint32_t frame_size;
+
ret = rte_eth_dev_info_get(port_id, &dev_info);
if (ret != 0)
return ret;
if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
return -EINVAL;
+ overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
+ dev_info.max_mtu);
+ frame_size = mtu + overhead_len;
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
+ return -EINVAL;
+
if ((dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) != 0)
is_jumbo_frame_capable = 1;
}
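For reference, the eth_dev_get_overhead_len() helper used above derives the per-port L2 overhead from the limits the driver advertises; a sketch of the idea (the in-tree implementation in rte_ethdev.c may differ in detail):

#include <stdint.h>
#include <rte_ether.h>

static uint16_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint16_t overhead_len;

	/* Prefer the driver-reported difference between max frame length and
	 * max MTU; otherwise assume a plain Ethernet header plus CRC. */
	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}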