TAILQ_INIT(&(eth_dev->callbacks));
/*
- * Set the default maximum frame size.
+ * Set the default MTU.
*/
- eth_dev->data->max_frame_size = ETHER_MAX_LEN;
+ eth_dev->data->mtu = ETHER_MTU;
/* Invoke PMD device initialization function */
diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
}
+
+int
+rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ *mtu = dev->data->mtu;
+ return 0;
+}
+
+int
+rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
+{
+ int ret;
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
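+ /* report -ENOTSUP if the driver provides no mtu_set callback */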
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
+
+ ret = (*dev->dev_ops->mtu_set)(dev, mtu);
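+ /* cache the new MTU in the device data only if the driver accepted it */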
+ if (!ret)
+ dev->data->mtu = mtu;
+
+ return ret;
+}
+
int
rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
{
typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
/**< @internal Check DD bit of specific RX descriptor */
+typedef int (*mtu_set_t)(struct rte_eth_dev *dev, uint16_t mtu);
+/**< @internal Set MTU. */
+
typedef int (*vlan_filter_set_t)(struct rte_eth_dev *dev,
uint16_t vlan_id,
int on);
eth_queue_stats_mapping_set_t queue_stats_mapping_set;
/**< Configure per queue stat counter mapping. */
eth_dev_infos_get_t dev_infos_get; /**< Get device info. */
+ mtu_set_t mtu_set; /**< Set MTU. */
vlan_filter_set_t vlan_filter_set; /**< Filter VLAN Setup. */
vlan_tpid_set_t vlan_tpid_set; /**< Outer VLAN TPID Setup. */
vlan_strip_queue_set_t vlan_strip_queue_set; /**< VLAN Stripping on queue. */
/**< Link-level information & status */
struct rte_eth_conf dev_conf; /**< Configuration applied to device. */
- uint16_t max_frame_size; /**< Default is ETHER_MAX_LEN (1518). */
+ uint16_t mtu; /**< Maximum Transmission Unit. */
uint32_t min_rx_buf_size;
/**< Common rx buffer size handled by all queues */
extern void rte_eth_dev_info_get(uint8_t port_id,
struct rte_eth_dev_info *dev_info);
+/**
+ * Retrieve the MTU of an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param mtu
+ * A pointer to a uint16_t where the retrieved MTU is to be stored.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port_id* invalid.
+ */
+extern int rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu);
+
+/**
+ * Change the MTU of an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param mtu
+ * A uint16_t for the MTU to be applied.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if operation is not supported.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if *mtu* invalid.
+ */
+extern int rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu);
+
/**
* Enable/Disable hardware filtering by an Ethernet device of received
* VLAN packets tagged with a given VLAN Tag Identifier.
#define ETHER_MAX_VLAN_ID 4095 /**< Maximum VLAN ID. */
+#define ETHER_MIN_MTU 68 /**< Minimum MTU for IPv4 packets, see RFC 791. */
+
/**
* Ethernet address:
* A universally administered address is uniquely assigned to a device by its
static void em_init_manageability(struct e1000_hw *hw);
static void em_release_manageability(struct e1000_hw *hw);
+static int eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
static int eth_em_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
static void eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask);
.stats_get = eth_em_stats_get,
.stats_reset = eth_em_stats_reset,
.dev_infos_get = eth_em_infos_get,
+ .mtu_set = eth_em_mtu_set,
.vlan_filter_set = eth_em_vlan_filter_set,
.vlan_offload_set = eth_em_vlan_offload_set,
.rx_queue_setup = eth_em_rx_queue_setup,
e1000_rar_set(hw, addr, index);
}
+
+static int
+eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct rte_eth_dev_info dev_info;
+ struct e1000_hw *hw;
+ uint32_t frame_size;
+ uint32_t rctl;
+
+ eth_em_infos_get(dev, &dev_info);
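+ /* on-wire frame size: MTU plus Ethernet header, CRC and one VLAN tag */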
+ frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE;
+
+ /* check that mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+ return -EINVAL;
+
+ /* refuse an MTU that requires scattered packet support when this
+ * feature has not already been enabled. */
+ if (!dev->data->scattered_rx &&
+ frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+ return -EINVAL;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+ /* switch to jumbo mode if needed */
+ if (frame_size > ETHER_MAX_LEN) {
+ dev->data->dev_conf.rxmode.jumbo_frame = 1;
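+ /* LPE (Long Packet Enable) lets the controller accept oversized frames */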
+ rctl |= E1000_RCTL_LPE;
+ } else {
+ dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ rctl &= ~E1000_RCTL_LPE;
+ }
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+ return 0;
+}
+
struct rte_driver em_pmd_drv = {
.type = PMD_PDEV,
.init = rte_em_pmd_init,
static void igb_init_manageability(struct e1000_hw *hw);
static void igb_release_manageability(struct e1000_hw *hw);
+static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
static void eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
.stats_get = eth_igb_stats_get,
.stats_reset = eth_igb_stats_reset,
.dev_infos_get = eth_igb_infos_get,
+ .mtu_set = eth_igb_mtu_set,
.vlan_filter_set = eth_igb_vlan_filter_set,
.vlan_tpid_set = eth_igb_vlan_tpid_set,
.vlan_offload_set = eth_igb_vlan_offload_set,
return -ENOENT;
}
+
+static int
+eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ uint32_t rctl;
+ struct e1000_hw *hw;
+ struct rte_eth_dev_info dev_info;
+ uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
+ VLAN_TAG_SIZE);
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+#ifdef RTE_LIBRTE_82571_SUPPORT
+ /* XXX: not bigger than max_rx_pktlen */
+ if (hw->mac.type == e1000_82571)
+ return -ENOTSUP;
+#endif
+ eth_igb_infos_get(dev, &dev_info);
+
+ /* check that mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) ||
+ (frame_size > dev_info.max_rx_pktlen))
+ return -EINVAL;
+
+ /* refuse an MTU that requires scattered packet support when this
+ * feature has not already been enabled. */
+ if (!dev->data->scattered_rx &&
+ frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+ return -EINVAL;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+ /* switch to jumbo mode if needed */
+ if (frame_size > ETHER_MAX_LEN) {
+ dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ rctl |= E1000_RCTL_LPE;
+ } else {
+ dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ rctl &= ~E1000_RCTL_LPE;
+ }
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
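+ /* program the new limit into the receive long packet maximum length register */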
+ E1000_WRITE_REG(hw, E1000_RLPML,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+ return 0;
+}
+
static struct rte_driver pmd_igb_drv = {
.type = PMD_PDEV,
.init = rte_igb_pmd_init,
uint8_t is_rx);
static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
+
+static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
.stats_reset = ixgbe_dev_stats_reset,
.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
.dev_infos_get = ixgbe_dev_info_get,
+ .mtu_set = ixgbe_dev_mtu_set,
.vlan_filter_set = ixgbe_vlan_filter_set,
.vlan_tpid_set = ixgbe_vlan_tpid_set,
.vlan_offload_set = ixgbe_vlan_offload_set,
ixgbe_clear_rar(hw, index);
}
+
+static int
+ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ uint32_t hlreg0;
+ uint32_t maxfrs;
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev_info dev_info;
+ uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+ ixgbe_dev_info_get(dev, &dev_info);
+
+ /* check that mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+ return -EINVAL;
+
+ /* refuse an MTU that requires scattered packet support when this
+ * feature has not already been enabled. */
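+ /* budget for up to two VLAN tags on top of the computed frame size */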
+ if (!dev->data->scattered_rx &&
+ (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+
+ /* switch to jumbo mode if needed */
+ if (frame_size > ETHER_MAX_LEN) {
+ dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+ } else {
+ dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
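+ /* MAXFRS keeps the maximum frame size in its upper 16 bits; preserve the
+ * low word and update only that field */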
+ maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+ maxfrs &= 0x0000FFFF;
+ maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
+
+ return 0;
+}
+
/*
* Virtual Function operations
*/
uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
struct ixgbe_dcb_tc_config *tc;
- uint32_t max_frame = dev->data->max_frame_size;
+ uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);