#define BNXT_MAX_MTU 9574
#define VLAN_TAG_SIZE 4
+#define BNXT_NUM_VLANS 2
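+/*
+ * Upper bound on the Rx frame length: maximum MTU plus Ethernet header,
+ * CRC and two VLAN tags (9600 bytes with the current constants).
+ */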
+#define BNXT_MAX_PKT_LEN (BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +\
+ RTE_ETHER_CRC_LEN +\
+ (BNXT_NUM_VLANS * VLAN_TAG_SIZE))
#define BNXT_VF_RSV_NUM_RSS_CTX 1
#define BNXT_VF_RSV_NUM_L2_CTX 4
/* TODO: For now, do not support VMDq/RFS on VFs. */
#define BNXT_VF_RSV_NUM_VNIC 1
#define BNXT_MAX_LED 4
-#define BNXT_NUM_VLANS 2
#define BNXT_MIN_RING_DESC 16
#define BNXT_MAX_TX_RING_DESC 4096
#define BNXT_MAX_RX_RING_DESC 8192
dev_info->hash_key_size = 40;
max_vnics = bp->max_vnics;
+ /*
+ * MTU specifics: reporting the supported range allows the ethdev layer
+ * to range-check requested MTU values on behalf of the driver.
+ */
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+ dev_info->max_mtu = BNXT_MAX_MTU;
+
/* Fast path specifics */
dev_info->min_rx_bufsize = 1;
- dev_info->max_rx_pktlen = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
- RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
+ dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;
dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
{
struct bnxt *bp = eth_dev->data->dev_private;
- struct rte_eth_dev_info dev_info;
uint32_t new_pkt_size;
uint32_t rc = 0;
uint32_t i;
new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
VLAN_TAG_SIZE * BNXT_NUM_VLANS;
- rc = bnxt_dev_info_get_op(eth_dev, &dev_info);
- if (rc != 0) {
- PMD_DRV_LOG(ERR, "Error during getting ethernet device info\n");
- return rc;
- }
-
- if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) {
- PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n",
- RTE_ETHER_MIN_MTU, BNXT_MAX_MTU);
- return -EINVAL;
- }
-
#ifdef RTE_ARCH_X86
/*
* If vector-mode tx/rx is active, disallow any MTU change that would
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
- eth_dev->data->mtu = new_mtu;
- PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);
-
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
uint16_t size = 0;
- vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
- RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
+ vnic->mru = new_mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * BNXT_NUM_VLANS;
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc)
break;
}
}
+ PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu);
+
return rc;
}
mb_pool = bp->rx_queues[0]->mb_pool;
rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
RTE_PKTMBUF_HEADROOM;
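+ /* Do not advertise a buffer size beyond the largest supported frame. */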
+ rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
enables |=
HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
mb_pool = bp->rx_queues[0]->mb_pool;
rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
RTE_PKTMBUF_HEADROOM;
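+ /* Apply the same cap as for the Rx buffer size above. */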
+ rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
size -= RTE_PKTMBUF_HEADROOM;
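+ /* Keep the jumbo threshold within the largest supported frame length. */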
+ size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
req.jumbo_thresh = rte_cpu_to_le_16(size);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
if (rc)
goto err_out;
- rxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
- RTE_ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
-
if (bp->eth_dev->data->rx_queue_state[queue_index] ==
RTE_ETH_QUEUE_STATE_STARTED) {
if (bnxt_init_one_rx_ring(rxq)) {
if (bnxt_alloc_rx_agg_ring(bp, i))
goto err_out;
- rxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
- RTE_ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
if (bnxt_init_one_rx_ring(rxq)) {
PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
- PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_use_size);
PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);
rc = bnxt_init_rx_ring_struct(rxq, socket_id);
if (rc)
goto out;
+ PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
rxq->queue_id = queue_idx;
rxq->port_id = eth_dev->data->port_id;
if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
struct bnxt_vnic_info *vnic;
uint32_t rx_buf_size;
- uint32_t rx_buf_use_size; /* useable size */
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
struct bnxt_cp_ring_info *nq_ring;
struct bnxt_rx_ring_info *rxr;
struct bnxt_ring *ring;
- rxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
- RTE_ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
- rxq->rx_buf_size = rxq->rx_buf_use_size + sizeof(struct rte_mbuf);
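+ /* Room for the largest supported frame plus the mbuf structure itself. */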
+ rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf);
rxr = rte_zmalloc_socket("bnxt_rx_ring",
sizeof(struct bnxt_rx_ring_info),
uint16_t size;
size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
- if (rxq->rx_buf_use_size <= size)
- size = rxq->rx_buf_use_size;
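+ /* Clamp the usable mbuf data size to the largest supported frame. */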
+ size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;