if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
eth_dev->data->mtu =
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
- ETHER_HDR_LEN - ETHER_CRC_LEN;
+ ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
eth_dev->data->scattered_rx = 1;
struct qede_fastpath *fp;
uint32_t max_rx_pkt_len;
uint32_t frame_size;
- uint16_t rx_buf_size;
uint16_t bufsz;
bool restart = false;
- int i;
+ int i, rc;
PMD_INIT_FUNC_TRACE(edev);
qede_dev_info_get(dev, &dev_info);
- max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
- frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
+ max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
+ frame_size = max_rx_pkt_len;
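+ /* frame_size already accounts for the full L2 overhead (two VLAN tags,
+ * optional LLC/SNAP header and 2-byte pad) via QEDE_MAX_ETHER_HDR_LEN.
+ */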
if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
- ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
+ QEDE_ETH_OVERHEAD);
return -EINVAL;
}
if (!dev->data->scattered_rx &&
if (fp->rxq != NULL) {
bufsz = (uint16_t)rte_pktmbuf_data_room_size(
fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
- if (dev->data->scattered_rx)
- rx_buf_size = bufsz + ETHER_HDR_LEN +
- ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
- else
- rx_buf_size = frame_size;
- rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
- fp->rxq->rx_buf_size = rx_buf_size;
- DP_INFO(edev, "RX buffer size %u\n", rx_buf_size);
+ /* cache align the mbuf size to simplify rx_buf_size
+ * calculation
+ */
+ bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
+ rc = qede_calc_rx_buf_size(dev, bufsz, frame_size);
+ if (rc < 0)
+ return rc;
+
+ fp->rxq->rx_buf_size = rc;
}
}
if (max_rx_pkt_len > ETHER_MAX_LEN)
return 0;
}
+/* Criteria for calculating the Rx buffer size -
+ * 1) rx_buf_size should not exceed the size of the mbuf
+ * 2) In scattered_rx mode - minimum rx_buf_size should be
+ * (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
+ * 3) In regular mode - minimum rx_buf_size should be
+ * (MTU + Maximum L2 Header Size + 2)
+ * In the above cases, +2 corresponds to the 2 bytes of padding in front
+ * of the L2 header.
+ * 4) rx_buf_size should be cache-line-size aligned. Considering
+ * criterion 1, we align the size down (floor) instead of up (ceil),
+ * so that the aligned rx_buf_size does not exceed the mbuf size.
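+ *
+ * Illustrative example (assuming QEDE_ETH_OVERHEAD works out to 18 bytes
+ * and ETH_RX_MAX_BUFF_PER_PKT to 5): a 9000-byte MTU gives a maximum
+ * frame of 9000 + 14 + 18 = 9032 bytes, so scattered Rx needs buffers of
+ * at least 9032 / 5 bytes each, while regular Rx needs a single
+ * 9032-byte buffer (before cache-line alignment).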
+ */
+int
+qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
+ uint16_t max_frame_size)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ int rx_buf_size;
+
+ if (dev->data->scattered_rx) {
+ /* Per HW limitation, only ETH_RX_MAX_BUFF_PER_PKT buffers can be
+ * used for a single packet, so make sure the mbuf size is
+ * sufficient for this.
+ */
+ if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
+ (max_frame_size + QEDE_ETH_OVERHEAD)) {
+ DP_ERR(edev, "mbuf size %d is not enough to hold max fragments (%d) for max rx packet length (%d)\n",
+ mbufsz, ETH_RX_MAX_BUFF_PER_PKT, max_frame_size);
+ return -EINVAL;
+ }
+
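+ /* Use the full (cache-aligned) mbuf data room, but never less than
+ * the per-fragment share of the largest possible frame.
+ */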
+ rx_buf_size = RTE_MAX(mbufsz,
+ (max_frame_size + QEDE_ETH_OVERHEAD) /
+ ETH_RX_MAX_BUFF_PER_PKT);
+ } else {
+ rx_buf_size = max_frame_size + QEDE_ETH_OVERHEAD;
+ }
+
+ /* Align to cache-line size if needed */
+ return QEDE_FLOOR_TO_CACHE_LINE_SIZE(rx_buf_size);
+}
+
int
qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
/* Fix up RX buffer size */
bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+ /* cache align the mbuf size to simplify rx_buf_size calculation */
+ bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
(max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
if (!dev->data->scattered_rx) {
}
}
- if (dev->data->scattered_rx)
- rxq->rx_buf_size = bufsz + ETHER_HDR_LEN +
- ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
- else
- rxq->rx_buf_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
- /* Align to cache-line size if needed */
- rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
+ rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pkt_len);
+ if (rc < 0) {
+ rte_free(rxq);
+ return rc;
+ }
+
+ rxq->rx_buf_size = rc;
DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
#define QEDE_FW_RX_ALIGN_END (1UL << QEDE_RX_ALIGN_SHIFT)
#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
~(QEDE_FW_RX_ALIGN_END - 1))
-/* Note: QEDE_LLC_SNAP_HDR_LEN is optional */
-#define QEDE_ETH_OVERHEAD (((2 * QEDE_VLAN_TAG_SIZE)) - (ETHER_CRC_LEN) \
- + (QEDE_LLC_SNAP_HDR_LEN))
+#define QEDE_FLOOR_TO_CACHE_LINE_SIZE(n) RTE_ALIGN_FLOOR(n, \
+ QEDE_FW_RX_ALIGN_END)
+
+/* Note: QEDE_LLC_SNAP_HDR_LEN is optional;
+ * the +2 is for the padding in front of the L2 header
+ */
+#define QEDE_ETH_OVERHEAD (((2 * QEDE_VLAN_TAG_SIZE)) \
+ + (QEDE_LLC_SNAP_HDR_LEN) + 2)
+
+#define QEDE_MAX_ETHER_HDR_LEN (ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)
#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 |\
ETH_RSS_NONFRAG_IPV4_TCP |\
int qede_start_queues(struct rte_eth_dev *eth_dev);
void qede_stop_queues(struct rte_eth_dev *eth_dev);
+int qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
+ uint16_t max_frame_size);
/* Fastpath resource alloc/dealloc helpers */
int qede_alloc_fp_resc(struct qede_dev *qdev);