/*
 * NOTE(review): this is a unified-diff hunk ('+'/'-' prefixed lines), not
 * plain source — it patches the interior of an Rx burst function in the
 * bnx2x PMD (presumably bnx2x_recv_pkts; the enclosing definition is not
 * visible here). The markers are preserved byte-for-byte; only comments
 * are added.
 */
struct rte_mbuf *new_mb;
uint16_t rx_pref;
struct eth_fast_path_rx_cqe *cqe_fp;
/* Patch widens the local declarations: bd_len (bytes on this BD) and
 * buf_len (mbuf data-room size) are needed for the new length check. */
- uint16_t len, pad;
+ uint16_t len, pad, bd_len, buf_len;
struct rte_mbuf *rx_mb = NULL;
/* One-shot log throttle: 'static' makes the oversized-packet error below
 * fire once per process lifetime, then stay suppressed (log_once is set
 * to false after the first hit). */
+ static bool log_once = true;
rte_spinlock_lock(&(fp)->rx_mtx);
/* Total packet length (or GRO aggregate segment length) from the CQE. */
len = cqe_fp->pkt_len_or_gro_seg_len;
/* HW placement offset: padding the NIC inserted before the packet data. */
pad = cqe_fp->placement_offset;
/* New: bytes actually placed on this buffer descriptor, and the size of
 * the sw-ring mbuf it was DMA'd into. */
+ bd_len = cqe_fp->len_on_bd;
+ buf_len = rxq->sw_ring[bd_cons]->buf_len;
+
+ /* Check for sufficient buffer length */
+ /* uint16_t operands promote to int here, so len + pad + HEADROOM cannot
+  * wrap; drop (and warn once for) packets the mbuf cannot hold. */
+ if (unlikely(buf_len < len + (pad + RTE_PKTMBUF_HEADROOM))) {
+ if (unlikely(log_once)) {
+ PMD_DRV_LOG(ERR, sc, "mbuf size %d is not enough to hold Rx packet length more than %d",
+ buf_len - RTE_PKTMBUF_HEADROOM,
+ buf_len -
+ (pad + RTE_PKTMBUF_HEADROOM));
+ log_once = false;
+ }
+ goto next_rx;
+ }
new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
/* NOTE(review): the hunk elides lines after this point — the statements
 * below configure the received mbuf and cannot really live inside the
 * allocation-failure branch; verify against the full source (the '{' at
 * the next line is not closed within this excerpt). */
if (unlikely(!new_mb)) {
rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
rx_mb->nb_segs = 1;
rx_mb->next = NULL;
/* Patch splits the two lengths: pkt_len stays the CQE total, while
 * data_len becomes the per-BD byte count — presumably so multi-BD
 * (scattered) packets report segment length correctly; confirm against
 * the scatter-gather path in the full file. */
- rx_mb->pkt_len = rx_mb->data_len = len;
+ rx_mb->pkt_len = len;
+ rx_mb->data_len = bd_len;
rx_mb->port = rxq->port_id;
rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));