diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index 589735ecd3..66b0512c86 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -12,19 +12,8 @@ static const struct rte_memzone *
 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 		      uint16_t queue_id, uint32_t ring_size, int socket_id)
 {
-	char z_name[RTE_MEMZONE_NAMESIZE];
-	const struct rte_memzone *mz;
-
-	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-			dev->device->driver->name, ring_name,
-			dev->data->port_id, queue_id);
-
-	mz = rte_memzone_lookup(z_name);
-	if (mz)
-		return mz;
-
-	return rte_memzone_reserve_aligned(z_name, ring_size, socket_id,
-			RTE_MEMZONE_IOVA_CONTIG, BNX2X_PAGE_SIZE);
+	return rte_eth_dma_zone_reserve(dev, ring_name, queue_id,
+			ring_size, BNX2X_PAGE_SIZE, socket_id);
 }
 
 static void
@@ -48,9 +37,9 @@ bnx2x_rx_queue_release(struct bnx2x_rx_queue *rx_queue)
 }
 
 void
-bnx2x_dev_rx_queue_release(void *rxq)
+bnx2x_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-	bnx2x_rx_queue_release(rxq);
+	bnx2x_rx_queue_release(dev->data->rx_queues[queue_idx]);
 }
 
 int
@@ -193,9 +182,9 @@ bnx2x_tx_queue_release(struct bnx2x_tx_queue *tx_queue)
 }
 
 void
-bnx2x_dev_tx_queue_release(void *txq)
+bnx2x_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-	bnx2x_tx_queue_release(txq);
+	bnx2x_tx_queue_release(dev->data->tx_queues[queue_idx]);
 }
 
 static uint16_t
@@ -322,7 +311,6 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->tx_bd_tail = 0;
 	txq->tx_bd_head = 0;
 	txq->nb_tx_avail = txq->nb_tx_desc;
-	dev->tx_pkt_burst = bnx2x_xmit_pkts;
 	dev->data->tx_queues[queue_idx] = txq;
 	if (!sc->tx_queues)
 		sc->tx_queues = dev->data->tx_queues;
@@ -333,12 +321,15 @@ static inline void
 bnx2x_upd_rx_prod_fast(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
 		uint16_t rx_bd_prod, uint16_t rx_cq_prod)
 {
-	union ustorm_eth_rx_producers rx_prods;
+	union {
+		struct ustorm_eth_rx_producers rx_prods;
+		uint32_t val;
+	} val = { {0} };
 
-	rx_prods.prod.bd_prod = rx_bd_prod;
-	rx_prods.prod.cqe_prod = rx_cq_prod;
+	val.rx_prods.bd_prod = rx_bd_prod;
+	val.rx_prods.cqe_prod = rx_cq_prod;
 
-	REG_WR(sc, fp->ustorm_rx_prods_offset, rx_prods.raw_data[0]);
+	REG_WR(sc, fp->ustorm_rx_prods_offset, val.val);
 }
 
 static uint16_t
@@ -353,8 +344,11 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	struct rte_mbuf *new_mb;
 	uint16_t rx_pref;
 	struct eth_fast_path_rx_cqe *cqe_fp;
-	uint16_t len, pad;
+	uint16_t len, pad, bd_len, buf_len;
 	struct rte_mbuf *rx_mb = NULL;
+	static bool log_once = true;
+
+	rte_spinlock_lock(&(fp)->rx_mtx);
 
 	hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
 	if ((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) ==
@@ -367,8 +361,10 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	sw_cq_cons = rxq->rx_cq_head;
 	sw_cq_prod = rxq->rx_cq_tail;
 
-	if (sw_cq_cons == hw_cq_cons)
+	if (sw_cq_cons == hw_cq_cons) {
+		rte_spinlock_unlock(&(fp)->rx_mtx);
 		return 0;
+	}
 
 	while (nb_rx < nb_pkts && sw_cq_cons != hw_cq_cons) {
 
@@ -390,6 +386,20 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 			len = cqe_fp->pkt_len_or_gro_seg_len;
 			pad = cqe_fp->placement_offset;
+			bd_len = cqe_fp->len_on_bd;
+			buf_len = rxq->sw_ring[bd_cons]->buf_len;
+
+			/* Check for sufficient buffer length */
+			if (unlikely(buf_len < len + (pad + RTE_PKTMBUF_HEADROOM))) {
+				if (unlikely(log_once)) {
+					PMD_DRV_LOG(ERR, sc, "mbuf size %d is not enough to hold Rx packet length more than %d",
+						    buf_len - RTE_PKTMBUF_HEADROOM,
+						    buf_len -
+						    (pad + RTE_PKTMBUF_HEADROOM));
+					log_once = false;
+				}
+				goto next_rx;
+			}
 
 			new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
 			if (unlikely(!new_mb)) {
@@ -414,7 +424,8 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
 			rx_mb->nb_segs = 1;
 			rx_mb->next = NULL;
-			rx_mb->pkt_len = rx_mb->data_len = len;
+			rx_mb->pkt_len = len;
+			rx_mb->data_len = bd_len;
 			rx_mb->port = rxq->port_id;
 			rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
 
@@ -424,7 +435,7 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			 */
 			if (cqe_fp->pars_flags.flags & PARSING_FLAGS_VLAN) {
 				rx_mb->vlan_tci = cqe_fp->vlan_tag;
-				rx_mb->ol_flags |= PKT_RX_VLAN;
+				rx_mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
 			}
 
 			rx_pkts[nb_rx] = rx_mb;
@@ -449,17 +460,31 @@ next_rx:
 
 	bnx2x_upd_rx_prod_fast(sc, fp, bd_prod, sw_cq_prod);
 
+	rte_spinlock_unlock(&(fp)->rx_mtx);
+
 	return nb_rx;
 }
 
-int
-bnx2x_dev_rx_init(struct rte_eth_dev *dev)
+static uint16_t
+bnx2x_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+		      __rte_unused struct rte_mbuf **rx_pkts,
+		      __rte_unused uint16_t nb_pkts)
 {
-	dev->rx_pkt_burst = bnx2x_recv_pkts;
 	return 0;
 }
 
+void bnx2x_dev_rxtx_init_dummy(struct rte_eth_dev *dev)
+{
+	dev->rx_pkt_burst = bnx2x_rxtx_pkts_dummy;
+	dev->tx_pkt_burst = bnx2x_rxtx_pkts_dummy;
+}
+
+void bnx2x_dev_rxtx_init(struct rte_eth_dev *dev)
+{
+	dev->rx_pkt_burst = bnx2x_recv_pkts;
+	dev->tx_pkt_burst = bnx2x_xmit_pkts;
+}
+
 void
bnx2x_dev_clear_queues(struct rte_eth_dev *dev)
{
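
A note on the bnx2x_upd_rx_prod_fast hunk above: the removed code filled union ustorm_eth_rx_producers field by field and read the combined word back through raw_data[0], while the replacement wraps the structure in a local anonymous union so that the two 16-bit producer indices and the 32-bit word handed to REG_WR() are members of the same object, a form of type punning that GCC documents as supported. Below is a minimal standalone sketch of the idiom; the struct layout and the names producers/bd_prod/cqe_prod are illustrative stand-ins, not the real ustorm_eth_rx_producers definition:

/* Sketch only: pack two 16-bit producer indices into one 32-bit word
 * through a union, as bnx2x_upd_rx_prod_fast does after this change.
 * Build with: cc -Wall -O2 prod_union.c */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct producers {		/* stand-in for ustorm_eth_rx_producers */
	uint16_t bd_prod;	/* Rx buffer descriptor producer index */
	uint16_t cqe_prod;	/* Rx completion queue producer index */
};

int main(void)
{
	union {
		struct producers prods;
		uint32_t val;
	} u = { {0, 0} };

	u.prods.bd_prod = 0x1234;
	u.prods.cqe_prod = 0x5678;

	/* The driver hands the equivalent of u.val to REG_WR() so both
	 * indices reach the ustorm producer register in a single 32-bit
	 * write; this demo just prints it (0x56781234 on little endian). */
	printf("producer word: 0x%08" PRIx32 "\n", u.val);
	return 0;
}

The single combined store keeps both indices updated in one register write, exactly as the old raw_data[0] access did; the union only changes how that word is formed, not what the hardware sees.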