diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 40a2462293..35cde561ba 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -24,8 +24,7 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
 			   rte_mempool_in_use_count(rxq->mb_pool));
 		return -ENOMEM;
 	}
-	rxq->sw_rx_ring[idx].mbuf = new_mb;
-	rxq->sw_rx_ring[idx].page_offset = 0;
+	rxq->sw_rx_ring[idx] = new_mb;
 	mapping = rte_mbuf_data_iova_default(new_mb);
 	/* Advance PROD and get BD pointer */
 	rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
@@ -39,17 +38,24 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
 
 static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
 {
-	void *obj_p[QEDE_MAX_BULK_ALLOC_COUNT] __rte_cache_aligned;
 	struct rte_mbuf *mbuf = NULL;
 	struct eth_rx_bd *rx_bd;
 	dma_addr_t mapping;
 	int i, ret = 0;
 	uint16_t idx;
+	uint16_t mask = NUM_RX_BDS(rxq);
 
 	if (count > QEDE_MAX_BULK_ALLOC_COUNT)
 		count = QEDE_MAX_BULK_ALLOC_COUNT;
 
-	ret = rte_mempool_get_bulk(rxq->mb_pool, obj_p, count);
+	idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
+
+	if (count > mask - idx + 1)
+		count = mask - idx + 1;
+
+	ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)&rxq->sw_rx_ring[idx],
+				   count);
+
 	if (unlikely(ret)) {
 		PMD_RX_LOG(ERR, rxq,
 			   "Failed to allocate %d rx buffers "
@@ -63,20 +69,17 @@ static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
 	}
 
 	for (i = 0; i < count; i++) {
-		mbuf = obj_p[i];
-		if (likely(i < count - 1))
-			rte_prefetch0(obj_p[i + 1]);
+		rte_prefetch0(rxq->sw_rx_ring[(idx + 1) & NUM_RX_BDS(rxq)]);
+		mbuf = rxq->sw_rx_ring[idx & NUM_RX_BDS(rxq)];
 
-		idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
-		rxq->sw_rx_ring[idx].mbuf = mbuf;
-		rxq->sw_rx_ring[idx].page_offset = 0;
 		mapping = rte_mbuf_data_iova_default(mbuf);
 		rx_bd = (struct eth_rx_bd *)
 			ecore_chain_produce(&rxq->rx_bd_ring);
 		rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
 		rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
-		rxq->sw_rx_prod++;
+		idx++;
 	}
+	rxq->sw_rx_prod = idx;
 
 	return 0;
 }
@@ -309,9 +312,9 @@ static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
 
 	if (rxq->sw_rx_ring) {
 		for (i = 0; i < rxq->nb_rx_desc; i++) {
-			if (rxq->sw_rx_ring[i].mbuf) {
-				rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
-				rxq->sw_rx_ring[i].mbuf = NULL;
+			if (rxq->sw_rx_ring[i]) {
+				rte_pktmbuf_free(rxq->sw_rx_ring[i]);
+				rxq->sw_rx_ring[i] = NULL;
 			}
 		}
 	}
@@ -393,6 +396,7 @@ qede_alloc_tx_queue_mem(struct rte_eth_dev *dev,
 	struct ecore_dev *edev = &qdev->edev;
 	struct qede_tx_queue *txq;
 	int rc;
+	size_t sw_tx_ring_size;
 
 	txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
 				 RTE_CACHE_LINE_SIZE, socket_id);
@@ -425,9 +429,9 @@ qede_alloc_tx_queue_mem(struct rte_eth_dev *dev,
 	}
 
 	/* Allocate software ring */
+	sw_tx_ring_size = sizeof(txq->sw_tx_ring) * txq->nb_tx_desc;
 	txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
-					     (sizeof(struct qede_tx_entry) *
-					      txq->nb_tx_desc),
+					     sw_tx_ring_size,
 					     RTE_CACHE_LINE_SIZE, socket_id);
 
 	if (!txq->sw_tx_ring) {
@@ -523,9 +527,9 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
 
 	if (txq->sw_tx_ring) {
 		for (i = 0; i < txq->nb_tx_desc; i++) {
-			if (txq->sw_tx_ring[i].mbuf) {
-				
rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf); - txq->sw_tx_ring[i].mbuf = NULL; + if (txq->sw_tx_ring[i]) { + rte_pktmbuf_free(txq->sw_tx_ring[i]); + txq->sw_tx_ring[i] = NULL; } } } @@ -593,12 +597,14 @@ qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info, int qede_alloc_fp_resc(struct qede_dev *qdev) { - struct ecore_dev *edev = &qdev->edev; + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); struct qede_fastpath *fp; uint32_t num_sbs; uint16_t sb_idx; int i; + PMD_INIT_FUNC_TRACE(edev); + if (IS_VF(edev)) ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs); else @@ -645,8 +651,6 @@ int qede_alloc_fp_resc(struct qede_dev *qdev) for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) { fp = &qdev->fp_array[sb_idx]; - if (!fp) - continue; fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info), RTE_CACHE_LINE_SIZE); if (!fp->sb_info) { @@ -676,11 +680,9 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev) for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) { fp = &qdev->fp_array[sb_idx]; - if (!fp) - continue; - DP_INFO(edev, "Free sb_info index 0x%x\n", - fp->sb_info->igu_sb_id); if (fp->sb_info) { + DP_INFO(edev, "Free sb_info index 0x%x\n", + fp->sb_info->igu_sb_id); OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt, fp->sb_info->sb_phys, sizeof(struct status_block)); @@ -719,9 +721,10 @@ qede_update_rx_prod(__rte_unused struct qede_dev *edev, { uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring); uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring); - struct eth_rx_prod_data rx_prods = { 0 }; + struct eth_rx_prod_data rx_prods; /* Update producers */ + memset(&rx_prods, 0, sizeof(rx_prods)); rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod); rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod); @@ -805,7 +808,7 @@ qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) fp->rxq->hw_rxq_prod_addr = ret_params.p_prod; fp->rxq->handle = ret_params.p_handle; - fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI]; + fp->rxq->hw_cons_ptr = &fp->sb_info->sb_pi_array[RX_PI]; qede_update_rx_prod(qdev, fp->rxq); eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; @@ -863,7 +866,7 @@ qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) txq->doorbell_addr = ret_params.p_doorbell; txq->handle = ret_params.p_handle; - txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[TX_PI(0)]; + txq->hw_cons_ptr = &fp->sb_info->sb_pi_array[TX_PI(0)]; SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM); SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, @@ -884,51 +887,66 @@ qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) } static inline void -qede_free_tx_pkt(struct qede_tx_queue *txq) +qede_process_tx_compl(__rte_unused struct ecore_dev *edev, + struct qede_tx_queue *txq) { + uint16_t hw_bd_cons; + uint16_t sw_tx_cons; + uint16_t remaining; + uint16_t mask; struct rte_mbuf *mbuf; uint16_t nb_segs; uint16_t idx; + uint16_t first_idx; + + rte_compiler_barrier(); + rte_prefetch0(txq->hw_cons_ptr); + sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl); + hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr); +#ifdef RTE_LIBRTE_QEDE_DEBUG_TX + PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n", + abs(hw_bd_cons - sw_tx_cons)); +#endif + + mask = NUM_TX_BDS(txq); + idx = txq->sw_tx_cons & mask; - idx = TX_CONS(txq); - mbuf = txq->sw_tx_ring[idx].mbuf; - if (mbuf) { + remaining = hw_bd_cons - sw_tx_cons; + txq->nb_tx_avail += remaining; + first_idx = idx; + + while (remaining) { + 
mbuf = txq->sw_tx_ring[idx]; + RTE_ASSERT(mbuf); nb_segs = mbuf->nb_segs; + remaining -= nb_segs; + + /* Prefetch the next mbuf. Note that at least the last 4 mbufs + * that are prefetched will not be used in the current call. + */ + rte_mbuf_prefetch_part1(txq->sw_tx_ring[(idx + 4) & mask]); + rte_mbuf_prefetch_part2(txq->sw_tx_ring[(idx + 4) & mask]); + PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs); + while (nb_segs) { - /* It's like consuming rxbuf in recv() */ ecore_chain_consume(&txq->tx_pbl); - txq->nb_tx_avail++; nb_segs--; } - rte_pktmbuf_free(mbuf); - txq->sw_tx_ring[idx].mbuf = NULL; - txq->sw_tx_cons++; + + idx = (idx + 1) & mask; PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n"); - } else { - ecore_chain_consume(&txq->tx_pbl); - txq->nb_tx_avail++; } -} + txq->sw_tx_cons = idx; -static inline void -qede_process_tx_compl(__rte_unused struct ecore_dev *edev, - struct qede_tx_queue *txq) -{ - uint16_t hw_bd_cons; -#ifdef RTE_LIBRTE_QEDE_DEBUG_TX - uint16_t sw_tx_cons; -#endif - - rte_compiler_barrier(); - hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr); -#ifdef RTE_LIBRTE_QEDE_DEBUG_TX - sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl); - PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n", - abs(hw_bd_cons - sw_tx_cons)); -#endif - while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) - qede_free_tx_pkt(txq); + if (first_idx > idx) { + rte_pktmbuf_free_bulk(&txq->sw_tx_ring[first_idx], + mask - first_idx + 1); + rte_pktmbuf_free_bulk(&txq->sw_tx_ring[0], idx); + } else { + rte_pktmbuf_free_bulk(&txq->sw_tx_ring[first_idx], + idx - first_idx); + } } static int qede_drain_txq(struct qede_dev *qdev, @@ -1304,18 +1322,15 @@ static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq) static inline void qede_reuse_page(__rte_unused struct qede_dev *qdev, - struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons) + struct qede_rx_queue *rxq, struct rte_mbuf *curr_cons) { struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring); uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq); - struct qede_rx_entry *curr_prod; dma_addr_t new_mapping; - curr_prod = &rxq->sw_rx_ring[idx]; - *curr_prod = *curr_cons; + rxq->sw_rx_ring[idx] = curr_cons; - new_mapping = rte_mbuf_data_iova_default(curr_prod->mbuf) + - curr_prod->page_offset; + new_mapping = rte_mbuf_data_iova_default(curr_cons); rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping)); rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping)); @@ -1327,10 +1342,10 @@ static inline void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *qdev, uint8_t count) { - struct qede_rx_entry *curr_cons; + struct rte_mbuf *curr_cons; for (; count > 0; count--) { - curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)]; + curr_cons = rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)]; qede_reuse_page(qdev, rxq, curr_cons); qede_rx_bd_ring_consume(rxq); } @@ -1352,7 +1367,7 @@ qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev, if (rte_le_to_cpu_16(len)) { tpa_info = &rxq->tpa_info[agg_index]; cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq); - curr_frag = rxq->sw_rx_ring[cons_idx].mbuf; + curr_frag = rxq->sw_rx_ring[cons_idx]; assert(curr_frag); curr_frag->nb_segs = 1; curr_frag->pkt_len = rte_le_to_cpu_16(len); @@ -1484,7 +1499,7 @@ qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb, return -EINVAL; } sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq); - seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf; + seg2 = rxq->sw_rx_ring[sw_rx_index]; qede_rx_bd_ring_consume(rxq); 
pkt_len -= cur_size; seg2->data_len = cur_size; @@ -1518,6 +1533,228 @@ print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq, } #endif +uint16_t +qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL; + register struct rte_mbuf *rx_mb = NULL; + struct qede_rx_queue *rxq = p_rxq; + struct qede_dev *qdev = rxq->qdev; + struct ecore_dev *edev = &qdev->edev; + union eth_rx_cqe *cqe; + uint64_t ol_flags; + enum eth_rx_cqe_type cqe_type; + int rss_enable = qdev->rss_enable; + int rx_alloc_count = 0; + uint32_t packet_type; + uint32_t rss_hash; + uint16_t vlan_tci, port_id; + uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index, num_rx_bds; + uint16_t rx_pkt = 0; + uint16_t pkt_len = 0; + uint16_t len; /* Length of first BD */ + uint16_t preload_idx; + uint16_t parse_flag; +#ifdef RTE_LIBRTE_QEDE_DEBUG_RX + uint8_t bitfield_val; +#endif + uint8_t offset, flags, bd_num; + + + /* Allocate buffers that we used in previous loop */ + if (rxq->rx_alloc_count) { + if (unlikely(qede_alloc_rx_bulk_mbufs(rxq, + rxq->rx_alloc_count))) { + struct rte_eth_dev *dev; + + PMD_RX_LOG(ERR, rxq, + "New buffer allocation failed," + "dropping incoming packetn"); + dev = &rte_eth_devices[rxq->port_id]; + dev->data->rx_mbuf_alloc_failed += + rxq->rx_alloc_count; + rxq->rx_alloc_errors += rxq->rx_alloc_count; + return 0; + } + qede_update_rx_prod(qdev, rxq); + rxq->rx_alloc_count = 0; + } + + hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr); + sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); + + rte_rmb(); + + if (hw_comp_cons == sw_comp_cons) + return 0; + + num_rx_bds = NUM_RX_BDS(rxq); + port_id = rxq->port_id; + + while (sw_comp_cons != hw_comp_cons) { + ol_flags = 0; + packet_type = RTE_PTYPE_UNKNOWN; + vlan_tci = 0; + rss_hash = 0; + + /* Get the CQE from the completion ring */ + cqe = + (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring); + cqe_type = cqe->fast_path_regular.type; + PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type); + + if (likely(cqe_type == ETH_RX_CQE_TYPE_REGULAR)) { + fp_cqe = &cqe->fast_path_regular; + } else { + if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) { + PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n"); + ecore_eth_cqe_completion + (&edev->hwfns[rxq->queue_id % + edev->num_hwfns], + (struct eth_slow_path_rx_cqe *)cqe); + } + goto next_cqe; + } + + /* Get the data from the SW ring */ + sw_rx_index = rxq->sw_rx_cons & num_rx_bds; + rx_mb = rxq->sw_rx_ring[sw_rx_index]; + assert(rx_mb != NULL); + + parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags); + offset = fp_cqe->placement_offset; + len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd); + pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len); + vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag); + rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash); + bd_num = fp_cqe->bd_num; +#ifdef RTE_LIBRTE_QEDE_DEBUG_RX + bitfield_val = fp_cqe->bitfields; +#endif + + if (unlikely(qede_tunn_exist(parse_flag))) { + PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n"); + if (unlikely(qede_check_tunn_csum_l4(parse_flag))) { + PMD_RX_LOG(ERR, rxq, + "L4 csum failed, flags = 0x%x\n", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= PKT_RX_L4_CKSUM_BAD; + } else { + ol_flags |= PKT_RX_L4_CKSUM_GOOD; + } + + if (unlikely(qede_check_tunn_csum_l3(parse_flag))) { + PMD_RX_LOG(ERR, rxq, + "Outer L3 csum failed, flags = 0x%x\n", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= PKT_RX_OUTER_IP_CKSUM_BAD; + } else { + ol_flags |= PKT_RX_IP_CKSUM_GOOD; + } + + flags = 
fp_cqe->tunnel_pars_flags.flags; + + /* Tunnel_type */ + packet_type = + qede_rx_cqe_to_tunn_pkt_type(flags); + + /* Inner header */ + packet_type |= + qede_rx_cqe_to_pkt_type_inner(parse_flag); + + /* Outer L3/L4 types is not available in CQE */ + packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb); + + /* Outer L3/L4 types is not available in CQE. + * Need to add offset to parse correctly, + */ + rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM; + packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb); + } else { + packet_type |= qede_rx_cqe_to_pkt_type(parse_flag); + } + + /* Common handling for non-tunnel packets and for inner + * headers in the case of tunnel. + */ + if (unlikely(qede_check_notunn_csum_l4(parse_flag))) { + PMD_RX_LOG(ERR, rxq, + "L4 csum failed, flags = 0x%x\n", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= PKT_RX_L4_CKSUM_BAD; + } else { + ol_flags |= PKT_RX_L4_CKSUM_GOOD; + } + if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) { + PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= PKT_RX_IP_CKSUM_BAD; + } else { + ol_flags |= PKT_RX_IP_CKSUM_GOOD; + } + + if (unlikely(CQE_HAS_VLAN(parse_flag) || + CQE_HAS_OUTER_VLAN(parse_flag))) { + /* Note: FW doesn't indicate Q-in-Q packet */ + ol_flags |= PKT_RX_VLAN; + if (qdev->vlan_strip_flg) { + ol_flags |= PKT_RX_VLAN_STRIPPED; + rx_mb->vlan_tci = vlan_tci; + } + } + + if (rss_enable) { + ol_flags |= PKT_RX_RSS_HASH; + rx_mb->hash.rss = rss_hash; + } + + rx_alloc_count++; + qede_rx_bd_ring_consume(rxq); + + /* Prefetch next mbuf while processing current one. */ + preload_idx = rxq->sw_rx_cons & num_rx_bds; + rte_prefetch0(rxq->sw_rx_ring[preload_idx]); + + /* Update rest of the MBUF fields */ + rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM; + rx_mb->port = port_id; + rx_mb->ol_flags = ol_flags; + rx_mb->data_len = len; + rx_mb->packet_type = packet_type; +#ifdef RTE_LIBRTE_QEDE_DEBUG_RX + print_rx_bd_info(rx_mb, rxq, bitfield_val); +#endif + rx_mb->nb_segs = bd_num; + rx_mb->pkt_len = pkt_len; + + rx_pkts[rx_pkt] = rx_mb; + rx_pkt++; + +next_cqe: + ecore_chain_recycle_consumed(&rxq->rx_comp_ring); + sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); + if (rx_pkt == nb_pkts) { + PMD_RX_LOG(DEBUG, rxq, + "Budget reached nb_pkts=%u received=%u", + rx_pkt, nb_pkts); + break; + } + } + + /* Request number of bufferes to be allocated in next loop */ + rxq->rx_alloc_count = rx_alloc_count; + + rxq->rcv_pkts += rx_pkt; + rxq->rx_segs += rx_pkt; + PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id()); + + return rx_pkt; +} + uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { @@ -1638,7 +1875,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) /* Get the data from the SW ring */ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq); - rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf; + rx_mb = rxq->sw_rx_ring[sw_rx_index]; assert(rx_mb != NULL); /* Handle regular CQE or TPA start CQE */ @@ -1681,7 +1918,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) "Outer L3 csum failed, flags = 0x%x\n", parse_flag); rxq->rx_hw_errors++; - ol_flags |= PKT_RX_EIP_CKSUM_BAD; + ol_flags |= PKT_RX_OUTER_IP_CKSUM_BAD; } else { ol_flags |= PKT_RX_IP_CKSUM_GOOD; } @@ -1769,7 +2006,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) /* Prefetch next mbuf while processing current one. 
*/ preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq); - rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf); + rte_prefetch0(rxq->sw_rx_ring[preload_idx]); /* Update rest of the MBUF fields */ rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM; @@ -2012,6 +2249,131 @@ qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf, } #endif +uint16_t +qede_xmit_pkts_regular(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct qede_tx_queue *txq = p_txq; + struct qede_dev *qdev = txq->qdev; + struct ecore_dev *edev = &qdev->edev; + struct eth_tx_1st_bd *bd1; + struct eth_tx_2nd_bd *bd2; + struct eth_tx_3rd_bd *bd3; + struct rte_mbuf *m_seg = NULL; + struct rte_mbuf *mbuf; + struct rte_mbuf **sw_tx_ring; + uint16_t nb_tx_pkts; + uint16_t bd_prod; + uint16_t idx; + uint16_t nb_frags = 0; + uint16_t nb_pkt_sent = 0; + uint8_t nbds; + uint64_t tx_ol_flags; + /* BD1 */ + uint16_t bd1_bf; + uint8_t bd1_bd_flags_bf; + + if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) { + PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u", + nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh); + qede_process_tx_compl(edev, txq); + } + + nb_tx_pkts = nb_pkts; + bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl)); + sw_tx_ring = txq->sw_tx_ring; + + while (nb_tx_pkts--) { + /* Init flags/values */ + nbds = 0; + bd1 = NULL; + bd2 = NULL; + bd3 = NULL; + bd1_bf = 0; + bd1_bd_flags_bf = 0; + nb_frags = 0; + + mbuf = *tx_pkts++; + assert(mbuf); + + + /* Check minimum TX BDS availability against available BDs */ + if (unlikely(txq->nb_tx_avail < mbuf->nb_segs)) + break; + + tx_ol_flags = mbuf->ol_flags; + bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; + + if (unlikely(txq->nb_tx_avail < + ETH_TX_MIN_BDS_PER_NON_LSO_PKT)) + break; + bd1_bf |= + (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) + << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; + + /* Offload the IP checksum in the hardware */ + if (tx_ol_flags & PKT_TX_IP_CKSUM) + bd1_bd_flags_bf |= + 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; + + /* L4 checksum offload (tcp or udp) */ + if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) && + (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) + bd1_bd_flags_bf |= + 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; + + /* Fill the entry in the SW ring and the BDs in the FW ring */ + idx = TX_PROD(txq); + sw_tx_ring[idx] = mbuf; + + /* BD1 */ + bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl); + memset(bd1, 0, sizeof(struct eth_tx_1st_bd)); + nbds++; + + /* Map MBUF linear data for DMA and set in the BD1 */ + QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf), + mbuf->data_len); + bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf); + bd1->data.bd_flags.bitfields = bd1_bd_flags_bf; + + /* Handle fragmented MBUF */ + if (unlikely(mbuf->nb_segs > 1)) { + m_seg = mbuf->next; + + /* Encode scatter gather buffer descriptors */ + nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3, + nbds - 1); + } + + bd1->data.nbds = nbds + nb_frags; + + txq->nb_tx_avail -= bd1->data.nbds; + txq->sw_tx_prod++; + bd_prod = + rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl)); +#ifdef RTE_LIBRTE_QEDE_DEBUG_TX + print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags); +#endif + nb_pkt_sent++; + txq->xmit_pkts++; + } + + /* Write value of prod idx into bd_prod */ + txq->tx_db.data.bd_prod = bd_prod; + rte_wmb(); + rte_compiler_barrier(); + DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw); + rte_wmb(); + + /* Check again for Tx completions */ + qede_process_tx_compl(edev, txq); + + PMD_TX_LOG(DEBUG, txq, "to_send=%u 
sent=%u bd_prod=%u core=%d", + nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id()); + + return nb_pkt_sent; +} + uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { @@ -2267,7 +2629,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) /* Fill the entry in the SW ring and the BDs in the FW ring */ idx = TX_PROD(txq); - txq->sw_tx_ring[idx].mbuf = mbuf; + txq->sw_tx_ring[idx] = mbuf; /* BD1 */ bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);