X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_rxtx.c;h=5e7c86ed8249c5c16cfe3d694fee2da93fa35020;hb=cfa9e98d3c135e4f937265263feb7377330219f6;hp=6a66cec20eaa95205641514dcff2b86a47694283;hpb=cc46d3d36837a00e69072fbedd63cb3f56f78821;p=dpdk.git

diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 6a66cec20e..5e7c86ed82 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -989,6 +989,24 @@ i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
 	return ctx_desc;
 }
 
+/* HW requires that Tx buffer size ranges from 1B up to (16K-1)B. */
+#define I40E_MAX_DATA_PER_TXD \
+	(I40E_TXD_QW1_TX_BUF_SZ_MASK >> I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+/* Calculate the number of TX descriptors needed for each pkt */
+static inline uint16_t
+i40e_calc_pkt_desc(struct rte_mbuf *tx_pkt)
+{
+	struct rte_mbuf *txd = tx_pkt;
+	uint16_t count = 0;
+
+	while (txd != NULL) {
+		count += DIV_ROUND_UP(txd->data_len, I40E_MAX_DATA_PER_TXD);
+		txd = txd->next;
+	}
+
+	return count;
+}
+
 uint16_t
 i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
@@ -1021,7 +1039,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	/* Check if the descriptor ring needs to be cleaned. */
 	if (txq->nb_tx_free < txq->tx_free_thresh)
-		i40e_xmit_cleanup(txq);
+		(void)i40e_xmit_cleanup(txq);
 
 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
 		td_cmd = 0;
@@ -1046,8 +1064,15 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus 1 context descriptor if needed.
+		 * Recalculate the needed Tx descriptors when TSO is
+		 * enabled, in case an mbuf's data size exceeds the max
+		 * data size the HW allows per Tx descriptor.
 		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
+		if (ol_flags & PKT_TX_TCP_SEG)
+			nb_used = (uint16_t)(i40e_calc_pkt_desc(tx_pkt) +
+					     nb_ctx);
+		else
+			nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
 		tx_last = (uint16_t)(tx_id + nb_used - 1);
 
 		/* Circular ring */
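
As a worked example of the arithmetic in i40e_calc_pkt_desc(): the hardware caps each data descriptor at I40E_MAX_DATA_PER_TXD = 16383 bytes, so a TSO mbuf chain whose segments exceed that limit needs more descriptors than it has segments. The standalone sketch below mirrors the calculation outside the driver; the macro values and segment lengths are illustrative, not taken from the patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the driver's definitions. */
    #define MAX_DATA_PER_TXD 16383 /* (16K - 1) bytes */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* A hypothetical 3-segment TSO chain: 60000B + 16383B + 100B. */
        uint32_t seg_len[] = { 60000, 16383, 100 };
        uint16_t count = 0;
        unsigned int i;

        for (i = 0; i < 3; i++)
            count += DIV_ROUND_UP(seg_len[i], MAX_DATA_PER_TXD);

        /* 60000B -> 4 descriptors, 16383B -> 1, 100B -> 1: prints 6,
         * whereas nb_segs alone would have reserved only 3. */
        printf("descriptors needed: %u\n", count);
        return 0;
    }
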
@@ -1160,6 +1185,24 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			slen = m_seg->data_len;
 			buf_dma_addr = rte_mbuf_data_iova(m_seg);
 
+			while ((ol_flags & PKT_TX_TCP_SEG) &&
+				unlikely(slen > I40E_MAX_DATA_PER_TXD)) {
+				txd->buffer_addr =
+					rte_cpu_to_le_64(buf_dma_addr);
+				txd->cmd_type_offset_bsz =
+					i40e_build_ctob(td_cmd,
+					td_offset, I40E_MAX_DATA_PER_TXD,
+					td_tag);
+
+				buf_dma_addr += I40E_MAX_DATA_PER_TXD;
+				slen -= I40E_MAX_DATA_PER_TXD;
+
+				txe->last_id = tx_last;
+				tx_id = txe->next_id;
+				txe = txn;
+				txd = &txr[tx_id];
+				txn = &sw_ring[txe->next_id];
+			}
 			PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
 				"buf_dma_addr: %#"PRIx64";\n"
 				"td_cmd: %#x;\n"
@@ -1205,7 +1248,8 @@ end_of_tx:
 		   (unsigned) txq->port_id, (unsigned) txq->queue_id,
 		   (unsigned) tx_id, (unsigned) nb_tx);
 
-	I40E_PCI_REG_WRITE(txq->qtx_tail, tx_id);
+	rte_cio_wmb();
+	I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
 	txq->tx_tail = tx_id;
 
 	return nb_tx;
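
The tail-update change at the end of i40e_xmit_pkts() splits what was a single fully ordered MMIO write into an explicit coherent-I/O write barrier plus a relaxed register write; on weakly ordered platforms (notably armv8) this avoids the redundant barrier implied by the plain register-write macro while still guaranteeing the NIC observes the descriptor stores before the tail bump. A minimal sketch of the pattern, assuming the usual DPDK headers; the helper name ring_doorbell() is invented for illustration:

    #include <stdint.h>
    #include <rte_atomic.h>
    #include <rte_byteorder.h>
    #include <rte_io.h>

    /* Hypothetical helper showing the ordering used above. */
    static inline void
    ring_doorbell(volatile void *tail_reg, uint16_t tail)
    {
        /* Make all prior descriptor stores visible to the device... */
        rte_cio_wmb();
        /* ...then bump the tail with a relaxed MMIO write; the barrier
         * above already provides the required ordering, so a stronger
         * rte_write32() would pay for a second barrier. */
        rte_write32_relaxed(rte_cpu_to_le_32(tail), tail_reg);
    }
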
@@ -2455,6 +2499,113 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
 	}
 }
 
+static int
+i40e_tx_done_cleanup_full(struct i40e_tx_queue *txq,
+			uint32_t free_cnt)
+{
+	struct i40e_tx_entry *swr_ring = txq->sw_ring;
+	uint16_t i, tx_last, tx_id;
+	uint16_t nb_tx_free_last;
+	uint16_t nb_tx_to_clean;
+	uint32_t pkt_cnt;
+
+	/* Start freeing mbufs from the entry after tx_tail */
+	tx_last = txq->tx_tail;
+	tx_id = swr_ring[tx_last].next_id;
+
+	if (txq->nb_tx_free == 0 && i40e_xmit_cleanup(txq))
+		return 0;
+
+	nb_tx_to_clean = txq->nb_tx_free;
+	nb_tx_free_last = txq->nb_tx_free;
+	if (!free_cnt)
+		free_cnt = txq->nb_tx_desc;
+
+	/* Loop through swr_ring to count the number of
+	 * freeable mbufs and packets.
+	 */
+	for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+		for (i = 0; i < nb_tx_to_clean &&
+			pkt_cnt < free_cnt &&
+			tx_id != tx_last; i++) {
+			if (swr_ring[tx_id].mbuf != NULL) {
+				rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+				swr_ring[tx_id].mbuf = NULL;
+
+				/*
+				 * last segment in the packet,
+				 * increment packet count
+				 */
+				pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+			}
+
+			tx_id = swr_ring[tx_id].next_id;
+		}
+
+		if (txq->tx_rs_thresh > txq->nb_tx_desc -
+			txq->nb_tx_free || tx_id == tx_last)
+			break;
+
+		if (pkt_cnt < free_cnt) {
+			if (i40e_xmit_cleanup(txq))
+				break;
+
+			nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+			nb_tx_free_last = txq->nb_tx_free;
+		}
+	}
+
+	return (int)pkt_cnt;
+}
+
+static int
+i40e_tx_done_cleanup_simple(struct i40e_tx_queue *txq,
+			uint32_t free_cnt)
+{
+	int i, n, cnt;
+
+	if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+		free_cnt = txq->nb_tx_desc;
+
+	cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
+
+	for (i = 0; i < cnt; i += n) {
+		if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
+			break;
+
+		n = i40e_tx_free_bufs(txq);
+
+		if (n == 0)
+			break;
+	}
+
+	return i;
+}
+
+static int
+i40e_tx_done_cleanup_vec(struct i40e_tx_queue *txq __rte_unused,
+			uint32_t free_cnt __rte_unused)
+{
+	return -ENOTSUP;
+}
+
+int
+i40e_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+	struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
+	struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	if (ad->tx_simple_allowed) {
+		if (ad->tx_vec_allowed)
+			return i40e_tx_done_cleanup_vec(q, free_cnt);
+		else
+			return i40e_tx_done_cleanup_simple(q, free_cnt);
+	} else {
+		return i40e_tx_done_cleanup_full(q, free_cnt);
+	}
+}
+
 void
 i40e_reset_tx_queue(struct i40e_tx_queue *txq)
 {
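
The new i40e_tx_done_cleanup() wires the driver into the generic rte_eth_tx_done_cleanup() ethdev API, which lets an application reclaim mbufs from already-transmitted packets without waiting for the ring's own free threshold. A sketch of how a caller might use it (the helper and the port/queue numbers are illustrative):

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Hypothetical caller: ask the PMD to free the mbufs of up to 64
     * transmitted packets; free_cnt == 0 means "as many as possible". */
    static void
    reclaim_tx_mbufs(uint16_t port_id, uint16_t queue_id)
    {
        int nb = rte_eth_tx_done_cleanup(port_id, queue_id, 64);

        if (nb < 0)
            /* e.g. -ENOTSUP from i40e_tx_done_cleanup_vec() when the
             * vector Tx path is selected. */
            printf("tx cleanup failed: %d\n", nb);
        else
            printf("freed %d packets\n", nb);
    }
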
@@ -2943,7 +3094,7 @@ i40e_get_recommend_rx_vec(bool scatter)
 			i40e_recv_pkts_vec;
 }
 
-void __attribute__((cold))
+void __rte_cold
 i40e_set_rx_function(struct rte_eth_dev *dev)
 {
 	struct i40e_adapter *ad =
@@ -3017,52 +3168,48 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
 	}
 }
 
-int
-i40e_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
-		       struct rte_eth_burst_mode *mode)
-{
-	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
-	uint64_t options;
-
-	if (pkt_burst == i40e_recv_scattered_pkts)
-		options = RTE_ETH_BURST_SCALAR | RTE_ETH_BURST_SCATTERED;
-	else if (pkt_burst == i40e_recv_pkts_bulk_alloc)
-		options = RTE_ETH_BURST_SCALAR | RTE_ETH_BURST_BULK_ALLOC;
-	else if (pkt_burst == i40e_recv_pkts)
-		options = RTE_ETH_BURST_SCALAR;
+static const struct {
+	eth_rx_burst_t pkt_burst;
+	const char *info;
+} i40e_rx_burst_infos[] = {
+	{ i40e_recv_scattered_pkts,          "Scalar Scattered" },
+	{ i40e_recv_pkts_bulk_alloc,         "Scalar Bulk Alloc" },
+	{ i40e_recv_pkts,                    "Scalar" },
 #ifdef RTE_ARCH_X86
-	else if (pkt_burst == i40e_recv_scattered_pkts_vec_avx2)
-		options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_AVX2 |
-			  RTE_ETH_BURST_SCATTERED;
-	else if (pkt_burst == i40e_recv_pkts_vec_avx2)
-		options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_AVX2;
-	else if (pkt_burst == i40e_recv_scattered_pkts_vec)
-		options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_SSE |
-			  RTE_ETH_BURST_SCATTERED;
-	else if (pkt_burst == i40e_recv_pkts_vec)
-		options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_SSE;
+	{ i40e_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
+	{ i40e_recv_pkts_vec_avx2,           "Vector AVX2" },
+	{ i40e_recv_scattered_pkts_vec,      "Vector SSE Scattered" },
+	{ i40e_recv_pkts_vec,                "Vector SSE" },
 #elif defined(RTE_ARCH_ARM64)
-	else if (pkt_burst == i40e_recv_scattered_pkts_vec)
-		options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_NEON |
-			  RTE_ETH_BURST_SCATTERED;
-	else if (pkt_burst == i40e_recv_pkts_vec)
-		options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_NEON;
+	{ i40e_recv_scattered_pkts_vec,      "Vector Neon Scattered" },
+	{ i40e_recv_pkts_vec,                "Vector Neon" },
 #elif defined(RTE_ARCH_PPC_64)
-	else if (pkt_burst == i40e_recv_scattered_pkts_vec)
-		options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_ALTIVEC |
-			  RTE_ETH_BURST_SCATTERED;
-	else if (pkt_burst == i40e_recv_pkts_vec)
-		options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_ALTIVEC;
+	{ i40e_recv_scattered_pkts_vec,      "Vector AltiVec Scattered" },
+	{ i40e_recv_pkts_vec,                "Vector AltiVec" },
 #endif
-	else
-		options = 0;
+};
 
-	mode->options = options;
+int
+i40e_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
+		       struct rte_eth_burst_mode *mode)
+{
+	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
+	int ret = -EINVAL;
+	unsigned int i;
+
+	for (i = 0; i < RTE_DIM(i40e_rx_burst_infos); ++i) {
+		if (pkt_burst == i40e_rx_burst_infos[i].pkt_burst) {
+			snprintf(mode->info, sizeof(mode->info), "%s",
+				 i40e_rx_burst_infos[i].info);
+			ret = 0;
+			break;
+		}
+	}
 
-	return options != 0 ? 0 : -EINVAL;
+	return ret;
 }
 
-void __attribute__((cold))
+void __rte_cold
 i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
 {
 	struct i40e_adapter *ad =
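
With the table-driven rewrite, i40e_rx_burst_mode_get() reports a human-readable description through the info field of struct rte_eth_burst_mode instead of the retired options bit flags. Applications query it via the generic ethdev call, roughly like this (hypothetical usage, assuming a configured and started port):

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    print_rx_burst_mode(uint16_t port_id, uint16_t queue_id)
    {
        struct rte_eth_burst_mode mode;

        /* Fills mode.info with e.g. "Vector AVX2 Scattered" on i40e. */
        if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
            printf("port %u rxq %u: %s\n", port_id, queue_id, mode.info);
        else
            printf("burst mode not reported\n");
    }
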
@@ -3113,7 +3260,7 @@ i40e_get_recommend_tx_vec(void)
 		return i40e_xmit_pkts_vec;
 }
 
-void __attribute__((cold))
+void __rte_cold
 i40e_set_tx_function(struct rte_eth_dev *dev)
 {
 	struct i40e_adapter *ad =
@@ -3155,38 +3302,43 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
 	}
 }
 
-int
-i40e_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
-		       struct rte_eth_burst_mode *mode)
-{
-	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
-	uint64_t options;
-
-	if (pkt_burst == i40e_xmit_pkts_simple)
-		options = RTE_ETH_BURST_SCALAR | RTE_ETH_BURST_SIMPLE;
-	else if (pkt_burst == i40e_xmit_pkts)
-		options = RTE_ETH_BURST_SCALAR;
+static const struct {
+	eth_tx_burst_t pkt_burst;
+	const char *info;
+} i40e_tx_burst_infos[] = {
+	{ i40e_xmit_pkts_simple,   "Scalar Simple" },
+	{ i40e_xmit_pkts,          "Scalar" },
 #ifdef RTE_ARCH_X86
-	else if (pkt_burst == i40e_xmit_pkts_vec_avx2)
-		options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_AVX2;
-	else if (pkt_burst == i40e_xmit_pkts_vec)
-		options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_SSE;
+	{ i40e_xmit_pkts_vec_avx2, "Vector AVX2" },
+	{ i40e_xmit_pkts_vec,      "Vector SSE" },
 #elif defined(RTE_ARCH_ARM64)
-	else if (pkt_burst == i40e_xmit_pkts_vec)
-		options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_NEON;
+	{ i40e_xmit_pkts_vec,      "Vector Neon" },
 #elif defined(RTE_ARCH_PPC_64)
-	else if (pkt_burst == i40e_xmit_pkts_vec)
-		options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_ALTIVEC;
+	{ i40e_xmit_pkts_vec,      "Vector AltiVec" },
 #endif
-	else
-		options = 0;
+};
 
-	mode->options = options;
+int
+i40e_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
+		       struct rte_eth_burst_mode *mode)
+{
+	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+	int ret = -EINVAL;
+	unsigned int i;
+
+	for (i = 0; i < RTE_DIM(i40e_tx_burst_infos); ++i) {
+		if (pkt_burst == i40e_tx_burst_infos[i].pkt_burst) {
+			snprintf(mode->info, sizeof(mode->info), "%s",
+				 i40e_tx_burst_infos[i].info);
+			ret = 0;
+			break;
+		}
+	}
 
-	return options != 0 ? 0 : -EINVAL;
+	return ret;
 }
 
-void __attribute__((cold))
+void __rte_cold
 i40e_set_default_ptype_table(struct rte_eth_dev *dev)
 {
 	struct i40e_adapter *ad =
@@ -3197,7 +3349,7 @@ i40e_set_default_ptype_table(struct rte_eth_dev *dev)
 		ad->ptype_tbl[i] = i40e_get_default_pkt_type(i);
 }
 
-void __attribute__((cold))
+void __rte_cold
 i40e_set_default_pctype_table(struct rte_eth_dev *dev)
 {
 	struct i40e_adapter *ad =
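
The recurring __attribute__((cold)) to __rte_cold conversions throughout this file swap the GCC-specific attribute for the EAL's portable wrapper, so the annotation can be adapted per toolchain in one place. A sketch of what such a wrapper looks like (paraphrased; the exact guards live in rte_common.h and may differ):

    /* Hint that a function is rarely executed, letting the compiler
     * optimize it for size and move it out of hot code paths; expands
     * to nothing where the attribute is unsupported. */
    #if defined(__GNUC__) || defined(__clang__)
    #define __rte_cold __attribute__((cold))
    #else
    #define __rte_cold
    #endif
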