X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fionic%2Fionic_rxtx.c;h=b83ea1bcaa6abf12db5866f9258f342f87343b30;hb=6ceb7ab83f168fa6b8e90e4bd5a1392de1a48c70;hp=89b37733b676b16ab870de690a104f3ab3a2f9f6;hpb=8ec5ad7f8028830ef46c0d57315a32d0032af9c3;p=dpdk.git

diff --git a/drivers/net/ionic/ionic_rxtx.c b/drivers/net/ionic/ionic_rxtx.c
index 89b37733b6..b83ea1bcaa 100644
--- a/drivers/net/ionic/ionic_rxtx.c
+++ b/drivers/net/ionic/ionic_rxtx.c
@@ -47,8 +47,6 @@
 #include "ionic_lif.h"
 #include "ionic_rxtx.h"
 
-#define IONIC_RX_RING_DOORBELL_STRIDE	(32 - 1)
-
 /*********************************************************************
  *
  * TX functions
@@ -123,9 +121,13 @@ void __rte_cold
 ionic_dev_tx_queue_release(void *tx_queue)
 {
 	struct ionic_tx_qcq *txq = tx_queue;
+	struct ionic_tx_stats *stats = &txq->stats;
 
 	IONIC_PRINT_CALL();
 
+	IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
+		txq->qcq.q.index, stats->packets, stats->tso);
+
 	ionic_lif_txq_deinit(txq);
 
 	ionic_qcq_free(&txq->qcq);
@@ -298,6 +300,7 @@ ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
 		uint16_t vlan_tci, bool has_vlan,
 		bool start, bool done)
 {
+	void **info;
 	uint8_t flags = 0;
 	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
 	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
@@ -311,7 +314,12 @@ ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
 	desc->hdr_len = hdrlen;
 	desc->mss = mss;
 
-	ionic_q_post(q, done, done ? txm : NULL);
+	if (done) {
+		info = IONIC_INFO_PTR(q, q->head_idx);
+		info[0] = txm;
+	}
+
+	q->head_idx = Q_NEXT_TO_POST(q, 1);
 }
 
 static struct ionic_txq_desc *
@@ -328,8 +336,7 @@ ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
 }
 
 static int
-ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
-		bool not_xmit_more)
+ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
 {
 	struct ionic_queue *q = &txq->qcq.q;
 	struct ionic_tx_stats *stats = &txq->stats;
@@ -397,7 +404,7 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
 				hdrlen, mss,
 				encap,
 				vlan_tci, has_vlan,
-				start, done && not_xmit_more);
+				start, done);
 			desc = ionic_tx_tso_next(txq, &elem);
 			start = false;
 			seglen = mss;
@@ -410,7 +417,6 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
 		offset = 0;
 		data_iova = rte_mbuf_data_iova(txm_seg);
 		left = txm_seg->data_len;
-		stats->frags++;
 
 		while (left > 0) {
 			next_addr = rte_cpu_to_le_64(data_iova + offset);
@@ -439,7 +445,7 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
 					hdrlen, mss,
 					encap,
 					vlan_tci, has_vlan,
-					start, done && not_xmit_more);
+					start, done);
 				desc = ionic_tx_tso_next(txq, &elem);
 				start = false;
 			}
@@ -453,8 +459,7 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
 }
 
 static __rte_always_inline int
-ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
-		bool not_xmit_more)
+ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
 {
 	struct ionic_queue *q = &txq->qcq.q;
 	struct ionic_txq_desc *desc, *desc_base = q->base;
@@ -462,6 +467,7 @@ ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
 	struct ionic_txq_sg_elem *elem;
 	struct ionic_tx_stats *stats = &txq->stats;
 	struct rte_mbuf *txm_seg;
+	void **info;
 	bool encap;
 	bool has_vlan;
 	uint64_t ol_flags = txm->ol_flags;
@@ -470,6 +476,7 @@ ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
 	uint8_t flags = 0;
 
 	desc = &desc_base[q->head_idx];
+	info = IONIC_INFO_PTR(q, q->head_idx);
 
 	if ((ol_flags & PKT_TX_IP_CKSUM) &&
 	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
@@ -503,17 +510,19 @@ ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
 	desc->len = txm->data_len;
 	desc->vlan_tci = txm->vlan_tci;
 
+	info[0] = txm;
+
 	elem = sg_desc_base[q->head_idx].elems;
+
 	txm_seg = txm->next;
 	while (txm_seg != NULL) {
 		elem->len = txm_seg->data_len;
 		elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
-		stats->frags++;
 		elem++;
 		txm_seg = txm_seg->next;
 	}
 
-	ionic_q_post(q, not_xmit_more, txm);
+	q->head_idx = Q_NEXT_TO_POST(q, 1);
 
 	return 0;
 }
@@ -527,21 +536,19 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	struct ionic_tx_stats *stats = &txq->stats;
 	uint32_t next_q_head_idx;
 	uint32_t bytes_tx = 0;
-	uint16_t nb_tx = 0;
+	uint16_t nb_avail, nb_tx = 0;
 	int err;
-	bool last;
 
 	/* Cleaning old buffers */
 	ionic_tx_flush(txq);
 
-	if (unlikely(ionic_q_space_avail(q) < nb_pkts)) {
-		stats->stop += nb_pkts;
-		return 0;
+	nb_avail = ionic_q_space_avail(q);
+	if (unlikely(nb_avail < nb_pkts)) {
+		stats->stop += nb_pkts - nb_avail;
+		nb_pkts = nb_avail;
 	}
 
 	while (nb_tx < nb_pkts) {
-		last = (nb_tx == (nb_pkts - 1));
-
 		next_q_head_idx = Q_NEXT_TO_POST(q, 1);
 		if ((next_q_head_idx & 0x3) == 0) {
 			struct ionic_txq_desc *desc_base = q->base;
@@ -550,13 +557,11 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		}
 
 		if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)
-			err = ionic_tx_tso(txq, tx_pkts[nb_tx], last);
+			err = ionic_tx_tso(txq, tx_pkts[nb_tx]);
 		else
-			err = ionic_tx(txq, tx_pkts[nb_tx], last);
+			err = ionic_tx(txq, tx_pkts[nb_tx]);
 
 		if (err) {
 			stats->drop += nb_pkts - nb_tx;
-			if (nb_tx > 0)
-				ionic_q_flush(q);
 			break;
 		}
@@ -564,6 +569,11 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		nb_tx++;
 	}
 
+	if (nb_tx > 0) {
+		rte_wmb();
+		ionic_q_flush(q);
+	}
+
 	stats->packets += nb_tx;
 	stats->bytes += bytes_tx;
 
@@ -588,9 +598,9 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	(PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)
 
 uint16_t
-ionic_prep_pkts(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
-		uint16_t nb_pkts)
+ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
+	struct ionic_tx_qcq *txq = tx_queue;
 	struct rte_mbuf *txm;
 	uint64_t offloads;
 	int i = 0;
@@ -598,7 +608,7 @@ ionic_prep_pkts(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
 	for (i = 0; i < nb_pkts; i++) {
 		txm = tx_pkts[i];
 
-		if (txm->nb_segs > IONIC_TX_MAX_SG_ELEMS_V1 + 1) {
+		if (txm->nb_segs > txq->num_segs_fw) {
 			rte_errno = -EINVAL;
 			break;
 		}
@@ -657,12 +667,18 @@ void __rte_cold
 ionic_dev_rx_queue_release(void *rx_queue)
 {
 	struct ionic_rx_qcq *rxq = rx_queue;
+	struct ionic_rx_stats *stats;
 
 	if (!rxq)
 		return;
 
 	IONIC_PRINT_CALL();
 
+	stats = &rxq->stats;
+
+	IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
+		rxq->qcq.q.index, stats->packets, stats->mtods);
+
 	ionic_rx_empty(rxq);
 
 	ionic_lif_rxq_deinit(rxq);
@@ -887,6 +903,7 @@ ionic_rx_clean(struct ionic_rx_qcq *rxq,
 				pkt_type = RTE_PTYPE_L2_ETHER_ARP;
 			else
 				pkt_type = RTE_PTYPE_UNKNOWN;
+			stats->mtods++;
 			break;
 		}
 	}
@@ -911,7 +928,11 @@ ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
 	new->addr = old->addr;
 	new->len = old->len;
 
-	ionic_q_post(q, true, mbuf);
+	q->info[q->head_idx] = mbuf;
+
+	q->head_idx = Q_NEXT_TO_POST(q, 1);
+
+	ionic_q_flush(q);
 }
 
 static __rte_always_inline int
@@ -921,9 +942,9 @@ ionic_rx_fill(struct ionic_rx_qcq *rxq, uint32_t len)
 	struct ionic_rxq_desc *desc, *desc_base = q->base;
 	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
 	struct ionic_rxq_sg_elem *elem;
+	void **info;
 	rte_iova_t dma_addr;
 	uint32_t i, j, nsegs, buf_size, size;
-	bool ring_doorbell;
 
 	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
 		RTE_PKTMBUF_HEADROOM);
@@ -938,6 +959,8 @@ ionic_rx_fill(struct ionic_rx_qcq *rxq, uint32_t len)
 			return -ENOMEM;
 		}
 
+		info = IONIC_INFO_PTR(q, q->head_idx);
+
 		nsegs = (len + buf_size - 1) / buf_size;
 
 		desc = &desc_base[q->head_idx];
@@ -977,12 +1000,13 @@ ionic_rx_fill(struct ionic_rx_qcq *rxq, uint32_t len)
 			IONIC_PRINT(ERR,
 				"Rx SG size is not sufficient (%d < %d)",
 				size, len);
 
-		ring_doorbell = ((q->head_idx + 1) &
-			IONIC_RX_RING_DOORBELL_STRIDE) == 0;
+		info[0] = rxm;
 
-		ionic_q_post(q, ring_doorbell, rxm);
+		q->head_idx = Q_NEXT_TO_POST(q, 1);
 	}
 
+	ionic_q_flush(q);
+
 	return 0;
 }
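Note: the common thread of this diff is a deferred doorbell. The per-descriptor ionic_q_post() (and the Rx IONIC_RX_RING_DOORBELL_STRIDE heuristic) is gone; each post now only records its mbuf in the queue's info array and advances head_idx, and the burst path rings the device once at the end, after an rte_wmb(). A minimal sketch of that pattern follows; the demo_ring type and demo_* names are mine for illustration (only rte_wmb(), rte_mbuf, and the DPDK headers are real symbols — ionic's actual bookkeeping lives in struct ionic_queue and ionic_q_flush()):

	/* Deferred-doorbell sketch: post a whole burst, then ring once. */
	#include <stdint.h>

	#include <rte_atomic.h>	/* rte_wmb() */
	#include <rte_mbuf.h>

	struct demo_ring {
		uint16_t head_idx;
		uint16_t size_mask;		/* ring size is a power of two */
		void **info;			/* per-slot context, freed on completion */
		volatile uint64_t *doorbell;	/* device MMIO register */
	};

	/* Post one packet: fill the slot, remember the mbuf, advance head.
	 * Deliberately no MMIO here - that is the point of the patch. */
	static inline void
	demo_post(struct demo_ring *r, struct rte_mbuf *m)
	{
		/* ... hardware descriptor r->head_idx would be written here ... */
		r->info[r->head_idx] = m;
		r->head_idx = (r->head_idx + 1) & r->size_mask;
	}

	static inline uint16_t
	demo_xmit_burst(struct demo_ring *r, struct rte_mbuf **pkts, uint16_t n)
	{
		uint16_t i;

		for (i = 0; i < n; i++)
			demo_post(r, pkts[i]);

		if (n > 0) {
			rte_wmb();			/* descriptors visible before doorbell */
			*r->doorbell = r->head_idx;	/* one write per burst */
		}

		return n;
	}

One doorbell per burst, instead of one per packet (Tx) or one every 32 buffers (Rx), cuts MMIO traffic on the hot path; the rte_wmb() keeps the descriptor stores ordered ahead of the doorbell write. It is also why the error path in ionic_xmit_pkts() no longer needs its own ionic_q_flush(): the single flush after the loop covers whatever was posted before the failure.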