X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fionic%2Fionic_rxtx.c;h=67631a5813b79a55e8229cc111a925cb58799eef;hb=7e5b479803c3e200dcab5d832bec7bd7f16081c6;hp=5f836f01340d412dcb61a798b06f130298ce0830;hpb=be39f75cd47f590e801912c085f895d6f8143ec1;p=dpdk.git

diff --git a/drivers/net/ionic/ionic_rxtx.c b/drivers/net/ionic/ionic_rxtx.c
index 5f836f0134..67631a5813 100644
--- a/drivers/net/ionic/ionic_rxtx.c
+++ b/drivers/net/ionic/ionic_rxtx.c
@@ -47,8 +47,6 @@
 #include "ionic_lif.h"
 #include "ionic_rxtx.h"
 
-#define IONIC_RX_RING_DOORBELL_STRIDE	(32 - 1)
-
 /*********************************************************************
  *
  * TX functions
@@ -120,12 +118,16 @@ ionic_tx_flush(struct ionic_tx_qcq *txq)
 }
 
 void __rte_cold
-ionic_dev_tx_queue_release(void *tx_queue)
+ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	struct ionic_tx_qcq *txq = tx_queue;
+	struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];
+	struct ionic_tx_stats *stats = &txq->stats;
 
 	IONIC_PRINT_CALL();
 
+	IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
+		txq->qcq.q.index, stats->packets, stats->tso);
+
 	ionic_lif_txq_deinit(txq);
 
 	ionic_qcq_free(&txq->qcq);
@@ -183,15 +185,14 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
 
 	/* Free memory prior to re-allocation if needed... */
 	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
-		void *tx_queue = eth_dev->data->tx_queues[tx_queue_id];
-		ionic_dev_tx_queue_release(tx_queue);
+		ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
 		eth_dev->data->tx_queues[tx_queue_id] = NULL;
 	}
 
 	eth_dev->data->tx_queue_state[tx_queue_id] =
 		RTE_ETH_QUEUE_STATE_STOPPED;
 
-	err = ionic_tx_qcq_alloc(lif, tx_queue_id, nb_desc, &txq);
+	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
 	if (err) {
 		IONIC_PRINT(DEBUG, "Queue allocation failure");
 		return -EINVAL;
@@ -298,6 +299,7 @@ ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
 		uint16_t vlan_tci, bool has_vlan,
 		bool start, bool done)
 {
+	void **info;
 	uint8_t flags = 0;
 	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
 	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
@@ -311,7 +313,12 @@ ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
 	desc->hdr_len = hdrlen;
 	desc->mss = mss;
 
-	ionic_q_post(q, done, done ? txm : NULL);
+	if (done) {
+		info = IONIC_INFO_PTR(q, q->head_idx);
+		info[0] = txm;
+	}
+
+	q->head_idx = Q_NEXT_TO_POST(q, 1);
 }
 
 static struct ionic_txq_desc *
@@ -328,8 +335,7 @@ ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
 }
 
 static int
-ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
-		bool not_xmit_more)
+ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
 {
 	struct ionic_queue *q = &txq->qcq.q;
 	struct ionic_tx_stats *stats = &txq->stats;
@@ -397,7 +403,7 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
 			hdrlen, mss,
 			encap,
 			vlan_tci, has_vlan,
-			start, done && not_xmit_more);
+			start, done);
 		desc = ionic_tx_tso_next(txq, &elem);
 		start = false;
 		seglen = mss;
@@ -410,7 +416,6 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
 		offset = 0;
 		data_iova = rte_mbuf_data_iova(txm_seg);
 		left = txm_seg->data_len;
-		stats->frags++;
 
 		while (left > 0) {
 			next_addr = rte_cpu_to_le_64(data_iova + offset);
@@ -439,7 +444,7 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
 				hdrlen, mss,
 				encap,
 				vlan_tci, has_vlan,
-				start, done && not_xmit_more);
+				start, done);
 			desc = ionic_tx_tso_next(txq, &elem);
 			start = false;
 		}
@@ -453,8 +458,7 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
 }
 
 static __rte_always_inline int
-ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
-		bool not_xmit_more)
+ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
 {
 	struct ionic_queue *q = &txq->qcq.q;
 	struct ionic_txq_desc *desc, *desc_base = q->base;
@@ -462,6 +466,7 @@ ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
 	struct ionic_txq_sg_elem *elem;
 	struct ionic_tx_stats *stats = &txq->stats;
 	struct rte_mbuf *txm_seg;
+	void **info;
 	bool encap;
 	bool has_vlan;
 	uint64_t ol_flags = txm->ol_flags;
@@ -470,6 +475,7 @@ ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
 	uint8_t flags = 0;
 
 	desc = &desc_base[q->head_idx];
+	info = IONIC_INFO_PTR(q, q->head_idx);
 
 	if ((ol_flags & PKT_TX_IP_CKSUM) &&
 	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
@@ -503,17 +509,19 @@ ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
 	desc->len = txm->data_len;
 	desc->vlan_tci = txm->vlan_tci;
 
+	info[0] = txm;
+
 	elem = sg_desc_base[q->head_idx].elems;
+
 	txm_seg = txm->next;
 	while (txm_seg != NULL) {
 		elem->len = txm_seg->data_len;
 		elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
-		stats->frags++;
 		elem++;
 		txm_seg = txm_seg->next;
 	}
 
-	ionic_q_post(q, not_xmit_more, txm);
+	q->head_idx = Q_NEXT_TO_POST(q, 1);
 
 	return 0;
 }
@@ -527,21 +535,19 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	struct ionic_tx_stats *stats = &txq->stats;
 	uint32_t next_q_head_idx;
 	uint32_t bytes_tx = 0;
-	uint16_t nb_tx = 0;
+	uint16_t nb_avail, nb_tx = 0;
 	int err;
-	bool last;
 
 	/* Cleaning old buffers */
 	ionic_tx_flush(txq);
 
-	if (unlikely(ionic_q_space_avail(q) < nb_pkts)) {
-		stats->stop += nb_pkts;
-		return 0;
+	nb_avail = ionic_q_space_avail(q);
+	if (unlikely(nb_avail < nb_pkts)) {
+		stats->stop += nb_pkts - nb_avail;
+		nb_pkts = nb_avail;
 	}
 
 	while (nb_tx < nb_pkts) {
-		last = (nb_tx == (nb_pkts - 1));
-
 		next_q_head_idx = Q_NEXT_TO_POST(q, 1);
 		if ((next_q_head_idx & 0x3) == 0) {
 			struct ionic_txq_desc *desc_base = q->base;
@@ -550,13 +556,11 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		}
 
 		if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)
-			err = ionic_tx_tso(txq, tx_pkts[nb_tx], last);
+			err = ionic_tx_tso(txq, tx_pkts[nb_tx]);
 		else
-			err = ionic_tx(txq, tx_pkts[nb_tx], last);
+			err = ionic_tx(txq, tx_pkts[nb_tx]);
 		if (err) {
 			stats->drop += nb_pkts - nb_tx;
-			if (nb_tx > 0)
-				ionic_q_flush(q);
 			break;
 		}
 
@@ -564,6 +568,11 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		nb_tx++;
 	}
 
+	if (nb_tx > 0) {
+		rte_wmb();
+		ionic_q_flush(q);
+	}
+
 	stats->packets += nb_tx;
 	stats->bytes += bytes_tx;
 
@@ -588,9 +597,9 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	(PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)
 
 uint16_t
-ionic_prep_pkts(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
-		uint16_t nb_pkts)
+ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
+	struct ionic_tx_qcq *txq = tx_queue;
 	struct rte_mbuf *txm;
 	uint64_t offloads;
 	int i = 0;
@@ -598,7 +607,7 @@ ionic_prep_pkts(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
 	for (i = 0; i < nb_pkts; i++) {
 		txm = tx_pkts[i];
 
-		if (txm->nb_segs > IONIC_TX_MAX_SG_ELEMS_V1 + 1) {
+		if (txm->nb_segs > txq->num_segs_fw) {
 			rte_errno = -EINVAL;
 			break;
 		}
@@ -654,15 +663,21 @@ ionic_rx_empty(struct ionic_rx_qcq *rxq)
 }
 
 void __rte_cold
-ionic_dev_rx_queue_release(void *rx_queue)
+ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	struct ionic_rx_qcq *rxq = rx_queue;
+	struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];
+	struct ionic_rx_stats *stats;
 
 	if (!rxq)
 		return;
 
 	IONIC_PRINT_CALL();
 
+	stats = &rxq->stats;
+
+	IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
+		rxq->qcq.q.index, stats->packets, stats->mtods);
+
 	ionic_rx_empty(rxq);
 
 	ionic_lif_rxq_deinit(rxq);
@@ -710,15 +725,14 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 
 	/* Free memory prior to re-allocation if needed... */
 	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
-		void *rx_queue = eth_dev->data->rx_queues[rx_queue_id];
-		ionic_dev_rx_queue_release(rx_queue);
+		ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
 		eth_dev->data->rx_queues[rx_queue_id] = NULL;
 	}
 
 	eth_dev->data->rx_queue_state[rx_queue_id] =
 		RTE_ETH_QUEUE_STATE_STOPPED;
 
-	err = ionic_rx_qcq_alloc(lif, rx_queue_id, nb_desc,
+	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc,
 			&rxq);
 	if (err) {
 		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
@@ -887,6 +901,7 @@ ionic_rx_clean(struct ionic_rx_qcq *rxq,
 				pkt_type = RTE_PTYPE_L2_ETHER_ARP;
 			else
 				pkt_type = RTE_PTYPE_UNKNOWN;
+			stats->mtods++;
 			break;
 		}
 	}
@@ -911,7 +926,11 @@ ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
 	new->addr = old->addr;
 	new->len = old->len;
 
-	ionic_q_post(q, true, mbuf);
+	q->info[q->head_idx] = mbuf;
+
+	q->head_idx = Q_NEXT_TO_POST(q, 1);
+
+	ionic_q_flush(q);
 }
 
 static __rte_always_inline int
@@ -921,9 +940,9 @@ ionic_rx_fill(struct ionic_rx_qcq *rxq, uint32_t len)
 	struct ionic_rxq_desc *desc, *desc_base = q->base;
 	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
 	struct ionic_rxq_sg_elem *elem;
+	void **info;
 	rte_iova_t dma_addr;
 	uint32_t i, j, nsegs, buf_size, size;
-	bool ring_doorbell;
 
 	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
 		RTE_PKTMBUF_HEADROOM);
@@ -938,6 +957,8 @@ ionic_rx_fill(struct ionic_rx_qcq *rxq, uint32_t len)
 			return -ENOMEM;
 		}
 
+		info = IONIC_INFO_PTR(q, q->head_idx);
+
 		nsegs = (len + buf_size - 1) / buf_size;
 
 		desc = &desc_base[q->head_idx];
@@ -977,12 +998,13 @@ ionic_rx_fill(struct ionic_rx_qcq *rxq, uint32_t len)
 			IONIC_PRINT(ERR,
 				"Rx SG size is not sufficient (%d < %d)",
 				size, len);
 
-		ring_doorbell = ((q->head_idx + 1) &
-			IONIC_RX_RING_DOORBELL_STRIDE) == 0;
+		info[0] = rxm;
 
-		ionic_q_post(q, ring_doorbell, rxm);
+		q->head_idx = Q_NEXT_TO_POST(q, 1);
 	}
 
+	ionic_q_flush(q);
+
 	return 0;
 }