#include "ionic_lif.h"
#include "ionic_rxtx.h"
-#define IONIC_RX_RING_DOORBELL_STRIDE (32 - 1)
-
/*********************************************************************
*
* TX functions
ionic_dev_tx_queue_release(void *tx_queue)
{
struct ionic_tx_qcq *txq = tx_queue;
+ struct ionic_tx_stats *stats = &txq->stats;
IONIC_PRINT_CALL();
+ IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
+ txq->qcq.q.index, stats->packets, stats->tso);
+
ionic_lif_txq_deinit(txq);
ionic_qcq_free(&txq->qcq);
eth_dev->data->tx_queue_state[tx_queue_id] =
RTE_ETH_QUEUE_STATE_STOPPED;
- err = ionic_tx_qcq_alloc(lif, tx_queue_id, nb_desc, &txq);
+ err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
if (err) {
IONIC_PRINT(DEBUG, "Queue allocation failure");
return -EINVAL;
uint16_t vlan_tci, bool has_vlan,
bool start, bool done)
{
+ void **info;
uint8_t flags = 0;
flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
desc->hdr_len = hdrlen;
desc->mss = mss;
- ionic_q_post(q, done, done ? txm : NULL);
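+ /* Store the mbuf only on the last descriptor, so completion frees it once */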
+ if (done) {
+ info = IONIC_INFO_PTR(q, q->head_idx);
+ info[0] = txm;
+ }
+
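+ /* Advance the producer index; the doorbell now rings once per burst */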
+ q->head_idx = Q_NEXT_TO_POST(q, 1);
}
static struct ionic_txq_desc *
}
static int
-ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
- bool not_xmit_more)
+ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
struct ionic_queue *q = &txq->qcq.q;
struct ionic_tx_stats *stats = &txq->stats;
hdrlen, mss,
encap,
vlan_tci, has_vlan,
- start, done && not_xmit_more);
+ start, done);
desc = ionic_tx_tso_next(txq, &elem);
start = false;
seglen = mss;
offset = 0;
data_iova = rte_mbuf_data_iova(txm_seg);
left = txm_seg->data_len;
- stats->frags++;
while (left > 0) {
next_addr = rte_cpu_to_le_64(data_iova + offset);
hdrlen, mss,
encap,
vlan_tci, has_vlan,
- start, done && not_xmit_more);
+ start, done);
desc = ionic_tx_tso_next(txq, &elem);
start = false;
}
}
static __rte_always_inline int
-ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
- bool not_xmit_more)
+ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
struct ionic_queue *q = &txq->qcq.q;
struct ionic_txq_desc *desc, *desc_base = q->base;
struct ionic_txq_sg_elem *elem;
struct ionic_tx_stats *stats = &txq->stats;
struct rte_mbuf *txm_seg;
+ void **info;
bool encap;
bool has_vlan;
uint64_t ol_flags = txm->ol_flags;
uint8_t flags = 0;
desc = &desc_base[q->head_idx];
+ info = IONIC_INFO_PTR(q, q->head_idx);
if ((ol_flags & PKT_TX_IP_CKSUM) &&
(txq->flags & IONIC_QCQ_F_CSUM_L3)) {
desc->len = txm->data_len;
desc->vlan_tci = txm->vlan_tci;
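+ /* Save the mbuf so the cleanup in ionic_tx_flush() can free it */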
+ info[0] = txm;
+
elem = sg_desc_base[q->head_idx].elems;
+
txm_seg = txm->next;
while (txm_seg != NULL) {
elem->len = txm_seg->data_len;
elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
- stats->frags++;
elem++;
txm_seg = txm_seg->next;
}
- ionic_q_post(q, not_xmit_more, txm);
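+ /* Advance the producer index; no per-packet doorbell ring */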
+ q->head_idx = Q_NEXT_TO_POST(q, 1);
return 0;
}
struct ionic_tx_stats *stats = &txq->stats;
uint32_t next_q_head_idx;
uint32_t bytes_tx = 0;
- uint16_t nb_tx = 0;
+ uint16_t nb_avail, nb_tx = 0;
int err;
- bool last;
/* Cleaning old buffers */
ionic_tx_flush(txq);
- if (unlikely(ionic_q_space_avail(q) < nb_pkts)) {
- stats->stop += nb_pkts;
- return 0;
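+ /* Accept as many packets as there is ring space for, rather than none */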
+ nb_avail = ionic_q_space_avail(q);
+ if (unlikely(nb_avail < nb_pkts)) {
+ stats->stop += nb_pkts - nb_avail;
+ nb_pkts = nb_avail;
}
while (nb_tx < nb_pkts) {
- last = (nb_tx == (nb_pkts - 1));
-
next_q_head_idx = Q_NEXT_TO_POST(q, 1);
if ((next_q_head_idx & 0x3) == 0) {
struct ionic_txq_desc *desc_base = q->base;
}
if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)
- err = ionic_tx_tso(txq, tx_pkts[nb_tx], last);
+ err = ionic_tx_tso(txq, tx_pkts[nb_tx]);
else
- err = ionic_tx(txq, tx_pkts[nb_tx], last);
+ err = ionic_tx(txq, tx_pkts[nb_tx]);
if (err) {
stats->drop += nb_pkts - nb_tx;
- if (nb_tx > 0)
- ionic_q_flush(q);
break;
}
nb_tx++;
}
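+ /* Write barrier, then ring the doorbell once for the whole burst */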
+ if (nb_tx > 0) {
+ rte_wmb();
+ ionic_q_flush(q);
+ }
+
stats->packets += nb_tx;
stats->bytes += bytes_tx;
(PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)
uint16_t
-ionic_prep_pkts(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
+ struct ionic_tx_qcq *txq = tx_queue;
struct rte_mbuf *txm;
uint64_t offloads;
int i = 0;
for (i = 0; i < nb_pkts; i++) {
txm = tx_pkts[i];
- if (txm->nb_segs > IONIC_TX_MAX_SG_ELEMS_V1 + 1) {
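+ /* Check against this queue's firmware-reported segment limit */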
+ if (txm->nb_segs > txq->num_segs_fw) {
rte_errno = -EINVAL;
break;
}
ionic_dev_rx_queue_release(void *rx_queue)
{
struct ionic_rx_qcq *rxq = rx_queue;
+ struct ionic_rx_stats *stats;
if (!rxq)
return;
IONIC_PRINT_CALL();
+ stats = &rxq->stats;
+
+ IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
+ rxq->qcq.q.index, stats->packets, stats->mtods);
+
ionic_rx_empty(rxq);
ionic_lif_rxq_deinit(rxq);
eth_dev->data->rx_queue_state[rx_queue_id] =
RTE_ETH_QUEUE_STATE_STOPPED;
- err = ionic_rx_qcq_alloc(lif, rx_queue_id, nb_desc,
+ err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc,
&rxq);
if (err) {
IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
pkt_type = RTE_PTYPE_L2_ETHER_ARP;
else
pkt_type = RTE_PTYPE_UNKNOWN;
+ stats->mtods++;
break;
}
}
new->addr = old->addr;
new->len = old->len;
- ionic_q_post(q, true, mbuf);
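+ /* Recycle posts a single descriptor, so ring the doorbell immediately */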
+ q->info[q->head_idx] = mbuf;
+
+ q->head_idx = Q_NEXT_TO_POST(q, 1);
+
+ ionic_q_flush(q);
}
static __rte_always_inline int
struct ionic_rxq_desc *desc, *desc_base = q->base;
struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
struct ionic_rxq_sg_elem *elem;
+ void **info;
rte_iova_t dma_addr;
uint32_t i, j, nsegs, buf_size, size;
- bool ring_doorbell;
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
RTE_PKTMBUF_HEADROOM);
return -ENOMEM;
}
+ info = IONIC_INFO_PTR(q, q->head_idx);
+
nsegs = (len + buf_size - 1) / buf_size;
desc = &desc_base[q->head_idx];
IONIC_PRINT(ERR, "Rx SG size is not sufficient (%d < %d)",
size, len);
- ring_doorbell = ((q->head_idx + 1) &
- IONIC_RX_RING_DOORBELL_STRIDE) == 0;
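+ /* Store the mbuf in the info array; the fixed doorbell stride is gone */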
+ info[0] = rxm;
- ionic_q_post(q, ring_doorbell, rxm);
+ q->head_idx = Q_NEXT_TO_POST(q, 1);
}
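+ /* Ring the doorbell once after the ring has been filled */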
+ ionic_q_flush(q);
+
return 0;
}