#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
struct ionic_queue *q = &txq->q;
qinfo->nb_desc = q->num_descs;
- qinfo->conf.offloads = txq->offloads;
+ qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}
-static inline void __rte_cold
+static __rte_always_inline void
ionic_tx_flush(struct ionic_cq *cq)
{
struct ionic_queue *q = cq->bound_q;
if (tx_conf->tx_deferred_start)
txq->flags |= IONIC_QCQ_F_DEFERRED;
- txq->offloads = offloads;
+ /* Convert the offload flags into queue flags */
+ if (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+ txq->flags |= IONIC_QCQ_F_CSUM_L3;
+ if (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+ txq->flags |= IONIC_QCQ_F_CSUM_TCP;
+ if (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
+ txq->flags |= IONIC_QCQ_F_CSUM_UDP;
eth_dev->data->tx_queues[tx_queue_id] = txq;
err = ionic_lif_txq_init(txq);
if (err)
return err;
+ } else {
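+ /* Queue is already initialized, just re-enable it */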
+ ionic_qcq_enable(txq);
}
- ionic_qcq_enable(txq);
-
tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
static int
-ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm,
- uint64_t offloads __rte_unused, bool not_xmit_more)
+ionic_tx_tso(struct ionic_qcq *txq, struct rte_mbuf *txm,
+ bool not_xmit_more)
{
+ struct ionic_queue *q = &txq->q;
struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
struct ionic_txq_desc *desc;
struct ionic_txq_sg_elem *elem;
struct rte_mbuf *txm_seg;
- uint64_t desc_addr = 0;
+ rte_iova_t data_iova;
+ uint64_t desc_addr = 0, next_addr;
uint16_t desc_len = 0;
uint8_t desc_nsge;
uint32_t hdrlen;
seglen = hdrlen + mss;
left = txm->data_len;
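+ /* Base IOVA of the first segment's data; chunk offsets are added below */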
+ data_iova = rte_mbuf_data_iova(txm);
desc = ionic_tx_tso_next(q, &elem);
start = true;
while (left > 0) {
len = RTE_MIN(seglen, left);
frag_left = seglen - len;
- desc_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
+ desc_addr = rte_cpu_to_le_64(data_iova + offset);
desc_len = len;
desc_nsge = 0;
left -= len;
txm_seg = txm->next;
while (txm_seg != NULL) {
offset = 0;
+ data_iova = rte_mbuf_data_iova(txm_seg);
left = txm_seg->data_len;
stats->frags++;
while (left > 0) {
- rte_iova_t data_iova;
- data_iova = rte_mbuf_data_iova(txm_seg);
- elem->addr = rte_cpu_to_le_64(data_iova) + offset;
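+ /* Add the chunk offset before converting to little-endian */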
+ next_addr = rte_cpu_to_le_64(data_iova + offset);
if (frag_left > 0) {
len = RTE_MIN(frag_left, left);
frag_left -= len;
+ elem->addr = next_addr;
elem->len = len;
elem++;
desc_nsge++;
} else {
len = RTE_MIN(mss, left);
frag_left = mss - len;
- data_iova = rte_mbuf_data_iova(txm_seg);
- desc_addr = rte_cpu_to_le_64(data_iova);
+ desc_addr = next_addr;
desc_len = len;
desc_nsge = 0;
}
offset += len;
if (txm_seg->next != NULL && frag_left > 0)
continue;
+
done = (txm_seg->next == NULL && left == 0);
ionic_tx_tso_post(q, desc, txm_seg,
desc_addr, desc_nsge, desc_len,
return 0;
}
-static int
-ionic_tx(struct ionic_queue *q, struct rte_mbuf *txm,
- uint64_t offloads, bool not_xmit_more)
+static __rte_always_inline int
+ionic_tx(struct ionic_qcq *txq, struct rte_mbuf *txm,
+ bool not_xmit_more)
{
+ struct ionic_queue *q = &txq->q;
struct ionic_txq_desc *desc_base = q->base;
struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
struct ionic_txq_desc *desc = &desc_base[q->head_idx];
bool encap;
bool has_vlan;
uint64_t ol_flags = txm->ol_flags;
- uint64_t addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
+ uint64_t addr;
uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
uint8_t flags = 0;
if ((ol_flags & PKT_TX_IP_CKSUM) &&
- (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) {
+ (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
- if (((ol_flags & PKT_TX_TCP_CKSUM) &&
- (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) ||
- ((ol_flags & PKT_TX_UDP_CKSUM) &&
- (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)))
- flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
- } else {
- stats->no_csum++;
}
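+ /* L4 checksum can be requested independently of L3 */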
+ if (((ol_flags & PKT_TX_TCP_CKSUM) &&
+ (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
+ ((ol_flags & PKT_TX_UDP_CKSUM) &&
+ (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
+ opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
+ flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
+ }
+
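+ /* No hardware checksum was requested for this packet */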
+ if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
+ stats->no_csum++;
+
has_vlan = (ol_flags & PKT_TX_VLAN_PKT);
encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
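+ /* Use the mbuf's actual data offset, not the default headroom offset */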
+ addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));
+
desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
desc->len = txm->data_len;
desc->vlan_tci = txm->vlan_tci;
}
if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)
- err = ionic_tx_tso(q, tx_pkts[nb_tx], txq->offloads,
- last);
+ err = ionic_tx_tso(txq, tx_pkts[nb_tx], last);
else
- err = ionic_tx(q, tx_pkts[nb_tx], txq->offloads, last);
+ err = ionic_tx(txq, tx_pkts[nb_tx], last);
if (err) {
stats->drop += nb_pkts - nb_tx;
if (nb_tx > 0)
qinfo->scattered_rx = dev->data->scattered_rx;
qinfo->nb_desc = q->num_descs;
qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
- qinfo->conf.offloads = rxq->offloads;
+ qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}
static void __rte_cold
"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
socket_id, rx_queue_id, nb_desc, offloads);
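+ /* Warn if the application requested no-drop mode, which is not supported */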
+ if (!rx_conf->rx_drop_en)
+ IONIC_PRINT(WARNING, "No-drop mode is not supported");
+
/* Validate number of receive descriptors */
if (!rte_is_power_of_2(nb_desc) ||
nb_desc < IONIC_MIN_RING_DESC ||
return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
}
- if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
- eth_dev->data->scattered_rx = 1;
-
/* Free memory prior to re-allocation if needed... */
if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
void *rx_queue = eth_dev->data->rx_queues[rx_queue_id];
if (rx_conf->rx_deferred_start)
rxq->flags |= IONIC_QCQ_F_DEFERRED;
- rxq->offloads = offloads;
-
eth_dev->data->rx_queues[rx_queue_id] = rxq;
return 0;
}
-static void
+static __rte_always_inline void
ionic_rx_clean(struct ionic_queue *q,
uint32_t q_desc_index, uint32_t cq_desc_index,
void *cb_arg, void *service_cb_arg)
ionic_q_post(q, true, ionic_rx_clean, mbuf);
}
-static int __rte_cold
+static __rte_always_inline int
ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)
{
struct ionic_queue *q = &rxq->q;
err = ionic_lif_rxq_init(rxq);
if (err)
return err;
+ } else {
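+ /* Queue is already initialized, just re-enable it */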
+ ionic_qcq_enable(rxq);
}
/* Allocate buffers for descriptor rings */
return -1;
}
- ionic_qcq_enable(rxq);
-
rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
-static inline void __rte_cold
+static __rte_always_inline void
ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
void *service_cb_arg)
{