#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
struct ionic_queue *q = &txq->q;
qinfo->nb_desc = q->num_descs;
- qinfo->conf.offloads = txq->offloads;
- qinfo->conf.tx_deferred_start = txq->deferred_start;
+ qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
+ qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}
-static inline void __attribute__((cold))
+static __rte_always_inline void
ionic_tx_flush(struct ionic_cq *cq)
{
struct ionic_queue *q = cq->bound_q;
}
}
-void __attribute__((cold))
+void __rte_cold
ionic_dev_tx_queue_release(void *tx_queue)
{
struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;
IONIC_PRINT_CALL();
+ ionic_lif_txq_deinit(txq);
+
ionic_qcq_free(txq);
}
-int __attribute__((cold))
+int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
struct ionic_qcq *txq;
- IONIC_PRINT_CALL();
+ IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);
txq = eth_dev->data->tx_queues[tx_queue_id];
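+ /* Mark the queue stopped in the ethdev state before disabling it */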
+ eth_dev->data->tx_queue_state[tx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+
/*
* Note: it would be better to post a NOP Tx desc and wait for its
* completion before disabling the Tx queue
*/

ionic_qcq_disable(txq);

ionic_tx_flush(&txq->cq);
- ionic_lif_txq_deinit(txq);
-
- eth_dev->data->tx_queue_state[tx_queue_id] =
- RTE_ETH_QUEUE_STATE_STOPPED;
-
return 0;
}
-int __attribute__((cold))
+int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
- uint16_t nb_desc, uint32_t socket_id __rte_unused,
+ uint16_t nb_desc, uint32_t socket_id,
const struct rte_eth_txconf *tx_conf)
{
struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
uint64_t offloads;
int err;
- IONIC_PRINT_CALL();
-
- IONIC_PRINT(DEBUG, "Configuring TX queue %u with %u buffers",
- tx_queue_id, nb_desc);
-
if (tx_queue_id >= lif->ntxqcqs) {
IONIC_PRINT(DEBUG, "Queue index %u not available "
"(max %u queues)",
}
offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
+ IONIC_PRINT(DEBUG,
+ "Configuring skt %u TX queue %u with %u buffers, offloads %jx",
+ socket_id, tx_queue_id, nb_desc, offloads);
/* Validate number of transmit descriptors */
if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
eth_dev->data->tx_queues[tx_queue_id] = NULL;
}
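+ /* New queues start out in the stopped state */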
+ eth_dev->data->tx_queue_state[tx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+
err = ionic_tx_qcq_alloc(lif, tx_queue_id, nb_desc, &txq);
if (err) {
IONIC_PRINT(DEBUG, "Queue allocation failure");
}
/* Do not start queue with rte_eth_dev_start() */
- txq->deferred_start = tx_conf->tx_deferred_start;
+ if (tx_conf->tx_deferred_start)
+ txq->flags |= IONIC_QCQ_F_DEFERRED;
- txq->offloads = offloads;
+ /* Convert the offload flags into queue flags */
+ if (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+ txq->flags |= IONIC_QCQ_F_CSUM_L3;
+ if (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+ txq->flags |= IONIC_QCQ_F_CSUM_TCP;
+ if (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
+ txq->flags |= IONIC_QCQ_F_CSUM_UDP;
eth_dev->data->tx_queues[tx_queue_id] = txq;
/*
* Start Transmit Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
+ uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
struct ionic_qcq *txq;
int err;
- IONIC_PRINT_CALL();
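+ /* Nothing to do if the queue is already running */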
+ if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
+ IONIC_PRINT(DEBUG, "TX queue %u already started",
+ tx_queue_id);
+ return 0;
+ }
txq = eth_dev->data->tx_queues[tx_queue_id];
- err = ionic_lif_txq_init(txq);
- if (err)
- return err;
+ IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
+ tx_queue_id, txq->q.num_descs);
- ionic_qcq_enable(txq);
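+ /* Init the queue on first start; afterward, just re-enable it */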
+ if (!(txq->flags & IONIC_QCQ_F_INITED)) {
+ err = ionic_lif_txq_init(txq);
+ if (err)
+ return err;
+ } else {
+ ionic_qcq_enable(txq);
+ }
- eth_dev->data->tx_queue_state[tx_queue_id] =
- RTE_ETH_QUEUE_STATE_STARTED;
+ tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
}
static int
-ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm,
- uint64_t offloads __rte_unused, bool not_xmit_more)
+ionic_tx_tso(struct ionic_qcq *txq, struct rte_mbuf *txm,
+ bool not_xmit_more)
{
+ struct ionic_queue *q = &txq->q;
struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
struct ionic_txq_desc *desc;
struct ionic_txq_sg_elem *elem;
struct rte_mbuf *txm_seg;
- uint64_t desc_addr = 0;
+ rte_iova_t data_iova;
+ uint64_t desc_addr = 0, next_addr;
uint16_t desc_len = 0;
uint8_t desc_nsge;
uint32_t hdrlen;
seglen = hdrlen + mss;
left = txm->data_len;
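+ /* Cache the IOVA once; offsets are added before the LE conversion */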
+ data_iova = rte_mbuf_data_iova(txm);
desc = ionic_tx_tso_next(q, &elem);
start = true;
while (left > 0) {
len = RTE_MIN(seglen, left);
frag_left = seglen - len;
- desc_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
+ desc_addr = rte_cpu_to_le_64(data_iova + offset);
desc_len = len;
desc_nsge = 0;
left -= len;
txm_seg = txm->next;
while (txm_seg != NULL) {
offset = 0;
+ data_iova = rte_mbuf_data_iova(txm_seg);
left = txm_seg->data_len;
stats->frags++;
while (left > 0) {
- rte_iova_t data_iova;
- data_iova = rte_mbuf_data_iova(txm_seg);
- elem->addr = rte_cpu_to_le_64(data_iova) + offset;
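+ /* Add the offset before converting to little-endian */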
+ next_addr = rte_cpu_to_le_64(data_iova + offset);
if (frag_left > 0) {
len = RTE_MIN(frag_left, left);
frag_left -= len;
+ elem->addr = next_addr;
elem->len = len;
elem++;
desc_nsge++;
} else {
len = RTE_MIN(mss, left);
frag_left = mss - len;
- data_iova = rte_mbuf_data_iova(txm_seg);
- desc_addr = rte_cpu_to_le_64(data_iova);
+ desc_addr = next_addr;
desc_len = len;
desc_nsge = 0;
}
offset += len;
if (txm_seg->next != NULL && frag_left > 0)
continue;
+
done = (txm_seg->next == NULL && left == 0);
ionic_tx_tso_post(q, desc, txm_seg,
desc_addr, desc_nsge, desc_len,
return 0;
}
-static int
-ionic_tx(struct ionic_queue *q, struct rte_mbuf *txm,
- uint64_t offloads, bool not_xmit_more)
+static __rte_always_inline int
+ionic_tx(struct ionic_qcq *txq, struct rte_mbuf *txm,
+ bool not_xmit_more)
{
+ struct ionic_queue *q = &txq->q;
struct ionic_txq_desc *desc_base = q->base;
struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
struct ionic_txq_desc *desc = &desc_base[q->head_idx];
bool encap;
bool has_vlan;
uint64_t ol_flags = txm->ol_flags;
- uint64_t addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
+ uint64_t addr;
uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
uint8_t flags = 0;
if ((ol_flags & PKT_TX_IP_CKSUM) &&
- (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) {
+ (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
- if (((ol_flags & PKT_TX_TCP_CKSUM) &&
- (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) ||
- ((ol_flags & PKT_TX_UDP_CKSUM) &&
- (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)))
- flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
- } else {
- stats->no_csum++;
}
+ if (((ol_flags & PKT_TX_TCP_CKSUM) &&
+ (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
+ ((ol_flags & PKT_TX_UDP_CKSUM) &&
+ (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
+ opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
+ flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
+ }
+
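+ /* Count packets sent with no hardware checksum offload */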
+ if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
+ stats->no_csum++;
+
has_vlan = (ol_flags & PKT_TX_VLAN_PKT);
encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
+ addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));
+
desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
desc->len = txm->data_len;
desc->vlan_tci = txm->vlan_tci;
}
if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)
- err = ionic_tx_tso(q, tx_pkts[nb_tx], txq->offloads,
- last);
+ err = ionic_tx_tso(txq, tx_pkts[nb_tx], last);
else
- err = ionic_tx(q, tx_pkts[nb_tx], txq->offloads, last);
+ err = ionic_tx(txq, tx_pkts[nb_tx], last);
if (err) {
stats->drop += nb_pkts - nb_tx;
if (nb_tx > 0)
qinfo->mp = rxq->mb_pool;
qinfo->scattered_rx = dev->data->scattered_rx;
qinfo->nb_desc = q->num_descs;
- qinfo->conf.rx_deferred_start = rxq->deferred_start;
- qinfo->conf.offloads = rxq->offloads;
+ qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
+ qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}
-static void __attribute__((cold))
+static void __rte_cold
ionic_rx_empty(struct ionic_queue *q)
{
struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
}
}
-void __attribute__((cold))
+void __rte_cold
ionic_dev_rx_queue_release(void *rx_queue)
{
struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;
ionic_rx_empty(&rxq->q);
+ ionic_lif_rxq_deinit(rxq);
+
ionic_qcq_free(rxq);
}
-int __attribute__((cold))
+int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
uint16_t rx_queue_id,
uint16_t nb_desc,
- uint32_t socket_id __rte_unused,
+ uint32_t socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
uint64_t offloads;
int err;
- IONIC_PRINT_CALL();
-
- IONIC_PRINT(DEBUG, "Configuring RX queue %u with %u buffers",
- rx_queue_id, nb_desc);
-
if (rx_queue_id >= lif->nrxqcqs) {
IONIC_PRINT(ERR,
"Queue index %u not available (max %u queues)",
}
offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
+ IONIC_PRINT(DEBUG,
+ "Configuring skt %u RX queue %u with %u buffers, offloads %jx",
+ socket_id, rx_queue_id, nb_desc, offloads);
+
+ if (!rx_conf->rx_drop_en)
+ IONIC_PRINT(WARNING, "No-drop mode is not supported");
/* Validate number of receive descriptors */
if (!rte_is_power_of_2(nb_desc) ||
nb_desc < IONIC_MIN_RING_DESC ||
nb_desc > IONIC_MAX_RING_DESC) {
IONIC_PRINT(ERR,
- "Bad number of descriptors (%u) for queue %u (min: %u)",
+ "Bad descriptor count (%u) for queue %u (min: %u)",
nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
}
- if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
- eth_dev->data->scattered_rx = 1;
-
/* Free memory prior to re-allocation if needed... */
if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
void *rx_queue = eth_dev->data->rx_queues[rx_queue_id];
eth_dev->data->rx_queues[rx_queue_id] = NULL;
}
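+ /* New queues start out in the stopped state */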
+ eth_dev->data->rx_queue_state[rx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+
err = ionic_rx_qcq_alloc(lif, rx_queue_id, nb_desc, &rxq);
if (err) {
- IONIC_PRINT(ERR, "Queue allocation failure");
+ IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
return -EINVAL;
}
*/
/* Do not start queue with rte_eth_dev_start() */
- rxq->deferred_start = rx_conf->rx_deferred_start;
-
- rxq->offloads = offloads;
+ if (rx_conf->rx_deferred_start)
+ rxq->flags |= IONIC_QCQ_F_DEFERRED;
eth_dev->data->rx_queues[rx_queue_id] = rxq;
return 0;
}
-static void
+static __rte_always_inline void
ionic_rx_clean(struct ionic_queue *q,
uint32_t q_desc_index, uint32_t cq_desc_index,
void *cb_arg, void *service_cb_arg)
ionic_q_post(q, true, ionic_rx_clean, mbuf);
}
-static int __attribute__((cold))
+static __rte_always_inline int
ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)
{
struct ionic_queue *q = &rxq->q;
/*
* Start Receive Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
struct ionic_qcq *rxq;
int err;
- IONIC_PRINT_CALL();
-
- IONIC_PRINT(DEBUG, "Allocating RX queue buffers (size: %u)",
- frame_size);
+ if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
+ IONIC_PRINT(DEBUG, "RX queue %u already started",
+ rx_queue_id);
+ return 0;
+ }
rxq = eth_dev->data->rx_queues[rx_queue_id];
- err = ionic_lif_rxq_init(rxq);
- if (err)
- return err;
+ IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs (size: %u)",
+ rx_queue_id, rxq->q.num_descs, frame_size);
+
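+ /* Init the queue on first start; afterward, just re-enable it */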
+ if (!(rxq->flags & IONIC_QCQ_F_INITED)) {
+ err = ionic_lif_rxq_init(rxq);
+ if (err)
+ return err;
+ } else {
+ ionic_qcq_enable(rxq);
+ }
/* Allocate buffers for descriptor rings */
if (ionic_rx_fill(rxq, frame_size) != 0) {
IONIC_PRINT(ERR, "Could not alloc mbuf");
return -1;
}
- ionic_qcq_enable(rxq);
-
- eth_dev->data->rx_queue_state[rx_queue_id] =
- RTE_ETH_QUEUE_STATE_STARTED;
+ rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
-static inline void __attribute__((cold))
+static __rte_always_inline void
ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
void *service_cb_arg)
{
/*
* Stop Receive Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
struct ionic_qcq *rxq;
- IONIC_PRINT_CALL();
+ IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);
rxq = eth_dev->data->rx_queues[rx_queue_id];
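+ /* Mark the queue stopped in the ethdev state before disabling it */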
+ eth_dev->data->rx_queue_state[rx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+
ionic_qcq_disable(rxq);
/* Flush */
ionic_rxq_service(&rxq->cq, -1, NULL);
- ionic_lif_rxq_deinit(rxq);
-
- eth_dev->data->rx_queue_state[rx_queue_id] =
- RTE_ETH_QUEUE_STATE_STOPPED;
-
return 0;
}