/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ether.h>
#include <rte_ethdev.h>

#include "avf_log.h"
#include "base/avf_prototype.h"
#include "base/avf_type.h"
#include "avf.h"
#include "avf_rxtx.h"
static inline int
check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
{
	/* The following constraints must be satisfied:
	 *   thresh >= AVF_RX_MAX_BURST
	 *   thresh < rxq->nb_rx_desc
	 *   (rxq->nb_rx_desc % thresh) == 0
	 */
	if (thresh < AVF_RX_MAX_BURST ||
	    thresh >= nb_desc ||
	    (nb_desc % thresh != 0)) {
		PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u, "
			     "greater than or equal to %u, "
			     "and a divisor of %u",
			     thresh, nb_desc, AVF_RX_MAX_BURST, nb_desc);
		return -EINVAL;
	}
	return 0;
}
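/* Illustration (editorial, not from the original source): with
 * nb_desc = 1024 and assuming AVF_RX_MAX_BURST is 32, the check above
 * accepts only the divisors of 1024 in the range [32, 1023], i.e.
 * rx_free_thresh values of 32, 64, 128, 256 or 512.
 */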
static inline int
check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
		uint16_t tx_free_thresh)
{
	/* TX descriptors will have their RS bit set after tx_rs_thresh
	 * descriptors have been used. The TX descriptor ring will be cleaned
	 * after tx_free_thresh descriptors are used or if the number of
	 * descriptors required to transmit a packet is greater than the
	 * number of free TX descriptors.
	 *
	 * The following constraints must be satisfied:
	 *  - tx_rs_thresh must be less than the size of the ring minus 2.
	 *  - tx_free_thresh must be less than the size of the ring minus 3.
	 *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
	 *  - tx_rs_thresh must be a divisor of the ring size.
	 *
	 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
	 * race condition, hence the maximum threshold constraints. When set
	 * to zero use default values.
	 */
	if (tx_rs_thresh >= (nb_desc - 2)) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
			     "number of TX descriptors (%u) minus 2",
			     tx_rs_thresh, nb_desc);
		return -EINVAL;
	}
	if (tx_free_thresh >= (nb_desc - 3)) {
		PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
			     "number of TX descriptors (%u) minus 3.",
			     tx_free_thresh, nb_desc);
		return -EINVAL;
	}
	if (tx_rs_thresh > tx_free_thresh) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
			     "equal to tx_free_thresh (%u).",
			     tx_rs_thresh, tx_free_thresh);
		return -EINVAL;
	}
	if ((nb_desc % tx_rs_thresh) != 0) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
			     "number of TX descriptors (%u).",
			     tx_rs_thresh, nb_desc);
		return -EINVAL;
	}
	return 0;
}
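/* Illustration (editorial, not from the original source): with
 * nb_desc = 512 and assuming the defaults DEFAULT_TX_RS_THRESH = 32 and
 * DEFAULT_TX_FREE_THRESH = 32, all four constraints hold:
 * 32 < 510, 32 < 509, 32 <= 32, and 512 % 32 == 0.
 */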
static inline void
reset_rx_queue(struct avf_rx_queue *rxq)
{
	uint16_t len;
	uint32_t i;

	if (!rxq)
		return;

	len = rxq->nb_rx_desc + AVF_RX_MAX_BURST;

	/* Use a 32-bit index: len * sizeof(union avf_rx_desc) can exceed
	 * the range of uint16_t for the largest rings.
	 */
	for (i = 0; i < len * sizeof(union avf_rx_desc); i++)
		((volatile char *)rxq->rx_ring)[i] = 0;

	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));

	/* The extra sw_ring entries back the bulk-receive path; point
	 * them at the fake mbuf so they are never NULL.
	 */
	for (i = 0; i < AVF_RX_MAX_BURST; i++)
		rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;

	rxq->rx_tail = 0;
	rxq->nb_rx_hold = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
}
static inline void
reset_tx_queue(struct avf_tx_queue *txq)
{
	struct avf_tx_entry *txe;
	uint32_t i, size;
	uint16_t prev;

	if (!txq) {
		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
		return;
	}

	txe = txq->sw_ring;
	/* 32-bit arithmetic: nb_tx_desc * sizeof(struct avf_tx_desc) can
	 * overflow a uint16_t for the largest ring sizes.
	 */
	size = sizeof(struct avf_tx_desc) * txq->nb_tx_desc;
	for (i = 0; i < size; i++)
		((volatile char *)txq->tx_ring)[i] = 0;

	prev = (uint16_t)(txq->nb_tx_desc - 1);
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txq->tx_ring[i].cmd_type_offset_bsz =
			rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DESC_DONE);
		txe[i].mbuf = NULL;
		txe[i].last_id = i;
		txe[prev].next_id = i;
		prev = i;
	}

	txq->tx_tail = 0;
	txq->nb_used = 0;

	txq->last_desc_cleaned = txq->nb_tx_desc - 1;
	txq->nb_free = txq->nb_tx_desc - 1;

	txq->next_dd = txq->rs_thresh - 1;
	txq->next_rs = txq->rs_thresh - 1;
}
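/* Illustration (editorial, not from the original source): the loop above
 * links the sw_ring entries into a circular list. For nb_tx_desc = 4 it
 * yields next_id chains 3->0, 0->1, 1->2, 2->3, and every descriptor
 * starts in the DESC_DONE state, so the clean-up path treats the whole
 * ring as reclaimable.
 */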
static int
alloc_rxq_mbufs(struct avf_rx_queue *rxq)
{
	volatile union avf_rx_desc *rxd;
	struct rte_mbuf *mbuf = NULL;
	uint64_t dma_addr;
	uint16_t i;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		mbuf = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(!mbuf)) {
			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
			return -ENOMEM;
		}

		rte_mbuf_refcnt_set(mbuf, 1);
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;

		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

		rxd = &rxq->rx_ring[i];
		rxd->read.pkt_addr = dma_addr;
		rxd->read.hdr_addr = 0;
#ifndef RTE_LIBRTE_AVF_16BYTE_RX_DESC
		rxd->read.rsvd1 = 0;
		rxd->read.rsvd2 = 0;
#endif

		rxq->sw_ring[i] = mbuf;
	}

	return 0;
}
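/* Editorial note: pkt_addr/hdr_addr are the "read" (submission) view of
 * union avf_rx_desc; on completion the hardware overlays the same bytes
 * with its write-back view, which is what the RX burst path parses.
 * Zeroing hdr_addr also clears the DD (descriptor done) bit that lives
 * in the overlaying status word, so stale completions are never seen.
 */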
static inline void
release_rxq_mbufs(struct avf_rx_queue *rxq)
{
	uint16_t i;

	if (!rxq || !rxq->sw_ring)
		return;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		if (rxq->sw_ring[i]) {
			rte_pktmbuf_free_seg(rxq->sw_ring[i]);
			rxq->sw_ring[i] = NULL;
		}
	}
}
static inline void
release_txq_mbufs(struct avf_tx_queue *txq)
{
	uint16_t i;

	if (!txq || !txq->sw_ring) {
		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
		return;
	}

	for (i = 0; i < txq->nb_tx_desc; i++) {
		if (txq->sw_ring[i].mbuf) {
			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
			txq->sw_ring[i].mbuf = NULL;
		}
	}
}
int
avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		       uint16_t nb_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct avf_rx_queue *rxq;
	const struct rte_memzone *mz;
	uint32_t ring_size;
	uint16_t len;
	uint16_t rx_free_thresh;

	PMD_INIT_FUNC_TRACE();

	if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
	    nb_desc > AVF_MAX_RING_DESC ||
	    nb_desc < AVF_MIN_RING_DESC) {
		PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
			     "invalid", nb_desc);
		return -EINVAL;
	}

	/* Check free threshold */
	rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
			 AVF_DEFAULT_RX_FREE_THRESH :
			 rx_conf->rx_free_thresh;
	if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
		return -EINVAL;
	/* Free memory if needed */
	if (dev->data->rx_queues[queue_idx]) {
		avf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	/* Allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("avf rxq",
				 sizeof(struct avf_rx_queue),
				 RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (!rxq) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
			     "rx queue data structure");
		return -ENOMEM;
	}

	rxq->mp = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->rx_free_thresh = rx_free_thresh;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->crc_len = 0; /* CRC stripping by default */
	rxq->rx_deferred_start = rx_conf->rx_deferred_start;

	len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
	rxq->rx_buf_len = RTE_ALIGN(len, (1 << AVF_RXQ_CTX_DBUFF_SHIFT));
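	/* Illustration (editorial, not from the original source): with a
	 * mempool created with RTE_MBUF_DEFAULT_BUF_SIZE (2048 bytes of
	 * data room plus 128 bytes of headroom), len is 2048, already a
	 * multiple of the 128-byte granularity implied by an assumed
	 * AVF_RXQ_CTX_DBUFF_SHIFT of 7, so rx_buf_len stays 2048.
	 */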
	/* Allocate the software ring. */
	len = nb_desc + AVF_RX_MAX_BURST;
	rxq->sw_ring =
		rte_zmalloc_socket("avf rx sw ring",
				   sizeof(struct rte_mbuf *) * len,
				   RTE_CACHE_LINE_SIZE,
				   socket_id);
	if (!rxq->sw_ring) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
		rte_free(rxq);
		return -ENOMEM;
	}

	/* Allocate the maximum number of RX ring hardware descriptors plus
	 * a little more to support bulk allocation.
	 */
	len = AVF_MAX_RING_DESC + AVF_RX_MAX_BURST;
	ring_size = RTE_ALIGN(len * sizeof(union avf_rx_desc),
			      AVF_DMA_MEM_ALIGN);
	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
				      ring_size, AVF_RING_BASE_ALIGN,
				      socket_id);
	if (!mz) {
		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
		rte_free(rxq->sw_ring);
		rte_free(rxq);
		return -ENOMEM;
	}

	/* Zero all the descriptors in the ring. */
	memset(mz->addr, 0, ring_size);
	rxq->rx_ring_phys_addr = mz->iova;
	rxq->rx_ring = (union avf_rx_desc *)mz->addr;

	rxq->mz = mz;
	reset_rx_queue(rxq);
	rxq->q_set = TRUE;
	dev->data->rx_queues[queue_idx] = rxq;
	rxq->qrx_tail = hw->hw_addr + AVF_QRX_TAIL1(rxq->queue_id);

	return 0;
}
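/* Usage sketch (editorial, not from the original source): an application
 * reaches this handler through the generic ethdev API, e.g.
 *
 *	struct rte_mempool *mp =
 *		rte_pktmbuf_pool_create("rx_pool", 4096, 256, 0,
 *					RTE_MBUF_DEFAULT_BUF_SIZE,
 *					rte_socket_id());
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *				     NULL, mp);
 *
 * rte_eth_rx_queue_setup() validates its arguments and then invokes this
 * function via dev->dev_ops->rx_queue_setup.
 */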
int
avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
		       uint16_t queue_idx,
		       uint16_t nb_desc,
		       unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct avf_tx_queue *txq;
	const struct rte_memzone *mz;
	uint32_t ring_size;
	uint16_t tx_rs_thresh, tx_free_thresh;

	PMD_INIT_FUNC_TRACE();

	if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
	    nb_desc > AVF_MAX_RING_DESC ||
	    nb_desc < AVF_MIN_RING_DESC) {
		PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
			     "invalid", nb_desc);
		return -EINVAL;
	}

	tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
		tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
		tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
	/* Pass tx_free_thresh (not tx_rs_thresh twice) and honour the
	 * result; the constraint checks are pointless otherwise.
	 */
	if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
		return -EINVAL;
	/* Free memory if needed. */
	if (dev->data->tx_queues[queue_idx]) {
		avf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
		dev->data->tx_queues[queue_idx] = NULL;
	}

	/* Allocate the TX queue data structure. */
	txq = rte_zmalloc_socket("avf txq",
				 sizeof(struct avf_tx_queue),
				 RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (!txq) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
			     "tx queue structure");
		return -ENOMEM;
	}

	txq->nb_tx_desc = nb_desc;
	txq->rs_thresh = tx_rs_thresh;
	txq->free_thresh = tx_free_thresh;
	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->txq_flags = tx_conf->txq_flags;
	txq->tx_deferred_start = tx_conf->tx_deferred_start;

	/* Allocate software ring */
	txq->sw_ring =
		rte_zmalloc_socket("avf tx sw ring",
				   sizeof(struct avf_tx_entry) * nb_desc,
				   RTE_CACHE_LINE_SIZE,
				   socket_id);
	if (!txq->sw_ring) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
		rte_free(txq);
		return -ENOMEM;
	}

	/* Allocate TX hardware ring descriptors. */
	ring_size = sizeof(struct avf_tx_desc) * AVF_MAX_RING_DESC;
	ring_size = RTE_ALIGN(ring_size, AVF_DMA_MEM_ALIGN);
	mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
				      ring_size, AVF_RING_BASE_ALIGN,
				      socket_id);
	if (!mz) {
		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
		rte_free(txq->sw_ring);
		rte_free(txq);
		return -ENOMEM;
	}
	txq->tx_ring_phys_addr = mz->iova;
	txq->tx_ring = (struct avf_tx_desc *)mz->addr;

	txq->mz = mz;
	reset_tx_queue(txq);
	txq->q_set = TRUE;
	dev->data->tx_queues[queue_idx] = txq;
	txq->qtx_tail = hw->hw_addr + AVF_QTX_TAIL1(queue_idx);

	return 0;
}
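/* Design note (editorial): both queue-setup paths reserve a memzone sized
 * for AVF_MAX_RING_DESC rather than for nb_desc. A later call that
 * reconfigures the queue with a larger ring can then reuse the same DMA
 * zone, since rte_eth_dma_zone_reserve() returns an already existing zone
 * on a name match, assuming it behaves as documented.
 */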
int
avf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct avf_rx_queue *rxq;
	int err = 0;

	PMD_DRV_FUNC_TRACE();

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	rxq = dev->data->rx_queues[rx_queue_id];

	err = alloc_rxq_mbufs(rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
		return err;
	}

	rte_wmb();

	/* Init the RX tail register. */
	AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	AVF_WRITE_FLUSH(hw);

	/* Ready to switch the queue on */
	err = avf_switch_queue(adapter, rx_queue_id, TRUE, TRUE);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
			    rx_queue_id);
	else
		dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_STARTED;

	return err;
}
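/* Editorial note: the RX tail is initialised to nb_rx_desc - 1, handing
 * all but one descriptor to the hardware (keeping one slot open is the
 * usual way to distinguish a full ring from an empty one), while the TX
 * tail below starts at 0 because no packets have been posted yet.
 */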
int
avf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct avf_tx_queue *txq;
	int err = 0;

	PMD_DRV_FUNC_TRACE();

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -EINVAL;

	txq = dev->data->tx_queues[tx_queue_id];

	/* Init the TX tail register. */
	AVF_PCI_REG_WRITE(txq->qtx_tail, 0);
	AVF_WRITE_FLUSH(hw);

	/* Ready to switch the queue on */
	err = avf_switch_queue(adapter, tx_queue_id, FALSE, TRUE);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
			    tx_queue_id);
	else
		dev->data->tx_queue_state[tx_queue_id] =
			RTE_ETH_QUEUE_STATE_STARTED;

	return err;
}
int
avf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_rx_queue *rxq;
	int err;

	PMD_DRV_FUNC_TRACE();

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	err = avf_switch_queue(adapter, rx_queue_id, TRUE, FALSE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
			    rx_queue_id);
		return err;
	}

	rxq = dev->data->rx_queues[rx_queue_id];
	release_rxq_mbufs(rxq);
	reset_rx_queue(rxq);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}
int
avf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_tx_queue *txq;
	int err;

	PMD_DRV_FUNC_TRACE();

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -EINVAL;

	err = avf_switch_queue(adapter, tx_queue_id, FALSE, FALSE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
			    tx_queue_id);
		return err;
	}

	txq = dev->data->tx_queues[tx_queue_id];
	release_txq_mbufs(txq);
	reset_tx_queue(txq);
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}
void
avf_dev_rx_queue_release(void *rxq)
{
	struct avf_rx_queue *q = (struct avf_rx_queue *)rxq;

	if (!q)
		return;

	release_rxq_mbufs(q);
	rte_free(q->sw_ring);
	rte_memzone_free(q->mz);
	rte_free(q);
}
void
avf_dev_tx_queue_release(void *txq)
{
	struct avf_tx_queue *q = (struct avf_tx_queue *)txq;

	if (!q)
		return;

	release_txq_mbufs(q);
	rte_free(q->sw_ring);
	rte_memzone_free(q->mz);
	rte_free(q);
}
void
avf_stop_queues(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_rx_queue *rxq;
	struct avf_tx_queue *txq;
	int ret, i;

	/* Stop all queues */
	ret = avf_disable_queues(adapter);
	if (ret)
		PMD_DRV_LOG(WARNING, "Failed to stop queues");

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (!txq)
			continue;
		release_txq_mbufs(txq);
		reset_tx_queue(txq);
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (!rxq)
			continue;
		release_rxq_mbufs(rxq);
		reset_rx_queue(rxq);
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
}