/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

/* EF10 equal stride packed stream receive native datapath implementation */

#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_mbuf.h>
#include <rte_io.h>

#include "efx_types.h"
#include "efx_regs_ef10.h"
#include "efx.h"

#include "sfc_debug.h"
#include "sfc_tweak.h"
#include "sfc_dp_rx.h"
#include "sfc_kvargs.h"
#include "sfc_ef10.h"

/* Tunnels are not supported */
#define SFC_EF10_RX_EV_ENCAP_SUPPORT	0
#include "sfc_ef10_rx_ev.h"

#define sfc_ef10_essb_rx_err(dpq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, ERR, dpq, __VA_ARGS__)

#define sfc_ef10_essb_rx_info(dpq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, INFO, dpq, __VA_ARGS__)

/*
 * Fake length for RXQ descriptors in equal stride super-buffer mode
 * to make hardware happy.
 */
#define SFC_EF10_ESSB_RX_FAKE_BUF_SIZE	32

/*
 * Minimum number of Rx buffers the datapath allows to use.
 *
 * Each HW Rx descriptor has many Rx buffers. The number of buffers
 * per HW Rx descriptor is equal to the size of a contiguous block
 * provided by the Rx buffers memory pool. The contiguous block size
 * depends on CONFIG_RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB and the rte_mbuf
 * data size specified on memory pool creation. A typical rte_mbuf
 * data size is about 2k, which makes a bit less than 32 buffers per
 * contiguous block with the default bucket size equal to 64k.
 * Since HW Rx descriptors are pushed by 8 (see SFC_EF10_RX_WPTR_ALIGN),
 * it makes about 256 the required minimum. Double it in the advertised
 * minimum to allow for at least 2 refill blocks.
 */
#define SFC_EF10_ESSB_RX_DESCS_MIN	512
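
/*
 * Worked example (illustrative figures, not from the original code):
 * with a 64 KiB bucket and a roughly 2176 byte per-mbuf stride, one
 * contiguous block holds about 30 buffers; pushing descriptors by 8
 * gives 8 * 30 = 240 (i.e. "about 256") buffers per refill unit, and
 * doubling that for two refill blocks yields the 512 minimum above.
 */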

/*
 * Alignment requirement for the number of Rx buffers.
 *
 * There are no extra requirements on the alignment since the actual
 * number of pushed Rx buffers will be a multiple of the contiguous
 * block size, which is unknown beforehand.
 */
#define SFC_EF10_ESSB_RX_DESCS_ALIGN	1

/*
 * Maximum number of descriptors/buffers in the Rx ring.
 * It should guarantee that the corresponding event queue never overfills.
 */
#define SFC_EF10_ESSB_RXQ_LIMIT(_nevs) \
	((_nevs) - 1 /* head must not step on tail */ - \
	 (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
	 1 /* Rx error */ - 1 /* flush */)
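
/*
 * For illustration (assuming SFC_EF10_EV_PER_CACHE_LINE is 8, i.e.
 * 8-byte events in a 64-byte cache line): with a 2048-entry event
 * queue the limit is 2048 - 1 - 7 - 1 - 1 = 2038 buffers.
 */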

struct sfc_ef10_essb_rx_sw_desc {
	struct rte_mbuf			*first_mbuf;
};

struct sfc_ef10_essb_rxq {
	/* Used on data path */
	unsigned int			flags;
#define SFC_EF10_ESSB_RXQ_STARTED	0x1
#define SFC_EF10_ESSB_RXQ_NOT_RUNNING	0x2
#define SFC_EF10_ESSB_RXQ_EXCEPTION	0x4
	unsigned int			rxq_ptr_mask;
	unsigned int			block_size;
	unsigned int			buf_stride;
	unsigned int			bufs_ptr;
	unsigned int			completed;
	unsigned int			pending_id;
	unsigned int			bufs_pending;
	unsigned int			left_in_completed;
	unsigned int			left_in_pending;
	unsigned int			evq_read_ptr;
	unsigned int			evq_ptr_mask;
	efx_qword_t			*evq_hw_ring;
	struct sfc_ef10_essb_rx_sw_desc	*sw_ring;
	uint16_t			port_id;

	/* Used on refill */
	unsigned int			added;
	unsigned int			max_fill_level;
	unsigned int			refill_threshold;
	struct rte_mempool		*refill_mb_pool;
	efx_qword_t			*rxq_hw_ring;
	volatile void			*doorbell;

	/* Datapath receive queue anchor */
	struct sfc_dp_rxq		dp;
};

static inline struct sfc_ef10_essb_rxq *
sfc_ef10_essb_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
{
	return container_of(dp_rxq, struct sfc_ef10_essb_rxq, dp);
}

static struct rte_mbuf *
sfc_ef10_essb_next_mbuf(const struct sfc_ef10_essb_rxq *rxq,
			struct rte_mbuf *mbuf)
{
	struct rte_mbuf *m;

	m = (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride);
	MBUF_RAW_ALLOC_CHECK(m);
	return m;
}

static struct rte_mbuf *
sfc_ef10_essb_mbuf_by_index(const struct sfc_ef10_essb_rxq *rxq,
			    struct rte_mbuf *mbuf, unsigned int idx)
{
	struct rte_mbuf *m;

	m = (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride);
	MBUF_RAW_ALLOC_CHECK(m);
	return m;
}
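
/*
 * Descriptive note (added): return the next mbuf to hand out, either at
 * the current position within the completed block or, when that block
 * is exhausted, the first mbuf of the next software descriptor's block.
 */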
static struct rte_mbuf *
sfc_ef10_essb_maybe_next_completed(struct sfc_ef10_essb_rxq *rxq)
{
	const struct sfc_ef10_essb_rx_sw_desc *rxd;

	if (rxq->left_in_completed != 0) {
		rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
		return sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
				rxq->block_size - rxq->left_in_completed);
	} else {
		rxq->completed++;
		rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
		rxq->left_in_completed = rxq->block_size;
		return rxd->first_mbuf;
	}
}

static void
sfc_ef10_essb_rx_qrefill(struct sfc_ef10_essb_rxq *rxq)
{
	const unsigned int rxq_ptr_mask = rxq->rxq_ptr_mask;
	unsigned int free_space;
	unsigned int bulks;
	void *mbuf_blocks[SFC_EF10_RX_WPTR_ALIGN];
	unsigned int added = rxq->added;

	free_space = rxq->max_fill_level - (added - rxq->completed);

	if (free_space < rxq->refill_threshold)
		return;

	bulks = free_space / RTE_DIM(mbuf_blocks);
	/* refill_threshold guarantees that bulks is positive */
	SFC_ASSERT(bulks > 0);
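
	/*
	 * Descriptive note (added): each bulk dequeues
	 * SFC_EF10_RX_WPTR_ALIGN contiguous blocks from the mempool, one
	 * block per HW descriptor, so the number of descriptors posted
	 * per iteration matches the hardware write pointer alignment.
	 */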
	do {
		unsigned int id;
		unsigned int i;

		if (unlikely(rte_mempool_get_contig_blocks(rxq->refill_mb_pool,
				mbuf_blocks, RTE_DIM(mbuf_blocks)) < 0)) {
			struct rte_eth_dev_data *dev_data =
				rte_eth_devices[rxq->port_id].data;

			/*
			 * Incrementing the counter from different contexts
			 * is hardly safe, but all PMDs do it.
			 */
			dev_data->rx_mbuf_alloc_failed += RTE_DIM(mbuf_blocks);
			/* Return if we have posted nothing yet */
			if (added == rxq->added)
				return;
			/* Push what has been posted so far */
			break;
		}

		for (i = 0, id = added & rxq_ptr_mask;
		     i < RTE_DIM(mbuf_blocks);
		     ++i, ++id) {
			struct rte_mbuf *m = mbuf_blocks[i];
			struct sfc_ef10_essb_rx_sw_desc *rxd;

			SFC_ASSERT((id & ~rxq_ptr_mask) == 0);
			rxd = &rxq->sw_ring[id];
			rxd->first_mbuf = m;

			/* RX_KER_BYTE_CNT is ignored by firmware */
			EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
					     ESF_DZ_RX_KER_BYTE_CNT,
					     SFC_EF10_ESSB_RX_FAKE_BUF_SIZE,
					     ESF_DZ_RX_KER_BUF_ADDR,
					     rte_mbuf_data_iova_default(m));
		}

		added += RTE_DIM(mbuf_blocks);

	} while (--bulks > 0);

	SFC_ASSERT(rxq->added != added);
	rxq->added = added;
	sfc_ef10_rx_qpush(rxq->doorbell, added, rxq_ptr_mask);
}

static bool
sfc_ef10_essb_rx_event_get(struct sfc_ef10_essb_rxq *rxq, efx_qword_t *rx_ev)
{
	*rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->evq_ptr_mask];

	if (!sfc_ef10_ev_present(*rx_ev))
		return false;

	if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
		     FSE_AZ_EV_CODE_RX_EV)) {
		/*
		 * Do not move the read_ptr to keep the event for exception
		 * handling
		 */
		rxq->flags |= SFC_EF10_ESSB_RXQ_EXCEPTION;
		sfc_ef10_essb_rx_err(&rxq->dp.dpq,
				     "RxQ exception at EvQ read ptr %#x",
				     rxq->evq_read_ptr);
		return false;
	}

	rxq->evq_read_ptr++;
	return true;
}

static void
sfc_ef10_essb_rx_process_ev(struct sfc_ef10_essb_rxq *rxq, efx_qword_t rx_ev)
{
	unsigned int ready;

	ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) -
		 rxq->bufs_ptr) &
		EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
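
	/*
	 * Descriptive note (added): the subtraction above is modulo the
	 * width of DSC_PTR_LBITS, so ready is the number of buffers the
	 * NIC has delivered since the previous event, even across wrap
	 * of the low pointer bits.
	 */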

	rxq->bufs_ptr += ready;
	rxq->bufs_pending += ready;

	SFC_ASSERT(ready > 0);
	do {
		const struct sfc_ef10_essb_rx_sw_desc *rxd;
		struct rte_mbuf *m;
		unsigned int todo_bufs;
		struct rte_mbuf *m0;

		rxd = &rxq->sw_ring[rxq->pending_id];
		m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
			rxq->block_size - rxq->left_in_pending);

		if (ready < rxq->left_in_pending) {
			todo_bufs = ready;
			ready = 0;
			rxq->left_in_pending -= todo_bufs;
		} else {
			todo_bufs = rxq->left_in_pending;
			ready -= todo_bufs;
			rxq->left_in_pending = rxq->block_size;
			if (rxq->pending_id != rxq->rxq_ptr_mask)
				rxq->pending_id++;
			else
				rxq->pending_id = 0;
		}

		SFC_ASSERT(todo_bufs > 0);
		--todo_bufs;

		sfc_ef10_rx_ev_to_offloads(rx_ev, m, ~0ull);

		/* Prefetch pseudo-header */
		rte_prefetch0((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);

		m0 = m;
		while (todo_bufs-- > 0) {
			m = sfc_ef10_essb_next_mbuf(rxq, m);
			m->ol_flags = m0->ol_flags;
			m->packet_type = m0->packet_type;
			/* Prefetch pseudo-header */
			rte_prefetch0((uint8_t *)m->buf_addr +
				      RTE_PKTMBUF_HEADROOM);
		}
	} while (ready > 0);
}

static unsigned int
sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
			     struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	unsigned int n_rx_pkts = 0;
	unsigned int todo_bufs;
	struct rte_mbuf *m;

	while ((todo_bufs = RTE_MIN(nb_pkts - n_rx_pkts,
				    rxq->bufs_pending)) > 0) {
		m = sfc_ef10_essb_maybe_next_completed(rxq);

		todo_bufs = RTE_MIN(todo_bufs, rxq->left_in_completed);

		rxq->bufs_pending -= todo_bufs;
		rxq->left_in_completed -= todo_bufs;

		SFC_ASSERT(todo_bufs > 0);
		todo_bufs--;

		do {
			const efx_qword_t *qwordp;
			uint16_t pkt_len;

			/* Buffers to be discarded have 0 in packet type */
			if (unlikely(m->packet_type == 0)) {
				rte_mbuf_raw_free(m);
				goto next_buf;
			}

			rx_pkts[n_rx_pkts++] = m;

			/* Parse the pseudo-header */
			qwordp = (const efx_qword_t *)
				((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);
			pkt_len =
				EFX_QWORD_FIELD(*qwordp,
						ES_EZ_ESSB_RX_PREFIX_DATA_LEN);

			m->data_off = RTE_PKTMBUF_HEADROOM +
				      ES_EZ_ESSB_RX_PREFIX_LEN;
			m->port = rxq->port_id;

			rte_pktmbuf_pkt_len(m) = pkt_len;
			rte_pktmbuf_data_len(m) = pkt_len;

			m->ol_flags |=
				(PKT_RX_RSS_HASH *
				 !!EFX_TEST_QWORD_BIT(*qwordp,
					ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN)) |
				(PKT_RX_FDIR_ID *
				 !!EFX_TEST_QWORD_BIT(*qwordp,
					ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN)) |
				(PKT_RX_FDIR *
				 !!EFX_TEST_QWORD_BIT(*qwordp,
					ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN));
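
			/*
			 * Descriptive note (added): multiplying a flag by
			 * !!bit sets it without a branch; the double
			 * negation yields 0 or 1 from the pseudo-header
			 * validity bits tested above.
			 */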

			/* EFX_QWORD_FIELD converts little-endian to CPU */
			m->hash.rss =
				EFX_QWORD_FIELD(*qwordp,
						ES_EZ_ESSB_RX_PREFIX_HASH);
			m->hash.fdir.hi =
				EFX_QWORD_FIELD(*qwordp,
						ES_EZ_ESSB_RX_PREFIX_MARK);

next_buf:
			m = sfc_ef10_essb_next_mbuf(rxq, m);
		} while (todo_bufs-- > 0);
	}

	return n_rx_pkts;
}

static uint16_t
sfc_ef10_essb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(rx_queue);
	const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
	uint16_t n_rx_pkts;
	efx_qword_t rx_ev;

	if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
				   SFC_EF10_ESSB_RXQ_EXCEPTION)))
		return 0;

	n_rx_pkts = sfc_ef10_essb_rx_get_pending(rxq, rx_pkts, nb_pkts);

	while (n_rx_pkts != nb_pkts &&
	       sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
		/*
		 * DROP_EVENT is internal to the NIC; software should
		 * never see it and, therefore, may ignore it.
		 */
		sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
		n_rx_pkts += sfc_ef10_essb_rx_get_pending(rxq,
							  rx_pkts + n_rx_pkts,
							  nb_pkts - n_rx_pkts);
	}

	sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
			   evq_old_read_ptr, rxq->evq_read_ptr);

	/* It is not a problem if we refill in the case of an exception */
	sfc_ef10_essb_rx_qrefill(rxq);

	return n_rx_pkts;
}

static sfc_dp_rx_qdesc_npending_t sfc_ef10_essb_rx_qdesc_npending;
static unsigned int
sfc_ef10_essb_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
	const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
	efx_qword_t rx_ev;

	if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
				   SFC_EF10_ESSB_RXQ_EXCEPTION)))
		return rxq->bufs_pending;

	while (sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
		/*
		 * DROP_EVENT is internal to the NIC; software should
		 * never see it and, therefore, may ignore it.
		 */
		sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
	}

	sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
			   evq_old_read_ptr, rxq->evq_read_ptr);

	return rxq->bufs_pending;
}

static sfc_dp_rx_qdesc_status_t sfc_ef10_essb_rx_qdesc_status;
static int
sfc_ef10_essb_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
{
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
	unsigned int pending = sfc_ef10_essb_rx_qdesc_npending(dp_rxq);

	if (offset < pending)
		return RTE_ETH_RX_DESC_DONE;
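
	/*
	 * Descriptive note (added): (added - completed) HW descriptors
	 * carry block_size buffers each; left_in_completed - block_size
	 * accounts for the buffers already handed out of the current
	 * completed block.
	 */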
	if (offset < (rxq->added - rxq->completed) * rxq->block_size +
		     rxq->left_in_completed - rxq->block_size)
		return RTE_ETH_RX_DESC_AVAIL;

	return RTE_ETH_RX_DESC_UNAVAIL;
}

static sfc_dp_rx_get_dev_info_t sfc_ef10_essb_rx_get_dev_info;
static void
sfc_ef10_essb_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
{
	/*
	 * The number of descriptors just defines the maximum number of
	 * pushed descriptors (fill level).
	 */
	dev_info->rx_desc_lim.nb_min = SFC_EF10_ESSB_RX_DESCS_MIN;
	dev_info->rx_desc_lim.nb_align = SFC_EF10_ESSB_RX_DESCS_ALIGN;
}
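
/*
 * Descriptive note (added): the "bucket" mempool driver is the one that
 * provides contiguous block dequeue (rte_mempool_get_contig_blocks),
 * which this datapath relies on for refill, so it is the only pool ops
 * accepted below.
 */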
static sfc_dp_rx_pool_ops_supported_t sfc_ef10_essb_rx_pool_ops_supported;
static int
sfc_ef10_essb_rx_pool_ops_supported(const char *pool)
{
	SFC_ASSERT(pool != NULL);

	if (strcmp(pool, "bucket") == 0)
		return 0;

	return -ENOTSUP;
}

static sfc_dp_rx_qsize_up_rings_t sfc_ef10_essb_rx_qsize_up_rings;
static int
sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc,
				struct sfc_dp_rx_hw_limits *limits,
				struct rte_mempool *mb_pool,
				unsigned int *rxq_entries,
				unsigned int *evq_entries,
				unsigned int *rxq_max_fill_level)
{
	int rc;
	struct rte_mempool_info mp_info;
	unsigned int nb_hw_rx_desc;
	unsigned int max_events;

	rc = rte_mempool_ops_get_info(mb_pool, &mp_info);
	if (rc != 0)
		return -rc;
	if (mp_info.contig_block_size == 0)
		return EINVAL;

	/*
	 * Calculate the required number of hardware Rx descriptors, each
	 * carrying contig block size Rx buffers.
	 * It cannot be less than the Rx write pointer alignment plus 1
	 * in order to avoid cases when the ring is guaranteed to be
	 * empty.
	 */
	nb_hw_rx_desc = RTE_MAX(SFC_DIV_ROUND_UP(nb_rx_desc,
						 mp_info.contig_block_size),
				SFC_EF10_RX_WPTR_ALIGN + 1);
	if (nb_hw_rx_desc <= limits->rxq_min_entries) {
		*rxq_entries = limits->rxq_min_entries;
	} else {
		*rxq_entries = rte_align32pow2(nb_hw_rx_desc);
		if (*rxq_entries > limits->rxq_max_entries)
			return EINVAL;
	}

	max_events = RTE_ALIGN_FLOOR(nb_hw_rx_desc, SFC_EF10_RX_WPTR_ALIGN) *
		mp_info.contig_block_size +
		(SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ +
		1 /* Rx error */ + 1 /* flush */ + 1 /* head-tail space */;

	*evq_entries = rte_align32pow2(max_events);
	*evq_entries = RTE_MAX(*evq_entries, limits->evq_min_entries);
	*evq_entries = RTE_MIN(*evq_entries, limits->evq_max_entries);

	/*
	 * It may be that even the maximum event queue size is insufficient
	 * to handle so many Rx descriptors. If so, limit the Rx queue
	 * fill level.
	 */
	*rxq_max_fill_level = RTE_MIN(nb_rx_desc,
				      SFC_EF10_ESSB_RXQ_LIMIT(*evq_entries));

	return 0;
}
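
/*
 * Sizing example (illustrative, assuming SFC_EF10_RX_WPTR_ALIGN is 8 and
 * SFC_EF10_EV_PER_CACHE_LINE is 8, ignoring the hardware minima): for
 * nb_rx_desc = 2048 and a contiguous block of 32 buffers,
 * nb_hw_rx_desc = max(64, 9) = 64, so the RxQ gets 64 entries, and
 * max_events = 64 * 32 + 7 + 3 = 2058, rounded up to a 4096-entry EvQ.
 */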

static sfc_dp_rx_qcreate_t sfc_ef10_essb_rx_qcreate;
static int
sfc_ef10_essb_rx_qcreate(uint16_t port_id, uint16_t queue_id,
			 const struct rte_pci_addr *pci_addr, int socket_id,
			 const struct sfc_dp_rx_qcreate_info *info,
			 struct sfc_dp_rxq **dp_rxqp)
{
	struct rte_mempool * const mp = info->refill_mb_pool;
	struct rte_mempool_info mp_info;
	struct sfc_ef10_essb_rxq *rxq;
	int rc;

	rc = rte_mempool_ops_get_info(mp, &mp_info);
	if (rc != 0) {
		/* Positive errno is used in the driver */
		rc = -rc;
		goto fail_get_contig_block_size;
	}

	/* Check if the mempool provides block dequeue */
	rc = EINVAL;
	if (mp_info.contig_block_size == 0)
		goto fail_no_block_dequeue;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
					 info->rxq_entries,
					 sizeof(*rxq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL)
		goto fail_desc_alloc;

	rxq->block_size = mp_info.contig_block_size;
	/* One whole mempool element separates adjacent buffers in a block */
	rxq->buf_stride = mp->header_size + mp->elt_size + mp->trailer_size;
	rxq->rxq_ptr_mask = info->rxq_entries - 1;
	rxq->evq_ptr_mask = info->evq_entries - 1;
	rxq->evq_hw_ring = info->evq_hw_ring;
	rxq->port_id = port_id;

	rxq->max_fill_level = info->max_fill_level / mp_info.contig_block_size;
	rxq->refill_threshold =
		RTE_MAX(info->refill_threshold / mp_info.contig_block_size,
			SFC_EF10_RX_WPTR_ALIGN);
	rxq->refill_mb_pool = mp;
	rxq->rxq_hw_ring = info->rxq_hw_ring;

	rxq->doorbell = (volatile uint8_t *)info->mem_bar +
			ER_DZ_RX_DESC_UPD_REG_OFST +
			(info->hw_index << info->vi_window_shift);
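
	/*
	 * Descriptive note (added): the doorbell is the RX_DESC_UPD
	 * register of this queue's virtual interface; each VI's register
	 * window starts at hw_index shifted by the VI window size within
	 * the memory BAR.
	 */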

	sfc_ef10_essb_rx_info(&rxq->dp.dpq,
			      "block size is %u, buf stride is %u",
			      rxq->block_size, rxq->buf_stride);
	sfc_ef10_essb_rx_info(&rxq->dp.dpq,
			      "max fill level is %u descs (%u bufs), "
			      "refill threshold %u descs (%u bufs)",
			      rxq->max_fill_level,
			      rxq->max_fill_level * rxq->block_size,
			      rxq->refill_threshold,
			      rxq->refill_threshold * rxq->block_size);

	*dp_rxqp = &rxq->dp;
	return 0;

fail_desc_alloc:
	rte_free(rxq);

fail_rxq_alloc:
fail_no_block_dequeue:
fail_get_contig_block_size:
	return rc;
}

static sfc_dp_rx_qdestroy_t sfc_ef10_essb_rx_qdestroy;
static void
sfc_ef10_essb_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);

	rte_free(rxq->sw_ring);
	rte_free(rxq);
}

static sfc_dp_rx_qstart_t sfc_ef10_essb_rx_qstart;
static int
sfc_ef10_essb_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
{
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);

	rxq->evq_read_ptr = evq_read_ptr;

	/* Initialize before refill */
	rxq->completed = rxq->pending_id = rxq->added = 0;
	rxq->left_in_completed = rxq->left_in_pending = rxq->block_size;
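	/*
	 * Descriptive note (added, interpretation): UINT_MAX acts as -1
	 * modulo the DSC_PTR_LBITS field width, so the first Rx event's
	 * pointer delta counts every buffer delivered since queue start.
	 */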
	rxq->bufs_ptr = UINT_MAX;
	rxq->bufs_pending = 0;

	sfc_ef10_essb_rx_qrefill(rxq);

	rxq->flags |= SFC_EF10_ESSB_RXQ_STARTED;
	rxq->flags &=
		~(SFC_EF10_ESSB_RXQ_NOT_RUNNING | SFC_EF10_ESSB_RXQ_EXCEPTION);

	return 0;
}

static sfc_dp_rx_qstop_t sfc_ef10_essb_rx_qstop;
static void
sfc_ef10_essb_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
{
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);

	rxq->flags |= SFC_EF10_ESSB_RXQ_NOT_RUNNING;

	*evq_read_ptr = rxq->evq_read_ptr;
}

static sfc_dp_rx_qrx_ev_t sfc_ef10_essb_rx_qrx_ev;
static bool
sfc_ef10_essb_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
{
	__rte_unused struct sfc_ef10_essb_rxq *rxq;

	rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq->flags & SFC_EF10_ESSB_RXQ_NOT_RUNNING);

	/*
	 * It is safe to ignore the Rx event since we free all mbufs on
	 * queue purge anyway.
	 */

	return false;
}

static sfc_dp_rx_qpurge_t sfc_ef10_essb_rx_qpurge;
static void
sfc_ef10_essb_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
	unsigned int i;
	const struct sfc_ef10_essb_rx_sw_desc *rxd;
	struct rte_mbuf *m;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_ring[i & rxq->rxq_ptr_mask];
		m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
				rxq->block_size - rxq->left_in_completed);
		while (rxq->left_in_completed > 0) {
			rte_mbuf_raw_free(m);
			m = sfc_ef10_essb_next_mbuf(rxq, m);
			rxq->left_in_completed--;
		}
		rxq->left_in_completed = rxq->block_size;
	}

	rxq->flags &= ~SFC_EF10_ESSB_RXQ_STARTED;
}

struct sfc_dp_rx sfc_ef10_essb_rx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EF10_ESSB,
		.type		= SFC_DP_RX,
		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10 |
				  SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER,
	},
	.features		= SFC_DP_RX_FEAT_FLOW_FLAG |
				  SFC_DP_RX_FEAT_FLOW_MARK,
	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
				  DEV_RX_OFFLOAD_RSS_HASH,
	.queue_offload_capa	= 0,
	.get_dev_info		= sfc_ef10_essb_rx_get_dev_info,
	.pool_ops_supported	= sfc_ef10_essb_rx_pool_ops_supported,
	.qsize_up_rings		= sfc_ef10_essb_rx_qsize_up_rings,
	.qcreate		= sfc_ef10_essb_rx_qcreate,
	.qdestroy		= sfc_ef10_essb_rx_qdestroy,
	.qstart			= sfc_ef10_essb_rx_qstart,
	.qstop			= sfc_ef10_essb_rx_qstop,
	.qrx_ev			= sfc_ef10_essb_rx_qrx_ev,
	.qpurge			= sfc_ef10_essb_rx_qpurge,
	.supported_ptypes_get	= sfc_ef10_supported_ptypes_get,
	.qdesc_npending		= sfc_ef10_essb_rx_qdesc_npending,
	.qdesc_status		= sfc_ef10_essb_rx_qdesc_status,
	.pkt_burst		= sfc_ef10_essb_recv_pkts,
};