/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

/* EF10 equal stride packed stream receive native datapath implementation */

#include <stdbool.h>
#include <errno.h>
#include <limits.h>
#include <string.h>

#include <rte_byteorder.h>
#include <rte_mbuf.h>
#include <rte_io.h>
#include <rte_malloc.h>

#include "efx_types.h"
#include "efx_regs_ef10.h"
#include "efx.h"

#include "sfc_debug.h"
#include "sfc_tweak.h"
#include "sfc_dp_rx.h"
#include "sfc_kvargs.h"
#include "sfc_ef10.h"

/* Tunnels are not supported */
#define SFC_EF10_RX_EV_ENCAP_SUPPORT	0
#include "sfc_ef10_rx_ev.h"

#define sfc_ef10_essb_rx_err(dpq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, ERR, dpq, __VA_ARGS__)

#define sfc_ef10_essb_rx_info(dpq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, INFO, dpq, __VA_ARGS__)

/*
 * Fake length for RXQ descriptors in equal stride super-buffer mode
 * to make hardware happy.
 */
#define SFC_EF10_ESSB_RX_FAKE_BUF_SIZE	32

/*
 * Minimum number of Rx buffers the datapath allows using.
 *
 * Each HW Rx descriptor has many Rx buffers. The number of buffers
 * per HW Rx descriptor is equal to the size of a contiguous block
 * provided by the Rx buffers memory pool. The contiguous block size
 * depends on CONFIG_RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB and the rte_mbuf
 * data size specified on memory pool creation. Typical rte_mbuf
 * data size is about 2k, which makes a bit less than 32 buffers per
 * contiguous block with the default bucket size of 64k.
 * Since HW Rx descriptors are pushed by 8 (see SFC_EF10_RX_WPTR_ALIGN),
 * the required minimum is about 256 buffers. It is doubled in the
 * advertised minimum to allow for at least 2 refill blocks.
 */
#define SFC_EF10_ESSB_RX_DESCS_MIN	512
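
/*
 * Worked example (hypothetical sizes): a 64 KiB bucket with 2176-byte
 * elements (2 KiB data room plus per-mbuf overhead) yields
 * 65536 / 2176 = 30 buffers per contiguous block, so one 8-descriptor
 * push covers 8 * 30 = 240 buffers and 512 leaves room for two refills.
 */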

/*
 * Alignment requirement for the number of Rx buffers.
 *
 * There are no extra requirements on alignment since the actual number
 * of pushed Rx buffers will be a multiple of the contiguous block size,
 * which is unknown beforehand.
 */
#define SFC_EF10_ESSB_RX_DESCS_ALIGN	1

/*
 * Maximum number of descriptors/buffers in the Rx ring.
 * It should guarantee that the corresponding event queue never
 * overflows.
 */
#define SFC_EF10_ESSB_RXQ_LIMIT(_nevs) \
	((_nevs) - 1 /* head must not step on tail */ - \
	 (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
	 1 /* Rx error */ - 1 /* flush */)
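
/*
 * For example, assuming a 4096-entry event queue and 8 events per
 * cache line (8-byte events, 64-byte cache line), the limit is
 * 4096 - 1 - (8 - 1) - 1 - 1 = 4086 buffers.
 */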

struct sfc_ef10_essb_rx_sw_desc {
	struct rte_mbuf			*first_mbuf;
};

struct sfc_ef10_essb_rxq {
	/* Used on data path */
	unsigned int			flags;
#define SFC_EF10_ESSB_RXQ_STARTED	0x1
#define SFC_EF10_ESSB_RXQ_NOT_RUNNING	0x2
#define SFC_EF10_ESSB_RXQ_EXCEPTION	0x4
	unsigned int			rxq_ptr_mask;
	unsigned int			block_size;
	unsigned int			buf_stride;
	unsigned int			bufs_ptr;
	unsigned int			completed;
	unsigned int			pending_id;
	unsigned int			bufs_pending;
	unsigned int			left_in_completed;
	unsigned int			left_in_pending;
	unsigned int			evq_read_ptr;
	unsigned int			evq_ptr_mask;
	efx_qword_t			*evq_hw_ring;
	struct sfc_ef10_essb_rx_sw_desc	*sw_ring;
	uint16_t			port_id;

	/* Used on refill */
	unsigned int			added;
	unsigned int			max_fill_level;
	unsigned int			refill_threshold;
	struct rte_mempool		*refill_mb_pool;
	efx_qword_t			*rxq_hw_ring;
	volatile void			*doorbell;

	/* Datapath receive queue anchor */
	struct sfc_dp_rxq		dp;
};

static inline struct sfc_ef10_essb_rxq *
sfc_ef10_essb_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
{
	return container_of(dp_rxq, struct sfc_ef10_essb_rxq, dp);
}

static struct rte_mbuf *
sfc_ef10_essb_next_mbuf(const struct sfc_ef10_essb_rxq *rxq,
			struct rte_mbuf *mbuf)
{
	struct rte_mbuf *m;

	m = (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride);
	MBUF_RAW_ALLOC_CHECK(m);
	return m;
}

static struct rte_mbuf *
sfc_ef10_essb_mbuf_by_index(const struct sfc_ef10_essb_rxq *rxq,
			    struct rte_mbuf *mbuf, unsigned int idx)
{
	struct rte_mbuf *m;

	m = (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride);
	MBUF_RAW_ALLOC_CHECK(m);
	return m;
}

static struct rte_mbuf *
sfc_ef10_essb_maybe_next_completed(struct sfc_ef10_essb_rxq *rxq)
{
	const struct sfc_ef10_essb_rx_sw_desc *rxd;

	if (rxq->left_in_completed != 0) {
		rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
		return sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
				rxq->block_size - rxq->left_in_completed);
	} else {
		rxq->completed++;
		rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
		rxq->left_in_completed = rxq->block_size;
		return rxd->first_mbuf;
	}
}

static void
sfc_ef10_essb_rx_qrefill(struct sfc_ef10_essb_rxq *rxq)
{
	const unsigned int rxq_ptr_mask = rxq->rxq_ptr_mask;
	unsigned int free_space;
	unsigned int bulks;
	void *mbuf_blocks[SFC_EF10_RX_WPTR_ALIGN];
	unsigned int added = rxq->added;

	free_space = rxq->max_fill_level - (added - rxq->completed);

	if (free_space < rxq->refill_threshold)
		return;

	bulks = free_space / RTE_DIM(mbuf_blocks);
	/* refill_threshold guarantees that bulks is positive */
	SFC_ASSERT(bulks > 0);

	do {
		unsigned int id;
		unsigned int i;

		if (unlikely(rte_mempool_get_contig_blocks(rxq->refill_mb_pool,
				mbuf_blocks, RTE_DIM(mbuf_blocks)) < 0)) {
			struct rte_eth_dev_data *dev_data =
				rte_eth_devices[rxq->port_id].data;

			/*
			 * Incrementing the counter from different contexts
			 * is hardly safe, but all PMDs do it.
			 */
			dev_data->rx_mbuf_alloc_failed += RTE_DIM(mbuf_blocks);
			/* Return if we have posted nothing yet */
			if (added == rxq->added)
				return;
			/* Push what has been posted so far */
			break;
		}

		for (i = 0, id = added & rxq_ptr_mask;
		     i < RTE_DIM(mbuf_blocks);
		     ++i, ++id) {
			struct rte_mbuf *m = mbuf_blocks[i];
			struct sfc_ef10_essb_rx_sw_desc *rxd;

			SFC_ASSERT((id & ~rxq_ptr_mask) == 0);
			rxd = &rxq->sw_ring[id];
			rxd->first_mbuf = m;

			/* RX_KER_BYTE_CNT is ignored by firmware */
			EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
					     ESF_DZ_RX_KER_BYTE_CNT,
					     SFC_EF10_ESSB_RX_FAKE_BUF_SIZE,
					     ESF_DZ_RX_KER_BUF_ADDR,
					     rte_mbuf_data_iova_default(m));
		}

		added += RTE_DIM(mbuf_blocks);
	} while (--bulks > 0);

	SFC_ASSERT(rxq->added != added);
	rxq->added = added;
	sfc_ef10_rx_qpush(rxq->doorbell, added, rxq_ptr_mask);
}

static bool
sfc_ef10_essb_rx_event_get(struct sfc_ef10_essb_rxq *rxq, efx_qword_t *rx_ev)
{
	*rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->evq_ptr_mask];

	if (!sfc_ef10_ev_present(*rx_ev))
		return false;

	if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
		     FSE_AZ_EV_CODE_RX_EV)) {
		/*
		 * Do not move read_ptr to keep the event for exception
		 * handling
		 */
		rxq->flags |= SFC_EF10_ESSB_RXQ_EXCEPTION;
		sfc_ef10_essb_rx_err(&rxq->dp.dpq,
				     "RxQ exception at EvQ read ptr %#x",
				     rxq->evq_read_ptr);
		return false;
	}

	rxq->evq_read_ptr++;
	return true;
}

static void
sfc_ef10_essb_rx_process_ev(struct sfc_ef10_essb_rxq *rxq, efx_qword_t rx_ev)
{
	unsigned int ready;

	ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) -
		 rxq->bufs_ptr) &
		EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);

	rxq->bufs_ptr += ready;
	rxq->bufs_pending += ready;

	SFC_ASSERT(ready > 0);
	do {
		const struct sfc_ef10_essb_rx_sw_desc *rxd;
		struct rte_mbuf *m;
		unsigned int todo_bufs;
		struct rte_mbuf *m0;

		rxd = &rxq->sw_ring[rxq->pending_id];
		m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
			rxq->block_size - rxq->left_in_pending);

		if (ready < rxq->left_in_pending) {
			todo_bufs = ready;
			ready = 0;
			rxq->left_in_pending -= todo_bufs;
		} else {
			todo_bufs = rxq->left_in_pending;
			ready -= todo_bufs;
			rxq->left_in_pending = rxq->block_size;
			if (rxq->pending_id != rxq->rxq_ptr_mask)
				rxq->pending_id++;
			else
				rxq->pending_id = 0;
		}

		SFC_ASSERT(todo_bufs > 0);
		todo_bufs--;

		sfc_ef10_rx_ev_to_offloads(rx_ev, m, ~0ull);

		/* Prefetch pseudo-header */
		rte_prefetch0((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);

		m0 = m;
		while (todo_bufs-- > 0) {
			m = sfc_ef10_essb_next_mbuf(rxq, m);
			m->ol_flags = m0->ol_flags;
			m->packet_type = m0->packet_type;
			/* Prefetch pseudo-header */
			rte_prefetch0((uint8_t *)m->buf_addr +
				      RTE_PKTMBUF_HEADROOM);
		}
	} while (ready > 0);
}

static unsigned int
sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
			     struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	unsigned int n_rx_pkts = 0;
	unsigned int todo_bufs;
	struct rte_mbuf *m;

	while ((todo_bufs = RTE_MIN(nb_pkts - n_rx_pkts,
				    rxq->bufs_pending)) > 0) {
		m = sfc_ef10_essb_maybe_next_completed(rxq);

		todo_bufs = RTE_MIN(todo_bufs, rxq->left_in_completed);

		rxq->bufs_pending -= todo_bufs;
		rxq->left_in_completed -= todo_bufs;

		SFC_ASSERT(todo_bufs > 0);
		todo_bufs--;

		do {
			const efx_qword_t *qwordp;
			uint16_t pkt_len;

			/* Buffers to be discarded have 0 in packet type */
			if (unlikely(m->packet_type == 0)) {
				rte_mbuf_raw_free(m);
				goto next_buf;
			}

			rx_pkts[n_rx_pkts++] = m;

			/* Parse pseudo-header */
			qwordp = (const efx_qword_t *)
				((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);
			pkt_len =
				EFX_QWORD_FIELD(*qwordp,
						ES_EZ_ESSB_RX_PREFIX_DATA_LEN);

			m->data_off = RTE_PKTMBUF_HEADROOM +
				      ES_EZ_ESSB_RX_PREFIX_LEN;
			m->port = rxq->port_id;

			rte_pktmbuf_pkt_len(m) = pkt_len;
			rte_pktmbuf_data_len(m) = pkt_len;

			m->ol_flags |=
				(PKT_RX_RSS_HASH *
				 !!EFX_TEST_QWORD_BIT(*qwordp,
					ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN)) |
				(PKT_RX_FDIR_ID *
				 !!EFX_TEST_QWORD_BIT(*qwordp,
					ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN)) |
				(PKT_RX_FDIR *
				 !!EFX_TEST_QWORD_BIT(*qwordp,
					ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN));

			/* EFX_QWORD_FIELD converts little-endian to CPU */
			m->hash.rss =
				EFX_QWORD_FIELD(*qwordp,
						ES_EZ_ESSB_RX_PREFIX_HASH);
			m->hash.fdir.hi =
				EFX_QWORD_FIELD(*qwordp,
						ES_EZ_ESSB_RX_PREFIX_MARK);

next_buf:
			m = sfc_ef10_essb_next_mbuf(rxq, m);
		} while (todo_bufs-- > 0);
	}

	return n_rx_pkts;
}

static uint16_t
sfc_ef10_essb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(rx_queue);
	const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
	uint16_t n_rx_pkts;
	efx_qword_t rx_ev;

	if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
				   SFC_EF10_ESSB_RXQ_EXCEPTION)))
		return 0;

	n_rx_pkts = sfc_ef10_essb_rx_get_pending(rxq, rx_pkts, nb_pkts);

	while (n_rx_pkts != nb_pkts &&
	       sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
		/*
		 * DROP_EVENT is internal to the NIC, software should
		 * never see it and, therefore, may ignore it.
		 */
		sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
		n_rx_pkts += sfc_ef10_essb_rx_get_pending(rxq,
							  rx_pkts + n_rx_pkts,
							  nb_pkts - n_rx_pkts);
	}

	sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
			   evq_old_read_ptr, rxq->evq_read_ptr);

	/* It is not a problem if we refill in the case of exception */
	sfc_ef10_essb_rx_qrefill(rxq);

	return n_rx_pkts;
}

static sfc_dp_rx_qdesc_npending_t sfc_ef10_essb_rx_qdesc_npending;
static unsigned int
sfc_ef10_essb_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
	const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
	efx_qword_t rx_ev;

	if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
				   SFC_EF10_ESSB_RXQ_EXCEPTION)))
		return rxq->bufs_pending;

	while (sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
		/*
		 * DROP_EVENT is internal to the NIC, software should
		 * never see it and, therefore, may ignore it.
		 */
		sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
	}

	sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
			   evq_old_read_ptr, rxq->evq_read_ptr);

	return rxq->bufs_pending;
}
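
/*
 * Rx descriptor status: an offset smaller than the number of pending
 * buffers is DONE (data received); an offset within what has been
 * posted to HW but not yet completed is AVAIL; anything beyond is
 * UNAVAIL. Worked example (illustrative numbers): with 4 posted blocks
 * of 32 buffers, 10 buffers pending and 20 buffers left in the
 * currently completed block, offsets 0..9 are DONE and offsets
 * 10..115 are AVAIL since 4 * 32 + 20 - 32 = 116.
 */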

static sfc_dp_rx_qdesc_status_t sfc_ef10_essb_rx_qdesc_status;
static int
sfc_ef10_essb_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
{
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
	unsigned int pending = sfc_ef10_essb_rx_qdesc_npending(dp_rxq);

	if (offset < pending)
		return RTE_ETH_RX_DESC_DONE;

	if (offset < (rxq->added - rxq->completed) * rxq->block_size +
		     rxq->left_in_completed - rxq->block_size)
		return RTE_ETH_RX_DESC_AVAIL;

	return RTE_ETH_RX_DESC_UNAVAIL;
}

static sfc_dp_rx_get_dev_info_t sfc_ef10_essb_rx_get_dev_info;
static void
sfc_ef10_essb_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
{
	/*
	 * The number of descriptors just defines the maximum number of
	 * pushed descriptors (fill level).
	 */
	dev_info->rx_desc_lim.nb_min = SFC_EF10_ESSB_RX_DESCS_MIN;
	dev_info->rx_desc_lim.nb_align = SFC_EF10_ESSB_RX_DESCS_ALIGN;
}

static sfc_dp_rx_pool_ops_supported_t sfc_ef10_essb_rx_pool_ops_supported;
static int
sfc_ef10_essb_rx_pool_ops_supported(const char *pool)
{
	SFC_ASSERT(pool != NULL);

	if (strcmp(pool, "bucket") == 0)
		return 0;

	return -ENOTSUP;
}
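
/*
 * Only mempool ops with contiguous block dequeue suit this datapath;
 * in-tree that is the "bucket" driver checked above. A minimal sketch
 * of creating a suitable mbuf pool (name and sizes are illustrative,
 * not required values):
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create_by_ops("rx-bucket", 8192, 0, 0,
 *					    RTE_MBUF_DEFAULT_BUF_SIZE,
 *					    rte_socket_id(), "bucket");
 *	if (mp == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create bucket mbuf pool\n");
 */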

static sfc_dp_rx_qsize_up_rings_t sfc_ef10_essb_rx_qsize_up_rings;
static int
sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc,
				struct sfc_dp_rx_hw_limits *limits,
				struct rte_mempool *mb_pool,
				unsigned int *rxq_entries,
				unsigned int *evq_entries,
				unsigned int *rxq_max_fill_level)
{
	int rc;
	struct rte_mempool_info mp_info;
	unsigned int nb_hw_rx_desc;
	unsigned int max_events;

	rc = rte_mempool_ops_get_info(mb_pool, &mp_info);
	if (rc != 0) {
		/* Positive errno is used in the driver */
		return -rc;
	}
	if (mp_info.contig_block_size == 0)
		return EINVAL;

	/*
	 * Calculate the required number of hardware Rx descriptors, each
	 * carrying contig block size Rx buffers.
	 * It cannot be less than Rx write pointer alignment plus 1
	 * in order to avoid cases when the ring is guaranteed to be
	 * empty.
	 */
	nb_hw_rx_desc = RTE_MAX(SFC_DIV_ROUND_UP(nb_rx_desc,
						 mp_info.contig_block_size),
				SFC_EF10_RX_WPTR_ALIGN + 1);
	if (nb_hw_rx_desc <= limits->rxq_min_entries) {
		*rxq_entries = limits->rxq_min_entries;
	} else {
		*rxq_entries = rte_align32pow2(nb_hw_rx_desc);
		if (*rxq_entries > limits->rxq_max_entries)
			return EINVAL;
	}

	max_events = RTE_ALIGN_FLOOR(nb_hw_rx_desc, SFC_EF10_RX_WPTR_ALIGN) *
		mp_info.contig_block_size +
		(SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ +
		1 /* Rx error */ + 1 /* flush */ + 1 /* head-tail space */;

	*evq_entries = rte_align32pow2(max_events);
	*evq_entries = RTE_MAX(*evq_entries, limits->evq_min_entries);
	*evq_entries = RTE_MIN(*evq_entries, limits->evq_max_entries);

	/*
	 * Even the maximum event queue size may be insufficient to handle
	 * so many Rx descriptors. If so, the Rx queue fill level must be
	 * limited.
	 */
	*rxq_max_fill_level = RTE_MIN(nb_rx_desc,
				      SFC_EF10_ESSB_RXQ_LIMIT(*evq_entries));
	return 0;
}
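
/*
 * Worked example (illustrative numbers): nb_rx_desc = 2048 buffers and
 * contig_block_size = 32 give nb_hw_rx_desc = max(2048 / 32, 8 + 1) = 64
 * HW descriptors, assuming SFC_EF10_RX_WPTR_ALIGN is 8; the EvQ then
 * must absorb up to 64 * 32 + 7 + 1 + 1 + 1 = 2058 events, rounded up
 * to 4096 entries.
 */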

static sfc_dp_rx_qcreate_t sfc_ef10_essb_rx_qcreate;
static int
sfc_ef10_essb_rx_qcreate(uint16_t port_id, uint16_t queue_id,
			 const struct rte_pci_addr *pci_addr, int socket_id,
			 const struct sfc_dp_rx_qcreate_info *info,
			 struct sfc_dp_rxq **dp_rxqp)
{
	struct rte_mempool * const mp = info->refill_mb_pool;
	struct rte_mempool_info mp_info;
	struct sfc_ef10_essb_rxq *rxq;
	int rc;

	rc = rte_mempool_ops_get_info(mp, &mp_info);
	if (rc != 0) {
		/* Positive errno is used in the driver */
		rc = -rc;
		goto fail_get_contig_block_size;
	}

	/* Check if the mempool provides block dequeue */
	rc = EINVAL;
	if (mp_info.contig_block_size == 0)
		goto fail_no_block_dequeue;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
					 info->rxq_entries,
					 sizeof(*rxq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL)
		goto fail_desc_alloc;

	rxq->block_size = mp_info.contig_block_size;
	rxq->buf_stride = mp->header_size + mp->elt_size + mp->trailer_size;
	rxq->rxq_ptr_mask = info->rxq_entries - 1;
	rxq->evq_ptr_mask = info->evq_entries - 1;
	rxq->evq_hw_ring = info->evq_hw_ring;
	rxq->port_id = port_id;

	rxq->max_fill_level = info->max_fill_level / mp_info.contig_block_size;
	rxq->refill_threshold =
		RTE_MAX(info->refill_threshold / mp_info.contig_block_size,
			SFC_EF10_RX_WPTR_ALIGN);
	rxq->refill_mb_pool = mp;
	rxq->rxq_hw_ring = info->rxq_hw_ring;

	rxq->doorbell = (volatile uint8_t *)info->mem_bar +
			ER_DZ_RX_DESC_UPD_REG_OFST +
			(info->hw_index << info->vi_window_shift);

	sfc_ef10_essb_rx_info(&rxq->dp.dpq,
			      "block size is %u, buf stride is %u",
			      rxq->block_size, rxq->buf_stride);
	sfc_ef10_essb_rx_info(&rxq->dp.dpq,
			      "max fill level is %u descs (%u bufs), "
			      "refill threshold %u descs (%u bufs)",
			      rxq->max_fill_level,
			      rxq->max_fill_level * rxq->block_size,
			      rxq->refill_threshold,
			      rxq->refill_threshold * rxq->block_size);

	*dp_rxqp = &rxq->dp;
	return 0;

fail_desc_alloc:
	rte_free(rxq);

fail_rxq_alloc:
fail_no_block_dequeue:
fail_get_contig_block_size:
	return rc;
}

static sfc_dp_rx_qdestroy_t sfc_ef10_essb_rx_qdestroy;
static void
sfc_ef10_essb_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);

	rte_free(rxq->sw_ring);
	rte_free(rxq);
}

static sfc_dp_rx_qstart_t sfc_ef10_essb_rx_qstart;
static int
sfc_ef10_essb_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
{
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);

	rxq->evq_read_ptr = evq_read_ptr;

	/* Initialize before refill */
	rxq->completed = rxq->pending_id = rxq->added = 0;
	rxq->left_in_completed = rxq->left_in_pending = rxq->block_size;
	rxq->bufs_ptr = UINT_MAX;
	rxq->bufs_pending = 0;

	sfc_ef10_essb_rx_qrefill(rxq);

	rxq->flags |= SFC_EF10_ESSB_RXQ_STARTED;
	rxq->flags &=
		~(SFC_EF10_ESSB_RXQ_NOT_RUNNING | SFC_EF10_ESSB_RXQ_EXCEPTION);

	return 0;
}

static sfc_dp_rx_qstop_t sfc_ef10_essb_rx_qstop;
static void
sfc_ef10_essb_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
{
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);

	rxq->flags |= SFC_EF10_ESSB_RXQ_NOT_RUNNING;

	*evq_read_ptr = rxq->evq_read_ptr;
}

static sfc_dp_rx_qrx_ev_t sfc_ef10_essb_rx_qrx_ev;
static bool
sfc_ef10_essb_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
{
	__rte_unused struct sfc_ef10_essb_rxq *rxq;

	rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq->flags & SFC_EF10_ESSB_RXQ_NOT_RUNNING);

	/*
	 * It is safe to ignore the Rx event since we free all mbufs on
	 * queue purge anyway.
	 */

	return false;
}

static sfc_dp_rx_qpurge_t sfc_ef10_essb_rx_qpurge;
static void
sfc_ef10_essb_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
	unsigned int i;
	const struct sfc_ef10_essb_rx_sw_desc *rxd;
	struct rte_mbuf *m;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_ring[i & rxq->rxq_ptr_mask];
		m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
				rxq->block_size - rxq->left_in_completed);
		while (rxq->left_in_completed > 0) {
			rte_mbuf_raw_free(m);
			m = sfc_ef10_essb_next_mbuf(rxq, m);
			rxq->left_in_completed--;
		}
		rxq->left_in_completed = rxq->block_size;
	}

	rxq->flags &= ~SFC_EF10_ESSB_RXQ_STARTED;
}

struct sfc_dp_rx sfc_ef10_essb_rx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EF10_ESSB,
		.type		= SFC_DP_RX,
		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10 |
				  SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER,
	},
	.features		= SFC_DP_RX_FEAT_FLOW_FLAG |
				  SFC_DP_RX_FEAT_FLOW_MARK,
	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
				  DEV_RX_OFFLOAD_RSS_HASH,
	.queue_offload_capa	= 0,
	.get_dev_info		= sfc_ef10_essb_rx_get_dev_info,
	.pool_ops_supported	= sfc_ef10_essb_rx_pool_ops_supported,
	.qsize_up_rings		= sfc_ef10_essb_rx_qsize_up_rings,
	.qcreate		= sfc_ef10_essb_rx_qcreate,
	.qdestroy		= sfc_ef10_essb_rx_qdestroy,
	.qstart			= sfc_ef10_essb_rx_qstart,
	.qstop			= sfc_ef10_essb_rx_qstop,
	.qrx_ev			= sfc_ef10_essb_rx_qrx_ev,
	.qpurge			= sfc_ef10_essb_rx_qpurge,
	.supported_ptypes_get	= sfc_ef10_supported_ptypes_get,
	.qdesc_npending		= sfc_ef10_essb_rx_qdesc_npending,
	.qdesc_status		= sfc_ef10_essb_rx_qdesc_status,
	.pkt_burst		= sfc_ef10_essb_recv_pkts,
};
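
/*
 * The Rx datapath is chosen per device via the sfc_efx rx_datapath
 * kvarg (see the sfc_efx PMD documentation), e.g. for testpmd with an
 * illustrative PCI address:
 *
 *	dpdk-testpmd -w 0000:02:00.0,rx_datapath=ef10_essb -- -i
 */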