/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */
/* EF10 native datapath implementation */
#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_mbuf_ptype.h>
#include <rte_mbuf.h>
#include <rte_io.h>

#include "efx_types.h"
#include "efx_regs_ef10.h"
#include "efx.h"

#include "sfc_debug.h"
#include "sfc_tweak.h"
#include "sfc_dp_rx.h"
#include "sfc_kvargs.h"
#include "sfc_ef10.h"

#define SFC_EF10_RX_EV_ENCAP_SUPPORT	1
#include "sfc_ef10_rx_ev.h"
#define sfc_ef10_rx_err(dpq, ...) \
        SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)

#define sfc_ef10_rx_info(dpq, ...) \
        SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, INFO, dpq, __VA_ARGS__)
/*
 * Maximum number of descriptors/buffers in the Rx ring.
 * It should guarantee that the corresponding event queue never overfills.
 * The EF10 native datapath uses an event queue of the same size as the
 * Rx queue. The maximum number of events on the datapath can be estimated
 * as the number of Rx queue entries (one event per Rx buffer in the worst
 * case) plus Rx error and flush events.
 */
#define SFC_EF10_RXQ_LIMIT(_ndesc) \
        ((_ndesc) - 1 /* head must not step on tail */ - \
         (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
         1 /* Rx error */ - 1 /* flush */)
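
/*
 * Worked example for the limit above (hypothetical sizes, not taken from
 * this file): assuming 8-byte events and a 64-byte cache line,
 * SFC_EF10_EV_PER_CACHE_LINE is 8, so a 512-entry ring allows at most
 * 512 - 1 - 7 - 1 - 1 = 502 outstanding Rx descriptors.
 */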

struct sfc_ef10_rx_sw_desc {
        struct rte_mbuf *mbuf;
};

struct sfc_ef10_rxq {
        /* Used on data path */
        unsigned int flags;
#define SFC_EF10_RXQ_STARTED		0x1
#define SFC_EF10_RXQ_NOT_RUNNING	0x2
#define SFC_EF10_RXQ_EXCEPTION		0x4
#define SFC_EF10_RXQ_RSS_HASH		0x8
#define SFC_EF10_RXQ_FLAG_INTR_EN	0x10
        unsigned int ptr_mask;
        unsigned int pending;
        unsigned int completed;
        unsigned int evq_read_ptr;
        unsigned int evq_read_ptr_primed;
        efx_qword_t *evq_hw_ring;
        struct sfc_ef10_rx_sw_desc *sw_ring;
        uint64_t rearm_data;
        struct rte_mbuf *scatter_pkt;
        volatile void *evq_prime;
        uint16_t prefix_size;

        /* Used on refill */
        unsigned int added;
        unsigned int max_fill_level;
        unsigned int refill_threshold;
        uint16_t buf_size;
        struct rte_mempool *refill_mb_pool;
        efx_qword_t *rxq_hw_ring;
        volatile void *doorbell;

        /* Datapath receive queue anchor */
        struct sfc_dp_rxq dp;
};

static inline struct sfc_ef10_rxq *
sfc_ef10_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
{
        return container_of(dp_rxq, struct sfc_ef10_rxq, dp);
}

static void
sfc_ef10_rx_qprime(struct sfc_ef10_rxq *rxq)
{
        sfc_ef10_ev_qprime(rxq->evq_prime, rxq->evq_read_ptr, rxq->ptr_mask);
        rxq->evq_read_ptr_primed = rxq->evq_read_ptr;
}

static void
sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
{
        const unsigned int ptr_mask = rxq->ptr_mask;
        const uint32_t buf_size = rxq->buf_size;
        unsigned int free_space;
        unsigned int bulks;
        void *objs[SFC_RX_REFILL_BULK];
        unsigned int added = rxq->added;
        unsigned int i, id;

        RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0);

        free_space = rxq->max_fill_level - (added - rxq->completed);

        if (free_space < rxq->refill_threshold)
                return;

        bulks = free_space / RTE_DIM(objs);
        /* refill_threshold guarantees that bulks is positive */
        SFC_ASSERT(bulks > 0);

        do {
                if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
                                                  RTE_DIM(objs)) < 0)) {
                        struct rte_eth_dev_data *dev_data =
                                rte_eth_devices[rxq->dp.dpq.port_id].data;

                        /*
                         * It is hardly a safe way to increment the counter
                         * from different contexts, but all PMDs do it.
                         */
                        dev_data->rx_mbuf_alloc_failed += RTE_DIM(objs);
                        /* Return if we have posted nothing yet */
                        if (added == rxq->added)
                                return;
                        /* Push what has been posted so far */
                        break;
                }

                for (i = 0, id = added & ptr_mask;
                     i < RTE_DIM(objs);
                     ++i, ++id) {
                        struct rte_mbuf *m = objs[i];
                        struct sfc_ef10_rx_sw_desc *rxd;
                        rte_iova_t phys_addr;

                        MBUF_RAW_ALLOC_CHECK(m);

                        SFC_ASSERT((id & ~ptr_mask) == 0);
                        rxd = &rxq->sw_ring[id];
                        rxd->mbuf = m;

                        /*
                         * Avoid writing to mbuf. It is cheaper to do it
                         * when we receive the packet and fill in nearby
                         * structure members.
                         */

                        phys_addr = rte_mbuf_data_iova_default(m);
                        EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
                            ESF_DZ_RX_KER_BYTE_CNT, buf_size,
                            ESF_DZ_RX_KER_BUF_ADDR, phys_addr);
                }

                added += RTE_DIM(objs);
        } while (--bulks > 0);

        SFC_ASSERT(rxq->added != added);
        rxq->added = added;
        sfc_ef10_rx_qpush(rxq->doorbell, added, ptr_mask);
}

static void
sfc_ef10_rx_prefetch_next(struct sfc_ef10_rxq *rxq, unsigned int next_id)
{
        struct rte_mbuf *next_mbuf;

        /* Prefetch next bunch of software descriptors */
        if ((next_id % (RTE_CACHE_LINE_SIZE / sizeof(rxq->sw_ring[0]))) == 0)
                rte_prefetch0(&rxq->sw_ring[next_id]);

        /*
         * It looks strange to prefetch depending on previously prefetched
         * data, but measurements show that it is really efficient and
         * increases packet rate.
         */
        next_mbuf = rxq->sw_ring[next_id].mbuf;
        if (likely(next_mbuf != NULL)) {
                /* Prefetch the next mbuf structure */
                rte_mbuf_prefetch_part1(next_mbuf);

                /* Prefetch pseudo header of the next packet */
                /* data_off is not filled in yet */
                /* The data may not be ready yet, but we prefetch speculatively */
                rte_prefetch0((uint8_t *)next_mbuf->buf_addr +
                              RTE_PKTMBUF_HEADROOM);
        }
}

static struct rte_mbuf **
sfc_ef10_rx_pending(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts,
                    uint16_t nb_pkts)
{
        uint16_t n_rx_pkts = RTE_MIN(nb_pkts, rxq->pending - rxq->completed);

        SFC_ASSERT(rxq->pending == rxq->completed || rxq->scatter_pkt == NULL);

        if (n_rx_pkts != 0) {
                unsigned int completed = rxq->completed;

                rxq->completed = completed + n_rx_pkts;
                do {
                        *rx_pkts++ =
                                rxq->sw_ring[completed++ & rxq->ptr_mask].mbuf;
                } while (completed != rxq->completed);
        }

        return rx_pkts;
}

/*
 * Accessors for the Rx prefix (pseudo-header) that precedes packet data:
 * the RSS hash is a little-endian 32-bit word at offset 0 and the packet
 * length is a little-endian 16-bit word at offset 8.
 */
static uint16_t
sfc_ef10_rx_pseudo_hdr_get_len(const uint8_t *pseudo_hdr)
{
        return rte_le_to_cpu_16(*(const uint16_t *)&pseudo_hdr[8]);
}

static uint32_t
sfc_ef10_rx_pseudo_hdr_get_hash(const uint8_t *pseudo_hdr)
{
        return rte_le_to_cpu_32(*(const uint32_t *)pseudo_hdr);
}

static struct rte_mbuf **
sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
                          struct rte_mbuf **rx_pkts,
                          struct rte_mbuf ** const rx_pkts_end)
{
        const unsigned int ptr_mask = rxq->ptr_mask;
        unsigned int pending = rxq->pending;
        unsigned int ready;
        struct sfc_ef10_rx_sw_desc *rxd;
        struct rte_mbuf *m;
        struct rte_mbuf *m0;
        const uint8_t *pseudo_hdr;
        uint16_t seg_len;

        ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - pending) &
                EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);

        if (ready == 0) {
                /* Rx abort - there were not enough descriptors for the Rx packet */
                rte_pktmbuf_free(rxq->scatter_pkt);
                rxq->scatter_pkt = NULL;
                return rx_pkts;
        }

        rxq->pending = pending + ready;

        if (rx_ev.eq_u64[0] &
            rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
                             (1ull << ESF_DZ_RX_ECRC_ERR_LBN))) {
                SFC_ASSERT(rxq->completed == pending);
                do {
                        rxd = &rxq->sw_ring[pending++ & ptr_mask];
                        rte_mbuf_raw_free(rxd->mbuf);
                } while (pending != rxq->pending);
                rxq->completed = pending;
                return rx_pkts;
        }

        /* If a scattered packet is in progress */
        if (rxq->scatter_pkt != NULL) {
                /* Events for scattered packet frags are not merged */
                SFC_ASSERT(ready == 1);
                SFC_ASSERT(rxq->completed == pending);

                /* There is no pseudo-header in scatter segments. */
                seg_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES);

                rxd = &rxq->sw_ring[pending++ & ptr_mask];
                m = rxd->mbuf;

                MBUF_RAW_ALLOC_CHECK(m);

                m->data_off = RTE_PKTMBUF_HEADROOM;
                rte_pktmbuf_data_len(m) = seg_len;
                rte_pktmbuf_pkt_len(m) = seg_len;

                rxq->scatter_pkt->nb_segs++;
                rte_pktmbuf_pkt_len(rxq->scatter_pkt) += seg_len;
                rte_pktmbuf_lastseg(rxq->scatter_pkt)->next = m;

                if (~rx_ev.eq_u64[0] &
                    rte_cpu_to_le_64(1ull << ESF_DZ_RX_CONT_LBN)) {
                        *rx_pkts++ = rxq->scatter_pkt;
                        rxq->scatter_pkt = NULL;
                }
                rxq->completed = pending;
                return rx_pkts;
        }

        rxd = &rxq->sw_ring[pending++ & ptr_mask];

        sfc_ef10_rx_prefetch_next(rxq, pending & ptr_mask);

        m = rxd->mbuf;

        RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) != sizeof(rxq->rearm_data));
        m->rearm_data[0] = rxq->rearm_data;

        /* Classify packet based on Rx event */
        /* Mask RSS hash offload flag if RSS is not enabled */
        sfc_ef10_rx_ev_to_offloads(rx_ev, m,
                                   (rxq->flags & SFC_EF10_RXQ_RSS_HASH) ?
                                   ~0ull : ~PKT_RX_RSS_HASH);

        /* data_off already moved past pseudo header */
        pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;

        /*
         * Always get the RSS hash from the pseudo header to avoid
         * condition/branching. Whether it is valid or not depends on
         * PKT_RX_RSS_HASH in m->ol_flags.
         */
        m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);

        if (ready == 1)
                seg_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES) -
                        rxq->prefix_size;
        else
                seg_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
        SFC_ASSERT(seg_len > 0);
        rte_pktmbuf_data_len(m) = seg_len;
        rte_pktmbuf_pkt_len(m) = seg_len;

        SFC_ASSERT(m->next == NULL);

        if (~rx_ev.eq_u64[0] & rte_cpu_to_le_64(1ull << ESF_DZ_RX_CONT_LBN)) {
                *rx_pkts++ = m;
                rxq->completed = pending;
        } else {
                /* Events with CONT bit are not merged */
                SFC_ASSERT(ready == 1);
                rxq->scatter_pkt = m;
                rxq->completed = pending;
                return rx_pkts;
        }

        /* Remember mbuf to copy offload flags and packet type from */
        m0 = m;
        while (pending != rxq->pending) {
                rxd = &rxq->sw_ring[pending++ & ptr_mask];

                sfc_ef10_rx_prefetch_next(rxq, pending & ptr_mask);

                m = rxd->mbuf;

                if (rx_pkts != rx_pkts_end) {
                        *rx_pkts++ = m;
                        rxq->completed = pending;
                }

                RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) !=
                                 sizeof(rxq->rearm_data));
                m->rearm_data[0] = rxq->rearm_data;

                /* Event-dependent information is the same */
                m->ol_flags = m0->ol_flags;
                m->packet_type = m0->packet_type;

                /* data_off already moved past pseudo header */
                pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;

                /*
                 * Always get the RSS hash from the pseudo header to avoid
                 * condition/branching. Whether it is valid or not depends on
                 * PKT_RX_RSS_HASH in m->ol_flags.
                 */
                m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);

                seg_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
                SFC_ASSERT(seg_len > 0);
                rte_pktmbuf_data_len(m) = seg_len;
                rte_pktmbuf_pkt_len(m) = seg_len;

                SFC_ASSERT(m->next == NULL);
        }

        return rx_pkts;
}

static bool
sfc_ef10_rx_get_event(struct sfc_ef10_rxq *rxq, efx_qword_t *rx_ev)
{
        *rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->ptr_mask];

        if (!sfc_ef10_ev_present(*rx_ev))
                return false;

        if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
                     FSE_AZ_EV_CODE_RX_EV)) {
                /*
                 * Do not move read_ptr to keep the event for exception
                 * handling by the control path.
                 */
                rxq->flags |= SFC_EF10_RXQ_EXCEPTION;
                sfc_ef10_rx_err(&rxq->dp.dpq,
                                "RxQ exception at EvQ read ptr %#x",
                                rxq->evq_read_ptr);
                return false;
        }

        rxq->evq_read_ptr++;
        return true;
}

static uint16_t
sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(rx_queue);
        struct rte_mbuf ** const rx_pkts_end = &rx_pkts[nb_pkts];
        unsigned int evq_old_read_ptr;
        efx_qword_t rx_ev;

        rx_pkts = sfc_ef10_rx_pending(rxq, rx_pkts, nb_pkts);

        if (unlikely(rxq->flags &
                     (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
                goto done;

        evq_old_read_ptr = rxq->evq_read_ptr;
        while (rx_pkts != rx_pkts_end && sfc_ef10_rx_get_event(rxq, &rx_ev)) {
                /*
                 * DROP_EVENT is internal to the NIC; software should never
                 * see it and, therefore, may ignore it.
                 */
                rx_pkts = sfc_ef10_rx_process_event(rxq, rx_ev,
                                                    rx_pkts, rx_pkts_end);
        }

        sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->ptr_mask, evq_old_read_ptr,
                           rxq->evq_read_ptr);

        /* It is not a problem if we refill in the case of exception */
        sfc_ef10_rx_qrefill(rxq);

        if ((rxq->flags & SFC_EF10_RXQ_FLAG_INTR_EN) &&
            rxq->evq_read_ptr_primed != rxq->evq_read_ptr)
                sfc_ef10_rx_qprime(rxq);

done:
        return nb_pkts - (rx_pkts_end - rx_pkts);
}

static const uint32_t *
sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
{
        static const uint32_t ef10_native_ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L2_ETHER_ARP,
                RTE_PTYPE_L2_ETHER_VLAN,
                RTE_PTYPE_L2_ETHER_QINQ,
                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
                RTE_PTYPE_L4_FRAG,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_UNKNOWN
        };
        static const uint32_t ef10_overlay_ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L2_ETHER_ARP,
                RTE_PTYPE_L2_ETHER_VLAN,
                RTE_PTYPE_L2_ETHER_QINQ,
                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
                RTE_PTYPE_L4_FRAG,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_TUNNEL_VXLAN,
                RTE_PTYPE_TUNNEL_NVGRE,
                RTE_PTYPE_INNER_L2_ETHER,
                RTE_PTYPE_INNER_L2_ETHER_VLAN,
                RTE_PTYPE_INNER_L2_ETHER_QINQ,
                RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
                RTE_PTYPE_INNER_L4_FRAG,
                RTE_PTYPE_INNER_L4_TCP,
                RTE_PTYPE_INNER_L4_UDP,
                RTE_PTYPE_UNKNOWN
        };

        /*
         * The function returns a static set of supported packet types,
         * so we cannot build it dynamically based on the supported tunnel
         * encapsulations and have to limit it to known sets.
         */
        switch (tunnel_encaps) {
        case (1u << EFX_TUNNEL_PROTOCOL_VXLAN |
              1u << EFX_TUNNEL_PROTOCOL_GENEVE |
              1u << EFX_TUNNEL_PROTOCOL_NVGRE):
                return ef10_overlay_ptypes;
        default:
                SFC_GENERIC_LOG(ERR,
                        "Unexpected set of supported tunnel encapsulations: %#x",
                        tunnel_encaps);
                /* FALLTHROUGH */
        case 0:
                return ef10_native_ptypes;
        }
}

static sfc_dp_rx_qdesc_npending_t sfc_ef10_rx_qdesc_npending;
static unsigned int
sfc_ef10_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
        efx_qword_t rx_ev;
        const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
        unsigned int pending = rxq->pending;
        unsigned int ready;

        if (unlikely(rxq->flags &
                     (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
                goto done;

        while (sfc_ef10_rx_get_event(rxq, &rx_ev)) {
                ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) -
                         pending) &
                        EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
                pending += ready;
        }

        /*
         * The function does not process events, so return the event queue
         * read pointer to the original position to allow the events that
         * were read to be processed later.
         */
        rxq->evq_read_ptr = evq_old_read_ptr;

done:
        return pending - rxq->completed;
}

static sfc_dp_rx_qdesc_status_t sfc_ef10_rx_qdesc_status;
static int
sfc_ef10_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
{
        struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
        unsigned int npending = sfc_ef10_rx_qdesc_npending(dp_rxq);

        if (unlikely(offset > rxq->ptr_mask))
                return -EINVAL;

        if (offset < npending)
                return RTE_ETH_RX_DESC_DONE;

        if (offset < (rxq->added - rxq->completed))
                return RTE_ETH_RX_DESC_AVAIL;

        return RTE_ETH_RX_DESC_UNAVAIL;
}

static sfc_dp_rx_get_dev_info_t sfc_ef10_rx_get_dev_info;
static void
sfc_ef10_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
{
        /*
         * The number of descriptors just defines the maximum number of
         * pushed descriptors (fill level).
         */
        dev_info->rx_desc_lim.nb_min = SFC_RX_REFILL_BULK;
        dev_info->rx_desc_lim.nb_align = SFC_RX_REFILL_BULK;
}

static sfc_dp_rx_qsize_up_rings_t sfc_ef10_rx_qsize_up_rings;
static int
sfc_ef10_rx_qsize_up_rings(uint16_t nb_rx_desc,
                           struct sfc_dp_rx_hw_limits *limits,
                           __rte_unused struct rte_mempool *mb_pool,
                           unsigned int *rxq_entries,
                           unsigned int *evq_entries,
                           unsigned int *rxq_max_fill_level)
{
        /*
         * The rte_ethdev API guarantees that the number meets min, max and
         * alignment requirements.
         */
        if (nb_rx_desc <= limits->rxq_min_entries)
                *rxq_entries = limits->rxq_min_entries;
        else
                *rxq_entries = rte_align32pow2(nb_rx_desc);

        *evq_entries = *rxq_entries;

        *rxq_max_fill_level = RTE_MIN(nb_rx_desc,
                                      SFC_EF10_RXQ_LIMIT(*evq_entries));

        return 0;
}
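
/*
 * Worked example for the sizing above (hypothetical numbers, not taken
 * from this file): a request for 1500 Rx descriptors with
 * rxq_min_entries = 512 yields *rxq_entries = *evq_entries = 2048
 * (rounded up to a power of two) and *rxq_max_fill_level =
 * RTE_MIN(1500, SFC_EF10_RXQ_LIMIT(2048)) = 1500, since the limit for a
 * 2048-entry EvQ is well above 1500.
 */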

static uint64_t
sfc_ef10_mk_mbuf_rearm_data(uint16_t port_id, uint16_t prefix_size)
{
        struct rte_mbuf m;

        memset(&m, 0, sizeof(m));

        rte_mbuf_refcnt_set(&m, 1);
        m.data_off = RTE_PKTMBUF_HEADROOM + prefix_size;
        m.nb_segs = 1;
        m.port = port_id;

        /* rearm_data covers structure members filled in above */
        rte_compiler_barrier();
        RTE_BUILD_BUG_ON(sizeof(m.rearm_data[0]) != sizeof(uint64_t));
        return m.rearm_data[0];
}
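
/*
 * Note: the template built above is applied on the datapath as a single
 * 64-bit store (m->rearm_data[0] = rxq->rearm_data in
 * sfc_ef10_rx_process_event()), which rearms data_off, refcnt, nb_segs
 * and port in one go instead of writing each mbuf field separately.
 */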

static sfc_dp_rx_qcreate_t sfc_ef10_rx_qcreate;
static int
sfc_ef10_rx_qcreate(uint16_t port_id, uint16_t queue_id,
                    const struct rte_pci_addr *pci_addr, int socket_id,
                    const struct sfc_dp_rx_qcreate_info *info,
                    struct sfc_dp_rxq **dp_rxqp)
{
        struct sfc_ef10_rxq *rxq;
        int rc;

        rc = EINVAL;
        if (info->rxq_entries != info->evq_entries)
                goto fail_rxq_args;

        rc = ENOMEM;
        rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq == NULL)
                goto fail_rxq_alloc;

        sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

        rc = ENOMEM;
        rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
                                         info->rxq_entries,
                                         sizeof(*rxq->sw_ring),
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->sw_ring == NULL)
                goto fail_desc_alloc;

        rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;
        if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
                rxq->flags |= SFC_EF10_RXQ_RSS_HASH;
        rxq->ptr_mask = info->rxq_entries - 1;
        rxq->evq_hw_ring = info->evq_hw_ring;
        rxq->max_fill_level = info->max_fill_level;
        rxq->refill_threshold = info->refill_threshold;
        rxq->rearm_data =
                sfc_ef10_mk_mbuf_rearm_data(port_id, info->prefix_size);
        rxq->prefix_size = info->prefix_size;
        rxq->buf_size = info->buf_size;
        rxq->refill_mb_pool = info->refill_mb_pool;
        rxq->rxq_hw_ring = info->rxq_hw_ring;
        rxq->doorbell = (volatile uint8_t *)info->mem_bar +
                        ER_DZ_RX_DESC_UPD_REG_OFST +
                        (info->hw_index << info->vi_window_shift);
        rxq->evq_prime = (volatile uint8_t *)info->mem_bar +
                         ER_DZ_EVQ_RPTR_REG_OFST +
                         (info->evq_hw_index << info->vi_window_shift);

        sfc_ef10_rx_info(&rxq->dp.dpq, "RxQ doorbell is %p", rxq->doorbell);

        *dp_rxqp = &rxq->dp;
        return 0;

fail_desc_alloc:
        rte_free(rxq);

fail_rxq_alloc:
fail_rxq_args:
        return rc;
}

static sfc_dp_rx_qdestroy_t sfc_ef10_rx_qdestroy;
static void
sfc_ef10_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

        rte_free(rxq->sw_ring);
        rte_free(rxq);
}

static sfc_dp_rx_qstart_t sfc_ef10_rx_qstart;
static int
sfc_ef10_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
{
        struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

        SFC_ASSERT(rxq->completed == 0);
        SFC_ASSERT(rxq->pending == 0);
        SFC_ASSERT(rxq->added == 0);

        sfc_ef10_rx_qrefill(rxq);

        rxq->evq_read_ptr = evq_read_ptr;

        rxq->flags |= SFC_EF10_RXQ_STARTED;
        rxq->flags &= ~(SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION);

        if (rxq->flags & SFC_EF10_RXQ_FLAG_INTR_EN)
                sfc_ef10_rx_qprime(rxq);

        return 0;
}

static sfc_dp_rx_qstop_t sfc_ef10_rx_qstop;
static void
sfc_ef10_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
{
        struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

        rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;

        *evq_read_ptr = rxq->evq_read_ptr;
}

static sfc_dp_rx_qrx_ev_t sfc_ef10_rx_qrx_ev;
static bool
sfc_ef10_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
{
        __rte_unused struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

        SFC_ASSERT(rxq->flags & SFC_EF10_RXQ_NOT_RUNNING);
        /*
         * It is safe to ignore the Rx event since we free all mbufs on
         * queue purge anyway.
         */
        return false;
}

static sfc_dp_rx_qpurge_t sfc_ef10_rx_qpurge;
static void
sfc_ef10_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
        unsigned int i;
        struct sfc_ef10_rx_sw_desc *rxd;

        rte_pktmbuf_free(rxq->scatter_pkt);
        rxq->scatter_pkt = NULL;

        for (i = rxq->completed; i != rxq->added; ++i) {
                rxd = &rxq->sw_ring[i & rxq->ptr_mask];
                rte_mbuf_raw_free(rxd->mbuf);
                rxd->mbuf = NULL;
        }

        rxq->completed = rxq->pending = rxq->added = 0;

        rxq->flags &= ~SFC_EF10_RXQ_STARTED;
}

static sfc_dp_rx_intr_enable_t sfc_ef10_rx_intr_enable;
static int
sfc_ef10_rx_intr_enable(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

        rxq->flags |= SFC_EF10_RXQ_FLAG_INTR_EN;
        if (rxq->flags & SFC_EF10_RXQ_STARTED)
                sfc_ef10_rx_qprime(rxq);

        return 0;
}

static sfc_dp_rx_intr_disable_t sfc_ef10_rx_intr_disable;
static int
sfc_ef10_rx_intr_disable(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

        /* Cannot disarm, just disable rearm */
        rxq->flags &= ~SFC_EF10_RXQ_FLAG_INTR_EN;

        return 0;
}

struct sfc_dp_rx sfc_ef10_rx = {
        .dp = {
                .name = SFC_KVARG_DATAPATH_EF10,
                .type = SFC_DP_RX,
                .hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
        },
        .features = SFC_DP_RX_FEAT_MULTI_PROCESS |
                    SFC_DP_RX_FEAT_INTR,
        .dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM |
                            DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                            DEV_RX_OFFLOAD_RSS_HASH,
        .queue_offload_capa = DEV_RX_OFFLOAD_SCATTER,
        .get_dev_info = sfc_ef10_rx_get_dev_info,
        .qsize_up_rings = sfc_ef10_rx_qsize_up_rings,
        .qcreate = sfc_ef10_rx_qcreate,
        .qdestroy = sfc_ef10_rx_qdestroy,
        .qstart = sfc_ef10_rx_qstart,
        .qstop = sfc_ef10_rx_qstop,
        .qrx_ev = sfc_ef10_rx_qrx_ev,
        .qpurge = sfc_ef10_rx_qpurge,
        .supported_ptypes_get = sfc_ef10_supported_ptypes_get,
        .qdesc_npending = sfc_ef10_rx_qdesc_npending,
        .qdesc_status = sfc_ef10_rx_qdesc_status,
        .intr_enable = sfc_ef10_rx_intr_enable,
        .intr_disable = sfc_ef10_rx_intr_disable,
        .pkt_burst = sfc_ef10_recv_pkts,
};