/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2018-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

/* EF100 native datapath implementation */
#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_mbuf_ptype.h>
#include <rte_mbuf.h>
#include <rte_io.h>

#include "efx_types.h"
#include "efx_regs_ef100.h"

#include "sfc_debug.h"
#include "sfc_tweak.h"
#include "sfc_dp_rx.h"
#include "sfc_kvargs.h"
#include "sfc_ef100.h"
#define sfc_ef100_rx_err(_rxq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF100, ERR, &(_rxq)->dp.dpq, __VA_ARGS__)

#define sfc_ef100_rx_debug(_rxq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF100, DEBUG, &(_rxq)->dp.dpq, \
		   __VA_ARGS__)
/*
 * Maximum number of descriptors/buffers in the Rx ring.
 * It should guarantee that the corresponding event queue never overfills.
 * The EF100 native datapath uses an event queue of the same size as the
 * Rx queue. The maximum number of events on the datapath can be estimated
 * as the number of Rx queue entries (one event per Rx buffer in the worst
 * case) plus Rx error and flush events.
 */
#define SFC_EF100_RXQ_LIMIT(_ndesc) \
	((_ndesc) - 1 /* head must not step on tail */ - \
	 1 /* Rx error */ - 1 /* flush */)
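
/*
 * Worked example (illustrative values): for a 512-entry Rx ring,
 * SFC_EF100_RXQ_LIMIT(512) = 512 - 1 - 1 - 1 = 509, i.e. at most 509
 * buffers may be outstanding so that Rx packet events, an Rx error event
 * and a flush event always fit into an event queue of the same size.
 */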
struct sfc_ef100_rx_sw_desc {
	struct rte_mbuf			*mbuf;
};
struct sfc_ef100_rxq {
	/* Used on data path */
	unsigned int			flags;
#define SFC_EF100_RXQ_STARTED		0x1
#define SFC_EF100_RXQ_NOT_RUNNING	0x2
#define SFC_EF100_RXQ_EXCEPTION		0x4
	unsigned int			ptr_mask;
	unsigned int			evq_phase_bit_shift;
	unsigned int			ready_pkts;
	unsigned int			completed;
	unsigned int			evq_read_ptr;
	volatile efx_qword_t		*evq_hw_ring;
	struct sfc_ef100_rx_sw_desc	*sw_ring;
	uint64_t			rearm_data;
	uint16_t			buf_size;
	uint16_t			prefix_size;

	/* Used on refill */
	unsigned int			added;
	unsigned int			max_fill_level;
	unsigned int			refill_threshold;
	struct rte_mempool		*refill_mb_pool;
	efx_qword_t			*rxq_hw_ring;
	volatile void			*doorbell;

	/* Datapath receive queue anchor */
	struct sfc_dp_rxq		dp;
};
static inline struct sfc_ef100_rxq *
sfc_ef100_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
{
	return container_of(dp_rxq, struct sfc_ef100_rxq, dp);
}
static void
sfc_ef100_rx_qpush(struct sfc_ef100_rxq *rxq, unsigned int added)
{
	efx_dword_t dword;

	EFX_POPULATE_DWORD_1(dword, ERF_GZ_RX_RING_PIDX, added & rxq->ptr_mask);

	/* DMA sync to device is not required */

	/*
	 * rte_write32() has rte_io_wmb() which guarantees that the STORE
	 * operations (i.e. Rx and event descriptor updates) that precede
	 * the rte_io_wmb() call are visible to the NIC before the STORE
	 * operations that follow it (i.e. doorbell write).
	 */
	rte_write32(dword.ed_u32[0], rxq->doorbell);

	sfc_ef100_rx_debug(rxq, "RxQ pushed doorbell at pidx %u (added=%u)",
			   EFX_DWORD_FIELD(dword, ERF_GZ_RX_RING_PIDX),
			   added);
}
static void
sfc_ef100_rx_qrefill(struct sfc_ef100_rxq *rxq)
{
	const unsigned int ptr_mask = rxq->ptr_mask;
	unsigned int free_space;
	unsigned int bulks;
	void *objs[SFC_RX_REFILL_BULK];
	unsigned int added = rxq->added;

	free_space = rxq->max_fill_level - (added - rxq->completed);

	if (free_space < rxq->refill_threshold)
		return;

	bulks = free_space / RTE_DIM(objs);
	/* refill_threshold guarantees that bulks is positive */
	SFC_ASSERT(bulks > 0);

	do {
		unsigned int id;
		unsigned int i;

		if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
						  RTE_DIM(objs)) < 0)) {
			struct rte_eth_dev_data *dev_data =
				rte_eth_devices[rxq->dp.dpq.port_id].data;

			/*
			 * It is hardly a safe way to increment the counter
			 * from different contexts, but all PMDs do it.
			 */
			dev_data->rx_mbuf_alloc_failed += RTE_DIM(objs);
			/* Return if we have posted nothing yet */
			if (added == rxq->added)
				return;
			/* Push posted */
			break;
		}

		for (i = 0, id = added & ptr_mask;
		     i < RTE_DIM(objs);
		     ++i, ++id) {
			struct rte_mbuf *m = objs[i];
			struct sfc_ef100_rx_sw_desc *rxd;
			rte_iova_t phys_addr;

			MBUF_RAW_ALLOC_CHECK(m);

			SFC_ASSERT((id & ~ptr_mask) == 0);
			rxd = &rxq->sw_ring[id];
			rxd->mbuf = m;

			/*
			 * Avoid writing to the mbuf. It is cheaper to do it
			 * when we receive the packet and fill in nearby
			 * structure members.
			 */

			phys_addr = rte_mbuf_data_iova_default(m);
			EFX_POPULATE_QWORD_1(rxq->rxq_hw_ring[id],
					     ESF_GZ_RX_BUF_ADDR, phys_addr);
		}

		added += RTE_DIM(objs);
	} while (--bulks > 0);

	SFC_ASSERT(rxq->added != added);
	rxq->added = added;
	sfc_ef100_rx_qpush(rxq, added);
}
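
/*
 * Refill arithmetic, illustrated with assumed values: if max_fill_level
 * is 504, added - completed is 100 and SFC_RX_REFILL_BULK is 8, then
 * free_space = 404 and bulks = 404 / 8 = 50, so up to 50 * 8 = 400 new
 * buffers are posted and published with a single doorbell write at the
 * end (fewer if a mempool allocation fails part way through).
 */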
static inline uint64_t
sfc_ef100_rx_nt_or_inner_l4_csum(const efx_word_t class)
{
	return EFX_WORD_FIELD(class,
			      ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CSUM) ==
		ESE_GZ_RH_HCLASS_L4_CSUM_GOOD ?
		PKT_RX_L4_CKSUM_GOOD : PKT_RX_L4_CKSUM_BAD;
}

static inline uint64_t
sfc_ef100_rx_tun_outer_l4_csum(const efx_word_t class)
{
	return EFX_WORD_FIELD(class,
			      ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L4_CSUM) ==
		ESE_GZ_RH_HCLASS_L4_CSUM_GOOD ?
		PKT_RX_OUTER_L4_CKSUM_GOOD : PKT_RX_OUTER_L4_CKSUM_BAD;
}
static uint32_t
sfc_ef100_rx_class_decode(const efx_word_t class, uint64_t *ol_flags)
{
	uint32_t ptype;
	bool no_tunnel = false;

	if (unlikely(EFX_WORD_FIELD(class, ESF_GZ_RX_PREFIX_HCLASS_L2_CLASS) !=
		     ESE_GZ_RH_HCLASS_L2_CLASS_E2_0123VLAN))
		return 0;

	switch (EFX_WORD_FIELD(class, ESF_GZ_RX_PREFIX_HCLASS_L2_N_VLAN)) {
	case 0:
		ptype = RTE_PTYPE_L2_ETHER;
		break;
	case 1:
		ptype = RTE_PTYPE_L2_ETHER_VLAN;
		break;
	default:
		ptype = RTE_PTYPE_L2_ETHER_QINQ;
		break;
	}

	switch (EFX_WORD_FIELD(class, ESF_GZ_RX_PREFIX_HCLASS_TUNNEL_CLASS)) {
	case ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NONE:
		no_tunnel = true;
		break;
	case ESE_GZ_RH_HCLASS_TUNNEL_CLASS_VXLAN:
		ptype |= RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP;
		*ol_flags |= sfc_ef100_rx_tun_outer_l4_csum(class);
		break;
	case ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NVGRE:
		ptype |= RTE_PTYPE_TUNNEL_NVGRE;
		break;
	case ESE_GZ_RH_HCLASS_TUNNEL_CLASS_GENEVE:
		ptype |= RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP;
		*ol_flags |= sfc_ef100_rx_tun_outer_l4_csum(class);
		break;
	default:
		/*
		 * Driver does not know the tunnel, but it is still a
		 * tunnel and NT_OR_INNER refers to the inner frame.
		 */
		break;
	}

	if (no_tunnel) {
		bool l4_valid = true;

		switch (EFX_WORD_FIELD(class,
			ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS)) {
		case ESE_GZ_RH_HCLASS_L3_CLASS_IP4GOOD:
			ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
			*ol_flags |= PKT_RX_IP_CKSUM_GOOD;
			break;
		case ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD:
			ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
			*ol_flags |= PKT_RX_IP_CKSUM_BAD;
			break;
		case ESE_GZ_RH_HCLASS_L3_CLASS_IP6:
			ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
			break;
		default:
			l4_valid = false;
		}

		if (l4_valid) {
			switch (EFX_WORD_FIELD(class,
				ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CLASS)) {
			case ESE_GZ_RH_HCLASS_L4_CLASS_TCP:
				ptype |= RTE_PTYPE_L4_TCP;
				*ol_flags |=
					sfc_ef100_rx_nt_or_inner_l4_csum(class);
				break;
			case ESE_GZ_RH_HCLASS_L4_CLASS_UDP:
				ptype |= RTE_PTYPE_L4_UDP;
				*ol_flags |=
					sfc_ef100_rx_nt_or_inner_l4_csum(class);
				break;
			case ESE_GZ_RH_HCLASS_L4_CLASS_FRAG:
				ptype |= RTE_PTYPE_L4_FRAG;
				break;
			}
		}
	} else {
		bool l4_valid = true;

		switch (EFX_WORD_FIELD(class,
			ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L3_CLASS)) {
		case ESE_GZ_RH_HCLASS_L3_CLASS_IP4GOOD:
			ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
			break;
		case ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD:
			ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
			*ol_flags |= PKT_RX_EIP_CKSUM_BAD;
			break;
		case ESE_GZ_RH_HCLASS_L3_CLASS_IP6:
			ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
			break;
		}

		switch (EFX_WORD_FIELD(class,
			ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS)) {
		case ESE_GZ_RH_HCLASS_L3_CLASS_IP4GOOD:
			ptype |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
			*ol_flags |= PKT_RX_IP_CKSUM_GOOD;
			break;
		case ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD:
			ptype |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
			*ol_flags |= PKT_RX_IP_CKSUM_BAD;
			break;
		case ESE_GZ_RH_HCLASS_L3_CLASS_IP6:
			ptype |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
			break;
		default:
			l4_valid = false;
		}

		if (l4_valid) {
			switch (EFX_WORD_FIELD(class,
				ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CLASS)) {
			case ESE_GZ_RH_HCLASS_L4_CLASS_TCP:
				ptype |= RTE_PTYPE_INNER_L4_TCP;
				*ol_flags |=
					sfc_ef100_rx_nt_or_inner_l4_csum(class);
				break;
			case ESE_GZ_RH_HCLASS_L4_CLASS_UDP:
				ptype |= RTE_PTYPE_INNER_L4_UDP;
				*ol_flags |=
					sfc_ef100_rx_nt_or_inner_l4_csum(class);
				break;
			case ESE_GZ_RH_HCLASS_L4_CLASS_FRAG:
				ptype |= RTE_PTYPE_INNER_L4_FRAG;
				break;
			}
		}
	}

	return ptype;
}
static bool
sfc_ef100_rx_prefix_to_offloads(const efx_oword_t *rx_prefix,
				struct rte_mbuf *m)
{
	const efx_word_t *class;
	uint64_t ol_flags = 0;

	RTE_BUILD_BUG_ON(EFX_LOW_BIT(ESF_GZ_RX_PREFIX_CLASS) % CHAR_BIT != 0);
	RTE_BUILD_BUG_ON(EFX_WIDTH(ESF_GZ_RX_PREFIX_CLASS) % CHAR_BIT != 0);
	RTE_BUILD_BUG_ON(EFX_WIDTH(ESF_GZ_RX_PREFIX_CLASS) / CHAR_BIT !=
			 sizeof(*class));
	class = (const efx_word_t *)((const uint8_t *)rx_prefix +
		EFX_LOW_BIT(ESF_GZ_RX_PREFIX_CLASS) / CHAR_BIT);
	if (unlikely(EFX_WORD_FIELD(*class,
				    ESF_GZ_RX_PREFIX_HCLASS_L2_STATUS) !=
		     ESE_GZ_RH_HCLASS_L2_STATUS_OK))
		return false;

	m->packet_type = sfc_ef100_rx_class_decode(*class, &ol_flags);

	m->ol_flags = ol_flags;
	return true;
}
static const uint8_t *
sfc_ef100_rx_pkt_prefix(const struct rte_mbuf *m)
{
	return (const uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;
}
static struct rte_mbuf *
sfc_ef100_rx_next_mbuf(struct sfc_ef100_rxq *rxq)
{
	struct rte_mbuf *m;
	unsigned int id;

	/* mbuf associated with current Rx descriptor */
	m = rxq->sw_ring[rxq->completed++ & rxq->ptr_mask].mbuf;

	/* completed is already moved to the next one */
	if (unlikely(rxq->completed == rxq->added))
		goto done;

	/*
	 * Prefetch Rx prefix of the next packet.
	 * If the packet is scattered and the next mbuf is its fragment,
	 * it simply prefetches some data - no harm since the packet rate
	 * should not be high if scatter is used.
	 */
	id = rxq->completed & rxq->ptr_mask;
	rte_prefetch0(sfc_ef100_rx_pkt_prefix(rxq->sw_ring[id].mbuf));

	if (unlikely(rxq->completed + 1 == rxq->added))
		goto done;

	/*
	 * Prefetch mbuf control structure of the next after next Rx
	 * descriptor.
	 */
	id = (id == rxq->ptr_mask) ? 0 : (id + 1);
	rte_mbuf_prefetch_part1(rxq->sw_ring[id].mbuf);

	/*
	 * If the next SW Rx descriptor we will need is in the next
	 * cache line, try to make sure that we have it in cache.
	 */
	if ((id & 0x7) == 0x7)
		rte_prefetch0(&rxq->sw_ring[(id + 1) & rxq->ptr_mask]);

done:
	return m;
}
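
/*
 * Illustration of the cache line heuristic above, assuming 64-byte cache
 * lines and 8-byte SW descriptors (one mbuf pointer each): eight
 * descriptors share a cache line, so when (id & 0x7) == 0x7 the
 * descriptor for id + 1 starts a new line and is prefetched in advance.
 */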
static struct rte_mbuf **
sfc_ef100_rx_process_ready_pkts(struct sfc_ef100_rxq *rxq,
				struct rte_mbuf **rx_pkts,
				struct rte_mbuf ** const rx_pkts_end)
{
	while (rxq->ready_pkts > 0 && rx_pkts != rx_pkts_end) {
		struct rte_mbuf *pkt;
		struct rte_mbuf *lastseg;
		const efx_oword_t *rx_prefix;
		uint16_t pkt_len;
		uint16_t seg_len;
		bool deliver;

		rxq->ready_pkts--;

		pkt = sfc_ef100_rx_next_mbuf(rxq);
		MBUF_RAW_ALLOC_CHECK(pkt);

		RTE_BUILD_BUG_ON(sizeof(pkt->rearm_data[0]) !=
				 sizeof(rxq->rearm_data));
		pkt->rearm_data[0] = rxq->rearm_data;

		/* data_off already moved past Rx prefix */
		rx_prefix = (const efx_oword_t *)sfc_ef100_rx_pkt_prefix(pkt);

		pkt_len = EFX_OWORD_FIELD(rx_prefix[0],
					  ESF_GZ_RX_PREFIX_LENGTH);
		SFC_ASSERT(pkt_len > 0);
		rte_pktmbuf_pkt_len(pkt) = pkt_len;

		seg_len = RTE_MIN(pkt_len, rxq->buf_size - rxq->prefix_size);
		rte_pktmbuf_data_len(pkt) = seg_len;

		deliver = sfc_ef100_rx_prefix_to_offloads(rx_prefix, pkt);

		lastseg = pkt;
		while ((pkt_len -= seg_len) > 0) {
			struct rte_mbuf *seg;

			seg = sfc_ef100_rx_next_mbuf(rxq);
			MBUF_RAW_ALLOC_CHECK(seg);

			seg->data_off = RTE_PKTMBUF_HEADROOM;

			seg_len = RTE_MIN(pkt_len, rxq->buf_size);
			rte_pktmbuf_data_len(seg) = seg_len;
			rte_pktmbuf_pkt_len(seg) = seg_len;

			pkt->nb_segs++;
			lastseg->next = seg;
			lastseg = seg;
		}

		if (likely(deliver))
			*rx_pkts++ = pkt;
		else
			rte_pktmbuf_free(pkt);
	}

	return rx_pkts;
}
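
/*
 * Segment length arithmetic for scattered packets, with assumed values:
 * buf_size = 2048, prefix_size = 22, pkt_len = 3000. The first segment
 * carries RTE_MIN(3000, 2048 - 22) = 2026 bytes since the Rx prefix
 * occupies the head of the first buffer; the second segment carries
 * RTE_MIN(3000 - 2026, 2048) = 974 bytes.
 */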
static bool
sfc_ef100_rx_get_event(struct sfc_ef100_rxq *rxq, efx_qword_t *ev)
{
	*ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->ptr_mask];

	if (!sfc_ef100_ev_present(ev,
			(rxq->evq_read_ptr >> rxq->evq_phase_bit_shift) & 1))
		return false;

	if (unlikely(!sfc_ef100_ev_type_is(ev, ESE_GZ_EF100_EV_RX_PKTS))) {
		/*
		 * Do not move read_ptr to keep the event for exception
		 * handling by the control path.
		 */
		rxq->flags |= SFC_EF100_RXQ_EXCEPTION;
		sfc_ef100_rx_err(rxq,
			"RxQ exception at EvQ ptr %u(%#x), event %08x:%08x",
			rxq->evq_read_ptr, rxq->evq_read_ptr & rxq->ptr_mask,
			EFX_QWORD_FIELD(*ev, EFX_DWORD_1),
			EFX_QWORD_FIELD(*ev, EFX_DWORD_0));
		return false;
	}

	sfc_ef100_rx_debug(rxq, "RxQ got event %08x:%08x at %u (%#x)",
			   EFX_QWORD_FIELD(*ev, EFX_DWORD_1),
			   EFX_QWORD_FIELD(*ev, EFX_DWORD_0),
			   rxq->evq_read_ptr,
			   rxq->evq_read_ptr & rxq->ptr_mask);

	rxq->evq_read_ptr++;
	return true;
}
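
/*
 * Phase bit illustration, assuming a 512-entry event queue:
 * evq_phase_bit_shift = rte_bsf32(512) = 9, so bit 9 of evq_read_ptr
 * flips every time the ring wraps. An event is treated as present only
 * when the phase bit written by the NIC matches
 * (evq_read_ptr >> evq_phase_bit_shift) & 1, which distinguishes new
 * events from stale ones left over from the previous lap.
 */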
static uint16_t
sfc_ef100_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(rx_queue);
	struct rte_mbuf ** const rx_pkts_end = &rx_pkts[nb_pkts];
	efx_qword_t rx_ev;

	rx_pkts = sfc_ef100_rx_process_ready_pkts(rxq, rx_pkts, rx_pkts_end);

	if (unlikely(rxq->flags &
		     (SFC_EF100_RXQ_NOT_RUNNING | SFC_EF100_RXQ_EXCEPTION)))
		goto done;

	while (rx_pkts != rx_pkts_end && sfc_ef100_rx_get_event(rxq, &rx_ev)) {
		rxq->ready_pkts +=
			EFX_QWORD_FIELD(rx_ev, ESF_GZ_EV_RXPKTS_NUM_PKT);
		rx_pkts = sfc_ef100_rx_process_ready_pkts(rxq, rx_pkts,
							  rx_pkts_end);
	}

	/* It is not a problem if we refill in the case of exception */
	sfc_ef100_rx_qrefill(rxq);

done:
	return nb_pkts - (rx_pkts_end - rx_pkts);
}
static const uint32_t *
sfc_ef100_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
{
	static const uint32_t ef100_native_ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_QINQ,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_TUNNEL_NVGRE,
		RTE_PTYPE_TUNNEL_GENEVE,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_UNKNOWN
	};

	return ef100_native_ptypes;
}
static sfc_dp_rx_qdesc_npending_t sfc_ef100_rx_qdesc_npending;
static unsigned int
sfc_ef100_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
{
	return 0;
}

static sfc_dp_rx_qdesc_status_t sfc_ef100_rx_qdesc_status;
static int
sfc_ef100_rx_qdesc_status(__rte_unused struct sfc_dp_rxq *dp_rxq,
			  __rte_unused uint16_t offset)
{
	return -ENOTSUP;
}
static sfc_dp_rx_get_dev_info_t sfc_ef100_rx_get_dev_info;
static void
sfc_ef100_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
{
	/*
	 * Number of descriptors just defines maximum number of pushed
	 * descriptors (fill level).
	 */
	dev_info->rx_desc_lim.nb_min = SFC_RX_REFILL_BULK;
	dev_info->rx_desc_lim.nb_align = SFC_RX_REFILL_BULK;
}
static sfc_dp_rx_qsize_up_rings_t sfc_ef100_rx_qsize_up_rings;
static int
sfc_ef100_rx_qsize_up_rings(uint16_t nb_rx_desc,
			    struct sfc_dp_rx_hw_limits *limits,
			    __rte_unused struct rte_mempool *mb_pool,
			    unsigned int *rxq_entries,
			    unsigned int *evq_entries,
			    unsigned int *rxq_max_fill_level)
{
	/*
	 * rte_ethdev API guarantees that the number meets min, max and
	 * alignment requirements.
	 */
	if (nb_rx_desc <= limits->rxq_min_entries)
		*rxq_entries = limits->rxq_min_entries;
	else
		*rxq_entries = rte_align32pow2(nb_rx_desc);

	*evq_entries = *rxq_entries;

	*rxq_max_fill_level = RTE_MIN(nb_rx_desc,
				      SFC_EF100_RXQ_LIMIT(*evq_entries));
	return 0;
}
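
/*
 * Sizing example with assumed limits: nb_rx_desc = 1000 and
 * limits->rxq_min_entries = 512 give rxq_entries = evq_entries =
 * rte_align32pow2(1000) = 1024 and rxq_max_fill_level =
 * RTE_MIN(1000, SFC_EF100_RXQ_LIMIT(1024)) = RTE_MIN(1000, 1021) = 1000.
 */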
static uint64_t
sfc_ef100_mk_mbuf_rearm_data(uint16_t port_id, uint16_t prefix_size)
{
	struct rte_mbuf m;

	memset(&m, 0, sizeof(m));

	rte_mbuf_refcnt_set(&m, 1);
	m.data_off = RTE_PKTMBUF_HEADROOM + prefix_size;
	m.nb_segs = 1;
	m.port = port_id;

	/* rearm_data covers structure members filled in above */
	rte_compiler_barrier();
	RTE_BUILD_BUG_ON(sizeof(m.rearm_data[0]) != sizeof(uint64_t));
	return m.rearm_data[0];
}
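
/*
 * Usage sketch (assumed 22-byte Rx prefix): the template packs
 * data_off = RTE_PKTMBUF_HEADROOM + 22, refcnt = 1, nb_segs = 1 and
 * port = port_id into one 64-bit word, so the datapath re-initialises
 * all four mbuf fields of a received packet with a single store:
 *
 *	pkt->rearm_data[0] = rxq->rearm_data;
 */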
static sfc_dp_rx_qcreate_t sfc_ef100_rx_qcreate;
static int
sfc_ef100_rx_qcreate(uint16_t port_id, uint16_t queue_id,
		     const struct rte_pci_addr *pci_addr, int socket_id,
		     const struct sfc_dp_rx_qcreate_info *info,
		     struct sfc_dp_rxq **dp_rxqp)
{
	struct sfc_ef100_rxq *rxq;
	int rc;

	rc = EINVAL;
	if (info->rxq_entries != info->evq_entries)
		goto fail_rxq_args;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-ef100-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	rxq->sw_ring = rte_calloc_socket("sfc-ef100-rxq-sw_ring",
					 info->rxq_entries,
					 sizeof(*rxq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL)
		goto fail_desc_alloc;

	rxq->flags |= SFC_EF100_RXQ_NOT_RUNNING;
	rxq->ptr_mask = info->rxq_entries - 1;
	rxq->evq_phase_bit_shift = rte_bsf32(info->evq_entries);
	rxq->evq_hw_ring = info->evq_hw_ring;
	rxq->max_fill_level = info->max_fill_level;
	rxq->refill_threshold = info->refill_threshold;
	rxq->rearm_data =
		sfc_ef100_mk_mbuf_rearm_data(port_id, info->prefix_size);
	rxq->prefix_size = info->prefix_size;
	rxq->buf_size = info->buf_size;
	rxq->refill_mb_pool = info->refill_mb_pool;
	rxq->rxq_hw_ring = info->rxq_hw_ring;
	rxq->doorbell = (volatile uint8_t *)info->mem_bar +
			ER_GZ_RX_RING_DOORBELL_OFST +
			(info->hw_index << info->vi_window_shift);

	sfc_ef100_rx_debug(rxq, "RxQ doorbell is %p", rxq->doorbell);

	*dp_rxqp = &rxq->dp;
	return 0;

fail_desc_alloc:
	rte_free(rxq);

fail_rxq_alloc:
fail_rxq_args:
	return rc;
}
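
/*
 * Doorbell address example with assumed values: hw_index = 2 and
 * vi_window_shift = 13 (an 8 KiB VI window) place the queue's doorbell
 * at mem_bar + (2 << 13) + ER_GZ_RX_RING_DOORBELL_OFST, i.e. at the Rx
 * doorbell offset within the third VI window.
 */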
static sfc_dp_rx_qdestroy_t sfc_ef100_rx_qdestroy;
static void
sfc_ef100_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);

	rte_free(rxq->sw_ring);
	rte_free(rxq);
}
static sfc_dp_rx_qstart_t sfc_ef100_rx_qstart;
static int
sfc_ef100_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
{
	struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);

	SFC_ASSERT(rxq->completed == 0);
	SFC_ASSERT(rxq->added == 0);

	sfc_ef100_rx_qrefill(rxq);

	rxq->evq_read_ptr = evq_read_ptr;

	rxq->flags |= SFC_EF100_RXQ_STARTED;
	rxq->flags &= ~(SFC_EF100_RXQ_NOT_RUNNING | SFC_EF100_RXQ_EXCEPTION);

	return 0;
}
static sfc_dp_rx_qstop_t sfc_ef100_rx_qstop;
static void
sfc_ef100_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
{
	struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);

	rxq->flags |= SFC_EF100_RXQ_NOT_RUNNING;

	*evq_read_ptr = rxq->evq_read_ptr;
}
static sfc_dp_rx_qrx_ev_t sfc_ef100_rx_qrx_ev;
static bool
sfc_ef100_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
{
	__rte_unused struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);

	SFC_ASSERT(rxq->flags & SFC_EF100_RXQ_NOT_RUNNING);

	/*
	 * It is safe to ignore Rx event since we free all mbufs on
	 * queue purge anyway.
	 */

	return false;
}
static sfc_dp_rx_qpurge_t sfc_ef100_rx_qpurge;
static void
sfc_ef100_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);
	unsigned int i;
	struct sfc_ef100_rx_sw_desc *rxd;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_ring[i & rxq->ptr_mask];
		rte_mbuf_raw_free(rxd->mbuf);
		rxd->mbuf = NULL;
	}

	rxq->completed = rxq->added = 0;
	rxq->ready_pkts = 0;

	rxq->flags &= ~SFC_EF100_RXQ_STARTED;
}
struct sfc_dp_rx sfc_ef100_rx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EF100,
		.type		= SFC_DP_RX,
		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF100,
	},
	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS,
	.dev_offload_capa	= 0,
	.queue_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
				  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
				  DEV_RX_OFFLOAD_SCATTER,
	.get_dev_info		= sfc_ef100_rx_get_dev_info,
	.qsize_up_rings		= sfc_ef100_rx_qsize_up_rings,
	.qcreate		= sfc_ef100_rx_qcreate,
	.qdestroy		= sfc_ef100_rx_qdestroy,
	.qstart			= sfc_ef100_rx_qstart,
	.qstop			= sfc_ef100_rx_qstop,
	.qrx_ev			= sfc_ef100_rx_qrx_ev,
	.qpurge			= sfc_ef100_rx_qpurge,
	.supported_ptypes_get	= sfc_ef100_supported_ptypes_get,
	.qdesc_npending		= sfc_ef100_rx_qdesc_npending,
	.qdesc_status		= sfc_ef100_rx_qdesc_status,
	.pkt_burst		= sfc_ef100_recv_pkts,
};