/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */
/* EF10 native datapath implementation */

#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_mbuf_ptype.h>
#include <rte_mbuf.h>
#include <rte_io.h>

#include "efx_types.h"
#include "efx.h"
#include "efx_regs_ef10.h"

#include "sfc_debug.h"
#include "sfc_tweak.h"
#include "sfc_dp_rx.h"
#include "sfc_kvargs.h"
#include "sfc_ef10.h"

#define SFC_EF10_RX_EV_ENCAP_SUPPORT	1
#include "sfc_ef10_rx_ev.h"
#define sfc_ef10_rx_err(dpq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)

#define sfc_ef10_rx_info(dpq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, INFO, dpq, __VA_ARGS__)
/**
 * Maximum number of descriptors/buffers in the Rx ring.
 * It should guarantee that the corresponding event queue never
 * overfills. The EF10 native datapath uses an event queue of the same
 * size as the Rx queue. The maximum number of events on the datapath
 * can be estimated as the number of Rx queue entries (one event per
 * Rx buffer in the worst case) plus Rx error and flush events.
 */
#define SFC_EF10_RXQ_LIMIT(_ndesc) \
	((_ndesc) - 1 /* head must not step on tail */ - \
	 (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
	 1 /* Rx error */ - 1 /* flush */)
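
/*
 * Worked example, assuming 64-byte cache lines and 8-byte events
 * (i.e. SFC_EF10_EV_PER_CACHE_LINE == 8): a 512-entry ring may hold
 * at most 512 - 1 - (8 - 1) - 1 - 1 = 502 pushed descriptors.
 */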
struct sfc_ef10_rx_sw_desc {
	struct rte_mbuf			*mbuf;
};
struct sfc_ef10_rxq {
	/* Used on data path */
	unsigned int			flags;
#define SFC_EF10_RXQ_STARTED		0x1
#define SFC_EF10_RXQ_NOT_RUNNING	0x2
#define SFC_EF10_RXQ_EXCEPTION		0x4
#define SFC_EF10_RXQ_RSS_HASH		0x8
#define SFC_EF10_RXQ_FLAG_INTR_EN	0x10
	unsigned int			ptr_mask;
	unsigned int			pending;
	unsigned int			completed;
	unsigned int			evq_read_ptr;
	unsigned int			evq_read_ptr_primed;
	efx_qword_t			*evq_hw_ring;
	struct sfc_ef10_rx_sw_desc	*sw_ring;
	uint64_t			rearm_data;
	struct rte_mbuf			*scatter_pkt;
	volatile void			*evq_prime;
	uint16_t			prefix_size;

	/* Used on refill */
	uint16_t			buf_size;
	unsigned int			added;
	unsigned int			max_fill_level;
	unsigned int			refill_threshold;
	struct rte_mempool		*refill_mb_pool;
	efx_qword_t			*rxq_hw_ring;
	volatile void			*doorbell;

	/* Datapath receive queue anchor */
	struct sfc_dp_rxq		dp;
};
static inline struct sfc_ef10_rxq *
sfc_ef10_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
{
	return container_of(dp_rxq, struct sfc_ef10_rxq, dp);
}
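
/*
 * Prime the event queue: tell the NIC up to which entry events have
 * been processed so that the next interrupt may be raised. The primed
 * read pointer is remembered to avoid redundant prime operations.
 */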
static void
sfc_ef10_rx_qprime(struct sfc_ef10_rxq *rxq)
{
	sfc_ef10_ev_qprime(rxq->evq_prime, rxq->evq_read_ptr, rxq->ptr_mask);
	rxq->evq_read_ptr_primed = rxq->evq_read_ptr;
}
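
/*
 * Refill the Rx ring with mbufs from the mempool in bulks of
 * SFC_RX_REFILL_BULK and push the new descriptors to the NIC via the
 * doorbell. Refill is skipped while free space is below the refill
 * threshold.
 */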
static void
sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
{
	const unsigned int ptr_mask = rxq->ptr_mask;
	const uint32_t buf_size = rxq->buf_size;
	unsigned int free_space;
	unsigned int bulks;
	void *objs[SFC_RX_REFILL_BULK];
	unsigned int added = rxq->added;

	RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0);

	free_space = rxq->max_fill_level - (added - rxq->completed);

	if (free_space < rxq->refill_threshold)
		return;

	bulks = free_space / RTE_DIM(objs);
	/* refill_threshold guarantees that bulks is positive */
	SFC_ASSERT(bulks > 0);

	do {
		unsigned int id;
		unsigned int i;

		if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
						  RTE_DIM(objs)) < 0)) {
			struct rte_eth_dev_data *dev_data =
				rte_eth_devices[rxq->dp.dpq.port_id].data;

			/*
			 * It is hardly a safe way to increment the counter
			 * from different contexts, but all PMDs do it.
			 */
			dev_data->rx_mbuf_alloc_failed += RTE_DIM(objs);
			/* Return if we have posted nothing yet */
			if (added == rxq->added)
				return;
			/* Push what has been posted so far */
			break;
		}

		for (i = 0, id = added & ptr_mask;
		     i < RTE_DIM(objs);
		     ++i, ++id) {
			struct rte_mbuf *m = objs[i];
			struct sfc_ef10_rx_sw_desc *rxd;
			rte_iova_t phys_addr;

			__rte_mbuf_raw_sanity_check(m);

			SFC_ASSERT((id & ~ptr_mask) == 0);
			rxd = &rxq->sw_ring[id];
			rxd->mbuf = m;

			/*
			 * Avoid writing to the mbuf here. It is cheaper to
			 * do it when we receive the packet and fill in
			 * nearby structure members.
			 */

			phys_addr = rte_mbuf_data_iova_default(m);
			EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
			    ESF_DZ_RX_KER_BYTE_CNT, buf_size,
			    ESF_DZ_RX_KER_BUF_ADDR, phys_addr);
		}

		added += RTE_DIM(objs);
	} while (--bulks > 0);

	SFC_ASSERT(rxq->added != added);
	rxq->added = added;
	sfc_ef10_rx_qpush(rxq->doorbell, added, ptr_mask,
			  &rxq->dp.dpq.rx_dbells);
}
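
/*
 * Prefetch the data needed to process the descriptor that follows the
 * current one: the software descriptor ring entry (once per cache
 * line), the next mbuf structure and its pseudo-header.
 */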
static void
sfc_ef10_rx_prefetch_next(struct sfc_ef10_rxq *rxq, unsigned int next_id)
{
	struct rte_mbuf *next_mbuf;

	/* Prefetch next bunch of software descriptors */
	if ((next_id % (RTE_CACHE_LINE_SIZE / sizeof(rxq->sw_ring[0]))) == 0)
		rte_prefetch0(&rxq->sw_ring[next_id]);

	/*
	 * It looks strange to prefetch depending on previously prefetched
	 * data, but measurements show that it is really efficient and
	 * increases packet rate.
	 */
	next_mbuf = rxq->sw_ring[next_id].mbuf;
	if (likely(next_mbuf != NULL)) {
		/* Prefetch the next mbuf structure */
		rte_mbuf_prefetch_part1(next_mbuf);

		/*
		 * Prefetch the pseudo-header of the next packet
		 * (data_off is not filled in yet). The data itself may
		 * not be ready yet, but we hope it is.
		 */
		rte_prefetch0((uint8_t *)next_mbuf->buf_addr +
			      RTE_PKTMBUF_HEADROOM);
	}
}
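
/*
 * Deliver the mbufs that are already completed (their events were
 * processed on a previous burst) to the caller's array. Returns the
 * position in the array just past the delivered packets.
 */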
static struct rte_mbuf **
sfc_ef10_rx_pending(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts,
		    uint16_t nb_pkts)
{
	uint16_t n_rx_pkts = RTE_MIN(nb_pkts, rxq->pending - rxq->completed);

	SFC_ASSERT(rxq->pending == rxq->completed || rxq->scatter_pkt == NULL);

	if (n_rx_pkts != 0) {
		unsigned int completed = rxq->completed;

		rxq->completed = completed + n_rx_pkts;

		do {
			*rx_pkts++ =
				rxq->sw_ring[completed++ & rxq->ptr_mask].mbuf;
		} while (completed != rxq->completed);
	}

	return rx_pkts;
}
/*
 * The Rx pseudo-header (aka Rx prefix) accessors below rely on the
 * following layout of the fields.
 */
static const efx_rx_prefix_layout_t sfc_ef10_rx_prefix_layout = {
	.erpl_fields	= {
		[EFX_RX_PREFIX_FIELD_RSS_HASH]	=
		    { 0, sizeof(uint32_t) * CHAR_BIT, B_FALSE },
		[EFX_RX_PREFIX_FIELD_LENGTH]	=
		    { 8 * CHAR_BIT, sizeof(uint16_t) * CHAR_BIT, B_FALSE },
	}
};
static uint16_t
sfc_ef10_rx_pseudo_hdr_get_len(const uint8_t *pseudo_hdr)
{
	return rte_le_to_cpu_16(*(const uint16_t *)&pseudo_hdr[8]);
}

static uint32_t
sfc_ef10_rx_pseudo_hdr_get_hash(const uint8_t *pseudo_hdr)
{
	return rte_le_to_cpu_32(*(const uint32_t *)pseudo_hdr);
}
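
/*
 * Process one Rx event. A single event may complete a number of merged
 * descriptors. Handles Rx aborts, ECC/CRC errors (buffers are freed
 * without delivery), continuation of a scattered packet, and fills in
 * the mbufs for the descriptors that became ready.
 */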
static struct rte_mbuf **
sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
			  struct rte_mbuf **rx_pkts,
			  struct rte_mbuf ** const rx_pkts_end)
{
	const unsigned int ptr_mask = rxq->ptr_mask;
	unsigned int pending = rxq->pending;
	unsigned int ready;
	struct sfc_ef10_rx_sw_desc *rxd;
	struct rte_mbuf *m;
	struct rte_mbuf *m0;
	const uint8_t *pseudo_hdr;
	uint16_t seg_len;

	ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - pending) &
		EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);

	if (ready == 0) {
		/*
		 * Rx abort - there were not enough descriptors for the
		 * Rx packet
		 */
		rte_pktmbuf_free(rxq->scatter_pkt);
		rxq->scatter_pkt = NULL;
		return rx_pkts;
	}

	rxq->pending = pending + ready;

	if (rx_ev.eq_u64[0] &
	    rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
			     (1ull << ESF_DZ_RX_ECRC_ERR_LBN))) {
		SFC_ASSERT(rxq->completed == pending);
		do {
			rxd = &rxq->sw_ring[pending++ & ptr_mask];
			rte_mbuf_raw_free(rxd->mbuf);
		} while (pending != rxq->pending);
		rxq->completed = pending;
		return rx_pkts;
	}

	/* If a scattered packet is in progress */
	if (rxq->scatter_pkt != NULL) {
		/* Events for scattered packet frags are not merged */
		SFC_ASSERT(ready == 1);
		SFC_ASSERT(rxq->completed == pending);

		/* There is no pseudo-header in scatter segments. */
		seg_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES);

		rxd = &rxq->sw_ring[pending++ & ptr_mask];
		m = rxd->mbuf;

		__rte_mbuf_raw_sanity_check(m);

		m->data_off = RTE_PKTMBUF_HEADROOM;
		rte_pktmbuf_data_len(m) = seg_len;
		rte_pktmbuf_pkt_len(m) = seg_len;

		rxq->scatter_pkt->nb_segs++;
		rte_pktmbuf_pkt_len(rxq->scatter_pkt) += seg_len;
		rte_pktmbuf_lastseg(rxq->scatter_pkt)->next = m;

		if (~rx_ev.eq_u64[0] &
		    rte_cpu_to_le_64(1ull << ESF_DZ_RX_CONT_LBN)) {
			*rx_pkts++ = rxq->scatter_pkt;
			rxq->scatter_pkt = NULL;
		}
		rxq->completed = pending;
		return rx_pkts;
	}

	rxd = &rxq->sw_ring[pending++ & ptr_mask];

	sfc_ef10_rx_prefetch_next(rxq, pending & ptr_mask);

	m = rxd->mbuf;

	RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) != sizeof(rxq->rearm_data));
	m->rearm_data[0] = rxq->rearm_data;

	/* Classify packet based on Rx event */
	/* Mask RSS hash offload flag if RSS is not enabled */
	sfc_ef10_rx_ev_to_offloads(rx_ev, m,
				   (rxq->flags & SFC_EF10_RXQ_RSS_HASH) ?
				   ~0ull : ~PKT_RX_RSS_HASH);

	/* data_off already moved past the pseudo-header */
	pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;

	/*
	 * Always get the RSS hash from the pseudo-header to avoid
	 * condition/branching. Whether it is valid or not depends on
	 * PKT_RX_RSS_HASH in m->ol_flags.
	 */
	m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);

	if (ready == 1)
		seg_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES) -
			rxq->prefix_size;
	else
		seg_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
	SFC_ASSERT(seg_len > 0);
	rte_pktmbuf_data_len(m) = seg_len;
	rte_pktmbuf_pkt_len(m) = seg_len;

	SFC_ASSERT(m->next == NULL);

	if (~rx_ev.eq_u64[0] & rte_cpu_to_le_64(1ull << ESF_DZ_RX_CONT_LBN)) {
		*rx_pkts++ = m;
		rxq->completed = pending;
	} else {
		/* Events with CONT bit are not merged */
		SFC_ASSERT(ready == 1);
		rxq->scatter_pkt = m;
		rxq->completed = pending;
		return rx_pkts;
	}

	/* Remember the mbuf to copy offload flags and packet type from */
	m0 = m;
	while (pending != rxq->pending) {
		rxd = &rxq->sw_ring[pending++ & ptr_mask];

		sfc_ef10_rx_prefetch_next(rxq, pending & ptr_mask);

		m = rxd->mbuf;

		if (rx_pkts != rx_pkts_end) {
			*rx_pkts++ = m;
			rxq->completed = pending;
		}

		RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) !=
				 sizeof(rxq->rearm_data));
		m->rearm_data[0] = rxq->rearm_data;

		/* Event-dependent information is the same */
		m->ol_flags = m0->ol_flags;
		m->packet_type = m0->packet_type;

		/* data_off already moved past the pseudo-header */
		pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;

		/*
		 * Always get the RSS hash from the pseudo-header to avoid
		 * condition/branching. Whether it is valid or not depends
		 * on PKT_RX_RSS_HASH in m->ol_flags.
		 */
		m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);

		seg_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
		SFC_ASSERT(seg_len > 0);
		rte_pktmbuf_data_len(m) = seg_len;
		rte_pktmbuf_pkt_len(m) = seg_len;

		SFC_ASSERT(m->next == NULL);
	}

	return rx_pkts;
}
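
/*
 * Fetch the next event from the event queue ring. Returns false if no
 * event is present or if a non-Rx event is seen; the latter is left in
 * place for the control path and marks the queue as being in the
 * exception state.
 */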
static bool
sfc_ef10_rx_get_event(struct sfc_ef10_rxq *rxq, efx_qword_t *rx_ev)
{
	*rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->ptr_mask];

	if (!sfc_ef10_ev_present(*rx_ev))
		return false;

	if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
		     FSE_AZ_EV_CODE_RX_EV)) {
		/*
		 * Do not move read_ptr to keep the event for exception
		 * handling by the control path.
		 */
		rxq->flags |= SFC_EF10_RXQ_EXCEPTION;
		sfc_ef10_rx_err(&rxq->dp.dpq,
				"RxQ exception at EvQ read ptr %#x",
				rxq->evq_read_ptr);
		return false;
	}

	rxq->evq_read_ptr++;
	return true;
}
static uint16_t
sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(rx_queue);
	struct rte_mbuf ** const rx_pkts_end = &rx_pkts[nb_pkts];
	unsigned int evq_old_read_ptr;
	efx_qword_t rx_ev;

	rx_pkts = sfc_ef10_rx_pending(rxq, rx_pkts, nb_pkts);

	if (unlikely(rxq->flags &
		     (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
		goto done;

	evq_old_read_ptr = rxq->evq_read_ptr;
	while (rx_pkts != rx_pkts_end && sfc_ef10_rx_get_event(rxq, &rx_ev)) {
		/*
		 * DROP_EVENT is internal to the NIC, software should
		 * never see it and, therefore, may ignore it.
		 */
		rx_pkts = sfc_ef10_rx_process_event(rxq, rx_ev,
						    rx_pkts, rx_pkts_end);
	}

	sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->ptr_mask, evq_old_read_ptr,
			   rxq->evq_read_ptr);

	/* It is not a problem if we refill in the case of an exception */
	sfc_ef10_rx_qrefill(rxq);

	if ((rxq->flags & SFC_EF10_RXQ_FLAG_INTR_EN) &&
	    rxq->evq_read_ptr_primed != rxq->evq_read_ptr)
		sfc_ef10_rx_qprime(rxq);

done:
	return nb_pkts - (rx_pkts_end - rx_pkts);
}
static const uint32_t *
sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
{
	static const uint32_t ef10_native_ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_QINQ,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};
	static const uint32_t ef10_overlay_ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_QINQ,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_TUNNEL_NVGRE,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L2_ETHER_QINQ,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	/*
	 * The function returns a static set of supported packet types,
	 * so we cannot build it dynamically based on the supported tunnel
	 * encapsulations and must limit it to known sets.
	 */
	switch (tunnel_encaps) {
	case (1u << EFX_TUNNEL_PROTOCOL_VXLAN |
	      1u << EFX_TUNNEL_PROTOCOL_GENEVE |
	      1u << EFX_TUNNEL_PROTOCOL_NVGRE):
		return ef10_overlay_ptypes;
	default:
		SFC_GEN_LOG(ERR,
			"Unexpected set of supported tunnel encapsulations: %#x",
			tunnel_encaps);
		/* FALLTHROUGH */
	case 0:
		return ef10_native_ptypes;
	}
}
static sfc_dp_rx_qdesc_npending_t sfc_ef10_rx_qdesc_npending;
static unsigned int
sfc_ef10_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
	efx_qword_t rx_ev;
	const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
	unsigned int pending = rxq->pending;
	unsigned int ready;

	if (unlikely(rxq->flags &
		     (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
		goto done;

	while (sfc_ef10_rx_get_event(rxq, &rx_ev)) {
		ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) -
			 pending) &
			EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
		pending += ready;
	}

	/*
	 * The function does not process events, so return the event queue
	 * read pointer to the original position to allow the events that
	 * were read to be processed later.
	 */
	rxq->evq_read_ptr = evq_old_read_ptr;

done:
	return pending - rxq->completed;
}
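
/*
 * Map a descriptor offset to its status: DONE if covered by pending
 * events, AVAIL if posted to the NIC but not yet completed, otherwise
 * UNAVAIL.
 */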
static sfc_dp_rx_qdesc_status_t sfc_ef10_rx_qdesc_status;
static int
sfc_ef10_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
{
	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
	unsigned int npending = sfc_ef10_rx_qdesc_npending(dp_rxq);

	if (unlikely(offset > rxq->ptr_mask))
		return -EINVAL;

	if (offset < npending)
		return RTE_ETH_RX_DESC_DONE;

	if (offset < (rxq->added - rxq->completed))
		return RTE_ETH_RX_DESC_AVAIL;

	return RTE_ETH_RX_DESC_UNAVAIL;
}
static sfc_dp_rx_get_dev_info_t sfc_ef10_rx_get_dev_info;
static void
sfc_ef10_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
{
	/*
	 * The number of descriptors just defines the maximum number of
	 * pushed descriptors (fill level).
	 */
	dev_info->rx_desc_lim.nb_min = SFC_RX_REFILL_BULK;
	dev_info->rx_desc_lim.nb_align = SFC_RX_REFILL_BULK;
}
static sfc_dp_rx_qsize_up_rings_t sfc_ef10_rx_qsize_up_rings;
static int
sfc_ef10_rx_qsize_up_rings(uint16_t nb_rx_desc,
			   struct sfc_dp_rx_hw_limits *limits,
			   __rte_unused struct rte_mempool *mb_pool,
			   unsigned int *rxq_entries,
			   unsigned int *evq_entries,
			   unsigned int *rxq_max_fill_level)
{
	/*
	 * The rte_ethdev API guarantees that the number meets min, max and
	 * alignment requirements.
	 */
	if (nb_rx_desc <= limits->rxq_min_entries)
		*rxq_entries = limits->rxq_min_entries;
	else
		*rxq_entries = rte_align32pow2(nb_rx_desc);

	*evq_entries = *rxq_entries;

	*rxq_max_fill_level = RTE_MIN(nb_rx_desc,
				      SFC_EF10_RXQ_LIMIT(*evq_entries));

	return 0;
}
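
/*
 * Precompute the 64-bit rearm_data word that covers the mbuf structure
 * members rewritten on every received packet (reference count, data
 * offset and the like). A single 8-byte store then re-initializes them
 * on the datapath instead of several per-field writes.
 */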
static uint64_t
sfc_ef10_mk_mbuf_rearm_data(uint16_t port_id, uint16_t prefix_size)
{
	struct rte_mbuf m;

	memset(&m, 0, sizeof(m));

	rte_mbuf_refcnt_set(&m, 1);
	m.data_off = RTE_PKTMBUF_HEADROOM + prefix_size;
	m.nb_segs = 1;
	m.port = port_id;

	/* rearm_data covers structure members filled in above */
	rte_compiler_barrier();
	RTE_BUILD_BUG_ON(sizeof(m.rearm_data[0]) != sizeof(uint64_t));
	return m.rearm_data[0];
}
static sfc_dp_rx_qcreate_t sfc_ef10_rx_qcreate;
static int
sfc_ef10_rx_qcreate(uint16_t port_id, uint16_t queue_id,
		    const struct rte_pci_addr *pci_addr, int socket_id,
		    const struct sfc_dp_rx_qcreate_info *info,
		    struct sfc_dp_rxq **dp_rxqp)
{
	struct sfc_ef10_rxq *rxq;
	int rc;

	rc = EINVAL;
	if (info->rxq_entries != info->evq_entries)
		goto fail_rxq_args;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
					 info->rxq_entries,
					 sizeof(*rxq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL)
		goto fail_desc_alloc;

	rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;
	if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
		rxq->flags |= SFC_EF10_RXQ_RSS_HASH;
	rxq->ptr_mask = info->rxq_entries - 1;
	rxq->evq_hw_ring = info->evq_hw_ring;
	rxq->max_fill_level = info->max_fill_level;
	rxq->refill_threshold = info->refill_threshold;
	rxq->rearm_data =
		sfc_ef10_mk_mbuf_rearm_data(port_id, info->prefix_size);
	rxq->prefix_size = info->prefix_size;
	rxq->buf_size = info->buf_size;
	rxq->refill_mb_pool = info->refill_mb_pool;
	rxq->rxq_hw_ring = info->rxq_hw_ring;
	rxq->doorbell = (volatile uint8_t *)info->mem_bar +
			ER_DZ_RX_DESC_UPD_REG_OFST +
			(info->hw_index << info->vi_window_shift);
	rxq->evq_prime = (volatile uint8_t *)info->mem_bar +
			 ER_DZ_EVQ_RPTR_REG_OFST +
			 (info->evq_hw_index << info->vi_window_shift);

	sfc_ef10_rx_info(&rxq->dp.dpq, "RxQ doorbell is %p", rxq->doorbell);

	*dp_rxqp = &rxq->dp;
	return 0;

fail_desc_alloc:
	rte_free(rxq);

fail_rxq_alloc:
fail_rxq_args:
	return rc;
}
static sfc_dp_rx_qdestroy_t sfc_ef10_rx_qdestroy;
static void
sfc_ef10_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

	rte_free(rxq->sw_ring);
	rte_free(rxq);
}
static sfc_dp_rx_qstart_t sfc_ef10_rx_qstart;
static int
sfc_ef10_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr,
		   const efx_rx_prefix_layout_t *pinfo)
{
	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

	SFC_ASSERT(rxq->completed == 0);
	SFC_ASSERT(rxq->pending == 0);
	SFC_ASSERT(rxq->added == 0);

	if (pinfo->erpl_length != rxq->prefix_size ||
	    efx_rx_prefix_layout_check(pinfo, &sfc_ef10_rx_prefix_layout) != 0)
		return ENOTSUP;

	sfc_ef10_rx_qrefill(rxq);

	rxq->evq_read_ptr = evq_read_ptr;

	rxq->flags |= SFC_EF10_RXQ_STARTED;
	rxq->flags &= ~(SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION);

	if (rxq->flags & SFC_EF10_RXQ_FLAG_INTR_EN)
		sfc_ef10_rx_qprime(rxq);

	return 0;
}
static sfc_dp_rx_qstop_t sfc_ef10_rx_qstop;
static void
sfc_ef10_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
{
	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

	rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;

	*evq_read_ptr = rxq->evq_read_ptr;
}
static sfc_dp_rx_qrx_ev_t sfc_ef10_rx_qrx_ev;
static bool
sfc_ef10_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
{
	__rte_unused struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

	SFC_ASSERT(rxq->flags & SFC_EF10_RXQ_NOT_RUNNING);

	/*
	 * It is safe to ignore the Rx event since we free all mbufs on
	 * queue purge anyway.
	 */
	return false;
}
static sfc_dp_rx_qpurge_t sfc_ef10_rx_qpurge;
static void
sfc_ef10_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
	unsigned int i;
	struct sfc_ef10_rx_sw_desc *rxd;

	rte_pktmbuf_free(rxq->scatter_pkt);
	rxq->scatter_pkt = NULL;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_ring[i & rxq->ptr_mask];
		rte_mbuf_raw_free(rxd->mbuf);
		rxd->mbuf = NULL;
	}

	rxq->completed = rxq->pending = rxq->added = 0;

	rxq->flags &= ~SFC_EF10_RXQ_STARTED;
}
static sfc_dp_rx_intr_enable_t sfc_ef10_rx_intr_enable;
static int
sfc_ef10_rx_intr_enable(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

	rxq->flags |= SFC_EF10_RXQ_FLAG_INTR_EN;
	if (rxq->flags & SFC_EF10_RXQ_STARTED)
		sfc_ef10_rx_qprime(rxq);

	return 0;
}
static sfc_dp_rx_intr_disable_t sfc_ef10_rx_intr_disable;
static int
sfc_ef10_rx_intr_disable(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

	/* Cannot disarm, just disable rearm */
	rxq->flags &= ~SFC_EF10_RXQ_FLAG_INTR_EN;

	return 0;
}
struct sfc_dp_rx sfc_ef10_rx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EF10,
		.type		= SFC_DP_RX,
		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
	},
	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS |
				  SFC_DP_RX_FEAT_INTR,
	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
				  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				  DEV_RX_OFFLOAD_RSS_HASH,
	.queue_offload_capa	= DEV_RX_OFFLOAD_SCATTER,
	.get_dev_info		= sfc_ef10_rx_get_dev_info,
	.qsize_up_rings		= sfc_ef10_rx_qsize_up_rings,
	.qcreate		= sfc_ef10_rx_qcreate,
	.qdestroy		= sfc_ef10_rx_qdestroy,
	.qstart			= sfc_ef10_rx_qstart,
	.qstop			= sfc_ef10_rx_qstop,
	.qrx_ev			= sfc_ef10_rx_qrx_ev,
	.qpurge			= sfc_ef10_rx_qpurge,
	.supported_ptypes_get	= sfc_ef10_supported_ptypes_get,
	.qdesc_npending		= sfc_ef10_rx_qdesc_npending,
	.qdesc_status		= sfc_ef10_rx_qdesc_status,
	.intr_enable		= sfc_ef10_rx_intr_enable,
	.intr_disable		= sfc_ef10_rx_intr_disable,
	.pkt_burst		= sfc_ef10_recv_pkts,
};