/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

/* EF10 native datapath implementation */

#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_mbuf_ptype.h>
#include <rte_mbuf.h>

#include "efx_types.h"
#include "efx_regs_ef10.h"

#include "sfc_tweak.h"
#include "sfc_dp_rx.h"
#include "sfc_kvargs.h"
#include "sfc_ef10.h"

#define sfc_ef10_rx_err(dpq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)

/*
 * Maximum number of descriptors/buffers in the Rx ring.
 * It should guarantee that the corresponding event queue never overfills.
 * The EF10 native datapath uses an event queue of the same size as the Rx
 * queue. The maximum number of events on the datapath can be estimated as
 * the number of Rx queue entries (one event per Rx buffer in the worst
 * case) plus Rx error and flush events.
 */

#define SFC_EF10_RXQ_LIMIT(_ndesc) \
	((_ndesc) - 1 /* head must not step on tail */ - \
	 (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
	 1 /* Rx error */ - 1 /* flush */)
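
/*
 * Worked example (assuming a 64-byte cache line and 8-byte events, so
 * SFC_EF10_EV_PER_CACHE_LINE is 8): SFC_EF10_RXQ_LIMIT(512) is
 * 512 - 1 - 7 - 1 - 1 = 502 pushed Rx descriptors at most.
 */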

struct sfc_ef10_rx_sw_desc {
	struct rte_mbuf			*mbuf;
};

struct sfc_ef10_rxq {
	/* Used on data path */
	unsigned int			flags;
#define SFC_EF10_RXQ_STARTED		0x1
#define SFC_EF10_RXQ_NOT_RUNNING	0x2
#define SFC_EF10_RXQ_EXCEPTION		0x4
#define SFC_EF10_RXQ_RSS_HASH		0x8
	unsigned int			ptr_mask;
	unsigned int			prepared;
	unsigned int			completed;
	unsigned int			evq_read_ptr;
	efx_qword_t			*evq_hw_ring;
	struct sfc_ef10_rx_sw_desc	*sw_ring;
	uint64_t			rearm_data;
	uint16_t			prefix_size;

	/* Used on refill */
	unsigned int			added;
	unsigned int			max_fill_level;
	unsigned int			refill_threshold;
	uint16_t			buf_size;
	struct rte_mempool		*refill_mb_pool;
	efx_qword_t			*rxq_hw_ring;
	volatile void			*doorbell;

	/* Datapath receive queue anchor */
	struct sfc_dp_rxq		dp;
};

static inline struct sfc_ef10_rxq *
sfc_ef10_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
{
	return container_of(dp_rxq, struct sfc_ef10_rxq, dp);
}

static void
sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
{
	const unsigned int ptr_mask = rxq->ptr_mask;
	const uint32_t buf_size = rxq->buf_size;
	unsigned int free_space;
	unsigned int bulks;
	void *objs[SFC_RX_REFILL_BULK];
	unsigned int added = rxq->added;

	RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0);

	free_space = rxq->max_fill_level - (added - rxq->completed);

	if (free_space < rxq->refill_threshold)
		return;

	bulks = free_space / RTE_DIM(objs);
	/* refill_threshold guarantees that bulks is positive */
	SFC_ASSERT(bulks > 0);

	do {
		unsigned int id;
		unsigned int i;

		if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
						  RTE_DIM(objs)) < 0)) {
			struct rte_eth_dev_data *dev_data =
				rte_eth_devices[rxq->dp.dpq.port_id].data;

			/*
			 * It is hardly a safe way to increment the counter
			 * from different contexts, but all PMDs do it.
			 */
			dev_data->rx_mbuf_alloc_failed += RTE_DIM(objs);
			/* Return if we have posted nothing yet */
			if (added == rxq->added)
				return;
			/* Push what has been posted so far */
			break;
		}

		for (i = 0, id = added & ptr_mask;
		     i < RTE_DIM(objs);
		     ++i, ++id) {
			struct rte_mbuf *m = objs[i];
			struct sfc_ef10_rx_sw_desc *rxd;
			rte_iova_t phys_addr;

			SFC_ASSERT((id & ~ptr_mask) == 0);
			rxd = &rxq->sw_ring[id];
			rxd->mbuf = m;

			/*
			 * Avoid writing to the mbuf. It is cheaper to do it
			 * when we receive the packet and fill in nearby
			 * structure members.
			 */

			phys_addr = rte_mbuf_data_iova_default(m);
			EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
			    ESF_DZ_RX_KER_BYTE_CNT, buf_size,
			    ESF_DZ_RX_KER_BUF_ADDR, phys_addr);
		}

		added += RTE_DIM(objs);
	} while (--bulks > 0);

	SFC_ASSERT(rxq->added != added);
	rxq->added = added;
	sfc_ef10_rx_qpush(rxq->doorbell, added, ptr_mask);
}
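
/*
 * Note: qrefill intentionally never touches the mbuf contents here (only
 * the software and hardware ring descriptors are written) and rings the
 * doorbell once per call, so the MMIO write cost is amortised over up to
 * free_space / SFC_RX_REFILL_BULK bulks.
 */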

static void
sfc_ef10_rx_prefetch_next(struct sfc_ef10_rxq *rxq, unsigned int next_id)
{
	struct rte_mbuf *next_mbuf;

	/* Prefetch the next bunch of software descriptors */
	if ((next_id % (RTE_CACHE_LINE_SIZE / sizeof(rxq->sw_ring[0]))) == 0)
		rte_prefetch0(&rxq->sw_ring[next_id]);

	/*
	 * It looks strange to prefetch depending on previously prefetched
	 * data, but measurements show that it is really efficient and
	 * increases packet rate.
	 */
	next_mbuf = rxq->sw_ring[next_id].mbuf;
	if (likely(next_mbuf != NULL)) {
		/* Prefetch the next mbuf structure */
		rte_mbuf_prefetch_part1(next_mbuf);

		/* Prefetch the pseudo header of the next packet */
		/* data_off is not filled in yet */
		/* Yes, the data may not be ready yet, but we hope it is */
		rte_prefetch0((uint8_t *)next_mbuf->buf_addr +
			      RTE_PKTMBUF_HEADROOM);
	}
}

static uint16_t
sfc_ef10_rx_prepared(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts,
		     uint16_t nb_pkts)
{
	uint16_t n_rx_pkts = RTE_MIN(nb_pkts, rxq->prepared);
	unsigned int completed = rxq->completed;
	unsigned int i;

	rxq->prepared -= n_rx_pkts;
	rxq->completed = completed + n_rx_pkts;

	for (i = 0; i < n_rx_pkts; ++i, ++completed)
		rx_pkts[i] = rxq->sw_ring[completed & rxq->ptr_mask].mbuf;

	return n_rx_pkts;
}
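
/*
 * Bookkeeping recap: "completed" is the free-running index of the next
 * software descriptor to hand out (masked with ptr_mask at use), while
 * "prepared" counts mbufs that were already filled in during event
 * processing but could not be returned because the caller's burst was
 * full; they are drained first on the next poll.
 */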

static void
sfc_ef10_rx_ev_to_offloads(const efx_qword_t rx_ev, struct rte_mbuf *m,
			   uint64_t ol_mask)
{
	uint32_t tun_ptype = 0;
	/* Which event bit is mapped to PKT_RX_IP_CKSUM_* */
	int8_t ip_csum_err_bit;
	/* Which event bit is mapped to PKT_RX_L4_CKSUM_* */
	int8_t l4_csum_err_bit;
	uint32_t l2_ptype = 0;
	uint32_t l3_ptype = 0;
	uint32_t l4_ptype = 0;
	uint64_t ol_flags = 0;

	if (unlikely(EFX_TEST_QWORD_BIT(rx_ev, ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))
		goto done;

	switch (EFX_QWORD_FIELD(rx_ev, ESF_EZ_RX_ENCAP_HDR)) {
	default:
		/* Unexpected encapsulation tag class */
		SFC_ASSERT(false);
		/* FALLTHROUGH */
	case ESE_EZ_ENCAP_HDR_NONE:
		break;
	case ESE_EZ_ENCAP_HDR_VXLAN:
		/*
		 * It is definitely UDP, but we have no information
		 * about IPv4 vs IPv6 and VLAN tagging.
		 */
		tun_ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP;
		break;
	case ESE_EZ_ENCAP_HDR_GRE:
		/*
		 * We have no information about IPv4 vs IPv6 and VLAN tagging.
		 */
		tun_ptype = RTE_PTYPE_TUNNEL_NVGRE;
		break;
	}

	if (tun_ptype == 0) {
		ip_csum_err_bit = ESF_DZ_RX_IPCKSUM_ERR_LBN;
		l4_csum_err_bit = ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN;
	} else {
		ip_csum_err_bit = ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN;
		l4_csum_err_bit = ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN;
		if (unlikely(EFX_TEST_QWORD_BIT(rx_ev,
						ESF_DZ_RX_IPCKSUM_ERR_LBN)))
			ol_flags |= PKT_RX_EIP_CKSUM_BAD;
	}

	switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_ETH_TAG_CLASS)) {
	case ESE_DZ_ETH_TAG_CLASS_NONE:
		l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER :
			RTE_PTYPE_INNER_L2_ETHER;
		break;
	case ESE_DZ_ETH_TAG_CLASS_VLAN1:
		l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_VLAN :
			RTE_PTYPE_INNER_L2_ETHER_VLAN;
		break;
	case ESE_DZ_ETH_TAG_CLASS_VLAN2:
		l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_QINQ :
			RTE_PTYPE_INNER_L2_ETHER_QINQ;
		break;
	default:
		/* Unexpected Eth tag class */
		SFC_ASSERT(false);
	}

	switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L3_CLASS)) {
	case ESE_DZ_L3_CLASS_IP4_FRAG:
		l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG :
			RTE_PTYPE_INNER_L4_FRAG;
		/* FALLTHROUGH */
	case ESE_DZ_L3_CLASS_IP4:
		l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV4_EXT_UNKNOWN :
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
		ol_flags |= PKT_RX_RSS_HASH |
			((EFX_TEST_QWORD_BIT(rx_ev, ip_csum_err_bit)) ?
			 PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
		break;
	case ESE_DZ_L3_CLASS_IP6_FRAG:
		l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG :
			RTE_PTYPE_INNER_L4_FRAG;
		/* FALLTHROUGH */
	case ESE_DZ_L3_CLASS_IP6:
		l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV6_EXT_UNKNOWN :
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
		ol_flags |= PKT_RX_RSS_HASH;
		break;
	case ESE_DZ_L3_CLASS_ARP:
		/* Override Layer 2 packet type */
		/* There is no ARP classification for inner packets */
		if (tun_ptype == 0)
			l2_ptype = RTE_PTYPE_L2_ETHER_ARP;
		break;
	default:
		/* Unexpected Layer 3 class */
		SFC_ASSERT(false);
	}

	/*
	 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only
	 * 2 bits wide on Medford2. Check it is safe to use the Medford2 field
	 * and values for all EF10 controllers.
	 */
	RTE_BUILD_BUG_ON(ESF_FZ_RX_L4_CLASS_LBN != ESF_DE_RX_L4_CLASS_LBN);
	switch (EFX_QWORD_FIELD(rx_ev, ESF_FZ_RX_L4_CLASS)) {
	case ESE_FZ_L4_CLASS_TCP:
		RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_TCP != ESE_DE_L4_CLASS_TCP);
		l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_TCP :
			RTE_PTYPE_INNER_L4_TCP;
		ol_flags |=
			(EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ?
			PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
		break;
	case ESE_FZ_L4_CLASS_UDP:
		RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_UDP != ESE_DE_L4_CLASS_UDP);
		l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_UDP :
			RTE_PTYPE_INNER_L4_UDP;
		ol_flags |=
			(EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ?
			PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
		break;
	case ESE_FZ_L4_CLASS_UNKNOWN:
		RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_UNKNOWN !=
				 ESE_DE_L4_CLASS_UNKNOWN);
		break;
	default:
		/* Unexpected Layer 4 class */
		SFC_ASSERT(false);
	}

done:
	m->ol_flags = ol_flags & ol_mask;
	m->packet_type = tun_ptype | l2_ptype | l3_ptype | l4_ptype;
}

static uint16_t
sfc_ef10_rx_pseudo_hdr_get_len(const uint8_t *pseudo_hdr)
{
	return rte_le_to_cpu_16(*(const uint16_t *)&pseudo_hdr[8]);
}

static uint32_t
sfc_ef10_rx_pseudo_hdr_get_hash(const uint8_t *pseudo_hdr)
{
	return rte_le_to_cpu_32(*(const uint32_t *)pseudo_hdr);
}
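
/*
 * As the two accessors above encode, the Rx prefix (pseudo header) that the
 * firmware prepends to each packet carries the little-endian RSS hash in its
 * first 4 bytes and a little-endian 16-bit packet length at byte offset 8.
 */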

static uint16_t
sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
			  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	const unsigned int ptr_mask = rxq->ptr_mask;
	unsigned int completed = rxq->completed;
	unsigned int ready;
	struct sfc_ef10_rx_sw_desc *rxd;
	struct rte_mbuf *m;
	struct rte_mbuf *m0;
	uint16_t n_rx_pkts;
	const uint8_t *pseudo_hdr;
	uint16_t pkt_len;

	ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - completed) &
		EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	SFC_ASSERT(ready > 0);

	if (rx_ev.eq_u64[0] &
	    rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
			     (1ull << ESF_DZ_RX_ECRC_ERR_LBN))) {
		SFC_ASSERT(rxq->prepared == 0);
		rxq->completed += ready;
		while (ready-- > 0) {
			rxd = &rxq->sw_ring[completed++ & ptr_mask];
			rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
		}
		return 0;
	}

	n_rx_pkts = RTE_MIN(ready, nb_pkts);
	rxq->prepared = ready - n_rx_pkts;
	rxq->completed += n_rx_pkts;

	rxd = &rxq->sw_ring[completed++ & ptr_mask];

	sfc_ef10_rx_prefetch_next(rxq, completed & ptr_mask);

	m = rxd->mbuf;

	*rx_pkts++ = m;

	RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) != sizeof(rxq->rearm_data));
	m->rearm_data[0] = rxq->rearm_data;

	/* Classify the packet based on the Rx event */
	/* Mask the RSS hash offload flag if RSS is not enabled */
	sfc_ef10_rx_ev_to_offloads(rx_ev, m,
				   (rxq->flags & SFC_EF10_RXQ_RSS_HASH) ?
				   ~0ull : ~PKT_RX_RSS_HASH);

	/* data_off already moved past the pseudo header */
	pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;

	/*
	 * Always get the RSS hash from the pseudo header to avoid
	 * condition/branching. Whether it is valid or not depends on
	 * PKT_RX_RSS_HASH in m->ol_flags.
	 */
	m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);

	if (ready == 1)
		pkt_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES) -
			rxq->prefix_size;
	else
		pkt_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
	SFC_ASSERT(pkt_len > 0);
	rte_pktmbuf_data_len(m) = pkt_len;
	rte_pktmbuf_pkt_len(m) = pkt_len;

	SFC_ASSERT(m->next == NULL);

	/* Remember the mbuf to copy offload flags and packet type from */
	m0 = m;
	for (--ready; ready > 0; --ready) {
		rxd = &rxq->sw_ring[completed++ & ptr_mask];

		sfc_ef10_rx_prefetch_next(rxq, completed & ptr_mask);

		m = rxd->mbuf;

		if (ready > rxq->prepared)
			*rx_pkts++ = m;

		RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) !=
				 sizeof(rxq->rearm_data));
		m->rearm_data[0] = rxq->rearm_data;

		/* Event-dependent information is the same */
		m->ol_flags = m0->ol_flags;
		m->packet_type = m0->packet_type;

		/* data_off already moved past the pseudo header */
		pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;

		/*
		 * Always get the RSS hash from the pseudo header to avoid
		 * condition/branching. Whether it is valid or not depends on
		 * PKT_RX_RSS_HASH in m->ol_flags.
		 */
		m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);

		pkt_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
		SFC_ASSERT(pkt_len > 0);
		rte_pktmbuf_data_len(m) = pkt_len;
		rte_pktmbuf_pkt_len(m) = pkt_len;

		SFC_ASSERT(m->next == NULL);
	}

	return n_rx_pkts;
}

static bool
sfc_ef10_rx_get_event(struct sfc_ef10_rxq *rxq, efx_qword_t *rx_ev)
{
	*rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->ptr_mask];

	if (!sfc_ef10_ev_present(*rx_ev))
		return false;

	if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
		     FSE_AZ_EV_CODE_RX_EV)) {
		/*
		 * Do not move read_ptr to keep the event for exception
		 * handling by the control path.
		 */
		rxq->flags |= SFC_EF10_RXQ_EXCEPTION;
		sfc_ef10_rx_err(&rxq->dp.dpq,
				"RxQ exception at EvQ read ptr %#x",
				rxq->evq_read_ptr);
		return false;
	}

	rxq->evq_read_ptr++;
	return true;
}

static uint16_t
sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(rx_queue);
	unsigned int evq_old_read_ptr;
	uint16_t n_rx_pkts;
	efx_qword_t rx_ev;

	if (unlikely(rxq->flags &
		     (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
		return 0;

	n_rx_pkts = sfc_ef10_rx_prepared(rxq, rx_pkts, nb_pkts);

	evq_old_read_ptr = rxq->evq_read_ptr;
	while (n_rx_pkts != nb_pkts && sfc_ef10_rx_get_event(rxq, &rx_ev)) {
		/*
		 * DROP_EVENT is internal to the NIC; software should
		 * never see it and, therefore, may ignore it.
		 */

		n_rx_pkts += sfc_ef10_rx_process_event(rxq, rx_ev,
						       rx_pkts + n_rx_pkts,
						       nb_pkts - n_rx_pkts);
	}

	sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->ptr_mask, evq_old_read_ptr,
			   rxq->evq_read_ptr);

	/* It is not a problem if we refill in the case of an exception */
	sfc_ef10_rx_qrefill(rxq);

	return n_rx_pkts;
}
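
/*
 * sfc_ef10_ev_qclear() resets the EvQ entries consumed in this poll back to
 * the "not present" pattern, so that the presence check keeps working after
 * the event ring wraps around.
 */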

static const uint32_t *
sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
{
	static const uint32_t ef10_native_ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_QINQ,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};
	static const uint32_t ef10_overlay_ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_QINQ,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_TUNNEL_NVGRE,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L2_ETHER_QINQ,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	/*
	 * The function returns a static set of supported packet types,
	 * so we cannot build it dynamically based on supported tunnel
	 * encapsulations and have to limit ourselves to known sets.
	 */
	switch (tunnel_encaps) {
	case (1u << EFX_TUNNEL_PROTOCOL_VXLAN |
	      1u << EFX_TUNNEL_PROTOCOL_GENEVE |
	      1u << EFX_TUNNEL_PROTOCOL_NVGRE):
		return ef10_overlay_ptypes;
	default:
		SFC_GEN_LOG(ERR,
			"Unexpected set of supported tunnel encapsulations: %#x",
			tunnel_encaps);
		/* FALLTHROUGH */
	case 0:
		return ef10_native_ptypes;
	}
}

static sfc_dp_rx_qdesc_npending_t sfc_ef10_rx_qdesc_npending;
static unsigned int
sfc_ef10_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
{
	/*
	 * A correct implementation requires EvQ polling and event
	 * processing (keeping all ready mbufs in prepared).
	 */
	return -ENOTSUP;
}

static sfc_dp_rx_qdesc_status_t sfc_ef10_rx_qdesc_status;
static int
sfc_ef10_rx_qdesc_status(__rte_unused struct sfc_dp_rxq *dp_rxq,
			 __rte_unused uint16_t offset)
{
	return -ENOTSUP;
}

static sfc_dp_rx_get_dev_info_t sfc_ef10_rx_get_dev_info;
static void
sfc_ef10_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
{
	/*
	 * The number of descriptors just defines the maximum number of
	 * pushed descriptors (fill level).
	 */
	dev_info->rx_desc_lim.nb_min = SFC_RX_REFILL_BULK;
	dev_info->rx_desc_lim.nb_align = SFC_RX_REFILL_BULK;
}

static sfc_dp_rx_qsize_up_rings_t sfc_ef10_rx_qsize_up_rings;
static int
sfc_ef10_rx_qsize_up_rings(uint16_t nb_rx_desc,
			   unsigned int *rxq_entries,
			   unsigned int *evq_entries,
			   unsigned int *rxq_max_fill_level)
{
	/*
	 * The rte_ethdev API guarantees that the number meets min, max and
	 * alignment requirements.
	 */
	if (nb_rx_desc <= EFX_RXQ_MINNDESCS)
		*rxq_entries = EFX_RXQ_MINNDESCS;
	else
		*rxq_entries = rte_align32pow2(nb_rx_desc);

	*evq_entries = *rxq_entries;

	*rxq_max_fill_level = RTE_MIN(nb_rx_desc,
				      SFC_EF10_RXQ_LIMIT(*evq_entries));

	return 0;
}
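
/*
 * Worked example: for nb_rx_desc = 1500 the Rx and event rings are both
 * rounded up to 2048 entries, while the fill level stays capped at
 * min(1500, SFC_EF10_RXQ_LIMIT(2048)) = 1500 descriptors.
 */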

static uint64_t
sfc_ef10_mk_mbuf_rearm_data(uint16_t port_id, uint16_t prefix_size)
{
	struct rte_mbuf m;

	memset(&m, 0, sizeof(m));

	rte_mbuf_refcnt_set(&m, 1);
	m.data_off = RTE_PKTMBUF_HEADROOM + prefix_size;
	m.nb_segs = 1;
	m.port = port_id;

	/* rearm_data covers structure members filled in above */
	rte_compiler_barrier();
	RTE_BUILD_BUG_ON(sizeof(m.rearm_data[0]) != sizeof(uint64_t));
	return m.rearm_data[0];
}
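
/*
 * The 64-bit value built here is stored verbatim into m->rearm_data[0] on
 * the receive path, so a single store re-initialises data_off, refcnt,
 * nb_segs and port for every received mbuf instead of four separate writes.
 */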

static sfc_dp_rx_qcreate_t sfc_ef10_rx_qcreate;
static int
sfc_ef10_rx_qcreate(uint16_t port_id, uint16_t queue_id,
		    const struct rte_pci_addr *pci_addr, int socket_id,
		    const struct sfc_dp_rx_qcreate_info *info,
		    struct sfc_dp_rxq **dp_rxqp)
{
	struct sfc_ef10_rxq *rxq;
	int rc;

	rc = EINVAL;
	if (info->rxq_entries != info->evq_entries)
		goto fail_rxq_args;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
					 info->rxq_entries,
					 sizeof(*rxq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL)
		goto fail_desc_alloc;

	rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;
	if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
		rxq->flags |= SFC_EF10_RXQ_RSS_HASH;
	rxq->ptr_mask = info->rxq_entries - 1;
	rxq->evq_hw_ring = info->evq_hw_ring;
	rxq->max_fill_level = info->max_fill_level;
	rxq->refill_threshold = info->refill_threshold;
	rxq->rearm_data =
		sfc_ef10_mk_mbuf_rearm_data(port_id, info->prefix_size);
	rxq->prefix_size = info->prefix_size;
	rxq->buf_size = info->buf_size;
	rxq->refill_mb_pool = info->refill_mb_pool;
	rxq->rxq_hw_ring = info->rxq_hw_ring;
	rxq->doorbell = (volatile uint8_t *)info->mem_bar +
			ER_DZ_RX_DESC_UPD_REG_OFST +
			(info->hw_index << info->vi_window_shift);

	*dp_rxqp = &rxq->dp;
	return 0;

fail_desc_alloc:
	rte_free(rxq);

fail_rxq_alloc:
fail_rxq_args:
	return rc;
}

static sfc_dp_rx_qdestroy_t sfc_ef10_rx_qdestroy;
static void
sfc_ef10_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

	rte_free(rxq->sw_ring);
	rte_free(rxq);
}

static sfc_dp_rx_qstart_t sfc_ef10_rx_qstart;
static int
sfc_ef10_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
{
	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

	rxq->prepared = 0;
	rxq->completed = rxq->added = 0;

	sfc_ef10_rx_qrefill(rxq);

	rxq->evq_read_ptr = evq_read_ptr;

	rxq->flags |= SFC_EF10_RXQ_STARTED;
	rxq->flags &= ~(SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION);

	return 0;
}

static sfc_dp_rx_qstop_t sfc_ef10_rx_qstop;
static void
sfc_ef10_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
{
	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

	rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;

	*evq_read_ptr = rxq->evq_read_ptr;
}

static sfc_dp_rx_qrx_ev_t sfc_ef10_rx_qrx_ev;
static bool
sfc_ef10_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
{
	__rte_unused struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

	SFC_ASSERT(rxq->flags & SFC_EF10_RXQ_NOT_RUNNING);

	/*
	 * It is safe to ignore the Rx event since we free all mbufs on
	 * queue purge anyway.
	 */
	return false;
}

static sfc_dp_rx_qpurge_t sfc_ef10_rx_qpurge;
static void
sfc_ef10_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
	unsigned int i;
	struct sfc_ef10_rx_sw_desc *rxd;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_ring[i & rxq->ptr_mask];
		rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
		rxd->mbuf = NULL;
	}

	rxq->flags &= ~SFC_EF10_RXQ_STARTED;
}

struct sfc_dp_rx sfc_ef10_rx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EF10,
		.type		= SFC_DP_RX,
		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
	},
	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS |
				  SFC_DP_RX_FEAT_TUNNELS,
	.get_dev_info		= sfc_ef10_rx_get_dev_info,
	.qsize_up_rings		= sfc_ef10_rx_qsize_up_rings,
	.qcreate		= sfc_ef10_rx_qcreate,
	.qdestroy		= sfc_ef10_rx_qdestroy,
	.qstart			= sfc_ef10_rx_qstart,
	.qstop			= sfc_ef10_rx_qstop,
	.qrx_ev			= sfc_ef10_rx_qrx_ev,
	.qpurge			= sfc_ef10_rx_qpurge,
	.supported_ptypes_get	= sfc_ef10_supported_ptypes_get,
	.qdesc_npending		= sfc_ef10_rx_qdesc_npending,
	.qdesc_status		= sfc_ef10_rx_qdesc_status,
	.pkt_burst		= sfc_ef10_recv_pkts,
};
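
/*
 * This datapath descriptor is registered with the generic sfc PMD and is
 * selected at probe time via a device argument, e.g. (illustrative command
 * line, assuming the "rx_datapath" kvarg declared in sfc_kvargs.h):
 *
 *   dpdk-testpmd -a 0000:01:00.0,rx_datapath=ef10 -- -i
 */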