/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

/* EF10 native datapath implementation */
#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_mbuf_ptype.h>
#include <rte_mbuf.h>

#include "efx.h"
#include "efx_types.h"
#include "efx_regs_ef10.h"

#include "sfc_tweak.h"
#include "sfc_dp_rx.h"
#include "sfc_kvargs.h"
#include "sfc_ef10.h"

#define SFC_EF10_RX_EV_ENCAP_SUPPORT	1
#include "sfc_ef10_rx_ev.h"
#define sfc_ef10_rx_err(dpq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)
/**
 * Maximum number of descriptors/buffers in the Rx ring.
 * It should guarantee that the corresponding event queue never overfills.
 * The EF10 native datapath uses an event queue of the same size as the
 * Rx queue. The maximum number of events on the datapath can be estimated
 * as the number of Rx queue entries (one event per Rx buffer in the worst
 * case) plus Rx error and flush events.
 */
#define SFC_EF10_RXQ_LIMIT(_ndesc) \
	((_ndesc) - 1 /* head must not step on tail */ - \
	 (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
	 1 /* Rx error */ - 1 /* flush */)
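
/*
 * Worked example (illustrative): with a 64-byte cache line and 8-byte
 * events, SFC_EF10_EV_PER_CACHE_LINE is 8, so a 4096-entry ring may hold
 * at most 4096 - 1 - 7 - 1 - 1 = 4086 posted buffers.
 */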
struct sfc_ef10_rx_sw_desc {
	struct rte_mbuf			*mbuf;
};
struct sfc_ef10_rxq {
	/* Used on data path */
	unsigned int			flags;
#define SFC_EF10_RXQ_STARTED		0x1
#define SFC_EF10_RXQ_NOT_RUNNING	0x2
#define SFC_EF10_RXQ_EXCEPTION		0x4
#define SFC_EF10_RXQ_RSS_HASH		0x8
	unsigned int			ptr_mask;
	unsigned int			pending;
	unsigned int			completed;
	unsigned int			evq_read_ptr;
	efx_qword_t			*evq_hw_ring;
	struct sfc_ef10_rx_sw_desc	*sw_ring;
	uint64_t			rearm_data;
	uint16_t			prefix_size;

	/* Used on refill */
	uint16_t			buf_size;
	unsigned int			added;
	unsigned int			max_fill_level;
	unsigned int			refill_threshold;
	struct rte_mempool		*refill_mb_pool;
	efx_qword_t			*rxq_hw_ring;
	volatile void			*doorbell;

	/* Datapath receive queue anchor */
	struct sfc_dp_rxq		dp;
};
static inline struct sfc_ef10_rxq *
sfc_ef10_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
{
	return container_of(dp_rxq, struct sfc_ef10_rxq, dp);
}
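
/*
 * Refill the Rx ring: allocate mbufs from the mempool in bulks of
 * SFC_RX_REFILL_BULK, write buffer address/size into the hardware
 * descriptors and push the new write pointer through the doorbell.
 */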
static void
sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
{
	const unsigned int ptr_mask = rxq->ptr_mask;
	const uint32_t buf_size = rxq->buf_size;
	unsigned int free_space;
	unsigned int bulks;
	void *objs[SFC_RX_REFILL_BULK];
	unsigned int added = rxq->added;

	RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0);

	free_space = rxq->max_fill_level - (added - rxq->completed);

	if (free_space < rxq->refill_threshold)
		return;

	bulks = free_space / RTE_DIM(objs);
	/* refill_threshold guarantees that bulks is positive */
	SFC_ASSERT(bulks > 0);

	do {
		unsigned int id;
		unsigned int i;

		if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
						  RTE_DIM(objs)) < 0)) {
			struct rte_eth_dev_data *dev_data =
				rte_eth_devices[rxq->dp.dpq.port_id].data;

			/*
			 * It is hardly a safe way to increment the counter
			 * from different contexts, but all PMDs do it.
			 */
			dev_data->rx_mbuf_alloc_failed += RTE_DIM(objs);
			/* Return if we have posted nothing yet */
			if (added == rxq->added)
				return;
			/* Push what has been posted so far */
			break;
		}

		for (i = 0, id = added & ptr_mask;
		     i < RTE_DIM(objs);
		     ++i, ++id) {
			struct rte_mbuf *m = objs[i];
			struct sfc_ef10_rx_sw_desc *rxd;
			rte_iova_t phys_addr;

			MBUF_RAW_ALLOC_CHECK(m);

			SFC_ASSERT((id & ~ptr_mask) == 0);
			rxd = &rxq->sw_ring[id];
			rxd->mbuf = m;

			/*
			 * Avoid writing to the mbuf. It is cheaper to do it
			 * when we receive the packet and fill in nearby
			 * structure members.
			 */

			phys_addr = rte_mbuf_data_iova_default(m);
			EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
			    ESF_DZ_RX_KER_BYTE_CNT, buf_size,
			    ESF_DZ_RX_KER_BUF_ADDR, phys_addr);
		}

		added += RTE_DIM(objs);
	} while (--bulks > 0);

	SFC_ASSERT(rxq->added != added);
	rxq->added = added;
	sfc_ef10_rx_qpush(rxq->doorbell, added, ptr_mask);
}
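
/*
 * Prefetch data needed to process the next descriptor: the next cache
 * line of the software ring (when crossing a cache line boundary), the
 * next mbuf structure and its prospective pseudo header.
 */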
static void
sfc_ef10_rx_prefetch_next(struct sfc_ef10_rxq *rxq, unsigned int next_id)
{
	struct rte_mbuf *next_mbuf;

	/* Prefetch next bunch of software descriptors */
	if ((next_id % (RTE_CACHE_LINE_SIZE / sizeof(rxq->sw_ring[0]))) == 0)
		rte_prefetch0(&rxq->sw_ring[next_id]);

	/*
	 * It looks strange to prefetch depending on previous prefetch
	 * data, but measurements show that it is really efficient and
	 * increases packet rate.
	 */
	next_mbuf = rxq->sw_ring[next_id].mbuf;
	if (likely(next_mbuf != NULL)) {
		/* Prefetch the next mbuf structure */
		rte_mbuf_prefetch_part1(next_mbuf);

		/* Prefetch pseudo header of the next packet */
		/* data_off is not filled in yet */
		/* Yes, data could be not ready yet, but we hope */
		rte_prefetch0((uint8_t *)next_mbuf->buf_addr +
			      RTE_PKTMBUF_HEADROOM);
	}
}
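
/*
 * Deliver mbufs that were already marked as pending (their Rx events
 * were fully processed) on a previous call but did not fit into the
 * caller's array.
 */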
static struct rte_mbuf **
sfc_ef10_rx_pending(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts,
		    uint16_t nb_pkts)
{
	uint16_t n_rx_pkts = RTE_MIN(nb_pkts, rxq->pending - rxq->completed);

	if (n_rx_pkts != 0) {
		unsigned int completed = rxq->completed;

		rxq->completed = completed + n_rx_pkts;
		do {
			*rx_pkts++ =
				rxq->sw_ring[completed++ & rxq->ptr_mask].mbuf;
		} while (completed != rxq->completed);
	}
	return rx_pkts;
}
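
/*
 * The hardware prepends a pseudo header (Rx prefix) to each packet.
 * The helpers below read the little-endian RSS hash at byte offset 0
 * and the 16-bit packet length at byte offset 8 of that prefix.
 */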
static uint16_t
sfc_ef10_rx_pseudo_hdr_get_len(const uint8_t *pseudo_hdr)
{
	return rte_le_to_cpu_16(*(const uint16_t *)&pseudo_hdr[8]);
}

static uint32_t
sfc_ef10_rx_pseudo_hdr_get_hash(const uint8_t *pseudo_hdr)
{
	return rte_le_to_cpu_32(*(const uint32_t *)pseudo_hdr);
}

static struct rte_mbuf **
sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
			  struct rte_mbuf **rx_pkts,
			  struct rte_mbuf ** const rx_pkts_end)
{
	const unsigned int ptr_mask = rxq->ptr_mask;
	unsigned int pending = rxq->pending;
	unsigned int ready;
	struct sfc_ef10_rx_sw_desc *rxd;
	struct rte_mbuf *m;
	struct rte_mbuf *m0;
	const uint8_t *pseudo_hdr;
	uint16_t pkt_len;

	ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - pending) &
		EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	SFC_ASSERT(ready > 0);

	rxq->pending = pending + ready;
	if (rx_ev.eq_u64[0] &
	    rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
			     (1ull << ESF_DZ_RX_ECRC_ERR_LBN))) {
		SFC_ASSERT(rxq->completed == pending);
		do {
			rxd = &rxq->sw_ring[pending++ & ptr_mask];
			rte_mbuf_raw_free(rxd->mbuf);
		} while (pending != rxq->pending);
		rxq->completed = pending;
		return rx_pkts;
	}

	rxd = &rxq->sw_ring[pending++ & ptr_mask];

	sfc_ef10_rx_prefetch_next(rxq, pending & ptr_mask);

	m = rxd->mbuf;

	*rx_pkts++ = m;
	rxq->completed = pending;

	RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) != sizeof(rxq->rearm_data));
	m->rearm_data[0] = rxq->rearm_data;

	/* Classify packet based on Rx event */
	/* Mask RSS hash offload flag if RSS is not enabled */
	sfc_ef10_rx_ev_to_offloads(rx_ev, m,
				   (rxq->flags & SFC_EF10_RXQ_RSS_HASH) ?
				   ~0ull : ~PKT_RX_RSS_HASH);

	/* data_off already moved past pseudo header */
	pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;

	/*
	 * Always get RSS hash from pseudo header to avoid
	 * condition/branching. If it is valid or not depends on
	 * PKT_RX_RSS_HASH in m->ol_flags.
	 */
	m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);

	if (ready == 1)
		pkt_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES) -
			rxq->prefix_size;
	else
		pkt_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
	SFC_ASSERT(pkt_len > 0);
	rte_pktmbuf_data_len(m) = pkt_len;
	rte_pktmbuf_pkt_len(m) = pkt_len;

	SFC_ASSERT(m->next == NULL);

	/* Remember mbuf to copy offload flags and packet type from */
	m0 = m;
	while (pending != rxq->pending) {
		rxd = &rxq->sw_ring[pending++ & ptr_mask];

		sfc_ef10_rx_prefetch_next(rxq, pending & ptr_mask);

		m = rxd->mbuf;

		if (rx_pkts != rx_pkts_end) {
			*rx_pkts++ = m;
			rxq->completed = pending;
		}

		RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) !=
				 sizeof(rxq->rearm_data));
		m->rearm_data[0] = rxq->rearm_data;

		/* Event-dependent information is the same */
		m->ol_flags = m0->ol_flags;
		m->packet_type = m0->packet_type;

		/* data_off already moved past pseudo header */
		pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;

		/*
		 * Always get RSS hash from pseudo header to avoid
		 * condition/branching. If it is valid or not depends on
		 * PKT_RX_RSS_HASH in m->ol_flags.
		 */
		m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);

		pkt_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
		SFC_ASSERT(pkt_len > 0);
		rte_pktmbuf_data_len(m) = pkt_len;
		rte_pktmbuf_pkt_len(m) = pkt_len;

		SFC_ASSERT(m->next == NULL);
	}

	return rx_pkts;
}
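
/*
 * Fetch the next event from the event queue. Returns false if there is
 * no new event or if a non-Rx event is seen; the latter is left in the
 * ring and reported as an exception for the control path to handle.
 */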
static bool
sfc_ef10_rx_get_event(struct sfc_ef10_rxq *rxq, efx_qword_t *rx_ev)
{
	*rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->ptr_mask];

	if (!sfc_ef10_ev_present(*rx_ev))
		return false;

	if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
		     FSE_AZ_EV_CODE_RX_EV)) {
		/*
		 * Do not move read_ptr to keep the event for exception
		 * handling by the control path.
		 */
		rxq->flags |= SFC_EF10_RXQ_EXCEPTION;
		sfc_ef10_rx_err(&rxq->dp.dpq,
				"RxQ exception at EvQ read ptr %#x",
				rxq->evq_read_ptr);
		return false;
	}
	rxq->evq_read_ptr++;
	return true;
}
static uint16_t
sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(rx_queue);
	struct rte_mbuf ** const rx_pkts_end = &rx_pkts[nb_pkts];
	unsigned int evq_old_read_ptr;
	efx_qword_t rx_ev;

	rx_pkts = sfc_ef10_rx_pending(rxq, rx_pkts, nb_pkts);

	if (unlikely(rxq->flags &
		     (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
		goto done;

	evq_old_read_ptr = rxq->evq_read_ptr;
	while (rx_pkts != rx_pkts_end && sfc_ef10_rx_get_event(rxq, &rx_ev)) {
		/*
		 * DROP_EVENT is internal to the NIC, software should
		 * never see it and, therefore, may ignore it.
		 */
		rx_pkts = sfc_ef10_rx_process_event(rxq, rx_ev,
						    rx_pkts, rx_pkts_end);
	}

	sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->ptr_mask, evq_old_read_ptr,
			   rxq->evq_read_ptr);

	/* It is not a problem if we refill in the case of exception */
	sfc_ef10_rx_qrefill(rxq);

done:
	return nb_pkts - (rx_pkts_end - rx_pkts);
}
static const uint32_t *
sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
{
	static const uint32_t ef10_native_ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_QINQ,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};
	static const uint32_t ef10_overlay_ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_QINQ,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_TUNNEL_NVGRE,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L2_ETHER_QINQ,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	/*
	 * The function returns a static set of supported packet types,
	 * so we can't build it dynamically based on supported tunnel
	 * encapsulations and should limit it to known sets.
	 */
	switch (tunnel_encaps) {
	case (1u << EFX_TUNNEL_PROTOCOL_VXLAN |
	      1u << EFX_TUNNEL_PROTOCOL_GENEVE |
	      1u << EFX_TUNNEL_PROTOCOL_NVGRE):
		return ef10_overlay_ptypes;
	default:
		SFC_GENERIC_LOG(ERR,
			"Unexpected set of supported tunnel encapsulations: %#x",
			tunnel_encaps);
		/* FALLTHROUGH */
	case 0:
		return ef10_native_ptypes;
	}
}
static sfc_dp_rx_qdesc_npending_t sfc_ef10_rx_qdesc_npending;
static unsigned int
sfc_ef10_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
{
	/*
	 * Correct implementation requires EvQ polling and events
	 * processing (keeping all ready mbufs in prepared).
	 */
	return -ENOTSUP;
}

static sfc_dp_rx_qdesc_status_t sfc_ef10_rx_qdesc_status;
static int
sfc_ef10_rx_qdesc_status(__rte_unused struct sfc_dp_rxq *dp_rxq,
			 __rte_unused uint16_t offset)
{
	return -ENOTSUP;
}

static sfc_dp_rx_get_dev_info_t sfc_ef10_rx_get_dev_info;
static void
sfc_ef10_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
{
	/*
	 * Number of descriptors just defines maximum number of pushed
	 * descriptors (fill level).
	 */
	dev_info->rx_desc_lim.nb_min = SFC_RX_REFILL_BULK;
	dev_info->rx_desc_lim.nb_align = SFC_RX_REFILL_BULK;
}

static sfc_dp_rx_qsize_up_rings_t sfc_ef10_rx_qsize_up_rings;
static int
sfc_ef10_rx_qsize_up_rings(uint16_t nb_rx_desc,
			   __rte_unused struct rte_mempool *mb_pool,
			   unsigned int *rxq_entries,
			   unsigned int *evq_entries,
			   unsigned int *rxq_max_fill_level)
{
	/*
	 * rte_ethdev API guarantees that the number meets min, max and
	 * alignment requirements.
	 */
	if (nb_rx_desc <= EFX_RXQ_MINNDESCS)
		*rxq_entries = EFX_RXQ_MINNDESCS;
	else
		*rxq_entries = rte_align32pow2(nb_rx_desc);

	*evq_entries = *rxq_entries;

	*rxq_max_fill_level = RTE_MIN(nb_rx_desc,
				      SFC_EF10_RXQ_LIMIT(*evq_entries));
	return 0;
}
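
/*
 * Build the 64-bit rearm_data word (refcnt, nb_segs, port and data_off)
 * once at queue creation, so that the datapath can initialise these
 * mbuf fields with a single store per packet.
 */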
static uint64_t
sfc_ef10_mk_mbuf_rearm_data(uint16_t port_id, uint16_t prefix_size)
{
	struct rte_mbuf m;

	memset(&m, 0, sizeof(m));
	rte_mbuf_refcnt_set(&m, 1);
	m.data_off = RTE_PKTMBUF_HEADROOM + prefix_size;
	m.nb_segs = 1;
	m.port = port_id;

	/* rearm_data covers structure members filled in above */
	rte_compiler_barrier();
	RTE_BUILD_BUG_ON(sizeof(m.rearm_data[0]) != sizeof(uint64_t));
	return m.rearm_data[0];
}
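
/*
 * Create the datapath Rx queue: allocate the queue structure and the
 * software descriptor ring, capture queue parameters from the generic
 * layer and compute the per-VI doorbell address within the mapped BAR.
 */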
static sfc_dp_rx_qcreate_t sfc_ef10_rx_qcreate;
static int
sfc_ef10_rx_qcreate(uint16_t port_id, uint16_t queue_id,
		    const struct rte_pci_addr *pci_addr, int socket_id,
		    const struct sfc_dp_rx_qcreate_info *info,
		    struct sfc_dp_rxq **dp_rxqp)
{
	struct sfc_ef10_rxq *rxq;
	int rc;

	rc = EINVAL;
	if (info->rxq_entries != info->evq_entries)
		goto fail_rxq_args;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
					 info->rxq_entries,
					 sizeof(*rxq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL)
		goto fail_desc_alloc;

	rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;
	if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
		rxq->flags |= SFC_EF10_RXQ_RSS_HASH;
	rxq->ptr_mask = info->rxq_entries - 1;
	rxq->evq_hw_ring = info->evq_hw_ring;
	rxq->max_fill_level = info->max_fill_level;
	rxq->refill_threshold = info->refill_threshold;
	rxq->rearm_data =
		sfc_ef10_mk_mbuf_rearm_data(port_id, info->prefix_size);
	rxq->prefix_size = info->prefix_size;
	rxq->buf_size = info->buf_size;
	rxq->refill_mb_pool = info->refill_mb_pool;
	rxq->rxq_hw_ring = info->rxq_hw_ring;
	rxq->doorbell = (volatile uint8_t *)info->mem_bar +
			ER_DZ_RX_DESC_UPD_REG_OFST +
			(info->hw_index << info->vi_window_shift);

	*dp_rxqp = &rxq->dp;
	return 0;

fail_desc_alloc:
	rte_free(rxq);

fail_rxq_alloc:
fail_rxq_args:
	return rc;
}
static sfc_dp_rx_qdestroy_t sfc_ef10_rx_qdestroy;
static void
sfc_ef10_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

	rte_free(rxq->sw_ring);
	rte_free(rxq);
}
static sfc_dp_rx_qstart_t sfc_ef10_rx_qstart;
static int
sfc_ef10_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
{
	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

	SFC_ASSERT(rxq->completed == 0);
	SFC_ASSERT(rxq->pending == 0);
	SFC_ASSERT(rxq->added == 0);

	sfc_ef10_rx_qrefill(rxq);

	rxq->evq_read_ptr = evq_read_ptr;

	rxq->flags |= SFC_EF10_RXQ_STARTED;
	rxq->flags &= ~(SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION);

	return 0;
}
static sfc_dp_rx_qstop_t sfc_ef10_rx_qstop;
static void
sfc_ef10_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
{
	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

	rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;

	*evq_read_ptr = rxq->evq_read_ptr;
}
static sfc_dp_rx_qrx_ev_t sfc_ef10_rx_qrx_ev;
static bool
sfc_ef10_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
{
	__rte_unused struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);

	SFC_ASSERT(rxq->flags & SFC_EF10_RXQ_NOT_RUNNING);
	/*
	 * It is safe to ignore Rx event since we free all mbufs on
	 * queue purge anyway.
	 */
	return false;
}
static sfc_dp_rx_qpurge_t sfc_ef10_rx_qpurge;
static void
sfc_ef10_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
	unsigned int i;
	struct sfc_ef10_rx_sw_desc *rxd;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_ring[i & rxq->ptr_mask];
		rte_mbuf_raw_free(rxd->mbuf);
		rxd->mbuf = NULL;
	}

	rxq->completed = rxq->pending = rxq->added = 0;

	rxq->flags &= ~SFC_EF10_RXQ_STARTED;
}
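
/* EF10 native datapath Rx operations exported to the generic sfc code */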
struct sfc_dp_rx sfc_ef10_rx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EF10,
		.type		= SFC_DP_RX,
		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
	},
	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS |
				  SFC_DP_RX_FEAT_TUNNELS |
				  SFC_DP_RX_FEAT_CHECKSUM,
	.get_dev_info		= sfc_ef10_rx_get_dev_info,
	.qsize_up_rings		= sfc_ef10_rx_qsize_up_rings,
	.qcreate		= sfc_ef10_rx_qcreate,
	.qdestroy		= sfc_ef10_rx_qdestroy,
	.qstart			= sfc_ef10_rx_qstart,
	.qstop			= sfc_ef10_rx_qstop,
	.qrx_ev			= sfc_ef10_rx_qrx_ev,
	.qpurge			= sfc_ef10_rx_qpurge,
	.supported_ptypes_get	= sfc_ef10_supported_ptypes_get,
	.qdesc_npending		= sfc_ef10_rx_qdesc_npending,
	.qdesc_status		= sfc_ef10_rx_qdesc_status,
	.pkt_burst		= sfc_ef10_recv_pkts,
};