/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

/* EF10 equal stride super-buffer receive native datapath implementation */
#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_mbuf_ptype.h>
#include <rte_mbuf.h>
#include <rte_io.h>

#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_regs_ef10.h"

#include "sfc_tweak.h"
#include "sfc_dp_rx.h"
#include "sfc_kvargs.h"
#include "sfc_ef10.h"
/* Tunnels are not supported */
#define SFC_EF10_RX_EV_ENCAP_SUPPORT	0
#include "sfc_ef10_rx_ev.h"

#define sfc_ef10_essb_rx_err(dpq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, ERR, dpq, __VA_ARGS__)

#define sfc_ef10_essb_rx_info(dpq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, INFO, dpq, __VA_ARGS__)

/*
 * Fake length for RXQ descriptors in equal stride super-buffer mode
 * to make hardware happy.
 */
#define SFC_EF10_ESSB_RX_FAKE_BUF_SIZE	32
/*
 * Maximum number of descriptors/buffers in the Rx ring.
 * It should guarantee that the corresponding event queue never overfills.
 */
#define SFC_EF10_ESSB_RXQ_LIMIT(_nevs) \
	((_nevs) - 1 /* head must not step on tail */ - \
	 (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
	 1 /* Rx error */ - 1 /* flush */)
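/*
 * For example (assuming SFC_EF10_EV_PER_CACHE_LINE is 8, i.e. a 64-byte
 * cache line holding 8-byte events), a 1024-entry event queue limits the
 * Rx ring to 1024 - 1 - 7 - 1 - 1 = 1014 buffers.
 */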
struct sfc_ef10_essb_rx_sw_desc {
	struct rte_mbuf			*first_mbuf;
};
struct sfc_ef10_essb_rxq {
	/* Used on data path */
	unsigned int			flags;
#define SFC_EF10_ESSB_RXQ_STARTED	0x1
#define SFC_EF10_ESSB_RXQ_NOT_RUNNING	0x2
#define SFC_EF10_ESSB_RXQ_EXCEPTION	0x4
	unsigned int			rxq_ptr_mask;
	unsigned int			block_size;
	unsigned int			buf_stride;
	unsigned int			bufs_ptr;
	unsigned int			completed;
	unsigned int			pending_id;
	unsigned int			bufs_pending;
	unsigned int			left_in_completed;
	unsigned int			left_in_pending;
	unsigned int			evq_read_ptr;
	unsigned int			evq_ptr_mask;
	efx_qword_t			*evq_hw_ring;
	struct sfc_ef10_essb_rx_sw_desc	*sw_ring;
	uint16_t			port_id;

	/* Used on refill */
	unsigned int			added;
	unsigned int			max_fill_level;
	unsigned int			refill_threshold;
	struct rte_mempool		*refill_mb_pool;
	efx_qword_t			*rxq_hw_ring;
	volatile void			*doorbell;

	/* Datapath receive queue anchor */
	struct sfc_dp_rxq		dp;
};
static inline struct sfc_ef10_essb_rxq *
sfc_ef10_essb_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
{
	return container_of(dp_rxq, struct sfc_ef10_essb_rxq, dp);
}

static struct rte_mbuf *
sfc_ef10_essb_next_mbuf(const struct sfc_ef10_essb_rxq *rxq,
			struct rte_mbuf *mbuf)
{
	return (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride);
}

static struct rte_mbuf *
sfc_ef10_essb_mbuf_by_index(const struct sfc_ef10_essb_rxq *rxq,
			    struct rte_mbuf *mbuf, unsigned int idx)
{
	return (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride);
}
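/*
 * Return the mbuf for the next buffer to be completed. When the current
 * contiguous block is exhausted, advance to the first mbuf of the next
 * block in the software ring.
 */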
static struct rte_mbuf *
sfc_ef10_essb_maybe_next_completed(struct sfc_ef10_essb_rxq *rxq)
{
	const struct sfc_ef10_essb_rx_sw_desc *rxd;

	if (rxq->left_in_completed != 0) {
		rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
		return sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
				rxq->block_size - rxq->left_in_completed);
	} else {
		/* Current block is exhausted, move to the next one */
		rxq->completed++;
		rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
		rxq->left_in_completed = rxq->block_size;
		return rxd->first_mbuf;
	}
}
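/*
 * Refill the Rx ring: dequeue contiguous blocks of mbufs from the bucket
 * mempool, post one Rx descriptor per block (pointing at the first buffer's
 * data area, with a fake length) and push the doorbell with the new write
 * pointer.
 */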
static void
sfc_ef10_essb_rx_qrefill(struct sfc_ef10_essb_rxq *rxq)
{
	const unsigned int rxq_ptr_mask = rxq->rxq_ptr_mask;
	unsigned int free_space;
	unsigned int bulks;
	void *mbuf_blocks[SFC_EF10_RX_WPTR_ALIGN];
	unsigned int added = rxq->added;

	free_space = rxq->max_fill_level - (added - rxq->completed);

	if (free_space < rxq->refill_threshold)
		return;

	bulks = free_space / RTE_DIM(mbuf_blocks);
	/* refill_threshold guarantees that bulks is positive */
	SFC_ASSERT(bulks > 0);

	do {
		unsigned int id;
		unsigned int i;

		if (unlikely(rte_mempool_get_contig_blocks(rxq->refill_mb_pool,
				mbuf_blocks, RTE_DIM(mbuf_blocks)) < 0)) {
			struct rte_eth_dev_data *dev_data =
				rte_eth_devices[rxq->port_id].data;

			/*
			 * It is hardly a safe way to increment the counter
			 * from different contexts, but all PMDs do it.
			 */
			dev_data->rx_mbuf_alloc_failed += RTE_DIM(mbuf_blocks);
			/* Return if we have posted nothing yet */
			if (added == rxq->added)
				return;
			/* Push what has been posted so far */
			break;
		}

		for (i = 0, id = added & rxq_ptr_mask;
		     i < RTE_DIM(mbuf_blocks);
		     ++i, ++id) {
			struct rte_mbuf *m = mbuf_blocks[i];
			struct sfc_ef10_essb_rx_sw_desc *rxd;

			SFC_ASSERT((id & ~rxq_ptr_mask) == 0);
			rxd = &rxq->sw_ring[id];
			rxd->first_mbuf = m;

			/* RX_KER_BYTE_CNT is ignored by firmware */
			EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
					     ESF_DZ_RX_KER_BYTE_CNT,
					     SFC_EF10_ESSB_RX_FAKE_BUF_SIZE,
					     ESF_DZ_RX_KER_BUF_ADDR,
					     rte_mbuf_data_iova_default(m));
		}

		added += RTE_DIM(mbuf_blocks);
	} while (--bulks > 0);

	SFC_ASSERT(rxq->added != added);
	rxq->added = added;
	sfc_ef10_rx_qpush(rxq->doorbell, added, rxq_ptr_mask);
}
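/*
 * Fetch the next event from the event queue ring. Return false if no new
 * event is present. Any event other than an Rx completion puts the queue
 * into the exception state and is left in place for later handling.
 */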
static bool
sfc_ef10_essb_rx_event_get(struct sfc_ef10_essb_rxq *rxq, efx_qword_t *rx_ev)
{
	*rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->evq_ptr_mask];

	if (!sfc_ef10_ev_present(*rx_ev))
		return false;

	if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
		     FSE_AZ_EV_CODE_RX_EV)) {
		/*
		 * Do not move read_ptr to keep the event for exception
		 * handling
		 */
		rxq->flags |= SFC_EF10_ESSB_RXQ_EXCEPTION;
		sfc_ef10_essb_rx_err(&rxq->dp.dpq,
				     "RxQ exception at EvQ read ptr %#x",
				     rxq->evq_read_ptr);
		return false;
	}

	rxq->evq_read_ptr++;
	return true;
}
static void
sfc_ef10_essb_rx_process_ev(struct sfc_ef10_essb_rxq *rxq, efx_qword_t rx_ev)
{
	unsigned int ready;

	ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) -
		 rxq->bufs_ptr) &
		EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);

	rxq->bufs_ptr += ready;
	rxq->bufs_pending += ready;

	SFC_ASSERT(ready > 0);
	do {
		const struct sfc_ef10_essb_rx_sw_desc *rxd;
		struct rte_mbuf *m;
		unsigned int todo_bufs;
		struct rte_mbuf *m0;

		rxd = &rxq->sw_ring[rxq->pending_id];
		m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
			rxq->block_size - rxq->left_in_pending);

		if (ready < rxq->left_in_pending) {
			todo_bufs = ready;
			ready = 0;
			rxq->left_in_pending -= todo_bufs;
		} else {
			todo_bufs = rxq->left_in_pending;
			ready -= todo_bufs;
			rxq->left_in_pending = rxq->block_size;
			if (rxq->pending_id != rxq->rxq_ptr_mask)
				rxq->pending_id++;
			else
				rxq->pending_id = 0;
		}

		SFC_ASSERT(todo_bufs > 0);
		--todo_bufs;

		sfc_ef10_rx_ev_to_offloads(rx_ev, m, ~0ull);

		/* Prefetch pseudo-header */
		rte_prefetch0((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);

		m0 = m;
		while (todo_bufs-- > 0) {
			m = sfc_ef10_essb_next_mbuf(rxq, m);
			m->ol_flags = m0->ol_flags;
			m->packet_type = m0->packet_type;
			/* Prefetch pseudo-header */
			rte_prefetch0((uint8_t *)m->buf_addr +
				      RTE_PKTMBUF_HEADROOM);
		}
	} while (ready > 0);
}
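/*
 * Hand over up to nb_pkts already completed buffers to the caller: parse
 * the ESSB pseudo-header prepended to each buffer to get the packet length,
 * RSS hash and flow mark, and fill in the corresponding mbuf fields.
 */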
static unsigned int
sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
			     struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	unsigned int n_rx_pkts = 0;
	unsigned int todo_bufs;
	struct rte_mbuf *m;

	while ((todo_bufs = RTE_MIN(nb_pkts - n_rx_pkts,
				    rxq->bufs_pending)) > 0) {
		m = sfc_ef10_essb_maybe_next_completed(rxq);

		todo_bufs = RTE_MIN(todo_bufs, rxq->left_in_completed);

		rxq->bufs_pending -= todo_bufs;
		rxq->left_in_completed -= todo_bufs;

		SFC_ASSERT(todo_bufs > 0);
		todo_bufs--;

		do {
			const efx_qword_t *qwordp;
			uint16_t pkt_len;

			rx_pkts[n_rx_pkts++] = m;

			/* Parse pseudo-header */
			qwordp = (const efx_qword_t *)
				((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);
			pkt_len =
				EFX_QWORD_FIELD(*qwordp,
						ES_EZ_ESSB_RX_PREFIX_DATA_LEN);

			m->data_off = RTE_PKTMBUF_HEADROOM +
				ES_EZ_ESSB_RX_PREFIX_LEN;
			m->port = rxq->port_id;

			rte_pktmbuf_pkt_len(m) = pkt_len;
			rte_pktmbuf_data_len(m) = pkt_len;

			m->ol_flags |=
				(PKT_RX_RSS_HASH *
				 !!EFX_TEST_QWORD_BIT(*qwordp,
					ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN)) |
				(PKT_RX_FDIR_ID *
				 !!EFX_TEST_QWORD_BIT(*qwordp,
					ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN)) |
				(PKT_RX_FDIR *
				 !!EFX_TEST_QWORD_BIT(*qwordp,
					ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN));

			/* EFX_QWORD_FIELD converts little-endian to CPU */
			m->hash.rss =
				EFX_QWORD_FIELD(*qwordp,
						ES_EZ_ESSB_RX_PREFIX_HASH);
			m->hash.fdir.hi =
				EFX_QWORD_FIELD(*qwordp,
						ES_EZ_ESSB_RX_PREFIX_MARK);

			m = sfc_ef10_essb_next_mbuf(rxq, m);
		} while (todo_bufs-- > 0);
	}

	return n_rx_pkts;
}
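/*
 * Burst receive: deliver already pending buffers first, then poll the event
 * queue and convert freshly completed buffers until the burst is full or no
 * more events are available. Finally clear processed events and refill the
 * Rx ring.
 */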
static uint16_t
sfc_ef10_essb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(rx_queue);
	const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
	uint16_t n_rx_pkts;
	efx_qword_t rx_ev;

	if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
				   SFC_EF10_ESSB_RXQ_EXCEPTION)))
		return 0;

	n_rx_pkts = sfc_ef10_essb_rx_get_pending(rxq, rx_pkts, nb_pkts);

	while (n_rx_pkts != nb_pkts &&
	       sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
		/*
		 * DROP_EVENT is internal to the NIC; software should never
		 * see it and, therefore, may ignore it.
		 */
		sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
		n_rx_pkts += sfc_ef10_essb_rx_get_pending(rxq,
							  rx_pkts + n_rx_pkts,
							  nb_pkts - n_rx_pkts);
	}

	sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
			   evq_old_read_ptr, rxq->evq_read_ptr);

	/* It is not a problem if we refill in the case of exception */
	sfc_ef10_essb_rx_qrefill(rxq);

	return n_rx_pkts;
}
static sfc_dp_rx_qdesc_npending_t sfc_ef10_essb_rx_qdesc_npending;
static unsigned int
sfc_ef10_essb_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
{
	/*
	 * Correct implementation requires EvQ polling and events
	 * processing.
	 */
	return -ENOTSUP;
}

static sfc_dp_rx_get_dev_info_t sfc_ef10_essb_rx_get_dev_info;
static void
sfc_ef10_essb_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
{
	/*
	 * Number of descriptors just defines maximum number of pushed
	 * descriptors (fill level).
	 */
	dev_info->rx_desc_lim.nb_min = SFC_RX_REFILL_BULK;
	dev_info->rx_desc_lim.nb_align = SFC_RX_REFILL_BULK;
}

static sfc_dp_rx_pool_ops_supported_t sfc_ef10_essb_rx_pool_ops_supported;
static int
sfc_ef10_essb_rx_pool_ops_supported(const char *pool)
{
	SFC_ASSERT(pool != NULL);

	if (strcmp(pool, "bucket") == 0)
		return 0;

	return -ENOTSUP;
}
static sfc_dp_rx_qsize_up_rings_t sfc_ef10_essb_rx_qsize_up_rings;
static int
sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc,
				struct rte_mempool *mb_pool,
				unsigned int *rxq_entries,
				unsigned int *evq_entries,
				unsigned int *rxq_max_fill_level)
{
	int rc;
	struct rte_mempool_info mp_info;
	unsigned int nb_hw_rx_desc;
	unsigned int max_events;

	rc = rte_mempool_ops_get_info(mb_pool, &mp_info);
	if (rc != 0)
		return -rc;
	if (mp_info.contig_block_size == 0)
		return EINVAL;

	/*
	 * Calculate required number of hardware Rx descriptors each
	 * carrying contig block size Rx buffers.
	 * It cannot be less than Rx write pointer alignment plus 1
	 * in order to avoid cases when the ring is guaranteed to be
	 * empty.
	 */
	nb_hw_rx_desc = RTE_MAX(SFC_DIV_ROUND_UP(nb_rx_desc,
						 mp_info.contig_block_size),
				SFC_EF10_RX_WPTR_ALIGN + 1);
	if (nb_hw_rx_desc <= EFX_RXQ_MINNDESCS) {
		*rxq_entries = EFX_RXQ_MINNDESCS;
	} else {
		*rxq_entries = rte_align32pow2(nb_hw_rx_desc);
		if (*rxq_entries > EFX_RXQ_MAXNDESCS)
			return EINVAL;
	}

	max_events = RTE_ALIGN_FLOOR(nb_hw_rx_desc, SFC_EF10_RX_WPTR_ALIGN) *
		mp_info.contig_block_size +
		(SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ +
		1 /* Rx error */ + 1 /* flush */ + 1 /* head-tail space */;

	*evq_entries = rte_align32pow2(max_events);
	*evq_entries = RTE_MAX(*evq_entries, (unsigned int)EFX_EVQ_MINNEVS);
	*evq_entries = RTE_MIN(*evq_entries, (unsigned int)EFX_EVQ_MAXNEVS);

	/*
	 * Even the maximum event queue size may be insufficient to handle
	 * so many Rx descriptors. If so, limit the Rx queue fill level.
	 */
	*rxq_max_fill_level = RTE_MIN(nb_rx_desc,
				      SFC_EF10_ESSB_RXQ_LIMIT(*evq_entries));
	return 0;
}
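/*
 * Create the datapath Rx queue. The refill mempool must support contiguous
 * block dequeue (the "bucket" mempool); the buffer stride within a block is
 * derived from the mempool element layout.
 */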
static sfc_dp_rx_qcreate_t sfc_ef10_essb_rx_qcreate;
static int
sfc_ef10_essb_rx_qcreate(uint16_t port_id, uint16_t queue_id,
			 const struct rte_pci_addr *pci_addr, int socket_id,
			 const struct sfc_dp_rx_qcreate_info *info,
			 struct sfc_dp_rxq **dp_rxqp)
{
	struct rte_mempool * const mp = info->refill_mb_pool;
	struct rte_mempool_info mp_info;
	struct sfc_ef10_essb_rxq *rxq;
	int rc;

	rc = rte_mempool_ops_get_info(mp, &mp_info);
	if (rc != 0) {
		/* Positive errno is used in the driver */
		rc = -rc;
		goto fail_get_contig_block_size;
	}

	/* Check if the mempool provides block dequeue */
	rc = EINVAL;
	if (mp_info.contig_block_size == 0)
		goto fail_no_block_dequeue;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
					 info->rxq_entries,
					 sizeof(*rxq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL)
		goto fail_desc_alloc;

	rxq->block_size = mp_info.contig_block_size;
	rxq->buf_stride = mp->header_size + mp->elt_size + mp->trailer_size;
	rxq->rxq_ptr_mask = info->rxq_entries - 1;
	rxq->evq_ptr_mask = info->evq_entries - 1;
	rxq->evq_hw_ring = info->evq_hw_ring;
	rxq->port_id = port_id;

	rxq->max_fill_level = info->max_fill_level / mp_info.contig_block_size;
	rxq->refill_threshold =
		RTE_MAX(info->refill_threshold / mp_info.contig_block_size,
			SFC_EF10_RX_WPTR_ALIGN);
	rxq->refill_mb_pool = mp;
	rxq->rxq_hw_ring = info->rxq_hw_ring;

	rxq->doorbell = (volatile uint8_t *)info->mem_bar +
			ER_DZ_RX_DESC_UPD_REG_OFST +
			(info->hw_index << info->vi_window_shift);

	sfc_ef10_essb_rx_info(&rxq->dp.dpq,
			      "block size is %u, buf stride is %u",
			      rxq->block_size, rxq->buf_stride);
	sfc_ef10_essb_rx_info(&rxq->dp.dpq,
			      "max fill level is %u descs (%u bufs), "
			      "refill threshold %u descs (%u bufs)",
			      rxq->max_fill_level,
			      rxq->max_fill_level * rxq->block_size,
			      rxq->refill_threshold,
			      rxq->refill_threshold * rxq->block_size);

	*dp_rxqp = &rxq->dp;
	return 0;

fail_desc_alloc:
	rte_free(rxq);

fail_rxq_alloc:
fail_no_block_dequeue:
fail_get_contig_block_size:
	return rc;
}
static sfc_dp_rx_qdestroy_t sfc_ef10_essb_rx_qdestroy;
static void
sfc_ef10_essb_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);

	rte_free(rxq->sw_ring);
	rte_free(rxq);
}

static sfc_dp_rx_qstart_t sfc_ef10_essb_rx_qstart;
static int
sfc_ef10_essb_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
{
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);

	rxq->evq_read_ptr = evq_read_ptr;

	/* Initialize before refill */
	rxq->completed = rxq->pending_id = rxq->added = 0;
	rxq->left_in_completed = rxq->left_in_pending = rxq->block_size;
	rxq->bufs_ptr = UINT_MAX;
	rxq->bufs_pending = 0;

	sfc_ef10_essb_rx_qrefill(rxq);

	rxq->flags |= SFC_EF10_ESSB_RXQ_STARTED;
	rxq->flags &=
		~(SFC_EF10_ESSB_RXQ_NOT_RUNNING | SFC_EF10_ESSB_RXQ_EXCEPTION);

	return 0;
}

static sfc_dp_rx_qstop_t sfc_ef10_essb_rx_qstop;
static void
sfc_ef10_essb_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
{
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);

	rxq->flags |= SFC_EF10_ESSB_RXQ_NOT_RUNNING;

	*evq_read_ptr = rxq->evq_read_ptr;
}

static sfc_dp_rx_qrx_ev_t sfc_ef10_essb_rx_qrx_ev;
static bool
sfc_ef10_essb_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
{
	__rte_unused struct sfc_ef10_essb_rxq *rxq;

	rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq->flags & SFC_EF10_ESSB_RXQ_NOT_RUNNING);

	/*
	 * It is safe to ignore Rx event since we free all mbufs on
	 * queue purge anyway.
	 */
	return false;
}
static sfc_dp_rx_qpurge_t sfc_ef10_essb_rx_qpurge;
static void
sfc_ef10_essb_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
	unsigned int i, j;
	const struct sfc_ef10_essb_rx_sw_desc *rxd;
	struct rte_mbuf *m;

	/* Free the remaining buffers of a partially consumed block first */
	if (rxq->completed != rxq->added && rxq->left_in_completed > 0) {
		rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
		m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
				rxq->block_size - rxq->left_in_completed);
		do {
			rxq->left_in_completed--;
			rte_mempool_put(rxq->refill_mb_pool, m);
			m = sfc_ef10_essb_next_mbuf(rxq, m);
		} while (rxq->left_in_completed > 0);
		rxq->completed++;
	}

	/* Free all buffers of the blocks still fully owned by the driver */
	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_ring[i & rxq->rxq_ptr_mask];
		m = rxd->first_mbuf;
		for (j = 0; j < rxq->block_size; ++j) {
			rte_mempool_put(rxq->refill_mb_pool, m);
			m = sfc_ef10_essb_next_mbuf(rxq, m);
		}
	}

	rxq->flags &= ~SFC_EF10_ESSB_RXQ_STARTED;
}
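/*
 * Equal stride super-buffer Rx datapath operations, registered under the
 * SFC_KVARG_DATAPATH_EF10_ESSB name.
 */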
struct sfc_dp_rx sfc_ef10_essb_rx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EF10_ESSB,
		.type		= SFC_DP_RX,
		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10 |
				  SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER,
	},
	.features		= SFC_DP_RX_FEAT_FLOW_FLAG |
				  SFC_DP_RX_FEAT_FLOW_MARK,
	.get_dev_info		= sfc_ef10_essb_rx_get_dev_info,
	.pool_ops_supported	= sfc_ef10_essb_rx_pool_ops_supported,
	.qsize_up_rings		= sfc_ef10_essb_rx_qsize_up_rings,
	.qcreate		= sfc_ef10_essb_rx_qcreate,
	.qdestroy		= sfc_ef10_essb_rx_qdestroy,
	.qstart			= sfc_ef10_essb_rx_qstart,
	.qstop			= sfc_ef10_essb_rx_qstop,
	.qrx_ev			= sfc_ef10_essb_rx_qrx_ev,
	.qpurge			= sfc_ef10_essb_rx_qpurge,
	.supported_ptypes_get	= sfc_ef10_supported_ptypes_get,
	.qdesc_npending		= sfc_ef10_essb_rx_qdesc_npending,
	.pkt_burst		= sfc_ef10_essb_recv_pkts,
};