/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

/* EF10 equal stride packed stream receive native datapath implementation */

#include <rte_byteorder.h>
#include <rte_mbuf_ptype.h>

#include "efx_types.h"
#include "efx_regs_ef10.h"

#include "sfc_tweak.h"
#include "sfc_dp_rx.h"
#include "sfc_kvargs.h"

/* Tunnels are not supported */
#define SFC_EF10_RX_EV_ENCAP_SUPPORT	0
#include "sfc_ef10_rx_ev.h"

#define sfc_ef10_essb_rx_err(dpq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, ERR, dpq, __VA_ARGS__)

#define sfc_ef10_essb_rx_info(dpq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, INFO, dpq, __VA_ARGS__)

/*
 * Fake length for RXQ descriptors in equal stride super-buffer mode
 * to make hardware happy.
 */
#define SFC_EF10_ESSB_RX_FAKE_BUF_SIZE	32

/*
 * Maximum number of descriptors/buffers in the Rx ring.
 * It should guarantee that the corresponding event queue never overflows.
 */
#define SFC_EF10_ESSB_RXQ_LIMIT(_nevs) \
	((_nevs) - 1 /* head must not step on tail */ - \
	 (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
	 1 /* Rx error */ - 1 /* flush */)
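
/*
 * Worked example (values assumed for illustration only): with a 512-entry
 * event queue and 8 events per cache line, the limit evaluates to
 * 512 - 1 - 7 - 1 - 1 = 502 Rx descriptors.
 */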

struct sfc_ef10_essb_rx_sw_desc {
	struct rte_mbuf *first_mbuf;

struct sfc_ef10_essb_rxq {
	/* Used on data path */
#define SFC_EF10_ESSB_RXQ_STARTED	0x1
#define SFC_EF10_ESSB_RXQ_NOT_RUNNING	0x2
#define SFC_EF10_ESSB_RXQ_EXCEPTION	0x4
	unsigned int rxq_ptr_mask;
	unsigned int block_size;
	unsigned int buf_stride;
	unsigned int bufs_ptr;
	unsigned int completed;
	unsigned int pending_id;
	unsigned int bufs_pending;
	unsigned int left_in_completed;
	unsigned int left_in_pending;
	unsigned int evq_read_ptr;
	unsigned int evq_ptr_mask;
	efx_qword_t *evq_hw_ring;
	struct sfc_ef10_essb_rx_sw_desc *sw_ring;
	unsigned int max_fill_level;
	unsigned int refill_threshold;
	struct rte_mempool *refill_mb_pool;
	efx_qword_t *rxq_hw_ring;
	volatile void *doorbell;

	/* Datapath receive queue anchor */

static inline struct sfc_ef10_essb_rxq *
sfc_ef10_essb_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
	return container_of(dp_rxq, struct sfc_ef10_essb_rxq, dp);
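
/*
 * All mbufs of an equal stride super-buffer block are laid out at a fixed
 * stride, so neighbouring buffers are reached by plain pointer arithmetic.
 */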

static struct rte_mbuf *
sfc_ef10_essb_next_mbuf(const struct sfc_ef10_essb_rxq *rxq,
			struct rte_mbuf *mbuf)
	return (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride);

static struct rte_mbuf *
sfc_ef10_essb_mbuf_by_index(const struct sfc_ef10_essb_rxq *rxq,
			    struct rte_mbuf *mbuf, unsigned int idx)
	return (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride);
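
/*
 * Return the next mbuf of the block being completed or, if the current
 * block is exhausted, the first mbuf of the next completed block.
 */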

static struct rte_mbuf *
sfc_ef10_essb_maybe_next_completed(struct sfc_ef10_essb_rxq *rxq)
	const struct sfc_ef10_essb_rx_sw_desc *rxd;

	if (rxq->left_in_completed != 0) {
		rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
		return sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
				rxq->block_size - rxq->left_in_completed);

	rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
	rxq->left_in_completed = rxq->block_size;
	return rxd->first_mbuf;
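
/*
 * Refill the hardware Rx ring with contiguous blocks of mbufs dequeued
 * from the mempool in bulk; only the first mbuf of every block is posted
 * as an Rx descriptor.
 */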

sfc_ef10_essb_rx_qrefill(struct sfc_ef10_essb_rxq *rxq)
	const unsigned int rxq_ptr_mask = rxq->rxq_ptr_mask;
	unsigned int free_space;
	void *mbuf_blocks[SFC_EF10_RX_WPTR_ALIGN];
	unsigned int added = rxq->added;

	free_space = rxq->max_fill_level - (added - rxq->completed);

	if (free_space < rxq->refill_threshold)

	bulks = free_space / RTE_DIM(mbuf_blocks);
	/* refill_threshold guarantees that bulks is positive */
	SFC_ASSERT(bulks > 0);

		if (unlikely(rte_mempool_get_contig_blocks(rxq->refill_mb_pool,
				mbuf_blocks, RTE_DIM(mbuf_blocks)) < 0)) {
			struct rte_eth_dev_data *dev_data =
				rte_eth_devices[rxq->port_id].data;

			/*
			 * It is hardly a safe way to increment the counter
			 * from different contexts, but all PMDs do it.
			 */
			dev_data->rx_mbuf_alloc_failed += RTE_DIM(mbuf_blocks);
			/* Return if we have posted nothing yet */
			if (added == rxq->added)

		for (i = 0, id = added & rxq_ptr_mask;
		     i < RTE_DIM(mbuf_blocks);
			struct rte_mbuf *m = mbuf_blocks[i];
			struct sfc_ef10_essb_rx_sw_desc *rxd;

			SFC_ASSERT((id & ~rxq_ptr_mask) == 0);
			rxd = &rxq->sw_ring[id];

			/* RX_KER_BYTE_CNT is ignored by firmware */
			EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
					ESF_DZ_RX_KER_BYTE_CNT,
					SFC_EF10_ESSB_RX_FAKE_BUF_SIZE,
					ESF_DZ_RX_KER_BUF_ADDR,
					rte_mbuf_data_iova_default(m));

		added += RTE_DIM(mbuf_blocks);
	} while (--bulks > 0);

	SFC_ASSERT(rxq->added != added);

	sfc_ef10_rx_qpush(rxq->doorbell, added, rxq_ptr_mask);
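
/*
 * Look at the next event: any event other than an Rx completion is treated
 * as an exception and is left unread for the control path to handle.
 */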

sfc_ef10_essb_rx_event_get(struct sfc_ef10_essb_rxq *rxq, efx_qword_t *rx_ev)
	*rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->evq_ptr_mask];

	if (!sfc_ef10_ev_present(*rx_ev))

	if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
		     FSE_AZ_EV_CODE_RX_EV)) {
		/*
		 * Do not move read_ptr to keep the event for exception
		 * handling by the control path.
		 */
		rxq->flags |= SFC_EF10_ESSB_RXQ_EXCEPTION;
		sfc_ef10_essb_rx_err(&rxq->dp.dpq,
				"RxQ exception at EvQ read ptr %#x",
				rxq->evq_read_ptr);

sfc_ef10_essb_rx_process_ev(struct sfc_ef10_essb_rxq *rxq, efx_qword_t rx_ev)
	ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) -
		 rxq->bufs_ptr) &
		EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);

	rxq->bufs_ptr += ready;
	rxq->bufs_pending += ready;

	SFC_ASSERT(ready > 0);
		const struct sfc_ef10_essb_rx_sw_desc *rxd;
		unsigned int todo_bufs;

		rxd = &rxq->sw_ring[rxq->pending_id];
		m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
				rxq->block_size - rxq->left_in_pending);

		if (ready < rxq->left_in_pending) {
			rxq->left_in_pending -= todo_bufs;
			todo_bufs = rxq->left_in_pending;
			rxq->left_in_pending = rxq->block_size;
			if (rxq->pending_id != rxq->rxq_ptr_mask)

		SFC_ASSERT(todo_bufs > 0);

		sfc_ef10_rx_ev_to_offloads(rx_ev, m, ~0ull);

		/* Prefetch pseudo-header */
		rte_prefetch0((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);

		while (todo_bufs-- > 0) {
			m = sfc_ef10_essb_next_mbuf(rxq, m);
			m->ol_flags = m0->ol_flags;
			m->packet_type = m0->packet_type;
			/* Prefetch pseudo-header */
			rte_prefetch0((uint8_t *)m->buf_addr +
				      RTE_PKTMBUF_HEADROOM);
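
/*
 * Hand pending buffers over to the caller: parse the equal stride
 * super-buffer pseudo-header of each buffer and fill in the mbuf length,
 * port and RSS hash fields.
 */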

sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
			     struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
	unsigned int n_rx_pkts = 0;
	unsigned int todo_bufs;

	while ((todo_bufs = RTE_MIN(nb_pkts - n_rx_pkts,
				    rxq->bufs_pending)) > 0) {
		m = sfc_ef10_essb_maybe_next_completed(rxq);

		todo_bufs = RTE_MIN(todo_bufs, rxq->left_in_completed);

		rxq->bufs_pending -= todo_bufs;
		rxq->left_in_completed -= todo_bufs;

		SFC_ASSERT(todo_bufs > 0);
			const efx_qword_t *qwordp;

			rx_pkts[n_rx_pkts++] = m;

			/* Parse pseudo-header */
			qwordp = (const efx_qword_t *)
				((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);
				EFX_QWORD_FIELD(*qwordp,
						ES_EZ_ESSB_RX_PREFIX_DATA_LEN);

			m->data_off = RTE_PKTMBUF_HEADROOM +
				      ES_EZ_ESSB_RX_PREFIX_LEN;
			m->port = rxq->port_id;

			rte_pktmbuf_pkt_len(m) = pkt_len;
			rte_pktmbuf_data_len(m) = pkt_len;

				!!EFX_TEST_QWORD_BIT(*qwordp,
					ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN));

			/* EFX_QWORD_FIELD converts little-endian to CPU */
				EFX_QWORD_FIELD(*qwordp,
						ES_EZ_ESSB_RX_PREFIX_HASH);

			m = sfc_ef10_essb_next_mbuf(rxq, m);
		} while (todo_bufs-- > 0);
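
/*
 * Burst receive: deliver buffers that are already pending, then process
 * new Rx events until the burst is filled, clear used events and refill
 * the Rx ring.
 */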

sfc_ef10_essb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(rx_queue);
	const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;

	if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
				   SFC_EF10_ESSB_RXQ_EXCEPTION)))

	n_rx_pkts = sfc_ef10_essb_rx_get_pending(rxq, rx_pkts, nb_pkts);

	while (n_rx_pkts != nb_pkts &&
	       sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
		/*
		 * DROP_EVENT is internal to the NIC, software should
		 * never see it and, therefore, may ignore it.
		 */
		sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
		n_rx_pkts += sfc_ef10_essb_rx_get_pending(rxq,
				rx_pkts + n_rx_pkts,
				nb_pkts - n_rx_pkts);

	sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
			   evq_old_read_ptr, rxq->evq_read_ptr);

	/* It is not a problem if we refill in the case of an exception */
	sfc_ef10_essb_rx_qrefill(rxq);
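
/*
 * Report the number of Rx descriptors pending completion; not supported
 * by this datapath (see the comment inside).
 */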
static sfc_dp_rx_qdesc_npending_t sfc_ef10_essb_rx_qdesc_npending;
sfc_ef10_essb_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
	/*
	 * Correct implementation requires EvQ polling and events
	 * processing.
	 */

static sfc_dp_rx_get_dev_info_t sfc_ef10_essb_rx_get_dev_info;
sfc_ef10_essb_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
	/*
	 * Number of descriptors just defines the maximum number of pushed
	 * descriptors (fill level).
	 */
	dev_info->rx_desc_lim.nb_min = SFC_RX_REFILL_BULK;
	dev_info->rx_desc_lim.nb_align = SFC_RX_REFILL_BULK;

static sfc_dp_rx_pool_ops_supported_t sfc_ef10_essb_rx_pool_ops_supported;
sfc_ef10_essb_rx_pool_ops_supported(const char *pool)
	SFC_ASSERT(pool != NULL);

	if (strcmp(pool, "bucket") == 0)
		return 0;

	return -ENOTSUP;
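
/*
 * Size up the Rx and event queue rings: each hardware Rx descriptor
 * carries a whole contiguous block of mbufs, so the ring sizes are
 * derived from the requested number of Rx buffers divided by the
 * mempool block size.
 */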
static sfc_dp_rx_qsize_up_rings_t sfc_ef10_essb_rx_qsize_up_rings;
sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc,
				struct rte_mempool *mb_pool,
				unsigned int *rxq_entries,
				unsigned int *evq_entries,
				unsigned int *rxq_max_fill_level)
	struct rte_mempool_info mp_info;
	unsigned int nb_hw_rx_desc;
	unsigned int max_events;

	rc = rte_mempool_ops_get_info(mb_pool, &mp_info);

	if (mp_info.contig_block_size == 0)

	/*
	 * Calculate required number of hardware Rx descriptors each
	 * carrying contig block size Rx buffers.
	 * It cannot be less than Rx write pointer alignment plus 1
	 * in order to avoid cases when the ring is guaranteed to be
	 * empty.
	 */
	nb_hw_rx_desc = RTE_MAX(SFC_DIV_ROUND_UP(nb_rx_desc,
						 mp_info.contig_block_size),
				SFC_EF10_RX_WPTR_ALIGN + 1);
	if (nb_hw_rx_desc <= EFX_RXQ_MINNDESCS) {
		*rxq_entries = EFX_RXQ_MINNDESCS;
		*rxq_entries = rte_align32pow2(nb_hw_rx_desc);
		if (*rxq_entries > EFX_RXQ_MAXNDESCS)

	max_events = RTE_ALIGN_FLOOR(nb_hw_rx_desc, SFC_EF10_RX_WPTR_ALIGN) *
		mp_info.contig_block_size +
		(SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ +
		1 /* Rx error */ + 1 /* flush */ + 1 /* head-tail space */;

	*evq_entries = rte_align32pow2(max_events);
	*evq_entries = RTE_MAX(*evq_entries, (unsigned int)EFX_EVQ_MINNEVS);
	*evq_entries = RTE_MIN(*evq_entries, (unsigned int)EFX_EVQ_MAXNEVS);

	/*
	 * Even the maximum event queue size may be insufficient to handle
	 * so many Rx descriptors. If so, we should limit the Rx queue
	 * fill level.
	 */
	*rxq_max_fill_level = RTE_MIN(nb_rx_desc,
				      SFC_EF10_ESSB_RXQ_LIMIT(*evq_entries));
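
/*
 * Create the datapath Rx queue: require a mempool with contiguous block
 * dequeue, allocate the software ring and convert fill level and refill
 * threshold from buffers to blocks.
 */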
static sfc_dp_rx_qcreate_t sfc_ef10_essb_rx_qcreate;
sfc_ef10_essb_rx_qcreate(uint16_t port_id, uint16_t queue_id,
			 const struct rte_pci_addr *pci_addr, int socket_id,
			 const struct sfc_dp_rx_qcreate_info *info,
			 struct sfc_dp_rxq **dp_rxqp)
	struct rte_mempool * const mp = info->refill_mb_pool;
	struct rte_mempool_info mp_info;
	struct sfc_ef10_essb_rxq *rxq;

	rc = rte_mempool_ops_get_info(mp, &mp_info);
		/* Positive errno is used in the driver */
		goto fail_get_contig_block_size;

	/* Check if the mempool provides block dequeue */
	if (mp_info.contig_block_size == 0)
		goto fail_no_block_dequeue;

	rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);

	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

	rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
					 info->rxq_entries,
					 sizeof(*rxq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL)
		goto fail_desc_alloc;

	rxq->block_size = mp_info.contig_block_size;
	rxq->buf_stride = mp->header_size + mp->elt_size + mp->trailer_size;
	rxq->rxq_ptr_mask = info->rxq_entries - 1;
	rxq->evq_ptr_mask = info->evq_entries - 1;
	rxq->evq_hw_ring = info->evq_hw_ring;
	rxq->port_id = port_id;

	rxq->max_fill_level = info->max_fill_level / mp_info.contig_block_size;
	rxq->refill_threshold =
		RTE_MAX(info->refill_threshold / mp_info.contig_block_size,
			SFC_EF10_RX_WPTR_ALIGN);
	rxq->refill_mb_pool = mp;
	rxq->rxq_hw_ring = info->rxq_hw_ring;

	rxq->doorbell = (volatile uint8_t *)info->mem_bar +
			ER_DZ_RX_DESC_UPD_REG_OFST +
			(info->hw_index << info->vi_window_shift);

	sfc_ef10_essb_rx_info(&rxq->dp.dpq,
			"block size is %u, buf stride is %u",
			rxq->block_size, rxq->buf_stride);
	sfc_ef10_essb_rx_info(&rxq->dp.dpq,
			"max fill level is %u descs (%u bufs), "
			"refill threshold %u descs (%u bufs)",
			rxq->max_fill_level,
			rxq->max_fill_level * rxq->block_size,
			rxq->refill_threshold,
			rxq->refill_threshold * rxq->block_size);

fail_no_block_dequeue:
fail_get_contig_block_size:

static sfc_dp_rx_qdestroy_t sfc_ef10_essb_rx_qdestroy;
sfc_ef10_essb_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);

	rte_free(rxq->sw_ring);
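
/*
 * Start the queue: reset block accounting so that the first Rx event and
 * the first refill are interpreted correctly, then fill the Rx ring.
 */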
static sfc_dp_rx_qstart_t sfc_ef10_essb_rx_qstart;
sfc_ef10_essb_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);

	rxq->evq_read_ptr = evq_read_ptr;

	/* Initialize before refill */
	rxq->completed = rxq->pending_id = rxq->added = 0;
	rxq->left_in_completed = rxq->left_in_pending = rxq->block_size;
	rxq->bufs_ptr = UINT_MAX;
	rxq->bufs_pending = 0;

	sfc_ef10_essb_rx_qrefill(rxq);

	rxq->flags |= SFC_EF10_ESSB_RXQ_STARTED;
	rxq->flags &=
		~(SFC_EF10_ESSB_RXQ_NOT_RUNNING | SFC_EF10_ESSB_RXQ_EXCEPTION);

static sfc_dp_rx_qstop_t sfc_ef10_essb_rx_qstop;
sfc_ef10_essb_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);

	rxq->flags |= SFC_EF10_ESSB_RXQ_NOT_RUNNING;

	*evq_read_ptr = rxq->evq_read_ptr;

static sfc_dp_rx_qrx_ev_t sfc_ef10_essb_rx_qrx_ev;
sfc_ef10_essb_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
	__rte_unused struct sfc_ef10_essb_rxq *rxq;

	rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq->flags & SFC_EF10_ESSB_RXQ_NOT_RUNNING);

	/*
	 * It is safe to ignore the Rx event since we free all mbufs on
	 * queue purge anyway.
	 */
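
/*
 * Purge the queue: return all mbufs still owned by the Rx queue, both in
 * the partially consumed block and in not yet completed blocks, back to
 * the mempool.
 */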
static sfc_dp_rx_qpurge_t sfc_ef10_essb_rx_qpurge;
sfc_ef10_essb_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
	struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
	const struct sfc_ef10_essb_rx_sw_desc *rxd;

	if (rxq->completed != rxq->added && rxq->left_in_completed > 0) {
		rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
		m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
				rxq->block_size - rxq->left_in_completed);
			rxq->left_in_completed--;
			rte_mempool_put(rxq->refill_mb_pool, m);
			m = sfc_ef10_essb_next_mbuf(rxq, m);
		} while (rxq->left_in_completed > 0);

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_ring[i & rxq->rxq_ptr_mask];

		for (j = 0; j < rxq->block_size; ++j) {
			rte_mempool_put(rxq->refill_mb_pool, m);
			m = sfc_ef10_essb_next_mbuf(rxq, m);

	rxq->flags &= ~SFC_EF10_ESSB_RXQ_STARTED;

struct sfc_dp_rx sfc_ef10_essb_rx = {
	.name = SFC_KVARG_DATAPATH_EF10_ESSB,
	.hw_fw_caps = SFC_DP_HW_FW_CAP_EF10 |
		      SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER,
	.get_dev_info = sfc_ef10_essb_rx_get_dev_info,
	.pool_ops_supported = sfc_ef10_essb_rx_pool_ops_supported,
	.qsize_up_rings = sfc_ef10_essb_rx_qsize_up_rings,
	.qcreate = sfc_ef10_essb_rx_qcreate,
	.qdestroy = sfc_ef10_essb_rx_qdestroy,
	.qstart = sfc_ef10_essb_rx_qstart,
	.qstop = sfc_ef10_essb_rx_qstop,
	.qrx_ev = sfc_ef10_essb_rx_qrx_ev,
	.qpurge = sfc_ef10_essb_rx_qpurge,
	.supported_ptypes_get = sfc_ef10_supported_ptypes_get,
	.qdesc_npending = sfc_ef10_essb_rx_qdesc_npending,
	.pkt_burst = sfc_ef10_essb_recv_pkts,