/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

/* EF10 equal stride super-buffer (ESSB) receive native datapath implementation */

#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_mbuf.h>
#include <rte_io.h>

#include "efx_types.h"
#include "efx_regs_ef10.h"
#include "efx.h"

#include "sfc_debug.h"
#include "sfc_tweak.h"
#include "sfc_dp_rx.h"
#include "sfc_kvargs.h"
#include "sfc_ef10.h"

/* Tunnels are not supported */
#define SFC_EF10_RX_EV_ENCAP_SUPPORT    0
#include "sfc_ef10_rx_ev.h"

#define sfc_ef10_essb_rx_err(dpq, ...) \
        SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, ERR, dpq, __VA_ARGS__)

#define sfc_ef10_essb_rx_info(dpq, ...) \
        SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, INFO, dpq, __VA_ARGS__)

/*
 * Fake length for RXQ descriptors in equal stride super-buffer mode
 * to make hardware happy.
 */
#define SFC_EF10_ESSB_RX_FAKE_BUF_SIZE  32

/**
 * Minimum number of Rx buffers that the datapath can use.
 *
 * Each HW Rx descriptor references many Rx buffers. The number of buffers
 * in one HW Rx descriptor is equal to the size of the contiguous block
 * provided by the Rx buffers memory pool. The contiguous block size
 * depends on RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB and the rte_mbuf
 * data size specified on memory pool creation. A typical rte_mbuf
 * data size is about 2k, which makes a bit less than 32 buffers in a
 * contiguous block with the default bucket size equal to 64k.
 * Since HW Rx descriptors are pushed by 8 (see SFC_EF10_RX_WPTR_ALIGN),
 * that makes about 256 the required minimum. Double it in the advertised
 * minimum to allow for at least 2 refill blocks.
 */
#define SFC_EF10_ESSB_RX_DESCS_MIN      512
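
/*
 * Worked example of the numbers above (a rough sketch, not exact math):
 * ~2 KiB of mbuf data size in a 64 KiB bucket yields a bit less than
 * 32 buffers per contiguous block; 8 HW descriptors per push times
 * ~32 buffers is about 256 buffers, doubled to 512 for 2 refill blocks.
 */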

/**
 * Alignment requirement for the number of Rx buffers.
 *
 * There are no extra requirements on alignment since the actual number
 * of pushed Rx buffers will be a multiple of the contiguous block size,
 * which is unknown beforehand.
 */
#define SFC_EF10_ESSB_RX_DESCS_ALIGN    1

/**
 * Maximum number of descriptors/buffers in the Rx ring.
 * It should guarantee that the corresponding event queue never overfills.
 */
#define SFC_EF10_ESSB_RXQ_LIMIT(_nevs) \
        ((_nevs) - 1 /* head must not step on tail */ - \
         (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
         1 /* Rx error */ - 1 /* flush */)
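
/*
 * Numeric sketch, assuming SFC_EF10_EV_PER_CACHE_LINE is 8 (a 64-byte
 * cache line of 8-byte events): with a 2048-entry EvQ the limit is
 * 2048 - 1 - 7 - 1 - 1 = 2038 buffers.
 */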

struct sfc_ef10_essb_rx_sw_desc {
        struct rte_mbuf                 *first_mbuf;
};

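/*
 * Buffers flow through three stages tracked by separate state below:
 * 'added' counts blocks posted to HW on refill; 'pending_id' and
 * 'left_in_pending' track the block whose buffers are being completed
 * by HW events; 'completed' and 'left_in_completed' track the block
 * being handed out to the application.
 */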
struct sfc_ef10_essb_rxq {
        /* Used on data path */
        unsigned int                    flags;
#define SFC_EF10_ESSB_RXQ_STARTED       0x1
#define SFC_EF10_ESSB_RXQ_NOT_RUNNING   0x2
#define SFC_EF10_ESSB_RXQ_EXCEPTION     0x4
        unsigned int                    rxq_ptr_mask;
        unsigned int                    block_size;
        unsigned int                    buf_stride;
        unsigned int                    bufs_ptr;
        unsigned int                    completed;
        unsigned int                    pending_id;
        unsigned int                    bufs_pending;
        unsigned int                    left_in_completed;
        unsigned int                    left_in_pending;
        unsigned int                    evq_read_ptr;
        unsigned int                    evq_ptr_mask;
        efx_qword_t                     *evq_hw_ring;
        struct sfc_ef10_essb_rx_sw_desc *sw_ring;
        uint16_t                        port_id;

        /* Used on refill */
        unsigned int                    added;
        unsigned int                    max_fill_level;
        unsigned int                    refill_threshold;
        struct rte_mempool              *refill_mb_pool;
        efx_qword_t                     *rxq_hw_ring;
        volatile void                   *doorbell;

        /* Datapath receive queue anchor */
        struct sfc_dp_rxq               dp;
};

static inline struct sfc_ef10_essb_rxq *
sfc_ef10_essb_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
{
        return container_of(dp_rxq, struct sfc_ef10_essb_rxq, dp);
}

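/*
 * Buffers dequeued as one contiguous block from the bucket mempool are
 * laid out at a fixed stride (buf_stride above), so the next mbuf in a
 * block is found by plain pointer arithmetic rather than a ring lookup.
 */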
static struct rte_mbuf *
sfc_ef10_essb_next_mbuf(const struct sfc_ef10_essb_rxq *rxq,
                        struct rte_mbuf *mbuf)
{
        struct rte_mbuf *m;

        m = (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride);
        MBUF_RAW_ALLOC_CHECK(m);
        return m;
}

static struct rte_mbuf *
sfc_ef10_essb_mbuf_by_index(const struct sfc_ef10_essb_rxq *rxq,
                            struct rte_mbuf *mbuf, unsigned int idx)
{
        struct rte_mbuf *m;

        m = (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride);
        MBUF_RAW_ALLOC_CHECK(m);
        return m;
}

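/*
 * Return the next buffer to be handed to the application: either the
 * next mbuf within the current completed block, or the first mbuf of
 * the next block when the current one is exhausted.
 */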
static struct rte_mbuf *
sfc_ef10_essb_maybe_next_completed(struct sfc_ef10_essb_rxq *rxq)
{
        const struct sfc_ef10_essb_rx_sw_desc *rxd;

        if (rxq->left_in_completed != 0) {
                rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
                return sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
                                rxq->block_size - rxq->left_in_completed);
        } else {
                rxq->completed++;
                rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
                rxq->left_in_completed = rxq->block_size;
                return rxd->first_mbuf;
        }
}

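/*
 * Refill posts whole contiguous blocks: each mempool dequeue returns
 * SFC_EF10_RX_WPTR_ALIGN blocks and each block fills one HW descriptor,
 * so the doorbell is always rung with a write pointer aligned to
 * SFC_EF10_RX_WPTR_ALIGN.
 */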
static void
sfc_ef10_essb_rx_qrefill(struct sfc_ef10_essb_rxq *rxq)
{
        const unsigned int rxq_ptr_mask = rxq->rxq_ptr_mask;
        unsigned int free_space;
        unsigned int bulks;
        void *mbuf_blocks[SFC_EF10_RX_WPTR_ALIGN];
        unsigned int added = rxq->added;

        free_space = rxq->max_fill_level - (added - rxq->completed);

        if (free_space < rxq->refill_threshold)
                return;

        bulks = free_space / RTE_DIM(mbuf_blocks);
        /* refill_threshold guarantees that bulks is positive */
        SFC_ASSERT(bulks > 0);

        do {
                unsigned int id;
                unsigned int i;

                if (unlikely(rte_mempool_get_contig_blocks(rxq->refill_mb_pool,
                                mbuf_blocks, RTE_DIM(mbuf_blocks)) < 0)) {
                        struct rte_eth_dev_data *dev_data =
                                rte_eth_devices[rxq->port_id].data;

                        /*
                         * It is hardly safe to increment the counter
                         * from different contexts, but all PMDs do it.
                         */
                        dev_data->rx_mbuf_alloc_failed += RTE_DIM(mbuf_blocks);
                        /* Return if we have posted nothing yet */
                        if (added == rxq->added)
                                return;
                        /* Push posted */
                        break;
                }

                for (i = 0, id = added & rxq_ptr_mask;
                     i < RTE_DIM(mbuf_blocks);
                     ++i, ++id) {
                        struct rte_mbuf *m = mbuf_blocks[i];
                        struct sfc_ef10_essb_rx_sw_desc *rxd;

                        SFC_ASSERT((id & ~rxq_ptr_mask) == 0);
                        rxd = &rxq->sw_ring[id];
                        rxd->first_mbuf = m;

                        /* RX_KER_BYTE_CNT is ignored by firmware */
                        EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
                                             ESF_DZ_RX_KER_BYTE_CNT,
                                             SFC_EF10_ESSB_RX_FAKE_BUF_SIZE,
                                             ESF_DZ_RX_KER_BUF_ADDR,
                                             rte_mbuf_data_iova_default(m));
                }

                added += RTE_DIM(mbuf_blocks);

        } while (--bulks > 0);

        SFC_ASSERT(rxq->added != added);
        rxq->added = added;
        sfc_ef10_rx_qpush(rxq->doorbell, added, rxq_ptr_mask);
}

static bool
sfc_ef10_essb_rx_event_get(struct sfc_ef10_essb_rxq *rxq, efx_qword_t *rx_ev)
{
        *rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->evq_ptr_mask];

        if (!sfc_ef10_ev_present(*rx_ev))
                return false;

        if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
                     FSE_AZ_EV_CODE_RX_EV)) {
                /*
                 * Do not move read_ptr to keep the event for exception
                 * handling
                 */
                rxq->flags |= SFC_EF10_ESSB_RXQ_EXCEPTION;
                sfc_ef10_essb_rx_err(&rxq->dp.dpq,
                                     "RxQ exception at EvQ read ptr %#x",
                                     rxq->evq_read_ptr);
                return false;
        }

        rxq->evq_read_ptr++;
        return true;
}

static void
sfc_ef10_essb_rx_process_ev(struct sfc_ef10_essb_rxq *rxq, efx_qword_t rx_ev)
{
        unsigned int ready;

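        /*
         * The event carries only the low bits of the descriptor pointer,
         * so the number of buffers completed since the last event is
         * recovered as a masked (modular) difference from the previously
         * seen value in bufs_ptr.
         */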
        ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) -
                 rxq->bufs_ptr) &
                EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);

        rxq->bufs_ptr += ready;
        rxq->bufs_pending += ready;

        SFC_ASSERT(ready > 0);
        do {
                const struct sfc_ef10_essb_rx_sw_desc *rxd;
                struct rte_mbuf *m;
                unsigned int todo_bufs;
                struct rte_mbuf *m0;

                rxd = &rxq->sw_ring[rxq->pending_id];
                m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
                        rxq->block_size - rxq->left_in_pending);

                if (ready < rxq->left_in_pending) {
                        todo_bufs = ready;
                        ready = 0;
                        rxq->left_in_pending -= todo_bufs;
                } else {
                        todo_bufs = rxq->left_in_pending;
                        ready -= todo_bufs;
                        rxq->left_in_pending = rxq->block_size;
                        if (rxq->pending_id != rxq->rxq_ptr_mask)
                                rxq->pending_id++;
                        else
                                rxq->pending_id = 0;
                }

                SFC_ASSERT(todo_bufs > 0);
                --todo_bufs;

                sfc_ef10_rx_ev_to_offloads(rx_ev, m, ~0ull);

                /* Prefetch pseudo-header */
                rte_prefetch0((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);

                m0 = m;
                while (todo_bufs-- > 0) {
                        m = sfc_ef10_essb_next_mbuf(rxq, m);
                        m->ol_flags = m0->ol_flags;
                        m->packet_type = m0->packet_type;
                        /* Prefetch pseudo-header */
                        rte_prefetch0((uint8_t *)m->buf_addr +
                                      RTE_PKTMBUF_HEADROOM);
                }
        } while (ready > 0);
}

/*
 * The function below relies on the following length and layout of the
 * Rx prefix.
 */
static const efx_rx_prefix_layout_t sfc_ef10_essb_rx_prefix_layout = {
        .erpl_length    = ES_EZ_ESSB_RX_PREFIX_LEN,
        .erpl_fields    = {
#define SFC_EF10_ESSB_RX_PREFIX_FIELD(_efx, _ef10) \
        EFX_RX_PREFIX_FIELD(_efx, ES_EZ_ESSB_RX_PREFIX_ ## _ef10, B_FALSE)

                SFC_EF10_ESSB_RX_PREFIX_FIELD(LENGTH, DATA_LEN),
                SFC_EF10_ESSB_RX_PREFIX_FIELD(USER_MARK, MARK),
                SFC_EF10_ESSB_RX_PREFIX_FIELD(RSS_HASH_VALID, HASH_VALID),
                SFC_EF10_ESSB_RX_PREFIX_FIELD(USER_MARK_VALID, MARK_VALID),
                SFC_EF10_ESSB_RX_PREFIX_FIELD(USER_FLAG, MATCH_FLAG),
                SFC_EF10_ESSB_RX_PREFIX_FIELD(RSS_HASH, HASH),

#undef  SFC_EF10_ESSB_RX_PREFIX_FIELD
        }
};
328
329 static unsigned int
330 sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
331                              struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
332 {
333         unsigned int n_rx_pkts = 0;
334         unsigned int todo_bufs;
335         struct rte_mbuf *m;
336
337         while ((todo_bufs = RTE_MIN(nb_pkts - n_rx_pkts,
338                                     rxq->bufs_pending)) > 0) {
339                 m = sfc_ef10_essb_maybe_next_completed(rxq);
340
341                 todo_bufs = RTE_MIN(todo_bufs, rxq->left_in_completed);
342
343                 rxq->bufs_pending -= todo_bufs;
344                 rxq->left_in_completed -= todo_bufs;
345
346                 SFC_ASSERT(todo_bufs > 0);
347                 todo_bufs--;
348
349                 do {
350                         const efx_qword_t *qwordp;
351                         uint16_t pkt_len;
352
353                         /* Buffers to be discarded have 0 in packet type */
354                         if (unlikely(m->packet_type == 0)) {
355                                 rte_mbuf_raw_free(m);
356                                 goto next_buf;
357                         }
358
359                         rx_pkts[n_rx_pkts++] = m;
360
361                         /* Parse pseudo-header */
362                         qwordp = (const efx_qword_t *)
363                                 ((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);
364                         pkt_len =
365                                 EFX_QWORD_FIELD(*qwordp,
366                                                 ES_EZ_ESSB_RX_PREFIX_DATA_LEN);
367
368                         m->data_off = RTE_PKTMBUF_HEADROOM +
369                                 ES_EZ_ESSB_RX_PREFIX_LEN;
370                         m->port = rxq->port_id;
371
372                         rte_pktmbuf_pkt_len(m) = pkt_len;
373                         rte_pktmbuf_data_len(m) = pkt_len;
374
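                        /*
                         * Branchless flag assembly: each multiplication
                         * by !!bit yields either the flag value or 0,
                         * so no conditional jumps are needed per field.
                         */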
                        m->ol_flags |=
                                (PKT_RX_RSS_HASH *
                                 !!EFX_TEST_QWORD_BIT(*qwordp,
                                        ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN)) |
                                (PKT_RX_FDIR_ID *
                                 !!EFX_TEST_QWORD_BIT(*qwordp,
                                        ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN)) |
                                (PKT_RX_FDIR *
                                 !!EFX_TEST_QWORD_BIT(*qwordp,
                                        ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN));

                        /* EFX_QWORD_FIELD converts little-endian to CPU */
                        m->hash.rss =
                                EFX_QWORD_FIELD(*qwordp,
                                                ES_EZ_ESSB_RX_PREFIX_HASH);
                        m->hash.fdir.hi =
                                EFX_QWORD_FIELD(*qwordp,
                                                ES_EZ_ESSB_RX_PREFIX_MARK);

next_buf:
                        m = sfc_ef10_essb_next_mbuf(rxq, m);
                } while (todo_bufs-- > 0);
        }

        return n_rx_pkts;
}


static uint16_t
sfc_ef10_essb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                        uint16_t nb_pkts)
{
        struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(rx_queue);
        const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
        uint16_t n_rx_pkts;
        efx_qword_t rx_ev;

        if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
                                   SFC_EF10_ESSB_RXQ_EXCEPTION)))
                return 0;

        n_rx_pkts = sfc_ef10_essb_rx_get_pending(rxq, rx_pkts, nb_pkts);

        while (n_rx_pkts != nb_pkts &&
               sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
                /*
                 * DROP_EVENT is internal to the NIC; software should
                 * never see it and, therefore, may ignore it.
                 */

                sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
                n_rx_pkts += sfc_ef10_essb_rx_get_pending(rxq,
                                                          rx_pkts + n_rx_pkts,
                                                          nb_pkts - n_rx_pkts);
        }

        sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
                           evq_old_read_ptr, rxq->evq_read_ptr);

        /* It is not a problem if we refill in the case of an exception */
        sfc_ef10_essb_rx_qrefill(rxq);

        return n_rx_pkts;
}
439
440 static sfc_dp_rx_qdesc_npending_t sfc_ef10_essb_rx_qdesc_npending;
441 static unsigned int
442 sfc_ef10_essb_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
443 {
444         struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
445         const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
446         efx_qword_t rx_ev;
447
448         if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
449                                    SFC_EF10_ESSB_RXQ_EXCEPTION)))
450                 return rxq->bufs_pending;
451
452         while (sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
453                 /*
454                  * DROP_EVENT is an internal to the NIC, software should
455                  * never see it and, therefore, may ignore it.
456                  */
457                 sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
458         }
459
460         sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
461                            evq_old_read_ptr, rxq->evq_read_ptr);
462
463         return rxq->bufs_pending;
464 }

static sfc_dp_rx_qdesc_status_t sfc_ef10_essb_rx_qdesc_status;
static int
sfc_ef10_essb_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
{
        struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
        unsigned int pending = sfc_ef10_essb_rx_qdesc_npending(dp_rxq);

        if (offset < pending)
                return RTE_ETH_RX_DESC_DONE;

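        /*
         * Offsets within the range of buffers posted to HW but not yet
         * completed are reported as available; the arithmetic accounts
         * for the partially consumed current block via left_in_completed.
         */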
        if (offset < (rxq->added - rxq->completed) * rxq->block_size +
                     rxq->left_in_completed - rxq->block_size)
                return RTE_ETH_RX_DESC_AVAIL;

        return RTE_ETH_RX_DESC_UNAVAIL;
}

static sfc_dp_rx_get_dev_info_t sfc_ef10_essb_rx_get_dev_info;
static void
sfc_ef10_essb_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
{
        /*
         * The number of descriptors just defines the maximum number of
         * pushed descriptors (fill level).
         */
        dev_info->rx_desc_lim.nb_min = SFC_EF10_ESSB_RX_DESCS_MIN;
        dev_info->rx_desc_lim.nb_align = SFC_EF10_ESSB_RX_DESCS_ALIGN;
}

static sfc_dp_rx_pool_ops_supported_t sfc_ef10_essb_rx_pool_ops_supported;
static int
sfc_ef10_essb_rx_pool_ops_supported(const char *pool)
{
        SFC_ASSERT(pool != NULL);

        if (strcmp(pool, "bucket") == 0)
                return 0;

        return -ENOTSUP;
}

static sfc_dp_rx_qsize_up_rings_t sfc_ef10_essb_rx_qsize_up_rings;
static int
sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc,
                                struct sfc_dp_rx_hw_limits *limits,
                                struct rte_mempool *mb_pool,
                                unsigned int *rxq_entries,
                                unsigned int *evq_entries,
                                unsigned int *rxq_max_fill_level)
{
        int rc;
        struct rte_mempool_info mp_info;
        unsigned int nb_hw_rx_desc;
        unsigned int max_events;

        rc = rte_mempool_ops_get_info(mb_pool, &mp_info);
        if (rc != 0)
                return -rc;
        if (mp_info.contig_block_size == 0)
                return EINVAL;

        /*
         * Calculate the required number of hardware Rx descriptors, each
         * carrying a contiguous block of Rx buffers.
         * It cannot be less than the Rx write pointer alignment plus 1
         * in order to avoid cases when the ring is guaranteed to be
         * empty.
         */
        nb_hw_rx_desc = RTE_MAX(SFC_DIV_ROUND_UP(nb_rx_desc,
                                                 mp_info.contig_block_size),
                                SFC_EF10_RX_WPTR_ALIGN + 1);
        if (nb_hw_rx_desc <= limits->rxq_min_entries) {
                *rxq_entries = limits->rxq_min_entries;
        } else {
                *rxq_entries = rte_align32pow2(nb_hw_rx_desc);
                if (*rxq_entries > limits->rxq_max_entries)
                        return EINVAL;
        }

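        /*
         * Each Rx buffer may produce an event, so size the EvQ for all
         * buffers that can be pushed (the write pointer alignment rounds
         * the descriptor count down) plus slack for unused EvQ entries,
         * an Rx error event, a flush event and head-tail spacing.
         */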
        max_events = RTE_ALIGN_FLOOR(nb_hw_rx_desc, SFC_EF10_RX_WPTR_ALIGN) *
                mp_info.contig_block_size +
                (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ +
                1 /* Rx error */ + 1 /* flush */ + 1 /* head-tail space */;

        *evq_entries = rte_align32pow2(max_events);
        *evq_entries = RTE_MAX(*evq_entries, limits->evq_min_entries);
        *evq_entries = RTE_MIN(*evq_entries, limits->evq_max_entries);

        /*
         * Even the maximum event queue size may be insufficient to handle
         * so many Rx descriptors. If so, we should limit the Rx queue
         * fill level.
         */
        *rxq_max_fill_level = RTE_MIN(nb_rx_desc,
                                      SFC_EF10_ESSB_RXQ_LIMIT(*evq_entries));
        return 0;
}

static sfc_dp_rx_qcreate_t sfc_ef10_essb_rx_qcreate;
static int
sfc_ef10_essb_rx_qcreate(uint16_t port_id, uint16_t queue_id,
                         const struct rte_pci_addr *pci_addr, int socket_id,
                         const struct sfc_dp_rx_qcreate_info *info,
                         struct sfc_dp_rxq **dp_rxqp)
{
        struct rte_mempool * const mp = info->refill_mb_pool;
        struct rte_mempool_info mp_info;
        struct sfc_ef10_essb_rxq *rxq;
        int rc;

        rc = rte_mempool_ops_get_info(mp, &mp_info);
        if (rc != 0) {
                /* Positive errno is used in the driver */
                rc = -rc;
                goto fail_get_contig_block_size;
        }

        /* Check if the mempool provides block dequeue */
        rc = EINVAL;
        if (mp_info.contig_block_size == 0)
                goto fail_no_block_dequeue;

        rc = ENOMEM;
        rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq == NULL)
                goto fail_rxq_alloc;

        sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

        rc = ENOMEM;
        rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
                                         info->rxq_entries,
                                         sizeof(*rxq->sw_ring),
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->sw_ring == NULL)
                goto fail_desc_alloc;

        rxq->block_size = mp_info.contig_block_size;
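        /*
         * The stride between neighbour mbufs in a contiguous block is
         * the full mempool object size: header, element and trailer.
         */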
        rxq->buf_stride = mp->header_size + mp->elt_size + mp->trailer_size;
        rxq->rxq_ptr_mask = info->rxq_entries - 1;
        rxq->evq_ptr_mask = info->evq_entries - 1;
        rxq->evq_hw_ring = info->evq_hw_ring;
        rxq->port_id = port_id;

        rxq->max_fill_level = info->max_fill_level / mp_info.contig_block_size;
        rxq->refill_threshold =
                RTE_MAX(info->refill_threshold / mp_info.contig_block_size,
                        SFC_EF10_RX_WPTR_ALIGN);
        rxq->refill_mb_pool = mp;
        rxq->rxq_hw_ring = info->rxq_hw_ring;

        rxq->doorbell = (volatile uint8_t *)info->mem_bar +
                        ER_DZ_RX_DESC_UPD_REG_OFST +
                        (info->hw_index << info->vi_window_shift);

        sfc_ef10_essb_rx_info(&rxq->dp.dpq, "RxQ doorbell is %p",
                              rxq->doorbell);
        sfc_ef10_essb_rx_info(&rxq->dp.dpq,
                              "block size is %u, buf stride is %u",
                              rxq->block_size, rxq->buf_stride);
        sfc_ef10_essb_rx_info(&rxq->dp.dpq,
                              "max fill level is %u descs (%u bufs), "
                              "refill threshold %u descs (%u bufs)",
                              rxq->max_fill_level,
                              rxq->max_fill_level * rxq->block_size,
                              rxq->refill_threshold,
                              rxq->refill_threshold * rxq->block_size);

        *dp_rxqp = &rxq->dp;
        return 0;

fail_desc_alloc:
        rte_free(rxq);

fail_rxq_alloc:
fail_no_block_dequeue:
fail_get_contig_block_size:
        return rc;
}

static sfc_dp_rx_qdestroy_t sfc_ef10_essb_rx_qdestroy;
static void
sfc_ef10_essb_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);

        rte_free(rxq->sw_ring);
        rte_free(rxq);
}

static sfc_dp_rx_qstart_t sfc_ef10_essb_rx_qstart;
static int
sfc_ef10_essb_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr,
                        const efx_rx_prefix_layout_t *pinfo)
{
        struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);

        if (pinfo->erpl_length != sfc_ef10_essb_rx_prefix_layout.erpl_length)
                return ENOTSUP;

        if (efx_rx_prefix_layout_check(pinfo,
                                       &sfc_ef10_essb_rx_prefix_layout) != 0)
                return ENOTSUP;

        rxq->evq_read_ptr = evq_read_ptr;

        /* Initialize before refill */
        rxq->completed = rxq->pending_id = rxq->added = 0;
        rxq->left_in_completed = rxq->left_in_pending = rxq->block_size;
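        /*
         * Start bufs_ptr at UINT_MAX (all ones under the LBITS mask) so
         * that the masked difference computed in
         * sfc_ef10_essb_rx_process_ev() counts every buffer reported by
         * the first event, starting from index zero.
         */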
        rxq->bufs_ptr = UINT_MAX;
        rxq->bufs_pending = 0;

        sfc_ef10_essb_rx_qrefill(rxq);

        rxq->flags |= SFC_EF10_ESSB_RXQ_STARTED;
        rxq->flags &=
                ~(SFC_EF10_ESSB_RXQ_NOT_RUNNING | SFC_EF10_ESSB_RXQ_EXCEPTION);

        return 0;
}

static sfc_dp_rx_qstop_t sfc_ef10_essb_rx_qstop;
static void
sfc_ef10_essb_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
{
        struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);

        rxq->flags |= SFC_EF10_ESSB_RXQ_NOT_RUNNING;

        *evq_read_ptr = rxq->evq_read_ptr;
}

static sfc_dp_rx_qrx_ev_t sfc_ef10_essb_rx_qrx_ev;
static bool
sfc_ef10_essb_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
{
        __rte_unused struct sfc_ef10_essb_rxq *rxq;

        rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
        SFC_ASSERT(rxq->flags & SFC_EF10_ESSB_RXQ_NOT_RUNNING);

        /*
         * It is safe to ignore the Rx event since we free all mbufs on
         * queue purge anyway.
         */

        return false;
}

static sfc_dp_rx_qpurge_t sfc_ef10_essb_rx_qpurge;
static void
sfc_ef10_essb_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
        unsigned int i;
        const struct sfc_ef10_essb_rx_sw_desc *rxd;
        struct rte_mbuf *m;

        for (i = rxq->completed; i != rxq->added; ++i) {
                rxd = &rxq->sw_ring[i & rxq->rxq_ptr_mask];
                m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
                                rxq->block_size - rxq->left_in_completed);
                while (rxq->left_in_completed > 0) {
                        rte_mbuf_raw_free(m);
                        m = sfc_ef10_essb_next_mbuf(rxq, m);
                        rxq->left_in_completed--;
                }
                rxq->left_in_completed = rxq->block_size;
        }

        rxq->flags &= ~SFC_EF10_ESSB_RXQ_STARTED;
}

struct sfc_dp_rx sfc_ef10_essb_rx = {
        .dp = {
                .name           = SFC_KVARG_DATAPATH_EF10_ESSB,
                .type           = SFC_DP_RX,
                .hw_fw_caps     = SFC_DP_HW_FW_CAP_EF10 |
                                  SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER,
        },
        .features               = SFC_DP_RX_FEAT_FLOW_FLAG |
                                  SFC_DP_RX_FEAT_FLOW_MARK,
        .dev_offload_capa       = DEV_RX_OFFLOAD_CHECKSUM |
                                  DEV_RX_OFFLOAD_RSS_HASH,
        .queue_offload_capa     = 0,
        .get_dev_info           = sfc_ef10_essb_rx_get_dev_info,
        .pool_ops_supported     = sfc_ef10_essb_rx_pool_ops_supported,
        .qsize_up_rings         = sfc_ef10_essb_rx_qsize_up_rings,
        .qcreate                = sfc_ef10_essb_rx_qcreate,
        .qdestroy               = sfc_ef10_essb_rx_qdestroy,
        .qstart                 = sfc_ef10_essb_rx_qstart,
        .qstop                  = sfc_ef10_essb_rx_qstop,
        .qrx_ev                 = sfc_ef10_essb_rx_qrx_ev,
        .qpurge                 = sfc_ef10_essb_rx_qpurge,
        .supported_ptypes_get   = sfc_ef10_supported_ptypes_get,
        .qdesc_npending         = sfc_ef10_essb_rx_qdesc_npending,
        .qdesc_status           = sfc_ef10_essb_rx_qdesc_status,
        .pkt_burst              = sfc_ef10_essb_recv_pkts,
};