net/sfc: support per-queue Rx RSS hash offload for EF100
drivers/net/sfc/sfc_ef100_rx.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2018-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

/* EF100 native datapath implementation */

#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_mbuf_ptype.h>
#include <rte_mbuf.h>
#include <rte_io.h>

#include "efx_types.h"
#include "efx_regs_ef100.h"
#include "efx.h"

#include "sfc_debug.h"
#include "sfc_tweak.h"
#include "sfc_dp_rx.h"
#include "sfc_kvargs.h"
#include "sfc_ef100.h"


#define sfc_ef100_rx_err(_rxq, ...) \
        SFC_DP_LOG(SFC_KVARG_DATAPATH_EF100, ERR, &(_rxq)->dp.dpq, __VA_ARGS__)

#define sfc_ef100_rx_debug(_rxq, ...) \
        SFC_DP_LOG(SFC_KVARG_DATAPATH_EF100, DEBUG, &(_rxq)->dp.dpq, \
                   __VA_ARGS__)

/**
 * Maximum number of descriptors/buffers in the Rx ring.
 * It should guarantee that the corresponding event queue never overfills.
 * The EF100 native datapath uses an event queue of the same size as the
 * Rx queue. The maximum number of events on the datapath can be estimated
 * as the number of Rx queue entries (one event per Rx buffer in the worst
 * case) plus Rx error and flush events.
 */
#define SFC_EF100_RXQ_LIMIT(_ndesc) \
        ((_ndesc) - 1 /* head must not step on tail */ - \
         1 /* Rx error */ - 1 /* flush */)

struct sfc_ef100_rx_sw_desc {
        struct rte_mbuf                 *mbuf;
};

struct sfc_ef100_rxq {
        /* Used on data path */
        unsigned int                    flags;
#define SFC_EF100_RXQ_STARTED           0x1
#define SFC_EF100_RXQ_NOT_RUNNING       0x2
#define SFC_EF100_RXQ_EXCEPTION         0x4
#define SFC_EF100_RXQ_RSS_HASH          0x10
        unsigned int                    ptr_mask;
        unsigned int                    evq_phase_bit_shift;
        unsigned int                    ready_pkts;
        unsigned int                    completed;
        unsigned int                    evq_read_ptr;
        volatile efx_qword_t            *evq_hw_ring;
        struct sfc_ef100_rx_sw_desc     *sw_ring;
        uint64_t                        rearm_data;
        uint16_t                        buf_size;
        uint16_t                        prefix_size;

        /* Used on refill */
        unsigned int                    added;
        unsigned int                    max_fill_level;
        unsigned int                    refill_threshold;
        struct rte_mempool              *refill_mb_pool;
        efx_qword_t                     *rxq_hw_ring;
        volatile void                   *doorbell;

        /* Datapath receive queue anchor */
        struct sfc_dp_rxq               dp;
};

static inline struct sfc_ef100_rxq *
sfc_ef100_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
{
        return container_of(dp_rxq, struct sfc_ef100_rxq, dp);
}

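/*
 * Notify the NIC about newly added Rx descriptors by writing the
 * producer index to the Rx ring doorbell register.
 */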
static inline void
sfc_ef100_rx_qpush(struct sfc_ef100_rxq *rxq, unsigned int added)
{
        efx_dword_t dword;

        EFX_POPULATE_DWORD_1(dword, ERF_GZ_RX_RING_PIDX, added & rxq->ptr_mask);

        /* DMA sync to device is not required */

        /*
         * rte_write32() has rte_io_wmb() which guarantees that the STORE
         * operations (i.e. Rx and event descriptor updates) that precede
         * the rte_io_wmb() call are visible to NIC before the STORE
         * operations that follow it (i.e. doorbell write).
         */
        rte_write32(dword.ed_u32[0], rxq->doorbell);

        sfc_ef100_rx_debug(rxq, "RxQ pushed doorbell at pidx %u (added=%u)",
                           EFX_DWORD_FIELD(dword, ERF_GZ_RX_RING_PIDX),
                           added);
}

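/*
 * Refill the Rx ring: allocate mbufs from the mempool in bulks of
 * SFC_RX_REFILL_BULK, fill in buffer addresses in the hardware Rx
 * descriptors and push the doorbell for everything that was posted.
 */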
static void
sfc_ef100_rx_qrefill(struct sfc_ef100_rxq *rxq)
{
        const unsigned int ptr_mask = rxq->ptr_mask;
        unsigned int free_space;
        unsigned int bulks;
        void *objs[SFC_RX_REFILL_BULK];
        unsigned int added = rxq->added;

        free_space = rxq->max_fill_level - (added - rxq->completed);

        if (free_space < rxq->refill_threshold)
                return;

        bulks = free_space / RTE_DIM(objs);
        /* refill_threshold guarantees that bulks is positive */
        SFC_ASSERT(bulks > 0);

        do {
                unsigned int id;
                unsigned int i;

                if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
                                                  RTE_DIM(objs)) < 0)) {
                        struct rte_eth_dev_data *dev_data =
                                rte_eth_devices[rxq->dp.dpq.port_id].data;

                        /*
                         * It is hardly a safe way to increment the counter
                         * from different contexts, but all PMDs do it.
                         */
                        dev_data->rx_mbuf_alloc_failed += RTE_DIM(objs);
                        /* Return if we have posted nothing yet */
                        if (added == rxq->added)
                                return;
                        /* Push posted */
                        break;
                }

                for (i = 0, id = added & ptr_mask;
                     i < RTE_DIM(objs);
                     ++i, ++id) {
                        struct rte_mbuf *m = objs[i];
                        struct sfc_ef100_rx_sw_desc *rxd;
                        rte_iova_t phys_addr;

                        MBUF_RAW_ALLOC_CHECK(m);

                        SFC_ASSERT((id & ~ptr_mask) == 0);
                        rxd = &rxq->sw_ring[id];
                        rxd->mbuf = m;

                        /*
                         * Avoid writing to mbuf. It is cheaper to do it
                         * when we receive packet and fill in nearby
                         * structure members.
                         */

                        phys_addr = rte_mbuf_data_iova_default(m);
                        EFX_POPULATE_QWORD_1(rxq->rxq_hw_ring[id],
                            ESF_GZ_RX_BUF_ADDR, phys_addr);
                }

                added += RTE_DIM(objs);
        } while (--bulks > 0);

        SFC_ASSERT(rxq->added != added);
        rxq->added = added;
        sfc_ef100_rx_qpush(rxq, added);
}

static inline uint64_t
sfc_ef100_rx_nt_or_inner_l4_csum(const efx_word_t class)
{
        return EFX_WORD_FIELD(class,
                              ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CSUM) ==
                ESE_GZ_RH_HCLASS_L4_CSUM_GOOD ?
                PKT_RX_L4_CKSUM_GOOD : PKT_RX_L4_CKSUM_BAD;
}

static inline uint64_t
sfc_ef100_rx_tun_outer_l4_csum(const efx_word_t class)
{
        return EFX_WORD_FIELD(class,
                              ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L4_CSUM) ==
                ESE_GZ_RH_HCLASS_L4_CSUM_GOOD ?
                PKT_RX_OUTER_L4_CKSUM_GOOD : PKT_RX_OUTER_L4_CKSUM_BAD;
}

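/*
 * Decode the CLASS field of the Rx prefix into an mbuf packet type and
 * checksum offload flags for both tunnel and non-tunnel traffic.
 */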
static uint32_t
sfc_ef100_rx_class_decode(const efx_word_t class, uint64_t *ol_flags)
{
        uint32_t ptype;
        bool no_tunnel = false;

        if (unlikely(EFX_WORD_FIELD(class, ESF_GZ_RX_PREFIX_HCLASS_L2_CLASS) !=
                     ESE_GZ_RH_HCLASS_L2_CLASS_E2_0123VLAN))
                return 0;

        switch (EFX_WORD_FIELD(class, ESF_GZ_RX_PREFIX_HCLASS_L2_N_VLAN)) {
        case 0:
                ptype = RTE_PTYPE_L2_ETHER;
                break;
        case 1:
                ptype = RTE_PTYPE_L2_ETHER_VLAN;
                break;
        default:
                ptype = RTE_PTYPE_L2_ETHER_QINQ;
                break;
        }

        switch (EFX_WORD_FIELD(class, ESF_GZ_RX_PREFIX_HCLASS_TUNNEL_CLASS)) {
        case ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NONE:
                no_tunnel = true;
                break;
        case ESE_GZ_RH_HCLASS_TUNNEL_CLASS_VXLAN:
                ptype |= RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP;
                *ol_flags |= sfc_ef100_rx_tun_outer_l4_csum(class);
                break;
        case ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NVGRE:
                ptype |= RTE_PTYPE_TUNNEL_NVGRE;
                break;
        case ESE_GZ_RH_HCLASS_TUNNEL_CLASS_GENEVE:
                ptype |= RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP;
                *ol_flags |= sfc_ef100_rx_tun_outer_l4_csum(class);
                break;
        default:
                /*
                 * The driver does not know the tunnel, but it is
                 * still a tunnel and NT_OR_INNER refers to the inner
                 * frame.
                 */
                no_tunnel = false;
        }

        if (no_tunnel) {
                bool l4_valid = true;

                switch (EFX_WORD_FIELD(class,
                        ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS)) {
                case ESE_GZ_RH_HCLASS_L3_CLASS_IP4GOOD:
                        ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
                        *ol_flags |= PKT_RX_IP_CKSUM_GOOD;
                        break;
                case ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD:
                        ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
                        *ol_flags |= PKT_RX_IP_CKSUM_BAD;
                        break;
                case ESE_GZ_RH_HCLASS_L3_CLASS_IP6:
                        ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
                        break;
                default:
                        l4_valid = false;
                }

                if (l4_valid) {
                        switch (EFX_WORD_FIELD(class,
                                ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CLASS)) {
                        case ESE_GZ_RH_HCLASS_L4_CLASS_TCP:
                                ptype |= RTE_PTYPE_L4_TCP;
                                *ol_flags |=
                                        sfc_ef100_rx_nt_or_inner_l4_csum(class);
                                break;
                        case ESE_GZ_RH_HCLASS_L4_CLASS_UDP:
                                ptype |= RTE_PTYPE_L4_UDP;
                                *ol_flags |=
                                        sfc_ef100_rx_nt_or_inner_l4_csum(class);
                                break;
                        case ESE_GZ_RH_HCLASS_L4_CLASS_FRAG:
                                ptype |= RTE_PTYPE_L4_FRAG;
                                break;
                        }
                }
        } else {
                bool l4_valid = true;

                switch (EFX_WORD_FIELD(class,
                        ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L3_CLASS)) {
                case ESE_GZ_RH_HCLASS_L3_CLASS_IP4GOOD:
                        ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
                        break;
                case ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD:
                        ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
                        *ol_flags |= PKT_RX_EIP_CKSUM_BAD;
                        break;
                case ESE_GZ_RH_HCLASS_L3_CLASS_IP6:
                        ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
                        break;
                }

                switch (EFX_WORD_FIELD(class,
                        ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS)) {
                case ESE_GZ_RH_HCLASS_L3_CLASS_IP4GOOD:
                        ptype |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
                        *ol_flags |= PKT_RX_IP_CKSUM_GOOD;
                        break;
                case ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD:
                        ptype |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
                        *ol_flags |= PKT_RX_IP_CKSUM_BAD;
                        break;
                case ESE_GZ_RH_HCLASS_L3_CLASS_IP6:
                        ptype |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
                        break;
                default:
                        l4_valid = false;
                        break;
                }

                if (l4_valid) {
                        switch (EFX_WORD_FIELD(class,
                                ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CLASS)) {
                        case ESE_GZ_RH_HCLASS_L4_CLASS_TCP:
                                ptype |= RTE_PTYPE_INNER_L4_TCP;
                                *ol_flags |=
                                        sfc_ef100_rx_nt_or_inner_l4_csum(class);
                                break;
                        case ESE_GZ_RH_HCLASS_L4_CLASS_UDP:
                                ptype |= RTE_PTYPE_INNER_L4_UDP;
                                *ol_flags |=
                                        sfc_ef100_rx_nt_or_inner_l4_csum(class);
                                break;
                        case ESE_GZ_RH_HCLASS_L4_CLASS_FRAG:
                                ptype |= RTE_PTYPE_INNER_L4_FRAG;
                                break;
                        }
                }
        }

        return ptype;
}

/*
 * The functions below rely on the following fields in the Rx prefix.
 * Some fields are mandatory, some fields are optional.
 * See sfc_ef100_rx_qstart() below.
 */
static const efx_rx_prefix_layout_t sfc_ef100_rx_prefix_layout = {
        .erpl_fields    = {
#define SFC_EF100_RX_PREFIX_FIELD(_name, _big_endian) \
        EFX_RX_PREFIX_FIELD(_name, ESF_GZ_RX_PREFIX_ ## _name, _big_endian)

                SFC_EF100_RX_PREFIX_FIELD(LENGTH, B_FALSE),
                SFC_EF100_RX_PREFIX_FIELD(RSS_HASH_VALID, B_FALSE),
                SFC_EF100_RX_PREFIX_FIELD(CLASS, B_FALSE),
                SFC_EF100_RX_PREFIX_FIELD(RSS_HASH, B_FALSE),

#undef  SFC_EF100_RX_PREFIX_FIELD
        }
};

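/*
 * Convert the Rx prefix of a received packet into mbuf fields: packet
 * type, checksum offload flags and, if the queue has the RSS hash
 * offload enabled, the RSS hash value.
 * Returns false if the packet must be dropped (bad L2 status).
 */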
static bool
sfc_ef100_rx_prefix_to_offloads(const struct sfc_ef100_rxq *rxq,
                                const efx_oword_t *rx_prefix,
                                struct rte_mbuf *m)
{
        const efx_word_t *class;
        uint64_t ol_flags = 0;

        RTE_BUILD_BUG_ON(EFX_LOW_BIT(ESF_GZ_RX_PREFIX_CLASS) % CHAR_BIT != 0);
        RTE_BUILD_BUG_ON(EFX_WIDTH(ESF_GZ_RX_PREFIX_CLASS) % CHAR_BIT != 0);
        RTE_BUILD_BUG_ON(EFX_WIDTH(ESF_GZ_RX_PREFIX_CLASS) / CHAR_BIT !=
                         sizeof(*class));
        class = (const efx_word_t *)((const uint8_t *)rx_prefix +
                EFX_LOW_BIT(ESF_GZ_RX_PREFIX_CLASS) / CHAR_BIT);
        if (unlikely(EFX_WORD_FIELD(*class,
                                    ESF_GZ_RX_PREFIX_HCLASS_L2_STATUS) !=
                     ESE_GZ_RH_HCLASS_L2_STATUS_OK))
                return false;

        m->packet_type = sfc_ef100_rx_class_decode(*class, &ol_flags);

        if ((rxq->flags & SFC_EF100_RXQ_RSS_HASH) &&
            EFX_TEST_OWORD_BIT(rx_prefix[0],
                               ESF_GZ_RX_PREFIX_RSS_HASH_VALID_LBN)) {
                ol_flags |= PKT_RX_RSS_HASH;
                /* EFX_OWORD_FIELD converts little-endian to CPU */
                m->hash.rss = EFX_OWORD_FIELD(rx_prefix[0],
                                              ESF_GZ_RX_PREFIX_RSS_HASH);
        }

        m->ol_flags = ol_flags;
        return true;
}

static const uint8_t *
sfc_ef100_rx_pkt_prefix(const struct rte_mbuf *m)
{
        return (const uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;
}

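/*
 * Get the mbuf of the current completed Rx descriptor and prefetch
 * data which is likely to be used for the subsequent descriptors.
 */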
static struct rte_mbuf *
sfc_ef100_rx_next_mbuf(struct sfc_ef100_rxq *rxq)
{
        struct rte_mbuf *m;
        unsigned int id;

        /* mbuf associated with current Rx descriptor */
        m = rxq->sw_ring[rxq->completed++ & rxq->ptr_mask].mbuf;

        /* completed is already moved to the next one */
        if (unlikely(rxq->completed == rxq->added))
                goto done;

        /*
         * Prefetch Rx prefix of the next packet.
         * If the current packet is scattered and the next mbuf is its
         * fragment, it simply prefetches some data - no harm since packet
         * rate should not be high if scatter is used.
         */
        id = rxq->completed & rxq->ptr_mask;
        rte_prefetch0(sfc_ef100_rx_pkt_prefix(rxq->sw_ring[id].mbuf));

        if (unlikely(rxq->completed + 1 == rxq->added))
                goto done;

        /*
         * Prefetch mbuf control structure of the next after next Rx
         * descriptor.
         */
        id = (id == rxq->ptr_mask) ? 0 : (id + 1);
        rte_mbuf_prefetch_part1(rxq->sw_ring[id].mbuf);

        /*
         * If the next SW Rx descriptor to be used lies in the next
         * cache line, try to make sure that it is already in cache.
         */
        if ((id & 0x7) == 0x7)
                rte_prefetch0(&rxq->sw_ring[(id + 1) & rxq->ptr_mask]);

done:
        return m;
}

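/*
 * Build mbufs for packets that the NIC has reported as ready, chaining
 * extra buffers for scattered packets, and store them in the rx_pkts
 * array. Returns the position right after the last completed packet.
 */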
static struct rte_mbuf **
sfc_ef100_rx_process_ready_pkts(struct sfc_ef100_rxq *rxq,
                                struct rte_mbuf **rx_pkts,
                                struct rte_mbuf ** const rx_pkts_end)
{
        while (rxq->ready_pkts > 0 && rx_pkts != rx_pkts_end) {
                struct rte_mbuf *pkt;
                struct rte_mbuf *lastseg;
                const efx_oword_t *rx_prefix;
                uint16_t pkt_len;
                uint16_t seg_len;
                bool deliver;

                rxq->ready_pkts--;

                pkt = sfc_ef100_rx_next_mbuf(rxq);
                MBUF_RAW_ALLOC_CHECK(pkt);

                RTE_BUILD_BUG_ON(sizeof(pkt->rearm_data[0]) !=
                                 sizeof(rxq->rearm_data));
                pkt->rearm_data[0] = rxq->rearm_data;

                /* data_off already moved past Rx prefix */
                rx_prefix = (const efx_oword_t *)sfc_ef100_rx_pkt_prefix(pkt);

                pkt_len = EFX_OWORD_FIELD(rx_prefix[0],
                                          ESF_GZ_RX_PREFIX_LENGTH);
                SFC_ASSERT(pkt_len > 0);
                rte_pktmbuf_pkt_len(pkt) = pkt_len;

                seg_len = RTE_MIN(pkt_len, rxq->buf_size - rxq->prefix_size);
                rte_pktmbuf_data_len(pkt) = seg_len;

                deliver = sfc_ef100_rx_prefix_to_offloads(rxq, rx_prefix, pkt);

                lastseg = pkt;
                while ((pkt_len -= seg_len) > 0) {
                        struct rte_mbuf *seg;

                        seg = sfc_ef100_rx_next_mbuf(rxq);
                        MBUF_RAW_ALLOC_CHECK(seg);

                        seg->data_off = RTE_PKTMBUF_HEADROOM;

                        seg_len = RTE_MIN(pkt_len, rxq->buf_size);
                        rte_pktmbuf_data_len(seg) = seg_len;
                        rte_pktmbuf_pkt_len(seg) = seg_len;

                        pkt->nb_segs++;
                        lastseg->next = seg;
                        lastseg = seg;
                }

                if (likely(deliver))
                        *rx_pkts++ = pkt;
                else
                        rte_pktmbuf_free(pkt);
        }

        return rx_pkts;
}

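/*
 * Read the next event from the event queue ring. Returns false if no
 * new event is present or if a non-Rx event sets the exception flag
 * for handling by the control path.
 */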
static bool
sfc_ef100_rx_get_event(struct sfc_ef100_rxq *rxq, efx_qword_t *ev)
{
        *ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->ptr_mask];

        if (!sfc_ef100_ev_present(ev,
                        (rxq->evq_read_ptr >> rxq->evq_phase_bit_shift) & 1))
                return false;

        if (unlikely(!sfc_ef100_ev_type_is(ev, ESE_GZ_EF100_EV_RX_PKTS))) {
                /*
                 * Do not move read_ptr to keep the event for exception
                 * handling by the control path.
                 */
                rxq->flags |= SFC_EF100_RXQ_EXCEPTION;
                sfc_ef100_rx_err(rxq,
                        "RxQ exception at EvQ ptr %u(%#x), event %08x:%08x",
                        rxq->evq_read_ptr, rxq->evq_read_ptr & rxq->ptr_mask,
                        EFX_QWORD_FIELD(*ev, EFX_DWORD_1),
                        EFX_QWORD_FIELD(*ev, EFX_DWORD_0));
                return false;
        }

        sfc_ef100_rx_debug(rxq, "RxQ got event %08x:%08x at %u (%#x)",
                           EFX_QWORD_FIELD(*ev, EFX_DWORD_1),
                           EFX_QWORD_FIELD(*ev, EFX_DWORD_0),
                           rxq->evq_read_ptr,
                           rxq->evq_read_ptr & rxq->ptr_mask);

        rxq->evq_read_ptr++;
        return true;
}

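/*
 * EF100 native datapath receive burst callback: complete packets that
 * are already ready, then poll the event queue for new Rx events and
 * refill the Rx ring.
 */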
static uint16_t
sfc_ef100_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(rx_queue);
        struct rte_mbuf ** const rx_pkts_end = &rx_pkts[nb_pkts];
        efx_qword_t rx_ev;

        rx_pkts = sfc_ef100_rx_process_ready_pkts(rxq, rx_pkts, rx_pkts_end);

        if (unlikely(rxq->flags &
                     (SFC_EF100_RXQ_NOT_RUNNING | SFC_EF100_RXQ_EXCEPTION)))
                goto done;

        while (rx_pkts != rx_pkts_end && sfc_ef100_rx_get_event(rxq, &rx_ev)) {
                rxq->ready_pkts =
                        EFX_QWORD_FIELD(rx_ev, ESF_GZ_EV_RXPKTS_NUM_PKT);
                rx_pkts = sfc_ef100_rx_process_ready_pkts(rxq, rx_pkts,
                                                          rx_pkts_end);
        }

        /* It is not a problem if we refill in the case of an exception */
        sfc_ef100_rx_qrefill(rxq);

done:
        return nb_pkts - (rx_pkts_end - rx_pkts);
}

static const uint32_t *
sfc_ef100_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
{
        static const uint32_t ef100_native_ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L2_ETHER_VLAN,
                RTE_PTYPE_L2_ETHER_QINQ,
                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_L4_FRAG,
                RTE_PTYPE_TUNNEL_VXLAN,
                RTE_PTYPE_TUNNEL_NVGRE,
                RTE_PTYPE_TUNNEL_GENEVE,
                RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
                RTE_PTYPE_INNER_L4_TCP,
                RTE_PTYPE_INNER_L4_UDP,
                RTE_PTYPE_INNER_L4_FRAG,
                RTE_PTYPE_UNKNOWN
        };

        return ef100_native_ptypes;
}

static sfc_dp_rx_qdesc_npending_t sfc_ef100_rx_qdesc_npending;
static unsigned int
sfc_ef100_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
{
        return 0;
}

static sfc_dp_rx_qdesc_status_t sfc_ef100_rx_qdesc_status;
static int
sfc_ef100_rx_qdesc_status(__rte_unused struct sfc_dp_rxq *dp_rxq,
                          __rte_unused uint16_t offset)
{
        return -ENOTSUP;
}


static sfc_dp_rx_get_dev_info_t sfc_ef100_rx_get_dev_info;
static void
sfc_ef100_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
{
        /*
         * Number of descriptors just defines maximum number of pushed
         * descriptors (fill level).
         */
        dev_info->rx_desc_lim.nb_min = SFC_RX_REFILL_BULK;
        dev_info->rx_desc_lim.nb_align = SFC_RX_REFILL_BULK;
}


static sfc_dp_rx_qsize_up_rings_t sfc_ef100_rx_qsize_up_rings;
static int
sfc_ef100_rx_qsize_up_rings(uint16_t nb_rx_desc,
                           struct sfc_dp_rx_hw_limits *limits,
                           __rte_unused struct rte_mempool *mb_pool,
                           unsigned int *rxq_entries,
                           unsigned int *evq_entries,
                           unsigned int *rxq_max_fill_level)
{
        /*
         * rte_ethdev API guarantees that the number meets min, max and
         * alignment requirements.
         */
        if (nb_rx_desc <= limits->rxq_min_entries)
                *rxq_entries = limits->rxq_min_entries;
        else
                *rxq_entries = rte_align32pow2(nb_rx_desc);

        *evq_entries = *rxq_entries;

        *rxq_max_fill_level = RTE_MIN(nb_rx_desc,
                                      SFC_EF100_RXQ_LIMIT(*evq_entries));
        return 0;
}


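/*
 * Build the 64-bit template used to rearm mbuf fields (refcnt,
 * data_off, nb_segs, port) in one store on the receive path.
 */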
static uint64_t
sfc_ef100_mk_mbuf_rearm_data(uint16_t port_id, uint16_t prefix_size)
{
        struct rte_mbuf m;

        memset(&m, 0, sizeof(m));

        rte_mbuf_refcnt_set(&m, 1);
        m.data_off = RTE_PKTMBUF_HEADROOM + prefix_size;
        m.nb_segs = 1;
        m.port = port_id;

        /* rearm_data covers structure members filled in above */
        rte_compiler_barrier();
        RTE_BUILD_BUG_ON(sizeof(m.rearm_data[0]) != sizeof(uint64_t));
        return m.rearm_data[0];
}

static sfc_dp_rx_qcreate_t sfc_ef100_rx_qcreate;
static int
sfc_ef100_rx_qcreate(uint16_t port_id, uint16_t queue_id,
                    const struct rte_pci_addr *pci_addr, int socket_id,
                    const struct sfc_dp_rx_qcreate_info *info,
                    struct sfc_dp_rxq **dp_rxqp)
{
        struct sfc_ef100_rxq *rxq;
        int rc;

        rc = EINVAL;
        if (info->rxq_entries != info->evq_entries)
                goto fail_rxq_args;

        rc = ENOMEM;
        rxq = rte_zmalloc_socket("sfc-ef100-rxq", sizeof(*rxq),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq == NULL)
                goto fail_rxq_alloc;

        sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

        rc = ENOMEM;
        rxq->sw_ring = rte_calloc_socket("sfc-ef100-rxq-sw_ring",
                                         info->rxq_entries,
                                         sizeof(*rxq->sw_ring),
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->sw_ring == NULL)
                goto fail_desc_alloc;

        rxq->flags |= SFC_EF100_RXQ_NOT_RUNNING;
        rxq->ptr_mask = info->rxq_entries - 1;
        rxq->evq_phase_bit_shift = rte_bsf32(info->evq_entries);
        rxq->evq_hw_ring = info->evq_hw_ring;
        rxq->max_fill_level = info->max_fill_level;
        rxq->refill_threshold = info->refill_threshold;
        rxq->prefix_size = info->prefix_size;
        rxq->buf_size = info->buf_size;
        rxq->refill_mb_pool = info->refill_mb_pool;
        rxq->rxq_hw_ring = info->rxq_hw_ring;
        rxq->doorbell = (volatile uint8_t *)info->mem_bar +
                        ER_GZ_RX_RING_DOORBELL_OFST +
                        (info->hw_index << info->vi_window_shift);

        sfc_ef100_rx_debug(rxq, "RxQ doorbell is %p", rxq->doorbell);

        *dp_rxqp = &rxq->dp;
        return 0;

fail_desc_alloc:
        rte_free(rxq);

fail_rxq_alloc:
fail_rxq_args:
        return rc;
}

static sfc_dp_rx_qdestroy_t sfc_ef100_rx_qdestroy;
static void
sfc_ef100_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);

        rte_free(rxq->sw_ring);
        rte_free(rxq);
}

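/*
 * Start the Rx queue: validate the Rx prefix layout chosen by the
 * firmware, enable per-queue offloads accordingly and do the initial
 * ring refill.
 */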
static sfc_dp_rx_qstart_t sfc_ef100_rx_qstart;
static int
sfc_ef100_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr,
                    const efx_rx_prefix_layout_t *pinfo)
{
        struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);
        uint32_t unsup_rx_prefix_fields;

        SFC_ASSERT(rxq->completed == 0);
        SFC_ASSERT(rxq->added == 0);

        /* Prefix must fit into reserved Rx buffer space */
        if (pinfo->erpl_length > rxq->prefix_size)
                return ENOTSUP;

        unsup_rx_prefix_fields =
                efx_rx_prefix_layout_check(pinfo, &sfc_ef100_rx_prefix_layout);

        /* LENGTH and CLASS fields must always be present */
        if ((unsup_rx_prefix_fields &
             ((1U << EFX_RX_PREFIX_FIELD_LENGTH) |
              (1U << EFX_RX_PREFIX_FIELD_CLASS))) != 0)
                return ENOTSUP;

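        /*
         * Deliver the RSS hash to mbufs only if both the hash value and
         * its validity flag are present in the Rx prefix.
         */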
        if ((unsup_rx_prefix_fields &
             ((1U << EFX_RX_PREFIX_FIELD_RSS_HASH_VALID) |
              (1U << EFX_RX_PREFIX_FIELD_RSS_HASH))) == 0)
                rxq->flags |= SFC_EF100_RXQ_RSS_HASH;
        else
                rxq->flags &= ~SFC_EF100_RXQ_RSS_HASH;

        rxq->prefix_size = pinfo->erpl_length;
        rxq->rearm_data = sfc_ef100_mk_mbuf_rearm_data(rxq->dp.dpq.port_id,
                                                       rxq->prefix_size);

        sfc_ef100_rx_qrefill(rxq);

        rxq->evq_read_ptr = evq_read_ptr;

        rxq->flags |= SFC_EF100_RXQ_STARTED;
        rxq->flags &= ~(SFC_EF100_RXQ_NOT_RUNNING | SFC_EF100_RXQ_EXCEPTION);

        return 0;
}

static sfc_dp_rx_qstop_t sfc_ef100_rx_qstop;
static void
sfc_ef100_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
{
        struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);

        rxq->flags |= SFC_EF100_RXQ_NOT_RUNNING;

        *evq_read_ptr = rxq->evq_read_ptr;
}

static sfc_dp_rx_qrx_ev_t sfc_ef100_rx_qrx_ev;
static bool
sfc_ef100_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
{
        __rte_unused struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);

        SFC_ASSERT(rxq->flags & SFC_EF100_RXQ_NOT_RUNNING);

        /*
         * It is safe to ignore Rx event since we free all mbufs on
         * queue purge anyway.
         */

        return false;
}

static sfc_dp_rx_qpurge_t sfc_ef100_rx_qpurge;
static void
sfc_ef100_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);
        unsigned int i;
        struct sfc_ef100_rx_sw_desc *rxd;

        for (i = rxq->completed; i != rxq->added; ++i) {
                rxd = &rxq->sw_ring[i & rxq->ptr_mask];
                rte_mbuf_raw_free(rxd->mbuf);
                rxd->mbuf = NULL;
        }

        rxq->completed = rxq->added = 0;
        rxq->ready_pkts = 0;

        rxq->flags &= ~SFC_EF100_RXQ_STARTED;
}

struct sfc_dp_rx sfc_ef100_rx = {
        .dp = {
                .name           = SFC_KVARG_DATAPATH_EF100,
                .type           = SFC_DP_RX,
                .hw_fw_caps     = SFC_DP_HW_FW_CAP_EF100,
        },
        .features               = SFC_DP_RX_FEAT_MULTI_PROCESS,
        .dev_offload_capa       = 0,
        .queue_offload_capa     = DEV_RX_OFFLOAD_CHECKSUM |
                                  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                                  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
                                  DEV_RX_OFFLOAD_SCATTER |
                                  DEV_RX_OFFLOAD_RSS_HASH,
        .get_dev_info           = sfc_ef100_rx_get_dev_info,
        .qsize_up_rings         = sfc_ef100_rx_qsize_up_rings,
        .qcreate                = sfc_ef100_rx_qcreate,
        .qdestroy               = sfc_ef100_rx_qdestroy,
        .qstart                 = sfc_ef100_rx_qstart,
        .qstop                  = sfc_ef100_rx_qstop,
        .qrx_ev                 = sfc_ef100_rx_qrx_ev,
        .qpurge                 = sfc_ef100_rx_qpurge,
        .supported_ptypes_get   = sfc_ef100_supported_ptypes_get,
        .qdesc_npending         = sfc_ef100_rx_qdesc_npending,
        .qdesc_status           = sfc_ef100_rx_qdesc_status,
        .pkt_burst              = sfc_ef100_recv_pkts,
};