net/sfc: support equal stride super-buffer Rx mode
drivers/net/sfc/sfc_rx.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_mempool.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"

/*
 * Maximum number of Rx queue flush attempts in the case of failure or
 * flush timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS          (3)

/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS      (1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. It defines Rx queue flush attempt timeout
 * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS     (2000)

void
sfc_rx_qflush_done(struct sfc_rxq *rxq)
{
        rxq->state |= SFC_RXQ_FLUSHED;
        rxq->state &= ~SFC_RXQ_FLUSHING;
}

void
sfc_rx_qflush_failed(struct sfc_rxq *rxq)
{
        rxq->state |= SFC_RXQ_FLUSH_FAILED;
        rxq->state &= ~SFC_RXQ_FLUSHING;
}

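/*
 * Refill the Rx ring from the mempool in bulks of SFC_RX_REFILL_BULK
 * mbufs: buffers are posted to the common queue bulk by bulk and the
 * doorbell is pushed once at the end to amortize its cost.
 */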
static void
sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
{
        unsigned int free_space;
        unsigned int bulks;
        void *objs[SFC_RX_REFILL_BULK];
        efsys_dma_addr_t addr[RTE_DIM(objs)];
        unsigned int added = rxq->added;
        unsigned int id;
        unsigned int i;
        struct sfc_efx_rx_sw_desc *rxd;
        struct rte_mbuf *m;
        uint16_t port_id = rxq->dp.dpq.port_id;

        free_space = rxq->max_fill_level - (added - rxq->completed);

        if (free_space < rxq->refill_threshold)
                return;

        bulks = free_space / RTE_DIM(objs);
        /* refill_threshold guarantees that bulks is positive */
        SFC_ASSERT(bulks > 0);

        id = added & rxq->ptr_mask;
        do {
                if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
                                                  RTE_DIM(objs)) < 0)) {
                        /*
                         * It is hardly a safe way to increment the counter
                         * from different contexts, but all PMDs do it.
                         */
                        rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
                                RTE_DIM(objs);
                        /* Return if we have posted nothing yet */
                        if (added == rxq->added)
                                return;
                        /* Push posted */
                        break;
                }

                for (i = 0; i < RTE_DIM(objs);
                     ++i, id = (id + 1) & rxq->ptr_mask) {
                        m = objs[i];

                        rxd = &rxq->sw_desc[id];
                        rxd->mbuf = m;

                        SFC_ASSERT(rte_mbuf_refcnt_read(m) == 1);
                        m->data_off = RTE_PKTMBUF_HEADROOM;
                        SFC_ASSERT(m->next == NULL);
                        SFC_ASSERT(m->nb_segs == 1);
                        m->port = port_id;

                        addr[i] = rte_pktmbuf_iova(m);
                }

                efx_rx_qpost(rxq->common, addr, rxq->buf_size,
                             RTE_DIM(objs), rxq->completed, added);
                added += RTE_DIM(objs);
        } while (--bulks > 0);

        SFC_ASSERT(added != rxq->added);
        rxq->added = added;
        efx_rx_qpush(rxq->common, added, &rxq->pushed);
}

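/*
 * Convert checksum validation flags from the Rx descriptor to mbuf
 * offload flags: a packet recognized as IPv4 (or TCP/UDP) without the
 * corresponding checksum-OK flag is reported as bad, and unrecognized
 * packets are left as checksum-unknown.
 */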
static uint64_t
sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
{
        uint64_t mbuf_flags = 0;

        switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
        case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
                mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
                break;
        case EFX_PKT_IPV4:
                mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
                break;
        default:
                RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
                SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
                           PKT_RX_IP_CKSUM_UNKNOWN);
                break;
        }

        switch ((desc_flags &
                 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
        case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
        case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
                mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
                break;
        case EFX_PKT_TCP:
        case EFX_PKT_UDP:
                mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
                break;
        default:
                RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
                SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
                           PKT_RX_L4_CKSUM_UNKNOWN);
                break;
        }

        return mbuf_flags;
}

static uint32_t
sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
{
        return RTE_PTYPE_L2_ETHER |
                ((desc_flags & EFX_PKT_IPV4) ?
                        RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
                ((desc_flags & EFX_PKT_IPV6) ?
                        RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
                ((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
                ((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
}

static const uint32_t *
sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_UNKNOWN
        };

        return ptypes;
}

static void
sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
                        struct rte_mbuf *m)
{
        uint8_t *mbuf_data;

        if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
                return;

        mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);

        if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
                m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
                                                      EFX_RX_HASHALG_TOEPLITZ,
                                                      mbuf_data);

                m->ol_flags |= PKT_RX_RSS_HASH;
        }
}

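/*
 * libefx-based datapath receive burst. The event queue is polled first
 * to update the pending descriptor count; completed descriptors are then
 * walked, chaining multi-fragment (scattered) packets and dropping
 * descriptors marked for discard together with the rest of the packet.
 */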
static uint16_t
sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct sfc_dp_rxq *dp_rxq = rx_queue;
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
        unsigned int completed;
        unsigned int prefix_size = rxq->prefix_size;
        unsigned int done_pkts = 0;
        boolean_t discard_next = B_FALSE;
        struct rte_mbuf *scatter_pkt = NULL;

        if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
                return 0;

        sfc_ev_qpoll(rxq->evq);

        completed = rxq->completed;
        while (completed != rxq->pending && done_pkts < nb_pkts) {
                unsigned int id;
                struct sfc_efx_rx_sw_desc *rxd;
                struct rte_mbuf *m;
                unsigned int seg_len;
                unsigned int desc_flags;

                id = completed++ & rxq->ptr_mask;
                rxd = &rxq->sw_desc[id];
                m = rxd->mbuf;
                desc_flags = rxd->flags;

                if (discard_next)
                        goto discard;

                if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
                        goto discard;

                if (desc_flags & EFX_PKT_PREFIX_LEN) {
                        uint16_t tmp_size;
                        int rc __rte_unused;

                        rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
                                rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
                        SFC_ASSERT(rc == 0);
                        seg_len = tmp_size;
                } else {
                        seg_len = rxd->size - prefix_size;
                }

                rte_pktmbuf_data_len(m) = seg_len;
                rte_pktmbuf_pkt_len(m) = seg_len;

                if (scatter_pkt != NULL) {
                        if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
                                rte_pktmbuf_free(scatter_pkt);
                                goto discard;
                        }
                        /* The packet to deliver */
                        m = scatter_pkt;
                }

                if (desc_flags & EFX_PKT_CONT) {
                        /* The packet is scattered, more fragments to come */
                        scatter_pkt = m;
                        /* Further fragments have no prefix */
                        prefix_size = 0;
                        continue;
                }

                /* Scattered packet is done */
                scatter_pkt = NULL;
                /* The first fragment of the packet has prefix */
                prefix_size = rxq->prefix_size;

                m->ol_flags =
                        sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
                m->packet_type =
                        sfc_efx_rx_desc_flags_to_packet_type(desc_flags);

                /*
                 * Extract RSS hash from the packet prefix and
                 * set the corresponding field (if needed and possible)
                 */
                sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);

                m->data_off += prefix_size;

                *rx_pkts++ = m;
                done_pkts++;
                continue;

discard:
                discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
                rte_mempool_put(rxq->refill_mb_pool, m);
                rxd->mbuf = NULL;
        }

        /* pending is only moved when entire packet is received */
        SFC_ASSERT(scatter_pkt == NULL);

        rxq->completed = completed;

        sfc_efx_rx_qrefill(rxq);

        return done_pkts;
}

static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
static unsigned int
sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

        if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
                return 0;

        sfc_ev_qpoll(rxq->evq);

        return rxq->pending - rxq->completed;
}

static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status;
static int
sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
{
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

        if (unlikely(offset > rxq->ptr_mask))
                return -EINVAL;

        /*
         * Poll EvQ to derive up-to-date 'rxq->pending' figure;
         * it is required for the queue to be running, but the
         * check is omitted because API design assumes that it
         * is the duty of the caller to satisfy all conditions
         */
        SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) ==
                   SFC_EFX_RXQ_FLAG_RUNNING);
        sfc_ev_qpoll(rxq->evq);

        /*
         * There is a handful of reserved entries in the ring,
         * but an explicit check whether the offset points to
         * a reserved entry is neglected since the two checks
         * below rely on the figures which take the HW limits
         * into account and thus if an entry is reserved, the
         * checks will fail and UNAVAIL code will be returned
         */

        if (offset < (rxq->pending - rxq->completed))
                return RTE_ETH_RX_DESC_DONE;

        if (offset < (rxq->added - rxq->completed))
                return RTE_ETH_RX_DESC_AVAIL;

        return RTE_ETH_RX_DESC_UNAVAIL;
}

struct sfc_rxq *
sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
        const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
        struct rte_eth_dev *eth_dev;
        struct sfc_adapter *sa;
        struct sfc_rxq *rxq;

        SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
        eth_dev = &rte_eth_devices[dpq->port_id];

        sa = eth_dev->data->dev_private;

        SFC_ASSERT(dpq->queue_id < sa->rxq_count);
        rxq = sa->rxq_info[dpq->queue_id].rxq;

        SFC_ASSERT(rxq != NULL);
        return rxq;
}

static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
static int
sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
                          __rte_unused struct rte_mempool *mb_pool,
                          unsigned int *rxq_entries,
                          unsigned int *evq_entries,
                          unsigned int *rxq_max_fill_level)
{
        *rxq_entries = nb_rx_desc;
        *evq_entries = nb_rx_desc;
        *rxq_max_fill_level = EFX_RXQ_LIMIT(*rxq_entries);
        return 0;
}

static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
static int
sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
                   const struct rte_pci_addr *pci_addr, int socket_id,
                   const struct sfc_dp_rx_qcreate_info *info,
                   struct sfc_dp_rxq **dp_rxqp)
{
        struct sfc_efx_rxq *rxq;
        int rc;

        rc = ENOMEM;
        rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq == NULL)
                goto fail_rxq_alloc;

        sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

        rc = ENOMEM;
        rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
                                         info->rxq_entries,
                                         sizeof(*rxq->sw_desc),
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->sw_desc == NULL)
                goto fail_desc_alloc;

        /* efx datapath is bound to efx control path */
        rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
        if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
                rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
        rxq->ptr_mask = info->rxq_entries - 1;
        rxq->batch_max = info->batch_max;
        rxq->prefix_size = info->prefix_size;
        rxq->max_fill_level = info->max_fill_level;
        rxq->refill_threshold = info->refill_threshold;
        rxq->buf_size = info->buf_size;
        rxq->refill_mb_pool = info->refill_mb_pool;

        *dp_rxqp = &rxq->dp;
        return 0;

fail_desc_alloc:
        rte_free(rxq);

fail_rxq_alloc:
        return rc;
}

static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
static void
sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

        rte_free(rxq->sw_desc);
        rte_free(rxq);
}

static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
static int
sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
                  __rte_unused unsigned int evq_read_ptr)
{
        /* libefx-based datapath is specific to libefx-based PMD */
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
        struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);

        rxq->common = crxq->common;

        rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

        sfc_efx_rx_qrefill(rxq);

        rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);

        return 0;
}

static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
static void
sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
                 __rte_unused unsigned int *evq_read_ptr)
{
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

        rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;

        /* libefx-based datapath is bound to libefx-based PMD and uses
         * event queue structure directly. So, there is no necessity to
         * return EvQ read pointer.
         */
}

static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
static void
sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
        unsigned int i;
        struct sfc_efx_rx_sw_desc *rxd;

        for (i = rxq->completed; i != rxq->added; ++i) {
                rxd = &rxq->sw_desc[i & rxq->ptr_mask];
                rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
                rxd->mbuf = NULL;
                /* Packed stream relies on 0 in inactive SW desc.
                 * Rx queue stop is not performance critical, so
                 * there is no harm to do it always.
                 */
                rxd->flags = 0;
                rxd->size = 0;
        }

        rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
}

struct sfc_dp_rx sfc_efx_rx = {
        .dp = {
                .name           = SFC_KVARG_DATAPATH_EFX,
                .type           = SFC_DP_RX,
                .hw_fw_caps     = 0,
        },
        .features               = SFC_DP_RX_FEAT_SCATTER,
        .qsize_up_rings         = sfc_efx_rx_qsize_up_rings,
        .qcreate                = sfc_efx_rx_qcreate,
        .qdestroy               = sfc_efx_rx_qdestroy,
        .qstart                 = sfc_efx_rx_qstart,
        .qstop                  = sfc_efx_rx_qstop,
        .qpurge                 = sfc_efx_rx_qpurge,
        .supported_ptypes_get   = sfc_efx_supported_ptypes_get,
        .qdesc_npending         = sfc_efx_rx_qdesc_npending,
        .qdesc_status           = sfc_efx_rx_qdesc_status,
        .pkt_burst              = sfc_efx_recv_pkts,
};

unsigned int
sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq *rxq;

        SFC_ASSERT(sw_index < sa->rxq_count);
        rxq = sa->rxq_info[sw_index].rxq;

        if (rxq == NULL || (rxq->state & SFC_RXQ_STARTED) == 0)
                return 0;

        return sa->dp_rx->qdesc_npending(rxq->dp);
}

int
sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset)
{
        struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);

        return offset < rxq->evq->sa->dp_rx->qdesc_npending(dp_rxq);
}


static void
sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq *rxq;
        unsigned int retry_count;
        unsigned int wait_count;
        int rc;

        rxq = sa->rxq_info[sw_index].rxq;
        SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

        /*
         * Retry Rx queue flushing if the flush has failed or timed out.
         * In the worst case it can delay for 6 seconds.
         */
        for (retry_count = 0;
             ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
             (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
             ++retry_count) {
                rc = efx_rx_qflush(rxq->common);
                if (rc != 0) {
                        rxq->state |= (rc == EALREADY) ?
                                SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
                        break;
                }
                rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
                rxq->state |= SFC_RXQ_FLUSHING;

                /*
                 * Wait for Rx queue flush done or failed event at least
                 * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
                 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
                 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
                 */
                wait_count = 0;
                do {
                        rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
                        sfc_ev_qpoll(rxq->evq);
                } while ((rxq->state & SFC_RXQ_FLUSHING) &&
                         (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

                if (rxq->state & SFC_RXQ_FLUSHING)
                        sfc_err(sa, "RxQ %u flush timed out", sw_index);

                if (rxq->state & SFC_RXQ_FLUSH_FAILED)
                        sfc_err(sa, "RxQ %u flush failed", sw_index);

                if (rxq->state & SFC_RXQ_FLUSHED)
                        sfc_notice(sa, "RxQ %u flushed", sw_index);
        }

        sa->dp_rx->qpurge(rxq->dp);
}

static int
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
{
        struct sfc_rss *rss = &sa->rss;
        boolean_t need_rss = (rss->channels > 0) ? B_TRUE : B_FALSE;
        struct sfc_port *port = &sa->port;
        int rc;

        /*
         * If promiscuous or all-multicast mode has been requested, setting
         * filter for the default Rx queue might fail, in particular, while
         * running over a PCI function which is not a member of the
         * corresponding privilege groups; if this occurs, a few iterations
         * will be made to repeat this step without the promiscuous and
         * all-multicast flags set
         */
retry:
        rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, need_rss);
        if (rc == 0)
                return 0;
        else if (rc != EOPNOTSUPP)
                return rc;

        if (port->promisc) {
                sfc_warn(sa, "promiscuous mode has been requested, "
                             "but the HW rejects it");
                sfc_warn(sa, "promiscuous mode will be disabled");

                port->promisc = B_FALSE;
                rc = sfc_set_rx_mode(sa);
                if (rc != 0)
                        return rc;

                goto retry;
        }

        if (port->allmulti) {
                sfc_warn(sa, "all-multicast mode has been requested, "
                             "but the HW rejects it");
                sfc_warn(sa, "all-multicast mode will be disabled");

                port->allmulti = B_FALSE;
                rc = sfc_set_rx_mode(sa);
                if (rc != 0)
                        return rc;

                goto retry;
        }

        return rc;
}

int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_port *port = &sa->port;
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;
        struct sfc_evq *evq;
        int rc;

        sfc_log_init(sa, "sw_index=%u", sw_index);

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];
        rxq = rxq_info->rxq;
        SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

        evq = rxq->evq;

        rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
        if (rc != 0)
                goto fail_ev_qstart;

        switch (rxq_info->type) {
        case EFX_RXQ_TYPE_DEFAULT:
                rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
                        &rxq->mem, rxq_info->entries, 0 /* not used on EF10 */,
                        rxq_info->type_flags, evq->common, &rxq->common);
                break;
        case EFX_RXQ_TYPE_ES_SUPER_BUFFER: {
                struct rte_mempool *mp = rxq->refill_mb_pool;
                struct rte_mempool_info mp_info;

                rc = rte_mempool_ops_get_info(mp, &mp_info);
                if (rc != 0) {
                        /* Positive errno is used in the driver */
                        rc = -rc;
                        goto fail_mp_get_info;
                }
                if (mp_info.contig_block_size <= 0) {
                        rc = EINVAL;
                        goto fail_bad_contig_block_size;
                }
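                /*
                 * Equal stride super-buffer: the mempool provides mbufs in
                 * contiguous blocks; the total object size (header, element
                 * and trailer) is the stride between packet buffers within
                 * a block, and the block size comes from the mempool driver.
                 */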
                rc = efx_rx_qcreate_es_super_buffer(sa->nic, rxq->hw_index, 0,
                        mp_info.contig_block_size, rxq->buf_size,
                        mp->header_size + mp->elt_size + mp->trailer_size,
                        0 /* hol_block_timeout */,
                        &rxq->mem, rxq_info->entries, rxq_info->type_flags,
                        evq->common, &rxq->common);
                break;
        }
        default:
                rc = ENOTSUP;
        }
        if (rc != 0)
                goto fail_rx_qcreate;

        efx_rx_qenable(rxq->common);

        rc = sa->dp_rx->qstart(rxq->dp, evq->read_ptr);
        if (rc != 0)
                goto fail_dp_qstart;

        rxq->state |= SFC_RXQ_STARTED;

        if ((sw_index == 0) && !port->isolated) {
                rc = sfc_rx_default_rxq_set_filter(sa, rxq);
                if (rc != 0)
                        goto fail_mac_filter_default_rxq_set;
        }

        /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
        sa->eth_dev->data->rx_queue_state[sw_index] =
                RTE_ETH_QUEUE_STATE_STARTED;

        return 0;

fail_mac_filter_default_rxq_set:
        sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);

fail_dp_qstart:
        sfc_rx_qflush(sa, sw_index);

fail_rx_qcreate:
fail_bad_contig_block_size:
fail_mp_get_info:
        sfc_ev_qstop(evq);

fail_ev_qstart:
        return rc;
}

void
sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;

        sfc_log_init(sa, "sw_index=%u", sw_index);

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];
        rxq = rxq_info->rxq;

        if (rxq->state == SFC_RXQ_INITIALIZED)
                return;
        SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

        /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
        sa->eth_dev->data->rx_queue_state[sw_index] =
                RTE_ETH_QUEUE_STATE_STOPPED;

        sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);

        if (sw_index == 0)
                efx_mac_filter_default_rxq_clear(sa->nic);

        sfc_rx_qflush(sa, sw_index);

        rxq->state = SFC_RXQ_INITIALIZED;

        efx_rx_qdestroy(rxq->common);

        sfc_ev_qstop(rxq->evq);
}

uint64_t
sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        uint64_t caps = 0;

        caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
        caps |= DEV_RX_OFFLOAD_CRC_STRIP;
        caps |= DEV_RX_OFFLOAD_IPV4_CKSUM;
        caps |= DEV_RX_OFFLOAD_UDP_CKSUM;
        caps |= DEV_RX_OFFLOAD_TCP_CKSUM;

        if (encp->enc_tunnel_encapsulations_supported &&
            (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
                caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;

        return caps;
}

uint64_t
sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
{
        uint64_t caps = 0;

        if (sa->dp_rx->features & SFC_DP_RX_FEAT_SCATTER)
                caps |= DEV_RX_OFFLOAD_SCATTER;

        return caps;
}

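/* Log each Rx offload bit from the given mask by its RTE name */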
static void
sfc_rx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
                    const char *verdict, uint64_t offloads)
{
        unsigned long long bit;

        while ((bit = __builtin_ffsll(offloads)) != 0) {
                uint64_t flag = (1ULL << --bit);

                sfc_err(sa, "Rx %s offload %s %s", offload_group,
                        rte_eth_dev_rx_offload_name(flag), verdict);

                offloads &= ~flag;
        }
}

static boolean_t
sfc_rx_queue_offloads_mismatch(struct sfc_adapter *sa, uint64_t requested)
{
        uint64_t mandatory = sa->eth_dev->data->dev_conf.rxmode.offloads;
        uint64_t supported = sfc_rx_get_dev_offload_caps(sa) |
                             sfc_rx_get_queue_offload_caps(sa);
        uint64_t rejected = requested & ~supported;
        uint64_t missing = (requested & mandatory) ^ mandatory;
        boolean_t mismatch = B_FALSE;

        if (rejected) {
                sfc_rx_log_offloads(sa, "queue", "is unsupported", rejected);
                mismatch = B_TRUE;
        }

        if (missing) {
                sfc_rx_log_offloads(sa, "queue", "must be set", missing);
                mismatch = B_TRUE;
        }

        return mismatch;
}

static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
                   const struct rte_eth_rxconf *rx_conf)
{
        uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
                                      sfc_rx_get_queue_offload_caps(sa);
        int rc = 0;

        if (rx_conf->rx_thresh.pthresh != 0 ||
            rx_conf->rx_thresh.hthresh != 0 ||
            rx_conf->rx_thresh.wthresh != 0) {
                sfc_warn(sa,
                        "RxQ prefetch/host/writeback thresholds are not supported");
        }

        if (rx_conf->rx_free_thresh > rxq_max_fill_level) {
                sfc_err(sa,
                        "RxQ free threshold too large: %u vs maximum %u",
                        rx_conf->rx_free_thresh, rxq_max_fill_level);
                rc = EINVAL;
        }

        if (rx_conf->rx_drop_en == 0) {
                sfc_err(sa, "RxQ drop disable is not supported");
                rc = EINVAL;
        }

        if ((rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
            DEV_RX_OFFLOAD_CHECKSUM)
                sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");

        if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
            (~rx_conf->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
                sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");

        if (sfc_rx_queue_offloads_mismatch(sa, rx_conf->offloads))
                rc = EINVAL;

        return rc;
}

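/*
 * The alignment guaranteed for the start of packet data is the largest
 * power of 2 which divides both the cache-line size (the mbuf object
 * start alignment) and the fixed data offset from the mbuf object start.
 */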
static unsigned int
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
        uint32_t data_off;
        uint32_t order;

        /* The mbuf object itself is always cache line aligned */
        order = rte_bsf32(RTE_CACHE_LINE_SIZE);

        /* Data offset from mbuf object start */
        data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
                RTE_PKTMBUF_HEADROOM;

        order = MIN(order, rte_bsf32(data_off));

        return 1u << order;
}

static uint16_t
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
        const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
        uint16_t buf_size;
        unsigned int buf_aligned;
        unsigned int start_alignment;
        unsigned int end_padding_alignment;

        /* Below it is assumed that both alignments are power of 2 */
        SFC_ASSERT(rte_is_power_of_2(nic_align_start));
        SFC_ASSERT(rte_is_power_of_2(nic_align_end));

        /*
         * mbuf is always cache line aligned, double-check
         * that it meets rx buffer start alignment requirements.
         */

        /* Start from mbuf pool data room size */
        buf_size = rte_pktmbuf_data_room_size(mb_pool);

        /* Remove headroom */
        if (buf_size <= RTE_PKTMBUF_HEADROOM) {
                sfc_err(sa,
                        "RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
                        mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
                return 0;
        }
        buf_size -= RTE_PKTMBUF_HEADROOM;

        /* Calculate guaranteed data start alignment */
        buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

        /* Reserve space for start alignment */
        if (buf_aligned < nic_align_start) {
                start_alignment = nic_align_start - buf_aligned;
                if (buf_size <= start_alignment) {
                        sfc_err(sa,
                                "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
                                mb_pool->name,
                                rte_pktmbuf_data_room_size(mb_pool),
                                RTE_PKTMBUF_HEADROOM, start_alignment);
                        return 0;
                }
                buf_aligned = nic_align_start;
                buf_size -= start_alignment;
        } else {
                start_alignment = 0;
        }

        /* Make sure that end padding does not write beyond the buffer */
        if (buf_aligned < nic_align_end) {
                /*
                 * Estimate space which can be lost. If guaranteed buffer
                 * size is odd, lost space is (nic_align_end - 1). More
                 * accurate formula is below.
                 */
                end_padding_alignment = nic_align_end -
                        MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
                if (buf_size <= end_padding_alignment) {
                        sfc_err(sa,
                                "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
                                mb_pool->name,
                                rte_pktmbuf_data_room_size(mb_pool),
                                RTE_PKTMBUF_HEADROOM, start_alignment,
                                end_padding_alignment);
                        return 0;
                }
                buf_size -= end_padding_alignment;
        } else {
                /*
                 * Start is aligned the same or better than end,
                 * just align length.
                 */
                buf_size = P2ALIGN(buf_size, nic_align_end);
        }

        return buf_size;
}

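/*
 * Initialize an Rx queue: size up the rings for the chosen datapath,
 * validate the configuration and mbuf pool, pick the queue type
 * (default or equal stride super-buffer), then set up the event queue,
 * DMA memory and the datapath queue itself.
 */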
int
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
             uint16_t nb_rx_desc, unsigned int socket_id,
             const struct rte_eth_rxconf *rx_conf,
             struct rte_mempool *mb_pool)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        struct sfc_rss *rss = &sa->rss;
        int rc;
        unsigned int rxq_entries;
        unsigned int evq_entries;
        unsigned int rxq_max_fill_level;
        uint16_t buf_size;
        struct sfc_rxq_info *rxq_info;
        struct sfc_evq *evq;
        struct sfc_rxq *rxq;
        struct sfc_dp_rx_qcreate_info info;

        rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, mb_pool, &rxq_entries,
                                       &evq_entries, &rxq_max_fill_level);
        if (rc != 0)
                goto fail_size_up_rings;
        SFC_ASSERT(rxq_entries >= EFX_RXQ_MINNDESCS);
        SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
        SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);

        rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf);
        if (rc != 0)
                goto fail_bad_conf;

        buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
        if (buf_size == 0) {
                sfc_err(sa, "RxQ %u mbuf pool object size is too small",
                        sw_index);
                rc = EINVAL;
                goto fail_bad_conf;
        }

        if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
            (~rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)) {
                sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
                        "object size is too small", sw_index);
                sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
                        "PDU size %u plus Rx prefix %u bytes",
                        sw_index, buf_size, (unsigned int)sa->port.pdu,
                        encp->enc_rx_prefix_size);
                rc = EINVAL;
                goto fail_bad_conf;
        }

        SFC_ASSERT(sw_index < sa->rxq_count);
        rxq_info = &sa->rxq_info[sw_index];

        SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
        rxq_info->entries = rxq_entries;

        if (sa->dp_rx->dp.hw_fw_caps & SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER)
                rxq_info->type = EFX_RXQ_TYPE_ES_SUPER_BUFFER;
        else
                rxq_info->type = EFX_RXQ_TYPE_DEFAULT;

        rxq_info->type_flags =
                (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) ?
                EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;

        if ((encp->enc_tunnel_encapsulations_supported != 0) &&
            (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
                rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;

        rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
                          evq_entries, socket_id, &evq);
        if (rc != 0)
                goto fail_ev_qinit;

        rc = ENOMEM;
        rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
                                 socket_id);
        if (rxq == NULL)
                goto fail_rxq_alloc;

        rxq_info->rxq = rxq;

        rxq->evq = evq;
        rxq->hw_index = sw_index;
        rxq->refill_threshold =
                RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
        rxq->refill_mb_pool = mb_pool;
        rxq->buf_size = buf_size;

        rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
                           socket_id, &rxq->mem);
        if (rc != 0)
                goto fail_dma_alloc;

        memset(&info, 0, sizeof(info));
        info.refill_mb_pool = rxq->refill_mb_pool;
        info.max_fill_level = rxq_max_fill_level;
        info.refill_threshold = rxq->refill_threshold;
        info.buf_size = buf_size;
        info.batch_max = encp->enc_rx_batch_max;
        info.prefix_size = encp->enc_rx_prefix_size;

        if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0)
                info.flags |= SFC_RXQ_FLAG_RSS_HASH;

        info.rxq_entries = rxq_info->entries;
        info.rxq_hw_ring = rxq->mem.esm_base;
        info.evq_entries = evq_entries;
        info.evq_hw_ring = evq->mem.esm_base;
        info.hw_index = rxq->hw_index;
        info.mem_bar = sa->mem_bar.esb_base;
        info.vi_window_shift = encp->enc_vi_window_shift;

        rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
                                &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
                                socket_id, &info, &rxq->dp);
        if (rc != 0)
                goto fail_dp_rx_qcreate;

        evq->dp_rxq = rxq->dp;

        rxq->state = SFC_RXQ_INITIALIZED;

        rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);

        return 0;

fail_dp_rx_qcreate:
        sfc_dma_free(sa, &rxq->mem);

fail_dma_alloc:
        rxq_info->rxq = NULL;
        rte_free(rxq);

fail_rxq_alloc:
        sfc_ev_qfini(evq);

fail_ev_qinit:
        rxq_info->entries = 0;

fail_bad_conf:
fail_size_up_rings:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

void
sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info;
        struct sfc_rxq *rxq;

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];

        rxq = rxq_info->rxq;
        SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

        sa->dp_rx->qdestroy(rxq->dp);
        rxq->dp = NULL;

        rxq_info->rxq = NULL;
        rxq_info->entries = 0;

        sfc_dma_free(sa, &rxq->mem);

        sfc_ev_qfini(rxq->evq);
        rxq->evq = NULL;

        rte_free(rxq);
}

/*
 * Mapping between RTE RSS hash functions and their EFX counterparts.
 */
struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
        { ETH_RSS_NONFRAG_IPV4_TCP,
          EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
        { ETH_RSS_NONFRAG_IPV4_UDP,
          EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
        { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX,
          EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
        { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
          EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
        { ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
          EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
          EFX_RX_HASH(IPV4, 2TUPLE) },
        { ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER |
          ETH_RSS_IPV6_EX,
          EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
          EFX_RX_HASH(IPV6, 2TUPLE) }
};

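/*
 * Keep only those hash type flags which the HW supports for the chosen
 * algorithm: each traffic class occupies a fixed bit field (LBN) in the
 * hash type mask and a flag is preserved only when it exactly matches
 * one of the supported per-class flags.
 */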
static efx_rx_hash_type_t
sfc_rx_hash_types_mask_supp(efx_rx_hash_type_t hash_type,
                            unsigned int *hash_type_flags_supported,
                            unsigned int nb_hash_type_flags_supported)
{
        efx_rx_hash_type_t hash_type_masked = 0;
        unsigned int i, j;

        for (i = 0; i < nb_hash_type_flags_supported; ++i) {
                unsigned int class_tuple_lbn[] = {
                        EFX_RX_CLASS_IPV4_TCP_LBN,
                        EFX_RX_CLASS_IPV4_UDP_LBN,
                        EFX_RX_CLASS_IPV4_LBN,
                        EFX_RX_CLASS_IPV6_TCP_LBN,
                        EFX_RX_CLASS_IPV6_UDP_LBN,
                        EFX_RX_CLASS_IPV6_LBN
                };

                for (j = 0; j < RTE_DIM(class_tuple_lbn); ++j) {
                        unsigned int tuple_mask = EFX_RX_CLASS_HASH_4TUPLE;
                        unsigned int flag;

                        tuple_mask <<= class_tuple_lbn[j];
                        flag = hash_type & tuple_mask;

                        if (flag == hash_type_flags_supported[i])
                                hash_type_masked |= flag;
                }
        }

        return hash_type_masked;
}

int
sfc_rx_hash_init(struct sfc_adapter *sa)
{
        struct sfc_rss *rss = &sa->rss;
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        uint32_t alg_mask = encp->enc_rx_scale_hash_alg_mask;
        efx_rx_hash_alg_t alg;
        unsigned int flags_supp[EFX_RX_HASH_NFLAGS];
        unsigned int nb_flags_supp;
        struct sfc_rss_hf_rte_to_efx *hf_map;
        struct sfc_rss_hf_rte_to_efx *entry;
        efx_rx_hash_type_t efx_hash_types;
        unsigned int i;
        int rc;

        if (alg_mask & (1U << EFX_RX_HASHALG_TOEPLITZ))
                alg = EFX_RX_HASHALG_TOEPLITZ;
        else if (alg_mask & (1U << EFX_RX_HASHALG_PACKED_STREAM))
                alg = EFX_RX_HASHALG_PACKED_STREAM;
        else
                return EINVAL;

        rc = efx_rx_scale_hash_flags_get(sa->nic, alg, flags_supp,
                                         &nb_flags_supp);
        if (rc != 0)
                return rc;

        hf_map = rte_calloc_socket("sfc-rss-hf-map",
                                   RTE_DIM(sfc_rss_hf_map),
                                   sizeof(*hf_map), 0, sa->socket_id);
        if (hf_map == NULL)
                return ENOMEM;

        entry = hf_map;
        efx_hash_types = 0;
        for (i = 0; i < RTE_DIM(sfc_rss_hf_map); ++i) {
                efx_rx_hash_type_t ht;

                ht = sfc_rx_hash_types_mask_supp(sfc_rss_hf_map[i].efx,
                                                 flags_supp, nb_flags_supp);
                if (ht != 0) {
                        entry->rte = sfc_rss_hf_map[i].rte;
                        entry->efx = ht;
                        efx_hash_types |= ht;
                        ++entry;
                }
        }

        rss->hash_alg = alg;
        rss->hf_map_nb_entries = (unsigned int)(entry - hf_map);
        rss->hf_map = hf_map;
        rss->hash_types = efx_hash_types;

        return 0;
}

void
sfc_rx_hash_fini(struct sfc_adapter *sa)
{
        struct sfc_rss *rss = &sa->rss;

        rte_free(rss->hf_map);
}

int
sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,
                     efx_rx_hash_type_t *efx)
{
        struct sfc_rss *rss = &sa->rss;
        efx_rx_hash_type_t hash_types = 0;
        unsigned int i;

        for (i = 0; i < rss->hf_map_nb_entries; ++i) {
                uint64_t rte_mask = rss->hf_map[i].rte;

                if ((rte & rte_mask) != 0) {
                        rte &= ~rte_mask;
                        hash_types |= rss->hf_map[i].efx;
                }
        }

        if (rte != 0) {
                sfc_err(sa, "unsupported hash functions requested");
                return EINVAL;
        }

        *efx = hash_types;

        return 0;
}

uint64_t
sfc_rx_hf_efx_to_rte(struct sfc_adapter *sa, efx_rx_hash_type_t efx)
{
        struct sfc_rss *rss = &sa->rss;
        uint64_t rte = 0;
        unsigned int i;

        for (i = 0; i < rss->hf_map_nb_entries; ++i) {
                efx_rx_hash_type_t hash_type = rss->hf_map[i].efx;

                if ((efx & hash_type) == hash_type)
                        rte |= rss->hf_map[i].rte;
        }

        return rte;
}

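/*
 * Process the advanced RSS configuration requested at device level.
 * Without an exclusive RSS context neither the hash functions nor the
 * key may deviate from the current settings.
 */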
static int
sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa,
                            struct rte_eth_rss_conf *conf)
{
        struct sfc_rss *rss = &sa->rss;
        efx_rx_hash_type_t efx_hash_types = rss->hash_types;
        uint64_t rss_hf = sfc_rx_hf_efx_to_rte(sa, efx_hash_types);
        int rc;

        if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
                if ((conf->rss_hf != 0 && conf->rss_hf != rss_hf) ||
                    conf->rss_key != NULL)
                        return EINVAL;
        }

        if (conf->rss_hf != 0) {
                rc = sfc_rx_hf_rte_to_efx(sa, conf->rss_hf, &efx_hash_types);
                if (rc != 0)
                        return rc;
        }

        if (conf->rss_key != NULL) {
                if (conf->rss_key_len != sizeof(rss->key)) {
                        sfc_err(sa, "RSS key size is wrong (should be %lu)",
                                sizeof(rss->key));
                        return EINVAL;
                }
                rte_memcpy(rss->key, conf->rss_key, sizeof(rss->key));
        }

        rss->hash_types = efx_hash_types;

        return 0;
}

static int
sfc_rx_rss_config(struct sfc_adapter *sa)
{
        struct sfc_rss *rss = &sa->rss;
        int rc = 0;

        if (rss->channels > 0) {
                rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
                                           rss->hash_alg, rss->hash_types,
                                           B_TRUE);
                if (rc != 0)
                        goto finish;

                rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
                                          rss->key, sizeof(rss->key));
                if (rc != 0)
                        goto finish;

                rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
                                          rss->tbl, RTE_DIM(rss->tbl));
        }

finish:
        return rc;
}

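/*
 * Start the Rx subsystem: bring up the common Rx module, apply the RSS
 * configuration and start all Rx queues except those marked for
 * deferred start which have not been started yet.
 */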
int
sfc_rx_start(struct sfc_adapter *sa)
{
        unsigned int sw_index;
        int rc;

        sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

        rc = efx_rx_init(sa->nic);
        if (rc != 0)
                goto fail_rx_init;

        rc = sfc_rx_rss_config(sa);
        if (rc != 0)
                goto fail_rss_config;

        for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
                if ((!sa->rxq_info[sw_index].deferred_start ||
                     sa->rxq_info[sw_index].deferred_started)) {
                        rc = sfc_rx_qstart(sa, sw_index);
                        if (rc != 0)
                                goto fail_rx_qstart;
                }
        }

        return 0;

fail_rx_qstart:
        while (sw_index-- > 0)
                sfc_rx_qstop(sa, sw_index);

fail_rss_config:
        efx_rx_fini(sa->nic);

fail_rx_init:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

void
sfc_rx_stop(struct sfc_adapter *sa)
{
        unsigned int sw_index;

        sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

        sw_index = sa->rxq_count;
        while (sw_index-- > 0) {
                if (sa->rxq_info[sw_index].rxq != NULL)
                        sfc_rx_qstop(sa, sw_index);
        }

        efx_rx_fini(sa->nic);
}

static int
sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
        unsigned int max_entries;

        max_entries = EFX_RXQ_MAXNDESCS;
        SFC_ASSERT(rte_is_power_of_2(max_entries));

        rxq_info->max_entries = max_entries;

        return 0;
}

static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
        struct sfc_rss *rss = &sa->rss;
        uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
                                      sfc_rx_get_queue_offload_caps(sa);
        uint64_t offloads_rejected = rxmode->offloads & ~offloads_supported;
        int rc = 0;

        switch (rxmode->mq_mode) {
        case ETH_MQ_RX_NONE:
                /* No special checks are required */
                break;
        case ETH_MQ_RX_RSS:
                if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
                        sfc_err(sa, "RSS is not available");
                        rc = EINVAL;
                }
                break;
        default:
                sfc_err(sa, "Rx multi-queue mode %u not supported",
                        rxmode->mq_mode);
                rc = EINVAL;
        }

        if (offloads_rejected) {
                sfc_rx_log_offloads(sa, "device", "is unsupported",
                                    offloads_rejected);
                rc = EINVAL;
        }

        if (~rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
                sfc_warn(sa, "FCS stripping cannot be disabled - always on");
                rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
                rxmode->hw_strip_crc = 1;
        }

        return rc;
}

/**
 * Destroy excess queues that are no longer needed after reconfiguration
 * or complete close.
 */
static void
sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
{
        int sw_index;

        SFC_ASSERT(nb_rx_queues <= sa->rxq_count);

        sw_index = sa->rxq_count;
        while (--sw_index >= (int)nb_rx_queues) {
                if (sa->rxq_info[sw_index].rxq != NULL)
                        sfc_rx_qfini(sa, sw_index);
        }

        sa->rxq_count = nb_rx_queues;
}

/**
 * Initialize Rx subsystem.
 *
 * Called at device (re)configuration stage when number of receive queues is
 * specified together with other device level receive configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
int
sfc_rx_configure(struct sfc_adapter *sa)
{
        struct sfc_rss *rss = &sa->rss;
        struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
        const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
        int rc;

        sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
                     nb_rx_queues, sa->rxq_count);

        rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
        if (rc != 0)
                goto fail_check_mode;

        if (nb_rx_queues == sa->rxq_count)
                goto done;

        if (sa->rxq_info == NULL) {
                rc = ENOMEM;
                sa->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
                                                 sizeof(sa->rxq_info[0]), 0,
                                                 sa->socket_id);
                if (sa->rxq_info == NULL)
                        goto fail_rxqs_alloc;
        } else {
                struct sfc_rxq_info *new_rxq_info;

                if (nb_rx_queues < sa->rxq_count)
                        sfc_rx_fini_queues(sa, nb_rx_queues);

                rc = ENOMEM;
                new_rxq_info =
                        rte_realloc(sa->rxq_info,
                                    nb_rx_queues * sizeof(sa->rxq_info[0]), 0);
                if (new_rxq_info == NULL && nb_rx_queues > 0)
                        goto fail_rxqs_realloc;

                sa->rxq_info = new_rxq_info;
                if (nb_rx_queues > sa->rxq_count)
                        memset(&sa->rxq_info[sa->rxq_count], 0,
                               (nb_rx_queues - sa->rxq_count) *
                               sizeof(sa->rxq_info[0]));
        }

        while (sa->rxq_count < nb_rx_queues) {
                rc = sfc_rx_qinit_info(sa, sa->rxq_count);
                if (rc != 0)
                        goto fail_rx_qinit_info;

                sa->rxq_count++;
        }

        rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
                         MIN(sa->rxq_count, EFX_MAXRSS) : 0;

        if (rss->channels > 0) {
                struct rte_eth_rss_conf *adv_conf_rss;
                unsigned int sw_index;

                for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
                        rss->tbl[sw_index] = sw_index % rss->channels;

                adv_conf_rss = &dev_conf->rx_adv_conf.rss_conf;
                rc = sfc_rx_process_adv_conf_rss(sa, adv_conf_rss);
                if (rc != 0)
                        goto fail_rx_process_adv_conf_rss;
        }

done:
        return 0;

fail_rx_process_adv_conf_rss:
fail_rx_qinit_info:
fail_rxqs_realloc:
fail_rxqs_alloc:
        sfc_rx_close(sa);

fail_check_mode:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}

/**
 * Shutdown Rx subsystem.
 *
 * Called at device close stage, for example, before device shutdown.
 */
void
sfc_rx_close(struct sfc_adapter *sa)
{
        struct sfc_rss *rss = &sa->rss;

        sfc_rx_fini_queues(sa, 0);

        rss->channels = 0;

        rte_free(sa->rxq_info);
        sa->rxq_info = NULL;
}