net/sfc: get RxQ pending descriptors count
[dpdk.git] drivers/net/sfc/sfc_rx.c
1 /*-
2  * Copyright (c) 2016 Solarflare Communications Inc.
3  * All rights reserved.
4  *
5  * This software was jointly developed between OKTET Labs (under contract
6  * for Solarflare) and Solarflare Communications, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright notice,
14  *    this list of conditions and the following disclaimer in the documentation
15  *    and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
19  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
21  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
22  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
27  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include <rte_mempool.h>
31
32 #include "efx.h"
33
34 #include "sfc.h"
35 #include "sfc_debug.h"
36 #include "sfc_log.h"
37 #include "sfc_ev.h"
38 #include "sfc_rx.h"
39 #include "sfc_tweak.h"
40
41 /*
42  * Maximum number of Rx queue flush attempts in the case of flush failure
43  * or flush timeout
44  */
45 #define SFC_RX_QFLUSH_ATTEMPTS          (3)
46
47 /*
48  * Time to wait between event queue polling attempts when waiting for Rx
49  * queue flush done or failed events.
50  */
51 #define SFC_RX_QFLUSH_POLL_WAIT_MS      (1)
52
53 /*
54  * Maximum number of event queue polling attempts when waiting for Rx queue
55  * flush done or failed events. It defines the Rx queue flush attempt
56  * timeout together with SFC_RX_QFLUSH_POLL_WAIT_MS.
57  */
58 #define SFC_RX_QFLUSH_POLL_ATTEMPTS     (2000)
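
/*
 * Illustrative arithmetic, not part of the original file: the worst-case
 * Rx queue flush delay implied by the macros above is
 *   SFC_RX_QFLUSH_ATTEMPTS * SFC_RX_QFLUSH_POLL_ATTEMPTS *
 *   SFC_RX_QFLUSH_POLL_WAIT_MS = 3 * 2000 * 1 ms = 6000 ms,
 * which matches the "6 seconds" worst case noted in sfc_rx_qflush() below.
 */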
59
60 void
61 sfc_rx_qflush_done(struct sfc_rxq *rxq)
62 {
63         rxq->state |= SFC_RXQ_FLUSHED;
64         rxq->state &= ~SFC_RXQ_FLUSHING;
65 }
66
67 void
68 sfc_rx_qflush_failed(struct sfc_rxq *rxq)
69 {
70         rxq->state |= SFC_RXQ_FLUSH_FAILED;
71         rxq->state &= ~SFC_RXQ_FLUSHING;
72 }
73
74 static void
75 sfc_rx_qrefill(struct sfc_rxq *rxq)
76 {
77         unsigned int free_space;
78         unsigned int bulks;
79         void *objs[SFC_RX_REFILL_BULK];
80         efsys_dma_addr_t addr[RTE_DIM(objs)];
81         unsigned int added = rxq->added;
82         unsigned int id;
83         unsigned int i;
84         struct sfc_rx_sw_desc *rxd;
85         struct rte_mbuf *m;
86         uint8_t port_id = rxq->port_id;
87
88         free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) -
89                 (added - rxq->completed);
90
91         if (free_space < rxq->refill_threshold)
92                 return;
93
94         bulks = free_space / RTE_DIM(objs);
95
96         id = added & rxq->ptr_mask;
97         while (bulks-- > 0) {
98                 if (rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
99                                          RTE_DIM(objs)) < 0) {
100                         /*
101                          * It is hardly a safe way to increment a counter
102                          * from different contexts, but all PMDs do it.
103                          */
104                         rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
105                                 RTE_DIM(objs);
106                         break;
107                 }
108
109                 for (i = 0; i < RTE_DIM(objs);
110                      ++i, id = (id + 1) & rxq->ptr_mask) {
111                         m = objs[i];
112
113                         rxd = &rxq->sw_desc[id];
114                         rxd->mbuf = m;
115
116                         rte_mbuf_refcnt_set(m, 1);
117                         m->data_off = RTE_PKTMBUF_HEADROOM;
118                         m->next = NULL;
119                         m->nb_segs = 1;
120                         m->port = port_id;
121
122                         addr[i] = rte_pktmbuf_mtophys(m);
123                 }
124
125                 efx_rx_qpost(rxq->common, addr, rxq->buf_size,
126                              RTE_DIM(objs), rxq->completed, added);
127                 added += RTE_DIM(objs);
128         }
129
130         /* Push doorbell if something is posted */
131         if (rxq->added != added) {
132                 rxq->added = added;
133                 efx_rx_qpush(rxq->common, added, &rxq->pushed);
134         }
135 }
136
137 static uint64_t
138 sfc_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
139 {
140         uint64_t mbuf_flags = 0;
141
142         switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
143         case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
144                 mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
145                 break;
146         case EFX_PKT_IPV4:
147                 mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
148                 break;
149         default:
150                 RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
151                 SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
152                            PKT_RX_IP_CKSUM_UNKNOWN);
153                 break;
154         }
155
156         switch ((desc_flags &
157                  (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
158         case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
159         case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
160                 mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
161                 break;
162         case EFX_PKT_TCP:
163         case EFX_PKT_UDP:
164                 mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
165                 break;
166         default:
167                 RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
168                 SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
169                            PKT_RX_L4_CKSUM_UNKNOWN);
170                 break;
171         }
172
173         return mbuf_flags;
174 }
175
176 static uint32_t
177 sfc_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
178 {
179         return RTE_PTYPE_L2_ETHER |
180                 ((desc_flags & EFX_PKT_IPV4) ?
181                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
182                 ((desc_flags & EFX_PKT_IPV6) ?
183                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
184                 ((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
185                 ((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
186 }
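
/*
 * Illustrative example for the two helpers above (not part of the original
 * file): a descriptor carrying
 * EFX_PKT_IPV4 | EFX_CKSUM_IPV4 | EFX_PKT_TCP | EFX_CKSUM_TCPUDP is reported
 * to the application as
 *   ol_flags    = PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD
 *   packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
 *                 RTE_PTYPE_L4_TCP
 * while a bare EFX_PKT_IPV4 (checksum not validated as good) yields
 * PKT_RX_IP_CKSUM_BAD with the same L3 packet type.
 */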
187
188 uint16_t
189 sfc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
190 {
191         struct sfc_rxq *rxq = rx_queue;
192         unsigned int completed;
193         unsigned int prefix_size = rxq->prefix_size;
194         unsigned int done_pkts = 0;
195         boolean_t discard_next = B_FALSE;
196
197         if (unlikely((rxq->state & SFC_RXQ_RUNNING) == 0))
198                 return 0;
199
200         sfc_ev_qpoll(rxq->evq);
201
202         completed = rxq->completed;
203         while (completed != rxq->pending && done_pkts < nb_pkts) {
204                 unsigned int id;
205                 struct sfc_rx_sw_desc *rxd;
206                 struct rte_mbuf *m;
207                 unsigned int seg_len;
208                 unsigned int desc_flags;
209
210                 id = completed++ & rxq->ptr_mask;
211                 rxd = &rxq->sw_desc[id];
212                 m = rxd->mbuf;
213                 desc_flags = rxd->flags;
214
215                 if (discard_next)
216                         goto discard;
217
218                 if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
219                         goto discard;
220
221                 if (desc_flags & EFX_PKT_CONT)
222                         goto discard;
223
224                 if (desc_flags & EFX_PKT_PREFIX_LEN) {
225                         uint16_t tmp_size;
226                         int rc __rte_unused;
227
228                         rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
229                                 rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
230                         SFC_ASSERT(rc == 0);
231                         seg_len = tmp_size;
232                 } else {
233                         seg_len = rxd->size - prefix_size;
234                 }
235
236                 m->data_off += prefix_size;
237                 rte_pktmbuf_data_len(m) = seg_len;
238                 rte_pktmbuf_pkt_len(m) = seg_len;
239
240                 m->ol_flags = sfc_rx_desc_flags_to_offload_flags(desc_flags);
241                 m->packet_type = sfc_rx_desc_flags_to_packet_type(desc_flags);
242
243                 *rx_pkts++ = m;
244                 done_pkts++;
245                 continue;
246
247 discard:
248                 discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
249                 rte_mempool_put(rxq->refill_mb_pool, m);
250                 rxd->mbuf = NULL;
251         }
252
253         rxq->completed = completed;
254
255         sfc_rx_qrefill(rxq);
256
257         return done_pkts;
258 }
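
/*
 * Illustrative only, not part of the original file: sfc_recv_pkts() is
 * installed as the ethdev burst receive callback, so applications reach it
 * through the generic rte_eth_rx_burst() API.  The helper below is a minimal
 * sketch of such a caller; its name and the burst size of 32 are assumptions
 * of the example (rte_ethdev.h declarations are assumed visible via sfc.h).
 */
static inline void
sfc_rx_example_poll_once(uint8_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx;
	uint16_t i;

	/* Receive up to RTE_DIM(pkts) packets from the given queue */
	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, RTE_DIM(pkts));

	/* Just drop the packets in this example */
	for (i = 0; i < nb_rx; ++i)
		rte_pktmbuf_free(pkts[i]);
}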
259
260 unsigned int
261 sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
262 {
263         struct sfc_rxq *rxq;
264
265         SFC_ASSERT(sw_index < sa->rxq_count);
266         rxq = sa->rxq_info[sw_index].rxq;
267
268         if (rxq == NULL || (rxq->state & SFC_RXQ_RUNNING) == 0)
269                 return 0;
270
271         sfc_ev_qpoll(rxq->evq);
272
273         return rxq->pending - rxq->completed;
274 }
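
/*
 * Illustrative only, not part of the original file: sfc_rx_qdesc_npending()
 * is meant to back the ethdev pending descriptor count query.  A sketch of a
 * wrapper callback is shown below; the function name is made up here and the
 * real callback would live with the other ethdev ops (e.g. in sfc_ethdev.c).
 */
static __rte_unused uint32_t
sfc_rx_queue_count_example(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	SFC_ASSERT(rx_queue_id < sa->rxq_count);

	return sfc_rx_qdesc_npending(sa, rx_queue_id);
}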
275
276 static void
277 sfc_rx_qpurge(struct sfc_rxq *rxq)
278 {
279         unsigned int i;
280         struct sfc_rx_sw_desc *rxd;
281
282         for (i = rxq->completed; i != rxq->added; ++i) {
283                 rxd = &rxq->sw_desc[i & rxq->ptr_mask];
284                 rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
285                 rxd->mbuf = NULL;
286         }
287 }
288
289 static void
290 sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
291 {
292         struct sfc_rxq *rxq;
293         unsigned int retry_count;
294         unsigned int wait_count;
295
296         rxq = sa->rxq_info[sw_index].rxq;
297         SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
298
299         /*
300          * Retry Rx queue flushing in the case of a flush failure or
301          * timeout. In the worst case it can take up to 6 seconds.
302          */
303         for (retry_count = 0;
304              ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
305              (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
306              ++retry_count) {
307                 if (efx_rx_qflush(rxq->common) != 0) {
308                         rxq->state |= SFC_RXQ_FLUSH_FAILED;
309                         break;
310                 }
311                 rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
312                 rxq->state |= SFC_RXQ_FLUSHING;
313
314                 /*
315                  * Wait for the Rx queue flush done or failed event for at
316                  * least SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
317                  * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
318                  * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
319                  */
320                 wait_count = 0;
321                 do {
322                         rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
323                         sfc_ev_qpoll(rxq->evq);
324                 } while ((rxq->state & SFC_RXQ_FLUSHING) &&
325                          (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));
326
327                 if (rxq->state & SFC_RXQ_FLUSHING)
328                         sfc_err(sa, "RxQ %u flush timed out", sw_index);
329
330                 if (rxq->state & SFC_RXQ_FLUSH_FAILED)
331                         sfc_err(sa, "RxQ %u flush failed", sw_index);
332
333                 if (rxq->state & SFC_RXQ_FLUSHED)
334                         sfc_info(sa, "RxQ %u flushed", sw_index);
335         }
336
337         sfc_rx_qpurge(rxq);
338 }
339
340 int
341 sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
342 {
343         struct sfc_rxq_info *rxq_info;
344         struct sfc_rxq *rxq;
345         struct sfc_evq *evq;
346         int rc;
347
348         sfc_log_init(sa, "sw_index=%u", sw_index);
349
350         SFC_ASSERT(sw_index < sa->rxq_count);
351
352         rxq_info = &sa->rxq_info[sw_index];
353         rxq = rxq_info->rxq;
354         SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
355
356         evq = rxq->evq;
357
358         rc = sfc_ev_qstart(sa, evq->evq_index);
359         if (rc != 0)
360                 goto fail_ev_qstart;
361
362         rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
363                             &rxq->mem, rxq_info->entries,
364                             0 /* not used on EF10 */, evq->common,
365                             &rxq->common);
366         if (rc != 0)
367                 goto fail_rx_qcreate;
368
369         efx_rx_qenable(rxq->common);
370
371         rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;
372
373         rxq->state |= (SFC_RXQ_STARTED | SFC_RXQ_RUNNING);
374
375         sfc_rx_qrefill(rxq);
376
377         if (sw_index == 0) {
378                 rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common,
379                                                     B_FALSE);
380                 if (rc != 0)
381                         goto fail_mac_filter_default_rxq_set;
382         }
383
384         /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
385         sa->eth_dev->data->rx_queue_state[sw_index] =
386                 RTE_ETH_QUEUE_STATE_STARTED;
387
388         return 0;
389
390 fail_mac_filter_default_rxq_set:
391         sfc_rx_qflush(sa, sw_index);
392
393 fail_rx_qcreate:
394         sfc_ev_qstop(sa, evq->evq_index);
395
396 fail_ev_qstart:
397         return rc;
398 }
399
400 void
401 sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
402 {
403         struct sfc_rxq_info *rxq_info;
404         struct sfc_rxq *rxq;
405
406         sfc_log_init(sa, "sw_index=%u", sw_index);
407
408         SFC_ASSERT(sw_index < sa->rxq_count);
409
410         rxq_info = &sa->rxq_info[sw_index];
411         rxq = rxq_info->rxq;
412         SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
413
414         /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
415         sa->eth_dev->data->rx_queue_state[sw_index] =
416                 RTE_ETH_QUEUE_STATE_STOPPED;
417
418         rxq->state &= ~SFC_RXQ_RUNNING;
419
420         if (sw_index == 0)
421                 efx_mac_filter_default_rxq_clear(sa->nic);
422
423         sfc_rx_qflush(sa, sw_index);
424
425         rxq->state = SFC_RXQ_INITIALIZED;
426
427         efx_rx_qdestroy(rxq->common);
428
429         sfc_ev_qstop(sa, rxq->evq->evq_index);
430 }
431
432 static int
433 sfc_rx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_rx_desc,
434                    const struct rte_eth_rxconf *rx_conf)
435 {
436         const uint16_t rx_free_thresh_max = EFX_RXQ_LIMIT(nb_rx_desc);
437         int rc = 0;
438
439         if (rx_conf->rx_thresh.pthresh != 0 ||
440             rx_conf->rx_thresh.hthresh != 0 ||
441             rx_conf->rx_thresh.wthresh != 0) {
442                 sfc_err(sa,
443                         "RxQ prefetch/host/writeback thresholds are not supported");
444                 rc = EINVAL;
445         }
446
447         if (rx_conf->rx_free_thresh > rx_free_thresh_max) {
448                 sfc_err(sa,
449                         "RxQ free threshold too large: %u vs maximum %u",
450                         rx_conf->rx_free_thresh, rx_free_thresh_max);
451                 rc = EINVAL;
452         }
453
454         if (rx_conf->rx_drop_en == 0) {
455                 sfc_err(sa, "RxQ drop disable is not supported");
456                 rc = EINVAL;
457         }
458
459         if (rx_conf->rx_deferred_start != 0) {
460                 sfc_err(sa, "RxQ deferred start is not supported");
461                 rc = EINVAL;
462         }
463
464         return rc;
465 }
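
/*
 * Illustrative only, not part of the original file: an Rx queue configuration
 * that satisfies the checks above.  The variable name and values are
 * assumptions of the example, not driver defaults.
 */
static const struct rte_eth_rxconf sfc_rx_example_rxconf __rte_unused = {
	.rx_thresh = { .pthresh = 0, .hthresh = 0, .wthresh = 0 },
	.rx_free_thresh = 0,		/* must not exceed EFX_RXQ_LIMIT(nb_rx_desc) */
	.rx_drop_en = 1,		/* drop disable is not supported */
	.rx_deferred_start = 0,		/* deferred start is not supported */
};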
466
467 static unsigned int
468 sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
469 {
470         uint32_t data_off;
471         uint32_t order;
472
473         /* The mbuf object itself is always cache line aligned */
474         order = rte_bsf32(RTE_CACHE_LINE_SIZE);
475
476         /* Data offset from mbuf object start */
477         data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
478                 RTE_PKTMBUF_HEADROOM;
479
480         order = MIN(order, rte_bsf32(data_off));
481
482         return 1u << (order - 1);
483 }
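
/*
 * Worked example for the helper above (values are assumptions of the example,
 * e.g. a 64-bit build with 64-byte cache lines and a pool without a private
 * area):
 *   order    = rte_bsf32(64) = 6
 *   data_off = sizeof(struct rte_mbuf) + 0 + RTE_PKTMBUF_HEADROOM
 *            = 128 + 0 + 128 = 256, rte_bsf32(256) = 8
 *   order    = MIN(6, 8) = 6
 * so the reported guaranteed data alignment is 1u << (6 - 1) = 32 bytes.
 */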
484
485 static uint16_t
486 sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
487 {
488         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
489         const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
490         const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
491         uint16_t buf_size;
492         unsigned int buf_aligned;
493         unsigned int start_alignment;
494         unsigned int end_padding_alignment;
495
496         /* Below it is assumed that both alignments are powers of 2 */
497         SFC_ASSERT(rte_is_power_of_2(nic_align_start));
498         SFC_ASSERT(rte_is_power_of_2(nic_align_end));
499
500         /*
501          * The mbuf is always cache line aligned; double-check
502          * that it meets the Rx buffer start alignment requirements.
503          */
504
505         /* Start from mbuf pool data room size */
506         buf_size = rte_pktmbuf_data_room_size(mb_pool);
507
508         /* Remove headroom */
509         if (buf_size <= RTE_PKTMBUF_HEADROOM) {
510                 sfc_err(sa,
511                         "RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
512                         mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
513                 return 0;
514         }
515         buf_size -= RTE_PKTMBUF_HEADROOM;
516
517         /* Calculate guaranteed data start alignment */
518         buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);
519
520         /* Reserve space for start alignment */
521         if (buf_aligned < nic_align_start) {
522                 start_alignment = nic_align_start - buf_aligned;
523                 if (buf_size <= start_alignment) {
524                         sfc_err(sa,
525                                 "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
526                                 mb_pool->name,
527                                 rte_pktmbuf_data_room_size(mb_pool),
528                                 RTE_PKTMBUF_HEADROOM, start_alignment);
529                         return 0;
530                 }
531                 buf_aligned = nic_align_start;
532                 buf_size -= start_alignment;
533         } else {
534                 start_alignment = 0;
535         }
536
537         /* Make sure that end padding does not write beyond the buffer */
538         if (buf_aligned < nic_align_end) {
539                 /*
540                  * Estimate the space which can be lost. If the guaranteed
541                  * buffer size is odd, the lost space is (nic_align_end - 1).
542                  * A more accurate formula is used below.
543                  */
544                 end_padding_alignment = nic_align_end -
545                         MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
546                 if (buf_size <= end_padding_alignment) {
547                         sfc_err(sa,
548                                 "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
549                                 mb_pool->name,
550                                 rte_pktmbuf_data_room_size(mb_pool),
551                                 RTE_PKTMBUF_HEADROOM, start_alignment,
552                                 end_padding_alignment);
553                         return 0;
554                 }
555                 buf_size -= end_padding_alignment;
556         } else {
557                 /*
558                  * The start is aligned the same as or better than the end,
559                  * so just align the length.
560                  */
561                 buf_size = P2ALIGN(buf_size, nic_align_end);
562         }
563
564         return buf_size;
565 }
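
/*
 * Worked example for the helper above (all values are assumptions of the
 * example): data room 2176 bytes, RTE_PKTMBUF_HEADROOM 128, guaranteed data
 * alignment 32, nic_align_start 1, nic_align_end 64:
 *   buf_size = 2176 - 128 = 2048
 *   start alignment: 32 >= 1, so start_alignment = 0
 *   end padding: 32 < 64, so end_padding_alignment =
 *     64 - MIN(32, 1u << (rte_bsf32(2048) - 1)) = 64 - 32 = 32
 *   reported Rx buffer size = 2048 - 32 = 2016 bytes.
 */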
566
567 int
568 sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
569              uint16_t nb_rx_desc, unsigned int socket_id,
570              const struct rte_eth_rxconf *rx_conf,
571              struct rte_mempool *mb_pool)
572 {
573         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
574         int rc;
575         uint16_t buf_size;
576         struct sfc_rxq_info *rxq_info;
577         unsigned int evq_index;
578         struct sfc_evq *evq;
579         struct sfc_rxq *rxq;
580
581         rc = sfc_rx_qcheck_conf(sa, nb_rx_desc, rx_conf);
582         if (rc != 0)
583                 goto fail_bad_conf;
584
585         buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
586         if (buf_size == 0) {
587                 sfc_err(sa, "RxQ %u mbuf pool object size is too small",
588                         sw_index);
589                 rc = EINVAL;
590                 goto fail_bad_conf;
591         }
592
593         if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
594             !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
595                 sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
596                         "object size is too small", sw_index);
597                 sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
598                         "PDU size %u plus Rx prefix %u bytes",
599                         sw_index, buf_size, (unsigned int)sa->port.pdu,
600                         encp->enc_rx_prefix_size);
601                 rc = EINVAL;
602                 goto fail_bad_conf;
603         }
604
605         SFC_ASSERT(sw_index < sa->rxq_count);
606         rxq_info = &sa->rxq_info[sw_index];
607
608         SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries);
609         rxq_info->entries = nb_rx_desc;
610         rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
611
612         evq_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);
613
614         rc = sfc_ev_qinit(sa, evq_index, rxq_info->entries, socket_id);
615         if (rc != 0)
616                 goto fail_ev_qinit;
617
618         evq = sa->evq_info[evq_index].evq;
619
620         rc = ENOMEM;
621         rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
622                                  socket_id);
623         if (rxq == NULL)
624                 goto fail_rxq_alloc;
625
626         rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
627                            socket_id, &rxq->mem);
628         if (rc != 0)
629                 goto fail_dma_alloc;
630
631         rc = ENOMEM;
632         rxq->sw_desc = rte_calloc_socket("sfc-rxq-sw_desc", rxq_info->entries,
633                                          sizeof(*rxq->sw_desc),
634                                          RTE_CACHE_LINE_SIZE, socket_id);
635         if (rxq->sw_desc == NULL)
636                 goto fail_desc_alloc;
637
638         evq->rxq = rxq;
639         rxq->evq = evq;
640         rxq->ptr_mask = rxq_info->entries - 1;
641         rxq->refill_threshold = rx_conf->rx_free_thresh;
642         rxq->refill_mb_pool = mb_pool;
643         rxq->buf_size = buf_size;
644         rxq->hw_index = sw_index;
645         rxq->port_id = sa->eth_dev->data->port_id;
646
647         /* Cache the limits required on the datapath in the RxQ structure */
648         rxq->batch_max = encp->enc_rx_batch_max;
649         rxq->prefix_size = encp->enc_rx_prefix_size;
650
651         rxq->state = SFC_RXQ_INITIALIZED;
652
653         rxq_info->rxq = rxq;
654
655         return 0;
656
657 fail_desc_alloc:
658         sfc_dma_free(sa, &rxq->mem);
659
660 fail_dma_alloc:
661         rte_free(rxq);
662
663 fail_rxq_alloc:
664         sfc_ev_qfini(sa, evq_index);
665
666 fail_ev_qinit:
667         rxq_info->entries = 0;
668
669 fail_bad_conf:
670         sfc_log_init(sa, "failed %d", rc);
671         return rc;
672 }
673
674 void
675 sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
676 {
677         struct sfc_rxq_info *rxq_info;
678         struct sfc_rxq *rxq;
679
680         SFC_ASSERT(sw_index < sa->rxq_count);
681
682         rxq_info = &sa->rxq_info[sw_index];
683
684         rxq = rxq_info->rxq;
685         SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
686
687         rxq_info->rxq = NULL;
688         rxq_info->entries = 0;
689
690         rte_free(rxq->sw_desc);
691         sfc_dma_free(sa, &rxq->mem);
692         rte_free(rxq);
693 }
694
695 int
696 sfc_rx_start(struct sfc_adapter *sa)
697 {
698         unsigned int sw_index;
699         int rc;
700
701         sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);
702
703         rc = efx_rx_init(sa->nic);
704         if (rc != 0)
705                 goto fail_rx_init;
706
707         for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
708                 rc = sfc_rx_qstart(sa, sw_index);
709                 if (rc != 0)
710                         goto fail_rx_qstart;
711         }
712
713         return 0;
714
715 fail_rx_qstart:
716         while (sw_index-- > 0)
717                 sfc_rx_qstop(sa, sw_index);
718
719         efx_rx_fini(sa->nic);
720
721 fail_rx_init:
722         sfc_log_init(sa, "failed %d", rc);
723         return rc;
724 }
725
726 void
727 sfc_rx_stop(struct sfc_adapter *sa)
728 {
729         unsigned int sw_index;
730
731         sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);
732
733         sw_index = sa->rxq_count;
734         while (sw_index-- > 0) {
735                 if (sa->rxq_info[sw_index].rxq != NULL)
736                         sfc_rx_qstop(sa, sw_index);
737         }
738
739         efx_rx_fini(sa->nic);
740 }
741
742 static int
743 sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
744 {
745         struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
746         unsigned int max_entries;
747
748         max_entries = EFX_RXQ_MAXNDESCS;
749         SFC_ASSERT(rte_is_power_of_2(max_entries));
750
751         rxq_info->max_entries = max_entries;
752
753         return 0;
754 }
755
756 static int
757 sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
758 {
759         int rc = 0;
760
761         switch (rxmode->mq_mode) {
762         case ETH_MQ_RX_NONE:
763                 /* No special checks are required */
764                 break;
765         default:
766                 sfc_err(sa, "Rx multi-queue mode %u not supported",
767                         rxmode->mq_mode);
768                 rc = EINVAL;
769         }
770
771         if (rxmode->header_split) {
772                 sfc_err(sa, "Header split on Rx not supported");
773                 rc = EINVAL;
774         }
775
776         if (rxmode->hw_vlan_filter) {
777                 sfc_err(sa, "HW VLAN filtering not supported");
778                 rc = EINVAL;
779         }
780
781         if (rxmode->hw_vlan_strip) {
782                 sfc_err(sa, "HW VLAN stripping not supported");
783                 rc = EINVAL;
784         }
785
786         if (rxmode->hw_vlan_extend) {
787                 sfc_err(sa,
788                         "Q-in-Q HW VLAN stripping not supported");
789                 rc = EINVAL;
790         }
791
792         if (!rxmode->hw_strip_crc) {
793                 sfc_warn(sa,
794                          "FCS stripping control not supported - always stripped");
795                 rxmode->hw_strip_crc = 1;
796         }
797
798         if (rxmode->enable_scatter) {
799                 sfc_err(sa, "Scatter on Rx not supported");
800                 rc = EINVAL;
801         }
802
803         if (rxmode->enable_lro) {
804                 sfc_err(sa, "LRO not supported");
805                 rc = EINVAL;
806         }
807
808         return rc;
809 }
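
/*
 * Illustrative only, not part of the original file: an Rx mode accepted by
 * sfc_rx_check_mode() above.  The variable name and field values are
 * assumptions of the example, not recommended settings.
 */
static const struct rte_eth_rxmode sfc_rx_example_rxmode __rte_unused = {
	.mq_mode = ETH_MQ_RX_NONE,	/* the only supported multi-queue mode */
	.header_split = 0,
	.hw_vlan_filter = 0,
	.hw_vlan_strip = 0,
	.hw_vlan_extend = 0,
	.hw_strip_crc = 1,		/* FCS is always stripped by the NIC */
	.enable_scatter = 0,		/* Rx scatter is not supported */
	.enable_lro = 0,		/* LRO is not supported */
};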
810
811 /**
812  * Initialize Rx subsystem.
813  *
814  * Called at the device configuration stage when the number of receive
815  * queues is specified together with other device-level receive configuration.
816  *
817  * It should be used to allocate NUMA-unaware resources.
818  */
819 int
820 sfc_rx_init(struct sfc_adapter *sa)
821 {
822         struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
823         unsigned int sw_index;
824         int rc;
825
826         rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
827         if (rc != 0)
828                 goto fail_check_mode;
829
830         sa->rxq_count = sa->eth_dev->data->nb_rx_queues;
831
832         rc = ENOMEM;
833         sa->rxq_info = rte_calloc_socket("sfc-rxqs", sa->rxq_count,
834                                          sizeof(struct sfc_rxq_info), 0,
835                                          sa->socket_id);
836         if (sa->rxq_info == NULL)
837                 goto fail_rxqs_alloc;
838
839         for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
840                 rc = sfc_rx_qinit_info(sa, sw_index);
841                 if (rc != 0)
842                         goto fail_rx_qinit_info;
843         }
844
845         return 0;
846
847 fail_rx_qinit_info:
848         rte_free(sa->rxq_info);
849         sa->rxq_info = NULL;
850
851 fail_rxqs_alloc:
852         sa->rxq_count = 0;
853 fail_check_mode:
854         sfc_log_init(sa, "failed %d", rc);
855         return rc;
856 }
857
858 /**
859  * Shutdown Rx subsystem.
860  *
861  * Called at the device close stage, for example, before device
862  * reconfiguration or shutdown.
863  */
864 void
865 sfc_rx_fini(struct sfc_adapter *sa)
866 {
867         unsigned int sw_index;
868
869         sw_index = sa->rxq_count;
870         while (sw_index-- > 0) {
871                 if (sa->rxq_info[sw_index].rxq != NULL)
872                         sfc_rx_qfini(sa, sw_index);
873         }
874
875         rte_free(sa->rxq_info);
876         sa->rxq_info = NULL;
877         sa->rxq_count = 0;
878 }