/* dpdk.git: drivers/net/fm10k/fm10k_ethdev.c */
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>
#include <rte_kvargs.h>

#include "fm10k.h"
#include "base/fm10k_api.h"

/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL

#define MAIN_VSI_POOL_NUMBER 0

/* Maximum number of attempts to query switch status */
#define MAX_QUERY_SWITCH_STATE_TIMES 10
/* Wait interval to get switch status */
#define WAIT_SWITCH_MSG_US    100000
/* A period of quiescence for switch */
#define FM10K_SWITCH_QUIESCE_US 10000
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)

/* default 1:1 map from queue ID to interrupt vector ID */
#define Q2V(dev, queue_id) (dev->pci_dev->intr_handle.intr_vec[queue_id])

/* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
#define MAX_LPORT_NUM    128
#define GLORT_FD_Q_BASE  0x40
#define GLORT_PF_MASK    0xFFC0
#define GLORT_FD_MASK    GLORT_PF_MASK
#define GLORT_FD_INDEX   GLORT_FD_Q_BASE

static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
        const u8 *mac, bool add, uint32_t pool);
static void fm10k_tx_queue_release(void *queue);
static void fm10k_rx_queue_release(void *queue);
static void fm10k_set_rx_function(struct rte_eth_dev *dev);
static void fm10k_set_tx_function(struct rte_eth_dev *dev);
static int fm10k_check_ftag(struct rte_devargs *devargs);

struct fm10k_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
        {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
        {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
        {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
        {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
        {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
        {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
        {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
        {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
                nodesc_drop)},
};

#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
                sizeof(fm10k_hw_stats_strings[0]))

struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
        {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
        {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
        {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
};

#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
                sizeof(fm10k_hw_stats_rx_q_strings[0]))

struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
        {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
        {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
};

#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
                sizeof(fm10k_hw_stats_tx_q_strings[0]))

#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
                (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
static int
fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
        rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

static void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
        while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
                rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}

static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
        rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

/* Stubs needed for linkage when vPMD is disabled */
int __attribute__((weak))
fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
{
        return -1;
}

uint16_t __attribute__((weak))
fm10k_recv_pkts_vec(
        __rte_unused void *rx_queue,
        __rte_unused struct rte_mbuf **rx_pkts,
        __rte_unused uint16_t nb_pkts)
{
        return 0;
}

uint16_t __attribute__((weak))
fm10k_recv_scattered_pkts_vec(
                __rte_unused void *rx_queue,
                __rte_unused struct rte_mbuf **rx_pkts,
                __rte_unused uint16_t nb_pkts)
{
        return 0;
}

int __attribute__((weak))
fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
{
        return -1;
}

void __attribute__((weak))
fm10k_rx_queue_release_mbufs_vec(
                __rte_unused struct fm10k_rx_queue *rxq)
{
        return;
}

void __attribute__((weak))
fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
{
        return;
}

int __attribute__((weak))
fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
{
        return -1;
}

uint16_t __attribute__((weak))
fm10k_xmit_pkts_vec(__rte_unused void *tx_queue,
                __rte_unused struct rte_mbuf **tx_pkts,
                __rte_unused uint16_t nb_pkts)
{
        return 0;
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device.
 * return 0 on success
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
        static const union fm10k_rx_desc zero = {{0} };
        uint64_t dma_addr;
        int i, diag;
        PMD_INIT_FUNC_TRACE();

        diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
        if (diag != 0)
                return -ENOMEM;

        for (i = 0; i < q->nb_desc; ++i) {
                fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
                if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
                        rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
                                                q->nb_desc);
                        return -EINVAL;
                }
                dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
                q->hw_ring[i].q.pkt_addr = dma_addr;
                q->hw_ring[i].q.hdr_addr = dma_addr;
        }

        /* initialize extra software ring entries. Space for these extra
         * entries is always allocated.
         */
        memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
        for (i = 0; i < q->nb_fake_desc; ++i) {
                q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
                q->hw_ring[q->nb_desc + i] = zero;
        }

        q->next_dd = 0;
        q->next_alloc = 0;
        q->next_trigger = q->alloc_thresh - 1;
        FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
        q->rxrearm_start = 0;
        q->rxrearm_nb = 0;

        return 0;
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device.
 */
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
        union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
        uint32_t i;
        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* zero faked descriptors */
        for (i = 0; i < q->nb_fake_desc; ++i)
                q->hw_ring[q->nb_desc + i] = zero;

        /* vPMD driver has a different way of releasing mbufs. */
        if (q->rx_using_sse) {
                fm10k_rx_queue_release_mbufs_vec(q);
                return;
        }

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}

/*
 * free all queue memory, used when releasing the queue (i.e. during
 * re-configuration)
 */
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
                rx_queue_clean(q);
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
                q = NULL;
        }
}

/*
 * disable RX queue, wait until HW finishes the necessary flush operation
 */
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
                        reg & ~FM10K_RXQCTL_ENABLE);

        /* Wait 100us at most */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
                if (!(reg & FM10K_RXQCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device
 */
static inline void
tx_queue_reset(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        q->last_free = 0;
        q->next_free = 0;
        q->nb_used = 0;
        q->nb_free = q->nb_desc - 1;
        fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
        FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device
 */
static inline void
tx_queue_clean(struct fm10k_tx_queue *q)
{
        struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
        uint32_t i;
        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}

/*
 * free all queue memory, used when releasing the queue (i.e. during
 * re-configuration)
 */
static inline void
tx_queue_free(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
                tx_queue_clean(q);
                if (q->rs_tracker.list) {
                        rte_free(q->rs_tracker.list);
                        q->rs_tracker.list = NULL;
                }
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
                q = NULL;
        }
}

/*
 * disable TX queue, wait until HW finishes the necessary flush operation
 */
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
                        reg & ~FM10K_TXDCTL_ENABLE);

        /* Wait 100us at most */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
                if (!(reg & FM10K_TXDCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}

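/* Validate the requested multi-queue mode: DCB is rejected outright,
 * VMDQ is PF-only, and the configured number of VMDQ pools must fit both
 * the pool bitmap width and the number of RX queues.
 */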
static int
fm10k_check_mq_mode(struct rte_eth_dev *dev)
{
        enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;

        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
                PMD_INIT_LOG(ERR, "DCB mode is not supported.");
                return -EINVAL;
        }

        if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
                return 0;

        if (hw->mac.type == fm10k_mac_vf) {
                PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
                return -EINVAL;
        }

        /* Check VMDQ queue pool number */
        if (vmdq_conf->nb_queue_pools >
                        sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
                        vmdq_conf->nb_queue_pools > nb_rx_q) {
                PMD_INIT_LOG(ERR, "Too many queue pools: %d",
                        vmdq_conf->nb_queue_pools);
                return -EINVAL;
        }

        return 0;
}

static const struct fm10k_txq_ops def_txq_ops = {
        .reset = tx_queue_reset,
};
static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
        int ret;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
                PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
        /* multiple queue mode checking */
        ret = fm10k_check_mq_mode(dev);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
                            ret);
                return ret;
        }

        return 0;
}

/* fls = find last set bit = 32 minus the number of leading zeros */
#ifndef fls
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
#endif

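/* Register the VLAN ID of each configured VMDQ pool map with the switch
 * over the mailbox.
 */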
static void
fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        uint32_t i;

        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
                if (!vmdq_conf->pool_map[i].pools)
                        continue;
                fm10k_mbx_lock(hw);
                fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
                fm10k_mbx_unlock(hw);
        }
}

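/* Restore the default MAC filter on the PF main VSI (pool 0). */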
static void
fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Add default mac address */
        fm10k_MAC_filter_set(dev, hw->mac.addr, true,
                MAIN_VSI_POOL_NUMBER);
}

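/* Program the RSS hash key, redirection table and hash-function selection
 * (MRQC); RSS is disabled when only one RX queue is in use, when the mq
 * mode is not ETH_MQ_RX_RSS, or when no hash functions are requested.
 */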
static void
fm10k_dev_rss_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        uint32_t mrqc, *key, i, reta, j;
        uint64_t hf;

#define RSS_KEY_SIZE 40
        static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
                0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
                0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
                0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
        };

        if (dev->data->nb_rx_queues == 1 ||
            dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
            dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
                FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
                return;
        }

        /* the RSS key is rss_intel_key (default) or user-provided (rss_key) */
        if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
                key = (uint32_t *)rss_intel_key;
        else
                key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;

        /* Now fill our hash function seeds, 4 bytes at a time */
        for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
                FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

        /*
         * Fill in redirection table
         * The byte-swap is needed because NIC registers are in
         * little-endian order.
         */
        reta = 0;
        for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
                if (j == dev->data->nb_rx_queues)
                        j = 0;
                reta = (reta << CHAR_BIT) | j;
                if ((i & 3) == 3)
                        FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
                                        rte_bswap32(reta));
        }

        /*
         * Generate RSS hash based on packet types, TCP/UDP
         * port numbers and/or IPv4/v6 src and dst addresses
         */
        hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
        mrqc = 0;
        mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
        mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;

        if (mrqc == 0) {
                PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not"
                        " supported", hf);
                return;
        }

        FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
}

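/* Reset the xcast mode of each logical port to unicast (NONE); called
 * when the number of logical ports in use changes.
 */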
static void
fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t i;

        for (i = 0; i < nb_lport_new; i++) {
                /* Set unicast mode by default. The application can switch
                 * to another mode via other API functions.
                 */
                fm10k_mbx_lock(hw);
                hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
                        FM10K_XCAST_MODE_NONE);
                fm10k_mbx_unlock(hw);
        }
}

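/* Configure multi-queue RX: always program RSS, then (PF only) update the
 * logical ports and reset the MAC/VLAN filter table when the number of
 * VMDQ pools has changed.
 */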
static void
fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        struct fm10k_macvlan_filter_info *macvlan;
        uint16_t nb_queue_pools = 0; /* pool number in configuration */
        uint16_t nb_lport_new;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        fm10k_dev_rss_configure(dev);

        /* only PF supports VMDQ */
        if (hw->mac.type != fm10k_mac_pf)
                return;

        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
                nb_queue_pools = vmdq_conf->nb_queue_pools;

        /* no pool number change, no need to update logic port and VLAN/MAC */
        if (macvlan->nb_queue_pools == nb_queue_pools)
                return;

        nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
        fm10k_dev_logic_port_update(dev, nb_lport_new);

        /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
        memset(dev->data->mac_addrs, 0,
                ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
        ether_addr_copy((const struct ether_addr *)hw->mac.addr,
                &dev->data->mac_addrs[0]);
        memset(macvlan, 0, sizeof(*macvlan));
        macvlan->nb_queue_pools = nb_queue_pools;

        if (nb_queue_pools)
                fm10k_dev_vmdq_rx_configure(dev);
        else
                fm10k_dev_pf_main_vsi_reset(dev);
}

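/* Program the TX rings: mask TXINT, disable each queue while its base
 * address, length and SGLORT are written, optionally enable FTAG (PF
 * only), and finally select the scalar or vector TX burst function.
 */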
static int
fm10k_dev_tx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, ret;
        struct fm10k_tx_queue *txq;
        uint64_t base_addr;
        uint32_t size;

        /* Disable TXINT to avoid possible interrupt */
        for (i = 0; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_TXINT(i),
                                3 << FM10K_TXINT_TIMER_SHIFT);

        /* Setup TX queue */
        for (i = 0; i < dev->data->nb_tx_queues; ++i) {
                txq = dev->data->tx_queues[i];
                base_addr = txq->hw_ring_phys_addr;
                size = txq->nb_desc * sizeof(struct fm10k_tx_desc);

                /* disable queue to avoid issues while updating state */
                ret = tx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }
                /* Enable use of FTAG bit in TX descriptor, PFVTCTL
                 * register is read-only for VF.
                 */
                if (fm10k_check_ftag(dev->pci_dev->device.devargs)) {
                        if (hw->mac.type == fm10k_mac_pf) {
                                FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
                                                FM10K_PFVTCTL_FTAG_DESC_ENABLE);
                                PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
                        } else {
                                PMD_INIT_LOG(ERR, "VF FTAG is not supported.");
                                return -ENOTSUP;
                        }
                }

                /* set location and size for descriptor ring */
                FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);

                /* assign default SGLORT for each TX queue */
                FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
        }

        /* set up vector or scalar TX function as appropriate */
        fm10k_set_tx_function(dev);

        return 0;
}

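/* Program the RX rings: route RXINT to the per-queue interrupt vectors
 * (or mask it), write each ring's base address, length and buffer size,
 * enable scattered RX when needed, then configure VMDQ/RSS and pick the
 * RX burst function. On the PF, RX_SGLORT is also set per queue for
 * loopback suppression.
 */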
static int
fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_macvlan_filter_info *macvlan;
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        int i, ret;
        struct fm10k_rx_queue *rxq;
        uint64_t base_addr;
        uint32_t size;
        uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
        uint32_t logic_port = hw->mac.dglort_map;
        uint16_t buf_size;
        uint16_t queue_stride = 0;

        /* enable RXINT for interrupt mode */
        i = 0;
        if (rte_intr_dp_is_en(intr_handle)) {
                for (; i < dev->data->nb_rx_queues; i++) {
                        FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(dev, i));
                        if (hw->mac.type == fm10k_mac_pf)
                                FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, i)),
                                        FM10K_ITR_AUTOMASK |
                                        FM10K_ITR_MASK_CLEAR);
                        else
                                FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, i)),
                                        FM10K_ITR_AUTOMASK |
                                        FM10K_ITR_MASK_CLEAR);
                }
        }
        /* Disable other RXINT to avoid possible interrupt */
        for (; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_RXINT(i),
                        3 << FM10K_RXINT_TIMER_SHIFT);

        /* Setup RX queues */
        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
                rxq = dev->data->rx_queues[i];
                base_addr = rxq->hw_ring_phys_addr;
                size = rxq->nb_desc * sizeof(union fm10k_rx_desc);

                /* disable queue to avoid issues while updating state */
                ret = rx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }

                /* Setup the Base and Length of the Rx Descriptor Ring */
                FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);

                /* Configure the Rx buffer size for one buffer without split */
                buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
                        RTE_PKTMBUF_HEADROOM);
                /* As RX buffer is aligned to 512B within mbuf, some bytes are
                 * reserved for this purpose, and the worst case could be 511B.
                 * But SRR reg assumes all buffers have the same size. In order
                 * to fill the gap, we'll have to consider the worst case and
                 * assume 512B is reserved. If we don't do so, it's possible
                 * for HW to overwrite data to next mbuf.
                 */
                buf_size -= FM10K_RX_DATABUF_ALIGN;

                FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
                                (buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
                                FM10K_SRRCTL_LOOPBACK_SUPPRESS);

                /* Account for dual VLAN tag length when deciding on scatter */
                if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
                                2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
                        dev->data->dev_conf.rxmode.enable_scatter) {
                        uint32_t reg;
                        dev->data->scattered_rx = 1;
                        reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
                        reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
                        FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
                }

                /* Enable drop on empty, it's RO for VF */
                if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
                        rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

                FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
                FM10K_WRITE_FLUSH(hw);
        }

        /* Configure VMDQ/RSS if applicable */
        fm10k_dev_mq_rx_configure(dev);

        /* Decide the best RX function */
        fm10k_set_rx_function(dev);

        /* update RX_SGLORT for loopback suppression */
        if (hw->mac.type != fm10k_mac_pf)
                return 0;
        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        if (macvlan->nb_queue_pools)
                queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
                if (i && queue_stride && !(i % queue_stride))
                        logic_port++;
                FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
        }

        return 0;
}

static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err = -1;
        uint32_t reg;
        struct fm10k_rx_queue *rxq;

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                rxq = dev->data->rx_queues[rx_queue_id];
                err = rx_queue_reset(rxq);
                if (err == -ENOMEM) {
                        PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
                        return err;
                } else if (err == -EINVAL) {
                        PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
                                " %d", err);
                        return err;
                }

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled on real
                 * hardware, but BEFORE the queue is enabled when using the
                 * emulation platform. Do it in both places for now and remove
                 * this comment and the following two register writes when the
                 * emulation platform is no longer being used.
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

                /* Set PF ownership flag for PF devices */
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
                if (hw->mac.type == fm10k_mac_pf)
                        reg |= FM10K_RXQCTL_PF;
                reg |= FM10K_RXQCTL_ENABLE;
                /* enable RX queue */
                FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
                FM10K_WRITE_FLUSH(hw);

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
                dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
        }

        return err;
}

static int
fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                /* Disable RX queue */
                rx_queue_disable(hw, rx_queue_id);

                /* Free mbuf and clean HW ring */
                rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
                dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

static int
fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY       0x00010000
        uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
        int err = 0;

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];

                q->ops->reset(q);

                /* reset head and tail pointers */
                FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

                /* enable TX queue */
                FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
                                        FM10K_TXDCTL_ENABLE | txdctl);
                FM10K_WRITE_FLUSH(hw);
                dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
        } else
                err = -1;

        return err;
}

static int
fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                tx_queue_disable(hw, tx_queue_id);
                tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
                dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

static inline int fm10k_glort_valid(struct fm10k_hw *hw)
{
        return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
                != FM10K_DGLORTMAP_NONE);
}

static void
fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_PROMISC);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
}

static void
fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint8_t mode;
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        if (dev->data->all_multicast == 1)
                mode = FM10K_XCAST_MODE_ALLMULTI;
        else
                mode = FM10K_XCAST_MODE_NONE;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                mode);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
}

static void
fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        /* If promiscuous mode is enabled, it doesn't make sense to enable
         * allmulticast and disable promiscuous, since fm10k can only select
         * one of the modes.
         */
        if (dev->data->promiscuous) {
                PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "
                        "no need to enable allmulticast");
                return;
        }

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_ALLMULTI);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
}

static void
fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        if (dev->data->promiscuous) {
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "
                        "since promisc mode is enabled");
                return;
        }

        fm10k_mbx_lock(hw);
        /* Change mode to unicast mode */
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_NONE);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
}

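/* Program the DGLORT map on the PF: entry 0 decodes the PF/VMDQ GLORT
 * range into pool and RSS bits, entry 1 maps the Flow Director GLORT
 * range, and all remaining entries are invalidated.
 */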
static void
fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
        uint16_t nb_queue_pools;
        struct fm10k_macvlan_filter_info *macvlan;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        nb_queue_pools = macvlan->nb_queue_pools;
        pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
        rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;

        /* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */
        dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
        dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
                        hw->mac.dglort_map;
        FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
        /* Configure VMDQ/RSS DGlort Decoder */
        FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);

        /* Flow Director configurations, only queue number is valid. */
        dglortdec = fls(dev->data->nb_rx_queues - 1);
        dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
                        (hw->mac.dglort_map + GLORT_FD_Q_BASE);
        FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
        FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);

        /* Invalidate all other GLORT entries */
        for (i = 2; i < FM10K_DGLORT_COUNT; i++)
                FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
                                FM10K_DGLORTMAP_NONE);
}

#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
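
/* dev_ops: bring the port up by re-initializing the hardware, programming
 * the TX/RX rings and interrupts, and starting every queue that is not
 * marked for deferred start; on failure, queues started so far are
 * cleaned up again.
 */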
static int
fm10k_dev_start(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, diag;

        PMD_INIT_FUNC_TRACE();

        /* stop, init, then start the hw */
        diag = fm10k_stop_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_init_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_start_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_dev_tx_init(dev);
        if (diag) {
                PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
                return diag;
        }

        if (fm10k_dev_rxq_interrupt_setup(dev))
                return -EIO;

        diag = fm10k_dev_rx_init(dev);
        if (diag) {
                PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
                return diag;
        }

        if (hw->mac.type == fm10k_mac_pf)
                fm10k_dev_dglort_map_configure(dev);

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct fm10k_rx_queue *rxq;
                rxq = dev->data->rx_queues[i];

                if (rxq->rx_deferred_start)
                        continue;
                diag = fm10k_dev_rx_queue_start(dev, i);
                if (diag != 0) {
                        int j;
                        for (j = 0; j < i; ++j)
                                rx_queue_clean(dev->data->rx_queues[j]);
                        return diag;
                }
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct fm10k_tx_queue *txq;
                txq = dev->data->tx_queues[i];

                if (txq->tx_deferred_start)
                        continue;
                diag = fm10k_dev_tx_queue_start(dev, i);
                if (diag != 0) {
                        int j;
                        for (j = 0; j < i; ++j)
                                tx_queue_clean(dev->data->tx_queues[j]);
                        for (j = 0; j < dev->data->nb_rx_queues; ++j)
                                rx_queue_clean(dev->data->rx_queues[j]);
                        return diag;
                }
        }

        /* Update default vlan when not in VMDQ mode */
        if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
                fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);

        return 0;
}

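/* dev_ops: stop all TX and RX queues, mask the per-queue interrupts and
 * release the interrupt-vector mapping.
 */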
static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        int i;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->tx_queues)
                for (i = 0; i < dev->data->nb_tx_queues; i++)
                        fm10k_dev_tx_queue_stop(dev, i);

        if (dev->data->rx_queues)
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        fm10k_dev_rx_queue_stop(dev, i);

        /* Disable datapath event */
        if (rte_intr_dp_is_en(intr_handle)) {
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        FM10K_WRITE_REG(hw, FM10K_RXINT(i),
                                3 << FM10K_RXINT_TIMER_SHIFT);
                        if (hw->mac.type == fm10k_mac_pf)
                                FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, i)),
                                        FM10K_ITR_MASK_SET);
                        else
                                FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, i)),
                                        FM10K_ITR_MASK_SET);
                }
        }
        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        rte_free(intr_handle->intr_vec);
        intr_handle->intr_vec = NULL;
}

static void
fm10k_dev_queue_release(struct rte_eth_dev *dev)
{
        int i;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->tx_queues) {
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        struct fm10k_tx_queue *txq = dev->data->tx_queues[i];

                        tx_queue_free(txq);
                }
        }

        if (dev->data->rx_queues) {
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        fm10k_rx_queue_release(dev->data->rx_queues[i]);
        }
}

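/* dev_ops: tear the port down; the logical ports are disabled over the
 * mailbox, the switch is given time to quiesce, and then the mailbox
 * service, queues and hardware are shut down in that order.
 */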
static void
fm10k_dev_close(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        fm10k_mbx_lock(hw);
        hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
                MAX_LPORT_NUM, false);
        fm10k_mbx_unlock(hw);

        /* allow 10ms for device to quiesce */
        rte_delay_us(FM10K_SWITCH_QUIESCE_US);

        /* Stop mailbox service first */
        fm10k_close_mbx_service(hw);
        fm10k_dev_stop(dev);
        fm10k_dev_queue_release(dev);
        fm10k_stop_hw(hw);
}

static int
fm10k_link_update(struct rte_eth_dev *dev,
        __rte_unused int wait_to_complete)
{
        PMD_INIT_FUNC_TRACE();

        /* The host-interface link is always up.  The speed is ~50Gbps per Gen3
         * x8 PCIe interface. For now, we leave the speed undefined since there
         * is no 50Gbps Ethernet. */
        dev->data->dev_link.link_speed  = 0;
        dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        dev->data->dev_link.link_status = ETH_LINK_UP;

        return 0;
}

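/* dev_ops: report the extended statistics names (global HW counters plus
 * per-queue RX/TX counters); always returns the total count so callers
 * can size their buffers.
 */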
static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
{
        unsigned i, q;
        unsigned count = 0;

        if (xstats_names != NULL) {
                /* Note: limit checked in rte_eth_xstats_names() */

                /* Global stats */
                for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
                        snprintf(xstats_names[count].name,
                                sizeof(xstats_names[count].name),
                                "%s", fm10k_hw_stats_strings[count].name);
                        count++;
                }

                /* PF queue stats */
                for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
                        for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
                                snprintf(xstats_names[count].name,
                                        sizeof(xstats_names[count].name),
                                        "rx_q%u_%s", q,
                                        fm10k_hw_stats_rx_q_strings[i].name);
                                count++;
                        }
                        for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
                                snprintf(xstats_names[count].name,
                                        sizeof(xstats_names[count].name),
                                        "tx_q%u_%s", q,
                                        fm10k_hw_stats_tx_q_strings[i].name);
                                count++;
                        }
                }
        }
        return FM10K_NB_XSTATS;
}

static int
fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                 unsigned n)
{
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        unsigned i, q, count = 0;

        if (n < FM10K_NB_XSTATS)
                return FM10K_NB_XSTATS;

        /* Global stats */
        for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
                xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
                        fm10k_hw_stats_strings[count].offset);
                count++;
        }

        /* PF queue stats */
        for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
                for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
                        xstats[count].value =
                                *(uint64_t *)(((char *)&hw_stats->q[q]) +
                                fm10k_hw_stats_rx_q_strings[i].offset);
                        count++;
                }
                for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
                        xstats[count].value =
                                *(uint64_t *)(((char *)&hw_stats->q[q]) +
                                fm10k_hw_stats_tx_q_strings[i].offset);
                        count++;
                }
        }

        return FM10K_NB_XSTATS;
}

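/* dev_ops: refresh the HW statistics and aggregate the per-queue packet
 * and byte counters into the port totals.
 */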
static void
fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        uint64_t ipackets, opackets, ibytes, obytes;
        struct fm10k_hw *hw =
                FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        int i;

        PMD_INIT_FUNC_TRACE();

        fm10k_update_hw_stats(hw, hw_stats);

        ipackets = opackets = ibytes = obytes = 0;
        for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
                (i < hw->mac.max_queues); ++i) {
                stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
                stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
                stats->q_ibytes[i]   = hw_stats->q[i].rx_bytes.count;
                stats->q_obytes[i]   = hw_stats->q[i].tx_bytes.count;
                ipackets += stats->q_ipackets[i];
                opackets += stats->q_opackets[i];
                ibytes   += stats->q_ibytes[i];
                obytes   += stats->q_obytes[i];
        }
        stats->ipackets = ipackets;
        stats->opackets = opackets;
        stats->ibytes = ibytes;
        stats->obytes = obytes;
}

static void
fm10k_stats_reset(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        memset(hw_stats, 0, sizeof(*hw_stats));
        fm10k_rebind_hw_stats(hw, hw_stats);
}

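/* dev_ops: report device capabilities and default queue configuration. */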
1384 static void
1385 fm10k_dev_infos_get(struct rte_eth_dev *dev,
1386         struct rte_eth_dev_info *dev_info)
1387 {
1388         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1389
1390         PMD_INIT_FUNC_TRACE();
1391
1392         dev_info->min_rx_bufsize     = FM10K_MIN_RX_BUF_SIZE;
1393         dev_info->max_rx_pktlen      = FM10K_MAX_PKT_SIZE;
1394         dev_info->max_rx_queues      = hw->mac.max_queues;
1395         dev_info->max_tx_queues      = hw->mac.max_queues;
1396         dev_info->max_mac_addrs      = FM10K_MAX_MACADDR_NUM;
1397         dev_info->max_hash_mac_addrs = 0;
1398         dev_info->max_vfs            = dev->pci_dev->max_vfs;
1399         dev_info->vmdq_pool_base     = 0;
1400         dev_info->vmdq_queue_base    = 0;
1401         dev_info->max_vmdq_pools     = ETH_32_POOLS;
1402         dev_info->vmdq_queue_num     = FM10K_MAX_QUEUES_PF;
1403         dev_info->rx_offload_capa =
1404                 DEV_RX_OFFLOAD_VLAN_STRIP |
1405                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1406                 DEV_RX_OFFLOAD_UDP_CKSUM  |
1407                 DEV_RX_OFFLOAD_TCP_CKSUM;
1408         dev_info->tx_offload_capa =
1409                 DEV_TX_OFFLOAD_VLAN_INSERT |
1410                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
1411                 DEV_TX_OFFLOAD_UDP_CKSUM   |
1412                 DEV_TX_OFFLOAD_TCP_CKSUM   |
1413                 DEV_TX_OFFLOAD_TCP_TSO;
1414
1415         dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
1416         dev_info->reta_size = FM10K_MAX_RSS_INDICES;
1417
1418         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1419                 .rx_thresh = {
1420                         .pthresh = FM10K_DEFAULT_RX_PTHRESH,
1421                         .hthresh = FM10K_DEFAULT_RX_HTHRESH,
1422                         .wthresh = FM10K_DEFAULT_RX_WTHRESH,
1423                 },
1424                 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
1425                 .rx_drop_en = 0,
1426         };
1427
1428         dev_info->default_txconf = (struct rte_eth_txconf) {
1429                 .tx_thresh = {
1430                         .pthresh = FM10K_DEFAULT_TX_PTHRESH,
1431                         .hthresh = FM10K_DEFAULT_TX_HTHRESH,
1432                         .wthresh = FM10K_DEFAULT_TX_WTHRESH,
1433                 },
1434                 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
1435                 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
1436                 .txq_flags = FM10K_SIMPLE_TX_FLAG,
1437         };
1438
1439         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1440                 .nb_max = FM10K_MAX_RX_DESC,
1441                 .nb_min = FM10K_MIN_RX_DESC,
1442                 .nb_align = FM10K_MULT_RX_DESC,
1443         };
1444
1445         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1446                 .nb_max = FM10K_MAX_TX_DESC,
1447                 .nb_min = FM10K_MIN_TX_DESC,
1448                 .nb_align = FM10K_MULT_TX_DESC,
1449         };
1450
1451         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
1452                         ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
1453                         ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
1454 }
1455
1456 #ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
1457 static const uint32_t *
1458 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1459 {
1460         if (dev->rx_pkt_burst == fm10k_recv_pkts ||
1461             dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
1462                 static const uint32_t ptypes[] = {
1463                         /* refers to rx_desc_to_ol_flags() */
1464                         RTE_PTYPE_L2_ETHER,
1465                         RTE_PTYPE_L3_IPV4,
1466                         RTE_PTYPE_L3_IPV4_EXT,
1467                         RTE_PTYPE_L3_IPV6,
1468                         RTE_PTYPE_L3_IPV6_EXT,
1469                         RTE_PTYPE_L4_TCP,
1470                         RTE_PTYPE_L4_UDP,
1471                         RTE_PTYPE_UNKNOWN
1472                 };
1473
1474                 return ptypes;
1475         } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
1476                    dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
1477                 static const uint32_t ptypes_vec[] = {
1478                         /* refers to fm10k_desc_to_pktype_v() */
1479                         RTE_PTYPE_L3_IPV4,
1480                         RTE_PTYPE_L3_IPV4_EXT,
1481                         RTE_PTYPE_L3_IPV6,
1482                         RTE_PTYPE_L3_IPV6_EXT,
1483                         RTE_PTYPE_L4_TCP,
1484                         RTE_PTYPE_L4_UDP,
1485                         RTE_PTYPE_TUNNEL_GENEVE,
1486                         RTE_PTYPE_TUNNEL_NVGRE,
1487                         RTE_PTYPE_TUNNEL_VXLAN,
1488                         RTE_PTYPE_TUNNEL_GRE,
1489                         RTE_PTYPE_UNKNOWN
1490                 };
1491
1492                 return ptypes_vec;
1493         }
1494
1495         return NULL;
1496 }
1497 #else
1498 static const uint32_t *
1499 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1500 {
1501         return NULL;
1502 }
1503 #endif
1504
1505 static int
1506 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1507 {
1508         s32 result;
1509         uint16_t mac_num = 0;
1510         uint32_t vid_idx, vid_bit, mac_index;
1511         struct fm10k_hw *hw;
1512         struct fm10k_macvlan_filter_info *macvlan;
1513         struct rte_eth_dev_data *data = dev->data;
1514
1515         hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1516         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1517
1518         if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
1519                 PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
1520                 return -EINVAL;
1521         }
1522
1523         if (vlan_id > ETH_VLAN_ID_MAX) {
1524                 PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
1525                 return -EINVAL;
1526         }
1527
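             /*
              * The VLAN filter table (VFTA) is a bitmap: each 32-bit entry
              * tracks 32 consecutive VLAN IDs, so e.g. VLAN 100 lives in
              * entry 100 / 32 = 3 as bit 100 % 32 = 4.
              */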
1528         vid_idx = FM10K_VFTA_IDX(vlan_id);
1529         vid_bit = FM10K_VFTA_BIT(vlan_id);
1530         /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
1531         if (on && (macvlan->vfta[vid_idx] & vid_bit))
1532                 return 0;
1533         /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
1534         if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
1535                 PMD_INIT_LOG(ERR, "Invalid vlan_id: not present "
1536                         "in the VLAN filter table");
1537                 return -EINVAL;
1538         }
1539
1540         fm10k_mbx_lock(hw);
1541         result = fm10k_update_vlan(hw, vlan_id, 0, on);
1542         fm10k_mbx_unlock(hw);
1543         if (result != FM10K_SUCCESS) {
1544                 PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
1545                 return -EIO;
1546         }
1547
1548         for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1549                         (result == FM10K_SUCCESS); mac_index++) {
1550                 if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
1551                         continue;
1552                 if (mac_num > macvlan->mac_num - 1) {
1553                         PMD_INIT_LOG(ERR, "MAC address count "
1554                                         "does not match");
1555                         break;
1556                 }
1557                 fm10k_mbx_lock(hw);
1558                 result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1559                         data->mac_addrs[mac_index].addr_bytes,
1560                         vlan_id, on, 0);
1561                 fm10k_mbx_unlock(hw);
1562                 mac_num++;
1563         }
1564         if (result != FM10K_SUCCESS) {
1565                 PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1566                 return -EIO;
1567         }
1568
1569         if (on) {
1570                 macvlan->vlan_num++;
1571                 macvlan->vfta[vid_idx] |= vid_bit;
1572         } else {
1573                 macvlan->vlan_num--;
1574                 macvlan->vfta[vid_idx] &= ~vid_bit;
1575         }
1576         return 0;
1577 }
1578
1579 static void
1580 fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1581 {
1582         if (mask & ETH_VLAN_STRIP_MASK) {
1583                 if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
1584                         PMD_INIT_LOG(ERR, "VLAN stripping is "
1585                                         "always on in fm10k");
1586         }
1587
1588         if (mask & ETH_VLAN_EXTEND_MASK) {
1589                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1590                         PMD_INIT_LOG(ERR, "VLAN QinQ is not "
1591                                         "supported in fm10k");
1592         }
1593
1594         if (mask & ETH_VLAN_FILTER_MASK) {
1595                 if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
1596                         PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
1597         }
1598 }
1599
1600 /* Add/Remove a MAC address, and update filters to main VSI */
1601 static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
1602                 const u8 *mac, bool add, uint32_t pool)
1603 {
1604         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1605         struct fm10k_macvlan_filter_info *macvlan;
1606         uint32_t i, j, k;
1607
1608         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1609
1610         if (pool != MAIN_VSI_POOL_NUMBER) {
1611                 PMD_DRV_LOG(ERR, "VMDQ not enabled, cannot set "
1612                         "MAC to pool %u", pool);
1613                 return;
1614         }
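             /*
              * Walk every VLAN ID set in the VFTA bitmap (j indexes the
              * 32-bit entries, k the bits within one entry) and add or
              * remove this MAC for each of them; i counts the VLANs seen
              * so the walk can be bounded by the cached vlan_num.
              */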
1615         for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
1616                 if (!macvlan->vfta[j])
1617                         continue;
1618                 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1619                         if (!(macvlan->vfta[j] & (1 << k)))
1620                                 continue;
1621                         if (i + 1 > macvlan->vlan_num) {
1622                                 PMD_INIT_LOG(ERR, "VLAN count does not match");
1623                                 return;
1624                         }
1625                         fm10k_mbx_lock(hw);
1626                         fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
1627                                 j * FM10K_UINT32_BIT_SIZE + k, add, 0);
1628                         fm10k_mbx_unlock(hw);
1629                         i++;
1630                 }
1631         }
1632 }
1633
1634 /* Add/Remove a MAC address, and update filters to VMDQ */
1635 static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
1636                 const u8 *mac, bool add, uint32_t pool)
1637 {
1638         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1639         struct fm10k_macvlan_filter_info *macvlan;
1640         struct rte_eth_vmdq_rx_conf *vmdq_conf;
1641         uint32_t i;
1642
1643         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1644         vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1645
1646         if (pool > macvlan->nb_queue_pools) {
1647                 PMD_DRV_LOG(ERR, "Pool number %u invalid."
1648                         " Max pool is %u",
1649                         pool, macvlan->nb_queue_pools);
1650                 return;
1651         }
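             /*
              * pool_map[i].pools is a bitmask of the pools each VLAN maps
              * to, so the MAC is programmed only for the VLAN IDs whose
              * map includes the requested pool.
              */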
1652         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
1653                 if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
1654                         continue;
1655                 fm10k_mbx_lock(hw);
1656                 fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
1657                         vmdq_conf->pool_map[i].vlan_id, add, 0);
1658                 fm10k_mbx_unlock(hw);
1659         }
1660 }
1661
1662 /* Add/Remove a MAC address, and update filters */
1663 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
1664                 const u8 *mac, bool add, uint32_t pool)
1665 {
1666         struct fm10k_macvlan_filter_info *macvlan;
1667
1668         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1669
1670         if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
1671                 fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
1672         else
1673                 fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
1674
1675         if (add)
1676                 macvlan->mac_num++;
1677         else
1678                 macvlan->mac_num--;
1679 }
1680
1681 /* Add a MAC address, and update filters */
1682 static void
1683 fm10k_macaddr_add(struct rte_eth_dev *dev,
1684                 struct ether_addr *mac_addr,
1685                 uint32_t index,
1686                 uint32_t pool)
1687 {
1688         struct fm10k_macvlan_filter_info *macvlan;
1689
1690         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1691         fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1692         macvlan->mac_vmdq_id[index] = pool;
1693 }
1694
1695 /* Remove a MAC address, and update filters */
1696 static void
1697 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1698 {
1699         struct rte_eth_dev_data *data = dev->data;
1700         struct fm10k_macvlan_filter_info *macvlan;
1701
1702         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1703         fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1704                         FALSE, macvlan->mac_vmdq_id[index]);
1705         macvlan->mac_vmdq_id[index] = 0;
1706 }
1707
1708 static inline int
1709 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1710 {
1711         if ((request < min) || (request > max) || ((request % mult) != 0))
1712                 return -1;
1713         else
1714                 return 0;
1715 }
1716
1717
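     /*
      * A threshold is valid when it lies in [min, max] and evenly divides
      * "div": for example, with div = 512 a request of 32 passes
      * (512 % 32 == 0) while 48 does not.
      */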
1718 static inline int
1719 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1720 {
1721         if ((request < min) || (request > max) || ((div % request) != 0))
1722                 return -1;
1723         else
1724                 return 0;
1725 }
1726
1727 static inline int
1728 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1729 {
1730         uint16_t rx_free_thresh;
1731
1732         if (conf->rx_free_thresh == 0)
1733                 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1734         else
1735                 rx_free_thresh = conf->rx_free_thresh;
1736
1737         /* make sure the requested threshold satisfies the constraints */
1738         if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1739                         FM10K_RX_FREE_THRESH_MAX(q),
1740                         FM10K_RX_FREE_THRESH_DIV(q),
1741                         rx_free_thresh)) {
1742                 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1743                         "less than or equal to %u, "
1744                         "greater than or equal to %u, "
1745                         "and a divisor of %u",
1746                         rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1747                         FM10K_RX_FREE_THRESH_MIN(q),
1748                         FM10K_RX_FREE_THRESH_DIV(q));
1749                 return -EINVAL;
1750         }
1751
1752         q->alloc_thresh = rx_free_thresh;
1753         q->drop_en = conf->rx_drop_en;
1754         q->rx_deferred_start = conf->rx_deferred_start;
1755
1756         return 0;
1757 }
1758
1759 /*
1760  * Hardware requires specific alignment for Rx packet buffers. At
1761  * least one of the following two conditions must be satisfied.
1762  *  1. Address is 512B aligned
1763  *  2. Address is 8B aligned and buffer does not cross 4K boundary.
1764  *
1765  * As such, the driver may need to adjust the DMA address within the
1766  * buffer by up to 512B.
1767  *
1768  * return 1 if the element size is valid, otherwise return 0.
1769  */
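     /*
      * A worked example with illustrative numbers only: elt_size = 2304, a
      * 128-byte mbuf header and 128 bytes of headroom give
      * min_size = 2304 - 128 - 128 - 512 = 1536, a valid (non-wrapped) size.
      */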
1770 static int
1771 mempool_element_size_valid(struct rte_mempool *mp)
1772 {
1773         uint32_t min_size;
1774
1775         /* elt_size includes mbuf header and headroom */
1776         min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1777                         RTE_PKTMBUF_HEADROOM;
1778
1779         /* account for up to 512B of alignment */
1780         min_size -= FM10K_RX_DATABUF_ALIGN;
1781
1782         /* if the subtraction wrapped around, the element is too small */
1783         if (min_size > mp->elt_size)
1784                 return 0;
1785
1786         /* size is valid */
1787         return 1;
1788 }
1789
1790 static int
1791 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1792         uint16_t nb_desc, unsigned int socket_id,
1793         const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1794 {
1795         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1796         struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
1797         struct fm10k_rx_queue *q;
1798         const struct rte_memzone *mz;
1799
1800         PMD_INIT_FUNC_TRACE();
1801
1802         /* make sure the mempool element size can account for alignment. */
1803         if (!mempool_element_size_valid(mp)) {
1804                 PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
1805                 return -EINVAL;
1806         }
1807
1808         /* make sure a valid number of descriptors have been requested */
1809         if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1810                                 FM10K_MULT_RX_DESC, nb_desc)) {
1811                 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1812                         "less than or equal to %"PRIu32", "
1813                         "greater than or equal to %u, "
1814                         "and a multiple of %u",
1815                         nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1816                         FM10K_MULT_RX_DESC);
1817                 return -EINVAL;
1818         }
1819
1820         /*
1821          * if this queue existed already, free the associated memory. The
1822          * queue cannot be reused in case we need to allocate memory on
1823          * different socket than was previously used.
1824          * a different socket than was previously used.
1825         if (dev->data->rx_queues[queue_id] != NULL) {
1826                 rx_queue_free(dev->data->rx_queues[queue_id]);
1827                 dev->data->rx_queues[queue_id] = NULL;
1828         }
1829
1830         /* allocate memory for the queue structure */
1831         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1832                                 socket_id);
1833         if (q == NULL) {
1834                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1835                 return -ENOMEM;
1836         }
1837
1838         /* setup queue */
1839         q->mp = mp;
1840         q->nb_desc = nb_desc;
1841         q->nb_fake_desc = FM10K_MULT_RX_DESC;
1842         q->port_id = dev->data->port_id;
1843         q->queue_id = queue_id;
1844         q->tail_ptr = (volatile uint32_t *)
1845                 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1846         if (handle_rxconf(q, conf)) {
                     /* free the queue structure to avoid leaking it */
                     rte_free(q);
1847                 return -EINVAL;
             }
1848
1849         /* allocate memory for the software ring */
1850         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1851                         (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
1852                         RTE_CACHE_LINE_SIZE, socket_id);
1853         if (q->sw_ring == NULL) {
1854                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1855                 rte_free(q);
1856                 return -ENOMEM;
1857         }
1858
1859         /*
1860          * allocate memory for the hardware descriptor ring. A memzone large
1861          * enough to hold the maximum ring size is requested to allow for
1862          * resizing in later calls to the queue setup function.
1863          */
1864         mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
1865                                       FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
1866                                       socket_id);
1867         if (mz == NULL) {
1868                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1869                 rte_free(q->sw_ring);
1870                 rte_free(q);
1871                 return -ENOMEM;
1872         }
1873         q->hw_ring = mz->addr;
1874         q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1875
1876         /* Check whether the descriptor count satisfies the vector Rx requirement */
1877         if (!rte_is_power_of_2(nb_desc)) {
1878                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
1879                                     "preconditions - canceling the feature for "
1880                                     "the whole port[%d]",
1881                              q->queue_id, q->port_id);
1882                 dev_info->rx_vec_allowed = false;
1883         } else
1884                 fm10k_rxq_vec_setup(q);
1885
1886         dev->data->rx_queues[queue_id] = q;
1887         return 0;
1888 }
1889
1890 static void
1891 fm10k_rx_queue_release(void *queue)
1892 {
1893         PMD_INIT_FUNC_TRACE();
1894
1895         rx_queue_free(queue);
1896 }
1897
1898 static inline int
1899 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1900 {
1901         uint16_t tx_free_thresh;
1902         uint16_t tx_rs_thresh;
1903
1904         /* the constraint macros require that tx_free_thresh be configured
1905          * before tx_rs_thresh */
1906         if (conf->tx_free_thresh == 0)
1907                 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1908         else
1909                 tx_free_thresh = conf->tx_free_thresh;
1910
1911         /* make sure the requested threshold satisfies the constraints */
1912         if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1913                         FM10K_TX_FREE_THRESH_MAX(q),
1914                         FM10K_TX_FREE_THRESH_DIV(q),
1915                         tx_free_thresh)) {
1916                 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1917                         "less than or equal to %u, "
1918                         "greater than or equal to %u, "
1919                         "and a divisor of %u",
1920                         tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1921                         FM10K_TX_FREE_THRESH_MIN(q),
1922                         FM10K_TX_FREE_THRESH_DIV(q));
1923                 return -EINVAL;
1924         }
1925
1926         q->free_thresh = tx_free_thresh;
1927
1928         if (conf->tx_rs_thresh == 0)
1929                 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1930         else
1931                 tx_rs_thresh = conf->tx_rs_thresh;
1932
1933         q->tx_deferred_start = conf->tx_deferred_start;
1934
1935         /* make sure the requested threshold satisfies the constraints */
1936         if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1937                         FM10K_TX_RS_THRESH_MAX(q),
1938                         FM10K_TX_RS_THRESH_DIV(q),
1939                         tx_rs_thresh)) {
1940                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1941                         "less than or equal to %u, "
1942                         "greater than or equal to %u, "
1943                         "and a divisor of %u",
1944                         tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1945                         FM10K_TX_RS_THRESH_MIN(q),
1946                         FM10K_TX_RS_THRESH_DIV(q));
1947                 return -EINVAL;
1948         }
1949
1950         q->rs_thresh = tx_rs_thresh;
1951
1952         return 0;
1953 }
1954
1955 static int
1956 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1957         uint16_t nb_desc, unsigned int socket_id,
1958         const struct rte_eth_txconf *conf)
1959 {
1960         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1961         struct fm10k_tx_queue *q;
1962         const struct rte_memzone *mz;
1963
1964         PMD_INIT_FUNC_TRACE();
1965
1966         /* make sure a valid number of descriptors have been requested */
1967         if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1968                                 FM10K_MULT_TX_DESC, nb_desc)) {
1969                 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1970                         "less than or equal to %"PRIu32", "
1971                         "greater than or equal to %u, "
1972                         "and a multiple of %u",
1973                         nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1974                         FM10K_MULT_TX_DESC);
1975                 return -EINVAL;
1976         }
1977
1978         /*
1979          * if this queue existed already, free the associated memory. The
1980          * queue cannot be reused in case we need to allocate memory on
1981          * different socket than was previously used.
1982          * a different socket than was previously used.
1983         if (dev->data->tx_queues[queue_id] != NULL) {
1984                 struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
1985
1986                 tx_queue_free(txq);
1987                 dev->data->tx_queues[queue_id] = NULL;
1988         }
1989
1990         /* allocate memory for the queue structure */
1991         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1992                                 socket_id);
1993         if (q == NULL) {
1994                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1995                 return -ENOMEM;
1996         }
1997
1998         /* setup queue */
1999         q->nb_desc = nb_desc;
2000         q->port_id = dev->data->port_id;
2001         q->queue_id = queue_id;
2002         q->txq_flags = conf->txq_flags;
2003         q->ops = &def_txq_ops;
2004         q->tail_ptr = (volatile uint32_t *)
2005                 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
2006         if (handle_txconf(q, conf)) {
                     /* free the queue structure to avoid leaking it */
                     rte_free(q);
2007                 return -EINVAL;
             }
2008
2009         /* allocate memory for the software ring */
2010         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
2011                                         nb_desc * sizeof(struct rte_mbuf *),
2012                                         RTE_CACHE_LINE_SIZE, socket_id);
2013         if (q->sw_ring == NULL) {
2014                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
2015                 rte_free(q);
2016                 return -ENOMEM;
2017         }
2018
2019         /*
2020          * allocate memory for the hardware descriptor ring. A memzone large
2021          * enough to hold the maximum ring size is requested to allow for
2022          * resizing in later calls to the queue setup function.
2023          */
2024         mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
2025                                       FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
2026                                       socket_id);
2027         if (mz == NULL) {
2028                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
2029                 rte_free(q->sw_ring);
2030                 rte_free(q);
2031                 return -ENOMEM;
2032         }
2033         q->hw_ring = mz->addr;
2034         q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
2035
2036         /*
2037          * allocate memory for the RS bit tracker. Enough slots are required
2038          * to hold the descriptor index for each RS bit that needs to be set.
2039          */
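             /* e.g. nb_desc = 512 and rs_thresh = 32 give (512 + 1) / 32 = 16 slots */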
2040         q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
2041                                 ((nb_desc + 1) / q->rs_thresh) *
2042                                 sizeof(uint16_t),
2043                                 RTE_CACHE_LINE_SIZE, socket_id);
2044         if (q->rs_tracker.list == NULL) {
2045                 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
2046                 rte_free(q->sw_ring);
2047                 rte_free(q);
2048                 return -ENOMEM;
2049         }
2050
2051         dev->data->tx_queues[queue_id] = q;
2052         return 0;
2053 }
2054
2055 static void
2056 fm10k_tx_queue_release(void *queue)
2057 {
2058         struct fm10k_tx_queue *q = queue;
2059         PMD_INIT_FUNC_TRACE();
2060
2061         tx_queue_free(q);
2062 }
2063
2064 static int
2065 fm10k_reta_update(struct rte_eth_dev *dev,
2066                         struct rte_eth_rss_reta_entry64 *reta_conf,
2067                         uint16_t reta_size)
2068 {
2069         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2070         uint16_t i, j, idx, shift;
2071         uint8_t mask;
2072         uint32_t reta;
2073
2074         PMD_INIT_FUNC_TRACE();
2075
2076         if (reta_size > FM10K_MAX_RSS_INDICES) {
2077                 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
2078                         "(%d) doesn't match the number supported by hardware "
2079                         "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2080                 return -EINVAL;
2081         }
2082
2083         /*
2084          * Update Redirection Table RETA[n], n=0..31. The redirection table has
2085          * 128 entries in 32 registers
2086          */
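             /*
              * Each 32-bit RETA register packs CHARS_PER_UINT32 (4) one-byte
              * queue indices; entry j of a register occupies bits
              * CHAR_BIT * j .. CHAR_BIT * j + 7, hence the byte-wise shifts
              * and masks below.
              */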
2087         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2088                 idx = i / RTE_RETA_GROUP_SIZE;
2089                 shift = i % RTE_RETA_GROUP_SIZE;
2090                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2091                                 BIT_MASK_PER_UINT32);
2092                 if (mask == 0)
2093                         continue;
2094
2095                 reta = 0;
2096                 if (mask != BIT_MASK_PER_UINT32)
2097                         reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2098
2099                 for (j = 0; j < CHARS_PER_UINT32; j++) {
2100                         if (mask & (0x1 << j)) {
2101                                 if (mask != BIT_MASK_PER_UINT32)
2102                                         reta &= ~(UINT8_MAX << CHAR_BIT * j);
2103                                 reta |= reta_conf[idx].reta[shift + j] <<
2104                                                 (CHAR_BIT * j);
2105                         }
2106                 }
2107                 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
2108         }
2109
2110         return 0;
2111 }
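     /*
      * A minimal usage sketch from application code (port_id and the
      * four-queue spread are assumptions, not part of this driver): it
      * fills all 128 RETA entries round-robin over 4 Rx queues through
      * the generic ethdev API, which lands in fm10k_reta_update() above.
      *
      *     struct rte_eth_rss_reta_entry64 conf[FM10K_MAX_RSS_INDICES /
      *                                          RTE_RETA_GROUP_SIZE];
      *     uint16_t i;
      *
      *     memset(conf, 0, sizeof(conf));
      *     for (i = 0; i < FM10K_MAX_RSS_INDICES; i++) {
      *             conf[i / RTE_RETA_GROUP_SIZE].mask |=
      *                     1ULL << (i % RTE_RETA_GROUP_SIZE);
      *             conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
      *                     i % 4;
      *     }
      *     rte_eth_dev_rss_reta_update(port_id, conf, FM10K_MAX_RSS_INDICES);
      */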
2112
2113 static int
2114 fm10k_reta_query(struct rte_eth_dev *dev,
2115                         struct rte_eth_rss_reta_entry64 *reta_conf,
2116                         uint16_t reta_size)
2117 {
2118         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2119         uint16_t i, j, idx, shift;
2120         uint8_t mask;
2121         uint32_t reta;
2122
2123         PMD_INIT_FUNC_TRACE();
2124
2125         if (reta_size < FM10K_MAX_RSS_INDICES) {
2126                 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
2127                         "(%d) doesn't match the number supported by hardware "
2128                         "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2129                 return -EINVAL;
2130         }
2131
2132         /*
2133          * Read Redirection Table RETA[n], n=0..31. The redirection table has
2134          * 128 entries in 32 registers
2135          */
2136         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2137                 idx = i / RTE_RETA_GROUP_SIZE;
2138                 shift = i % RTE_RETA_GROUP_SIZE;
2139                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2140                                 BIT_MASK_PER_UINT32);
2141                 if (mask == 0)
2142                         continue;
2143
2144                 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2145                 for (j = 0; j < CHARS_PER_UINT32; j++) {
2146                         if (mask & (0x1 << j))
2147                                 reta_conf[idx].reta[shift + j] = ((reta >>
2148                                         CHAR_BIT * j) & UINT8_MAX);
2149                 }
2150         }
2151
2152         return 0;
2153 }
2154
2155 static int
2156 fm10k_rss_hash_update(struct rte_eth_dev *dev,
2157         struct rte_eth_rss_conf *rss_conf)
2158 {
2159         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2160         uint32_t *key = (uint32_t *)rss_conf->rss_key;
2161         uint32_t mrqc;
2162         uint64_t hf = rss_conf->rss_hf;
2163         int i;
2164
2165         PMD_INIT_FUNC_TRACE();
2166
2167         if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2168                                 FM10K_RSSRK_ENTRIES_PER_REG))
2169                 return -EINVAL;
2170
2171         if (hf == 0)
2172                 return -EINVAL;
2173
2174         mrqc = 0;
2175         mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
2176         mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
2177         mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
2178         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
2179         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
2180         mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
2181         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
2182         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
2183         mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
2184
2185         /* If the flags don't map to any supported hash type, bail out */
2186         if (mrqc == 0)
2187                 return -EINVAL;
2188
2189         if (key != NULL)
2190                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2191                         FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
2192
2193         FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
2194
2195         return 0;
2196 }
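     /*
      * A minimal usage sketch from application code (rss_key and port_id
      * are assumptions): program a full-length key (FM10K_RSSRK_SIZE *
      * FM10K_RSSRK_ENTRIES_PER_REG bytes) and hash IPv4/TCPv4 traffic:
      *
      *     uint8_t rss_key[FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG];
      *     struct rte_eth_rss_conf conf = {
      *             .rss_key = rss_key,
      *             .rss_key_len = sizeof(rss_key),
      *             .rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
      *     };
      *     rte_eth_dev_rss_hash_update(port_id, &conf);
      */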
2197
2198 static int
2199 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
2200         struct rte_eth_rss_conf *rss_conf)
2201 {
2202         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2203         uint32_t *key = (uint32_t *)rss_conf->rss_key;
2204         uint32_t mrqc;
2205         uint64_t hf;
2206         int i;
2207
2208         PMD_INIT_FUNC_TRACE();
2209
2210         if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2211                                 FM10K_RSSRK_ENTRIES_PER_REG))
2212                 return -EINVAL;
2213
2214         if (key != NULL)
2215                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2216                         key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
2217
2218         mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
2219         hf = 0;
2220         hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
2221         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
2222         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
2223         hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
2224         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
2225         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
2226         hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
2227         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
2228         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
2229
2230         rss_conf->rss_hf = hf;
2231
2232         return 0;
2233 }
2234
2235 static void
2236 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
2237 {
2238         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2239         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2240
2241         /* Bind all local non-queue interrupts to vector 0 */
2242         int_map |= FM10K_MISC_VEC_ID;
2243
2244         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2245         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2246         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2247         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2248         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2249         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2250
2251         /* Enable misc causes */
2252         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
2253                                 FM10K_EIMR_ENABLE(THI_FAULT) |
2254                                 FM10K_EIMR_ENABLE(FUM_FAULT) |
2255                                 FM10K_EIMR_ENABLE(MAILBOX) |
2256                                 FM10K_EIMR_ENABLE(SWITCHREADY) |
2257                                 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
2258                                 FM10K_EIMR_ENABLE(SRAMERROR) |
2259                                 FM10K_EIMR_ENABLE(VFLR));
2260
2261         /* Enable ITR 0 */
2262         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2263                                         FM10K_ITR_MASK_CLEAR);
2264         FM10K_WRITE_FLUSH(hw);
2265 }
2266
2267 static void
2268 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
2269 {
2270         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2271         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2272
2273         int_map |= FM10K_MISC_VEC_ID;
2274
2275         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2276         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2277         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2278         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2279         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2280         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2281
2282         /* Disable misc causes */
2283         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
2284                                 FM10K_EIMR_DISABLE(THI_FAULT) |
2285                                 FM10K_EIMR_DISABLE(FUM_FAULT) |
2286                                 FM10K_EIMR_DISABLE(MAILBOX) |
2287                                 FM10K_EIMR_DISABLE(SWITCHREADY) |
2288                                 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
2289                                 FM10K_EIMR_DISABLE(SRAMERROR) |
2290                                 FM10K_EIMR_DISABLE(VFLR));
2291
2292         /* Disable ITR 0 */
2293         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
2294         FM10K_WRITE_FLUSH(hw);
2295 }
2296
2297 static void
2298 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
2299 {
2300         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2301         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2302
2303         /* Bind all local non-queue interrupts to vector 0 */
2304         int_map |= FM10K_MISC_VEC_ID;
2305
2306         /* Only INT 0 is available; the other 15 are reserved. */
2307         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2308
2309         /* Enable ITR 0 */
2310         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2311                                         FM10K_ITR_MASK_CLEAR);
2312         FM10K_WRITE_FLUSH(hw);
2313 }
2314
2315 static void
2316 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
2317 {
2318         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2319         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2320
2321         int_map |= FM10K_MISC_VEC_ID;
2322
2323         /* Only INT 0 is available; the other 15 are reserved. */
2324         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2325
2326         /* Disable ITR 0 */
2327         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
2328         FM10K_WRITE_FLUSH(hw);
2329 }
2330
2331 static int
2332 fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2333 {
2334         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2335
2336         /* Enable ITR */
2337         if (hw->mac.type == fm10k_mac_pf)
2338                 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, queue_id)),
2339                         FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2340         else
2341                 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, queue_id)),
2342                         FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2343         rte_intr_enable(&dev->pci_dev->intr_handle);
2344         return 0;
2345 }
2346
2347 static int
2348 fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2349 {
2350         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2351
2352         /* Disable ITR */
2353         if (hw->mac.type == fm10k_mac_pf)
2354                 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, queue_id)),
2355                         FM10K_ITR_MASK_SET);
2356         else
2357                 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, queue_id)),
2358                         FM10K_ITR_MASK_SET);
2359         return 0;
2360 }
2361
2362 static int
2363 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2364 {
2365         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2366         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
2367         uint32_t intr_vector, vec;
2368         uint16_t queue_id;
2369         int result = 0;
2370
2371         /* fm10k needs one separate interrupt for the mailbox,
2372          * so only drivers that support multiple interrupt vectors,
2373          * e.g. vfio-pci, can work in fm10k interrupt mode
2374          */
2375         if (!rte_intr_cap_multiple(intr_handle) ||
2376                         dev->data->dev_conf.intr_conf.rxq == 0)
2377                 return result;
2378
2379         intr_vector = dev->data->nb_rx_queues;
2380
2381         /* disable interrupt first */
2382         rte_intr_disable(&dev->pci_dev->intr_handle);
2383         if (hw->mac.type == fm10k_mac_pf)
2384                 fm10k_dev_disable_intr_pf(dev);
2385         else
2386                 fm10k_dev_disable_intr_vf(dev);
2387
2388         if (rte_intr_efd_enable(intr_handle, intr_vector)) {
2389                 PMD_INIT_LOG(ERR, "Failed to init event fd");
2390                 result = -EIO;
2391         }
2392
2393         if (rte_intr_dp_is_en(intr_handle) && !result) {
2394                 intr_handle->intr_vec = rte_zmalloc("intr_vec",
2395                         dev->data->nb_rx_queues * sizeof(int), 0);
2396                 if (intr_handle->intr_vec) {
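                             /*
                              * Assign one event fd vector per Rx queue starting
                              * at FM10K_RX_VEC_START; once nb_efd vectors are
                              * used up, the remaining queues all share the
                              * last vector.
                              */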
2397                         for (queue_id = 0, vec = FM10K_RX_VEC_START;
2398                                         queue_id < dev->data->nb_rx_queues;
2399                                         queue_id++) {
2400                                 intr_handle->intr_vec[queue_id] = vec;
2401                                 if (vec < intr_handle->nb_efd - 1
2402                                                 + FM10K_RX_VEC_START)
2403                                         vec++;
2404                         }
2405                 } else {
2406                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2407                                 " intr_vec", dev->data->nb_rx_queues);
2408                         rte_intr_efd_disable(intr_handle);
2409                         result = -ENOMEM;
2410                 }
2411         }
2412
2413         if (hw->mac.type == fm10k_mac_pf)
2414                 fm10k_dev_enable_intr_pf(dev);
2415         else
2416                 fm10k_dev_enable_intr_vf(dev);
2417         rte_intr_enable(&dev->pci_dev->intr_handle);
2418         hw->mac.ops.update_int_moderator(hw);
2419         return result;
2420 }
2421
2422 static int
2423 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
2424 {
2425         struct fm10k_fault fault;
2426         int err;
2427         const char *estr = "Unknown error";
2428
2429         /* Process PCA fault */
2430         if (eicr & FM10K_EICR_PCA_FAULT) {
2431                 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
2432                 if (err)
2433                         goto error;
2434                 switch (fault.type) {
2435                 case PCA_NO_FAULT:
2436                         estr = "PCA_NO_FAULT"; break;
2437                 case PCA_UNMAPPED_ADDR:
2438                         estr = "PCA_UNMAPPED_ADDR"; break;
2439                 case PCA_BAD_QACCESS_PF:
2440                         estr = "PCA_BAD_QACCESS_PF"; break;
2441                 case PCA_BAD_QACCESS_VF:
2442                         estr = "PCA_BAD_QACCESS_VF"; break;
2443                 case PCA_MALICIOUS_REQ:
2444                         estr = "PCA_MALICIOUS_REQ"; break;
2445                 case PCA_POISONED_TLP:
2446                         estr = "PCA_POISONED_TLP"; break;
2447                 case PCA_TLP_ABORT:
2448                         estr = "PCA_TLP_ABORT"; break;
2449                 default:
2450                         goto error;
2451                 }
2452                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2453                         estr, fault.func ? "VF" : "PF", fault.func,
2454                         fault.address, fault.specinfo);
2455         }
2456
2457         /* Process THI fault */
2458         if (eicr & FM10K_EICR_THI_FAULT) {
2459                 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
2460                 if (err)
2461                         goto error;
2462                 switch (fault.type) {
2463                 case THI_NO_FAULT:
2464                         estr = "THI_NO_FAULT"; break;
2465                 case THI_MAL_DIS_Q_FAULT:
2466                         estr = "THI_MAL_DIS_Q_FAULT"; break;
2467                 default:
2468                         goto error;
2469                 }
2470                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2471                         estr, fault.func ? "VF" : "PF", fault.func,
2472                         fault.address, fault.specinfo);
2473         }
2474
2475         /* Process FUM fault */
2476         if (eicr & FM10K_EICR_FUM_FAULT) {
2477                 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
2478                 if (err)
2479                         goto error;
2480                 switch (fault.type) {
2481                 case FUM_NO_FAULT:
2482                         estr = "FUM_NO_FAULT"; break;
2483                 case FUM_UNMAPPED_ADDR:
2484                         estr = "FUM_UNMAPPED_ADDR"; break;
2485                 case FUM_POISONED_TLP:
2486                         estr = "FUM_POISONED_TLP"; break;
2487                 case FUM_BAD_VF_QACCESS:
2488                         estr = "FUM_BAD_VF_QACCESS"; break;
2489                 case FUM_ADD_DECODE_ERR:
2490                         estr = "FUM_ADD_DECODE_ERR"; break;
2491                 case FUM_RO_ERROR:
2492                         estr = "FUM_RO_ERROR"; break;
2493                 case FUM_QPRC_CRC_ERROR:
2494                         estr = "FUM_QPRC_CRC_ERROR"; break;
2495                 case FUM_CSR_TIMEOUT:
2496                         estr = "FUM_CSR_TIMEOUT"; break;
2497                 case FUM_INVALID_TYPE:
2498                         estr = "FUM_INVALID_TYPE"; break;
2499                 case FUM_INVALID_LENGTH:
2500                         estr = "FUM_INVALID_LENGTH"; break;
2501                 case FUM_INVALID_BE:
2502                         estr = "FUM_INVALID_BE"; break;
2503                 case FUM_INVALID_ALIGN:
2504                         estr = "FUM_INVALID_ALIGN"; break;
2505                 default:
2506                         goto error;
2507                 }
2508                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2509                         estr, fault.func ? "VF" : "PF", fault.func,
2510                         fault.address, fault.specinfo);
2511         }
2512
2513         return 0;
2514 error:
2515         PMD_INIT_LOG(ERR, "Failed to handle fault event.");
2516         return err;
2517 }
2518
2519 /**
2520  * PF interrupt handler triggered by NIC for handling specific interrupt.
2521  *
2522  * @param handle
2523  *  Pointer to interrupt handle.
2524  * @param param
2525  *  The address of the parameter (struct rte_eth_dev *) registered before.
2526  *
2527  * @return
2528  *  void
2529  */
2530 static void
2531 fm10k_dev_interrupt_handler_pf(
2532                         __rte_unused struct rte_intr_handle *handle,
2533                         void *param)
2534 {
2535         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2536         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2537         uint32_t cause, status;
2538
2539         if (hw->mac.type != fm10k_mac_pf)
2540                 return;
2541
2542         cause = FM10K_READ_REG(hw, FM10K_EICR);
2543
2544         /* Handle PCI fault cases */
2545         if (cause & FM10K_EICR_FAULT_MASK) {
2546                 PMD_INIT_LOG(ERR, "INT: fault detected!");
2547                 fm10k_dev_handle_fault(hw, cause);
2548         }
2549
2550         /* Handle switch up/down */
2551         if (cause & FM10K_EICR_SWITCHNOTREADY)
2552                 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
2553
2554         if (cause & FM10K_EICR_SWITCHREADY)
2555                 PMD_INIT_LOG(INFO, "INT: Switch is ready");
2556
2557         /* Handle mailbox message */
2558         fm10k_mbx_lock(hw);
2559         hw->mbx.ops.process(hw, &hw->mbx);
2560         fm10k_mbx_unlock(hw);
2561
2562         /* Handle SRAM error */
2563         if (cause & FM10K_EICR_SRAMERROR) {
2564                 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
2565
2566                 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
2567                 /* Write to clear pending bits */
2568                 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
2569
2570                 /* TODO: print out error message after shared code updates */
2571         }
2572
2573         /* Clear these 3 events if any are pending */
2574         cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
2575                  FM10K_EICR_SWITCHREADY;
2576         if (cause)
2577                 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
2578
2579         /* Re-enable interrupt from device side */
2580         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2581                                         FM10K_ITR_MASK_CLEAR);
2582         /* Re-enable interrupt from host side */
2583         rte_intr_enable(&(dev->pci_dev->intr_handle));
2584 }
2585
2586 /**
2587  * VF interrupt handler triggered by NIC for handling specific interrupt.
2588  *
2589  * @param handle
2590  *  Pointer to interrupt handle.
2591  * @param param
2592  *  The address of the parameter (struct rte_eth_dev *) registered before.
2593  *
2594  * @return
2595  *  void
2596  */
2597 static void
2598 fm10k_dev_interrupt_handler_vf(
2599                         __rte_unused struct rte_intr_handle *handle,
2600                         void *param)
2601 {
2602         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2603         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2604
2605         if (hw->mac.type != fm10k_mac_vf)
2606                 return;
2607
2608         /* Handle mailbox message if lock is acquired */
2609         fm10k_mbx_lock(hw);
2610         hw->mbx.ops.process(hw, &hw->mbx);
2611         fm10k_mbx_unlock(hw);
2612
2613         /* Re-enable interrupt from device side */
2614         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2615                                         FM10K_ITR_MASK_CLEAR);
2616         /* Re-enable interrupt from host side */
2617         rte_intr_enable(&(dev->pci_dev->intr_handle));
2618 }
2619
2620 /* Mailbox message handler in VF */
2621 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
2622         FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
2623         FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
2624         FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
2625         FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2626 };
2627
2628 static int
2629 fm10k_setup_mbx_service(struct fm10k_hw *hw)
2630 {
2631         int err = 0;
2632
2633         /* Initialize mailbox lock */
2634         fm10k_mbx_initlock(hw);
2635
2636         /* Replace default message handler with new ones */
2637         if (hw->mac.type == fm10k_mac_vf)
2638                 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2639
2640         if (err) {
2641                 PMD_INIT_LOG(ERR, "Failed to register mailbox handler, err: %d",
2642                                 err);
2643                 return err;
2644         }
2645         /* Connect to SM for PF device or PF for VF device */
2646         return hw->mbx.ops.connect(hw, &hw->mbx);
2647 }
2648
2649 static void
2650 fm10k_close_mbx_service(struct fm10k_hw *hw)
2651 {
2652         /* Disconnect from SM for PF device or PF for VF device */
2653         hw->mbx.ops.disconnect(hw, &hw->mbx);
2654 }
2655
2656 static const struct eth_dev_ops fm10k_eth_dev_ops = {
2657         .dev_configure          = fm10k_dev_configure,
2658         .dev_start              = fm10k_dev_start,
2659         .dev_stop               = fm10k_dev_stop,
2660         .dev_close              = fm10k_dev_close,
2661         .promiscuous_enable     = fm10k_dev_promiscuous_enable,
2662         .promiscuous_disable    = fm10k_dev_promiscuous_disable,
2663         .allmulticast_enable    = fm10k_dev_allmulticast_enable,
2664         .allmulticast_disable   = fm10k_dev_allmulticast_disable,
2665         .stats_get              = fm10k_stats_get,
2666         .xstats_get             = fm10k_xstats_get,
2667         .xstats_get_names       = fm10k_xstats_get_names,
2668         .stats_reset            = fm10k_stats_reset,
2669         .xstats_reset           = fm10k_stats_reset,
2670         .link_update            = fm10k_link_update,
2671         .dev_infos_get          = fm10k_dev_infos_get,
2672         .dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
2673         .vlan_filter_set        = fm10k_vlan_filter_set,
2674         .vlan_offload_set       = fm10k_vlan_offload_set,
2675         .mac_addr_add           = fm10k_macaddr_add,
2676         .mac_addr_remove        = fm10k_macaddr_remove,
2677         .rx_queue_start         = fm10k_dev_rx_queue_start,
2678         .rx_queue_stop          = fm10k_dev_rx_queue_stop,
2679         .tx_queue_start         = fm10k_dev_tx_queue_start,
2680         .tx_queue_stop          = fm10k_dev_tx_queue_stop,
2681         .rx_queue_setup         = fm10k_rx_queue_setup,
2682         .rx_queue_release       = fm10k_rx_queue_release,
2683         .tx_queue_setup         = fm10k_tx_queue_setup,
2684         .tx_queue_release       = fm10k_tx_queue_release,
2685         .rx_descriptor_done     = fm10k_dev_rx_descriptor_done,
2686         .rx_queue_intr_enable   = fm10k_dev_rx_queue_intr_enable,
2687         .rx_queue_intr_disable  = fm10k_dev_rx_queue_intr_disable,
2688         .reta_update            = fm10k_reta_update,
2689         .reta_query             = fm10k_reta_query,
2690         .rss_hash_update        = fm10k_rss_hash_update,
2691         .rss_hash_conf_get      = fm10k_rss_hash_conf_get,
2692 };
2693
2694 static int ftag_check_handler(__rte_unused const char *key,
2695                 const char *value, __rte_unused void *opaque)
2696 {
2697         if (strcmp(value, "1"))
2698                 return -1;
2699
2700         return 0;
2701 }
2702
2703 static int
2704 fm10k_check_ftag(struct rte_devargs *devargs)
2705 {
2706         struct rte_kvargs *kvlist;
2707         const char *ftag_key = "enable_ftag";
2708
2709         if (devargs == NULL)
2710                 return 0;
2711
2712         kvlist = rte_kvargs_parse(devargs->args, NULL);
2713         if (kvlist == NULL)
2714                 return 0;
2715
2716         if (!rte_kvargs_count(kvlist, ftag_key)) {
2717                 rte_kvargs_free(kvlist);
2718                 return 0;
2719         }
2720         /* FTAG is enabled when there's key-value pair: enable_ftag=1 */
2721         if (rte_kvargs_process(kvlist, ftag_key,
2722                                 ftag_check_handler, NULL) < 0) {
2723                 rte_kvargs_free(kvlist);
2724                 return 0;
2725         }
2726         rte_kvargs_free(kvlist);
2727
2728         return 1;
2729 }
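     /*
      * FTAG is requested through a device argument; e.g. (the PCI address
      * is purely illustrative):
      *
      *     testpmd -w 0000:84:00.0,enable_ftag=1 -- -i
      */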
2730
2731 static void __attribute__((cold))
2732 fm10k_set_tx_function(struct rte_eth_dev *dev)
2733 {
2734         struct fm10k_tx_queue *txq;
2735         int i;
2736         int use_sse = 1;
2737         uint16_t tx_ftag_en = 0;
2738
2739         if (fm10k_check_ftag(dev->pci_dev->device.devargs))
2740                 tx_ftag_en = 1;
2741
2742         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2743                 txq = dev->data->tx_queues[i];
2744                 txq->tx_ftag_en = tx_ftag_en;
2745                 /* Check whether this queue meets the vector Tx conditions */
2746                 if (fm10k_tx_vec_condition_check(txq))
2747                         use_sse = 0;
2748         }
2749
2750         if (use_sse) {
2751                 PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2752                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2753                         txq = dev->data->tx_queues[i];
2754                         fm10k_txq_vec_setup(txq);
2755                 }
2756                 dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2757         } else {
2758                 dev->tx_pkt_burst = fm10k_xmit_pkts;
2759                 PMD_INIT_LOG(DEBUG, "Use regular Tx func");
2760         }
2761 }

static void __attribute__((cold))
fm10k_set_rx_function(struct rte_eth_dev *dev)
{
	struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
	uint16_t i, rx_using_sse;
	uint16_t rx_ftag_en = 0;

	if (fm10k_check_ftag(dev->pci_dev->device.devargs))
		rx_ftag_en = 1;

	/* Vector Rx is allowed only when the per-device condition check
	 * passes, the configuration permits it and FTAG is not enabled.
	 */
	if (!fm10k_rx_vec_condition_check(dev) &&
			dev_info->rx_vec_allowed && !rx_ftag_en) {
		if (dev->data->scattered_rx)
			dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
		else
			dev->rx_pkt_burst = fm10k_recv_pkts_vec;
	} else if (dev->data->scattered_rx)
		dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = fm10k_recv_pkts;

	rx_using_sse =
		(dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
		dev->rx_pkt_burst == fm10k_recv_pkts_vec);

	if (rx_using_sse)
		PMD_INIT_LOG(DEBUG, "Use vector Rx func");
	else
		PMD_INIT_LOG(DEBUG, "Use regular Rx func");

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];

		rxq->rx_using_sse = rx_using_sse;
		rxq->rx_ftag_en = rx_ftag_en;
	}
}

static void
fm10k_params_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_dev_info *info = FM10K_DEV_PRIVATE_TO_INFO(dev);

	/* Initialize bus info. Normally we would call fm10k_get_bus_info(),
	 * but there is no way to get link status without reading BAR4.
	 * Until this works, assume we have maximum bandwidth.
	 * @todo - fix bus info
	 */
	hw->bus_caps.speed = fm10k_bus_speed_8000;
	hw->bus_caps.width = fm10k_bus_width_pcie_x8;
	hw->bus_caps.payload = fm10k_bus_payload_512;
	hw->bus.speed = fm10k_bus_speed_8000;
	hw->bus.width = fm10k_bus_width_pcie_x8;
	hw->bus.payload = fm10k_bus_payload_256;

	info->rx_vec_allowed = true;
}

static int
eth_fm10k_dev_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int diag, i;
	struct fm10k_macvlan_filter_info *macvlan;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &fm10k_eth_dev_ops;
	dev->rx_pkt_burst = &fm10k_recv_pkts;
	dev->tx_pkt_burst = &fm10k_xmit_pkts;

	/* only initialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(dev, dev->pci_dev);

	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
	memset(macvlan, 0, sizeof(*macvlan));
	/* Vendor and Device ID need to be set before init of shared code */
	memset(hw, 0, sizeof(*hw));
	hw->device_id = dev->pci_dev->id.device_id;
	hw->vendor_id = dev->pci_dev->id.vendor_id;
	hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
	hw->revision_id = 0;
	hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
	if (hw->hw_addr == NULL) {
		PMD_INIT_LOG(ERR, "Bad mem resource."
			" Try to blacklist unused devices.");
		return -EIO;
	}

	/* Store fm10k_adapter pointer */
	hw->back = dev->data->dev_private;

	/* Initialize the shared code */
	diag = fm10k_init_shared_code(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	/* Initialize parameters */
	fm10k_params_init(dev);

	/* Initialize the hw */
	diag = fm10k_init_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
		return -EIO;
	}

	/* Initialize MAC address(es) */
	dev->data->mac_addrs = rte_zmalloc("fm10k",
			ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
		return -ENOMEM;
	}

	diag = fm10k_read_mac_addr(hw);

	ether_addr_copy((const struct ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	if (diag != FM10K_SUCCESS ||
		!is_valid_assigned_ether_addr(dev->data->mac_addrs)) {

		/* Generate a random addr */
		eth_random_addr(hw->mac.addr);
		memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
		ether_addr_copy((const struct ether_addr *)hw->mac.addr,
				&dev->data->mac_addrs[0]);
	}

	/* Reset the hw statistics */
	fm10k_stats_reset(dev);

	/* Reset the hw */
	diag = fm10k_reset_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
		return -EIO;
	}

	/* Setup mailbox service */
	diag = fm10k_setup_mbx_service(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
		return -EIO;
	}

	/* PF and VF have different interrupt handling mechanisms */
	if (hw->mac.type == fm10k_mac_pf) {
		/* register callback function with the EAL */
		rte_intr_callback_register(&(dev->pci_dev->intr_handle),
			fm10k_dev_interrupt_handler_pf, (void *)dev);

		/* enable MISC interrupt */
		fm10k_dev_enable_intr_pf(dev);
	} else { /* VF */
		rte_intr_callback_register(&(dev->pci_dev->intr_handle),
			fm10k_dev_interrupt_handler_vf, (void *)dev);

		fm10k_dev_enable_intr_vf(dev);
	}

	/* Enable interrupts only after the callback is registered */
	rte_intr_enable(&(dev->pci_dev->intr_handle));

	hw->mac.ops.update_int_moderator(hw);

	/* Make sure the Switch Manager is ready before going forward. */
	if (hw->mac.type == fm10k_mac_pf) {
		int switch_ready = 0;

		for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
			fm10k_mbx_lock(hw);
			hw->mac.ops.get_host_state(hw, &switch_ready);
			fm10k_mbx_unlock(hw);
			if (switch_ready)
				break;
			/* Delay some time to acquire async LPORT_MAP info. */
			rte_delay_us(WAIT_SWITCH_MSG_US);
		}

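		/* With MAX_QUERY_SWITCH_STATE_TIMES (10) polls spaced
		 * WAIT_SWITCH_MSG_US (100 ms) apart, the wait above is
		 * bounded at roughly one second before giving up.
		 */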
		if (switch_ready == 0) {
			PMD_INIT_LOG(ERR, "switch is not ready");
			return -1;
		}
	}

	/*
	 * The calls below operate on the mailbox, so take the lock to avoid
	 * racing with the interrupt handler: an operation on the mailbox FIFO
	 * raises an interrupt to the PF/SM, whose handler processes the
	 * message and raises an interrupt back to our side, which in turn
	 * touches the mailbox FIFO again.
	 */
	fm10k_mbx_lock(hw);
	/* Enable port first */
	hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
					MAX_LPORT_NUM, 1);

	/* Set unicast mode by default. The application can switch to another
	 * mode later through the ethdev API.
	 */
	hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
					FM10K_XCAST_MODE_NONE);

	fm10k_mbx_unlock(hw);
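
	/*
	 * Illustrative note, not an init step: after start-up the application
	 * can replace the unicast-only mode set above through the generic
	 * ethdev API, e.g. rte_eth_promiscuous_enable(port_id) or
	 * rte_eth_allmulticast_enable(port_id), which reach this driver via
	 * its dev_ops.
	 */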

	/* Make sure the default VID is ready before going forward. */
	if (hw->mac.type == fm10k_mac_pf) {
		for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
			if (hw->mac.default_vid)
				break;
			/* Delay some time to acquire async port VLAN info. */
			rte_delay_us(WAIT_SWITCH_MSG_US);
		}

		if (!hw->mac.default_vid) {
			PMD_INIT_LOG(ERR, "default VID is not ready");
			return -1;
		}
	}

	/* Add the default MAC address */
	fm10k_MAC_filter_set(dev, hw->mac.addr, true,
		MAIN_VSI_POOL_NUMBER);

	return 0;
}

static int
eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	fm10k_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	/* disable uio/vfio intr */
	rte_intr_disable(&(dev->pci_dev->intr_handle));

	/* PF and VF have different interrupt handling mechanisms */
	if (hw->mac.type == fm10k_mac_pf) {
		/* disable interrupt */
		fm10k_dev_disable_intr_pf(dev);

		/* unregister callback function from the EAL */
		rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
			fm10k_dev_interrupt_handler_pf, (void *)dev);
	} else {
		/* disable interrupt */
		fm10k_dev_disable_intr_vf(dev);

		rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
			fm10k_dev_interrupt_handler_vf, (void *)dev);
	}

	/* free MAC address memory */
	if (dev->data->mac_addrs) {
		rte_free(dev->data->mac_addrs);
		dev->data->mac_addrs = NULL;
	}

	memset(hw, 0, sizeof(*hw));

	return 0;
}

/*
 * The set of PCI devices this driver supports. This driver will enable both
 * PF and SRIOV-VF devices.
 */
static const struct rte_pci_id pci_id_fm10k_map[] = {
	{ RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) },
	{ RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) },
	{ RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct eth_driver rte_pmd_fm10k = {
	.pci_drv = {
		.id_table = pci_id_fm10k_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_DETACHABLE,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = eth_fm10k_dev_init,
	.eth_dev_uninit = eth_fm10k_dev_uninit,
	.dev_private_size = sizeof(struct fm10k_adapter),
};

DRIVER_REGISTER_PCI(net_fm10k, rte_pmd_fm10k.pci_drv);
DRIVER_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);