drivers/net/fm10k/fm10k_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>
#include <rte_kvargs.h>

#include "fm10k.h"
#include "base/fm10k_api.h"

/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL

#define MAIN_VSI_POOL_NUMBER 0

/* Max number of attempts to query switch status */
#define MAX_QUERY_SWITCH_STATE_TIMES 10
/* Wait interval to get switch status */
#define WAIT_SWITCH_MSG_US    100000
/* A period of quiescence for switch */
#define FM10K_SWITCH_QUIESCE_US 10000
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
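/* For reference: on the usual ABIs sizeof(uint32_t) is 4, so
 * CHARS_PER_UINT32 evaluates to 4 and BIT_MASK_PER_UINT32 to
 * (1 << 4) - 1 = 0xF, i.e. one mask bit per byte of a uint32_t.
 */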

/* default 1:1 map from queue ID to interrupt vector ID */
#define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])
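/* Usage sketch: Q2V(pdev, 3) expands to pdev->intr_handle.intr_vec[3],
 * i.e. whatever interrupt vector was previously assigned to RX queue 3
 * when the interrupt handle was set up.
 */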

/* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
#define MAX_LPORT_NUM    128
#define GLORT_FD_Q_BASE  0x40
#define GLORT_PF_MASK    0xFFC0
#define GLORT_FD_MASK    GLORT_PF_MASK
#define GLORT_FD_INDEX   GLORT_FD_Q_BASE
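/* Layout sketch: GLORT_PF_MASK (0xFFC0) keeps the upper 10 bits, so the
 * low 6 bits address one of 64 logical ports. PF/VMDQ ports occupy
 * offsets 0x00-0x3F of the glort base, and Flow Director entries start
 * at offset GLORT_FD_Q_BASE (0x40), giving MAX_LPORT_NUM = 128 in total.
 */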

static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
        const u8 *mac, bool add, uint32_t pool);
static void fm10k_tx_queue_release(void *queue);
static void fm10k_rx_queue_release(void *queue);
static void fm10k_set_rx_function(struct rte_eth_dev *dev);
static void fm10k_set_tx_function(struct rte_eth_dev *dev);
static int fm10k_check_ftag(struct rte_devargs *devargs);
static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);

struct fm10k_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
        {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
        {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
        {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
        {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
        {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
        {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
        {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
        {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
                nodesc_drop)},
};

#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
                sizeof(fm10k_hw_stats_strings[0]))

struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
        {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
        {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
        {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
};

#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
                sizeof(fm10k_hw_stats_rx_q_strings[0]))

struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
        {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
        {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
};

#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
                sizeof(fm10k_hw_stats_tx_q_strings[0]))

#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
                (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
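
/* Counting sketch: with the 8 global counters above and 3 RX + 2 TX
 * counters per queue, FM10K_NB_XSTATS comes to
 * 8 + FM10K_MAX_QUEUES_PF * 5 entries (FM10K_MAX_QUEUES_PF is defined
 * in fm10k.h).
 */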
static int
fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);

static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
        rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

static void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
        while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
                rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}

static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
        rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}
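
/* Usage pattern (as seen throughout this file): every mailbox call is
 * bracketed by the lock helpers, e.g.
 *
 *     fm10k_mbx_lock(hw);
 *     hw->mac.ops.update_xcast_mode(hw, glort, mode);
 *     fm10k_mbx_unlock(hw);
 *
 * The trylock/delay loop above spins in FM10K_MBXLOCK_DELAY_US (20 us)
 * steps rather than blocking on the spinlock directly.
 */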

/* Stubs needed for linkage when vPMD is disabled */
int __attribute__((weak))
fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
{
        return -1;
}

uint16_t __attribute__((weak))
fm10k_recv_pkts_vec(
        __rte_unused void *rx_queue,
        __rte_unused struct rte_mbuf **rx_pkts,
        __rte_unused uint16_t nb_pkts)
{
        return 0;
}

uint16_t __attribute__((weak))
fm10k_recv_scattered_pkts_vec(
                __rte_unused void *rx_queue,
                __rte_unused struct rte_mbuf **rx_pkts,
                __rte_unused uint16_t nb_pkts)
{
        return 0;
}

int __attribute__((weak))
fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
{
        return -1;
}

void __attribute__((weak))
fm10k_rx_queue_release_mbufs_vec(
                __rte_unused struct fm10k_rx_queue *rxq)
{
        return;
}

void __attribute__((weak))
fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
{
        return;
}

int __attribute__((weak))
fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
{
        return -1;
}

uint16_t __attribute__((weak))
fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
                           __rte_unused struct rte_mbuf **tx_pkts,
                           __rte_unused uint16_t nb_pkts)
{
        return 0;
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device.
 * return 0 on success
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
        static const union fm10k_rx_desc zero = {{0} };
        uint64_t dma_addr;
        int i, diag;
        PMD_INIT_FUNC_TRACE();

        diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
        if (diag != 0)
                return -ENOMEM;

        for (i = 0; i < q->nb_desc; ++i) {
                fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
                if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
                        rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
                                                q->nb_desc);
                        return -EINVAL;
                }
                dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
                q->hw_ring[i].q.pkt_addr = dma_addr;
                q->hw_ring[i].q.hdr_addr = dma_addr;
        }

        /* initialize extra software ring entries. Space for these extra
         * entries is always allocated.
         */
        memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
        for (i = 0; i < q->nb_fake_desc; ++i) {
                q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
                q->hw_ring[q->nb_desc + i] = zero;
        }

        q->next_dd = 0;
        q->next_alloc = 0;
        q->next_trigger = q->alloc_thresh - 1;
        FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
        q->rxrearm_start = 0;
        q->rxrearm_nb = 0;

        return 0;
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device.
 */
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
        union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
        uint32_t i;
        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* zero faked descriptors */
        for (i = 0; i < q->nb_fake_desc; ++i)
                q->hw_ring[q->nb_desc + i] = zero;

        /* vPMD driver has a different way of releasing mbufs. */
        if (q->rx_using_sse) {
                fm10k_rx_queue_release_mbufs_vec(q);
                return;
        }

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}

/*
 * free all queue memory used when releasing the queue (i.e. on reconfigure)
 */
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
                rx_queue_clean(q);
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
                q = NULL;
        }
}

/*
 * disable RX queue, wait until HW finishes the necessary flush operation
 */
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
                        reg & ~FM10K_RXQCTL_ENABLE);

        /* Wait 100us at most */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
                if (!(reg & FM10K_RXQCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}

/*
 * reset queue to its initial state; used when starting the device
 */
static inline void
tx_queue_reset(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        q->last_free = 0;
        q->next_free = 0;
        q->nb_used = 0;
        q->nb_free = q->nb_desc - 1;
        fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
        FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device
 */
static inline void
tx_queue_clean(struct fm10k_tx_queue *q)
{
        struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
        uint32_t i;
        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}

/*
 * free all queue memory used when releasing the queue (i.e. on reconfigure)
 */
static inline void
tx_queue_free(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
                tx_queue_clean(q);
                if (q->rs_tracker.list) {
                        rte_free(q->rs_tracker.list);
                        q->rs_tracker.list = NULL;
                }
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
                q = NULL;
        }
}

/*
 * disable TX queue, wait until HW finishes the necessary flush operation
 */
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
                        reg & ~FM10K_TXDCTL_ENABLE);

        /* Wait 100us at most */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
                if (!(reg & FM10K_TXDCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}

static int
fm10k_check_mq_mode(struct rte_eth_dev *dev)
{
        enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;

        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
                PMD_INIT_LOG(ERR, "DCB mode is not supported.");
                return -EINVAL;
        }

        if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
                return 0;

        if (hw->mac.type == fm10k_mac_vf) {
                PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
                return -EINVAL;
        }

        /* Check VMDQ queue pool number */
        if (vmdq_conf->nb_queue_pools >
                        sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
                        vmdq_conf->nb_queue_pools > nb_rx_q) {
                PMD_INIT_LOG(ERR, "Too many queue pools: %d",
                        vmdq_conf->nb_queue_pools);
                return -EINVAL;
        }

        return 0;
}

static const struct fm10k_txq_ops def_txq_ops = {
        .reset = tx_queue_reset,
};

static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
        int ret;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
                PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
        /* multiple queue mode checking */
        ret = fm10k_check_mq_mode(dev);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
                            ret);
                return ret;
        }

        return 0;
}

/* fls = find last set bit = 32 minus the number of leading zeros */
#ifndef fls
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
#endif
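
/* Worked examples for the macro above: fls(0) = 0, fls(1) = 1 and
 * fls(8) = 4, since __builtin_clz(8) on a 32-bit int is 28. The code
 * below relies on fls(n - 1) as a ceil(log2(n)) for n >= 1, e.g.
 * fls(4 - 1) = 2 bits are enough to index 4 queue pools.
 */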

static void
fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        uint32_t i;

        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
                if (!vmdq_conf->pool_map[i].pools)
                        continue;
                fm10k_mbx_lock(hw);
                fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
                fm10k_mbx_unlock(hw);
        }
}

static void
fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Add default mac address */
        fm10k_MAC_filter_set(dev, hw->mac.addr, true,
                MAIN_VSI_POOL_NUMBER);
}

static void
fm10k_dev_rss_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        uint32_t mrqc, *key, i, reta, j;
        uint64_t hf;

#define RSS_KEY_SIZE 40
        static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
                0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
                0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
                0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
        };

        if (dev->data->nb_rx_queues == 1 ||
            dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
            dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
                FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
                return;
        }

        /* use rss_intel_key (default) or the user-provided key (rss_key) */
        if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
                key = (uint32_t *)rss_intel_key;
        else
                key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;

        /* Now fill our hash function seeds, 4 bytes at a time */
        for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
                FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

        /*
         * Fill in redirection table
         * The byte-swap is needed because NIC registers are in
         * little-endian order.
         */
        reta = 0;
        for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
                if (j == dev->data->nb_rx_queues)
                        j = 0;
                reta = (reta << CHAR_BIT) | j;
                if ((i & 3) == 3)
                        FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
                                        rte_bswap32(reta));
        }
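        /* Packing sketch for the loop above: each 32-bit RETA register
         * holds four 8-bit queue indices. With 4 RX queues the first
         * register accumulates 0x00010203 before the byte swap, i.e.
         * queues 0..3 round-robin, and the pattern repeats across
         * FM10K_MAX_RSS_INDICES entries.
         */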

        /*
         * Generate RSS hash based on packet types, TCP/UDP
         * port numbers and/or IPv4/v6 src and dst addresses
         */
        hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
        mrqc = 0;
        mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
        mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;

        if (mrqc == 0) {
                PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not "
                        "supported", hf);
                return;
        }

        FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
}

static void
fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t i;

        for (i = 0; i < nb_lport_new; i++) {
                /* Set unicast mode by default. The application can switch
                 * to another mode through other API calls.
                 */
                fm10k_mbx_lock(hw);
                hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
                        FM10K_XCAST_MODE_NONE);
                fm10k_mbx_unlock(hw);
        }
}

static void
fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        struct fm10k_macvlan_filter_info *macvlan;
        uint16_t nb_queue_pools = 0; /* pool number in configuration */
        uint16_t nb_lport_new;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        fm10k_dev_rss_configure(dev);

        /* only PF supports VMDQ */
        if (hw->mac.type != fm10k_mac_pf)
                return;

        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
                nb_queue_pools = vmdq_conf->nb_queue_pools;

        /* no pool number change, no need to update logic port and VLAN/MAC */
        if (macvlan->nb_queue_pools == nb_queue_pools)
                return;

        nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
        fm10k_dev_logic_port_update(dev, nb_lport_new);

        /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
        memset(dev->data->mac_addrs, 0,
                ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
        ether_addr_copy((const struct ether_addr *)hw->mac.addr,
                &dev->data->mac_addrs[0]);
        memset(macvlan, 0, sizeof(*macvlan));
        macvlan->nb_queue_pools = nb_queue_pools;

        if (nb_queue_pools)
                fm10k_dev_vmdq_rx_configure(dev);
        else
                fm10k_dev_pf_main_vsi_reset(dev);
}

static int
fm10k_dev_tx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, ret;
        struct fm10k_tx_queue *txq;
        uint64_t base_addr;
        uint32_t size;

        /* Disable TXINT to avoid possible interrupt */
        for (i = 0; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_TXINT(i),
                                3 << FM10K_TXINT_TIMER_SHIFT);

        /* Setup TX queue */
        for (i = 0; i < dev->data->nb_tx_queues; ++i) {
                txq = dev->data->tx_queues[i];
                base_addr = txq->hw_ring_phys_addr;
                size = txq->nb_desc * sizeof(struct fm10k_tx_desc);

                /* disable queue to avoid issues while updating state */
                ret = tx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }
                /* Enable use of FTAG bit in TX descriptor, PFVTCTL
                 * register is read-only for VF.
                 */
                if (fm10k_check_ftag(dev->device->devargs)) {
                        if (hw->mac.type == fm10k_mac_pf) {
                                FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
                                                FM10K_PFVTCTL_FTAG_DESC_ENABLE);
                                PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
                        } else {
                                PMD_INIT_LOG(ERR, "VF FTAG is not supported.");
                                return -ENOTSUP;
                        }
                }

                /* set location and size for descriptor ring */
                FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);

                /* assign default SGLORT for each TX queue by PF */
                if (hw->mac.type == fm10k_mac_pf)
                        FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
        }

        /* set up vector or scalar TX function as appropriate */
        fm10k_set_tx_function(dev);

        return 0;
}

static int
fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_macvlan_filter_info *macvlan;
        struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pdev->intr_handle;
        int i, ret;
        struct fm10k_rx_queue *rxq;
        uint64_t base_addr;
        uint32_t size;
        uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
        uint32_t logic_port = hw->mac.dglort_map;
        uint16_t buf_size;
        uint16_t queue_stride = 0;

        /* enable RXINT for interrupt mode */
        i = 0;
        if (rte_intr_dp_is_en(intr_handle)) {
                for (; i < dev->data->nb_rx_queues; i++) {
                        FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
                        if (hw->mac.type == fm10k_mac_pf)
                                FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
                                        FM10K_ITR_AUTOMASK |
                                        FM10K_ITR_MASK_CLEAR);
                        else
                                FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
                                        FM10K_ITR_AUTOMASK |
                                        FM10K_ITR_MASK_CLEAR);
                }
        }
        /* Disable other RXINT to avoid possible interrupt */
        for (; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_RXINT(i),
                        3 << FM10K_RXINT_TIMER_SHIFT);

        /* Setup RX queues */
        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
                rxq = dev->data->rx_queues[i];
                base_addr = rxq->hw_ring_phys_addr;
                size = rxq->nb_desc * sizeof(union fm10k_rx_desc);

                /* disable queue to avoid issues while updating state */
                ret = rx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }

                /* Setup the Base and Length of the Rx Descriptor Ring */
                FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);

                /* Configure the Rx buffer size for one buffer, no split */
                buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
                        RTE_PKTMBUF_HEADROOM);
                /* As the RX buffer is aligned to 512B within the mbuf, some
                 * bytes are reserved for this purpose, and the worst case
                 * could be 511B. But the SRR reg assumes all buffers have
                 * the same size. In order to fill the gap, we'll have to
                 * consider the worst case and assume 512B is reserved. If
                 * we don't do so, it's possible for HW to overwrite data
                 * into the next mbuf.
                 */
                buf_size -= FM10K_RX_DATABUF_ALIGN;

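                /* Arithmetic sketch (assuming the default mbuf layout of a
                 * 2048-byte data room and 128-byte RTE_PKTMBUF_HEADROOM):
                 * buf_size starts at 2048 - 128 = 1920 and, after the
                 * 512-byte FM10K_RX_DATABUF_ALIGN reservation above, 1408
                 * bytes per buffer are advertised to the hardware.
                 */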
                FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
                                (buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
                                FM10K_SRRCTL_LOOPBACK_SUPPRESS);

                /* Add dual VLAN tag length to support dual VLAN */
                if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
                                2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
                        dev->data->dev_conf.rxmode.enable_scatter) {
                        uint32_t reg;
                        dev->data->scattered_rx = 1;
                        reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
                        reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
                        FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
                }

                /* Enable drop on empty, it's RO for VF */
                if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
                        rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

                FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
                FM10K_WRITE_FLUSH(hw);
        }

        /* Configure VMDQ/RSS if applicable */
        fm10k_dev_mq_rx_configure(dev);

        /* Decide the best RX function */
        fm10k_set_rx_function(dev);

        /* update RX_SGLORT for loopback suppression */
        if (hw->mac.type != fm10k_mac_pf)
                return 0;
        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        if (macvlan->nb_queue_pools)
                queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
                if (i && queue_stride && !(i % queue_stride))
                        logic_port++;
                FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
        }

        return 0;
}

static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err = -1;
        uint32_t reg;
        struct fm10k_rx_queue *rxq;

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                rxq = dev->data->rx_queues[rx_queue_id];
                err = rx_queue_reset(rxq);
                if (err == -ENOMEM) {
                        PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
                        return err;
                } else if (err == -EINVAL) {
                        PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
                                " %d", err);
                        return err;
                }

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled on real
                 * hardware, but BEFORE the queue is enabled when using the
                 * emulation platform. Do it in both places for now and remove
                 * this comment and the following two register writes when the
                 * emulation platform is no longer being used.
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

                /* Set PF ownership flag for PF devices */
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
                if (hw->mac.type == fm10k_mac_pf)
                        reg |= FM10K_RXQCTL_PF;
                reg |= FM10K_RXQCTL_ENABLE;
                /* enable RX queue */
                FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
                FM10K_WRITE_FLUSH(hw);

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
                dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
        }

        return err;
}

static int
fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                /* Disable RX queue */
                rx_queue_disable(hw, rx_queue_id);

                /* Free mbuf and clean HW ring */
                rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
                dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

static int
fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY       0x00010000
        uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
        int err = 0;

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];

                q->ops->reset(q);

                /* reset head and tail pointers */
                FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

                /* enable TX queue */
                FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
                                        FM10K_TXDCTL_ENABLE | txdctl);
                FM10K_WRITE_FLUSH(hw);
                dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
        } else
                err = -1;

        return err;
}

static int
fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                tx_queue_disable(hw, tx_queue_id);
                tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
                dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

static inline int fm10k_glort_valid(struct fm10k_hw *hw)
{
        return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
                != FM10K_DGLORTMAP_NONE);
}
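
/* Reading note: until the switch manager hands the port a glort range,
 * hw->mac.dglort_map still carries the FM10K_DGLORTMAP_NONE pattern, so
 * the check above returns false; the callers below use it to bail out
 * of xcast-mode updates before a valid range has been acquired.
 */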

static void
fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if no valid glort range has been acquired */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_PROMISC);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
}

static void
fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint8_t mode;
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if no valid glort range has been acquired */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        if (dev->data->all_multicast == 1)
                mode = FM10K_XCAST_MODE_ALLMULTI;
        else
                mode = FM10K_XCAST_MODE_NONE;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                mode);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
}

static void
fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if no valid glort range has been acquired */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        /* If promiscuous mode is enabled, enabling allmulticast would
         * implicitly disable promiscuous mode, since fm10k can only
         * select one of the modes at a time.
         */
        if (dev->data->promiscuous) {
                PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "
                        "needn't enable allmulticast");
                return;
        }

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_ALLMULTI);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
}

static void
fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if no valid glort range has been acquired */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        if (dev->data->promiscuous) {
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "
                        "since promisc mode is enabled");
                return;
        }

        fm10k_mbx_lock(hw);
        /* Change mode to unicast mode */
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_NONE);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
}

static void
fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
        uint16_t nb_queue_pools;
        struct fm10k_macvlan_filter_info *macvlan;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        nb_queue_pools = macvlan->nb_queue_pools;
        pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
        rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
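
        /* Worked example (illustrative numbers): with 4 VMDQ pools and
         * 16 RX queues, pool_len = fls(3) = 2 and
         * rss_len = fls(15) - 2 = 4 - 2 = 2, i.e. 2 glort bits select
         * the pool and 2 bits select one of 4 RSS queues within it.
         */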

        /* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */
        dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
        dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
                        hw->mac.dglort_map;
        FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
        /* Configure VMDQ/RSS DGlort Decoder */
        FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);

        /* Flow Director configurations, only queue number is valid. */
        dglortdec = fls(dev->data->nb_rx_queues - 1);
        dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
                        (hw->mac.dglort_map + GLORT_FD_Q_BASE);
        FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
        FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);

        /* Invalidate all other GLORT entries */
        for (i = 2; i < FM10K_DGLORT_COUNT; i++)
                FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
                                FM10K_DGLORTMAP_NONE);
}

#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
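/* Note: this is the usual round-up mask idiom; a size can be aligned up
 * to the SRRCTL BSIZEPKT granularity (2^FM10K_SRRCTL_BSIZEPKT_SHIFT
 * bytes) via (size + BSIZEPKT_ROUNDUP) & ~BSIZEPKT_ROUNDUP.
 */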
static int
fm10k_dev_start(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, diag;

        PMD_INIT_FUNC_TRACE();

        /* stop, init, then start the hw */
        diag = fm10k_stop_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_init_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_start_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_dev_tx_init(dev);
        if (diag) {
                PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
                return diag;
        }

        if (fm10k_dev_rxq_interrupt_setup(dev))
                return -EIO;

        diag = fm10k_dev_rx_init(dev);
        if (diag) {
                PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
                return diag;
        }

        if (hw->mac.type == fm10k_mac_pf)
                fm10k_dev_dglort_map_configure(dev);

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct fm10k_rx_queue *rxq;
                rxq = dev->data->rx_queues[i];

                if (rxq->rx_deferred_start)
                        continue;
                diag = fm10k_dev_rx_queue_start(dev, i);
                if (diag != 0) {
                        int j;
                        for (j = 0; j < i; ++j)
                                rx_queue_clean(dev->data->rx_queues[j]);
                        return diag;
                }
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct fm10k_tx_queue *txq;
                txq = dev->data->tx_queues[i];

                if (txq->tx_deferred_start)
                        continue;
                diag = fm10k_dev_tx_queue_start(dev, i);
                if (diag != 0) {
                        int j;
                        for (j = 0; j < i; ++j)
                                tx_queue_clean(dev->data->tx_queues[j]);
                        for (j = 0; j < dev->data->nb_rx_queues; ++j)
                                rx_queue_clean(dev->data->rx_queues[j]);
                        return diag;
                }
        }

        /* Update default vlan when not in VMDQ mode */
        if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
                fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);

        fm10k_link_update(dev, 0);

        return 0;
}

static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pdev->intr_handle;
        int i;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->tx_queues)
                for (i = 0; i < dev->data->nb_tx_queues; i++)
                        fm10k_dev_tx_queue_stop(dev, i);

        if (dev->data->rx_queues)
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        fm10k_dev_rx_queue_stop(dev, i);

        /* Disable datapath event */
        if (rte_intr_dp_is_en(intr_handle)) {
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        FM10K_WRITE_REG(hw, FM10K_RXINT(i),
                                3 << FM10K_RXINT_TIMER_SHIFT);
                        if (hw->mac.type == fm10k_mac_pf)
                                FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
                                        FM10K_ITR_MASK_SET);
                        else
                                FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
                                        FM10K_ITR_MASK_SET);
                }
        }
        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        rte_free(intr_handle->intr_vec);
        intr_handle->intr_vec = NULL;
}

static void
fm10k_dev_queue_release(struct rte_eth_dev *dev)
{
        int i;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->tx_queues) {
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        struct fm10k_tx_queue *txq = dev->data->tx_queues[i];

                        tx_queue_free(txq);
                }
        }

        if (dev->data->rx_queues) {
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        fm10k_rx_queue_release(dev->data->rx_queues[i]);
        }
}

static void
fm10k_dev_close(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        fm10k_mbx_lock(hw);
        hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
                MAX_LPORT_NUM, false);
        fm10k_mbx_unlock(hw);

        /* allow 10ms for device to quiesce */
        rte_delay_us(FM10K_SWITCH_QUIESCE_US);

        /* Stop mailbox service first */
        fm10k_close_mbx_service(hw);
        fm10k_dev_stop(dev);
        fm10k_dev_queue_release(dev);
        fm10k_stop_hw(hw);
}

static int
fm10k_link_update(struct rte_eth_dev *dev,
        __rte_unused int wait_to_complete)
{
        struct fm10k_dev_info *dev_info =
                FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
        PMD_INIT_FUNC_TRACE();

        /* The speed is ~50Gbps per Gen3 x8 PCIe interface. For now, we
         * leave the speed undefined since there is no 50Gbps Ethernet.
         */
        dev->data->dev_link.link_speed  = 0;
        dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        dev->data->dev_link.link_status =
                dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;

        return 0;
}

static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
{
        unsigned i, q;
        unsigned count = 0;

        if (xstats_names != NULL) {
                /* Note: limit checked in rte_eth_xstats_names() */

                /* Global stats */
                for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
                        snprintf(xstats_names[count].name,
                                sizeof(xstats_names[count].name),
                                "%s", fm10k_hw_stats_strings[count].name);
                        count++;
                }

                /* PF queue stats */
                for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
                        for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
                                snprintf(xstats_names[count].name,
                                        sizeof(xstats_names[count].name),
                                        "rx_q%u_%s", q,
                                        fm10k_hw_stats_rx_q_strings[i].name);
                                count++;
                        }
                        for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
                                snprintf(xstats_names[count].name,
                                        sizeof(xstats_names[count].name),
                                        "tx_q%u_%s", q,
                                        fm10k_hw_stats_tx_q_strings[i].name);
                                count++;
                        }
                }
        }
        return FM10K_NB_XSTATS;
}
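
/* Convention note (standard ethdev xstats behavior): when xstats_names
 * is NULL the function above still returns FM10K_NB_XSTATS, which lets
 * callers query the required array size before fetching the names.
 */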

static int
fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                 unsigned n)
{
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        unsigned i, q, count = 0;

        if (n < FM10K_NB_XSTATS)
                return FM10K_NB_XSTATS;

        /* Global stats */
        for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
                xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
                        fm10k_hw_stats_strings[count].offset);
                xstats[count].id = count;
                count++;
        }

        /* PF queue stats */
        for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
                for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
                        xstats[count].value =
                                *(uint64_t *)(((char *)&hw_stats->q[q]) +
                                fm10k_hw_stats_rx_q_strings[i].offset);
                        xstats[count].id = count;
                        count++;
                }
                for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
                        xstats[count].value =
                                *(uint64_t *)(((char *)&hw_stats->q[q]) +
                                fm10k_hw_stats_tx_q_strings[i].offset);
                        xstats[count].id = count;
                        count++;
                }
        }

        return FM10K_NB_XSTATS;
}

static int
fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        uint64_t ipackets, opackets, ibytes, obytes;
        struct fm10k_hw *hw =
                FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        int i;

        PMD_INIT_FUNC_TRACE();

        fm10k_update_hw_stats(hw, hw_stats);

        ipackets = opackets = ibytes = obytes = 0;
        for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
                (i < hw->mac.max_queues); ++i) {
                stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
                stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
                stats->q_ibytes[i]   = hw_stats->q[i].rx_bytes.count;
                stats->q_obytes[i]   = hw_stats->q[i].tx_bytes.count;
                ipackets += stats->q_ipackets[i];
                opackets += stats->q_opackets[i];
                ibytes   += stats->q_ibytes[i];
                obytes   += stats->q_obytes[i];
        }
        stats->ipackets = ipackets;
        stats->opackets = opackets;
        stats->ibytes = ibytes;
        stats->obytes = obytes;
        return 0;
}

static void
fm10k_stats_reset(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        memset(hw_stats, 0, sizeof(*hw_stats));
        fm10k_rebind_hw_stats(hw, hw_stats);
}

static void
fm10k_dev_infos_get(struct rte_eth_dev *dev,
        struct rte_eth_dev_info *dev_info)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);

        PMD_INIT_FUNC_TRACE();

        dev_info->pci_dev            = pdev;
        dev_info->min_rx_bufsize     = FM10K_MIN_RX_BUF_SIZE;
        dev_info->max_rx_pktlen      = FM10K_MAX_PKT_SIZE;
        dev_info->max_rx_queues      = hw->mac.max_queues;
        dev_info->max_tx_queues      = hw->mac.max_queues;
        dev_info->max_mac_addrs      = FM10K_MAX_MACADDR_NUM;
        dev_info->max_hash_mac_addrs = 0;
        dev_info->max_vfs            = pdev->max_vfs;
        dev_info->vmdq_pool_base     = 0;
        dev_info->vmdq_queue_base    = 0;
        dev_info->max_vmdq_pools     = ETH_32_POOLS;
        dev_info->vmdq_queue_num     = FM10K_MAX_QUEUES_PF;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM  |
                DEV_RX_OFFLOAD_TCP_CKSUM;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM  |
                DEV_TX_OFFLOAD_UDP_CKSUM   |
                DEV_TX_OFFLOAD_TCP_CKSUM   |
                DEV_TX_OFFLOAD_TCP_TSO;

        dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
        dev_info->reta_size = FM10K_MAX_RSS_INDICES;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = FM10K_DEFAULT_RX_PTHRESH,
                        .hthresh = FM10K_DEFAULT_RX_HTHRESH,
                        .wthresh = FM10K_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
                .rx_drop_en = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = FM10K_DEFAULT_TX_PTHRESH,
                        .hthresh = FM10K_DEFAULT_TX_HTHRESH,
                        .wthresh = FM10K_DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
                .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
                .txq_flags = FM10K_SIMPLE_TX_FLAG,
        };

        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = FM10K_MAX_RX_DESC,
                .nb_min = FM10K_MIN_RX_DESC,
                .nb_align = FM10K_MULT_RX_DESC,
        };

        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = FM10K_MAX_TX_DESC,
                .nb_min = FM10K_MIN_TX_DESC,
                .nb_align = FM10K_MULT_TX_DESC,
                .nb_seg_max = FM10K_TX_MAX_SEG,
                .nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
        };

        dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
                        ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
                        ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
}

#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
static const uint32_t *
fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
        if (dev->rx_pkt_burst == fm10k_recv_pkts ||
            dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
                static uint32_t ptypes[] = {
                        /* refers to rx_desc_to_ol_flags() */
                        RTE_PTYPE_L2_ETHER,
                        RTE_PTYPE_L3_IPV4,
                        RTE_PTYPE_L3_IPV4_EXT,
                        RTE_PTYPE_L3_IPV6,
                        RTE_PTYPE_L3_IPV6_EXT,
                        RTE_PTYPE_L4_TCP,
                        RTE_PTYPE_L4_UDP,
                        RTE_PTYPE_UNKNOWN
                };

                return ptypes;
        } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
                   dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
                static uint32_t ptypes_vec[] = {
                        /* refers to fm10k_desc_to_pktype_v() */
                        RTE_PTYPE_L3_IPV4,
                        RTE_PTYPE_L3_IPV4_EXT,
                        RTE_PTYPE_L3_IPV6,
                        RTE_PTYPE_L3_IPV6_EXT,
                        RTE_PTYPE_L4_TCP,
                        RTE_PTYPE_L4_UDP,
                        RTE_PTYPE_TUNNEL_GENEVE,
                        RTE_PTYPE_TUNNEL_NVGRE,
                        RTE_PTYPE_TUNNEL_VXLAN,
                        RTE_PTYPE_TUNNEL_GRE,
                        RTE_PTYPE_UNKNOWN
                };

                return ptypes_vec;
        }

        return NULL;
}
#else
static const uint32_t *
1517 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1518 {
1519         return NULL;
1520 }
1521 #endif
1522
1523 static int
1524 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1525 {
1526         s32 result;
1527         uint16_t mac_num = 0;
1528         uint32_t vid_idx, vid_bit, mac_index;
1529         struct fm10k_hw *hw;
1530         struct fm10k_macvlan_filter_info *macvlan;
1531         struct rte_eth_dev_data *data = dev->data;
1532
1533         hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1534         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1535
1536         if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
1537                 PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
1538                 return -EINVAL;
1539         }
1540
1541         if (vlan_id > ETH_VLAN_ID_MAX) {
1542                 PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
1543                 return -EINVAL;
1544         }
1545
1546         vid_idx = FM10K_VFTA_IDX(vlan_id);
1547         vid_bit = FM10K_VFTA_BIT(vlan_id);
1548         /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
1549         if (on && (macvlan->vfta[vid_idx] & vid_bit))
1550                 return 0;
1551         /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
1552         if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
1553                 PMD_INIT_LOG(ERR, "Invalid vlan_id: not present "
1554                         "in the VLAN filter table");
1555                 return -EINVAL;
1556         }
1557
1558         fm10k_mbx_lock(hw);
1559         result = fm10k_update_vlan(hw, vlan_id, 0, on);
1560         fm10k_mbx_unlock(hw);
1561         if (result != FM10K_SUCCESS) {
1562                 PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
1563                 return -EIO;
1564         }
1565
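             /* Re-program every configured unicast MAC filter with this VLAN */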
1566         for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1567                         (result == FM10K_SUCCESS); mac_index++) {
1568                 if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
1569                         continue;
1570                 if (mac_num > macvlan->mac_num - 1) {
1571                         PMD_INIT_LOG(ERR, "MAC address count "
1572                                         "does not match");
1573                         break;
1574                 }
1575                 fm10k_mbx_lock(hw);
1576                 result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1577                         data->mac_addrs[mac_index].addr_bytes,
1578                         vlan_id, on, 0);
1579                 fm10k_mbx_unlock(hw);
1580                 mac_num++;
1581         }
1582         if (result != FM10K_SUCCESS) {
1583                 PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1584                 return -EIO;
1585         }
1586
1587         if (on) {
1588                 macvlan->vlan_num++;
1589                 macvlan->vfta[vid_idx] |= vid_bit;
1590         } else {
1591                 macvlan->vlan_num--;
1592                 macvlan->vfta[vid_idx] &= ~vid_bit;
1593         }
1594         return 0;
1595 }
1596
1597 static int
1598 fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1599 {
1600         if (mask & ETH_VLAN_STRIP_MASK) {
1601                 if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
1602                         PMD_INIT_LOG(ERR, "VLAN stripping is "
1603                                         "always on in fm10k");
1604         }
1605
1606         if (mask & ETH_VLAN_EXTEND_MASK) {
1607                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1608                         PMD_INIT_LOG(ERR, "VLAN QinQ is not "
1609                                         "supported in fm10k");
1610         }
1611
1612         if (mask & ETH_VLAN_FILTER_MASK) {
1613                 if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
1614                         PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
1615         }
1616
1617         return 0;
1618 }
1619
1620 /* Add/Remove a MAC address, and update filters to main VSI */
1621 static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
1622                 const u8 *mac, bool add, uint32_t pool)
1623 {
1624         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1625         struct fm10k_macvlan_filter_info *macvlan;
1626         uint32_t i, j, k;
1627
1628         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1629
1630         if (pool != MAIN_VSI_POOL_NUMBER) {
1631                 PMD_DRV_LOG(ERR, "VMDQ not enabled, cannot set "
1632                         "MAC to pool %u", pool);
1633                 return;
1634         }
1635         for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
1636                 if (!macvlan->vfta[j])
1637                         continue;
1638                 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1639                         if (!(macvlan->vfta[j] & (1 << k)))
1640                                 continue;
1641                         if (i + 1 > macvlan->vlan_num) {
1642                                 PMD_INIT_LOG(ERR, "VLAN count does not match");
1643                                 return;
1644                         }
1645                         fm10k_mbx_lock(hw);
1646                         fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
1647                                 j * FM10K_UINT32_BIT_SIZE + k, add, 0);
1648                         fm10k_mbx_unlock(hw);
1649                         i++;
1650                 }
1651         }
1652 }
1653
1654 /* Add/Remove a MAC address, and update filters to VMDQ */
1655 static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
1656                 const u8 *mac, bool add, uint32_t pool)
1657 {
1658         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1659         struct fm10k_macvlan_filter_info *macvlan;
1660         struct rte_eth_vmdq_rx_conf *vmdq_conf;
1661         uint32_t i;
1662
1663         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1664         vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1665
1666         if (pool > macvlan->nb_queue_pools) {
1667                 PMD_DRV_LOG(ERR, "Pool number %u invalid."
1668                         " Max pool is %u",
1669                         pool, macvlan->nb_queue_pools);
1670                 return;
1671         }
1672         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
1673                 if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
1674                         continue;
1675                 fm10k_mbx_lock(hw);
1676                 fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
1677                         vmdq_conf->pool_map[i].vlan_id, add, 0);
1678                 fm10k_mbx_unlock(hw);
1679         }
1680 }
1681
1682 /* Add/Remove a MAC address, and update filters */
1683 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
1684                 const u8 *mac, bool add, uint32_t pool)
1685 {
1686         struct fm10k_macvlan_filter_info *macvlan;
1687
1688         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1689
1690         if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
1691                 fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
1692         else
1693                 fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
1694
1695         if (add)
1696                 macvlan->mac_num++;
1697         else
1698                 macvlan->mac_num--;
1699 }
1700
1701 /* Add a MAC address, and update filters */
1702 static int
1703 fm10k_macaddr_add(struct rte_eth_dev *dev,
1704                 struct ether_addr *mac_addr,
1705                 uint32_t index,
1706                 uint32_t pool)
1707 {
1708         struct fm10k_macvlan_filter_info *macvlan;
1709
1710         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1711         fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1712         macvlan->mac_vmdq_id[index] = pool;
1713         return 0;
1714 }
1715
1716 /* Remove a MAC address, and update filters */
1717 static void
1718 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1719 {
1720         struct rte_eth_dev_data *data = dev->data;
1721         struct fm10k_macvlan_filter_info *macvlan;
1722
1723         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1724         fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1725                         FALSE, macvlan->mac_vmdq_id[index]);
1726         macvlan->mac_vmdq_id[index] = 0;
1727 }
1728
1729 static inline int
1730 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1731 {
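             /* the requested value must lie in [min, max] and be a multiple of mult */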
1732         if ((request < min) || (request > max) || ((request % mult) != 0))
1733                 return -1;
1734         else
1735                 return 0;
1736 }
1737
1738
1739 static inline int
1740 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1741 {
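             /* the requested value must lie in [min, max] and evenly divide div */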
1742         if ((request < min) || (request > max) || ((div % request) != 0))
1743                 return -1;
1744         else
1745                 return 0;
1746 }
1747
1748 static inline int
1749 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1750 {
1751         uint16_t rx_free_thresh;
1752
1753         if (conf->rx_free_thresh == 0)
1754                 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1755         else
1756                 rx_free_thresh = conf->rx_free_thresh;
1757
1758         /* make sure the requested threshold satisfies the constraints */
1759         if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1760                         FM10K_RX_FREE_THRESH_MAX(q),
1761                         FM10K_RX_FREE_THRESH_DIV(q),
1762                         rx_free_thresh)) {
1763                 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1764                         "less than or equal to %u, "
1765                         "greater than or equal to %u, "
1766                         "and a divisor of %u",
1767                         rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1768                         FM10K_RX_FREE_THRESH_MIN(q),
1769                         FM10K_RX_FREE_THRESH_DIV(q));
1770                 return -EINVAL;
1771         }
1772
1773         q->alloc_thresh = rx_free_thresh;
1774         q->drop_en = conf->rx_drop_en;
1775         q->rx_deferred_start = conf->rx_deferred_start;
1776
1777         return 0;
1778 }
1779
1780 /*
1781  * Hardware requires specific alignment for Rx packet buffers. At
1782  * least one of the following two conditions must be satisfied.
1783  *  1. Address is 512B aligned
1784  *  2. Address is 8B aligned and the buffer does not cross a 4K boundary.
1785  *
1786  * As such, the driver may need to adjust the DMA address within the
1787  * buffer by up to 512B.
1788  *
1789  * return 1 if the element size is valid, otherwise return 0.
1790  */
1791 static int
1792 mempool_element_size_valid(struct rte_mempool *mp)
1793 {
1794         uint32_t min_size;
1795
1796         /* elt_size includes mbuf header and headroom */
1797         min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1798                         RTE_PKTMBUF_HEADROOM;
1799
1800         /* account for up to 512B of alignment */
1801         min_size -= FM10K_RX_DATABUF_ALIGN;
1802
1803         /* sanity check for overflow */
1804         if (min_size > mp->elt_size)
1805                 return 0;
1806
1807         /* size is valid */
1808         return 1;
1809 }
1810
1811 static int
1812 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1813         uint16_t nb_desc, unsigned int socket_id,
1814         const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1815 {
1816         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1817         struct fm10k_dev_info *dev_info =
1818                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
1819         struct fm10k_rx_queue *q;
1820         const struct rte_memzone *mz;
1821
1822         PMD_INIT_FUNC_TRACE();
1823
1824         /* make sure the mempool element size can account for alignment. */
1825         if (!mempool_element_size_valid(mp)) {
1826                 PMD_INIT_LOG(ERR, "Error: Mempool element size is too small");
1827                 return -EINVAL;
1828         }
1829
1830         /* make sure a valid number of descriptors has been requested */
1831         if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1832                                 FM10K_MULT_RX_DESC, nb_desc)) {
1833                 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1834                         "less than or equal to %"PRIu32", "
1835                         "greater than or equal to %u, "
1836                         "and a multiple of %u",
1837                         nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1838                         FM10K_MULT_RX_DESC);
1839                 return -EINVAL;
1840         }
1841
1842         /*
1843          * if this queue already exists, free the associated memory. The
1844          * queue cannot be reused in case we need to allocate memory on
1845          * a different socket than was previously used.
1846          */
1847         if (dev->data->rx_queues[queue_id] != NULL) {
1848                 rx_queue_free(dev->data->rx_queues[queue_id]);
1849                 dev->data->rx_queues[queue_id] = NULL;
1850         }
1851
1852         /* allocate memory for the queue structure */
1853         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1854                                 socket_id);
1855         if (q == NULL) {
1856                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1857                 return -ENOMEM;
1858         }
1859
1860         /* setup queue */
1861         q->mp = mp;
1862         q->nb_desc = nb_desc;
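             /* note: the extra "fake" descriptors pad the tail of the
              * software ring; the Rx code points them at a dummy mbuf so
              * reads past the wrap point stay safe
              */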
1863         q->nb_fake_desc = FM10K_MULT_RX_DESC;
1864         q->port_id = dev->data->port_id;
1865         q->queue_id = queue_id;
1866         q->tail_ptr = (volatile uint32_t *)
1867                 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1868         if (handle_rxconf(q, conf)) {
                     rte_free(q);
1869                 return -EINVAL;
             }
1870
1871         /* allocate memory for the software ring */
1872         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1873                         (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
1874                         RTE_CACHE_LINE_SIZE, socket_id);
1875         if (q->sw_ring == NULL) {
1876                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1877                 rte_free(q);
1878                 return -ENOMEM;
1879         }
1880
1881         /*
1882          * allocate memory for the hardware descriptor ring. A memzone large
1883          * enough to hold the maximum ring size is requested to allow for
1884          * resizing in later calls to the queue setup function.
1885          */
1886         mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
1887                                       FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
1888                                       socket_id);
1889         if (mz == NULL) {
1890                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1891                 rte_free(q->sw_ring);
1892                 rte_free(q);
1893                 return -ENOMEM;
1894         }
1895         q->hw_ring = mz->addr;
1896         q->hw_ring_phys_addr = mz->iova;
1897
1898         /* Check if the number of descriptors satisfies the vector Rx requirement */
1899         if (!rte_is_power_of_2(nb_desc)) {
1900                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
1901                                     "preconditions - canceling the feature for "
1902                                     "the whole port[%d]",
1903                              q->queue_id, q->port_id);
1904                 dev_info->rx_vec_allowed = false;
1905         } else
1906                 fm10k_rxq_vec_setup(q);
1907
1908         dev->data->rx_queues[queue_id] = q;
1909         return 0;
1910 }
1911
1912 static void
1913 fm10k_rx_queue_release(void *queue)
1914 {
1915         PMD_INIT_FUNC_TRACE();
1916
1917         rx_queue_free(queue);
1918 }
1919
1920 static inline int
1921 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1922 {
1923         uint16_t tx_free_thresh;
1924         uint16_t tx_rs_thresh;
1925
1926         /* the constraint macros require that tx_free_thresh is configured
1927          * before tx_rs_thresh */
1928         if (conf->tx_free_thresh == 0)
1929                 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1930         else
1931                 tx_free_thresh = conf->tx_free_thresh;
1932
1933         /* make sure the requested threshold satisfies the constraints */
1934         if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1935                         FM10K_TX_FREE_THRESH_MAX(q),
1936                         FM10K_TX_FREE_THRESH_DIV(q),
1937                         tx_free_thresh)) {
1938                 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1939                         "less than or equal to %u, "
1940                         "greater than or equal to %u, "
1941                         "and a divisor of %u",
1942                         tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1943                         FM10K_TX_FREE_THRESH_MIN(q),
1944                         FM10K_TX_FREE_THRESH_DIV(q));
1945                 return -EINVAL;
1946         }
1947
1948         q->free_thresh = tx_free_thresh;
1949
1950         if (conf->tx_rs_thresh == 0)
1951                 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1952         else
1953                 tx_rs_thresh = conf->tx_rs_thresh;
1954
1955         q->tx_deferred_start = conf->tx_deferred_start;
1956
1957         /* make sure the requested threshold satisfies the constraints */
1958         if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1959                         FM10K_TX_RS_THRESH_MAX(q),
1960                         FM10K_TX_RS_THRESH_DIV(q),
1961                         tx_rs_thresh)) {
1962                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1963                         "less than or equal to %u, "
1964                         "greater than or equal to %u, "
1965                         "and a divisor of %u",
1966                         tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1967                         FM10K_TX_RS_THRESH_MIN(q),
1968                         FM10K_TX_RS_THRESH_DIV(q));
1969                 return -EINVAL;
1970         }
1971
1972         q->rs_thresh = tx_rs_thresh;
1973
1974         return 0;
1975 }
1976
1977 static int
1978 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1979         uint16_t nb_desc, unsigned int socket_id,
1980         const struct rte_eth_txconf *conf)
1981 {
1982         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1983         struct fm10k_tx_queue *q;
1984         const struct rte_memzone *mz;
1985
1986         PMD_INIT_FUNC_TRACE();
1987
1988         /* make sure a valid number of descriptors has been requested */
1989         if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1990                                 FM10K_MULT_TX_DESC, nb_desc)) {
1991                 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1992                         "less than or equal to %"PRIu32", "
1993                         "greater than or equal to %u, "
1994                         "and a multiple of %u",
1995                         nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1996                         FM10K_MULT_TX_DESC);
1997                 return -EINVAL;
1998         }
1999
2000         /*
2001          * if this queue already exists, free the associated memory. The
2002          * queue cannot be reused in case we need to allocate memory on
2003          * a different socket than was previously used.
2004          */
2005         if (dev->data->tx_queues[queue_id] != NULL) {
2006                 struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
2007
2008                 tx_queue_free(txq);
2009                 dev->data->tx_queues[queue_id] = NULL;
2010         }
2011
2012         /* allocate memory for the queue structure */
2013         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
2014                                 socket_id);
2015         if (q == NULL) {
2016                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
2017                 return -ENOMEM;
2018         }
2019
2020         /* setup queue */
2021         q->nb_desc = nb_desc;
2022         q->port_id = dev->data->port_id;
2023         q->queue_id = queue_id;
2024         q->txq_flags = conf->txq_flags;
2025         q->ops = &def_txq_ops;
2026         q->tail_ptr = (volatile uint32_t *)
2027                 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
2028         if (handle_txconf(q, conf)) {
                     rte_free(q);
2029                 return -EINVAL;
             }
2030
2031         /* allocate memory for the software ring */
2032         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
2033                                         nb_desc * sizeof(struct rte_mbuf *),
2034                                         RTE_CACHE_LINE_SIZE, socket_id);
2035         if (q->sw_ring == NULL) {
2036                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
2037                 rte_free(q);
2038                 return -ENOMEM;
2039         }
2040
2041         /*
2042          * allocate memory for the hardware descriptor ring. A memzone large
2043          * enough to hold the maximum ring size is requested to allow for
2044          * resizing in later calls to the queue setup function.
2045          */
2046         mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
2047                                       FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
2048                                       socket_id);
2049         if (mz == NULL) {
2050                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
2051                 rte_free(q->sw_ring);
2052                 rte_free(q);
2053                 return -ENOMEM;
2054         }
2055         q->hw_ring = mz->addr;
2056         q->hw_ring_phys_addr = mz->iova;
2057
2058         /*
2059          * allocate memory for the RS bit tracker. It must have enough slots
2060          * to hold the descriptor index for each RS bit that needs to be set.
2061          */
2062         q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
2063                                 ((nb_desc + 1) / q->rs_thresh) *
2064                                 sizeof(uint16_t),
2065                                 RTE_CACHE_LINE_SIZE, socket_id);
2066         if (q->rs_tracker.list == NULL) {
2067                 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
2068                 rte_free(q->sw_ring);
2069                 rte_free(q);
2070                 return -ENOMEM;
2071         }
2072
2073         dev->data->tx_queues[queue_id] = q;
2074         return 0;
2075 }
2076
2077 static void
2078 fm10k_tx_queue_release(void *queue)
2079 {
2080         struct fm10k_tx_queue *q = queue;
2081         PMD_INIT_FUNC_TRACE();
2082
2083         tx_queue_free(q);
2084 }
2085
2086 static int
2087 fm10k_reta_update(struct rte_eth_dev *dev,
2088                         struct rte_eth_rss_reta_entry64 *reta_conf,
2089                         uint16_t reta_size)
2090 {
2091         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2092         uint16_t i, j, idx, shift;
2093         uint8_t mask;
2094         uint32_t reta;
2095
2096         PMD_INIT_FUNC_TRACE();
2097
2098         if (reta_size > FM10K_MAX_RSS_INDICES) {
2099                 PMD_INIT_LOG(ERR, "The size of the configured hash lookup table "
2100                         "(%d) does not match the number supported by hardware "
2101                         "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2102                 return -EINVAL;
2103         }
2104
2105         /*
2106          * Update Redirection Table RETA[n], n=0..31. The redirection table has
2107          * 128 entries in 32 registers
2108          */
2109         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2110                 idx = i / RTE_RETA_GROUP_SIZE;
2111                 shift = i % RTE_RETA_GROUP_SIZE;
2112                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2113                                 BIT_MASK_PER_UINT32);
2114                 if (mask == 0)
2115                         continue;
2116
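                     /* read the current register only when the 4-entry group
                      * is partially masked, so entries outside the mask are
                      * preserved
                      */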
2117                 reta = 0;
2118                 if (mask != BIT_MASK_PER_UINT32)
2119                         reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2120
2121                 for (j = 0; j < CHARS_PER_UINT32; j++) {
2122                         if (mask & (0x1 << j)) {
2123                                 if (mask != BIT_MASK_PER_UINT32)
2124                                         reta &= ~(UINT8_MAX << CHAR_BIT * j);
2125                                 reta |= reta_conf[idx].reta[shift + j] <<
2126                                                 (CHAR_BIT * j);
2127                         }
2128                 }
2129                 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
2130         }
2131
2132         return 0;
2133 }
2134
2135 static int
2136 fm10k_reta_query(struct rte_eth_dev *dev,
2137                         struct rte_eth_rss_reta_entry64 *reta_conf,
2138                         uint16_t reta_size)
2139 {
2140         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2141         uint16_t i, j, idx, shift;
2142         uint8_t mask;
2143         uint32_t reta;
2144
2145         PMD_INIT_FUNC_TRACE();
2146
2147         if (reta_size < FM10K_MAX_RSS_INDICES) {
2148                 PMD_INIT_LOG(ERR, "The size of the configured hash lookup table "
2149                         "(%d) does not match the number supported by hardware "
2150                         "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2151                 return -EINVAL;
2152         }
2153
2154         /*
2155          * Read Redirection Table RETA[n], n=0..31. The redirection table has
2156          * 128 entries in 32 registers
2157          */
2158         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2159                 idx = i / RTE_RETA_GROUP_SIZE;
2160                 shift = i % RTE_RETA_GROUP_SIZE;
2161                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2162                                 BIT_MASK_PER_UINT32);
2163                 if (mask == 0)
2164                         continue;
2165
2166                 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2167                 for (j = 0; j < CHARS_PER_UINT32; j++) {
2168                         if (mask & (0x1 << j))
2169                                 reta_conf[idx].reta[shift + j] = ((reta >>
2170                                         CHAR_BIT * j) & UINT8_MAX);
2171                 }
2172         }
2173
2174         return 0;
2175 }
2176
2177 static int
2178 fm10k_rss_hash_update(struct rte_eth_dev *dev,
2179         struct rte_eth_rss_conf *rss_conf)
2180 {
2181         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2182         uint32_t *key = (uint32_t *)rss_conf->rss_key;
2183         uint32_t mrqc;
2184         uint64_t hf = rss_conf->rss_hf;
2185         int i;
2186
2187         PMD_INIT_FUNC_TRACE();
2188
2189         if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2190                                 FM10K_RSSRK_ENTRIES_PER_REG))
2191                 return -EINVAL;
2192
2193         if (hf == 0)
2194                 return -EINVAL;
2195
2196         mrqc = 0;
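             /* translate the requested DPDK RSS flags into MRQC register bits */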
2197         mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
2198         mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
2199         mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
2200         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
2201         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
2202         mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
2203         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
2204         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
2205         mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
2206
2207         /* If the flags map to no supported hash function, return */
2208         if (mrqc == 0)
2209                 return -EINVAL;
2210
2211         if (key != NULL)
2212                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2213                         FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
2214
2215         FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
2216
2217         return 0;
2218 }
2219
2220 static int
2221 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
2222         struct rte_eth_rss_conf *rss_conf)
2223 {
2224         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2225         uint32_t *key = (uint32_t *)rss_conf->rss_key;
2226         uint32_t mrqc;
2227         uint64_t hf;
2228         int i;
2229
2230         PMD_INIT_FUNC_TRACE();
2231
2232         if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2233                                 FM10K_RSSRK_ENTRIES_PER_REG))
2234                 return -EINVAL;
2235
2236         if (key != NULL)
2237                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2238                         key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
2239
2240         mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
2241         hf = 0;
2242         hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
2243         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
2244         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
2245         hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
2246         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
2247         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
2248         hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
2249         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
2250         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
2251
2252         rss_conf->rss_hf = hf;
2253
2254         return 0;
2255 }
2256
2257 static void
2258 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
2259 {
2260         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2261         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2262
2263         /* Bind all local non-queue interrupt to vector 0 */
2264         int_map |= FM10K_MISC_VEC_ID;
2265
2266         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2267         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2268         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2269         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2270         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2271         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2272
2273         /* Enable misc causes */
2274         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
2275                                 FM10K_EIMR_ENABLE(THI_FAULT) |
2276                                 FM10K_EIMR_ENABLE(FUM_FAULT) |
2277                                 FM10K_EIMR_ENABLE(MAILBOX) |
2278                                 FM10K_EIMR_ENABLE(SWITCHREADY) |
2279                                 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
2280                                 FM10K_EIMR_ENABLE(SRAMERROR) |
2281                                 FM10K_EIMR_ENABLE(VFLR));
2282
2283         /* Enable ITR 0 */
2284         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2285                                         FM10K_ITR_MASK_CLEAR);
2286         FM10K_WRITE_FLUSH(hw);
2287 }
2288
2289 static void
2290 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
2291 {
2292         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2293         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2294
2295         int_map |= FM10K_MISC_VEC_ID;
2296
2297         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2298         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2299         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2300         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2301         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2302         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2303
2304         /* Disable misc causes */
2305         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
2306                                 FM10K_EIMR_DISABLE(THI_FAULT) |
2307                                 FM10K_EIMR_DISABLE(FUM_FAULT) |
2308                                 FM10K_EIMR_DISABLE(MAILBOX) |
2309                                 FM10K_EIMR_DISABLE(SWITCHREADY) |
2310                                 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
2311                                 FM10K_EIMR_DISABLE(SRAMERROR) |
2312                                 FM10K_EIMR_DISABLE(VFLR));
2313
2314         /* Disable ITR 0 */
2315         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
2316         FM10K_WRITE_FLUSH(hw);
2317 }
2318
2319 static void
2320 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
2321 {
2322         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2323         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2324
2325         /* Bind all local non-queue interrupt to vector 0 */
2326         int_map |= FM10K_MISC_VEC_ID;
2327
2328         /* Only INT 0 is available; the other 15 are reserved. */
2329         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2330
2331         /* Enable ITR 0 */
2332         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2333                                         FM10K_ITR_MASK_CLEAR);
2334         FM10K_WRITE_FLUSH(hw);
2335 }
2336
2337 static void
2338 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
2339 {
2340         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2341         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2342
2343         int_map |= FM10K_MISC_VEC_ID;
2344
2345         /* Only INT 0 is available; the other 15 are reserved. */
2346         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2347
2348         /* Disable ITR 0 */
2349         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
2350         FM10K_WRITE_FLUSH(hw);
2351 }
2352
2353 static int
2354 fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2355 {
2356         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2357         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2358
2359         /* Enable ITR */
2360         if (hw->mac.type == fm10k_mac_pf)
2361                 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2362                         FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2363         else
2364                 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2365                         FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2366         rte_intr_enable(&pdev->intr_handle);
2367         return 0;
2368 }
2369
2370 static int
2371 fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2372 {
2373         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2374         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2375
2376         /* Disable ITR */
2377         if (hw->mac.type == fm10k_mac_pf)
2378                 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2379                         FM10K_ITR_MASK_SET);
2380         else
2381                 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2382                         FM10K_ITR_MASK_SET);
2383         return 0;
2384 }
2385
2386 static int
2387 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2388 {
2389         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2390         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2391         struct rte_intr_handle *intr_handle = &pdev->intr_handle;
2392         uint32_t intr_vector, vec;
2393         uint16_t queue_id;
2394         int result = 0;
2395
2396         /* fm10k needs a separate interrupt for the mailbox,
2397          * so only drivers that support multiple interrupt vectors,
2398          * e.g. vfio-pci, can work in fm10k interrupt mode
2399          */
2400         if (!rte_intr_cap_multiple(intr_handle) ||
2401                         dev->data->dev_conf.intr_conf.rxq == 0)
2402                 return result;
2403
2404         intr_vector = dev->data->nb_rx_queues;
2405
2406         /* disable interrupt first */
2407         rte_intr_disable(intr_handle);
2408         if (hw->mac.type == fm10k_mac_pf)
2409                 fm10k_dev_disable_intr_pf(dev);
2410         else
2411                 fm10k_dev_disable_intr_vf(dev);
2412
2413         if (rte_intr_efd_enable(intr_handle, intr_vector)) {
2414                 PMD_INIT_LOG(ERR, "Failed to init event fd");
2415                 result = -EIO;
2416         }
2417
2418         if (rte_intr_dp_is_en(intr_handle) && !result) {
2419                 intr_handle->intr_vec = rte_zmalloc("intr_vec",
2420                         dev->data->nb_rx_queues * sizeof(int), 0);
2421                 if (intr_handle->intr_vec) {
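                             /* map each Rx queue to a vector; once vectors
                              * are exhausted, the remaining queues share the
                              * last one
                              */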
2422                         for (queue_id = 0, vec = FM10K_RX_VEC_START;
2423                                         queue_id < dev->data->nb_rx_queues;
2424                                         queue_id++) {
2425                                 intr_handle->intr_vec[queue_id] = vec;
2426                                 if (vec < intr_handle->nb_efd - 1
2427                                                 + FM10K_RX_VEC_START)
2428                                         vec++;
2429                         }
2430                 } else {
2431                         PMD_INIT_LOG(ERR, "Failed to allocate intr_vec for"
2432                                 " %d rx_queues", dev->data->nb_rx_queues);
2433                         rte_intr_efd_disable(intr_handle);
2434                         result = -ENOMEM;
2435                 }
2436         }
2437
2438         if (hw->mac.type == fm10k_mac_pf)
2439                 fm10k_dev_enable_intr_pf(dev);
2440         else
2441                 fm10k_dev_enable_intr_vf(dev);
2442         rte_intr_enable(intr_handle);
2443         hw->mac.ops.update_int_moderator(hw);
2444         return result;
2445 }
2446
2447 static int
2448 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
2449 {
2450         struct fm10k_fault fault;
2451         int err;
2452         const char *estr = "Unknown error";
2453
2454         /* Process PCA fault */
2455         if (eicr & FM10K_EICR_PCA_FAULT) {
2456                 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
2457                 if (err)
2458                         goto error;
2459                 switch (fault.type) {
2460                 case PCA_NO_FAULT:
2461                         estr = "PCA_NO_FAULT"; break;
2462                 case PCA_UNMAPPED_ADDR:
2463                         estr = "PCA_UNMAPPED_ADDR"; break;
2464                 case PCA_BAD_QACCESS_PF:
2465                         estr = "PCA_BAD_QACCESS_PF"; break;
2466                 case PCA_BAD_QACCESS_VF:
2467                         estr = "PCA_BAD_QACCESS_VF"; break;
2468                 case PCA_MALICIOUS_REQ:
2469                         estr = "PCA_MALICIOUS_REQ"; break;
2470                 case PCA_POISONED_TLP:
2471                         estr = "PCA_POISONED_TLP"; break;
2472                 case PCA_TLP_ABORT:
2473                         estr = "PCA_TLP_ABORT"; break;
2474                 default:
2475                         goto error;
2476                 }
2477                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2478                         estr, fault.func ? "VF" : "PF", fault.func,
2479                         fault.address, fault.specinfo);
2480         }
2481
2482         /* Process THI fault */
2483         if (eicr & FM10K_EICR_THI_FAULT) {
2484                 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
2485                 if (err)
2486                         goto error;
2487                 switch (fault.type) {
2488                 case THI_NO_FAULT:
2489                         estr = "THI_NO_FAULT"; break;
2490                 case THI_MAL_DIS_Q_FAULT:
2491                         estr = "THI_MAL_DIS_Q_FAULT"; break;
2492                 default:
2493                         goto error;
2494                 }
2495                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2496                         estr, fault.func ? "VF" : "PF", fault.func,
2497                         fault.address, fault.specinfo);
2498         }
2499
2500         /* Process FUM fault */
2501         if (eicr & FM10K_EICR_FUM_FAULT) {
2502                 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
2503                 if (err)
2504                         goto error;
2505                 switch (fault.type) {
2506                 case FUM_NO_FAULT:
2507                         estr = "FUM_NO_FAULT"; break;
2508                 case FUM_UNMAPPED_ADDR:
2509                         estr = "FUM_UNMAPPED_ADDR"; break;
2510                 case FUM_POISONED_TLP:
2511                         estr = "FUM_POISONED_TLP"; break;
2512                 case FUM_BAD_VF_QACCESS:
2513                         estr = "FUM_BAD_VF_QACCESS"; break;
2514                 case FUM_ADD_DECODE_ERR:
2515                         estr = "FUM_ADD_DECODE_ERR"; break;
2516                 case FUM_RO_ERROR:
2517                         estr = "FUM_RO_ERROR"; break;
2518                 case FUM_QPRC_CRC_ERROR:
2519                         estr = "FUM_QPRC_CRC_ERROR"; break;
2520                 case FUM_CSR_TIMEOUT:
2521                         estr = "FUM_CSR_TIMEOUT"; break;
2522                 case FUM_INVALID_TYPE:
2523                         estr = "FUM_INVALID_TYPE"; break;
2524                 case FUM_INVALID_LENGTH:
2525                         estr = "FUM_INVALID_LENGTH"; break;
2526                 case FUM_INVALID_BE:
2527                         estr = "FUM_INVALID_BE"; break;
2528                 case FUM_INVALID_ALIGN:
2529                         estr = "FUM_INVALID_ALIGN"; break;
2530                 default:
2531                         goto error;
2532                 }
2533                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2534                         estr, fault.func ? "VF" : "PF", fault.func,
2535                         fault.address, fault.specinfo);
2536         }
2537
2538         return 0;
2539 error:
2540         PMD_INIT_LOG(ERR, "Failed to handle fault event.");
2541         return err;
2542 }
2543
2544 /**
2545  * PF interrupt handler triggered by the NIC to handle device-specific interrupts.
2546  *
2547  * @param param
2548  *  The address of the parameter (struct rte_eth_dev *) registered before.
2551  *
2552  * @return
2553  *  void
2554  */
2555 static void
2556 fm10k_dev_interrupt_handler_pf(void *param)
2557 {
2558         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2559         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2560         uint32_t cause, status;
2561         struct fm10k_dev_info *dev_info =
2562                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2563         int status_mbx;
2564         s32 err;
2565
2566         if (hw->mac.type != fm10k_mac_pf)
2567                 return;
2568
2569         cause = FM10K_READ_REG(hw, FM10K_EICR);
2570
2571         /* Handle PCI fault cases */
2572         if (cause & FM10K_EICR_FAULT_MASK) {
2573                 PMD_INIT_LOG(ERR, "INT: fault detected!");
2574                 fm10k_dev_handle_fault(hw, cause);
2575         }
2576
2577         /* Handle switch up/down */
2578         if (cause & FM10K_EICR_SWITCHNOTREADY)
2579                 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
2580
2581         if (cause & FM10K_EICR_SWITCHREADY) {
2582                 PMD_INIT_LOG(INFO, "INT: Switch is ready");
2583                 if (dev_info->sm_down == 1) {
2584                         fm10k_mbx_lock(hw);
2585
2586                         /* For recreating logical ports */
2587                         status_mbx = hw->mac.ops.update_lport_state(hw,
2588                                         hw->mac.dglort_map, MAX_LPORT_NUM, 1);
2589                         if (status_mbx == FM10K_SUCCESS)
2590                                 PMD_INIT_LOG(INFO,
2591                                         "INT: Recreated Logical port");
2592                         else
2593                                 PMD_INIT_LOG(INFO,
2594                                         "INT: Logical ports weren't recreated");
2595
2596                         status_mbx = hw->mac.ops.update_xcast_mode(hw,
2597                                 hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2598                         if (status_mbx != FM10K_SUCCESS)
2599                                 PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2600
2601                         fm10k_mbx_unlock(hw);
2602
2603                         /* first clear the internal SW recording structure */
2604                         if (!(dev->data->dev_conf.rxmode.mq_mode &
2605                                                 ETH_MQ_RX_VMDQ_FLAG))
2606                                 fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2607                                         false);
2608
2609                         fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2610                                         MAIN_VSI_POOL_NUMBER);
2611
2612                         /*
2613                          * Add the default MAC address and VLAN for the
2614                          * logical ports that have been created, leaving it
2615                          * to the application to fully recover Rx filtering.
2616                          */
2617                         fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2618                                         MAIN_VSI_POOL_NUMBER);
2619
2620                         if (!(dev->data->dev_conf.rxmode.mq_mode &
2621                                                 ETH_MQ_RX_VMDQ_FLAG))
2622                                 fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2623                                         true);
2624
2625                         dev_info->sm_down = 0;
2626                         _rte_eth_dev_callback_process(dev,
2627                                         RTE_ETH_EVENT_INTR_LSC,
2628                                         NULL, NULL);
2629                 }
2630         }
2631
2632         /* Handle mailbox message */
2633         fm10k_mbx_lock(hw);
2634         err = hw->mbx.ops.process(hw, &hw->mbx);
2635         fm10k_mbx_unlock(hw);
2636
2637         if (err == FM10K_ERR_RESET_REQUESTED) {
2638                 PMD_INIT_LOG(INFO, "INT: Switch is down");
2639                 dev_info->sm_down = 1;
2640                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2641                                 NULL, NULL);
2642         }
2643
2644         /* Handle SRAM error */
2645         if (cause & FM10K_EICR_SRAMERROR) {
2646                 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
2647
2648                 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
2649                 /* Write to clear pending bits */
2650                 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
2651
2652                 /* TODO: print out error message after shared code updates */
2653         }
2654
2655         /* Clear these 3 events if any are pending */
2656         cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
2657                  FM10K_EICR_SWITCHREADY;
2658         if (cause)
2659                 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
2660
2661         /* Re-enable interrupt from device side */
2662         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2663                                         FM10K_ITR_MASK_CLEAR);
2664         /* Re-enable interrupt from host side */
2665         rte_intr_enable(dev->intr_handle);
2666 }
2667
2668 /**
2669  * VF interrupt handler triggered by the NIC to handle device-specific interrupts.
2670  *
2671  * @param param
2672  *  The address of the parameter (struct rte_eth_dev *) registered before.
2675  *
2676  * @return
2677  *  void
2678  */
2679 static void
2680 fm10k_dev_interrupt_handler_vf(void *param)
2681 {
2682         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2683         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2684         struct fm10k_mbx_info *mbx = &hw->mbx;
2685         struct fm10k_dev_info *dev_info =
2686                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
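             /* snapshot the mailbox state so a transition caused by
              * mbx.ops.process() below can be detected
              */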
2687         const enum fm10k_mbx_state state = mbx->state;
2688         int status_mbx;
2689
2690         if (hw->mac.type != fm10k_mac_vf)
2691                 return;
2692
2693         /* Process mailbox messages under the mailbox lock */
2694         fm10k_mbx_lock(hw);
2695         hw->mbx.ops.process(hw, &hw->mbx);
2696         fm10k_mbx_unlock(hw);
2697
2698         if (state == FM10K_STATE_OPEN && mbx->state == FM10K_STATE_CONNECT) {
2699                 PMD_INIT_LOG(INFO, "INT: Switch has gone down");
2700
2701                 fm10k_mbx_lock(hw);
2702                 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
2703                                 MAX_LPORT_NUM, 1);
2704                 fm10k_mbx_unlock(hw);
2705
2706                 /* Setting reset flag */
2707                 dev_info->sm_down = 1;
2708                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2709                                 NULL, NULL);
2710         }
2711
2712         if (dev_info->sm_down == 1 &&
2713                         hw->mac.dglort_map == FM10K_DGLORTMAP_ZERO) {
2714                 PMD_INIT_LOG(INFO, "INT: Switch has gone up");
2715                 fm10k_mbx_lock(hw);
2716                 status_mbx = hw->mac.ops.update_xcast_mode(hw,
2717                                 hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2718                 if (status_mbx != FM10K_SUCCESS)
2719                         PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2720                 fm10k_mbx_unlock(hw);
2721
2722                 /* first clear the internal SW recording structure */
2723                 fm10k_vlan_filter_set(dev, hw->mac.default_vid, false);
2724                 fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2725                                 MAIN_VSI_POOL_NUMBER);
2726
2727                 /*
2728                  * Add the default MAC address and VLAN for the logical ports
2729                  * that have been created, leaving it to the application to
2730                  * fully recover Rx filtering.
2731                  */
2732                 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2733                                 MAIN_VSI_POOL_NUMBER);
2734                 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
2735
2736                 dev_info->sm_down = 0;
2737                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2738                                 NULL, NULL);
2739         }
2740
2741         /* Re-enable interrupt from device side */
2742         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2743                                         FM10K_ITR_MASK_CLEAR);
2744         /* Re-enable interrupt from host side */
2745         rte_intr_enable(dev->intr_handle);
2746 }
2747
2748 /* Mailbox message handler in VF */
2749 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
2750         FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
2751         FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
2752         FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
2753         FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2754 };
2755
2756 static int
2757 fm10k_setup_mbx_service(struct fm10k_hw *hw)
2758 {
2759         int err = 0;
2760
2761         /* Initialize mailbox lock */
2762         fm10k_mbx_initlock(hw);
2763
2764         /* Replace default message handler with new ones */
2765         if (hw->mac.type == fm10k_mac_vf)
2766                 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2767
2768         if (err) {
2769                 PMD_INIT_LOG(ERR, "Failed to register mailbox handler. err: %d",
2770                                 err);
2771                 return err;
2772         }
2773         /* Connect to the SM (on a PF) or to the PF (on a VF) */
2774         return hw->mbx.ops.connect(hw, &hw->mbx);
2775 }
2776
2777 static void
2778 fm10k_close_mbx_service(struct fm10k_hw *hw)
2779 {
2780         /* Disconnect from the SM (on a PF) or from the PF (on a VF) */
2781         hw->mbx.ops.disconnect(hw, &hw->mbx);
2782 }
2783
2784 static const struct eth_dev_ops fm10k_eth_dev_ops = {
2785         .dev_configure          = fm10k_dev_configure,
2786         .dev_start              = fm10k_dev_start,
2787         .dev_stop               = fm10k_dev_stop,
2788         .dev_close              = fm10k_dev_close,
2789         .promiscuous_enable     = fm10k_dev_promiscuous_enable,
2790         .promiscuous_disable    = fm10k_dev_promiscuous_disable,
2791         .allmulticast_enable    = fm10k_dev_allmulticast_enable,
2792         .allmulticast_disable   = fm10k_dev_allmulticast_disable,
2793         .stats_get              = fm10k_stats_get,
2794         .xstats_get             = fm10k_xstats_get,
2795         .xstats_get_names       = fm10k_xstats_get_names,
2796         .stats_reset            = fm10k_stats_reset,
2797         .xstats_reset           = fm10k_stats_reset,
2798         .link_update            = fm10k_link_update,
2799         .dev_infos_get          = fm10k_dev_infos_get,
2800         .dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
2801         .vlan_filter_set        = fm10k_vlan_filter_set,
2802         .vlan_offload_set       = fm10k_vlan_offload_set,
2803         .mac_addr_add           = fm10k_macaddr_add,
2804         .mac_addr_remove        = fm10k_macaddr_remove,
2805         .rx_queue_start         = fm10k_dev_rx_queue_start,
2806         .rx_queue_stop          = fm10k_dev_rx_queue_stop,
2807         .tx_queue_start         = fm10k_dev_tx_queue_start,
2808         .tx_queue_stop          = fm10k_dev_tx_queue_stop,
2809         .rx_queue_setup         = fm10k_rx_queue_setup,
2810         .rx_queue_release       = fm10k_rx_queue_release,
2811         .tx_queue_setup         = fm10k_tx_queue_setup,
2812         .tx_queue_release       = fm10k_tx_queue_release,
2813         .rx_descriptor_done     = fm10k_dev_rx_descriptor_done,
2814         .rx_queue_intr_enable   = fm10k_dev_rx_queue_intr_enable,
2815         .rx_queue_intr_disable  = fm10k_dev_rx_queue_intr_disable,
2816         .reta_update            = fm10k_reta_update,
2817         .reta_query             = fm10k_reta_query,
2818         .rss_hash_update        = fm10k_rss_hash_update,
2819         .rss_hash_conf_get      = fm10k_rss_hash_conf_get,
2820 };
2821
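/* kvargs callback used by fm10k_check_ftag(): accept only the literal
 * value "1" for the enable_ftag key; anything else rejects FTAG.
 */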
2822 static int ftag_check_handler(__rte_unused const char *key,
2823                 const char *value, __rte_unused void *opaque)
2824 {
2825         if (strcmp(value, "1"))
2826                 return -1;
2827
2828         return 0;
2829 }
2830
2831 static int
2832 fm10k_check_ftag(struct rte_devargs *devargs)
2833 {
2834         struct rte_kvargs *kvlist;
2835         const char *ftag_key = "enable_ftag";
2836
2837         if (devargs == NULL)
2838                 return 0;
2839
2840         kvlist = rte_kvargs_parse(devargs->args, NULL);
2841         if (kvlist == NULL)
2842                 return 0;
2843
2844         if (!rte_kvargs_count(kvlist, ftag_key)) {
2845                 rte_kvargs_free(kvlist);
2846                 return 0;
2847         }
2848         /* FTAG is enabled only when the key-value pair enable_ftag=1 is present */
2849         if (rte_kvargs_process(kvlist, ftag_key,
2850                                 ftag_check_handler, NULL) < 0) {
2851                 rte_kvargs_free(kvlist);
2852                 return 0;
2853         }
2854         rte_kvargs_free(kvlist);
2855
2856         return 1;
2857 }
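
/*
 * For illustration only: FTAG would typically be requested through a
 * device argument appended to the PCI address on the EAL command line,
 * e.g. (the PCI address below is a placeholder):
 *
 *     testpmd -w 0000:84:00.0,enable_ftag=1 -- -i
 *
 * fm10k_check_ftag() returns 1 only when exactly this key-value pair is
 * present.
 */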
2858
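/* Vector Tx entry point: transmit in chunks of at most rs_thresh packets,
 * since fm10k_xmit_fixed_burst_vec() handles up to rs_thresh per call;
 * stop early when a chunk is only partially accepted (ring is full).
 */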
2859 static uint16_t
2860 fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
2861                     uint16_t nb_pkts)
2862 {
2863         uint16_t nb_tx = 0;
2864         struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
2865
2866         while (nb_pkts) {
2867                 uint16_t ret, num;
2868
2869                 num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
2870                 ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
2871                                                  num);
2872                 nb_tx += ret;
2873                 nb_pkts -= ret;
2874                 if (ret < num)
2875                         break;
2876         }
2877
2878         return nb_tx;
2879 }
2880
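/* Select the Tx burst function: vector Tx is chosen only when every queue
 * satisfies the vector conditions; the FTAG setting is recorded per queue.
 */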
2881 static void __attribute__((cold))
2882 fm10k_set_tx_function(struct rte_eth_dev *dev)
2883 {
2884         struct fm10k_tx_queue *txq;
2885         int i;
2886         int use_sse = 1;
2887         uint16_t tx_ftag_en = 0;
2888
2889         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2890                 /* the primary process has already set the ftag flag and txq_flags */
2891                 txq = dev->data->tx_queues[0];
2892                 if (fm10k_tx_vec_condition_check(txq)) {
2893                         dev->tx_pkt_burst = fm10k_xmit_pkts;
2894                         dev->tx_pkt_prepare = fm10k_prep_pkts;
2895                         PMD_INIT_LOG(DEBUG, "Use regular Tx func");
2896                 } else {
2897                         PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2898                         dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2899                         dev->tx_pkt_prepare = NULL;
2900                 }
2901                 return;
2902         }
2903
2904         if (fm10k_check_ftag(dev->device->devargs))
2905                 tx_ftag_en = 1;
2906
2907         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2908                 txq = dev->data->tx_queues[i];
2909                 txq->tx_ftag_en = tx_ftag_en;
2910                 /* Check whether the conditions for vector Tx are met */
2911                 if (fm10k_tx_vec_condition_check(txq))
2912                         use_sse = 0;
2913         }
2914
2915         if (use_sse) {
2916                 PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2917                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2918                         txq = dev->data->tx_queues[i];
2919                         fm10k_txq_vec_setup(txq);
2920                 }
2921                 dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2922                 dev->tx_pkt_prepare = NULL;
2923         } else {
2924                 dev->tx_pkt_burst = fm10k_xmit_pkts;
2925                 dev->tx_pkt_prepare = fm10k_prep_pkts;
2926                 PMD_INIT_LOG(DEBUG, "Use regular Tx func");
2927         }
2928 }
2929
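/* Select the Rx burst function (vector vs. scalar, scattered vs. plain)
 * and, in the primary process, propagate the choice to every Rx queue.
 */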
2930 static void __attribute__((cold))
2931 fm10k_set_rx_function(struct rte_eth_dev *dev)
2932 {
2933         struct fm10k_dev_info *dev_info =
2934                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2935         uint16_t i, rx_using_sse;
2936         uint16_t rx_ftag_en = 0;
2937
2938         if (fm10k_check_ftag(dev->device->devargs))
2939                 rx_ftag_en = 1;
2940
2941         /* Vector Rx is used only when the per-device condition check passes,
2942          * Rx vector mode is allowed, and FTAG Rx is disabled.
2943          */
2944         if (!fm10k_rx_vec_condition_check(dev) &&
2945                         dev_info->rx_vec_allowed && !rx_ftag_en) {
2946                 if (dev->data->scattered_rx)
2947                         dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
2948                 else
2949                         dev->rx_pkt_burst = fm10k_recv_pkts_vec;
2950         } else if (dev->data->scattered_rx)
2951                 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
2952         else
2953                 dev->rx_pkt_burst = fm10k_recv_pkts;
2954
2955         rx_using_sse =
2956                 (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
2957                 dev->rx_pkt_burst == fm10k_recv_pkts_vec);
2958
2959         if (rx_using_sse)
2960                 PMD_INIT_LOG(DEBUG, "Use vector Rx func");
2961         else
2962                 PMD_INIT_LOG(DEBUG, "Use regular Rx func");
2963
2964         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2965                 return;
2966
2967         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2968                 struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
2969
2970                 rxq->rx_using_sse = rx_using_sse;
2971                 rxq->rx_ftag_en = rx_ftag_en;
2972         }
2973 }
2974
2975 static void
2976 fm10k_params_init(struct rte_eth_dev *dev)
2977 {
2978         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2979         struct fm10k_dev_info *info =
2980                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2981
2982         /* Initialize bus info. Normally we would call fm10k_get_bus_info(), but
2983          * there is no way to get link status without reading BAR4. Until this
2984          * works, assume we have maximum bandwidth.
2985          * @todo - fix bus info
2986          */
2987         hw->bus_caps.speed = fm10k_bus_speed_8000;
2988         hw->bus_caps.width = fm10k_bus_width_pcie_x8;
2989         hw->bus_caps.payload = fm10k_bus_payload_512;
2990         hw->bus.speed = fm10k_bus_speed_8000;
2991         hw->bus.width = fm10k_bus_width_pcie_x8;
2992         hw->bus.payload = fm10k_bus_payload_256;
2993
2994         info->rx_vec_allowed = true;
2995 }
2996
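/* Per-device init: bind dev ops and burst functions, bring up the shared
 * code, hardware and mailbox, enable interrupts, wait for the switch
 * manager and default VID, then install the default MAC filter.
 */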
2997 static int
2998 eth_fm10k_dev_init(struct rte_eth_dev *dev)
2999 {
3000         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3001         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
3002         struct rte_intr_handle *intr_handle = &pdev->intr_handle;
3003         int diag, i;
3004         struct fm10k_macvlan_filter_info *macvlan;
3005
3006         PMD_INIT_FUNC_TRACE();
3007
3008         dev->dev_ops = &fm10k_eth_dev_ops;
3009         dev->rx_pkt_burst = &fm10k_recv_pkts;
3010         dev->tx_pkt_burst = &fm10k_xmit_pkts;
3011         dev->tx_pkt_prepare = &fm10k_prep_pkts;
3012
3013         /*
3014          * The primary process does the whole initialization; secondary
3015          * processes just select the same Rx and Tx functions as the primary.
3016          */
3017         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3018                 fm10k_set_rx_function(dev);
3019                 fm10k_set_tx_function(dev);
3020                 return 0;
3021         }
3022
3023         rte_eth_copy_pci_info(dev, pdev);
3024
3025         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
3026         memset(macvlan, 0, sizeof(*macvlan));
3027         /* Vendor and Device ID need to be set before init of shared code */
3028         memset(hw, 0, sizeof(*hw));
3029         hw->device_id = pdev->id.device_id;
3030         hw->vendor_id = pdev->id.vendor_id;
3031         hw->subsystem_device_id = pdev->id.subsystem_device_id;
3032         hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
3033         hw->revision_id = 0;
3034         hw->hw_addr = (void *)pdev->mem_resource[0].addr;
3035         if (hw->hw_addr == NULL) {
3036                 PMD_INIT_LOG(ERR, "Bad mem resource."
3037                         " Try to blacklist unused devices.");
3038                 return -EIO;
3039         }
3040
3041         /* Store fm10k_adapter pointer */
3042         hw->back = dev->data->dev_private;
3043
3044         /* Initialize the shared code */
3045         diag = fm10k_init_shared_code(hw);
3046         if (diag != FM10K_SUCCESS) {
3047                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
3048                 return -EIO;
3049         }
3050
3051         /* Initialize parameters */
3052         fm10k_params_init(dev);
3053
3054         /* Initialize the hw */
3055         diag = fm10k_init_hw(hw);
3056         if (diag != FM10K_SUCCESS) {
3057                 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
3058                 return -EIO;
3059         }
3060
3061         /* Initialize MAC address(es) */
3062         dev->data->mac_addrs = rte_zmalloc("fm10k",
3063                         ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
3064         if (dev->data->mac_addrs == NULL) {
3065                 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
3066                 return -ENOMEM;
3067         }
3068
3069         diag = fm10k_read_mac_addr(hw);
3070
3071         ether_addr_copy((const struct ether_addr *)hw->mac.addr,
3072                         &dev->data->mac_addrs[0]);
3073
3074         if (diag != FM10K_SUCCESS ||
3075                 !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
3076
3077                 /* Generate a random addr */
3078                 eth_random_addr(hw->mac.addr);
3079                 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
3080                 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
3081                 &dev->data->mac_addrs[0]);
3082         }
3083
3084         /* Reset the hw statistics */
3085         fm10k_stats_reset(dev);
3086
3087         /* Reset the hw */
3088         diag = fm10k_reset_hw(hw);
3089         if (diag != FM10K_SUCCESS) {
3090                 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
3091                 return -EIO;
3092         }
3093
3094         /* Setup mailbox service */
3095         diag = fm10k_setup_mbx_service(hw);
3096         if (diag != FM10K_SUCCESS) {
3097                 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
3098                 return -EIO;
3099         }
3100
3101         /* PF and VF have different interrupt handling mechanisms */
3102         if (hw->mac.type == fm10k_mac_pf) {
3103                 /* register callback func to eal lib */
3104                 rte_intr_callback_register(intr_handle,
3105                         fm10k_dev_interrupt_handler_pf, (void *)dev);
3106
3107                 /* enable MISC interrupt */
3108                 fm10k_dev_enable_intr_pf(dev);
3109         } else { /* VF */
3110                 rte_intr_callback_register(intr_handle,
3111                         fm10k_dev_interrupt_handler_vf, (void *)dev);
3112
3113                 fm10k_dev_enable_intr_vf(dev);
3114         }
3115
3116         /* Enable interrupts only after the callback is registered */
3117         rte_intr_enable(intr_handle);
3118
3119         hw->mac.ops.update_int_moderator(hw);
3120
3121         /* Make sure Switch Manager is ready before going forward. */
3122         if (hw->mac.type == fm10k_mac_pf) {
3123                 int switch_ready = 0;
3124
3125                 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
3126                         fm10k_mbx_lock(hw);
3127                         hw->mac.ops.get_host_state(hw, &switch_ready);
3128                         fm10k_mbx_unlock(hw);
3129                         if (switch_ready)
3130                                 break;
3131                         /* Delay some time to acquire async LPORT_MAP info. */
3132                         rte_delay_us(WAIT_SWITCH_MSG_US);
3133                 }
3134
3135                 if (switch_ready == 0) {
3136                         PMD_INIT_LOG(ERR, "switch is not ready");
3137                         return -1;
3138                 }
3139         }
3140
3141         /*
3142          * The calls below operate on the mailbox, so take the lock to avoid
3143          * racing with the interrupt handler. Writes to the mailbox FIFO raise
3144          * an interrupt to the PF/SM; its handler processes the message and
3145          * raises an interrupt back to our side, which again touches the
3146          * mailbox FIFO.
3147          */
3148         fm10k_mbx_lock(hw);
3149         /* Enable port first */
3150         hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
3151                                         MAX_LPORT_NUM, 1);
3152
3153         /* Set unicast mode by default. The application can switch to another
3154          * mode through other API functions.
3155          */
3156         hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
3157                                         FM10K_XCAST_MODE_NONE);
3158
3159         fm10k_mbx_unlock(hw);
3160
3161         /* Make sure default VID is ready before going forward. */
3162         if (hw->mac.type == fm10k_mac_pf) {
3163                 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
3164                         if (hw->mac.default_vid)
3165                                 break;
3166                         /* Delay some time to acquire async port VLAN info. */
3167                         rte_delay_us(WAIT_SWITCH_MSG_US);
3168                 }
3169
3170                 if (!hw->mac.default_vid) {
3171                         PMD_INIT_LOG(ERR, "default VID is not ready");
3172                         return -1;
3173                 }
3174         }
3175
3176         /* Add the default MAC address */
3177         fm10k_MAC_filter_set(dev, hw->mac.addr, true,
3178                 MAIN_VSI_POOL_NUMBER);
3179
3180         return 0;
3181 }
3182
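/* Per-device teardown (primary process only): close the device, disable
 * and unregister interrupts, and release the MAC address table.
 */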
3183 static int
3184 eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
3185 {
3186         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3187         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
3188         struct rte_intr_handle *intr_handle = &pdev->intr_handle;
3189         PMD_INIT_FUNC_TRACE();
3190
3191         /* only uninitialize in the primary process */
3192         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3193                 return 0;
3194
3195         /* it is now safe to close the device */
3196         fm10k_dev_close(dev);
3197
3198         dev->dev_ops = NULL;
3199         dev->rx_pkt_burst = NULL;
3200         dev->tx_pkt_burst = NULL;
3201
3202         /* disable uio/vfio intr */
3203         rte_intr_disable(intr_handle);
3204
3205         /* PF and VF have different interrupt handling mechanisms */
3206         if (hw->mac.type == fm10k_mac_pf) {
3207                 /* disable interrupt */
3208                 fm10k_dev_disable_intr_pf(dev);
3209
3210                 /* unregister callback func from eal lib */
3211                 rte_intr_callback_unregister(intr_handle,
3212                         fm10k_dev_interrupt_handler_pf, (void *)dev);
3213         } else {
3214                 /* disable interrupt */
3215                 fm10k_dev_disable_intr_vf(dev);
3216
3217                 rte_intr_callback_unregister(intr_handle,
3218                         fm10k_dev_interrupt_handler_vf, (void *)dev);
3219         }
3220
3221         /* free MAC address memory */
3222         if (dev->data->mac_addrs) {
3223                 rte_free(dev->data->mac_addrs);
3224                 dev->data->mac_addrs = NULL;
3225         }
3226
3227         memset(hw, 0, sizeof(*hw));
3228
3229         return 0;
3230 }
3231
3232 static int eth_fm10k_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3233         struct rte_pci_device *pci_dev)
3234 {
3235         return rte_eth_dev_pci_generic_probe(pci_dev,
3236                 sizeof(struct fm10k_adapter), eth_fm10k_dev_init);
3237 }
3238
3239 static int eth_fm10k_pci_remove(struct rte_pci_device *pci_dev)
3240 {
3241         return rte_eth_dev_pci_generic_remove(pci_dev, eth_fm10k_dev_uninit);
3242 }
3243
3244 /*
3245  * The set of PCI devices this driver supports. This driver enables both PF
3246  * and SR-IOV VF devices.
3247  */
3248 static const struct rte_pci_id pci_id_fm10k_map[] = {
3249         { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) },
3250         { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) },
3251         { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) },
3252         { .vendor_id = 0, /* sentinel */ },
3253 };
3254
3255 static struct rte_pci_driver rte_pmd_fm10k = {
3256         .id_table = pci_id_fm10k_map,
3257         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
3258                      RTE_PCI_DRV_IOVA_AS_VA,
3259         .probe = eth_fm10k_pci_probe,
3260         .remove = eth_fm10k_pci_remove,
3261 };
3262
3263 RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
3264 RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
3265 RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");