/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>

#include "fm10k.h"
#include "base/fm10k_api.h"

/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL

#define MAIN_VSI_POOL_NUMBER 0

/* Maximum number of attempts to query switch state */
#define MAX_QUERY_SWITCH_STATE_TIMES 10
/* Wait interval between switch state queries */
#define WAIT_SWITCH_MSG_US    100000
/* Number of chars (bytes) in a uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
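/* Worked out: CHARS_PER_UINT32 == 4, so BIT_MASK_PER_UINT32 evaluates to
 * (1 << 4) - 1 == 0xF, i.e. one mask bit per byte of a uint32_t.
 */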

static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
        const u8 *mac, bool add, uint32_t pool);
static void fm10k_tx_queue_release(void *queue);
static void fm10k_rx_queue_release(void *queue);
static void fm10k_set_rx_function(struct rte_eth_dev *dev);
static void fm10k_set_tx_function(struct rte_eth_dev *dev);

struct fm10k_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
        {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
        {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
        {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
        {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
        {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
        {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
        {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
        {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
                nodesc_drop)},
};

#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
                sizeof(fm10k_hw_stats_strings[0]))

static const struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
        {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
        {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
        {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
};

#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
                sizeof(fm10k_hw_stats_rx_q_strings[0]))

static const struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
        {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
        {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
};

#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
                sizeof(fm10k_hw_stats_tx_q_strings[0]))

#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
                (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))

static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
        rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

static void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
        while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
                rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}

static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
        rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

/* Stubs needed for linkage when vPMD is disabled */
int __attribute__((weak))
fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
{
        return -1;
}

uint16_t __attribute__((weak))
fm10k_recv_pkts_vec(
        __rte_unused void *rx_queue,
        __rte_unused struct rte_mbuf **rx_pkts,
        __rte_unused uint16_t nb_pkts)
{
        return 0;
}

uint16_t __attribute__((weak))
fm10k_recv_scattered_pkts_vec(
                __rte_unused void *rx_queue,
                __rte_unused struct rte_mbuf **rx_pkts,
                __rte_unused uint16_t nb_pkts)
{
        return 0;
}

int __attribute__((weak))
fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
{
        return -1;
}

void __attribute__((weak))
fm10k_rx_queue_release_mbufs_vec(
                __rte_unused struct fm10k_rx_queue *rxq)
{
        return;
}

void __attribute__((weak))
fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
{
        return;
}

int __attribute__((weak))
fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
{
        return -1;
}

uint16_t __attribute__((weak))
fm10k_xmit_pkts_vec(__rte_unused void *tx_queue,
                __rte_unused struct rte_mbuf **tx_pkts,
                __rte_unused uint16_t nb_pkts)
{
        return 0;
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device.
 * return 0 on success
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
        static const union fm10k_rx_desc zero = {{0} };
        uint64_t dma_addr;
        int i, diag;
        PMD_INIT_FUNC_TRACE();

        diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
        if (diag != 0)
                return -ENOMEM;

        for (i = 0; i < q->nb_desc; ++i) {
                fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
                if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
                        rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
                                                q->nb_desc);
                        return -EINVAL;
                }
                dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
                q->hw_ring[i].q.pkt_addr = dma_addr;
                q->hw_ring[i].q.hdr_addr = dma_addr;
        }

        /* initialize extra software ring entries. Space for these extra
         * entries is always allocated.
         */
        memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
        for (i = 0; i < q->nb_fake_desc; ++i) {
                q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
                q->hw_ring[q->nb_desc + i] = zero;
        }

        q->next_dd = 0;
        q->next_alloc = 0;
        q->next_trigger = q->alloc_thresh - 1;
        FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
        q->rxrearm_start = 0;
        q->rxrearm_nb = 0;

        return 0;
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device.
 */
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
        union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
        uint32_t i;
        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* zero faked descriptors */
        for (i = 0; i < q->nb_fake_desc; ++i)
                q->hw_ring[q->nb_desc + i] = zero;

        /* vPMD driver has a different way of releasing mbufs. */
        if (q->rx_using_sse) {
                fm10k_rx_queue_release_mbufs_vec(q);
                return;
        }

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}

/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
                rx_queue_clean(q);
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
                q = NULL;
        }
}

/*
 * disable RX queue, wait until HW has finished the necessary flush operation
 */
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
                        reg & ~FM10K_RXQCTL_ENABLE);

        /* Wait 100us at most */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
                if (!(reg & FM10K_RXQCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device
 */
static inline void
tx_queue_reset(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        q->last_free = 0;
        q->next_free = 0;
        q->nb_used = 0;
        q->nb_free = q->nb_desc - 1;
        fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
        FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device
 */
static inline void
tx_queue_clean(struct fm10k_tx_queue *q)
{
        struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
        uint32_t i;
        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}

/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
tx_queue_free(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
                tx_queue_clean(q);
                if (q->rs_tracker.list) {
                        rte_free(q->rs_tracker.list);
                        q->rs_tracker.list = NULL;
                }
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
                q = NULL;
        }
}

/*
 * disable TX queue, wait until HW has finished the necessary flush operation
 */
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
                        reg & ~FM10K_TXDCTL_ENABLE);

        /* Wait 100us at most */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
                if (!(reg & FM10K_TXDCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}

static int
fm10k_check_mq_mode(struct rte_eth_dev *dev)
{
        enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;

        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
                PMD_INIT_LOG(ERR, "DCB mode is not supported.");
                return -EINVAL;
        }

        if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
                return 0;

        if (hw->mac.type == fm10k_mac_vf) {
                PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
                return -EINVAL;
        }

        /* Check VMDQ queue pool number */
        if (vmdq_conf->nb_queue_pools >
                        sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
                        vmdq_conf->nb_queue_pools > nb_rx_q) {
                PMD_INIT_LOG(ERR, "Too many queue pools: %d",
                        vmdq_conf->nb_queue_pools);
                return -EINVAL;
        }

        return 0;
}

static const struct fm10k_txq_ops def_txq_ops = {
        .reset = tx_queue_reset,
};

static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
        int ret;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
                PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
        /* multiple queue mode checking */
        ret = fm10k_check_mq_mode(dev);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
                            ret);
                return ret;
        }

        return 0;
}

/* fls = find last set bit = 32 minus the number of leading zeros */
#ifndef fls
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
#endif
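/* For example, fls(0x1) == 1, fls(0x8) == 4 and fls(0x80000000) == 32,
 * i.e. fls() returns the 1-based index of the highest set bit. A useful
 * consequence is that fls(n - 1) is the number of bits needed to encode
 * n values, which is how the DGLORT decoder setup below uses it.
 */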

static void
fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        uint32_t i;

        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
                if (!vmdq_conf->pool_map[i].pools)
                        continue;
                fm10k_mbx_lock(hw);
                fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
                fm10k_mbx_unlock(hw);
        }
}

static void
fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Add default mac address */
        fm10k_MAC_filter_set(dev, hw->mac.addr, true,
                MAIN_VSI_POOL_NUMBER);
}

static void
fm10k_dev_rss_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        uint32_t mrqc, *key, i, reta, j;
        uint64_t hf;

#define RSS_KEY_SIZE 40
        static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
                0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
                0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
                0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
        };

        if (dev->data->nb_rx_queues == 1 ||
            dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
            dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
                return;

        /* random key is rss_intel_key (default) or user provided (rss_key) */
        if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
                key = (uint32_t *)rss_intel_key;
        else
                key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;

        /* Now fill our hash function seeds, 4 bytes at a time */
        for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
                FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

        /*
         * Fill in redirection table
         * The byte-swap is needed because NIC registers are in
         * little-endian order.
         */
        reta = 0;
        for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
                if (j == dev->data->nb_rx_queues)
                        j = 0;
                reta = (reta << CHAR_BIT) | j;
                if ((i & 3) == 3)
                        FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
                                        rte_bswap32(reta));
        }
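        /* The loop above packs four one-byte queue indices into each 32-bit
         * RETA register, writing one register every fourth iteration. With
         * 3 RX queues, for example, the first register holds the bytes
         * 0, 1, 2, 0, with queue indices repeating round-robin across the
         * whole FM10K_MAX_RSS_INDICES-entry table.
         */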

        /*
         * Generate RSS hash based on packet types, TCP/UDP
         * port numbers and/or IPv4/v6 src and dst addresses
         */
        hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
        mrqc = 0;
        mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
        mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;

        if (mrqc == 0) {
                PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not "
                        "supported", hf);
                return;
        }

        FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
}

static void
fm10k_dev_logic_port_update(struct rte_eth_dev *dev,
        uint16_t nb_lport_old, uint16_t nb_lport_new)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t i;

        fm10k_mbx_lock(hw);
        /* Disable previous logic ports */
        if (nb_lport_old)
                hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
                        nb_lport_old, false);
        /* Enable new logic ports */
        hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
                nb_lport_new, true);
        fm10k_mbx_unlock(hw);

        for (i = 0; i < nb_lport_new; i++) {
                /* Set unicast mode by default. The application can
                 * change it via other API functions.
                 */
                fm10k_mbx_lock(hw);
                hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
                        FM10K_XCAST_MODE_NONE);
                fm10k_mbx_unlock(hw);
        }
}

static void
fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        struct fm10k_macvlan_filter_info *macvlan;
        uint16_t nb_queue_pools = 0; /* pool number in configuration */
        uint16_t nb_lport_new, nb_lport_old;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        fm10k_dev_rss_configure(dev);

        /* only PF supports VMDQ */
        if (hw->mac.type != fm10k_mac_pf)
                return;

        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
                nb_queue_pools = vmdq_conf->nb_queue_pools;

        /* no pool number change, no need to update logic port and VLAN/MAC */
        if (macvlan->nb_queue_pools == nb_queue_pools)
                return;

        nb_lport_old = macvlan->nb_queue_pools ? macvlan->nb_queue_pools : 1;
        nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
        fm10k_dev_logic_port_update(dev, nb_lport_old, nb_lport_new);

        /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
        memset(dev->data->mac_addrs, 0,
                ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
        ether_addr_copy((const struct ether_addr *)hw->mac.addr,
                &dev->data->mac_addrs[0]);
        memset(macvlan, 0, sizeof(*macvlan));
        macvlan->nb_queue_pools = nb_queue_pools;

        if (nb_queue_pools)
                fm10k_dev_vmdq_rx_configure(dev);
        else
                fm10k_dev_pf_main_vsi_reset(dev);
}

static int
fm10k_dev_tx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, ret;
        struct fm10k_tx_queue *txq;
        uint64_t base_addr;
        uint32_t size;

        /* Disable TXINT to avoid possible interrupt */
        for (i = 0; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_TXINT(i),
                                3 << FM10K_TXINT_TIMER_SHIFT);

        /* Setup TX queue */
        for (i = 0; i < dev->data->nb_tx_queues; ++i) {
                txq = dev->data->tx_queues[i];
                base_addr = txq->hw_ring_phys_addr;
                size = txq->nb_desc * sizeof(struct fm10k_tx_desc);

                /* disable queue to avoid issues while updating state */
                ret = tx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }

                /* set location and size for descriptor ring */
                FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
        }

        /* set up vector or scalar TX function as appropriate */
        fm10k_set_tx_function(dev);

        return 0;
}

static int
fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, ret;
        struct fm10k_rx_queue *rxq;
        uint64_t base_addr;
        uint32_t size;
        uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
        uint16_t buf_size;

        /* Disable RXINT to avoid possible interrupt */
        for (i = 0; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_RXINT(i),
                                3 << FM10K_RXINT_TIMER_SHIFT);

        /* Setup RX queues */
        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
                rxq = dev->data->rx_queues[i];
                base_addr = rxq->hw_ring_phys_addr;
                size = rxq->nb_desc * sizeof(union fm10k_rx_desc);

                /* disable queue to avoid issues while updating state */
                ret = rx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }

                /* Setup the Base and Length of the Rx Descriptor Ring */
                FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);

                /* Configure the Rx buffer size for one buffer without split */
                buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
                        RTE_PKTMBUF_HEADROOM);
                /* As RX buffer is aligned to 512B within mbuf, some bytes are
                 * reserved for this purpose, and the worst case could be 511B.
                 * But SRR reg assumes all buffers have the same size. In order
                 * to fill the gap, we'll have to consider the worst case and
                 * assume 512B is reserved. If we don't do so, it's possible
                 * for HW to overwrite data to next mbuf.
                 */
                buf_size -= FM10K_RX_DATABUF_ALIGN;
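                /* Worked example (assuming the common RTE_MBUF_DEFAULT_BUF_SIZE
                 * of 2048 + 128 bytes): data room 2176 - 128 headroom = 2048,
                 * minus the 512B alignment reserve leaves 1536 usable bytes,
                 * which is what gets programmed into SRRCTL below.
                 */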

                FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
                                buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);

                /* Add dual VLAN tag length so dual VLAN is supported */
                if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
                                2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
                        dev->data->dev_conf.rxmode.enable_scatter) {
                        uint32_t reg;
                        dev->data->scattered_rx = 1;
                        reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
                        reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
                        FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
                }

                /* Enable drop on empty, it's RO for VF */
                if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
                        rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

                FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
                FM10K_WRITE_FLUSH(hw);
        }

        /* Configure VMDQ/RSS if applicable */
        fm10k_dev_mq_rx_configure(dev);

        /* Decide the best RX function */
        fm10k_set_rx_function(dev);

        return 0;
}

static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err = -1;
        uint32_t reg;
        struct fm10k_rx_queue *rxq;

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                rxq = dev->data->rx_queues[rx_queue_id];
                err = rx_queue_reset(rxq);
                if (err == -ENOMEM) {
                        PMD_INIT_LOG(ERR, "Failed to alloc memory: %d", err);
                        return err;
                } else if (err == -EINVAL) {
                        PMD_INIT_LOG(ERR, "Invalid buffer address alignment:"
                                " %d", err);
                        return err;
                }

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled on real
                 * hardware, but BEFORE the queue is enabled when using the
                 * emulation platform. Do it in both places for now and remove
                 * this comment and the following two register writes when the
                 * emulation platform is no longer being used.
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

                /* Set PF ownership flag for PF devices */
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
                if (hw->mac.type == fm10k_mac_pf)
                        reg |= FM10K_RXQCTL_PF;
                reg |= FM10K_RXQCTL_ENABLE;
                /* enable RX queue */
                FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
                FM10K_WRITE_FLUSH(hw);

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
                dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
        }

        return err;
}

static int
fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                /* Disable RX queue */
                rx_queue_disable(hw, rx_queue_id);

                /* Free mbuf and clean HW ring */
                rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
                dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

static int
fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY       0x00010000
        uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
        int err = 0;

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];

                q->ops->reset(q);

                /* reset head and tail pointers */
                FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

                /* enable TX queue */
                FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
                                        FM10K_TXDCTL_ENABLE | txdctl);
                FM10K_WRITE_FLUSH(hw);
                dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
        } else
                err = -1;

        return err;
}

static int
fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                tx_queue_disable(hw, tx_queue_id);
                tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
                dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

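/* Explanatory note (as understood from this file; the authoritative
 * definition lives in the base/ shared code): the switch manager assigns
 * the port a glort (global resource tag) range over the mailbox. Until
 * that happens, hw->mac.dglort_map still carries the FM10K_DGLORTMAP_NONE
 * marker and L2 filter updates cannot be issued.
 */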
static inline int fm10k_glort_valid(struct fm10k_hw *hw)
{
        return (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
                != FM10K_DGLORTMAP_NONE;
}

static void
fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if no valid glort range has been acquired */
        if (!fm10k_glort_valid(hw))
                return;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_PROMISC);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
}

static void
fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint8_t mode;
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if no valid glort range has been acquired */
        if (!fm10k_glort_valid(hw))
                return;

        if (dev->data->all_multicast == 1)
                mode = FM10K_XCAST_MODE_ALLMULTI;
        else
                mode = FM10K_XCAST_MODE_NONE;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                mode);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
}

static void
fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if no valid glort range has been acquired */
        if (!fm10k_glort_valid(hw))
                return;

        /* If promiscuous mode is enabled, it doesn't make sense to enable
         * allmulticast and disable promiscuous since fm10k can only select
         * one of the modes.
         */
        if (dev->data->promiscuous) {
                PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "
                        "no need to enable allmulticast");
                return;
        }

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_ALLMULTI);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
}

static void
fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if no valid glort range has been acquired */
        if (!fm10k_glort_valid(hw))
                return;

        if (dev->data->promiscuous) {
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "
                        "since promisc mode is enabled");
                return;
        }

        fm10k_mbx_lock(hw);
        /* Change mode to unicast mode */
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_NONE);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
}

static void
fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t dglortdec, pool_len, rss_len, i;
        uint16_t nb_queue_pools;
        struct fm10k_macvlan_filter_info *macvlan;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        nb_queue_pools = macvlan->nb_queue_pools;
        pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
        rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
        dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
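        /* Worked example: with 8 VMDQ pools and 32 RX queues,
         * pool_len = fls(7) = 3 bits and rss_len = fls(31) - 3 = 2 bits,
         * i.e. the decoder splits a queue index into a 3-bit pool field
         * and a 2-bit RSS field (4 queues per pool).
         */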

        /* Establish only MAP 0 as valid */
        FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);

        /* Configure VMDQ/RSS DGlort Decoder */
        FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);

        /* Invalidate all other GLORT entries */
        for (i = 1; i < FM10K_DGLORT_COUNT; i++)
                FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
                                FM10K_DGLORTMAP_NONE);
}

#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
static int
fm10k_dev_start(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, diag;

        PMD_INIT_FUNC_TRACE();

        /* stop, init, then start the hw */
        diag = fm10k_stop_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_init_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_start_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_dev_tx_init(dev);
        if (diag) {
                PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
                return diag;
        }

        diag = fm10k_dev_rx_init(dev);
        if (diag) {
                PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
                return diag;
        }

        if (hw->mac.type == fm10k_mac_pf)
                fm10k_dev_dglort_map_configure(dev);

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct fm10k_rx_queue *rxq;
                rxq = dev->data->rx_queues[i];

                if (rxq->rx_deferred_start)
                        continue;
                diag = fm10k_dev_rx_queue_start(dev, i);
                if (diag != 0) {
                        int j;
                        for (j = 0; j < i; ++j)
                                rx_queue_clean(dev->data->rx_queues[j]);
                        return diag;
                }
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct fm10k_tx_queue *txq;
                txq = dev->data->tx_queues[i];

                if (txq->tx_deferred_start)
                        continue;
                diag = fm10k_dev_tx_queue_start(dev, i);
                if (diag != 0) {
                        int j;
                        for (j = 0; j < i; ++j)
                                tx_queue_clean(dev->data->tx_queues[j]);
                        for (j = 0; j < dev->data->nb_rx_queues; ++j)
                                rx_queue_clean(dev->data->rx_queues[j]);
                        return diag;
                }
        }

        /* Update default vlan when not in VMDQ mode */
        if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
                fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);

        return 0;
}

static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
        int i;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->tx_queues)
                for (i = 0; i < dev->data->nb_tx_queues; i++)
                        fm10k_dev_tx_queue_stop(dev, i);

        if (dev->data->rx_queues)
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        fm10k_dev_rx_queue_stop(dev, i);
}

static void
fm10k_dev_queue_release(struct rte_eth_dev *dev)
{
        int i;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->tx_queues) {
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        struct fm10k_tx_queue *txq = dev->data->tx_queues[i];

                        tx_queue_free(txq);
                }
        }

        if (dev->data->rx_queues) {
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        fm10k_rx_queue_release(dev->data->rx_queues[i]);
        }
}

static void
fm10k_dev_close(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t nb_lport;
        struct fm10k_macvlan_filter_info *macvlan;

        PMD_INIT_FUNC_TRACE();

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        nb_lport = macvlan->nb_queue_pools ? macvlan->nb_queue_pools : 1;
        fm10k_mbx_lock(hw);
        hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
                nb_lport, false);
        fm10k_mbx_unlock(hw);

        /* Stop mailbox service first */
        fm10k_close_mbx_service(hw);
        fm10k_dev_stop(dev);
        fm10k_dev_queue_release(dev);
        fm10k_stop_hw(hw);
}

static int
fm10k_link_update(struct rte_eth_dev *dev,
        __rte_unused int wait_to_complete)
{
        PMD_INIT_FUNC_TRACE();

        /* The host-interface link is always up.  The speed is ~50Gbps per Gen3
         * x8 PCIe interface. For now, we leave the speed undefined since there
         * is no 50Gbps Ethernet.
         */
        dev->data->dev_link.link_speed  = 0;
        dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        dev->data->dev_link.link_status = 1;

        return 0;
}

static int
fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
                 unsigned n)
{
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        unsigned i, q, count = 0;

        if (n < FM10K_NB_XSTATS)
                return FM10K_NB_XSTATS;

        /* Global stats */
        for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
                snprintf(xstats[count].name, sizeof(xstats[count].name),
                         "%s", fm10k_hw_stats_strings[count].name);
                xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
                        fm10k_hw_stats_strings[count].offset);
                count++;
        }

        /* PF queue stats */
        for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
                for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
                        snprintf(xstats[count].name, sizeof(xstats[count].name),
                                 "rx_q%u_%s", q,
                                 fm10k_hw_stats_rx_q_strings[i].name);
                        xstats[count].value =
                                *(uint64_t *)(((char *)&hw_stats->q[q]) +
                                fm10k_hw_stats_rx_q_strings[i].offset);
                        count++;
                }
                for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
                        snprintf(xstats[count].name, sizeof(xstats[count].name),
                                 "tx_q%u_%s", q,
                                 fm10k_hw_stats_tx_q_strings[i].name);
                        xstats[count].value =
                                *(uint64_t *)(((char *)&hw_stats->q[q]) +
                                fm10k_hw_stats_tx_q_strings[i].offset);
                        count++;
                }
        }

        return FM10K_NB_XSTATS;
}

static void
fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        uint64_t ipackets, opackets, ibytes, obytes;
        struct fm10k_hw *hw =
                FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        int i;

        PMD_INIT_FUNC_TRACE();

        fm10k_update_hw_stats(hw, hw_stats);

        ipackets = opackets = ibytes = obytes = 0;
        for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
                (i < hw->mac.max_queues); ++i) {
                stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
                stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
                stats->q_ibytes[i]   = hw_stats->q[i].rx_bytes.count;
                stats->q_obytes[i]   = hw_stats->q[i].tx_bytes.count;
                ipackets += stats->q_ipackets[i];
                opackets += stats->q_opackets[i];
                ibytes   += stats->q_ibytes[i];
                obytes   += stats->q_obytes[i];
        }
        stats->ipackets = ipackets;
        stats->opackets = opackets;
        stats->ibytes = ibytes;
        stats->obytes = obytes;
}

static void
fm10k_stats_reset(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        memset(hw_stats, 0, sizeof(*hw_stats));
        fm10k_rebind_hw_stats(hw, hw_stats);
}

static void
fm10k_dev_infos_get(struct rte_eth_dev *dev,
        struct rte_eth_dev_info *dev_info)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        dev_info->min_rx_bufsize     = FM10K_MIN_RX_BUF_SIZE;
        dev_info->max_rx_pktlen      = FM10K_MAX_PKT_SIZE;
        dev_info->max_rx_queues      = hw->mac.max_queues;
        dev_info->max_tx_queues      = hw->mac.max_queues;
        dev_info->max_mac_addrs      = FM10K_MAX_MACADDR_NUM;
        dev_info->max_hash_mac_addrs = 0;
        dev_info->max_vfs            = dev->pci_dev->max_vfs;
        dev_info->vmdq_pool_base     = 0;
        dev_info->vmdq_queue_base    = 0;
        dev_info->max_vmdq_pools     = ETH_32_POOLS;
        dev_info->vmdq_queue_num     = FM10K_MAX_QUEUES_PF;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM  |
                DEV_RX_OFFLOAD_TCP_CKSUM;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM  |
                DEV_TX_OFFLOAD_UDP_CKSUM   |
                DEV_TX_OFFLOAD_TCP_CKSUM   |
                DEV_TX_OFFLOAD_TCP_TSO;

        dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
        dev_info->reta_size = FM10K_MAX_RSS_INDICES;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = FM10K_DEFAULT_RX_PTHRESH,
                        .hthresh = FM10K_DEFAULT_RX_HTHRESH,
                        .wthresh = FM10K_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
                .rx_drop_en = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = FM10K_DEFAULT_TX_PTHRESH,
                        .hthresh = FM10K_DEFAULT_TX_HTHRESH,
                        .wthresh = FM10K_DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
                .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
                .txq_flags = FM10K_SIMPLE_TX_FLAG,
        };

        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = FM10K_MAX_RX_DESC,
                .nb_min = FM10K_MIN_RX_DESC,
                .nb_align = FM10K_MULT_RX_DESC,
        };

        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = FM10K_MAX_TX_DESC,
                .nb_min = FM10K_MIN_TX_DESC,
                .nb_align = FM10K_MULT_TX_DESC,
        };
}

static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        s32 result;
        uint16_t mac_num = 0;
        uint32_t vid_idx, vid_bit, mac_index;
        struct fm10k_hw *hw;
        struct fm10k_macvlan_filter_info *macvlan;
        struct rte_eth_dev_data *data = dev->data;

        hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

        if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
                PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
                return -EINVAL;
        }

        if (vlan_id > ETH_VLAN_ID_MAX) {
                PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
                return -EINVAL;
        }

        vid_idx = FM10K_VFTA_IDX(vlan_id);
        vid_bit = FM10K_VFTA_BIT(vlan_id);
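        /* The VFTA is a packed bitmap with one bit per VLAN ID: vid_idx
         * selects the 32-bit word and vid_bit the bit within it, so e.g.
         * vlan_id 100 maps to word 3, bit 4 (100 = 3 * 32 + 4).
         */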
        /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
        if (on && (macvlan->vfta[vid_idx] & vid_bit))
                return 0;
        /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
        if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
                PMD_INIT_LOG(ERR, "Invalid vlan_id: not present "
                        "in the VLAN filter table");
                return -EINVAL;
        }

        fm10k_mbx_lock(hw);
        result = fm10k_update_vlan(hw, vlan_id, 0, on);
        fm10k_mbx_unlock(hw);
        if (result != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
                return -EIO;
        }

        for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
                        (result == FM10K_SUCCESS); mac_index++) {
                if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
                        continue;
                if (mac_num > macvlan->mac_num - 1) {
                        PMD_INIT_LOG(ERR, "MAC address number "
                                        "does not match");
                        break;
                }
                fm10k_mbx_lock(hw);
                result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
                        data->mac_addrs[mac_index].addr_bytes,
                        vlan_id, on, 0);
                fm10k_mbx_unlock(hw);
                mac_num++;
        }
        if (result != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
                return -EIO;
        }

        if (on) {
                macvlan->vlan_num++;
                macvlan->vfta[vid_idx] |= vid_bit;
        } else {
                macvlan->vlan_num--;
                macvlan->vfta[vid_idx] &= ~vid_bit;
        }
        return 0;
}

static void
fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        if (mask & ETH_VLAN_STRIP_MASK) {
                if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
                        PMD_INIT_LOG(ERR, "VLAN stripping is "
                                        "always on in fm10k");
        }

        if (mask & ETH_VLAN_EXTEND_MASK) {
                if (dev->data->dev_conf.rxmode.hw_vlan_extend)
                        PMD_INIT_LOG(ERR, "VLAN QinQ is not "
                                        "supported in fm10k");
        }

        if (mask & ETH_VLAN_FILTER_MASK) {
                if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
                        PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
        }
}

/* Add/Remove a MAC address, and update filters to main VSI */
static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
                const u8 *mac, bool add, uint32_t pool)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_macvlan_filter_info *macvlan;
        uint32_t i, j, k;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

        if (pool != MAIN_VSI_POOL_NUMBER) {
                PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
                        "mac to pool %u", pool);
                return;
        }
        for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
                if (!macvlan->vfta[j])
                        continue;
                for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
                        if (!(macvlan->vfta[j] & (1 << k)))
                                continue;
                        if (i + 1 > macvlan->vlan_num) {
                                PMD_INIT_LOG(ERR, "VLAN number does not match");
                                return;
                        }
                        fm10k_mbx_lock(hw);
                        fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
                                j * FM10K_UINT32_BIT_SIZE + k, add, 0);
                        fm10k_mbx_unlock(hw);
                        i++;
                }
        }
}
1466
1467 /* Add/Remove a MAC address, and update filters to VMDQ */
1468 static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
1469                 const u8 *mac, bool add, uint32_t pool)
1470 {
1471         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1472         struct fm10k_macvlan_filter_info *macvlan;
1473         struct rte_eth_vmdq_rx_conf *vmdq_conf;
1474         uint32_t i;
1475
1476         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1477         vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1478
1479         if (pool > macvlan->nb_queue_pools) {
1480                 PMD_DRV_LOG(ERR, "Pool number %u invalid."
1481                         " Max pool is %u",
1482                         pool, macvlan->nb_queue_pools);
1483                 return;
1484         }
1485         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
1486                 if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
1487                         continue;
1488                 fm10k_mbx_lock(hw);
1489                 fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
1490                         vmdq_conf->pool_map[i].vlan_id, add, 0);
1491                 fm10k_mbx_unlock(hw);
1492         }
1493 }
1494
1495 /* Add/Remove a MAC address, and update filters */
1496 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
1497                 const u8 *mac, bool add, uint32_t pool)
1498 {
1499         struct fm10k_macvlan_filter_info *macvlan;
1500
1501         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1502
1503         if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
1504                 fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
1505         else
1506                 fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
1507
1508         if (add)
1509                 macvlan->mac_num++;
1510         else
1511                 macvlan->mac_num--;
1512 }
1513
1514 /* Add a MAC address, and update filters */
1515 static void
1516 fm10k_macaddr_add(struct rte_eth_dev *dev,
1517                 struct ether_addr *mac_addr,
1518                 uint32_t index,
1519                 uint32_t pool)
1520 {
1521         struct fm10k_macvlan_filter_info *macvlan;
1522
1523         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1524         fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1525         macvlan->mac_vmdq_id[index] = pool;
1526 }
1527
1528 /* Remove a MAC address, and update filters */
1529 static void
1530 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1531 {
1532         struct rte_eth_dev_data *data = dev->data;
1533         struct fm10k_macvlan_filter_info *macvlan;
1534
1535         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1536         fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1537                         FALSE, macvlan->mac_vmdq_id[index]);
1538         macvlan->mac_vmdq_id[index] = 0;
1539 }
1540
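/*
 * Check that a requested descriptor count lies in [min, max] and is a
 * multiple of mult; e.g. with min 32, max 4096 and mult 32, a request of
 * 512 passes while 500 is rejected.
 */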
1541 static inline int
1542 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1543 {
1544         if ((request < min) || (request > max) || ((request % mult) != 0))
1545                 return -1;
1546         else
1547                 return 0;
1548 }
1549
1550
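/*
 * Check that a threshold lies in [min, max] and evenly divides div
 * (typically the ring size), so the ring wraps on whole threshold-sized
 * chunks.
 */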
1551 static inline int
1552 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1553 {
1554         if ((request < min) || (request > max) || ((div % request) != 0))
1555                 return -1;
1556         else
1557                 return 0;
1558 }
1559
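/*
 * Validate and record Rx queue thresholds from the user configuration,
 * falling back to the per-queue default when rx_free_thresh is 0.
 */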
1560 static inline int
1561 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1562 {
1563         uint16_t rx_free_thresh;
1564
1565         if (conf->rx_free_thresh == 0)
1566                 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1567         else
1568                 rx_free_thresh = conf->rx_free_thresh;
1569
1570         /* make sure the requested threshold satisfies the constraints */
1571         if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1572                         FM10K_RX_FREE_THRESH_MAX(q),
1573                         FM10K_RX_FREE_THRESH_DIV(q),
1574                         rx_free_thresh)) {
1575                 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1576                         "less than or equal to %u, "
1577                         "greater than or equal to %u, "
1578                         "and a divisor of %u",
1579                         rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1580                         FM10K_RX_FREE_THRESH_MIN(q),
1581                         FM10K_RX_FREE_THRESH_DIV(q));
1582                 return -EINVAL;
1583         }
1584
1585         q->alloc_thresh = rx_free_thresh;
1586         q->drop_en = conf->rx_drop_en;
1587         q->rx_deferred_start = conf->rx_deferred_start;
1588
1589         return 0;
1590 }
1591
1592 /*
1593  * Hardware requires specific alignment for Rx packet buffers. At
1594  * least one of the following two conditions must be satisfied.
1595  *  1. Address is 512B aligned
1596  *  2. Address is 8B aligned and buffer does not cross 4K boundary.
1597  *
1598  * As such, the driver may need to adjust the DMA address within the
1599  * buffer by up to 512B.
1600  *
1601  * return 1 if the element size is valid, otherwise return 0.
1602  */
1603 static int
1604 mempool_element_size_valid(struct rte_mempool *mp)
1605 {
1606         uint32_t min_size;
1607
1608         /* elt_size includes mbuf header and headroom */
1609         min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1610                         RTE_PKTMBUF_HEADROOM;
1611
1612         /* account for up to 512B of alignment */
1613         min_size -= FM10K_RX_DATABUF_ALIGN;
1614
        /* sanity check for overflow: min_size is unsigned, so an undersized
         * element wraps the subtractions above past zero
         */
1616         if (min_size > mp->elt_size)
1617                 return 0;
1618
1619         /* size is valid */
1620         return 1;
1621 }
1622
1623 static int
1624 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1625         uint16_t nb_desc, unsigned int socket_id,
1626         const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1627 {
1628         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1629         struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
1630         struct fm10k_rx_queue *q;
1631         const struct rte_memzone *mz;
1632
1633         PMD_INIT_FUNC_TRACE();
1634
1635         /* make sure the mempool element size can account for alignment. */
1636         if (!mempool_element_size_valid(mp)) {
                PMD_INIT_LOG(ERR, "Error: mempool element size is too small");
1638                 return -EINVAL;
1639         }
1640
1641         /* make sure a valid number of descriptors have been requested */
1642         if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1643                                 FM10K_MULT_RX_DESC, nb_desc)) {
1644                 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1645                         "less than or equal to %"PRIu32", "
1646                         "greater than or equal to %u, "
1647                         "and a multiple of %u",
1648                         nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1649                         FM10K_MULT_RX_DESC);
1650                 return -EINVAL;
1651         }
1652
        /*
         * If this queue existed already, free the associated memory. The
         * queue cannot be reused in case we need to allocate memory on a
         * different socket than was previously used.
         */
1658         if (dev->data->rx_queues[queue_id] != NULL) {
1659                 rx_queue_free(dev->data->rx_queues[queue_id]);
1660                 dev->data->rx_queues[queue_id] = NULL;
1661         }
1662
1663         /* allocate memory for the queue structure */
1664         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1665                                 socket_id);
1666         if (q == NULL) {
1667                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1668                 return -ENOMEM;
1669         }
1670
1671         /* setup queue */
1672         q->mp = mp;
1673         q->nb_desc = nb_desc;
1674         q->nb_fake_desc = FM10K_MULT_RX_DESC;
1675         q->port_id = dev->data->port_id;
1676         q->queue_id = queue_id;
1677         q->tail_ptr = (volatile uint32_t *)
1678                 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
        if (handle_rxconf(q, conf)) {
                rte_free(q);
                return -EINVAL;
        }
1681
1682         /* allocate memory for the software ring */
1683         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1684                         (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
1685                         RTE_CACHE_LINE_SIZE, socket_id);
1686         if (q->sw_ring == NULL) {
1687                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1688                 rte_free(q);
1689                 return -ENOMEM;
1690         }
1691
1692         /*
1693          * allocate memory for the hardware descriptor ring. A memzone large
1694          * enough to hold the maximum ring size is requested to allow for
1695          * resizing in later calls to the queue setup function.
1696          */
1697         mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
1698                                       FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
1699                                       socket_id);
1700         if (mz == NULL) {
1701                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1702                 rte_free(q->sw_ring);
1703                 rte_free(q);
1704                 return -ENOMEM;
1705         }
1706         q->hw_ring = mz->addr;
1707         q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1708
        /* Check if the number of descriptors satisfies the vector Rx requirement */
1710         if (!rte_is_power_of_2(nb_desc)) {
1711                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
1712                                     "preconditions - canceling the feature for "
1713                                     "the whole port[%d]",
1714                              q->queue_id, q->port_id);
1715                 dev_info->rx_vec_allowed = false;
1716         } else
1717                 fm10k_rxq_vec_setup(q);
1718
1719         dev->data->rx_queues[queue_id] = q;
1720         return 0;
1721 }
1722
1723 static void
1724 fm10k_rx_queue_release(void *queue)
1725 {
1726         PMD_INIT_FUNC_TRACE();
1727
1728         rx_queue_free(queue);
1729 }
1730
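/*
 * Validate and record Tx queue thresholds from the user configuration,
 * falling back to per-queue defaults when a field is 0.
 */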
1731 static inline int
1732 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1733 {
1734         uint16_t tx_free_thresh;
1735         uint16_t tx_rs_thresh;
1736
        /* The constraint macros require that tx_free_thresh is configured
         * before tx_rs_thresh.
         */
1739         if (conf->tx_free_thresh == 0)
1740                 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1741         else
1742                 tx_free_thresh = conf->tx_free_thresh;
1743
1744         /* make sure the requested threshold satisfies the constraints */
1745         if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1746                         FM10K_TX_FREE_THRESH_MAX(q),
1747                         FM10K_TX_FREE_THRESH_DIV(q),
1748                         tx_free_thresh)) {
1749                 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1750                         "less than or equal to %u, "
1751                         "greater than or equal to %u, "
1752                         "and a divisor of %u",
1753                         tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1754                         FM10K_TX_FREE_THRESH_MIN(q),
1755                         FM10K_TX_FREE_THRESH_DIV(q));
1756                 return -EINVAL;
1757         }
1758
1759         q->free_thresh = tx_free_thresh;
1760
1761         if (conf->tx_rs_thresh == 0)
1762                 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1763         else
1764                 tx_rs_thresh = conf->tx_rs_thresh;
1765
1766         q->tx_deferred_start = conf->tx_deferred_start;
1767
1768         /* make sure the requested threshold satisfies the constraints */
1769         if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1770                         FM10K_TX_RS_THRESH_MAX(q),
1771                         FM10K_TX_RS_THRESH_DIV(q),
1772                         tx_rs_thresh)) {
1773                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1774                         "less than or equal to %u, "
1775                         "greater than or equal to %u, "
1776                         "and a divisor of %u",
1777                         tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1778                         FM10K_TX_RS_THRESH_MIN(q),
1779                         FM10K_TX_RS_THRESH_DIV(q));
1780                 return -EINVAL;
1781         }
1782
1783         q->rs_thresh = tx_rs_thresh;
1784
1785         return 0;
1786 }
1787
1788 static int
1789 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1790         uint16_t nb_desc, unsigned int socket_id,
1791         const struct rte_eth_txconf *conf)
1792 {
1793         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1794         struct fm10k_tx_queue *q;
1795         const struct rte_memzone *mz;
1796
1797         PMD_INIT_FUNC_TRACE();
1798
1799         /* make sure a valid number of descriptors have been requested */
1800         if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1801                                 FM10K_MULT_TX_DESC, nb_desc)) {
1802                 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1803                         "less than or equal to %"PRIu32", "
1804                         "greater than or equal to %u, "
1805                         "and a multiple of %u",
1806                         nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1807                         FM10K_MULT_TX_DESC);
1808                 return -EINVAL;
1809         }
1810
        /*
         * If this queue existed already, free the associated memory. The
         * queue cannot be reused in case we need to allocate memory on a
         * different socket than was previously used.
         */
1816         if (dev->data->tx_queues[queue_id] != NULL) {
1817                 struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
1818
1819                 tx_queue_free(txq);
1820                 dev->data->tx_queues[queue_id] = NULL;
1821         }
1822
1823         /* allocate memory for the queue structure */
1824         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1825                                 socket_id);
1826         if (q == NULL) {
1827                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1828                 return -ENOMEM;
1829         }
1830
1831         /* setup queue */
1832         q->nb_desc = nb_desc;
1833         q->port_id = dev->data->port_id;
1834         q->queue_id = queue_id;
1835         q->txq_flags = conf->txq_flags;
1836         q->ops = &def_txq_ops;
1837         q->tail_ptr = (volatile uint32_t *)
1838                 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
        if (handle_txconf(q, conf)) {
                rte_free(q);
                return -EINVAL;
        }
1841
1842         /* allocate memory for the software ring */
1843         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1844                                         nb_desc * sizeof(struct rte_mbuf *),
1845                                         RTE_CACHE_LINE_SIZE, socket_id);
1846         if (q->sw_ring == NULL) {
1847                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1848                 rte_free(q);
1849                 return -ENOMEM;
1850         }
1851
1852         /*
1853          * allocate memory for the hardware descriptor ring. A memzone large
1854          * enough to hold the maximum ring size is requested to allow for
1855          * resizing in later calls to the queue setup function.
1856          */
1857         mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
1858                                       FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
1859                                       socket_id);
1860         if (mz == NULL) {
1861                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1862                 rte_free(q->sw_ring);
1863                 rte_free(q);
1864                 return -ENOMEM;
1865         }
1866         q->hw_ring = mz->addr;
1867         q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1868
1869         /*
1870          * allocate memory for the RS bit tracker. Enough slots to hold the
1871          * descriptor index for each RS bit needing to be set are required.
1872          */
1873         q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
1874                                 ((nb_desc + 1) / q->rs_thresh) *
1875                                 sizeof(uint16_t),
1876                                 RTE_CACHE_LINE_SIZE, socket_id);
1877         if (q->rs_tracker.list == NULL) {
1878                 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
1879                 rte_free(q->sw_ring);
1880                 rte_free(q);
1881                 return -ENOMEM;
1882         }
1883
1884         dev->data->tx_queues[queue_id] = q;
1885         return 0;
1886 }
1887
1888 static void
1889 fm10k_tx_queue_release(void *queue)
1890 {
1891         struct fm10k_tx_queue *q = queue;
1892         PMD_INIT_FUNC_TRACE();
1893
1894         tx_queue_free(q);
1895 }
1896
1897 static int
1898 fm10k_reta_update(struct rte_eth_dev *dev,
1899                         struct rte_eth_rss_reta_entry64 *reta_conf,
1900                         uint16_t reta_size)
1901 {
1902         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1903         uint16_t i, j, idx, shift;
1904         uint8_t mask;
1905         uint32_t reta;
1906
1907         PMD_INIT_FUNC_TRACE();
1908
1909         if (reta_size > FM10K_MAX_RSS_INDICES) {
1910                 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
1911                         "(%d) doesn't match the number hardware can supported "
1912                         "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1913                 return -EINVAL;
1914         }
1915
        /*
         * Update redirection table RETA[n], n=0..31. The redirection table
         * has 128 entries in 32 registers.
         */
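        /*
         * Each 32-bit RETA register packs four 8-bit queue indices, so the
         * table is walked four entries at a time (CHARS_PER_UINT32).
         */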
1920         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1921                 idx = i / RTE_RETA_GROUP_SIZE;
1922                 shift = i % RTE_RETA_GROUP_SIZE;
1923                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1924                                 BIT_MASK_PER_UINT32);
1925                 if (mask == 0)
1926                         continue;
1927
1928                 reta = 0;
1929                 if (mask != BIT_MASK_PER_UINT32)
1930                         reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1931
1932                 for (j = 0; j < CHARS_PER_UINT32; j++) {
1933                         if (mask & (0x1 << j)) {
                                if (mask != BIT_MASK_PER_UINT32)
1935                                         reta &= ~(UINT8_MAX << CHAR_BIT * j);
1936                                 reta |= reta_conf[idx].reta[shift + j] <<
1937                                                 (CHAR_BIT * j);
1938                         }
1939                 }
1940                 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
1941         }
1942
1943         return 0;
1944 }
1945
1946 static int
1947 fm10k_reta_query(struct rte_eth_dev *dev,
1948                         struct rte_eth_rss_reta_entry64 *reta_conf,
1949                         uint16_t reta_size)
1950 {
1951         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1952         uint16_t i, j, idx, shift;
1953         uint8_t mask;
1954         uint32_t reta;
1955
1956         PMD_INIT_FUNC_TRACE();
1957
1958         if (reta_size < FM10K_MAX_RSS_INDICES) {
                PMD_INIT_LOG(ERR, "The size of the configured hash lookup "
                        "table (%d) doesn't match the number the hardware "
                        "can support (%d)", reta_size, FM10K_MAX_RSS_INDICES);
1962                 return -EINVAL;
1963         }
1964
        /*
         * Read redirection table RETA[n], n=0..31. The redirection table
         * has 128 entries in 32 registers.
         */
1969         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1970                 idx = i / RTE_RETA_GROUP_SIZE;
1971                 shift = i % RTE_RETA_GROUP_SIZE;
1972                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1973                                 BIT_MASK_PER_UINT32);
1974                 if (mask == 0)
1975                         continue;
1976
1977                 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1978                 for (j = 0; j < CHARS_PER_UINT32; j++) {
1979                         if (mask & (0x1 << j))
1980                                 reta_conf[idx].reta[shift + j] = ((reta >>
1981                                         CHAR_BIT * j) & UINT8_MAX);
1982                 }
1983         }
1984
1985         return 0;
1986 }
1987
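/*
 * Program the RSS key registers and enable hashing in the MRQC register
 * for the requested flow types.
 */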
1988 static int
1989 fm10k_rss_hash_update(struct rte_eth_dev *dev,
1990         struct rte_eth_rss_conf *rss_conf)
1991 {
1992         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1993         uint32_t *key = (uint32_t *)rss_conf->rss_key;
1994         uint32_t mrqc;
1995         uint64_t hf = rss_conf->rss_hf;
1996         int i;
1997
1998         PMD_INIT_FUNC_TRACE();
1999
2000         if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2001                 FM10K_RSSRK_ENTRIES_PER_REG)
2002                 return -EINVAL;
2003
2004         if (hf == 0)
2005                 return -EINVAL;
2006
2007         mrqc = 0;
2008         mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
2009         mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
2010         mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
2011         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
2012         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
2013         mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
2014         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
2015         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
2016         mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
2017
        /* If none of the requested hash functions maps to a supported one, fail */
2019         if (mrqc == 0)
2020                 return -EINVAL;
2021
2022         if (key != NULL)
2023                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2024                         FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
2025
2026         FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
2027
2028         return 0;
2029 }
2030
2031 static int
2032 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
2033         struct rte_eth_rss_conf *rss_conf)
2034 {
2035         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2036         uint32_t *key = (uint32_t *)rss_conf->rss_key;
2037         uint32_t mrqc;
2038         uint64_t hf;
2039         int i;
2040
2041         PMD_INIT_FUNC_TRACE();
2042
2043         if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2044                                 FM10K_RSSRK_ENTRIES_PER_REG)
2045                 return -EINVAL;
2046
2047         if (key != NULL)
2048                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2049                         key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
2050
2051         mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
2052         hf = 0;
2053         hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
2054         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
2055         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
2056         hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
2057         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
2058         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
2059         hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
2060         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
2061         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
2062
2063         rss_conf->rss_hf = hf;
2064
2065         return 0;
2066 }
2067
2068 static void
2069 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
2070 {
2071         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2072         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2073
        /* Bind all local non-queue interrupts to vector 0 */
2075         int_map |= 0;
2076
2077         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
2078         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
2079         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
2080         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
2081         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
2082         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
2083
2084         /* Enable misc causes */
2085         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
2086                                 FM10K_EIMR_ENABLE(THI_FAULT) |
2087                                 FM10K_EIMR_ENABLE(FUM_FAULT) |
2088                                 FM10K_EIMR_ENABLE(MAILBOX) |
2089                                 FM10K_EIMR_ENABLE(SWITCHREADY) |
2090                                 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
2091                                 FM10K_EIMR_ENABLE(SRAMERROR) |
2092                                 FM10K_EIMR_ENABLE(VFLR));
2093
2094         /* Enable ITR 0 */
2095         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2096                                         FM10K_ITR_MASK_CLEAR);
2097         FM10K_WRITE_FLUSH(hw);
2098 }
2099
2100 static void
2101 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
2102 {
2103         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2104         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2105
2106         int_map |= 0;
2107
2108         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
2109         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
2110         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
2111         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
2112         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
2113         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
2114
2115         /* Disable misc causes */
2116         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
2117                                 FM10K_EIMR_DISABLE(THI_FAULT) |
2118                                 FM10K_EIMR_DISABLE(FUM_FAULT) |
2119                                 FM10K_EIMR_DISABLE(MAILBOX) |
2120                                 FM10K_EIMR_DISABLE(SWITCHREADY) |
2121                                 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
2122                                 FM10K_EIMR_DISABLE(SRAMERROR) |
2123                                 FM10K_EIMR_DISABLE(VFLR));
2124
2125         /* Disable ITR 0 */
2126         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
2127         FM10K_WRITE_FLUSH(hw);
2128 }
2129
2130 static void
2131 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
2132 {
2133         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2134         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2135
        /* Bind all local non-queue interrupts to vector 0 */
2137         int_map |= 0;
2138
        /* Only INT 0 is available; the other 15 are reserved. */
2140         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2141
2142         /* Enable ITR 0 */
2143         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2144                                         FM10K_ITR_MASK_CLEAR);
2145         FM10K_WRITE_FLUSH(hw);
2146 }
2147
2148 static void
2149 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
2150 {
2151         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2152         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2153
2154         int_map |= 0;
2155
        /* Only INT 0 is available; the other 15 are reserved. */
2157         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2158
2159         /* Disable ITR 0 */
2160         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
2161         FM10K_WRITE_FLUSH(hw);
2162 }
2163
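/*
 * Decode and log any PCA/THI/FUM fault reported in the extended interrupt
 * cause register (EICR).
 */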
2164 static int
2165 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
2166 {
2167         struct fm10k_fault fault;
2168         int err;
2169         const char *estr = "Unknown error";
2170
2171         /* Process PCA fault */
2172         if (eicr & FM10K_EICR_PCA_FAULT) {
2173                 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
2174                 if (err)
2175                         goto error;
2176                 switch (fault.type) {
2177                 case PCA_NO_FAULT:
2178                         estr = "PCA_NO_FAULT"; break;
2179                 case PCA_UNMAPPED_ADDR:
2180                         estr = "PCA_UNMAPPED_ADDR"; break;
2181                 case PCA_BAD_QACCESS_PF:
2182                         estr = "PCA_BAD_QACCESS_PF"; break;
2183                 case PCA_BAD_QACCESS_VF:
2184                         estr = "PCA_BAD_QACCESS_VF"; break;
2185                 case PCA_MALICIOUS_REQ:
2186                         estr = "PCA_MALICIOUS_REQ"; break;
2187                 case PCA_POISONED_TLP:
2188                         estr = "PCA_POISONED_TLP"; break;
2189                 case PCA_TLP_ABORT:
2190                         estr = "PCA_TLP_ABORT"; break;
2191                 default:
2192                         goto error;
2193                 }
2194                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2195                         estr, fault.func ? "VF" : "PF", fault.func,
2196                         fault.address, fault.specinfo);
2197         }
2198
2199         /* Process THI fault */
2200         if (eicr & FM10K_EICR_THI_FAULT) {
2201                 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
2202                 if (err)
2203                         goto error;
2204                 switch (fault.type) {
2205                 case THI_NO_FAULT:
2206                         estr = "THI_NO_FAULT"; break;
2207                 case THI_MAL_DIS_Q_FAULT:
2208                         estr = "THI_MAL_DIS_Q_FAULT"; break;
2209                 default:
2210                         goto error;
2211                 }
2212                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2213                         estr, fault.func ? "VF" : "PF", fault.func,
2214                         fault.address, fault.specinfo);
2215         }
2216
2217         /* Process FUM fault */
2218         if (eicr & FM10K_EICR_FUM_FAULT) {
2219                 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
2220                 if (err)
2221                         goto error;
2222                 switch (fault.type) {
2223                 case FUM_NO_FAULT:
2224                         estr = "FUM_NO_FAULT"; break;
2225                 case FUM_UNMAPPED_ADDR:
2226                         estr = "FUM_UNMAPPED_ADDR"; break;
2227                 case FUM_POISONED_TLP:
2228                         estr = "FUM_POISONED_TLP"; break;
2229                 case FUM_BAD_VF_QACCESS:
2230                         estr = "FUM_BAD_VF_QACCESS"; break;
2231                 case FUM_ADD_DECODE_ERR:
2232                         estr = "FUM_ADD_DECODE_ERR"; break;
2233                 case FUM_RO_ERROR:
2234                         estr = "FUM_RO_ERROR"; break;
2235                 case FUM_QPRC_CRC_ERROR:
2236                         estr = "FUM_QPRC_CRC_ERROR"; break;
2237                 case FUM_CSR_TIMEOUT:
2238                         estr = "FUM_CSR_TIMEOUT"; break;
2239                 case FUM_INVALID_TYPE:
2240                         estr = "FUM_INVALID_TYPE"; break;
2241                 case FUM_INVALID_LENGTH:
2242                         estr = "FUM_INVALID_LENGTH"; break;
2243                 case FUM_INVALID_BE:
2244                         estr = "FUM_INVALID_BE"; break;
2245                 case FUM_INVALID_ALIGN:
2246                         estr = "FUM_INVALID_ALIGN"; break;
2247                 default:
2248                         goto error;
2249                 }
2250                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2251                         estr, fault.func ? "VF" : "PF", fault.func,
2252                         fault.address, fault.specinfo);
2253         }
2254
2255         return 0;
2256 error:
2257         PMD_INIT_LOG(ERR, "Failed to handle fault event.");
2258         return err;
2259 }
2260
2261 /**
2262  * PF interrupt handler triggered by NIC for handling specific interrupt.
2263  *
2264  * @param handle
2265  *  Pointer to interrupt handle.
2266  * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
2268  *
2269  * @return
2270  *  void
2271  */
2272 static void
2273 fm10k_dev_interrupt_handler_pf(
2274                         __rte_unused struct rte_intr_handle *handle,
2275                         void *param)
2276 {
2277         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2278         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2279         uint32_t cause, status;
2280
2281         if (hw->mac.type != fm10k_mac_pf)
2282                 return;
2283
2284         cause = FM10K_READ_REG(hw, FM10K_EICR);
2285
2286         /* Handle PCI fault cases */
2287         if (cause & FM10K_EICR_FAULT_MASK) {
2288                 PMD_INIT_LOG(ERR, "INT: find fault!");
2289                 fm10k_dev_handle_fault(hw, cause);
2290         }
2291
2292         /* Handle switch up/down */
2293         if (cause & FM10K_EICR_SWITCHNOTREADY)
2294                 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
2295
2296         if (cause & FM10K_EICR_SWITCHREADY)
2297                 PMD_INIT_LOG(INFO, "INT: Switch is ready");
2298
2299         /* Handle mailbox message */
2300         fm10k_mbx_lock(hw);
2301         hw->mbx.ops.process(hw, &hw->mbx);
2302         fm10k_mbx_unlock(hw);
2303
2304         /* Handle SRAM error */
2305         if (cause & FM10K_EICR_SRAMERROR) {
2306                 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
2307
2308                 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
2309                 /* Write to clear pending bits */
2310                 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
2311
                /* TODO: print out error message after shared code updates */
2313         }
2314
        /* Clear these 3 events if any are pending */
2316         cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
2317                  FM10K_EICR_SWITCHREADY;
2318         if (cause)
2319                 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
2320
2321         /* Re-enable interrupt from device side */
2322         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2323                                         FM10K_ITR_MASK_CLEAR);
2324         /* Re-enable interrupt from host side */
2325         rte_intr_enable(&(dev->pci_dev->intr_handle));
2326 }
2327
2328 /**
2329  * VF interrupt handler triggered by NIC for handling specific interrupt.
2330  *
2331  * @param handle
2332  *  Pointer to interrupt handle.
2333  * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
2335  *
2336  * @return
2337  *  void
2338  */
2339 static void
2340 fm10k_dev_interrupt_handler_vf(
2341                         __rte_unused struct rte_intr_handle *handle,
2342                         void *param)
2343 {
2344         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2345         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2346
2347         if (hw->mac.type != fm10k_mac_vf)
2348                 return;
2349
2350         /* Handle mailbox message if lock is acquired */
2351         fm10k_mbx_lock(hw);
2352         hw->mbx.ops.process(hw, &hw->mbx);
2353         fm10k_mbx_unlock(hw);
2354
2355         /* Re-enable interrupt from device side */
2356         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2357                                         FM10K_ITR_MASK_CLEAR);
2358         /* Re-enable interrupt from host side */
2359         rte_intr_enable(&(dev->pci_dev->intr_handle));
2360 }
2361
2362 /* Mailbox message handler in VF */
2363 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
2364         FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
2365         FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
2366         FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
2367         FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2368 };
2369
2370 /* Mailbox message handler in PF */
2371 static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
2372         FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
2373         FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
2374         FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
2375         FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
2376         FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
2377         FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
2378         FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2379 };
2380
2381 static int
2382 fm10k_setup_mbx_service(struct fm10k_hw *hw)
2383 {
2384         int err;
2385
2386         /* Initialize mailbox lock */
2387         fm10k_mbx_initlock(hw);
2388
2389         /* Replace default message handler with new ones */
2390         if (hw->mac.type == fm10k_mac_pf)
2391                 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
2392         else
2393                 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2394
2395         if (err) {
2396                 PMD_INIT_LOG(ERR, "Failed to register mailbox handler.err:%d",
2397                                 err);
2398                 return err;
2399         }
2400         /* Connect to SM for PF device or PF for VF device */
2401         return hw->mbx.ops.connect(hw, &hw->mbx);
2402 }
2403
2404 static void
2405 fm10k_close_mbx_service(struct fm10k_hw *hw)
2406 {
2407         /* Disconnect from SM for PF device or PF for VF device */
2408         hw->mbx.ops.disconnect(hw, &hw->mbx);
2409 }
2410
2411 static const struct eth_dev_ops fm10k_eth_dev_ops = {
2412         .dev_configure          = fm10k_dev_configure,
2413         .dev_start              = fm10k_dev_start,
2414         .dev_stop               = fm10k_dev_stop,
2415         .dev_close              = fm10k_dev_close,
2416         .promiscuous_enable     = fm10k_dev_promiscuous_enable,
2417         .promiscuous_disable    = fm10k_dev_promiscuous_disable,
2418         .allmulticast_enable    = fm10k_dev_allmulticast_enable,
2419         .allmulticast_disable   = fm10k_dev_allmulticast_disable,
2420         .stats_get              = fm10k_stats_get,
2421         .xstats_get             = fm10k_xstats_get,
2422         .stats_reset            = fm10k_stats_reset,
2423         .xstats_reset           = fm10k_stats_reset,
2424         .link_update            = fm10k_link_update,
2425         .dev_infos_get          = fm10k_dev_infos_get,
2426         .vlan_filter_set        = fm10k_vlan_filter_set,
2427         .vlan_offload_set       = fm10k_vlan_offload_set,
2428         .mac_addr_add           = fm10k_macaddr_add,
2429         .mac_addr_remove        = fm10k_macaddr_remove,
2430         .rx_queue_start         = fm10k_dev_rx_queue_start,
2431         .rx_queue_stop          = fm10k_dev_rx_queue_stop,
2432         .tx_queue_start         = fm10k_dev_tx_queue_start,
2433         .tx_queue_stop          = fm10k_dev_tx_queue_stop,
2434         .rx_queue_setup         = fm10k_rx_queue_setup,
2435         .rx_queue_release       = fm10k_rx_queue_release,
2436         .tx_queue_setup         = fm10k_tx_queue_setup,
2437         .tx_queue_release       = fm10k_tx_queue_release,
2438         .reta_update            = fm10k_reta_update,
2439         .reta_query             = fm10k_reta_query,
2440         .rss_hash_update        = fm10k_rss_hash_update,
2441         .rss_hash_conf_get      = fm10k_rss_hash_conf_get,
2442 };
2443
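/*
 * Select the vector (SSE) or scalar Tx burst function; a single queue
 * failing the vector preconditions forces scalar Tx for the whole port.
 */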
2444 static void __attribute__((cold))
2445 fm10k_set_tx_function(struct rte_eth_dev *dev)
2446 {
2447         struct fm10k_tx_queue *txq;
2448         int i;
2449         int use_sse = 1;
2450
2451         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2452                 txq = dev->data->tx_queues[i];
2453                 /* Check if Vector Tx is satisfied */
2454                 if (fm10k_tx_vec_condition_check(txq)) {
2455                         use_sse = 0;
2456                         break;
2457                 }
2458         }
2459
2460         if (use_sse) {
2461                 PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2462                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2463                         txq = dev->data->tx_queues[i];
2464                         fm10k_txq_vec_setup(txq);
2465                 }
2466                 dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2467         } else {
2468                 dev->tx_pkt_burst = fm10k_xmit_pkts;
2469                 PMD_INIT_LOG(DEBUG, "Use regular Tx func");
2470         }
2471 }
2472
2473 static void __attribute__((cold))
2474 fm10k_set_rx_function(struct rte_eth_dev *dev)
2475 {
2476         struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
2477         uint16_t i, rx_using_sse;
2478
2479         /* In order to allow Vector Rx there are a few configuration
2480          * conditions to be met.
2481          */
2482         if (!fm10k_rx_vec_condition_check(dev) && dev_info->rx_vec_allowed) {
2483                 if (dev->data->scattered_rx)
2484                         dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
2485                 else
2486                         dev->rx_pkt_burst = fm10k_recv_pkts_vec;
2487         } else if (dev->data->scattered_rx)
2488                 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
2489         else
2490                 dev->rx_pkt_burst = fm10k_recv_pkts;
2491
2492         rx_using_sse =
2493                 (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
2494                 dev->rx_pkt_burst == fm10k_recv_pkts_vec);
2495
2496         if (rx_using_sse)
2497                 PMD_INIT_LOG(DEBUG, "Use vector Rx func");
2498         else
2499                 PMD_INIT_LOG(DEBUG, "Use regular Rx func");
2500
2501         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2502                 struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
2503
2504                 rxq->rx_using_sse = rx_using_sse;
2505         }
2506 }
2507
2508 static void
2509 fm10k_params_init(struct rte_eth_dev *dev)
2510 {
2511         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2512         struct fm10k_dev_info *info = FM10K_DEV_PRIVATE_TO_INFO(dev);
2513
        /* Initialize bus info. Normally we would call fm10k_get_bus_info(), but
         * there is no way to get link status without reading BAR4. Until this
2516          * works, assume we have maximum bandwidth.
2517          * @todo - fix bus info
2518          */
2519         hw->bus_caps.speed = fm10k_bus_speed_8000;
2520         hw->bus_caps.width = fm10k_bus_width_pcie_x8;
2521         hw->bus_caps.payload = fm10k_bus_payload_512;
2522         hw->bus.speed = fm10k_bus_speed_8000;
2523         hw->bus.width = fm10k_bus_width_pcie_x8;
2524         hw->bus.payload = fm10k_bus_payload_256;
2525
2526         info->rx_vec_allowed = true;
2527 }
2528
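/*
 * Per-device init: map registers, initialize the shared code and hardware,
 * set up MAC addresses, the mailbox service and interrupts, then wait for
 * the switch manager before enabling the logical port.
 */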
2529 static int
2530 eth_fm10k_dev_init(struct rte_eth_dev *dev)
2531 {
2532         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2533         int diag;
2534         struct fm10k_macvlan_filter_info *macvlan;
2535
2536         PMD_INIT_FUNC_TRACE();
2537
2538         dev->dev_ops = &fm10k_eth_dev_ops;
2539         dev->rx_pkt_burst = &fm10k_recv_pkts;
2540         dev->tx_pkt_burst = &fm10k_xmit_pkts;
2541
2542         /* only initialize in the primary process */
2543         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2544                 return 0;
2545
2546         rte_eth_copy_pci_info(dev, dev->pci_dev);
2547
2548         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
2549         memset(macvlan, 0, sizeof(*macvlan));
2550         /* Vendor and Device ID need to be set before init of shared code */
2551         memset(hw, 0, sizeof(*hw));
2552         hw->device_id = dev->pci_dev->id.device_id;
2553         hw->vendor_id = dev->pci_dev->id.vendor_id;
2554         hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
2555         hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
2556         hw->revision_id = 0;
2557         hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
2558         if (hw->hw_addr == NULL) {
2559                 PMD_INIT_LOG(ERR, "Bad mem resource."
2560                         " Try to blacklist unused devices.");
2561                 return -EIO;
2562         }
2563
2564         /* Store fm10k_adapter pointer */
2565         hw->back = dev->data->dev_private;
2566
2567         /* Initialize the shared code */
2568         diag = fm10k_init_shared_code(hw);
2569         if (diag != FM10K_SUCCESS) {
2570                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
2571                 return -EIO;
2572         }
2573
2574         /* Initialize parameters */
2575         fm10k_params_init(dev);
2576
2577         /* Initialize the hw */
2578         diag = fm10k_init_hw(hw);
2579         if (diag != FM10K_SUCCESS) {
2580                 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
2581                 return -EIO;
2582         }
2583
2584         /* Initialize MAC address(es) */
2585         dev->data->mac_addrs = rte_zmalloc("fm10k",
2586                         ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
2587         if (dev->data->mac_addrs == NULL) {
2588                 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
2589                 return -ENOMEM;
2590         }
2591
2592         diag = fm10k_read_mac_addr(hw);
2593
2594         ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2595                         &dev->data->mac_addrs[0]);
2596
2597         if (diag != FM10K_SUCCESS ||
2598                 !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
2599
2600                 /* Generate a random addr */
2601                 eth_random_addr(hw->mac.addr);
2602                 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
                ether_addr_copy((const struct ether_addr *)hw->mac.addr,
                                &dev->data->mac_addrs[0]);
2605         }
2606
2607         /* Reset the hw statistics */
2608         fm10k_stats_reset(dev);
2609
2610         /* Reset the hw */
2611         diag = fm10k_reset_hw(hw);
2612         if (diag != FM10K_SUCCESS) {
2613                 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
2614                 return -EIO;
2615         }
2616
2617         /* Setup mailbox service */
2618         diag = fm10k_setup_mbx_service(hw);
2619         if (diag != FM10K_SUCCESS) {
2620                 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
2621                 return -EIO;
2622         }
2623
        /* PF and VF have different interrupt handling mechanisms */
2625         if (hw->mac.type == fm10k_mac_pf) {
2626                 /* register callback func to eal lib */
2627                 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2628                         fm10k_dev_interrupt_handler_pf, (void *)dev);
2629
2630                 /* enable MISC interrupt */
2631                 fm10k_dev_enable_intr_pf(dev);
2632         } else { /* VF */
2633                 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2634                         fm10k_dev_interrupt_handler_vf, (void *)dev);
2635
2636                 fm10k_dev_enable_intr_vf(dev);
2637         }
2638
2639         /* Enable uio intr after callback registered */
2640         rte_intr_enable(&(dev->pci_dev->intr_handle));
2641
2642         hw->mac.ops.update_int_moderator(hw);
2643
2644         /* Make sure Switch Manager is ready before going forward. */
2645         if (hw->mac.type == fm10k_mac_pf) {
2646                 int switch_ready = 0;
2647                 int i;
2648
2649                 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
2650                         fm10k_mbx_lock(hw);
2651                         hw->mac.ops.get_host_state(hw, &switch_ready);
2652                         fm10k_mbx_unlock(hw);
2653                         if (switch_ready)
2654                                 break;
2655                         /* Delay some time to acquire async LPORT_MAP info. */
2656                         rte_delay_us(WAIT_SWITCH_MSG_US);
2657                 }
2658
2659                 if (switch_ready == 0) {
2660                         PMD_INIT_LOG(ERR, "switch is not ready");
2661                         return -1;
2662                 }
2663         }
2664
        /*
         * The calls below trigger mailbox operations, so acquire the lock to
         * avoid racing with the interrupt handler. Writes to the mailbox FIFO
         * interrupt the PF/SM, whose handler replies with an interrupt to our
         * side, which in turn touches the mailbox FIFO again.
         */
2672         fm10k_mbx_lock(hw);
2673         /* Enable port first */
2674         hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, 1, 1);
2675
2676         /* Set unicast mode by default. App can change to other mode in other
2677          * API func.
2678          */
2679         hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
2680                                         FM10K_XCAST_MODE_NONE);
2681
2682         fm10k_mbx_unlock(hw);
2683
2684         /* Add default mac address */
2685         fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2686                 MAIN_VSI_POOL_NUMBER);
2687
2688         return 0;
2689 }
2690
2691 static int
2692 eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
2693 {
2694         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2695
2696         PMD_INIT_FUNC_TRACE();
2697
2698         /* only uninitialize in the primary process */
2699         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2700                 return 0;
2701
2702         /* safe to close dev here */
2703         fm10k_dev_close(dev);
2704
2705         dev->dev_ops = NULL;
2706         dev->rx_pkt_burst = NULL;
2707         dev->tx_pkt_burst = NULL;
2708
2709         /* disable uio/vfio intr */
2710         rte_intr_disable(&(dev->pci_dev->intr_handle));
2711
        /* PF and VF have different interrupt handling mechanisms */
2713         if (hw->mac.type == fm10k_mac_pf) {
2714                 /* disable interrupt */
2715                 fm10k_dev_disable_intr_pf(dev);
2716
2717                 /* unregister callback func to eal lib */
2718                 rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
2719                         fm10k_dev_interrupt_handler_pf, (void *)dev);
2720         } else {
2721                 /* disable interrupt */
2722                 fm10k_dev_disable_intr_vf(dev);
2723
2724                 rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
2725                         fm10k_dev_interrupt_handler_vf, (void *)dev);
2726         }
2727
2728         /* free mac memory */
2729         if (dev->data->mac_addrs) {
2730                 rte_free(dev->data->mac_addrs);
2731                 dev->data->mac_addrs = NULL;
2732         }
2733
2734         memset(hw, 0, sizeof(*hw));
2735
2736         return 0;
2737 }
2738
2739 /*
2740  * The set of PCI devices this driver supports. This driver will enable both PF
2741  * and SRIOV-VF devices.
2742  */
2743 static const struct rte_pci_id pci_id_fm10k_map[] = {
2744 #define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
2745 #define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
2746 #include "rte_pci_dev_ids.h"
2747         { .vendor_id = 0, /* sentinel */ },
2748 };
2749
2750 static struct eth_driver rte_pmd_fm10k = {
2751         .pci_drv = {
2752                 .name = "rte_pmd_fm10k",
2753                 .id_table = pci_id_fm10k_map,
2754                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
2755         },
2756         .eth_dev_init = eth_fm10k_dev_init,
2757         .eth_dev_uninit = eth_fm10k_dev_uninit,
2758         .dev_private_size = sizeof(struct fm10k_adapter),
2759 };
2760
2761 /*
2762  * Driver initialization routine.
2763  * Invoked once at EAL init time.
2764  * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
2765  */
2766 static int
2767 rte_pmd_fm10k_init(__rte_unused const char *name,
2768         __rte_unused const char *params)
2769 {
2770         PMD_INIT_FUNC_TRACE();
2771         rte_eth_driver_register(&rte_pmd_fm10k);
2772         return 0;
2773 }
2774
2775 static struct rte_driver rte_fm10k_driver = {
2776         .type = PMD_PDEV,
2777         .init = rte_pmd_fm10k_init,
2778 };
2779
2780 PMD_REGISTER_DRIVER(rte_fm10k_driver);