/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>
#include <rte_kvargs.h>

#include "fm10k.h"
#include "base/fm10k_api.h"

/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL

#define MAIN_VSI_POOL_NUMBER 0

/* Maximum number of attempts to query switch status */
#define MAX_QUERY_SWITCH_STATE_TIMES 10
/* Wait interval between switch status queries */
#define WAIT_SWITCH_MSG_US    100000
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)

/* default 1:1 map from queue ID to interrupt vector ID */
#define Q2V(dev, queue_id) (dev->pci_dev->intr_handle.intr_vec[queue_id])

/* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
#define MAX_LPORT_NUM    128
#define GLORT_FD_Q_BASE  0x40
#define GLORT_PF_MASK    0xFFC0
#define GLORT_FD_MASK    GLORT_PF_MASK
#define GLORT_FD_INDEX   GLORT_FD_Q_BASE

static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
        const u8 *mac, bool add, uint32_t pool);
static void fm10k_tx_queue_release(void *queue);
static void fm10k_rx_queue_release(void *queue);
static void fm10k_set_rx_function(struct rte_eth_dev *dev);
static void fm10k_set_tx_function(struct rte_eth_dev *dev);
static int fm10k_check_ftag(struct rte_devargs *devargs);

struct fm10k_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
        {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
        {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
        {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
        {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
        {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
        {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
        {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
        {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
                nodesc_drop)},
};

#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
                sizeof(fm10k_hw_stats_strings[0]))

struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
        {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
        {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
        {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
};

#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
                sizeof(fm10k_hw_stats_rx_q_strings[0]))

struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
        {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
        {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
};

#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
                sizeof(fm10k_hw_stats_tx_q_strings[0]))

#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
                (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
static int
fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);

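/* Mailbox lock helpers: the PF/VF mailbox is shared state, so every
 * mailbox transaction below is bracketed by this spinlock, acquired by
 * a trylock-with-delay loop rather than a busy spin.
 */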
static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
        rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

static void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
        while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
                rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}

static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
        rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

/* Stubs needed for linkage when vPMD is disabled */
int __attribute__((weak))
fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
{
        return -1;
}

uint16_t __attribute__((weak))
fm10k_recv_pkts_vec(
        __rte_unused void *rx_queue,
        __rte_unused struct rte_mbuf **rx_pkts,
        __rte_unused uint16_t nb_pkts)
{
        return 0;
}

uint16_t __attribute__((weak))
fm10k_recv_scattered_pkts_vec(
                __rte_unused void *rx_queue,
                __rte_unused struct rte_mbuf **rx_pkts,
                __rte_unused uint16_t nb_pkts)
{
        return 0;
}

int __attribute__((weak))
fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
{
        return -1;
}

void __attribute__((weak))
fm10k_rx_queue_release_mbufs_vec(
                __rte_unused struct fm10k_rx_queue *rxq)
{
        return;
}

void __attribute__((weak))
fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
{
        return;
}

int __attribute__((weak))
fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
{
        return -1;
}

uint16_t __attribute__((weak))
fm10k_xmit_pkts_vec(__rte_unused void *tx_queue,
                __rte_unused struct rte_mbuf **tx_pkts,
                __rte_unused uint16_t nb_pkts)
{
        return 0;
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device.
 * return 0 on success
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
        static const union fm10k_rx_desc zero = {{0} };
        uint64_t dma_addr;
        int i, diag;
        PMD_INIT_FUNC_TRACE();

        diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
        if (diag != 0)
                return -ENOMEM;

        for (i = 0; i < q->nb_desc; ++i) {
                fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
                if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
                        rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
                                                q->nb_desc);
                        return -EINVAL;
                }
                dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
                q->hw_ring[i].q.pkt_addr = dma_addr;
                q->hw_ring[i].q.hdr_addr = dma_addr;
        }

        /* initialize extra software ring entries. Space for these extra
         * entries is always allocated.
         */
        memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
        for (i = 0; i < q->nb_fake_desc; ++i) {
                q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
                q->hw_ring[q->nb_desc + i] = zero;
        }

        q->next_dd = 0;
        q->next_alloc = 0;
        q->next_trigger = q->alloc_thresh - 1;
        FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
        q->rxrearm_start = 0;
        q->rxrearm_nb = 0;

        return 0;
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device.
 */
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
        union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
        uint32_t i;
        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* zero faked descriptors */
        for (i = 0; i < q->nb_fake_desc; ++i)
                q->hw_ring[q->nb_desc + i] = zero;

        /* vPMD driver has a different way of releasing mbufs. */
        if (q->rx_using_sse) {
                fm10k_rx_queue_release_mbufs_vec(q);
                return;
        }

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}

/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
                rx_queue_clean(q);
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
                q = NULL;
        }
}

/*
 * disable RX queue, wait until HW finishes the necessary flush operation
 */
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
                        reg & ~FM10K_RXQCTL_ENABLE);

        /* Wait 100us at most */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
                if (!(reg & FM10K_RXQCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device
 */
static inline void
tx_queue_reset(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        q->last_free = 0;
        q->next_free = 0;
        q->nb_used = 0;
        q->nb_free = q->nb_desc - 1;
        fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
        FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device
 */
static inline void
tx_queue_clean(struct fm10k_tx_queue *q)
{
        struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
        uint32_t i;
        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}

/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
tx_queue_free(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
                tx_queue_clean(q);
                if (q->rs_tracker.list) {
                        rte_free(q->rs_tracker.list);
                        q->rs_tracker.list = NULL;
                }
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
                q = NULL;
        }
}

/*
 * disable TX queue, wait until HW finishes the necessary flush operation
 */
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
                        reg & ~FM10K_TXDCTL_ENABLE);

        /* Wait 100us at most */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
                if (!(reg & FM10K_TXDCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}

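/* Validate the requested multi-queue RX mode: DCB is not supported, VMDQ
 * is PF-only, and the VMDQ pool count must fit within both the pool
 * bitmap width and the number of configured RX queues.
 */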
static int
fm10k_check_mq_mode(struct rte_eth_dev *dev)
{
        enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;

        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
                PMD_INIT_LOG(ERR, "DCB mode is not supported.");
                return -EINVAL;
        }

        if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
                return 0;

        if (hw->mac.type == fm10k_mac_vf) {
                PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
                return -EINVAL;
        }

        /* Check VMDQ queue pool number */
        if (vmdq_conf->nb_queue_pools >
                        sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
                        vmdq_conf->nb_queue_pools > nb_rx_q) {
                PMD_INIT_LOG(ERR, "Too many queue pools: %d",
                        vmdq_conf->nb_queue_pools);
                return -EINVAL;
        }

        return 0;
}

static const struct fm10k_txq_ops def_txq_ops = {
        .reset = tx_queue_reset,
};

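/* Device-level configure hook: warn that CRC stripping cannot be turned
 * off on this hardware, then validate the requested multi-queue mode.
 */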
static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
        int ret;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
                PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
        /* multiple queue mode checking */
        ret = fm10k_check_mq_mode(dev);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
                            ret);
                return ret;
        }

        return 0;
}

/* fls = find last set bit = 32 minus the number of leading zeros */
#ifndef fls
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
#endif

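/* Add a VLAN filter for every VLAN ID that appears in the VMDQ pool map,
 * so traffic for the configured pools is accepted.
 */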
static void
fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        uint32_t i;

        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
                if (!vmdq_conf->pool_map[i].pools)
                        continue;
                fm10k_mbx_lock(hw);
                fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
                fm10k_mbx_unlock(hw);
        }
}

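/* Restore the default MAC filter on the PF main VSI (pool 0). */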
static void
fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Add default mac address */
        fm10k_MAC_filter_set(dev, hw->mac.addr, true,
                MAIN_VSI_POOL_NUMBER);
}

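/* Program the RSS hash key, the redirection table (queues assigned
 * round-robin) and the MRQC hash-type register. RSS is disabled when only
 * one RX queue is used or when no hash functions are requested.
 */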
static void
fm10k_dev_rss_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        uint32_t mrqc, *key, i, reta, j;
        uint64_t hf;

#define RSS_KEY_SIZE 40
        static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
                0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
                0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
                0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
        };

        if (dev->data->nb_rx_queues == 1 ||
            dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
            dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
                FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
                return;
        }

        /* random key is rss_intel_key (default) or user provided (rss_key) */
        if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
                key = (uint32_t *)rss_intel_key;
        else
                key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;

        /* Now fill our hash function seeds, 4 bytes at a time */
        for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
                FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

        /*
         * Fill in redirection table
         * The byte-swap is needed because NIC registers are in
         * little-endian order.
         */
        reta = 0;
        for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
                if (j == dev->data->nb_rx_queues)
                        j = 0;
                reta = (reta << CHAR_BIT) | j;
                if ((i & 3) == 3)
                        FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
                                        rte_bswap32(reta));
        }

        /*
         * Generate RSS hash based on packet types, TCP/UDP
         * port numbers and/or IPv4/v6 src and dst addresses
         */
        hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
        mrqc = 0;
        mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
        mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;

        if (mrqc == 0) {
                PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not "
                        "supported", hf);
                return;
        }

        FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
}

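/* Reset every active logical port to unicast-only (XCAST_MODE_NONE). */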
static void
fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t i;

        for (i = 0; i < nb_lport_new; i++) {
                /* Set unicast mode by default. The application can
                 * switch to another mode via other API functions.
                 */
                fm10k_mbx_lock(hw);
                hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
                        FM10K_XCAST_MODE_NONE);
                fm10k_mbx_unlock(hw);
        }
}

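/* Configure multi-queue RX: always program RSS, and on the PF additionally
 * handle VMDQ. When the pool count changes, the logical ports and the
 * MAC/VLAN filter table are reset to match the new layout.
 */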
static void
fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        struct fm10k_macvlan_filter_info *macvlan;
        uint16_t nb_queue_pools = 0; /* pool number in configuration */
        uint16_t nb_lport_new;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        fm10k_dev_rss_configure(dev);

        /* only PF supports VMDQ */
        if (hw->mac.type != fm10k_mac_pf)
                return;

        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
                nb_queue_pools = vmdq_conf->nb_queue_pools;

        /* no pool number change, no need to update logic port and VLAN/MAC */
        if (macvlan->nb_queue_pools == nb_queue_pools)
                return;

        nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
        fm10k_dev_logic_port_update(dev, nb_lport_new);

        /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
        memset(dev->data->mac_addrs, 0,
                ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
        ether_addr_copy((const struct ether_addr *)hw->mac.addr,
                &dev->data->mac_addrs[0]);
        memset(macvlan, 0, sizeof(*macvlan));
        macvlan->nb_queue_pools = nb_queue_pools;

        if (nb_queue_pools)
                fm10k_dev_vmdq_rx_configure(dev);
        else
                fm10k_dev_pf_main_vsi_reset(dev);
}

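/* Program the hardware for every configured TX queue: mask TX interrupts,
 * write the descriptor ring base/size registers, optionally enable FTAG
 * (PF only), assign the default SGLORT, and pick the TX burst function.
 */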
static int
fm10k_dev_tx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, ret;
        struct fm10k_tx_queue *txq;
        uint64_t base_addr;
        uint32_t size;

        /* Disable TXINT to avoid possible interrupt */
        for (i = 0; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_TXINT(i),
                                3 << FM10K_TXINT_TIMER_SHIFT);

        /* Setup TX queue */
        for (i = 0; i < dev->data->nb_tx_queues; ++i) {
                txq = dev->data->tx_queues[i];
                base_addr = txq->hw_ring_phys_addr;
                size = txq->nb_desc * sizeof(struct fm10k_tx_desc);

                /* disable queue to avoid issues while updating state */
                ret = tx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }
                /* Enable use of FTAG bit in TX descriptor, PFVTCTL
                 * register is read-only for VF.
                 */
                if (fm10k_check_ftag(dev->pci_dev->devargs)) {
                        if (hw->mac.type == fm10k_mac_pf) {
                                FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
                                                FM10K_PFVTCTL_FTAG_DESC_ENABLE);
                                PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
                        } else {
                                PMD_INIT_LOG(ERR, "VF FTAG is not supported.");
                                return -ENOTSUP;
                        }
                }

                /* set location and size for descriptor ring */
                FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);

                /* assign default SGLORT for each TX queue */
                FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
        }

        /* set up vector or scalar TX function as appropriate */
        fm10k_set_tx_function(dev);

        return 0;
}

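/* Program the hardware for every configured RX queue: route RX interrupts
 * to their vectors (or mask them), write the descriptor ring registers,
 * size the RX buffers, enable scattered RX and drop-on-empty where needed,
 * then apply the VMDQ/RSS layout and select the RX burst function.
 */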
static int
fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_macvlan_filter_info *macvlan;
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        int i, ret;
        struct fm10k_rx_queue *rxq;
        uint64_t base_addr;
        uint32_t size;
        uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
        uint32_t logic_port = hw->mac.dglort_map;
        uint16_t buf_size;
        uint16_t queue_stride = 0;

        /* enable RXINT for interrupt mode */
        i = 0;
        if (rte_intr_dp_is_en(intr_handle)) {
                for (; i < dev->data->nb_rx_queues; i++) {
                        FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(dev, i));
                        if (hw->mac.type == fm10k_mac_pf)
                                FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, i)),
                                        FM10K_ITR_AUTOMASK |
                                        FM10K_ITR_MASK_CLEAR);
                        else
                                FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, i)),
                                        FM10K_ITR_AUTOMASK |
                                        FM10K_ITR_MASK_CLEAR);
                }
        }
        /* Disable other RXINT to avoid possible interrupt */
        for (; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_RXINT(i),
                        3 << FM10K_RXINT_TIMER_SHIFT);

        /* Setup RX queues */
        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
                rxq = dev->data->rx_queues[i];
                base_addr = rxq->hw_ring_phys_addr;
                size = rxq->nb_desc * sizeof(union fm10k_rx_desc);

                /* disable queue to avoid issues while updating state */
                ret = rx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }

                /* Setup the Base and Length of the Rx Descriptor Ring */
                FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);

                /* Configure the Rx buffer size for one buff without split */
                buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
                        RTE_PKTMBUF_HEADROOM);
                /* As RX buffer is aligned to 512B within mbuf, some bytes are
                 * reserved for this purpose, and the worst case could be 511B.
                 * But SRR reg assumes all buffers have the same size. In order
                 * to fill the gap, we'll have to consider the worst case and
                 * assume 512B is reserved. If we don't do so, it's possible
                 * for HW to overwrite data to next mbuf.
                 */
                buf_size -= FM10K_RX_DATABUF_ALIGN;

                FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
                                (buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
                                FM10K_SRRCTL_LOOPBACK_SUPPRESS);

                /* It adds dual VLAN length for supporting dual VLAN */
                if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
                                2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
                        dev->data->dev_conf.rxmode.enable_scatter) {
                        uint32_t reg;
                        dev->data->scattered_rx = 1;
                        reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
                        reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
                        FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
                }

                /* Enable drop on empty, it's RO for VF */
                if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
                        rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

                FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
                FM10K_WRITE_FLUSH(hw);
        }

        /* Configure VMDQ/RSS if applicable */
        fm10k_dev_mq_rx_configure(dev);

        /* Decide the best RX function */
        fm10k_set_rx_function(dev);

        /* update RX_SGLORT for loopback suppression */
        if (hw->mac.type != fm10k_mac_pf)
                return 0;
        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        if (macvlan->nb_queue_pools)
                queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
                if (i && queue_stride && !(i % queue_stride))
                        logic_port++;
                FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
        }

        return 0;
}

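/* Start one RX queue: refill its software ring with fresh mbufs, set the
 * head/tail pointers, and set the queue's enable bit in RXQCTL.
 */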
static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err = -1;
        uint32_t reg;
        struct fm10k_rx_queue *rxq;

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                rxq = dev->data->rx_queues[rx_queue_id];
                err = rx_queue_reset(rxq);
                if (err == -ENOMEM) {
                        PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
                        return err;
                } else if (err == -EINVAL) {
                        PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
                                " %d", err);
                        return err;
                }

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled on real
                 * hardware, but BEFORE the queue is enabled when using the
                 * emulation platform. Do it in both places for now and remove
                 * this comment and the following two register writes when the
                 * emulation platform is no longer being used.
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

                /* Set PF ownership flag for PF devices */
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
                if (hw->mac.type == fm10k_mac_pf)
                        reg |= FM10K_RXQCTL_PF;
                reg |= FM10K_RXQCTL_ENABLE;
                /* enable RX queue */
                FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
                FM10K_WRITE_FLUSH(hw);

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
                dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
        }

        return err;
}

static int
fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                /* Disable RX queue */
                rx_queue_disable(hw, rx_queue_id);

                /* Free mbuf and clean HW ring */
                rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
                dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

static int
fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY       0x00010000
        uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
        int err = 0;

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];

                q->ops->reset(q);

                /* reset head and tail pointers */
                FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

                /* enable TX queue */
                FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
                                        FM10K_TXDCTL_ENABLE | txdctl);
                FM10K_WRITE_FLUSH(hw);
                dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
        } else
                err = -1;

        return err;
}

static int
fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                tx_queue_disable(hw, tx_queue_id);
                tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
                dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

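/* The GLORT range is valid once one has been assigned to this port,
 * i.e. once the DGLORTMAP "none" marker is no longer set in the map.
 */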
static inline int fm10k_glort_valid(struct fm10k_hw *hw)
{
        return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
                != FM10K_DGLORTMAP_NONE);
}

static void
fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_PROMISC);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
}

static void
fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint8_t mode;
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        if (dev->data->all_multicast == 1)
                mode = FM10K_XCAST_MODE_ALLMULTI;
        else
                mode = FM10K_XCAST_MODE_NONE;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                mode);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
}

static void
fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        /* If promiscuous mode is enabled, it doesn't make sense to enable
         * allmulticast and disable promiscuous since fm10k only can select
         * one of the modes.
         */
        if (dev->data->promiscuous) {
                PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
                        "needn't enable allmulticast");
                return;
        }

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_ALLMULTI);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
}

static void
fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        if (dev->data->promiscuous) {
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
                        "since promisc mode is enabled");
                return;
        }

        fm10k_mbx_lock(hw);
        /* Change mode to unicast mode */
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_NONE);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
}

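/* Program the DGLORT map decoders: entry 0 covers the PF/VMDQ queue range
 * (GLORTs 0x0-0x3F), entry 1 covers the Flow Director range (0x40-0x7F),
 * and all remaining entries are invalidated.
 */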
static void
fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
        uint16_t nb_queue_pools;
        struct fm10k_macvlan_filter_info *macvlan;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        nb_queue_pools = macvlan->nb_queue_pools;
        pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
        rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;

        /* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */
        dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
        dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
                        hw->mac.dglort_map;
        FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
        /* Configure VMDQ/RSS DGlort Decoder */
        FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);

        /* Flow Director configurations, only queue number is valid. */
        dglortdec = fls(dev->data->nb_rx_queues - 1);
        dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
                        (hw->mac.dglort_map + GLORT_FD_Q_BASE);
        FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
        FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);

        /* Invalidate all other GLORT entries */
        for (i = 2; i < FM10K_DGLORT_COUNT; i++)
                FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
                                FM10K_DGLORTMAP_NONE);
}

#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
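/* Start the device: re-initialize the hardware, program the TX/RX queues,
 * set up RX interrupts, configure the DGLORT map on the PF, and start all
 * queues not marked for deferred start. On failure, the queues started so
 * far are cleaned up.
 */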
static int
fm10k_dev_start(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, diag;

        PMD_INIT_FUNC_TRACE();

        /* stop, init, then start the hw */
        diag = fm10k_stop_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_init_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_start_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_dev_tx_init(dev);
        if (diag) {
                PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
                return diag;
        }

        if (fm10k_dev_rxq_interrupt_setup(dev))
                return -EIO;

        diag = fm10k_dev_rx_init(dev);
        if (diag) {
                PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
                return diag;
        }

        if (hw->mac.type == fm10k_mac_pf)
                fm10k_dev_dglort_map_configure(dev);

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct fm10k_rx_queue *rxq;
                rxq = dev->data->rx_queues[i];

                if (rxq->rx_deferred_start)
                        continue;
                diag = fm10k_dev_rx_queue_start(dev, i);
                if (diag != 0) {
                        int j;
                        for (j = 0; j < i; ++j)
                                rx_queue_clean(dev->data->rx_queues[j]);
                        return diag;
                }
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct fm10k_tx_queue *txq;
                txq = dev->data->tx_queues[i];

                if (txq->tx_deferred_start)
                        continue;
                diag = fm10k_dev_tx_queue_start(dev, i);
                if (diag != 0) {
                        int j;
                        for (j = 0; j < i; ++j)
                                tx_queue_clean(dev->data->tx_queues[j]);
                        for (j = 0; j < dev->data->nb_rx_queues; ++j)
                                rx_queue_clean(dev->data->rx_queues[j]);
                        return diag;
                }
        }

        /* Update default vlan when not in VMDQ mode */
        if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
                fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);

        return 0;
}

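/* Stop the device: halt all TX/RX queues, mask the datapath interrupts,
 * and release the queue/vector mapping.
 */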
static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        int i;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->tx_queues)
                for (i = 0; i < dev->data->nb_tx_queues; i++)
                        fm10k_dev_tx_queue_stop(dev, i);

        if (dev->data->rx_queues)
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        fm10k_dev_rx_queue_stop(dev, i);

        /* Disable datapath event */
        if (rte_intr_dp_is_en(intr_handle)) {
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        FM10K_WRITE_REG(hw, FM10K_RXINT(i),
                                3 << FM10K_RXINT_TIMER_SHIFT);
                        if (hw->mac.type == fm10k_mac_pf)
                                FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, i)),
                                        FM10K_ITR_MASK_SET);
                        else
                                FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, i)),
                                        FM10K_ITR_MASK_SET);
                }
        }
        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        rte_free(intr_handle->intr_vec);
        intr_handle->intr_vec = NULL;
}

static void
fm10k_dev_queue_release(struct rte_eth_dev *dev)
{
        int i;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->tx_queues) {
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        struct fm10k_tx_queue *txq = dev->data->tx_queues[i];

                        tx_queue_free(txq);
                }
        }

        if (dev->data->rx_queues) {
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        fm10k_rx_queue_release(dev->data->rx_queues[i]);
        }
}

static void
fm10k_dev_close(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        fm10k_mbx_lock(hw);
        hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
                MAX_LPORT_NUM, false);
        fm10k_mbx_unlock(hw);

        /* Stop mailbox service first */
        fm10k_close_mbx_service(hw);
        fm10k_dev_stop(dev);
        fm10k_dev_queue_release(dev);
        fm10k_stop_hw(hw);
}

static int
fm10k_link_update(struct rte_eth_dev *dev,
        __rte_unused int wait_to_complete)
{
        PMD_INIT_FUNC_TRACE();

        /* The host-interface link is always up.  The speed is ~50Gbps per Gen3
         * x8 PCIe interface. For now, we leave the speed undefined since there
         * is no 50Gbps Ethernet. */
        dev->data->dev_link.link_speed  = 0;
        dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        dev->data->dev_link.link_status = ETH_LINK_UP;

        return 0;
}

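/* Report the extended statistics names: the global HW counters first,
 * followed by per-queue RX and TX counters for every PF queue. Always
 * returns the full count so callers can size their buffers.
 */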
static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
{
        unsigned i, q;
        unsigned count = 0;

        if (xstats_names != NULL) {
                /* Note: limit checked in rte_eth_xstats_names() */

                /* Global stats */
                for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
                        snprintf(xstats_names[count].name,
                                sizeof(xstats_names[count].name),
                                "%s", fm10k_hw_stats_strings[count].name);
                        xstats_names[count].id = count;
                        count++;
                }

                /* PF queue stats */
                for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
                        for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
                                snprintf(xstats_names[count].name,
                                        sizeof(xstats_names[count].name),
                                        "rx_q%u_%s", q,
                                        fm10k_hw_stats_rx_q_strings[i].name);
                                xstats_names[count].id = count;
                                count++;
                        }
                        for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
                                snprintf(xstats_names[count].name,
                                        sizeof(xstats_names[count].name),
                                        "tx_q%u_%s", q,
                                        fm10k_hw_stats_tx_q_strings[i].name);
                                xstats_names[count].id = count;
                                count++;
                        }
                }
        }
        return FM10K_NB_XSTATS;
}

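/* Fill the xstats value array in the same order as the names above; if
 * the caller's buffer is too small, only the required count is returned.
 */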
static int
fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                 unsigned n)
{
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        unsigned i, q, count = 0;

        if (n < FM10K_NB_XSTATS)
                return FM10K_NB_XSTATS;

        /* Global stats */
        for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
                xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
                        fm10k_hw_stats_strings[count].offset);
                count++;
        }

        /* PF queue stats */
        for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
                for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
                        xstats[count].value =
                                *(uint64_t *)(((char *)&hw_stats->q[q]) +
                                fm10k_hw_stats_rx_q_strings[i].offset);
                        count++;
                }
                for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
                        xstats[count].value =
                                *(uint64_t *)(((char *)&hw_stats->q[q]) +
                                fm10k_hw_stats_tx_q_strings[i].offset);
                        count++;
                }
        }

        return FM10K_NB_XSTATS;
}

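/* Read the hardware counters and aggregate the per-queue numbers into the
 * device-level packet and byte totals.
 */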
static void
fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        uint64_t ipackets, opackets, ibytes, obytes;
        struct fm10k_hw *hw =
                FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        int i;

        PMD_INIT_FUNC_TRACE();

        fm10k_update_hw_stats(hw, hw_stats);

        ipackets = opackets = ibytes = obytes = 0;
        for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
                (i < hw->mac.max_queues); ++i) {
                stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
                stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
                stats->q_ibytes[i]   = hw_stats->q[i].rx_bytes.count;
                stats->q_obytes[i]   = hw_stats->q[i].tx_bytes.count;
                ipackets += stats->q_ipackets[i];
                opackets += stats->q_opackets[i];
                ibytes   += stats->q_ibytes[i];
                obytes   += stats->q_obytes[i];
        }
        stats->ipackets = ipackets;
        stats->opackets = opackets;
        stats->ibytes = ibytes;
        stats->obytes = obytes;
}

static void
fm10k_stats_reset(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        memset(hw_stats, 0, sizeof(*hw_stats));
        fm10k_rebind_hw_stats(hw, hw_stats);
}

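/* Report device capabilities and defaults: queue counts, offload flags,
 * descriptor limits, and the recommended RX/TX threshold configuration.
 */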
1382 static void
1383 fm10k_dev_infos_get(struct rte_eth_dev *dev,
1384         struct rte_eth_dev_info *dev_info)
1385 {
1386         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1387
1388         PMD_INIT_FUNC_TRACE();
1389
1390         dev_info->min_rx_bufsize     = FM10K_MIN_RX_BUF_SIZE;
1391         dev_info->max_rx_pktlen      = FM10K_MAX_PKT_SIZE;
1392         dev_info->max_rx_queues      = hw->mac.max_queues;
1393         dev_info->max_tx_queues      = hw->mac.max_queues;
1394         dev_info->max_mac_addrs      = FM10K_MAX_MACADDR_NUM;
1395         dev_info->max_hash_mac_addrs = 0;
1396         dev_info->max_vfs            = dev->pci_dev->max_vfs;
1397         dev_info->vmdq_pool_base     = 0;
1398         dev_info->vmdq_queue_base    = 0;
1399         dev_info->max_vmdq_pools     = ETH_32_POOLS;
1400         dev_info->vmdq_queue_num     = FM10K_MAX_QUEUES_PF;
1401         dev_info->rx_offload_capa =
1402                 DEV_RX_OFFLOAD_VLAN_STRIP |
1403                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1404                 DEV_RX_OFFLOAD_UDP_CKSUM  |
1405                 DEV_RX_OFFLOAD_TCP_CKSUM;
1406         dev_info->tx_offload_capa =
1407                 DEV_TX_OFFLOAD_VLAN_INSERT |
1408                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
1409                 DEV_TX_OFFLOAD_UDP_CKSUM   |
1410                 DEV_TX_OFFLOAD_TCP_CKSUM   |
1411                 DEV_TX_OFFLOAD_TCP_TSO;
1412
1413         dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
1414         dev_info->reta_size = FM10K_MAX_RSS_INDICES;
1415
1416         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1417                 .rx_thresh = {
1418                         .pthresh = FM10K_DEFAULT_RX_PTHRESH,
1419                         .hthresh = FM10K_DEFAULT_RX_HTHRESH,
1420                         .wthresh = FM10K_DEFAULT_RX_WTHRESH,
1421                 },
1422                 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
1423                 .rx_drop_en = 0,
1424         };
1425
1426         dev_info->default_txconf = (struct rte_eth_txconf) {
1427                 .tx_thresh = {
1428                         .pthresh = FM10K_DEFAULT_TX_PTHRESH,
1429                         .hthresh = FM10K_DEFAULT_TX_HTHRESH,
1430                         .wthresh = FM10K_DEFAULT_TX_WTHRESH,
1431                 },
1432                 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
1433                 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
1434                 .txq_flags = FM10K_SIMPLE_TX_FLAG,
1435         };
1436
1437         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1438                 .nb_max = FM10K_MAX_RX_DESC,
1439                 .nb_min = FM10K_MIN_RX_DESC,
1440                 .nb_align = FM10K_MULT_RX_DESC,
1441         };
1442
1443         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1444                 .nb_max = FM10K_MAX_TX_DESC,
1445                 .nb_min = FM10K_MIN_TX_DESC,
1446                 .nb_align = FM10K_MULT_TX_DESC,
1447         };
1448
1449         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
1450                         ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
1451                         ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
1452 }
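
/* Illustrative application-side usage (names are examples, not driver
 * code): the limits reported above are consumed through the generic
 * ethdev API, e.g. to validate a ring size before queue setup:
 *
 *     struct rte_eth_dev_info info;
 *
 *     rte_eth_dev_info_get(port_id, &info);
 *     if (nb_desc > info.rx_desc_lim.nb_max ||
 *         nb_desc % info.rx_desc_lim.nb_align != 0)
 *             return -EINVAL;
 */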
1453
1454 #ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
1455 static const uint32_t *
1456 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1457 {
1458         if (dev->rx_pkt_burst == fm10k_recv_pkts ||
1459             dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
1460                 static const uint32_t ptypes[] = {
1461                         /* refers to rx_desc_to_ol_flags() */
1462                         RTE_PTYPE_L2_ETHER,
1463                         RTE_PTYPE_L3_IPV4,
1464                         RTE_PTYPE_L3_IPV4_EXT,
1465                         RTE_PTYPE_L3_IPV6,
1466                         RTE_PTYPE_L3_IPV6_EXT,
1467                         RTE_PTYPE_L4_TCP,
1468                         RTE_PTYPE_L4_UDP,
1469                         RTE_PTYPE_UNKNOWN
1470                 };
1471
1472                 return ptypes;
1473         } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
1474                    dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
1475                 static const uint32_t ptypes_vec[] = {
1476                         /* refers to fm10k_desc_to_pktype_v() */
1477                         RTE_PTYPE_L3_IPV4,
1478                         RTE_PTYPE_L3_IPV4_EXT,
1479                         RTE_PTYPE_L3_IPV6,
1480                         RTE_PTYPE_L3_IPV6_EXT,
1481                         RTE_PTYPE_L4_TCP,
1482                         RTE_PTYPE_L4_UDP,
1483                         RTE_PTYPE_TUNNEL_GENEVE,
1484                         RTE_PTYPE_TUNNEL_NVGRE,
1485                         RTE_PTYPE_TUNNEL_VXLAN,
1486                         RTE_PTYPE_TUNNEL_GRE,
1487                         RTE_PTYPE_UNKNOWN
1488                 };
1489
1490                 return ptypes_vec;
1491         }
1492
1493         return NULL;
1494 }
1495 #else
1496 static const uint32_t *
1497 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1498 {
1499         return NULL;
1500 }
1501 #endif
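
/* Illustrative only: applications query the lists built above through
 * rte_eth_dev_get_supported_ptypes(), e.g. to check L4 classification:
 *
 *     uint32_t ptypes[16];
 *     int num = rte_eth_dev_get_supported_ptypes(port_id,
 *                     RTE_PTYPE_L4_MASK, ptypes, 16);
 *
 * A positive num means the current Rx burst function reports at least
 * one matching L4 packet type.
 */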
1502
1503 static int
1504 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1505 {
1506         s32 result;
1507         uint16_t mac_num = 0;
1508         uint32_t vid_idx, vid_bit, mac_index;
1509         struct fm10k_hw *hw;
1510         struct fm10k_macvlan_filter_info *macvlan;
1511         struct rte_eth_dev_data *data = dev->data;
1512
1513         hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1514         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1515
1516         if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
1517                 PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
1518                 return -EINVAL;
1519         }
1520
1521         if (vlan_id > ETH_VLAN_ID_MAX) {
1522                 PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
1523                 return -EINVAL;
1524         }
1525
1526         vid_idx = FM10K_VFTA_IDX(vlan_id);
1527         vid_bit = FM10K_VFTA_BIT(vlan_id);
1528         /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
1529         if (on && (macvlan->vfta[vid_idx] & vid_bit))
1530                 return 0;
1531         /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
1532         if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
1533                 PMD_INIT_LOG(ERR, "Invalid vlan_id: not present "
1534                         "in the VLAN filter table");
1535                 return -EINVAL;
1536         }
1537
1538         fm10k_mbx_lock(hw);
1539         result = fm10k_update_vlan(hw, vlan_id, 0, on);
1540         fm10k_mbx_unlock(hw);
1541         if (result != FM10K_SUCCESS) {
1542                 PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
1543                 return -EIO;
1544         }
1545
1546         for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1547                         (result == FM10K_SUCCESS); mac_index++) {
1548                 if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
1549                         continue;
1550                 if (mac_num > macvlan->mac_num - 1) {
1551                         PMD_INIT_LOG(ERR, "MAC address count "
1552                                         "mismatch");
1553                         break;
1554                 }
1555                 fm10k_mbx_lock(hw);
1556                 result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1557                         data->mac_addrs[mac_index].addr_bytes,
1558                         vlan_id, on, 0);
1559                 fm10k_mbx_unlock(hw);
1560                 mac_num++;
1561         }
1562         if (result != FM10K_SUCCESS) {
1563                 PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1564                 return -EIO;
1565         }
1566
1567         if (on) {
1568                 macvlan->vlan_num++;
1569                 macvlan->vfta[vid_idx] |= vid_bit;
1570         } else {
1571                 macvlan->vlan_num--;
1572                 macvlan->vfta[vid_idx] &= ~vid_bit;
1573         }
1574         return 0;
1575 }
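
/* Illustrative only: this op is reached through the generic API, e.g.
 *
 *     ret = rte_eth_dev_vlan_filter(port_id, 100, 1);
 *
 * which adds VLAN 100 and returns -EINVAL here when VMDQ is active or
 * the VLAN ID is out of range.
 */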
1576
1577 static void
1578 fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1579 {
1580         if (mask & ETH_VLAN_STRIP_MASK) {
1581                 if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
1582                         PMD_INIT_LOG(ERR, "VLAN stripping is "
1583                                         "always on in fm10k");
1584         }
1585
1586         if (mask & ETH_VLAN_EXTEND_MASK) {
1587                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1588                         PMD_INIT_LOG(ERR, "VLAN QinQ is not "
1589                                         "supported in fm10k");
1590         }
1591
1592         if (mask & ETH_VLAN_FILTER_MASK) {
1593                 if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
1594                         PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
1595         }
1596 }
1597
1598 /* Add/Remove a MAC address, and update filters to main VSI */
1599 static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
1600                 const u8 *mac, bool add, uint32_t pool)
1601 {
1602         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1603         struct fm10k_macvlan_filter_info *macvlan;
1604         uint32_t i, j, k;
1605
1606         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1607
1608         if (pool != MAIN_VSI_POOL_NUMBER) {
1609                 PMD_DRV_LOG(ERR, "VMDQ not enabled, cannot set "
1610                         "MAC to pool %u", pool);
1611                 return;
1612         }
1613         for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
1614                 if (!macvlan->vfta[j])
1615                         continue;
1616                 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1617                         if (!(macvlan->vfta[j] & (1 << k)))
1618                                 continue;
1619                         if (i + 1 > macvlan->vlan_num) {
1620                                 PMD_INIT_LOG(ERR, "VLAN count mismatch");
1621                                 return;
1622                         }
1623                         fm10k_mbx_lock(hw);
1624                         fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
1625                                 j * FM10K_UINT32_BIT_SIZE + k, add, 0);
1626                         fm10k_mbx_unlock(hw);
1627                         i++;
1628                 }
1629         }
1630 }
1631
1632 /* Add/Remove a MAC address, and update filters to VMDQ */
1633 static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
1634                 const u8 *mac, bool add, uint32_t pool)
1635 {
1636         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1637         struct fm10k_macvlan_filter_info *macvlan;
1638         struct rte_eth_vmdq_rx_conf *vmdq_conf;
1639         uint32_t i;
1640
1641         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1642         vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1643
1644         if (pool > macvlan->nb_queue_pools) {
1645                 PMD_DRV_LOG(ERR, "Pool number %u invalid."
1646                         " Max pool is %u",
1647                         pool, macvlan->nb_queue_pools);
1648                 return;
1649         }
1650         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
1651                 if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
1652                         continue;
1653                 fm10k_mbx_lock(hw);
1654                 fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
1655                         vmdq_conf->pool_map[i].vlan_id, add, 0);
1656                 fm10k_mbx_unlock(hw);
1657         }
1658 }
1659
1660 /* Add/Remove a MAC address, and update filters */
1661 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
1662                 const u8 *mac, bool add, uint32_t pool)
1663 {
1664         struct fm10k_macvlan_filter_info *macvlan;
1665
1666         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1667
1668         if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
1669                 fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
1670         else
1671                 fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
1672
1673         if (add)
1674                 macvlan->mac_num++;
1675         else
1676                 macvlan->mac_num--;
1677 }
1678
1679 /* Add a MAC address, and update filters */
1680 static void
1681 fm10k_macaddr_add(struct rte_eth_dev *dev,
1682                 struct ether_addr *mac_addr,
1683                 uint32_t index,
1684                 uint32_t pool)
1685 {
1686         struct fm10k_macvlan_filter_info *macvlan;
1687
1688         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1689         fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1690         macvlan->mac_vmdq_id[index] = pool;
1691 }
1692
1693 /* Remove a MAC address, and update filters */
1694 static void
1695 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1696 {
1697         struct rte_eth_dev_data *data = dev->data;
1698         struct fm10k_macvlan_filter_info *macvlan;
1699
1700         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1701         fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1702                         FALSE, macvlan->mac_vmdq_id[index]);
1703         macvlan->mac_vmdq_id[index] = 0;
1704 }
1705
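/* Validate a requested descriptor count: it must lie in [min, max] and
 * be a multiple of mult.
 */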
1706 static inline int
1707 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1708 {
1709         if ((request < min) || (request > max) || ((request % mult) != 0))
1710                 return -1;
1711         else
1712                 return 0;
1713 }
1714
1715
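/* Validate a threshold: it must lie in [min, max] and evenly divide div
 * (i.e. div % request == 0).
 */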
1716 static inline int
1717 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1718 {
1719         if ((request < min) || (request > max) || ((div % request) != 0))
1720                 return -1;
1721         else
1722                 return 0;
1723 }
1724
1725 static inline int
1726 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1727 {
1728         uint16_t rx_free_thresh;
1729
1730         if (conf->rx_free_thresh == 0)
1731                 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1732         else
1733                 rx_free_thresh = conf->rx_free_thresh;
1734
1735         /* make sure the requested threshold satisfies the constraints */
1736         if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1737                         FM10K_RX_FREE_THRESH_MAX(q),
1738                         FM10K_RX_FREE_THRESH_DIV(q),
1739                         rx_free_thresh)) {
1740                 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1741                         "less than or equal to %u, "
1742                         "greater than or equal to %u, "
1743                         "and a divisor of %u",
1744                         rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1745                         FM10K_RX_FREE_THRESH_MIN(q),
1746                         FM10K_RX_FREE_THRESH_DIV(q));
1747                 return -EINVAL;
1748         }
1749
1750         q->alloc_thresh = rx_free_thresh;
1751         q->drop_en = conf->rx_drop_en;
1752         q->rx_deferred_start = conf->rx_deferred_start;
1753
1754         return 0;
1755 }
1756
1757 /*
1758  * Hardware requires specific alignment for Rx packet buffers. At
1759  * least one of the following two conditions must be satisfied.
1760  *  1. Address is 512B aligned
1761  *  2. Address is 8B aligned and buffer does not cross 4K boundary.
1762  *
1763  * As such, the driver may need to adjust the DMA address within the
1764  * buffer by up to 512B.
1765  *
1766  * return 1 if the element size is valid, otherwise return 0.
1767  */
1768 static int
1769 mempool_element_size_valid(struct rte_mempool *mp)
1770 {
1771         uint32_t min_size;
1772
1773         /* elt_size includes mbuf header and headroom */
1774         min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1775                         RTE_PKTMBUF_HEADROOM;
1776
1777         /* account for up to 512B of alignment */
1778         min_size -= FM10K_RX_DATABUF_ALIGN;
1779
1780         /* sanity check for unsigned underflow (wraparound) */
1781         if (min_size > mp->elt_size)
1782                 return 0;
1783
1784         /* size is valid */
1785         return 1;
1786 }
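
/* Worked example (sizes are illustrative and build-dependent): with
 * elt_size = 2176, sizeof(struct rte_mbuf) = 128 and
 * RTE_PKTMBUF_HEADROOM = 128, the usable data room is
 * 2176 - 128 - 128 - 512 = 1408B after reserving the alignment slack.
 * Had elt_size been below 768, the unsigned subtraction would wrap,
 * min_size would exceed elt_size, and the element would be rejected.
 */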
1787
1788 static int
1789 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1790         uint16_t nb_desc, unsigned int socket_id,
1791         const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1792 {
1793         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1794         struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
1795         struct fm10k_rx_queue *q;
1796         const struct rte_memzone *mz;
1797
1798         PMD_INIT_FUNC_TRACE();
1799
1800         /* make sure the mempool element size can account for alignment. */
1801         if (!mempool_element_size_valid(mp)) {
1802                 PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
1803                 return -EINVAL;
1804         }
1805
1806         /* make sure a valid number of descriptors have been requested */
1807         if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1808                                 FM10K_MULT_RX_DESC, nb_desc)) {
1809                 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1810                         "less than or equal to %"PRIu32", "
1811                         "greater than or equal to %u, "
1812                         "and a multiple of %u",
1813                         nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1814                         FM10K_MULT_RX_DESC);
1815                 return -EINVAL;
1816         }
1817
1818         /*
1819          * if this queue existed already, free the associated memory. The
1820          * queue cannot be reused in case we need to allocate memory on a
1821          * different socket than was previously used.
1822          */
1823         if (dev->data->rx_queues[queue_id] != NULL) {
1824                 rx_queue_free(dev->data->rx_queues[queue_id]);
1825                 dev->data->rx_queues[queue_id] = NULL;
1826         }
1827
1828         /* allocate memory for the queue structure */
1829         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1830                                 socket_id);
1831         if (q == NULL) {
1832                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1833                 return -ENOMEM;
1834         }
1835
1836         /* setup queue */
1837         q->mp = mp;
1838         q->nb_desc = nb_desc;
1839         q->nb_fake_desc = FM10K_MULT_RX_DESC;
1840         q->port_id = dev->data->port_id;
1841         q->queue_id = queue_id;
1842         q->tail_ptr = (volatile uint32_t *)
1843                 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1844         if (handle_rxconf(q, conf)) {
                     rte_free(q);
1845                 return -EINVAL;
             }
1846
1847         /* allocate memory for the software ring */
1848         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1849                         (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
1850                         RTE_CACHE_LINE_SIZE, socket_id);
1851         if (q->sw_ring == NULL) {
1852                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1853                 rte_free(q);
1854                 return -ENOMEM;
1855         }
1856
1857         /*
1858          * allocate memory for the hardware descriptor ring. A memzone large
1859          * enough to hold the maximum ring size is requested to allow for
1860          * resizing in later calls to the queue setup function.
1861          */
1862         mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
1863                                       FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
1864                                       socket_id);
1865         if (mz == NULL) {
1866                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1867                 rte_free(q->sw_ring);
1868                 rte_free(q);
1869                 return -ENOMEM;
1870         }
1871         q->hw_ring = mz->addr;
1872         q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1873
1874         /* Check if the number of descriptors satisfies the vector Rx requirement */
1875         if (!rte_is_power_of_2(nb_desc)) {
1876                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
1877                                     "preconditions - canceling the feature for "
1878                                     "the whole port[%d]",
1879                              q->queue_id, q->port_id);
1880                 dev_info->rx_vec_allowed = false;
1881         } else
1882                 fm10k_rxq_vec_setup(q);
1883
1884         dev->data->rx_queues[queue_id] = q;
1885         return 0;
1886 }
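
/* Illustrative application-side call (values are examples only):
 *
 *     ret = rte_eth_rx_queue_setup(port_id, 0, 512,
 *                     rte_socket_id(), NULL, mbuf_pool);
 *
 * Passing NULL for the rxconf selects the defaults advertised in
 * fm10k_dev_infos_get(); 512 is a power of 2, so vector Rx stays
 * enabled for the port.
 */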
1887
1888 static void
1889 fm10k_rx_queue_release(void *queue)
1890 {
1891         PMD_INIT_FUNC_TRACE();
1892
1893         rx_queue_free(queue);
1894 }
1895
1896 static inline int
1897 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1898 {
1899         uint16_t tx_free_thresh;
1900         uint16_t tx_rs_thresh;
1901
1902         /* the constraint macros require that tx_free_thresh be configured
1903          * before tx_rs_thresh */
1904         if (conf->tx_free_thresh == 0)
1905                 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1906         else
1907                 tx_free_thresh = conf->tx_free_thresh;
1908
1909         /* make sure the requested threshold satisfies the constraints */
1910         if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1911                         FM10K_TX_FREE_THRESH_MAX(q),
1912                         FM10K_TX_FREE_THRESH_DIV(q),
1913                         tx_free_thresh)) {
1914                 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1915                         "less than or equal to %u, "
1916                         "greater than or equal to %u, "
1917                         "and a divisor of %u",
1918                         tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1919                         FM10K_TX_FREE_THRESH_MIN(q),
1920                         FM10K_TX_FREE_THRESH_DIV(q));
1921                 return -EINVAL;
1922         }
1923
1924         q->free_thresh = tx_free_thresh;
1925
1926         if (conf->tx_rs_thresh == 0)
1927                 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1928         else
1929                 tx_rs_thresh = conf->tx_rs_thresh;
1930
1931         q->tx_deferred_start = conf->tx_deferred_start;
1932
1933         /* make sure the requested threshold satisfies the constraints */
1934         if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1935                         FM10K_TX_RS_THRESH_MAX(q),
1936                         FM10K_TX_RS_THRESH_DIV(q),
1937                         tx_rs_thresh)) {
1938                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1939                         "less than or equal to %u, "
1940                         "greater than or equal to %u, "
1941                         "and a divisor of %u",
1942                         tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1943                         FM10K_TX_RS_THRESH_MIN(q),
1944                         FM10K_TX_RS_THRESH_DIV(q));
1945                 return -EINVAL;
1946         }
1947
1948         q->rs_thresh = tx_rs_thresh;
1949
1950         return 0;
1951 }
1952
1953 static int
1954 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1955         uint16_t nb_desc, unsigned int socket_id,
1956         const struct rte_eth_txconf *conf)
1957 {
1958         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1959         struct fm10k_tx_queue *q;
1960         const struct rte_memzone *mz;
1961
1962         PMD_INIT_FUNC_TRACE();
1963
1964         /* make sure a valid number of descriptors have been requested */
1965         if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1966                                 FM10K_MULT_TX_DESC, nb_desc)) {
1967                 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1968                         "less than or equal to %"PRIu32", "
1969                         "greater than or equal to %u, "
1970                         "and a multiple of %u",
1971                         nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1972                         FM10K_MULT_TX_DESC);
1973                 return -EINVAL;
1974         }
1975
1976         /*
1977          * if this queue existed already, free the associated memory. The
1978          * queue cannot be reused in case we need to allocate memory on a
1979          * different socket than was previously used.
1980          */
1981         if (dev->data->tx_queues[queue_id] != NULL) {
1982                 struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
1983
1984                 tx_queue_free(txq);
1985                 dev->data->tx_queues[queue_id] = NULL;
1986         }
1987
1988         /* allocate memory for the queue structure */
1989         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1990                                 socket_id);
1991         if (q == NULL) {
1992                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1993                 return -ENOMEM;
1994         }
1995
1996         /* setup queue */
1997         q->nb_desc = nb_desc;
1998         q->port_id = dev->data->port_id;
1999         q->queue_id = queue_id;
2000         q->txq_flags = conf->txq_flags;
2001         q->ops = &def_txq_ops;
2002         q->tail_ptr = (volatile uint32_t *)
2003                 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
2004         if (handle_txconf(q, conf)) {
                     rte_free(q);
2005                 return -EINVAL;
             }
2006
2007         /* allocate memory for the software ring */
2008         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
2009                                         nb_desc * sizeof(struct rte_mbuf *),
2010                                         RTE_CACHE_LINE_SIZE, socket_id);
2011         if (q->sw_ring == NULL) {
2012                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
2013                 rte_free(q);
2014                 return -ENOMEM;
2015         }
2016
2017         /*
2018          * allocate memory for the hardware descriptor ring. A memzone large
2019          * enough to hold the maximum ring size is requested to allow for
2020          * resizing in later calls to the queue setup function.
2021          */
2022         mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
2023                                       FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
2024                                       socket_id);
2025         if (mz == NULL) {
2026                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
2027                 rte_free(q->sw_ring);
2028                 rte_free(q);
2029                 return -ENOMEM;
2030         }
2031         q->hw_ring = mz->addr;
2032         q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
2033
2034         /*
2035          * allocate memory for the RS bit tracker. We need enough slots to
2036          * hold the descriptor index for each RS bit that will be set.
2037          */
2038         q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
2039                                 ((nb_desc + 1) / q->rs_thresh) *
2040                                 sizeof(uint16_t),
2041                                 RTE_CACHE_LINE_SIZE, socket_id);
2042         if (q->rs_tracker.list == NULL) {
2043                 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
2044                 rte_free(q->sw_ring);
2045                 rte_free(q);
2046                 return -ENOMEM;
2047         }
2048
2049         dev->data->tx_queues[queue_id] = q;
2050         return 0;
2051 }
2052
2053 static void
2054 fm10k_tx_queue_release(void *queue)
2055 {
2056         struct fm10k_tx_queue *q = queue;
2057         PMD_INIT_FUNC_TRACE();
2058
2059         tx_queue_free(q);
2060 }
2061
2062 static int
2063 fm10k_reta_update(struct rte_eth_dev *dev,
2064                         struct rte_eth_rss_reta_entry64 *reta_conf,
2065                         uint16_t reta_size)
2066 {
2067         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2068         uint16_t i, j, idx, shift;
2069         uint8_t mask;
2070         uint32_t reta;
2071
2072         PMD_INIT_FUNC_TRACE();
2073
2074         if (reta_size > FM10K_MAX_RSS_INDICES) {
2075                 PMD_INIT_LOG(ERR, "The size of the configured hash lookup table "
2076                         "(%d) exceeds the maximum supported by hardware "
2077                         "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2078                 return -EINVAL;
2079         }
2080
2081         /*
2082          * Update Redirection Table RETA[n], n=0..31. The redirection table has
2083          * 128 entries in 32 registers.
2084          */
2085         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2086                 idx = i / RTE_RETA_GROUP_SIZE;
2087                 shift = i % RTE_RETA_GROUP_SIZE;
2088                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2089                                 BIT_MASK_PER_UINT32);
2090                 if (mask == 0)
2091                         continue;
2092
2093                 reta = 0;
2094                 if (mask != BIT_MASK_PER_UINT32)
2095                         reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2096
2097                 for (j = 0; j < CHARS_PER_UINT32; j++) {
2098                         if (mask & (0x1 << j)) {
2099                                 if (mask != BIT_MASK_PER_UINT32)
2100                                         reta &= ~(UINT8_MAX << CHAR_BIT * j);
2101                                 reta |= reta_conf[idx].reta[shift + j] <<
2102                                                 (CHAR_BIT * j);
2103                         }
2104                 }
2105                 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
2106         }
2107
2108         return 0;
2109 }
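
/* Illustrative only: the 128-entry table is updated through the generic
 * API with two rte_eth_rss_reta_entry64 groups of 64 entries, e.g. a
 * round-robin spread over nb_rx_queues (an application variable):
 *
 *     struct rte_eth_rss_reta_entry64 reta_conf[2];
 *     int i;
 *
 *     for (i = 0; i < 128; i++) {
 *             reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
 *             reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                             i % nb_rx_queues;
 *     }
 *     ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
 */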
2110
2111 static int
2112 fm10k_reta_query(struct rte_eth_dev *dev,
2113                         struct rte_eth_rss_reta_entry64 *reta_conf,
2114                         uint16_t reta_size)
2115 {
2116         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2117         uint16_t i, j, idx, shift;
2118         uint8_t mask;
2119         uint32_t reta;
2120
2121         PMD_INIT_FUNC_TRACE();
2122
2123         if (reta_size < FM10K_MAX_RSS_INDICES) {
2124                 PMD_INIT_LOG(ERR, "The size of the hash lookup table to query "
2125                         "(%d) is smaller than the number supported by hardware "
2126                         "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2127                 return -EINVAL;
2128         }
2129
2130         /*
2131          * Read Redirection Table RETA[n], n=0..31. The redirection table has
2132          * 128 entries in 32 registers.
2133          */
2134         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2135                 idx = i / RTE_RETA_GROUP_SIZE;
2136                 shift = i % RTE_RETA_GROUP_SIZE;
2137                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2138                                 BIT_MASK_PER_UINT32);
2139                 if (mask == 0)
2140                         continue;
2141
2142                 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2143                 for (j = 0; j < CHARS_PER_UINT32; j++) {
2144                         if (mask & (0x1 << j))
2145                                 reta_conf[idx].reta[shift + j] = ((reta >>
2146                                         CHAR_BIT * j) & UINT8_MAX);
2147                 }
2148         }
2149
2150         return 0;
2151 }
2152
2153 static int
2154 fm10k_rss_hash_update(struct rte_eth_dev *dev,
2155         struct rte_eth_rss_conf *rss_conf)
2156 {
2157         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2158         uint32_t *key = (uint32_t *)rss_conf->rss_key;
2159         uint32_t mrqc;
2160         uint64_t hf = rss_conf->rss_hf;
2161         int i;
2162
2163         PMD_INIT_FUNC_TRACE();
2164
2165         if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2166                 FM10K_RSSRK_ENTRIES_PER_REG)
2167                 return -EINVAL;
2168
2169         if (hf == 0)
2170                 return -EINVAL;
2171
2172         mrqc = 0;
2173         mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
2174         mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
2175         mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
2176         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
2177         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
2178         mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
2179         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
2180         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
2181         mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
2182
2183         /* If none of the requested hash functions are supported, return */
2184         if (mrqc == 0)
2185                 return -EINVAL;
2186
2187         if (key != NULL)
2188                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2189                         FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
2190
2191         FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
2192
2193         return 0;
2194 }
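
/* Illustrative only: enabling TCP/IPv4 RSS while keeping the current
 * key (the length check above still expects a full-size value,
 * 40B = FM10K_RSSRK_SIZE * 4):
 *
 *     struct rte_eth_rss_conf conf = {
 *             .rss_key = NULL,
 *             .rss_key_len = 40,
 *             .rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
 *     };
 *     ret = rte_eth_dev_rss_hash_update(port_id, &conf);
 */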
2195
2196 static int
2197 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
2198         struct rte_eth_rss_conf *rss_conf)
2199 {
2200         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2201         uint32_t *key = (uint32_t *)rss_conf->rss_key;
2202         uint32_t mrqc;
2203         uint64_t hf;
2204         int i;
2205
2206         PMD_INIT_FUNC_TRACE();
2207
2208         if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2209                                 FM10K_RSSRK_ENTRIES_PER_REG)
2210                 return -EINVAL;
2211
2212         if (key != NULL)
2213                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2214                         key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
2215
2216         mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
2217         hf = 0;
2218         hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
2219         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
2220         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
2221         hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
2222         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
2223         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
2224         hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
2225         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
2226         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
2227
2228         rss_conf->rss_hf = hf;
2229
2230         return 0;
2231 }
2232
2233 static void
2234 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
2235 {
2236         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2237         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2238
2239         /* Bind all local non-queue interrupt to vector 0 */
2240         int_map |= FM10K_MISC_VEC_ID;
2241
2242         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2243         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2244         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2245         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2246         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2247         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2248
2249         /* Enable misc causes */
2250         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
2251                                 FM10K_EIMR_ENABLE(THI_FAULT) |
2252                                 FM10K_EIMR_ENABLE(FUM_FAULT) |
2253                                 FM10K_EIMR_ENABLE(MAILBOX) |
2254                                 FM10K_EIMR_ENABLE(SWITCHREADY) |
2255                                 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
2256                                 FM10K_EIMR_ENABLE(SRAMERROR) |
2257                                 FM10K_EIMR_ENABLE(VFLR));
2258
2259         /* Enable ITR 0 */
2260         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2261                                         FM10K_ITR_MASK_CLEAR);
2262         FM10K_WRITE_FLUSH(hw);
2263 }
2264
2265 static void
2266 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
2267 {
2268         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2269         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2270
2271         int_map |= FM10K_MISC_VEC_ID;
2272
2273         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2274         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2275         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2276         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2277         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2278         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2279
2280         /* Disable misc causes */
2281         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
2282                                 FM10K_EIMR_DISABLE(THI_FAULT) |
2283                                 FM10K_EIMR_DISABLE(FUM_FAULT) |
2284                                 FM10K_EIMR_DISABLE(MAILBOX) |
2285                                 FM10K_EIMR_DISABLE(SWITCHREADY) |
2286                                 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
2287                                 FM10K_EIMR_DISABLE(SRAMERROR) |
2288                                 FM10K_EIMR_DISABLE(VFLR));
2289
2290         /* Disable ITR 0 */
2291         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
2292         FM10K_WRITE_FLUSH(hw);
2293 }
2294
2295 static void
2296 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
2297 {
2298         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2299         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2300
2301         /* Bind all local non-queue interrupt to vector 0 */
2302         int_map |= FM10K_MISC_VEC_ID;
2303
2304         /* Only INT 0 is available; the other 15 are reserved. */
2305         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2306
2307         /* Enable ITR 0 */
2308         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2309                                         FM10K_ITR_MASK_CLEAR);
2310         FM10K_WRITE_FLUSH(hw);
2311 }
2312
2313 static void
2314 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
2315 {
2316         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2317         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2318
2319         int_map |= FM10K_MISC_VEC_ID;
2320
2321         /* Only INT 0 is available; the other 15 are reserved. */
2322         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2323
2324         /* Disable ITR 0 */
2325         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
2326         FM10K_WRITE_FLUSH(hw);
2327 }
2328
2329 static int
2330 fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2331 {
2332         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2333
2334         /* Enable ITR */
2335         if (hw->mac.type == fm10k_mac_pf)
2336                 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, queue_id)),
2337                         FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2338         else
2339                 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, queue_id)),
2340                         FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2341         rte_intr_enable(&dev->pci_dev->intr_handle);
2342         return 0;
2343 }
2344
2345 static int
2346 fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2347 {
2348         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2349
2350         /* Disable ITR */
2351         if (hw->mac.type == fm10k_mac_pf)
2352                 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, queue_id)),
2353                         FM10K_ITR_MASK_SET);
2354         else
2355                 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, queue_id)),
2356                         FM10K_ITR_MASK_SET);
2357         return 0;
2358 }
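
/* Illustrative only: applications drive these two ops through
 * rte_eth_dev_rx_intr_enable(port_id, queue_id) before waiting on the
 * queue's event fd and rte_eth_dev_rx_intr_disable() once they switch
 * back to polling.
 */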
2359
2360 static int
2361 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2362 {
2363         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2364         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
2365         uint32_t intr_vector, vec;
2366         uint16_t queue_id;
2367         int result = 0;
2368
2369         /* fm10k needs a separate interrupt for the mailbox,
2370          * so only drivers that support multiple interrupt vectors
2371          * (e.g. vfio-pci) can work in fm10k interrupt mode
2372          */
2373         if (!rte_intr_cap_multiple(intr_handle) ||
2374                         dev->data->dev_conf.intr_conf.rxq == 0)
2375                 return result;
2376
2377         intr_vector = dev->data->nb_rx_queues;
2378
2379         /* disable interrupt first */
2380         rte_intr_disable(&dev->pci_dev->intr_handle);
2381         if (hw->mac.type == fm10k_mac_pf)
2382                 fm10k_dev_disable_intr_pf(dev);
2383         else
2384                 fm10k_dev_disable_intr_vf(dev);
2385
2386         if (rte_intr_efd_enable(intr_handle, intr_vector)) {
2387                 PMD_INIT_LOG(ERR, "Failed to init event fd");
2388                 result = -EIO;
2389         }
2390
2391         if (rte_intr_dp_is_en(intr_handle) && !result) {
2392                 intr_handle->intr_vec = rte_zmalloc("intr_vec",
2393                         dev->data->nb_rx_queues * sizeof(int), 0);
2394                 if (intr_handle->intr_vec) {
2395                         for (queue_id = 0, vec = FM10K_RX_VEC_START;
2396                                         queue_id < dev->data->nb_rx_queues;
2397                                         queue_id++) {
2398                                 intr_handle->intr_vec[queue_id] = vec;
2399                                 if (vec < intr_handle->nb_efd - 1
2400                                                 + FM10K_RX_VEC_START)
2401                                         vec++;
2402                         }
2403                 } else {
2404                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2405                                 " intr_vec", dev->data->nb_rx_queues);
2406                         rte_intr_efd_disable(intr_handle);
2407                         result = -ENOMEM;
2408                 }
2409         }
2410
2411         if (hw->mac.type == fm10k_mac_pf)
2412                 fm10k_dev_enable_intr_pf(dev);
2413         else
2414                 fm10k_dev_enable_intr_vf(dev);
2415         rte_intr_enable(&dev->pci_dev->intr_handle);
2416         hw->mac.ops.update_int_moderator(hw);
2417         return result;
2418 }
2419
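/* Decode the PCA/THI/FUM fault causes signalled in EICR and log the
 * faulting function (PF or VF), address and specific info.
 */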
2420 static int
2421 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
2422 {
2423         struct fm10k_fault fault;
2424         int err;
2425         const char *estr = "Unknown error";
2426
2427         /* Process PCA fault */
2428         if (eicr & FM10K_EICR_PCA_FAULT) {
2429                 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
2430                 if (err)
2431                         goto error;
2432                 switch (fault.type) {
2433                 case PCA_NO_FAULT:
2434                         estr = "PCA_NO_FAULT"; break;
2435                 case PCA_UNMAPPED_ADDR:
2436                         estr = "PCA_UNMAPPED_ADDR"; break;
2437                 case PCA_BAD_QACCESS_PF:
2438                         estr = "PCA_BAD_QACCESS_PF"; break;
2439                 case PCA_BAD_QACCESS_VF:
2440                         estr = "PCA_BAD_QACCESS_VF"; break;
2441                 case PCA_MALICIOUS_REQ:
2442                         estr = "PCA_MALICIOUS_REQ"; break;
2443                 case PCA_POISONED_TLP:
2444                         estr = "PCA_POISONED_TLP"; break;
2445                 case PCA_TLP_ABORT:
2446                         estr = "PCA_TLP_ABORT"; break;
2447                 default:
2448                         goto error;
2449                 }
2450                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2451                         estr, fault.func ? "VF" : "PF", fault.func,
2452                         fault.address, fault.specinfo);
2453         }
2454
2455         /* Process THI fault */
2456         if (eicr & FM10K_EICR_THI_FAULT) {
2457                 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
2458                 if (err)
2459                         goto error;
2460                 switch (fault.type) {
2461                 case THI_NO_FAULT:
2462                         estr = "THI_NO_FAULT"; break;
2463                 case THI_MAL_DIS_Q_FAULT:
2464                         estr = "THI_MAL_DIS_Q_FAULT"; break;
2465                 default:
2466                         goto error;
2467                 }
2468                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2469                         estr, fault.func ? "VF" : "PF", fault.func,
2470                         fault.address, fault.specinfo);
2471         }
2472
2473         /* Process FUM fault */
2474         if (eicr & FM10K_EICR_FUM_FAULT) {
2475                 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
2476                 if (err)
2477                         goto error;
2478                 switch (fault.type) {
2479                 case FUM_NO_FAULT:
2480                         estr = "FUM_NO_FAULT"; break;
2481                 case FUM_UNMAPPED_ADDR:
2482                         estr = "FUM_UNMAPPED_ADDR"; break;
2483                 case FUM_POISONED_TLP:
2484                         estr = "FUM_POISONED_TLP"; break;
2485                 case FUM_BAD_VF_QACCESS:
2486                         estr = "FUM_BAD_VF_QACCESS"; break;
2487                 case FUM_ADD_DECODE_ERR:
2488                         estr = "FUM_ADD_DECODE_ERR"; break;
2489                 case FUM_RO_ERROR:
2490                         estr = "FUM_RO_ERROR"; break;
2491                 case FUM_QPRC_CRC_ERROR:
2492                         estr = "FUM_QPRC_CRC_ERROR"; break;
2493                 case FUM_CSR_TIMEOUT:
2494                         estr = "FUM_CSR_TIMEOUT"; break;
2495                 case FUM_INVALID_TYPE:
2496                         estr = "FUM_INVALID_TYPE"; break;
2497                 case FUM_INVALID_LENGTH:
2498                         estr = "FUM_INVALID_LENGTH"; break;
2499                 case FUM_INVALID_BE:
2500                         estr = "FUM_INVALID_BE"; break;
2501                 case FUM_INVALID_ALIGN:
2502                         estr = "FUM_INVALID_ALIGN"; break;
2503                 default:
2504                         goto error;
2505                 }
2506                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2507                         estr, fault.func ? "VF" : "PF", fault.func,
2508                         fault.address, fault.specinfo);
2509         }
2510
2511         return 0;
2512 error:
2513         PMD_INIT_LOG(ERR, "Failed to handle fault event.");
2514         return err;
2515 }
2516
2517 /**
2518  * PF interrupt handler triggered by NIC for handling specific interrupt.
2519  *
2520  * @param handle
2521  *  Pointer to interrupt handle.
2522  * @param param
2523  *  The address of the parameter (struct rte_eth_dev *) registered before.
2524  *
2525  * @return
2526  *  void
2527  */
2528 static void
2529 fm10k_dev_interrupt_handler_pf(
2530                         __rte_unused struct rte_intr_handle *handle,
2531                         void *param)
2532 {
2533         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2534         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2535         uint32_t cause, status;
2536
2537         if (hw->mac.type != fm10k_mac_pf)
2538                 return;
2539
2540         cause = FM10K_READ_REG(hw, FM10K_EICR);
2541
2542         /* Handle PCI fault cases */
2543         if (cause & FM10K_EICR_FAULT_MASK) {
2544                 PMD_INIT_LOG(ERR, "INT: fault detected!");
2545                 fm10k_dev_handle_fault(hw, cause);
2546         }
2547
2548         /* Handle switch up/down */
2549         if (cause & FM10K_EICR_SWITCHNOTREADY)
2550                 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
2551
2552         if (cause & FM10K_EICR_SWITCHREADY)
2553                 PMD_INIT_LOG(INFO, "INT: Switch is ready");
2554
2555         /* Handle mailbox message */
2556         fm10k_mbx_lock(hw);
2557         hw->mbx.ops.process(hw, &hw->mbx);
2558         fm10k_mbx_unlock(hw);
2559
2560         /* Handle SRAM error */
2561         if (cause & FM10K_EICR_SRAMERROR) {
2562                 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
2563
2564                 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
2565                 /* Write to clear pending bits */
2566                 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
2567
2568                 /* TODO: print out error message after shared code updates */
2569         }
2570
2571         /* Clear these 3 events if any are pending */
2572         cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
2573                  FM10K_EICR_SWITCHREADY;
2574         if (cause)
2575                 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
2576
2577         /* Re-enable interrupt from device side */
2578         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2579                                         FM10K_ITR_MASK_CLEAR);
2580         /* Re-enable interrupt from host side */
2581         rte_intr_enable(&(dev->pci_dev->intr_handle));
2582 }
2583
2584 /**
2585  * VF interrupt handler triggered by NIC for handling specific interrupt.
2586  *
2587  * @param handle
2588  *  Pointer to interrupt handle.
2589  * @param param
2590  *  The address of the parameter (struct rte_eth_dev *) registered before.
2591  *
2592  * @return
2593  *  void
2594  */
2595 static void
2596 fm10k_dev_interrupt_handler_vf(
2597                         __rte_unused struct rte_intr_handle *handle,
2598                         void *param)
2599 {
2600         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2601         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2602
2603         if (hw->mac.type != fm10k_mac_vf)
2604                 return;
2605
2606         /* Handle mailbox message under the mailbox lock */
2607         fm10k_mbx_lock(hw);
2608         hw->mbx.ops.process(hw, &hw->mbx);
2609         fm10k_mbx_unlock(hw);
2610
2611         /* Re-enable interrupt from device side */
2612         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2613                                         FM10K_ITR_MASK_CLEAR);
2614         /* Re-enable interrupt from host side */
2615         rte_intr_enable(&(dev->pci_dev->intr_handle));
2616 }
2617
2618 /* Mailbox message handler in VF */
2619 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
2620         FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
2621         FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
2622         FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
2623         FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2624 };
2625
2626 static int
2627 fm10k_setup_mbx_service(struct fm10k_hw *hw)
2628 {
2629         int err = 0;
2630
2631         /* Initialize mailbox lock */
2632         fm10k_mbx_initlock(hw);
2633
2634         /* Replace default message handler with new ones */
2635         if (hw->mac.type == fm10k_mac_vf)
2636                 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2637
2638         if (err) {
2639                 PMD_INIT_LOG(ERR, "Failed to register mailbox handler, err: %d",
2640                                 err);
2641                 return err;
2642         }
2643         /* Connect to SM for PF device or PF for VF device */
2644         return hw->mbx.ops.connect(hw, &hw->mbx);
2645 }
2646
2647 static void
2648 fm10k_close_mbx_service(struct fm10k_hw *hw)
2649 {
2650         /* Disconnect from SM for PF device or PF for VF device */
2651         hw->mbx.ops.disconnect(hw, &hw->mbx);
2652 }
2653
2654 static const struct eth_dev_ops fm10k_eth_dev_ops = {
2655         .dev_configure          = fm10k_dev_configure,
2656         .dev_start              = fm10k_dev_start,
2657         .dev_stop               = fm10k_dev_stop,
2658         .dev_close              = fm10k_dev_close,
2659         .promiscuous_enable     = fm10k_dev_promiscuous_enable,
2660         .promiscuous_disable    = fm10k_dev_promiscuous_disable,
2661         .allmulticast_enable    = fm10k_dev_allmulticast_enable,
2662         .allmulticast_disable   = fm10k_dev_allmulticast_disable,
2663         .stats_get              = fm10k_stats_get,
2664         .xstats_get             = fm10k_xstats_get,
2665         .xstats_get_names       = fm10k_xstats_get_names,
2666         .stats_reset            = fm10k_stats_reset,
2667         .xstats_reset           = fm10k_stats_reset,
2668         .link_update            = fm10k_link_update,
2669         .dev_infos_get          = fm10k_dev_infos_get,
2670         .dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
2671         .vlan_filter_set        = fm10k_vlan_filter_set,
2672         .vlan_offload_set       = fm10k_vlan_offload_set,
2673         .mac_addr_add           = fm10k_macaddr_add,
2674         .mac_addr_remove        = fm10k_macaddr_remove,
2675         .rx_queue_start         = fm10k_dev_rx_queue_start,
2676         .rx_queue_stop          = fm10k_dev_rx_queue_stop,
2677         .tx_queue_start         = fm10k_dev_tx_queue_start,
2678         .tx_queue_stop          = fm10k_dev_tx_queue_stop,
2679         .rx_queue_setup         = fm10k_rx_queue_setup,
2680         .rx_queue_release       = fm10k_rx_queue_release,
2681         .tx_queue_setup         = fm10k_tx_queue_setup,
2682         .tx_queue_release       = fm10k_tx_queue_release,
2683         .rx_descriptor_done     = fm10k_dev_rx_descriptor_done,
2684         .rx_queue_intr_enable   = fm10k_dev_rx_queue_intr_enable,
2685         .rx_queue_intr_disable  = fm10k_dev_rx_queue_intr_disable,
2686         .reta_update            = fm10k_reta_update,
2687         .reta_query             = fm10k_reta_query,
2688         .rss_hash_update        = fm10k_rss_hash_update,
2689         .rss_hash_conf_get      = fm10k_rss_hash_conf_get,
2690 };
2691
2692 static int ftag_check_handler(__rte_unused const char *key,
2693                 const char *value, __rte_unused void *opaque)
2694 {
2695         if (strcmp(value, "1"))
2696                 return -1;
2697
2698         return 0;
2699 }
2700
2701 static int
2702 fm10k_check_ftag(struct rte_devargs *devargs)
2703 {
2704         struct rte_kvargs *kvlist;
2705         const char *ftag_key = "enable_ftag";
2706
2707         if (devargs == NULL)
2708                 return 0;
2709
2710         kvlist = rte_kvargs_parse(devargs->args, NULL);
2711         if (kvlist == NULL)
2712                 return 0;
2713
2714         if (!rte_kvargs_count(kvlist, ftag_key)) {
2715                 rte_kvargs_free(kvlist);
2716                 return 0;
2717         }
2718         /* FTAG is enabled when the key-value pair enable_ftag=1 is present */
2719         if (rte_kvargs_process(kvlist, ftag_key,
2720                                 ftag_check_handler, NULL) < 0) {
2721                 rte_kvargs_free(kvlist);
2722                 return 0;
2723         }
2724         rte_kvargs_free(kvlist);
2725
2726         return 1;
2727 }
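
/* Illustrative only: FTAG is requested on the EAL command line through
 * a device argument (the PCI address is a placeholder), e.g.
 *
 *     testpmd -w 0000:02:00.0,enable_ftag=1 -- -i
 */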
2728
2729 static void __attribute__((cold))
2730 fm10k_set_tx_function(struct rte_eth_dev *dev)
2731 {
2732         struct fm10k_tx_queue *txq;
2733         int i;
2734         int use_sse = 1;
2735         uint16_t tx_ftag_en = 0;
2736
2737         if (fm10k_check_ftag(dev->pci_dev->devargs))
2738                 tx_ftag_en = 1;
2739
2740         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2741                 txq = dev->data->tx_queues[i];
2742                 txq->tx_ftag_en = tx_ftag_en;
2743                 /* Check whether this queue meets the vector Tx prerequisites */
2744                 if (fm10k_tx_vec_condition_check(txq)) {
2745                         use_sse = 0;
2746                         break;
2747                 }
2748         }
2749
2750         if (use_sse) {
2751                 PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2752                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2753                         txq = dev->data->tx_queues[i];
2754                         fm10k_txq_vec_setup(txq);
2755                 }
2756                 dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2757         } else {
2758                 dev->tx_pkt_burst = fm10k_xmit_pkts;
2759                 PMD_INIT_LOG(DEBUG, "Use regular Tx func");
2760         }
2761 }
2762
static void __attribute__((cold))
fm10k_set_rx_function(struct rte_eth_dev *dev)
{
        struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
        uint16_t i, rx_using_sse;
        uint16_t rx_ftag_en = 0;

        if (fm10k_check_ftag(dev->pci_dev->devargs))
                rx_ftag_en = 1;

        /* In order to allow Vector Rx there are a few configuration
         * conditions to be met.
         */
        if (!fm10k_rx_vec_condition_check(dev) &&
                        dev_info->rx_vec_allowed && !rx_ftag_en) {
                if (dev->data->scattered_rx)
                        dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
                else
                        dev->rx_pkt_burst = fm10k_recv_pkts_vec;
        } else if (dev->data->scattered_rx)
                dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
        else
                dev->rx_pkt_burst = fm10k_recv_pkts;

        rx_using_sse =
                (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
                dev->rx_pkt_burst == fm10k_recv_pkts_vec);

        if (rx_using_sse)
                PMD_INIT_LOG(DEBUG, "Use vector Rx func");
        else
                PMD_INIT_LOG(DEBUG, "Use regular Rx func");

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];

                rxq->rx_using_sse = rx_using_sse;
                rxq->rx_ftag_en = rx_ftag_en;
        }
}

static void
fm10k_params_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_dev_info *info = FM10K_DEV_PRIVATE_TO_INFO(dev);

        /* Initialize bus info. Normally we would call fm10k_get_bus_info(), but
         * there is no way to get link status without reading BAR4.  Until this
         * works, assume we have maximum bandwidth.
         * @todo - fix bus info
         */
        hw->bus_caps.speed = fm10k_bus_speed_8000;
        hw->bus_caps.width = fm10k_bus_width_pcie_x8;
        hw->bus_caps.payload = fm10k_bus_payload_512;
        hw->bus.speed = fm10k_bus_speed_8000;
        hw->bus.width = fm10k_bus_width_pcie_x8;
        hw->bus.payload = fm10k_bus_payload_256;

        info->rx_vec_allowed = true;
}
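/* Per-device init: set default burst functions, fill HW identity from
 * PCI info, bring up the shared code and hardware, obtain a MAC address
 * (randomized if the stored one is invalid), start the mailbox service
 * and interrupts, then, for a PF, wait for the Switch Manager and the
 * default VID before installing the default MAC filter.
 */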
static int
eth_fm10k_dev_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int diag, i;
        struct fm10k_macvlan_filter_info *macvlan;

        PMD_INIT_FUNC_TRACE();

        dev->dev_ops = &fm10k_eth_dev_ops;
        dev->rx_pkt_burst = &fm10k_recv_pkts;
        dev->tx_pkt_burst = &fm10k_xmit_pkts;

        /* only initialize in the primary process */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        rte_eth_copy_pci_info(dev, dev->pci_dev);

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        memset(macvlan, 0, sizeof(*macvlan));
        /* Vendor and Device ID need to be set before init of shared code */
        memset(hw, 0, sizeof(*hw));
        hw->device_id = dev->pci_dev->id.device_id;
        hw->vendor_id = dev->pci_dev->id.vendor_id;
        hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
        hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
        hw->revision_id = 0;
        hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
        if (hw->hw_addr == NULL) {
                PMD_INIT_LOG(ERR, "Bad mem resource."
                        " Try to blacklist unused devices.");
                return -EIO;
        }

        /* Store fm10k_adapter pointer */
        hw->back = dev->data->dev_private;

        /* Initialize the shared code */
        diag = fm10k_init_shared_code(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
                return -EIO;
        }

        /* Initialize parameters */
        fm10k_params_init(dev);

        /* Initialize the hw */
        diag = fm10k_init_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
                return -EIO;
        }

        /* Initialize MAC address(es) */
        dev->data->mac_addrs = rte_zmalloc("fm10k",
                        ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
        if (dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
                return -ENOMEM;
        }

        diag = fm10k_read_mac_addr(hw);

        ether_addr_copy((const struct ether_addr *)hw->mac.addr,
                        &dev->data->mac_addrs[0]);

        if (diag != FM10K_SUCCESS ||
                !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {

                /* Generate a random addr */
                eth_random_addr(hw->mac.addr);
                memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
                ether_addr_copy((const struct ether_addr *)hw->mac.addr,
                                &dev->data->mac_addrs[0]);
        }

        /* Reset the hw statistics */
        fm10k_stats_reset(dev);

        /* Reset the hw */
        diag = fm10k_reset_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
                return -EIO;
        }

        /* Setup mailbox service */
        diag = fm10k_setup_mbx_service(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
                return -EIO;
        }

        /* PF and VF use different interrupt handling mechanisms */
        if (hw->mac.type == fm10k_mac_pf) {
                /* register callback func to eal lib */
                rte_intr_callback_register(&(dev->pci_dev->intr_handle),
                        fm10k_dev_interrupt_handler_pf, (void *)dev);

                /* enable MISC interrupt */
                fm10k_dev_enable_intr_pf(dev);
        } else { /* VF */
                rte_intr_callback_register(&(dev->pci_dev->intr_handle),
                        fm10k_dev_interrupt_handler_vf, (void *)dev);

                fm10k_dev_enable_intr_vf(dev);
        }

        /* Enable interrupts only after the callback is registered */
        rte_intr_enable(&(dev->pci_dev->intr_handle));

        hw->mac.ops.update_int_moderator(hw);

        /* Make sure Switch Manager is ready before going forward. */
        if (hw->mac.type == fm10k_mac_pf) {
                int switch_ready = 0;

                for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
                        fm10k_mbx_lock(hw);
                        hw->mac.ops.get_host_state(hw, &switch_ready);
                        fm10k_mbx_unlock(hw);
                        if (switch_ready)
                                break;
                        /* Delay some time to acquire async LPORT_MAP info. */
                        rte_delay_us(WAIT_SWITCH_MSG_US);
                }

                if (switch_ready == 0) {
                        PMD_INIT_LOG(ERR, "switch is not ready");
                        return -1;
                }
        }

        /*
         * The calls below operate on the mailbox, so take the lock to avoid
         * racing with the interrupt handler: pushing messages onto the
         * mailbox FIFO raises an interrupt to the PF/Switch Manager, whose
         * handler in turn raises an interrupt back to this side, where the
         * mailbox FIFO is touched again.
         */
        fm10k_mbx_lock(hw);
        /* Enable port first */
        hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
                                        MAX_LPORT_NUM, 1);

        /* Set unicast mode by default. The application can switch to another
         * xcast mode through other API functions.
         */
        hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                        FM10K_XCAST_MODE_NONE);

        fm10k_mbx_unlock(hw);

        /* Make sure default VID is ready before going forward. */
        if (hw->mac.type == fm10k_mac_pf) {
                for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
                        if (hw->mac.default_vid)
                                break;
                        /* Delay some time to acquire async port VLAN info. */
                        rte_delay_us(WAIT_SWITCH_MSG_US);
                }

                if (!hw->mac.default_vid) {
                        PMD_INIT_LOG(ERR, "default VID is not ready");
                        return -1;
                }
        }

        /* Add default MAC address */
        fm10k_MAC_filter_set(dev, hw->mac.addr, true,
                MAIN_VSI_POOL_NUMBER);

        return 0;
}

static int
eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        /* only uninitialize in the primary process */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        /* safe to close dev here */
        fm10k_dev_close(dev);

        dev->dev_ops = NULL;
        dev->rx_pkt_burst = NULL;
        dev->tx_pkt_burst = NULL;

        /* disable uio/vfio intr */
        rte_intr_disable(&(dev->pci_dev->intr_handle));

        /* PF and VF use different interrupt handling mechanisms */
        if (hw->mac.type == fm10k_mac_pf) {
                /* disable interrupt */
                fm10k_dev_disable_intr_pf(dev);

                /* unregister callback func from eal lib */
                rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
                        fm10k_dev_interrupt_handler_pf, (void *)dev);
        } else {
                /* disable interrupt */
                fm10k_dev_disable_intr_vf(dev);

                rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
                        fm10k_dev_interrupt_handler_vf, (void *)dev);
        }

        /* free mac memory */
        if (dev->data->mac_addrs) {
                rte_free(dev->data->mac_addrs);
                dev->data->mac_addrs = NULL;
        }

        memset(hw, 0, sizeof(*hw));

        return 0;
}

/*
 * The set of PCI devices this driver supports. This driver will enable both PF
 * and SRIOV-VF devices.
 */
static const struct rte_pci_id pci_id_fm10k_map[] = {
#define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
#define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
#include "rte_pci_dev_ids.h"
        { .vendor_id = 0, /* sentinel */ },
};

static struct eth_driver rte_pmd_fm10k = {
        .pci_drv = {
                .name = "rte_pmd_fm10k",
                .id_table = pci_id_fm10k_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                        RTE_PCI_DRV_DETACHABLE,
        },
        .eth_dev_init = eth_fm10k_dev_init,
        .eth_dev_uninit = eth_fm10k_dev_uninit,
        .dev_private_size = sizeof(struct fm10k_adapter),
};

/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Registers itself as the [Poll Mode] Driver of PCI FM10K devices.
 */
static int
rte_pmd_fm10k_init(__rte_unused const char *name,
        __rte_unused const char *params)
{
        PMD_INIT_FUNC_TRACE();
        rte_eth_driver_register(&rte_pmd_fm10k);
        return 0;
}

static struct rte_driver rte_fm10k_driver = {
        .type = PMD_PDEV,
        .init = rte_pmd_fm10k_init,
};

PMD_REGISTER_DRIVER(rte_fm10k_driver);