net/txgbe: add unicast hash bitmap
[dpdk.git] drivers/net/txgbe/txgbe_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <rte_common.h>
#include <rte_ethdev_pci.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_alarm.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"

static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int  txgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int txgbe_dev_close(struct rte_eth_dev *dev);
static int txgbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);

static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                                      struct rte_intr_handle *handle);
static void txgbe_dev_interrupt_handler(void *param);
static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_txgbe_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = TXGBE_RING_DESC_MAX,
        .nb_min = TXGBE_RING_DESC_MIN,
        .nb_align = TXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = TXGBE_RING_DESC_MAX,
        .nb_min = TXGBE_RING_DESC_MIN,
        .nb_align = TXGBE_TXD_ALIGN,
        .nb_seg_max = TXGBE_TX_MAX_SEG,
        .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops txgbe_eth_dev_ops;

static inline int
txgbe_is_sfp(struct txgbe_hw *hw)
{
        switch (hw->phy.type) {
        case txgbe_phy_sfp_avago:
        case txgbe_phy_sfp_ftl:
        case txgbe_phy_sfp_intel:
        case txgbe_phy_sfp_unknown:
        case txgbe_phy_sfp_tyco_passive:
        case txgbe_phy_sfp_unknown_passive:
                return 1;
        default:
                return 0;
        }
}

static inline void
txgbe_enable_intr(struct rte_eth_dev *dev)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        wr32(hw, TXGBE_IENMISC, intr->mask_misc);
        wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
        wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
        txgbe_flush(hw);
}

static void
txgbe_disable_intr(struct txgbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
        wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
        wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
        txgbe_flush(hw);
}

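/*
 * PF device init flow (summary of the steps below): bind dev_ops, reserve
 * a DMA zone for the interrupt status block (ISB), bring up the base
 * driver (shared code, EEPROM checksum, MAC init), allocate the RAR and
 * unicast hash MAC address tables, then register and enable the misc
 * interrupt handler.
 */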
static int
eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint16_t csum;
        int err;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &txgbe_eth_dev_ops;

        rte_eth_copy_pci_info(eth_dev, pci_dev);

        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
        hw->allow_unsupported_sfp = 1;

        /* Reserve memory for interrupt status block */
        mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
                16, TXGBE_ALIGN, SOCKET_ID_ANY);
        if (mz == NULL)
                return -ENOMEM;

        hw->isb_dma = TMZ_PADDR(mz);
        hw->isb_mem = TMZ_VADDR(mz);

        /* Initialize the shared code (base driver) */
        err = txgbe_init_shared_code(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
                return -EIO;
        }

        err = hw->rom.init_params(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
                return -EIO;
        }

        /* Make sure we have a good EEPROM before we read from it */
        err = hw->rom.validate_checksum(hw, &csum);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
                return -EIO;
        }

        err = hw->mac.init_hw(hw);

        /*
         * Devices with copper phys will fail to initialise if txgbe_init_hw()
         * is called too soon after the kernel driver unbinding/binding occurs.
         * The failure occurs in txgbe_identify_phy() for all devices,
         * but for non-copper devices, txgbe_identify_sfp_module() is
         * also called. See txgbe_identify_phy(). The reason for the
         * failure is not known, and it only occurs when virtualisation
         * features are disabled in the BIOS. A delay of 200ms was found to be
         * enough by trial-and-error, and is doubled to be safe.
         */
        if (err && hw->phy.media_type == txgbe_media_type_copper) {
                rte_delay_ms(200);
                err = hw->mac.init_hw(hw);
        }

        if (err == TXGBE_ERR_SFP_NOT_PRESENT)
                err = 0;

        if (err == TXGBE_ERR_EEPROM_VERSION) {
                PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
                             "LOM.  Please be aware there may be issues associated "
                             "with your hardware.");
                PMD_INIT_LOG(ERR, "If you are experiencing problems "
                             "please contact your hardware representative "
                             "who provided you with this hardware.");
        } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
                PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
        }
        if (err) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
                return -EIO;
        }

        /* disable interrupt */
        txgbe_disable_intr(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
                                               hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %u bytes needed to store "
                             "MAC addresses",
                             RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        /* Allocate memory for storing hash filter MAC addresses */
        eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
                        RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
        if (eth_dev->data->hash_mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %d bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
                return -ENOMEM;
        }

        if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
                PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
                             (int)hw->mac.type, (int)hw->phy.type,
                             (int)hw->phy.sfp_type);
        else
                PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
                             (int)hw->mac.type, (int)hw->phy.type);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        rte_intr_callback_register(intr_handle,
                                   txgbe_dev_interrupt_handler, eth_dev);

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* enable support intr */
        txgbe_enable_intr(eth_dev);

        return 0;
}

static int
eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        txgbe_dev_close(eth_dev);

        return 0;
}

static int
eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *pf_ethdev;
        struct rte_eth_devargs eth_da;
        int retval;

        if (pci_dev->device.devargs) {
                retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
                                &eth_da);
                if (retval)
                        return retval;
        } else {
                memset(&eth_da, 0, sizeof(eth_da));
        }

        retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                        sizeof(struct txgbe_adapter),
                        eth_dev_pci_specific_init, pci_dev,
                        eth_txgbe_dev_init, NULL);

        if (retval || eth_da.nb_representor_ports < 1)
                return retval;

        pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (pf_ethdev == NULL)
                return -ENODEV;

        return 0;
}

static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *ethdev;

        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (!ethdev)
                return -ENODEV;

        return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
}

static struct rte_pci_driver rte_txgbe_pmd = {
        .id_table = pci_id_txgbe_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
                     RTE_PCI_DRV_INTR_LSC,
        .probe = eth_txgbe_pci_probe,
        .remove = eth_txgbe_pci_remove,
};

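/*
 * In SRIOV mode the per-pool RSS queue count fixes the pool layout:
 * 1 or 2 Rx queues select 64 pools, 4 Rx queues select 32 pools. The
 * queues per pool then follow as TXGBE_MAX_RX_QUEUE_NUM / active pools,
 * and the PF's default queues start right after the last VF pool
 * (max_vfs * nb_q_per_pool).
 */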
static int
txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        switch (nb_rx_q) {
        case 1:
        case 2:
                RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
                break;
        case 4:
                RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
                break;
        default:
                return -EINVAL;
        }

        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
                TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
        RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
                pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
        return 0;
}

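/*
 * Validate the requested Rx/Tx multi-queue modes against the SRIOV state:
 * with SRIOV active only VMDq-based modes are accepted and the queue
 * counts must fit within one pool; without SRIOV, the DCB configurations
 * are checked for a legal number of pools (16/32) and TCs (4/8).
 */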
static int
txgbe_check_mq_mode(struct rte_eth_dev *dev)
{
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;
        uint16_t nb_tx_q = dev->data->nb_tx_queues;

        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_DCB:
                        PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
                        break;
                case ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
                        PMD_INIT_LOG(ERR, "SRIOV active,"
                                        " unsupported mq_mode rx %d.",
                                        dev_conf->rxmode.mq_mode);
                        return -EINVAL;
                case ETH_MQ_RX_RSS:
                case ETH_MQ_RX_VMDQ_RSS:
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
                        if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
                                if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
                                        PMD_INIT_LOG(ERR, "SRIOV is active,"
                                                " invalid queue number"
                                                " for VMDQ RSS, allowed"
                                                " values are 1, 2 or 4.");
                                        return -EINVAL;
                                }
                        break;
                case ETH_MQ_RX_VMDQ_ONLY:
                case ETH_MQ_RX_NONE:
                        /* if no mq mode is configured, use the default scheme */
                        dev->data->dev_conf.rxmode.mq_mode =
                                ETH_MQ_RX_VMDQ_ONLY;
                        break;
                default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */
                        /* SRIOV only works in VMDq enable mode */
                        PMD_INIT_LOG(ERR, "SRIOV is active,"
                                        " wrong mq_mode rx %d.",
                                        dev_conf->rxmode.mq_mode);
                        return -EINVAL;
                }

                switch (dev_conf->txmode.mq_mode) {
                case ETH_MQ_TX_VMDQ_DCB:
                        PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
                        dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
                        break;
                default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
                        dev->data->dev_conf.txmode.mq_mode =
                                ETH_MQ_TX_VMDQ_ONLY;
                        break;
                }

                /* check valid queue number */
                if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
                    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
                        PMD_INIT_LOG(ERR, "SRIOV is active,"
                                        " nb_rx_q=%d nb_tx_q=%d queue number"
                                        " must be less than or equal to %d.",
                                        nb_rx_q, nb_tx_q,
                                        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
                        PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
                                          " not supported.");
                        return -EINVAL;
                }
                /* check configuration for vmdq+dcb mode */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;

                        if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
                                                TXGBE_VMDQ_DCB_NB_QUEUES);
                                return -EINVAL;
                        }
                        conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
                                                " nb_queue_pools must be %d or %d.",
                                                ETH_16_POOLS, ETH_32_POOLS);
                                return -EINVAL;
                        }
                }
                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_tx_conf *conf;

                        if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
                                                 TXGBE_VMDQ_DCB_NB_QUEUES);
                                return -EINVAL;
                        }
                        conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
                                                " nb_queue_pools != %d and"
                                                " nb_queue_pools != %d.",
                                                ETH_16_POOLS, ETH_32_POOLS);
                                return -EINVAL;
                        }
                }

                /* For DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;

                        conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
                                                " and nb_tcs != %d.",
                                                ETH_4_TCS, ETH_8_TCS);
                                return -EINVAL;
                        }
                }

                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;

                        conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
                                                " and nb_tcs != %d.",
                                                ETH_4_TCS, ETH_8_TCS);
                                return -EINVAL;
                        }
                }
        }
        return 0;
}

static int
txgbe_dev_configure(struct rte_eth_dev *dev)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
        int ret;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
                dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

        /* multiple queue mode checking */
        ret = txgbe_check_mq_mode(dev);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
                            ret);
                return ret;
        }

        /* set flag to update link status after init */
        intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

        /*
         * Initialize to TRUE. If any Rx queue doesn't meet the bulk
         * allocation preconditions we will reset it.
         */
        adapter->rx_bulk_alloc_allowed = true;

        return 0;
}

static void
txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        uint32_t gpie;

        gpie = rd32(hw, TXGBE_GPIOINTEN);
        gpie |= TXGBE_GPIOBIT_6;
        wr32(hw, TXGBE_GPIOINTEN, gpie);
        intr->mask_misc |= TXGBE_ICRMISC_GPIO;
}

/*
 * Set device link up: enable tx.
 */
static int
txgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        if (hw->phy.media_type == txgbe_media_type_copper) {
                /* Turn on the copper */
                hw->phy.set_phy_power(hw, true);
        } else {
                /* Turn on the laser */
                hw->mac.enable_tx_laser(hw);
                txgbe_dev_link_update(dev, 0);
        }

        return 0;
}

/*
 * Set device link down: disable tx.
 */
static int
txgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        if (hw->phy.media_type == txgbe_media_type_copper) {
                /* Turn off the copper */
                hw->phy.set_phy_power(hw, false);
        } else {
                /* Turn off the laser */
                hw->mac.disable_tx_laser(hw);
                txgbe_dev_link_update(dev, 0);
        }

        return 0;
}

/*
 * Reset and stop device.
 */
static int
txgbe_dev_close(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        int retries = 0;
        int ret;

        PMD_INIT_FUNC_TRACE();

        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);

        do {
                ret = rte_intr_callback_unregister(intr_handle,
                                txgbe_dev_interrupt_handler, dev);
                if (ret >= 0 || ret == -ENOENT) {
                        break;
                } else if (ret != -EAGAIN) {
                        PMD_INIT_LOG(ERR,
                                "intr callback unregister failed: %d",
                                ret);
                }
                rte_delay_ms(100);
        } while (retries++ < (10 + TXGBE_LINK_UP_TIME));

        /* cancel the delay handler before remove dev */
        rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);

        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

        rte_free(dev->data->hash_mac_addrs);
        dev->data->hash_mac_addrs = NULL;

        return 0;
}

static int
txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
        dev_info->min_rx_bufsize = 1024;
        dev_info->max_rx_pktlen = 15872;
        dev_info->max_mac_addrs = hw->mac.num_rar_entries;
        dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
        dev_info->max_vfs = pci_dev->max_vfs;
        dev_info->max_vmdq_pools = ETH_64_POOLS;
        dev_info->vmdq_queue_num = dev_info->max_rx_queues;
        dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
        dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
                                     dev_info->rx_queue_offload_capa);
        dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
        dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
                        .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
                        .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
                .offloads = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
                        .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
                        .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
                .offloads = 0,
        };

        dev_info->rx_desc_lim = rx_desc_lim;
        dev_info->tx_desc_lim = tx_desc_lim;

        dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
        dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
        dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;

        dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
        dev_info->speed_capa |= ETH_LINK_SPEED_100M;

        /* Driver-preferred Rx/Tx parameters */
        dev_info->default_rxportconf.burst_size = 32;
        dev_info->default_txportconf.burst_size = 32;
        dev_info->default_rxportconf.nb_queues = 1;
        dev_info->default_txportconf.nb_queues = 1;
        dev_info->default_rxportconf.ring_size = 256;
        dev_info->default_txportconf.ring_size = 256;

        return 0;
}

void
txgbe_dev_setup_link_alarm_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        u32 speed;
        bool autoneg = false;

        speed = hw->phy.autoneg_advertised;
        if (!speed)
                hw->mac.get_link_capabilities(hw, &speed, &autoneg);

        hw->mac.setup_link(hw, speed, true);

        intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
}

/* return 0 means link status changed, -1 means not changed */
int
txgbe_dev_link_update_share(struct rte_eth_dev *dev,
                            int wait_to_complete)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct rte_eth_link link;
        u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        bool link_up;
        int err;
        int wait = 1;

        memset(&link, 0, sizeof(link));
        link.link_status = ETH_LINK_DOWN;
        link.link_speed = ETH_SPEED_NUM_NONE;
        link.link_duplex = ETH_LINK_HALF_DUPLEX;
        link.link_autoneg = ETH_LINK_AUTONEG;

        hw->mac.get_link_status = true;

        if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
                return rte_eth_linkstatus_set(dev, &link);

        /* no need to wait for completion if the LSC interrupt is enabled */
        if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
                wait = 0;

        err = hw->mac.check_link(hw, &link_speed, &link_up, wait);

        if (err != 0) {
                link.link_speed = ETH_SPEED_NUM_100M;
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
                return rte_eth_linkstatus_set(dev, &link);
        }

        if (link_up == 0) {
                if (hw->phy.media_type == txgbe_media_type_fiber) {
                        intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
                        rte_eal_alarm_set(10,
                                txgbe_dev_setup_link_alarm_handler, dev);
                }
                return rte_eth_linkstatus_set(dev, &link);
        }

        intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
        link.link_status = ETH_LINK_UP;
        link.link_duplex = ETH_LINK_FULL_DUPLEX;

        switch (link_speed) {
        default:
        case TXGBE_LINK_SPEED_UNKNOWN:
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
                link.link_speed = ETH_SPEED_NUM_100M;
                break;

        case TXGBE_LINK_SPEED_100M_FULL:
                link.link_speed = ETH_SPEED_NUM_100M;
                break;

        case TXGBE_LINK_SPEED_1GB_FULL:
                link.link_speed = ETH_SPEED_NUM_1G;
                break;

        case TXGBE_LINK_SPEED_2_5GB_FULL:
                link.link_speed = ETH_SPEED_NUM_2_5G;
                break;

        case TXGBE_LINK_SPEED_5GB_FULL:
                link.link_speed = ETH_SPEED_NUM_5G;
                break;

        case TXGBE_LINK_SPEED_10GB_FULL:
                link.link_speed = ETH_SPEED_NUM_10G;
                break;
        }

        return rte_eth_linkstatus_set(dev, &link);
}

static int
txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
        return txgbe_dev_link_update_share(dev, wait_to_complete);
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

        txgbe_dev_link_status_print(dev);
        if (on)
                intr->mask_misc |= TXGBE_ICRMISC_LSC;
        else
                intr->mask_misc &= ~TXGBE_ICRMISC_LSC;

        return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

        intr->mask[0] |= TXGBE_ICR_MASK;
        intr->mask[1] |= TXGBE_ICR_MASK;

        return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

        intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;

        return 0;
}

/*
 * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
        uint32_t eicr;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

        /* clear all cause mask */
        txgbe_disable_intr(hw);

        /* read-on-clear nic registers here */
        eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
        PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

        intr->flags = 0;

        /* set flag for async link update */
        if (eicr & TXGBE_ICRMISC_LSC)
                intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

        if (eicr & TXGBE_ICRMISC_VFMBX)
                intr->flags |= TXGBE_FLAG_MAILBOX;

        if (eicr & TXGBE_ICRMISC_LNKSEC)
                intr->flags |= TXGBE_FLAG_MACSEC;

        if (eicr & TXGBE_ICRMISC_GPIO)
                intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;

        return 0;
}

/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  void
 */
static void
txgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_eth_link link;

        rte_eth_linkstatus_get(dev, &link);

        if (link.link_status) {
                PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
                                        (int)(dev->data->port_id),
                                        (unsigned int)link.link_speed,
                        link.link_duplex == ETH_LINK_FULL_DUPLEX ?
                                        "full-duplex" : "half-duplex");
        } else {
                PMD_INIT_LOG(INFO, " Port %d: Link Down",
                                (int)(dev->data->port_id));
        }
        PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
                                pci_dev->addr.domain,
                                pci_dev->addr.bus,
                                pci_dev->addr.devid,
                                pci_dev->addr.function);
}

/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                           struct rte_intr_handle *intr_handle)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        int64_t timeout;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

        if (intr->flags & TXGBE_FLAG_MAILBOX)
                intr->flags &= ~TXGBE_FLAG_MAILBOX;

        if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
                hw->phy.handle_lasi(hw);
                intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
        }

        if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
                struct rte_eth_link link;

                /* get the link status before link update, for prediction */
                rte_eth_linkstatus_get(dev, &link);

                txgbe_dev_link_update(dev, 0);

                /* link was down: likely to come up */
                if (!link.link_status)
                        /* handle it 1 sec later, wait for it to become stable */
                        timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
                /* link was up: likely to go down */
                else
                        /* handle it 4 sec later, wait for it to become stable */
                        timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;

                txgbe_dev_link_status_print(dev);
                if (rte_eal_alarm_set(timeout * 1000,
                                      txgbe_dev_interrupt_delayed_handler,
                                      (void *)dev) < 0) {
                        PMD_DRV_LOG(ERR, "Error setting alarm");
                } else {
                        /* remember original mask */
                        intr->mask_misc_orig = intr->mask_misc;
                        /* only disable lsc interrupt */
                        intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
                }
        }

        PMD_DRV_LOG(DEBUG, "enable intr immediately");
        txgbe_enable_intr(dev);
        rte_intr_enable(intr_handle);

        return 0;
}

/**
 * Interrupt handler to be registered as an alarm callback for delayed
 * handling of a specific interrupt, waiting for the NIC state to become
 * stable. As the txgbe interrupt state is not stable right after the link
 * goes down, it needs to wait 4 seconds to get a stable status.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_delayed_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        uint32_t eicr;

        txgbe_disable_intr(hw);

        eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];

        if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
                hw->phy.handle_lasi(hw);
                intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
        }

        if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
                txgbe_dev_link_update(dev, 0);
                intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
                txgbe_dev_link_status_print(dev);
                rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
                                              NULL);
        }

        if (intr->flags & TXGBE_FLAG_MACSEC) {
                rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
                                              NULL);
                intr->flags &= ~TXGBE_FLAG_MACSEC;
        }

        /* restore original mask */
        intr->mask_misc = intr->mask_misc_orig;
        intr->mask_misc_orig = 0;

        PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
        txgbe_enable_intr(dev);
        rte_intr_enable(intr_handle);
}

/**
 * Interrupt handler triggered by the NIC for handling a specific
 * interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

        txgbe_dev_interrupt_get_status(dev);
        txgbe_dev_interrupt_action(dev, dev->intr_handle);
}

static int
txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
                                uint32_t index, uint32_t pool)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        uint32_t enable_addr = 1;

        return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
                             pool, enable_addr);
}

static void
txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        txgbe_clear_rar(hw, index);
}

static int
txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        txgbe_remove_rar(dev, 0);
        txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);

        return 0;
}

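/*
 * Compute the 12-bit hash vector for the unicast hash table from the top
 * bits of the MAC address; mc_filter_type selects the exact bit window.
 * Worked example (hypothetical address, filter type 0, bits [47:36]):
 * for 00:11:22:33:44:55, addr_bytes[4] = 0x44 and addr_bytes[5] = 0x55,
 * so vector = (0x44 >> 4) | (0x55 << 4) = 0x554.
 */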
static uint32_t
txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
{
        uint32_t vector = 0;

        switch (hw->mac.mc_filter_type) {
        case 0:   /* use bits [47:36] of the address */
                vector = ((uc_addr->addr_bytes[4] >> 4) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 4));
                break;
        case 1:   /* use bits [46:35] of the address */
                vector = ((uc_addr->addr_bytes[4] >> 3) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 5));
                break;
        case 2:   /* use bits [45:34] of the address */
                vector = ((uc_addr->addr_bytes[4] >> 2) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 6));
                break;
        case 3:   /* use bits [43:32] of the address */
                vector = ((uc_addr->addr_bytes[4]) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 8));
                break;
        default:  /* Invalid mc_filter_type */
                break;
        }

        /* vector can only be 12-bits or boundary will be exceeded */
        vector &= 0xFFF;
        return vector;
}

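/*
 * Set or clear one address in the 4096-bit unicast hash bitmap: the upper
 * 7 bits of the 12-bit vector select one of 128 32-bit UCADDRTBL
 * registers, the low 5 bits select the bit within it. A shadow copy is
 * kept in uta_shadow, and unicast hash filtering (PSRCTL_UCHFENA) stays
 * enabled only while at least one entry is in use.
 */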
static int
txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
                        struct rte_ether_addr *mac_addr, uint8_t on)
{
        uint32_t vector;
        uint32_t uta_idx;
        uint32_t reg_val;
        uint32_t uta_mask;
        uint32_t psrctl;

        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);

        /* The UTA table only exists on pf hardware */
        if (hw->mac.type < txgbe_mac_raptor)
                return -ENOTSUP;

        vector = txgbe_uta_vector(hw, mac_addr);
        uta_idx = (vector >> 5) & 0x7F;
        uta_mask = 0x1UL << (vector & 0x1F);

        if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
                return 0;

        reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
        if (on) {
                uta_info->uta_in_use++;
                reg_val |= uta_mask;
                uta_info->uta_shadow[uta_idx] |= uta_mask;
        } else {
                uta_info->uta_in_use--;
                reg_val &= ~uta_mask;
                uta_info->uta_shadow[uta_idx] &= ~uta_mask;
        }

        wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);

        psrctl = rd32(hw, TXGBE_PSRCTL);
        if (uta_info->uta_in_use > 0)
                psrctl |= TXGBE_PSRCTL_UCHFENA;
        else
                psrctl &= ~TXGBE_PSRCTL_UCHFENA;

        psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
        psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
        wr32(hw, TXGBE_PSRCTL, psrctl);

        return 0;
}

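/*
 * Set or clear the whole unicast hash bitmap at once, mirroring every
 * UCADDRTBL register into uta_shadow and toggling unicast hash filtering
 * in PSRCTL accordingly.
 */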
static int
txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
        uint32_t psrctl;
        int i;

        /* The UTA table only exists on pf hardware */
        if (hw->mac.type < txgbe_mac_raptor)
                return -ENOTSUP;

        if (on) {
                for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
                        uta_info->uta_shadow[i] = ~0;
                        wr32(hw, TXGBE_UCADDRTBL(i), ~0);
                }
        } else {
                for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
                        uta_info->uta_shadow[i] = 0;
                        wr32(hw, TXGBE_UCADDRTBL(i), 0);
                }
        }

        psrctl = rd32(hw, TXGBE_PSRCTL);
        if (on)
                psrctl |= TXGBE_PSRCTL_UCHFENA;
        else
                psrctl &= ~TXGBE_PSRCTL_UCHFENA;

        psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
        psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
        wr32(hw, TXGBE_PSRCTL, psrctl);

        return 0;
}

/**
 * set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to txgbe_hw struct
 * @direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @queue
 *  queue to map the corresponding interrupt to
 * @msix_vector
 *  the vector to map to the corresponding queue
 */
void
txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
                   uint8_t queue, uint8_t msix_vector)
{
        uint32_t tmp, idx;

        if (direction == -1) {
                /* other causes */
                msix_vector |= TXGBE_IVARMISC_VLD;
                idx = 0;
                tmp = rd32(hw, TXGBE_IVARMISC);
                tmp &= ~(0xFF << idx);
                tmp |= (msix_vector << idx);
                wr32(hw, TXGBE_IVARMISC, tmp);
        } else {
                /* rx or tx causes */
                /* Workaround for ICR lost */
                idx = ((16 * (queue & 1)) + (8 * direction));
                tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
                tmp &= ~(0xFF << idx);
                tmp |= (msix_vector << idx);
                wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
        }
}

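/*
 * Note on txgbe_set_ivar_map() above: each 32-bit IVAR register holds
 * four 8-bit vector entries covering the Rx and Tx causes of two queues,
 * hence idx = 16 * (queue & 1) + 8 * direction. For example (hypothetical
 * values), Tx queue 3 maps into IVAR(1) at bit offset 16 * 1 + 8 * 1 = 24.
 */
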
/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @dev
 *  board private structure
 */
static void
txgbe_configure_msix(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
        uint32_t vec = TXGBE_MISC_VEC_ID;
        uint32_t gpie;

        /* won't configure the MSI-X register if no mapping is done
         * between intr vector and event fd,
         * but if MSI-X has been enabled already, we need to configure
         * auto clean, auto mask and throttling.
         */
        gpie = rd32(hw, TXGBE_GPIE);
        if (!rte_intr_dp_is_en(intr_handle) &&
            !(gpie & TXGBE_GPIE_MSIX))
                return;

        if (rte_intr_allow_others(intr_handle)) {
                base = TXGBE_RX_VEC_START;
                vec = base;
        }

        /* setup GPIE for MSI-X mode */
        gpie = rd32(hw, TXGBE_GPIE);
        gpie |= TXGBE_GPIE_MSIX;
        wr32(hw, TXGBE_GPIE, gpie);

        /* Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        if (rte_intr_dp_is_en(intr_handle)) {
                for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
                        queue_id++) {
                        /* by default, 1:1 mapping */
                        txgbe_set_ivar_map(hw, 0, queue_id, vec);
                        intr_handle->intr_vec[queue_id] = vec;
                        if (vec < base + intr_handle->nb_efd - 1)
                                vec++;
                }

                txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
        }
        wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
                        TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
                        | TXGBE_ITR_WRDSA);
}

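/*
 * Iterator handed to the base driver's multicast update routine: it
 * returns the current address, advances the cursor by one rte_ether_addr
 * and reports VMDq pool 0 for every entry.
 */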
static u8 *
txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
                        u8 **mc_addr_ptr, u32 *vmdq)
{
        u8 *mc_addr;

        *vmdq = 0;
        mc_addr = *mc_addr_ptr;
        *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
        return mc_addr;
}

int
txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
                          struct rte_ether_addr *mc_addr_set,
                          uint32_t nb_mc_addr)
{
        struct txgbe_hw *hw;
        u8 *mc_addr_list;

        hw = TXGBE_DEV_HW(dev);
        mc_addr_list = (u8 *)mc_addr_set;
        return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
                                         txgbe_dev_addr_list_itr, TRUE);
}

static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .dev_configure              = txgbe_dev_configure,
        .dev_infos_get              = txgbe_dev_info_get,
        .dev_set_link_up            = txgbe_dev_set_link_up,
        .dev_set_link_down          = txgbe_dev_set_link_down,
        .mac_addr_add               = txgbe_add_rar,
        .mac_addr_remove            = txgbe_remove_rar,
        .mac_addr_set               = txgbe_set_default_mac_addr,
        .uc_hash_table_set          = txgbe_uc_hash_table_set,
        .uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
        .set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
};

RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);

#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
        RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
        RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
#endif

#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
        RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
#endif