/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <rte_common.h>
#include <rte_ethdev_pci.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_alarm.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"

static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int  txgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int txgbe_dev_close(struct rte_eth_dev *dev);
static int txgbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);

static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                                      struct rte_intr_handle *handle);
static void txgbe_dev_interrupt_handler(void *param);
static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_txgbe_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = TXGBE_RING_DESC_MAX,
        .nb_min = TXGBE_RING_DESC_MIN,
        .nb_align = TXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = TXGBE_RING_DESC_MAX,
        .nb_min = TXGBE_RING_DESC_MIN,
        .nb_align = TXGBE_TXD_ALIGN,
        .nb_seg_max = TXGBE_TX_MAX_SEG,
        .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops txgbe_eth_dev_ops;

static inline int
txgbe_is_sfp(struct txgbe_hw *hw)
{
        switch (hw->phy.type) {
        case txgbe_phy_sfp_avago:
        case txgbe_phy_sfp_ftl:
        case txgbe_phy_sfp_intel:
        case txgbe_phy_sfp_unknown:
        case txgbe_phy_sfp_tyco_passive:
        case txgbe_phy_sfp_unknown_passive:
                return 1;
        default:
                return 0;
        }
}

static inline void
txgbe_enable_intr(struct rte_eth_dev *dev)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        wr32(hw, TXGBE_IENMISC, intr->mask_misc);
        wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
        wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
        txgbe_flush(hw);
}

static void
txgbe_disable_intr(struct txgbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
        wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
        wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
        txgbe_flush(hw);
}

static int
eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint16_t csum;
        int err;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &txgbe_eth_dev_ops;
        eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
        eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;

        /*
         * For secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX and TX function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                struct txgbe_tx_queue *txq;
                /* Tx queue function in primary, set by last queue initialized.
                 * The Tx queue may not be initialized by the primary process.
                 */
                if (eth_dev->data->tx_queues) {
                        uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
                        txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
                        txgbe_set_tx_function(eth_dev, txq);
                } else {
                        /* Use default TX function if we get here */
                        PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
                                     "Using default TX function.");
                }

                txgbe_set_rx_function(eth_dev);

                return 0;
        }

        rte_eth_copy_pci_info(eth_dev, pci_dev);

        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
        hw->allow_unsupported_sfp = 1;

        /* Reserve memory for interrupt status block */
        mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
                16, TXGBE_ALIGN, SOCKET_ID_ANY);
        if (mz == NULL)
                return -ENOMEM;

        hw->isb_dma = TMZ_PADDR(mz);
        hw->isb_mem = TMZ_VADDR(mz);

        /* Initialize the shared code (base driver) */
        err = txgbe_init_shared_code(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
                return -EIO;
        }

        err = hw->rom.init_params(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
                return -EIO;
        }

        /* Make sure we have a good EEPROM before we read from it */
        err = hw->rom.validate_checksum(hw, &csum);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
                return -EIO;
        }

        err = hw->mac.init_hw(hw);

        /*
         * Devices with copper PHYs will fail to initialise if txgbe_init_hw()
         * is called too soon after the kernel driver unbinding/binding occurs.
         * The failure occurs in txgbe_identify_phy() for all devices,
         * but for non-copper devices, txgbe_identify_sfp_module() is
         * also called. See txgbe_identify_phy(). The reason for the
         * failure is not known, and it only occurs when virtualisation features
         * are disabled in the BIOS. A delay of 200ms was found to be enough by
         * trial-and-error, and is doubled to be safe.
         */
        if (err && hw->phy.media_type == txgbe_media_type_copper) {
                rte_delay_ms(200);
                err = hw->mac.init_hw(hw);
        }

        if (err == TXGBE_ERR_SFP_NOT_PRESENT)
                err = 0;

        if (err == TXGBE_ERR_EEPROM_VERSION) {
                PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
                             "LOM.  Please be aware there may be issues associated "
                             "with your hardware.");
                PMD_INIT_LOG(ERR, "If you are experiencing problems "
                             "please contact your hardware representative "
                             "who provided you with this hardware.");
        } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
                PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
        }
        if (err) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
                return -EIO;
        }

        /* disable interrupt */
        txgbe_disable_intr(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
                                               hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %u bytes needed to store "
                             "MAC addresses",
                             RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        /* Allocate memory for storing hash filter MAC addresses */
        eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
                        RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
        if (eth_dev->data->hash_mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %d bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
                return -ENOMEM;
        }

        if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
                PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
                             (int)hw->mac.type, (int)hw->phy.type,
                             (int)hw->phy.sfp_type);
        else
                PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
                             (int)hw->mac.type, (int)hw->phy.type);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        rte_intr_callback_register(intr_handle,
                                   txgbe_dev_interrupt_handler, eth_dev);

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* enable support intr */
        txgbe_enable_intr(eth_dev);

        return 0;
}

static int
eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        txgbe_dev_close(eth_dev);

        return 0;
}

static int
eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *pf_ethdev;
        struct rte_eth_devargs eth_da;
        int retval;

        if (pci_dev->device.devargs) {
                retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
                                &eth_da);
                if (retval)
                        return retval;
        } else {
                memset(&eth_da, 0, sizeof(eth_da));
        }

        retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                        sizeof(struct txgbe_adapter),
                        eth_dev_pci_specific_init, pci_dev,
                        eth_txgbe_dev_init, NULL);

        if (retval || eth_da.nb_representor_ports < 1)
                return retval;

        pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (pf_ethdev == NULL)
                return -ENODEV;

        return 0;
}

static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *ethdev;

        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (!ethdev)
                return -ENODEV;

        return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
}

static struct rte_pci_driver rte_txgbe_pmd = {
        .id_table = pci_id_txgbe_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
                     RTE_PCI_DRV_INTR_LSC,
        .probe = eth_txgbe_pci_probe,
        .remove = eth_txgbe_pci_remove,
};

static int
txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        switch (nb_rx_q) {
        case 1:
        case 2:
                RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
                break;
        case 4:
                RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
                break;
        default:
                return -EINVAL;
        }

        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
                TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
        RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
                pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
        return 0;
}
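
/*
 * Worked example (illustrative, assuming TXGBE_MAX_RX_QUEUE_NUM is 128):
 * a request for 2 Rx queues per VF selects ETH_64_POOLS, so
 * nb_q_per_pool = 128 / 64 = 2; with max_vfs = 4, the PF's default pool
 * queues then start at index 4 * 2 = 8.
 */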

static int
txgbe_check_mq_mode(struct rte_eth_dev *dev)
{
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;
        uint16_t nb_tx_q = dev->data->nb_tx_queues;

        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_DCB:
                        PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
                        break;
                case ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
                        PMD_INIT_LOG(ERR, "SRIOV active,"
                                        " unsupported mq_mode rx %d.",
                                        dev_conf->rxmode.mq_mode);
                        return -EINVAL;
                case ETH_MQ_RX_RSS:
                case ETH_MQ_RX_VMDQ_RSS:
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
                        if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
                                if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
                                        PMD_INIT_LOG(ERR, "SRIOV is active,"
                                                " invalid queue number"
                                                " for VMDQ RSS, allowed"
                                                " values are 1, 2 or 4.");
                                        return -EINVAL;
                                }
                        break;
                case ETH_MQ_RX_VMDQ_ONLY:
                case ETH_MQ_RX_NONE:
                        /* if no mq mode is configured, use the default scheme */
                        dev->data->dev_conf.rxmode.mq_mode =
                                ETH_MQ_RX_VMDQ_ONLY;
                        break;
                default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */
                        /* SRIOV only works in VMDq enable mode */
                        PMD_INIT_LOG(ERR, "SRIOV is active,"
                                        " wrong mq_mode rx %d.",
                                        dev_conf->rxmode.mq_mode);
                        return -EINVAL;
                }

                switch (dev_conf->txmode.mq_mode) {
                case ETH_MQ_TX_VMDQ_DCB:
                        PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
                        dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
                        break;
                default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
                        dev->data->dev_conf.txmode.mq_mode =
                                ETH_MQ_TX_VMDQ_ONLY;
                        break;
                }

                /* check valid queue number */
                if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
                    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
                        PMD_INIT_LOG(ERR, "SRIOV is active,"
                                        " nb_rx_q=%d nb_tx_q=%d queue number"
                                        " must be less than or equal to %d.",
                                        nb_rx_q, nb_tx_q,
                                        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
                        PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
                                          " not supported.");
                        return -EINVAL;
                }
                /* check configuration for vmdq+dcb mode */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;

                        if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
                                                TXGBE_VMDQ_DCB_NB_QUEUES);
                                return -EINVAL;
                        }
                        conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
                                                " nb_queue_pools must be %d or %d.",
                                                ETH_16_POOLS, ETH_32_POOLS);
                                return -EINVAL;
                        }
                }
                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_tx_conf *conf;

                        if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
                                                 TXGBE_VMDQ_DCB_NB_QUEUES);
                                return -EINVAL;
                        }
                        conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
                                                " nb_queue_pools != %d and"
                                                " nb_queue_pools != %d.",
                                                ETH_16_POOLS, ETH_32_POOLS);
                                return -EINVAL;
                        }
                }

                /* For DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;

                        conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
                                                " and nb_tcs != %d.",
                                                ETH_4_TCS, ETH_8_TCS);
                                return -EINVAL;
                        }
                }

                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;

                        conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
                                                " and nb_tcs != %d.",
                                                ETH_4_TCS, ETH_8_TCS);
                                return -EINVAL;
                        }
                }
        }
        return 0;
}
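
/*
 * Illustrative sketch (not part of the original source): one configuration
 * the checks above accept when SR-IOV is active is VMDQ+RSS on Rx with
 * 1, 2 or 4 queues, and VMDQ-only on Tx:
 *
 *        struct rte_eth_conf conf = {0};
 *
 *        conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
 *        conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
 *        rte_eth_dev_configure(port_id, 2, 2, &conf);
 */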

static int
txgbe_dev_configure(struct rte_eth_dev *dev)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
        int ret;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
                dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

        /* multiple queue mode checking */
        ret = txgbe_check_mq_mode(dev);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
                            ret);
                return ret;
        }

        /* set flag to update link status after init */
        intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

        /*
         * Initialize to TRUE. If any Rx queue does not meet the bulk
         * allocation preconditions, we will reset it.
         */
        adapter->rx_bulk_alloc_allowed = true;

        return 0;
}

static void
txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        uint32_t gpie;

        gpie = rd32(hw, TXGBE_GPIOINTEN);
        gpie |= TXGBE_GPIOBIT_6;
        wr32(hw, TXGBE_GPIOINTEN, gpie);
        intr->mask_misc |= TXGBE_ICRMISC_GPIO;
}

/*
 * Set device link up: enable tx.
 */
static int
txgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        if (hw->phy.media_type == txgbe_media_type_copper) {
                /* Turn on the copper */
                hw->phy.set_phy_power(hw, true);
        } else {
                /* Turn on the laser */
                hw->mac.enable_tx_laser(hw);
                txgbe_dev_link_update(dev, 0);
        }

        return 0;
}

/*
 * Set device link down: disable tx.
 */
static int
txgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        if (hw->phy.media_type == txgbe_media_type_copper) {
                /* Turn off the copper */
                hw->phy.set_phy_power(hw, false);
        } else {
                /* Turn off the laser */
                hw->mac.disable_tx_laser(hw);
                txgbe_dev_link_update(dev, 0);
        }

        return 0;
}

/*
 * Reset and stop device.
 */
static int
txgbe_dev_close(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        int retries = 0;
        int ret;

        PMD_INIT_FUNC_TRACE();

        txgbe_dev_free_queues(dev);

        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);

        do {
                ret = rte_intr_callback_unregister(intr_handle,
                                txgbe_dev_interrupt_handler, dev);
                if (ret >= 0 || ret == -ENOENT) {
                        break;
                } else if (ret != -EAGAIN) {
                        PMD_INIT_LOG(ERR,
                                "intr callback unregister failed: %d",
                                ret);
                }
                rte_delay_ms(100);
        } while (retries++ < (10 + TXGBE_LINK_UP_TIME));

        /* cancel the delayed handler before removing the device */
        rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);

        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

        rte_free(dev->data->hash_mac_addrs);
        dev->data->hash_mac_addrs = NULL;

        return 0;
}

static int
txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
        dev_info->min_rx_bufsize = 1024;
        dev_info->max_rx_pktlen = 15872;
        dev_info->max_mac_addrs = hw->mac.num_rar_entries;
        dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
        dev_info->max_vfs = pci_dev->max_vfs;
        dev_info->max_vmdq_pools = ETH_64_POOLS;
        dev_info->vmdq_queue_num = dev_info->max_rx_queues;
        dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
        dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
                                     dev_info->rx_queue_offload_capa);
        dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
        dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
                        .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
                        .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
                .offloads = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
                        .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
                        .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
                .offloads = 0,
        };

        dev_info->rx_desc_lim = rx_desc_lim;
        dev_info->tx_desc_lim = tx_desc_lim;

        dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
        dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
        dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;

        dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
        dev_info->speed_capa |= ETH_LINK_SPEED_100M;

        /* Driver-preferred Rx/Tx parameters */
        dev_info->default_rxportconf.burst_size = 32;
        dev_info->default_txportconf.burst_size = 32;
        dev_info->default_rxportconf.nb_queues = 1;
        dev_info->default_txportconf.nb_queues = 1;
        dev_info->default_rxportconf.ring_size = 256;
        dev_info->default_txportconf.ring_size = 256;

        return 0;
}

void
txgbe_dev_setup_link_alarm_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        u32 speed;
        bool autoneg = false;

        speed = hw->phy.autoneg_advertised;
        if (!speed)
                hw->mac.get_link_capabilities(hw, &speed, &autoneg);

        hw->mac.setup_link(hw, speed, true);

        intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
}

/* return 0 means link status changed, -1 means not changed */
int
txgbe_dev_link_update_share(struct rte_eth_dev *dev,
                            int wait_to_complete)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct rte_eth_link link;
        u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        bool link_up;
        int err;
        int wait = 1;

        memset(&link, 0, sizeof(link));
        link.link_status = ETH_LINK_DOWN;
        link.link_speed = ETH_SPEED_NUM_NONE;
        link.link_duplex = ETH_LINK_HALF_DUPLEX;
        link.link_autoneg = ETH_LINK_AUTONEG;

        hw->mac.get_link_status = true;

        if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
                return rte_eth_linkstatus_set(dev, &link);

        /* check if it needs to wait to complete, if lsc interrupt is enabled */
        if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
                wait = 0;

        err = hw->mac.check_link(hw, &link_speed, &link_up, wait);

        if (err != 0) {
                link.link_speed = ETH_SPEED_NUM_100M;
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
                return rte_eth_linkstatus_set(dev, &link);
        }

        if (link_up == 0) {
                if (hw->phy.media_type == txgbe_media_type_fiber) {
                        intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
                        rte_eal_alarm_set(10,
                                txgbe_dev_setup_link_alarm_handler, dev);
                }
                return rte_eth_linkstatus_set(dev, &link);
        }

        intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
        link.link_status = ETH_LINK_UP;
        link.link_duplex = ETH_LINK_FULL_DUPLEX;

        switch (link_speed) {
        default:
        case TXGBE_LINK_SPEED_UNKNOWN:
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
                link.link_speed = ETH_SPEED_NUM_100M;
                break;

        case TXGBE_LINK_SPEED_100M_FULL:
                link.link_speed = ETH_SPEED_NUM_100M;
                break;

        case TXGBE_LINK_SPEED_1GB_FULL:
                link.link_speed = ETH_SPEED_NUM_1G;
                break;

        case TXGBE_LINK_SPEED_2_5GB_FULL:
                link.link_speed = ETH_SPEED_NUM_2_5G;
                break;

        case TXGBE_LINK_SPEED_5GB_FULL:
                link.link_speed = ETH_SPEED_NUM_5G;
                break;

        case TXGBE_LINK_SPEED_10GB_FULL:
                link.link_speed = ETH_SPEED_NUM_10G;
                break;
        }

        return rte_eth_linkstatus_set(dev, &link);
}

static int
txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
        return txgbe_dev_link_update_share(dev, wait_to_complete);
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

        txgbe_dev_link_status_print(dev);
        if (on)
                intr->mask_misc |= TXGBE_ICRMISC_LSC;
        else
                intr->mask_misc &= ~TXGBE_ICRMISC_LSC;

        return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

        intr->mask[0] |= TXGBE_ICR_MASK;
        intr->mask[1] |= TXGBE_ICR_MASK;

        return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

        intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;

        return 0;
}

/*
 * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
        uint32_t eicr;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

        /* clear all cause mask */
        txgbe_disable_intr(hw);

        /* read-on-clear nic registers here */
        eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
        PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

        intr->flags = 0;

        /* set flag for async link update */
        if (eicr & TXGBE_ICRMISC_LSC)
                intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

        if (eicr & TXGBE_ICRMISC_VFMBX)
                intr->flags |= TXGBE_FLAG_MAILBOX;

        if (eicr & TXGBE_ICRMISC_LNKSEC)
                intr->flags |= TXGBE_FLAG_MACSEC;

        if (eicr & TXGBE_ICRMISC_GPIO)
                intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;

        return 0;
}

/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  void
 */
static void
txgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_eth_link link;

        rte_eth_linkstatus_get(dev, &link);

        if (link.link_status) {
                PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
                                        (int)(dev->data->port_id),
                                        (unsigned int)link.link_speed,
                        link.link_duplex == ETH_LINK_FULL_DUPLEX ?
                                        "full-duplex" : "half-duplex");
        } else {
                PMD_INIT_LOG(INFO, " Port %d: Link Down",
                                (int)(dev->data->port_id));
        }
        PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
                                pci_dev->addr.domain,
                                pci_dev->addr.bus,
                                pci_dev->addr.devid,
                                pci_dev->addr.function);
}

/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                           struct rte_intr_handle *intr_handle)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        int64_t timeout;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

        if (intr->flags & TXGBE_FLAG_MAILBOX)
                intr->flags &= ~TXGBE_FLAG_MAILBOX;

        if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
                hw->phy.handle_lasi(hw);
                intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
        }

        if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
                struct rte_eth_link link;

                /* get the link status before link update, for predicting later */
                rte_eth_linkstatus_get(dev, &link);

                txgbe_dev_link_update(dev, 0);

                /* likely to come up */
                if (!link.link_status)
                        /* handle it 1 sec later, waiting for it to stabilize */
                        timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
                /* likely to go down */
                else
                        /* handle it 4 sec later, waiting for it to stabilize */
                        timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;

                txgbe_dev_link_status_print(dev);
                if (rte_eal_alarm_set(timeout * 1000,
                                      txgbe_dev_interrupt_delayed_handler,
                                      (void *)dev) < 0) {
                        PMD_DRV_LOG(ERR, "Error setting alarm");
                } else {
                        /* remember original mask */
                        intr->mask_misc_orig = intr->mask_misc;
                        /* only disable lsc interrupt */
                        intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
                }
        }

        PMD_DRV_LOG(DEBUG, "enable intr immediately");
        txgbe_enable_intr(dev);
        rte_intr_enable(intr_handle);

        return 0;
}

/**
 * Interrupt handler which shall be registered as an alarm callback for
 * delayed handling of a specific interrupt, to wait for the NIC state to
 * become stable. The NIC interrupt state is not stable for txgbe right
 * after the link goes down, so it needs to wait 4 seconds to get a stable
 * status.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_delayed_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        uint32_t eicr;

        txgbe_disable_intr(hw);

        eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];

        if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
                hw->phy.handle_lasi(hw);
                intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
        }

        if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
                txgbe_dev_link_update(dev, 0);
                intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
                txgbe_dev_link_status_print(dev);
                rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
                                              NULL);
        }

        if (intr->flags & TXGBE_FLAG_MACSEC) {
                rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
                                              NULL);
                intr->flags &= ~TXGBE_FLAG_MACSEC;
        }

        /* restore original mask */
        intr->mask_misc = intr->mask_misc_orig;
        intr->mask_misc_orig = 0;

        PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
        txgbe_enable_intr(dev);
        rte_intr_enable(intr_handle);
}

/**
 * Interrupt handler triggered by NIC for handling a specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

        txgbe_dev_interrupt_get_status(dev);
        txgbe_dev_interrupt_action(dev, dev->intr_handle);
}

static int
txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
                                uint32_t index, uint32_t pool)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        uint32_t enable_addr = 1;

        return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
                             pool, enable_addr);
}

static void
txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        txgbe_clear_rar(hw, index);
}

static int
txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        txgbe_remove_rar(dev, 0);
        txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);

        return 0;
}

static uint32_t
txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
{
        uint32_t vector = 0;

        switch (hw->mac.mc_filter_type) {
        case 0:   /* use bits [47:36] of the address */
                vector = ((uc_addr->addr_bytes[4] >> 4) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 4));
                break;
        case 1:   /* use bits [46:35] of the address */
                vector = ((uc_addr->addr_bytes[4] >> 3) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 5));
                break;
        case 2:   /* use bits [45:34] of the address */
                vector = ((uc_addr->addr_bytes[4] >> 2) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 6));
                break;
        case 3:   /* use bits [43:32] of the address */
                vector = ((uc_addr->addr_bytes[4]) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 8));
                break;
        default:  /* Invalid mc_filter_type */
                break;
        }

        /* vector can only be 12-bits or boundary will be exceeded */
        vector &= 0xFFF;
        return vector;
}
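
/*
 * Worked example (illustrative): with mc_filter_type == 0 and an address
 * ending in ...:AB:CD, addr_bytes[4] = 0xAB and addr_bytes[5] = 0xCD, so
 * vector = (0xAB >> 4) | (0xCD << 4) = 0x00A | 0xCD0 = 0xCDA. The caller
 * below then splits this 12-bit value into a table index and a bit mask:
 * uta_idx = (0xCDA >> 5) & 0x7F = 0x66 and uta_mask = 1 << (0xCDA & 0x1F)
 * = 1 << 26.
 */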

static int
txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
                        struct rte_ether_addr *mac_addr, uint8_t on)
{
        uint32_t vector;
        uint32_t uta_idx;
        uint32_t reg_val;
        uint32_t uta_mask;
        uint32_t psrctl;

        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);

        /* The UTA table only exists on pf hardware */
        if (hw->mac.type < txgbe_mac_raptor)
                return -ENOTSUP;

        vector = txgbe_uta_vector(hw, mac_addr);
        uta_idx = (vector >> 5) & 0x7F;
        uta_mask = 0x1UL << (vector & 0x1F);

        if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
                return 0;

        reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
        if (on) {
                uta_info->uta_in_use++;
                reg_val |= uta_mask;
                uta_info->uta_shadow[uta_idx] |= uta_mask;
        } else {
                uta_info->uta_in_use--;
                reg_val &= ~uta_mask;
                uta_info->uta_shadow[uta_idx] &= ~uta_mask;
        }

        wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);

        psrctl = rd32(hw, TXGBE_PSRCTL);
        if (uta_info->uta_in_use > 0)
                psrctl |= TXGBE_PSRCTL_UCHFENA;
        else
                psrctl &= ~TXGBE_PSRCTL_UCHFENA;

        psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
        psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
        wr32(hw, TXGBE_PSRCTL, psrctl);

        return 0;
}

static int
txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
        uint32_t psrctl;
        int i;

        /* The UTA table only exists on pf hardware */
        if (hw->mac.type < txgbe_mac_raptor)
                return -ENOTSUP;

        if (on) {
                for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
                        uta_info->uta_shadow[i] = ~0;
                        wr32(hw, TXGBE_UCADDRTBL(i), ~0);
                }
        } else {
                for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
                        uta_info->uta_shadow[i] = 0;
                        wr32(hw, TXGBE_UCADDRTBL(i), 0);
                }
        }

        psrctl = rd32(hw, TXGBE_PSRCTL);
        if (on)
                psrctl |= TXGBE_PSRCTL_UCHFENA;
        else
                psrctl &= ~TXGBE_PSRCTL_UCHFENA;

        psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
        psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
        wr32(hw, TXGBE_PSRCTL, psrctl);

        return 0;
}

/**
 * set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to txgbe_hw struct
 * @param direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @param queue
 *  queue to map the corresponding interrupt to
 * @param msix_vector
 *  the vector to map to the corresponding queue
 */
void
txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
                   uint8_t queue, uint8_t msix_vector)
{
        uint32_t tmp, idx;

        if (direction == -1) {
                /* other causes */
                msix_vector |= TXGBE_IVARMISC_VLD;
                idx = 0;
                tmp = rd32(hw, TXGBE_IVARMISC);
                tmp &= ~(0xFF << idx);
                tmp |= (msix_vector << idx);
                wr32(hw, TXGBE_IVARMISC, tmp);
        } else {
                /* rx or tx causes */
                /* Workaround for ICR lost */
                idx = ((16 * (queue & 1)) + (8 * direction));
                tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
                tmp &= ~(0xFF << idx);
                tmp |= (msix_vector << idx);
                wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
        }
}
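
/*
 * Layout implied by the index math above (illustrative): each 32-bit IVAR
 * register holds four 8-bit entries covering two queues, with Rx in the low
 * byte and Tx in the high byte of each 16-bit half. For example, Rx queue 5
 * (direction 0) lands in IVAR(2) at bit offset 16 * (5 & 1) + 8 * 0 = 16,
 * and Tx queue 5 at offset 16 + 8 = 24 in the same register.
 */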

/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @param dev
 *  pointer to rte_eth_dev struct
 */
static void
txgbe_configure_msix(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
        uint32_t vec = TXGBE_MISC_VEC_ID;
        uint32_t gpie;

        /* Won't configure the MSI-X register if no mapping is done
         * between the interrupt vector and the event fd.
         * But if MSI-X has been enabled already, we need to configure
         * auto clean, auto mask and throttling.
         */
        gpie = rd32(hw, TXGBE_GPIE);
        if (!rte_intr_dp_is_en(intr_handle) &&
            !(gpie & TXGBE_GPIE_MSIX))
                return;

        if (rte_intr_allow_others(intr_handle)) {
                base = TXGBE_RX_VEC_START;
                vec = base;
        }

        /* setup GPIE for MSI-x mode */
        gpie = rd32(hw, TXGBE_GPIE);
        gpie |= TXGBE_GPIE_MSIX;
        wr32(hw, TXGBE_GPIE, gpie);

        /* Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        if (rte_intr_dp_is_en(intr_handle)) {
                for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
                        queue_id++) {
                        /* by default, 1:1 mapping */
                        txgbe_set_ivar_map(hw, 0, queue_id, vec);
                        intr_handle->intr_vec[queue_id] = vec;
                        if (vec < base + intr_handle->nb_efd - 1)
                                vec++;
                }

                txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
        }
        wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
                        TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
                        | TXGBE_ITR_WRDSA);
}
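
/*
 * Resulting mapping (a sketch, assuming rte_intr_allow_others() is true and
 * three Rx queues with enough event fds): queue 0 -> TXGBE_RX_VEC_START,
 * queue 1 -> TXGBE_RX_VEC_START + 1, queue 2 -> TXGBE_RX_VEC_START + 2,
 * while the misc causes stay on TXGBE_MISC_VEC_ID. Once vec reaches
 * base + nb_efd - 1, any remaining queues share that last vector.
 */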

static u8 *
txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
                        u8 **mc_addr_ptr, u32 *vmdq)
{
        u8 *mc_addr;

        *vmdq = 0;
        mc_addr = *mc_addr_ptr;
        *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
        return mc_addr;
}

int
txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
                          struct rte_ether_addr *mc_addr_set,
                          uint32_t nb_mc_addr)
{
        struct txgbe_hw *hw;
        u8 *mc_addr_list;

        hw = TXGBE_DEV_HW(dev);
        mc_addr_list = (u8 *)mc_addr_set;
        return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
                                         txgbe_dev_addr_list_itr, TRUE);
}

static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .dev_configure              = txgbe_dev_configure,
        .dev_infos_get              = txgbe_dev_info_get,
        .dev_set_link_up            = txgbe_dev_set_link_up,
        .dev_set_link_down          = txgbe_dev_set_link_down,
        .rx_queue_start             = txgbe_dev_rx_queue_start,
        .rx_queue_stop              = txgbe_dev_rx_queue_stop,
        .tx_queue_start             = txgbe_dev_tx_queue_start,
        .tx_queue_stop              = txgbe_dev_tx_queue_stop,
        .rx_queue_setup             = txgbe_dev_rx_queue_setup,
        .rx_queue_release           = txgbe_dev_rx_queue_release,
        .tx_queue_setup             = txgbe_dev_tx_queue_setup,
        .tx_queue_release           = txgbe_dev_tx_queue_release,
        .mac_addr_add               = txgbe_add_rar,
        .mac_addr_remove            = txgbe_remove_rar,
        .mac_addr_set               = txgbe_set_default_mac_addr,
        .uc_hash_table_set          = txgbe_uc_hash_table_set,
        .uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
        .set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
};

RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);

#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
        RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
        RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
#endif

#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
        RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
#endif