/* dpdk.git: drivers/net/txgbe/txgbe_ethdev.c */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <rte_common.h>
#include <rte_ethdev_pci.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_alarm.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"

static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int  txgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int txgbe_dev_close(struct rte_eth_dev *dev);
static int txgbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);

static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                                      struct rte_intr_handle *handle);
static void txgbe_dev_interrupt_handler(void *param);
static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_txgbe_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = TXGBE_RING_DESC_MAX,
        .nb_min = TXGBE_RING_DESC_MIN,
        .nb_align = TXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = TXGBE_RING_DESC_MAX,
        .nb_min = TXGBE_RING_DESC_MIN,
        .nb_align = TXGBE_TXD_ALIGN,
        .nb_seg_max = TXGBE_TX_MAX_SEG,
        .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops txgbe_eth_dev_ops;

static inline int
txgbe_is_sfp(struct txgbe_hw *hw)
{
        switch (hw->phy.type) {
        case txgbe_phy_sfp_avago:
        case txgbe_phy_sfp_ftl:
        case txgbe_phy_sfp_intel:
        case txgbe_phy_sfp_unknown:
        case txgbe_phy_sfp_tyco_passive:
        case txgbe_phy_sfp_unknown_passive:
                return 1;
        default:
                return 0;
        }
}

static inline int32_t
txgbe_pf_reset_hw(struct txgbe_hw *hw)
{
        uint32_t ctrl_ext;
        int32_t status;

        status = hw->mac.reset_hw(hw);

        ctrl_ext = rd32(hw, TXGBE_PORTCTL);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
        wr32(hw, TXGBE_PORTCTL, ctrl_ext);
        txgbe_flush(hw);

        if (status == TXGBE_ERR_SFP_NOT_PRESENT)
                status = 0;
        return status;
}
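/*
 * Interrupt mask scheme, as inferred from the register usage in this file:
 * TXGBE_IMC(n) is a write-1-to-clear view of the queue interrupt mask
 * (clearing a mask bit enables the interrupt), TXGBE_IMS(n) is the
 * write-1-to-set view (setting a mask bit disables it), and misc causes
 * are gated separately through the TXGBE_IENMISC enable register.
 */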
static inline void
txgbe_enable_intr(struct rte_eth_dev *dev)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        wr32(hw, TXGBE_IENMISC, intr->mask_misc);
        wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
        wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
        txgbe_flush(hw);
}

static void
txgbe_disable_intr(struct txgbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
        wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
        wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
        txgbe_flush(hw);
}

static int
eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint16_t csum;
        int err;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &txgbe_eth_dev_ops;
        eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
        eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
        eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;

        /*
         * For secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX and TX function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                struct txgbe_tx_queue *txq;
                /* The TX function in the primary process was set by the last
                 * queue initialized; the Tx queues may not have been
                 * initialized by the primary process yet.
                 */
                if (eth_dev->data->tx_queues) {
                        uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
                        txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
                        txgbe_set_tx_function(eth_dev, txq);
                } else {
                        /* Use default TX function if we get here */
                        PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
                                     "Using default TX function.");
                }

                txgbe_set_rx_function(eth_dev);

                return 0;
        }

        rte_eth_copy_pci_info(eth_dev, pci_dev);

        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
        hw->allow_unsupported_sfp = 1;

        /* Reserve memory for interrupt status block */
        mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
                16, TXGBE_ALIGN, SOCKET_ID_ANY);
        if (mz == NULL)
                return -ENOMEM;

        hw->isb_dma = TMZ_PADDR(mz);
        hw->isb_mem = TMZ_VADDR(mz);
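        /* The NIC DMA-writes interrupt causes into this block; the interrupt
         * handlers below read them from isb_mem rather than polling registers
         * (see txgbe_dev_interrupt_get_status()).
         */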

        /* Initialize the shared code (base driver) */
        err = txgbe_init_shared_code(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
                return -EIO;
        }

        err = hw->rom.init_params(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
                return -EIO;
        }

        /* Make sure we have a good EEPROM before we read from it */
        err = hw->rom.validate_checksum(hw, &csum);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
                return -EIO;
        }

        err = hw->mac.init_hw(hw);

        /*
         * Devices with copper PHYs will fail to initialise if txgbe_init_hw()
         * is called too soon after the kernel driver unbinding/binding occurs.
         * The failure occurs in txgbe_identify_phy() for all devices,
         * but for non-copper devices, txgbe_identify_sfp_module() is
         * also called. See txgbe_identify_phy(). The reason for the
         * failure is not known, and it only occurs when virtualisation
         * features are disabled in the BIOS. A delay of 200ms was found to
         * be enough by trial-and-error, and is doubled to be safe.
         */
        if (err && hw->phy.media_type == txgbe_media_type_copper) {
                rte_delay_ms(200);
                err = hw->mac.init_hw(hw);
        }

        if (err == TXGBE_ERR_SFP_NOT_PRESENT)
                err = 0;

        if (err == TXGBE_ERR_EEPROM_VERSION) {
                PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
                             "LOM.  Please be aware there may be issues associated "
                             "with your hardware.");
                PMD_INIT_LOG(ERR, "If you are experiencing problems "
                             "please contact your hardware representative "
                             "who provided you with this hardware.");
        } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
                PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
        }
        if (err) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
                return -EIO;
        }

        /* disable interrupt */
        txgbe_disable_intr(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
                                               hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %u bytes needed to store "
                             "MAC addresses",
                             RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        /* Allocate memory for storing hash filter MAC addresses */
        eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
                        RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
        if (eth_dev->data->hash_mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %d bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
                return -ENOMEM;
        }

        if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
                PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
                             (int)hw->mac.type, (int)hw->phy.type,
                             (int)hw->phy.sfp_type);
        else
                PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
                             (int)hw->mac.type, (int)hw->phy.type);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        rte_intr_callback_register(intr_handle,
                                   txgbe_dev_interrupt_handler, eth_dev);

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* enable support intr */
        txgbe_enable_intr(eth_dev);

        return 0;
}

static int
eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        txgbe_dev_close(eth_dev);

        return 0;
}

static int
eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *pf_ethdev;
        struct rte_eth_devargs eth_da;
        int retval;

        if (pci_dev->device.devargs) {
                retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
                                &eth_da);
                if (retval)
                        return retval;
        } else {
                memset(&eth_da, 0, sizeof(eth_da));
        }

        retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                        sizeof(struct txgbe_adapter),
                        eth_dev_pci_specific_init, pci_dev,
                        eth_txgbe_dev_init, NULL);

        if (retval || eth_da.nb_representor_ports < 1)
                return retval;

        pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (pf_ethdev == NULL)
                return -ENODEV;

        return 0;
}

static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *ethdev;

        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (!ethdev)
                return -ENODEV;

        return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
}

static struct rte_pci_driver rte_txgbe_pmd = {
        .id_table = pci_id_txgbe_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
                     RTE_PCI_DRV_INTR_LSC,
        .probe = eth_txgbe_pci_probe,
        .remove = eth_txgbe_pci_remove,
};

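/*
 * Example of the pool math below: nb_rx_q = 4 selects 32 pools, so each
 * VF pool owns TXGBE_MAX_RX_QUEUE_NUM / 32 queues, and the PF default
 * pool starts right after the max_vfs * nb_q_per_pool VF queues.
 */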
static int
txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        switch (nb_rx_q) {
        case 1:
        case 2:
                RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
                break;
        case 4:
                RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
                break;
        default:
                return -EINVAL;
        }

        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
                TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
        RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
                pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
        return 0;
}

static int
txgbe_check_mq_mode(struct rte_eth_dev *dev)
{
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;
        uint16_t nb_tx_q = dev->data->nb_tx_queues;

        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_DCB:
                        PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
                        break;
                case ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode is not implemented yet */
                        PMD_INIT_LOG(ERR, "SRIOV active,"
                                        " unsupported mq_mode rx %d.",
                                        dev_conf->rxmode.mq_mode);
                        return -EINVAL;
                case ETH_MQ_RX_RSS:
                case ETH_MQ_RX_VMDQ_RSS:
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
                        if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
                                if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
                                        PMD_INIT_LOG(ERR, "SRIOV is active,"
                                                " invalid queue number"
                                                " for VMDQ RSS, allowed"
                                                " values are 1, 2 or 4.");
                                        return -EINVAL;
                                }
                        break;
                case ETH_MQ_RX_VMDQ_ONLY:
                case ETH_MQ_RX_NONE:
                        /* if no mq mode was configured, use the default scheme */
                        dev->data->dev_conf.rxmode.mq_mode =
                                ETH_MQ_RX_VMDQ_ONLY;
                        break;
                default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */
                        /* SRIOV only works in VMDq enable mode */
                        PMD_INIT_LOG(ERR, "SRIOV is active,"
                                        " wrong mq_mode rx %d.",
                                        dev_conf->rxmode.mq_mode);
                        return -EINVAL;
                }

                switch (dev_conf->txmode.mq_mode) {
                case ETH_MQ_TX_VMDQ_DCB:
                        PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
                        dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
                        break;
                default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
                        dev->data->dev_conf.txmode.mq_mode =
                                ETH_MQ_TX_VMDQ_ONLY;
                        break;
                }

                /* check valid queue number */
                if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
                    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
                        PMD_INIT_LOG(ERR, "SRIOV is active,"
                                        " nb_rx_q=%d nb_tx_q=%d queue number"
                                        " must be less than or equal to %d.",
                                        nb_rx_q, nb_tx_q,
                                        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
                        PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
                                          " not supported.");
                        return -EINVAL;
                }
                /* check configuration for VMDq+DCB mode */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;

                        if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
                                                TXGBE_VMDQ_DCB_NB_QUEUES);
                                return -EINVAL;
                        }
                        conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
                                                " nb_queue_pools must be %d or %d.",
                                                ETH_16_POOLS, ETH_32_POOLS);
                                return -EINVAL;
                        }
                }
                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_tx_conf *conf;

                        if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
                                                 TXGBE_VMDQ_DCB_NB_QUEUES);
                                return -EINVAL;
                        }
                        conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
                                                " nb_queue_pools != %d and"
                                                " nb_queue_pools != %d.",
                                                ETH_16_POOLS, ETH_32_POOLS);
                                return -EINVAL;
                        }
                }

                /* For DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;

                        conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
                                                " and nb_tcs != %d.",
                                                ETH_4_TCS, ETH_8_TCS);
                                return -EINVAL;
                        }
                }

                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;

                        conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
                                                " and nb_tcs != %d.",
                                                ETH_4_TCS, ETH_8_TCS);
                                return -EINVAL;
                        }
                }
        }
        return 0;
}

static int
txgbe_dev_configure(struct rte_eth_dev *dev)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
        int ret;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
                dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

        /* multiple queue mode checking */
        ret = txgbe_check_mq_mode(dev);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
                            ret);
                return ret;
        }

        /* set flag to update link status after init */
        intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

        /*
         * Initialize to TRUE. If any Rx queue doesn't meet the bulk
         * allocation preconditions, we will reset it.
         */
        adapter->rx_bulk_alloc_allowed = true;

        return 0;
}

static void
txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        uint32_t gpie;

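        /*
         * Route the PHY/SFP module interrupt (assumed to be wired to GPIO
         * bit 6 on these boards, based on its use here) to the misc cause.
         */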
        gpie = rd32(hw, TXGBE_GPIOINTEN);
        gpie |= TXGBE_GPIOBIT_6;
        wr32(hw, TXGBE_GPIOINTEN, gpie);
        intr->mask_misc |= TXGBE_ICRMISC_GPIO;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
txgbe_dev_start(struct rte_eth_dev *dev)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t intr_vector = 0;
        int err;
        bool link_up = false, negotiate = false;
        uint32_t speed = 0;
        uint32_t allowed_speeds = 0;
        int status;
        uint32_t *link_speeds;

        PMD_INIT_FUNC_TRACE();

        /* TXGBE devices don't support:
         *    - half duplex (checked afterwards for valid speeds)
         *    - fixed speed: TODO implement
         */
        if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
                PMD_INIT_LOG(ERR,
                "Invalid link_speeds for port %u, fixed speed not supported",
                                dev->data->port_id);
                return -EINVAL;
        }

        /* Stop the link setup handler before resetting the HW. */
        rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

        /* disable uio/vfio intr/eventfd mapping */
        rte_intr_disable(intr_handle);

        /* stop adapter */
        hw->adapter_stopped = 0;
        txgbe_stop_hw(hw);

        /* reinitialize adapter
         * this calls reset and start
         */
        hw->nb_rx_queues = dev->data->nb_rx_queues;
        hw->nb_tx_queues = dev->data->nb_tx_queues;
        status = txgbe_pf_reset_hw(hw);
        if (status != 0)
                return -1;
        hw->mac.start_hw(hw);
        hw->mac.get_link_status = true;

        txgbe_dev_phy_intr_setup(dev);

        /* check and configure queue intr-vector mapping */
        if ((rte_intr_cap_multiple(intr_handle) ||
             !RTE_ETH_DEV_SRIOV(dev).active) &&
            dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
        }

        if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
                intr_handle->intr_vec =
                        rte_zmalloc("intr_vec",
                                    dev->data->nb_rx_queues * sizeof(int), 0);
                if (intr_handle->intr_vec == NULL) {
                        PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
                                     " intr_vec", dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }

        /* configure MSI-X for sleep until Rx interrupt */
        txgbe_configure_msix(dev);

        /* initialize transmission unit */
        txgbe_dev_tx_init(dev);

        /* This can fail when allocating mbufs for descriptor rings */
        err = txgbe_dev_rx_init(dev);
        if (err) {
                PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
                goto error;
        }

        err = txgbe_dev_rxtx_start(dev);
        if (err < 0) {
                PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
                goto error;
        }

        /* Skip link setup if loopback mode is enabled. */
        if (hw->mac.type == txgbe_mac_raptor &&
            dev->data->dev_conf.lpbk_mode)
                goto skip_link_setup;

        if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
                err = hw->mac.setup_sfp(hw);
                if (err)
                        goto error;
        }

        if (hw->phy.media_type == txgbe_media_type_copper) {
                /* Turn on the copper */
                hw->phy.set_phy_power(hw, true);
        } else {
                /* Turn on the laser */
                hw->mac.enable_tx_laser(hw);
        }

        err = hw->mac.check_link(hw, &speed, &link_up, 0);
        if (err)
                goto error;
        dev->data->dev_link.link_status = link_up;

        err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
        if (err)
                goto error;

        allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
                        ETH_LINK_SPEED_10G;

        link_speeds = &dev->data->dev_conf.link_speeds;
        if (*link_speeds & ~allowed_speeds) {
                PMD_INIT_LOG(ERR, "Invalid link setting");
                goto error;
        }
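
        /* Translate the ethdev link_speeds bitmap into the hardware's
         * TXGBE_LINK_SPEED_* advertisement mask; autoneg advertises all
         * supported full-duplex speeds.
         */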
        speed = 0x0;
        if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
                speed = (TXGBE_LINK_SPEED_100M_FULL |
                         TXGBE_LINK_SPEED_1GB_FULL |
                         TXGBE_LINK_SPEED_10GB_FULL);
        } else {
                if (*link_speeds & ETH_LINK_SPEED_10G)
                        speed |= TXGBE_LINK_SPEED_10GB_FULL;
                if (*link_speeds & ETH_LINK_SPEED_5G)
                        speed |= TXGBE_LINK_SPEED_5GB_FULL;
                if (*link_speeds & ETH_LINK_SPEED_2_5G)
                        speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
                if (*link_speeds & ETH_LINK_SPEED_1G)
                        speed |= TXGBE_LINK_SPEED_1GB_FULL;
                if (*link_speeds & ETH_LINK_SPEED_100M)
                        speed |= TXGBE_LINK_SPEED_100M_FULL;
        }

        err = hw->mac.setup_link(hw, speed, link_up);
        if (err)
                goto error;

skip_link_setup:

        if (rte_intr_allow_others(intr_handle)) {
                /* check if lsc interrupt is enabled */
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        txgbe_dev_lsc_interrupt_setup(dev, TRUE);
                else
                        txgbe_dev_lsc_interrupt_setup(dev, FALSE);
                txgbe_dev_macsec_interrupt_setup(dev);
                txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
        } else {
                rte_intr_callback_unregister(intr_handle,
                                             txgbe_dev_interrupt_handler, dev);
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        PMD_INIT_LOG(INFO, "lsc won't enable because of"
                                     " no intr multiplex");
        }

        /* check if rxq interrupt is enabled */
        if (dev->data->dev_conf.intr_conf.rxq != 0 &&
            rte_intr_dp_is_en(intr_handle))
                txgbe_dev_rxq_interrupt_setup(dev);

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* resume enabled intr since hw reset */
        txgbe_enable_intr(dev);

        /*
         * Update link status right before return, because it may
         * start link configuration process in a separate thread.
         */
        txgbe_dev_link_update(dev, 0);

        wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);

        return 0;

error:
        PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
        txgbe_dev_clear_queues(dev);
        return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
txgbe_dev_stop(struct rte_eth_dev *dev)
{
        struct rte_eth_link link;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

        if (hw->adapter_stopped)
                return 0;

        PMD_INIT_FUNC_TRACE();

        rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

        /* disable interrupts */
        txgbe_disable_intr(hw);

        /* reset the NIC */
        txgbe_pf_reset_hw(hw);
        hw->adapter_stopped = 0;

        /* stop adapter */
        txgbe_stop_hw(hw);

        if (hw->phy.media_type == txgbe_media_type_copper) {
                /* Turn off the copper */
                hw->phy.set_phy_power(hw, false);
        } else {
                /* Turn off the laser */
                hw->mac.disable_tx_laser(hw);
        }

        txgbe_dev_clear_queues(dev);

        /* Clear stored conf */
        dev->data->scattered_rx = 0;
        dev->data->lro = 0;

        /* Clear recorded link status */
        memset(&link, 0, sizeof(link));
        rte_eth_linkstatus_set(dev, &link);

        if (!rte_intr_allow_others(intr_handle))
                /* resume to the default handler */
                rte_intr_callback_register(intr_handle,
                                           txgbe_dev_interrupt_handler,
                                           (void *)dev);

        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        if (intr_handle->intr_vec != NULL) {
                rte_free(intr_handle->intr_vec);
                intr_handle->intr_vec = NULL;
        }

        wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);

        hw->adapter_stopped = true;
        dev->data->dev_started = 0;

        return 0;
}

/*
 * Set device link up: enable tx.
 */
static int
txgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        if (hw->phy.media_type == txgbe_media_type_copper) {
                /* Turn on the copper */
                hw->phy.set_phy_power(hw, true);
        } else {
                /* Turn on the laser */
                hw->mac.enable_tx_laser(hw);
                txgbe_dev_link_update(dev, 0);
        }

        return 0;
}

/*
 * Set device link down: disable tx.
 */
static int
txgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        if (hw->phy.media_type == txgbe_media_type_copper) {
                /* Turn off the copper */
                hw->phy.set_phy_power(hw, false);
        } else {
                /* Turn off the laser */
                hw->mac.disable_tx_laser(hw);
                txgbe_dev_link_update(dev, 0);
        }

        return 0;
}

/*
 * Reset and stop device.
 */
static int
txgbe_dev_close(struct rte_eth_dev *dev)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        int retries = 0;
        int ret;

        PMD_INIT_FUNC_TRACE();

        txgbe_pf_reset_hw(hw);

        ret = txgbe_dev_stop(dev);

        txgbe_dev_free_queues(dev);

        /* reprogram the RAR[0] in case user changed it. */
        txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);

        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);

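        /*
         * The handler may still be running on the interrupt thread;
         * -EAGAIN from unregister means "still busy", so retry with a
         * 100 ms delay for a bounded number of attempts.
         */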
        do {
                ret = rte_intr_callback_unregister(intr_handle,
                                txgbe_dev_interrupt_handler, dev);
                if (ret >= 0 || ret == -ENOENT) {
                        break;
                } else if (ret != -EAGAIN) {
                        PMD_INIT_LOG(ERR,
                                "intr callback unregister failed: %d",
                                ret);
                }
                rte_delay_ms(100);
        } while (retries++ < (10 + TXGBE_LINK_UP_TIME));

        /* cancel the delayed handler before removing the device */
        rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);

        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

        rte_free(dev->data->hash_mac_addrs);
        dev->data->hash_mac_addrs = NULL;

        return ret;
}

/*
 * Reset PF device.
 */
static int
txgbe_dev_reset(struct rte_eth_dev *dev)
{
        int ret;

        /* When a DPDK PMD PF begins to reset the PF port, it should notify
         * all its VFs to make them align with it. The detailed notification
         * mechanism is PMD specific. As for the txgbe PF, it is rather
         * complex. To avoid unexpected behavior in VFs, reset of a PF with
         * SR-IOV activated is currently not supported. It might be supported
         * later.
         */
        if (dev->data->sriov.active)
                return -ENOTSUP;

        ret = eth_txgbe_dev_uninit(dev);
        if (ret)
                return ret;

        ret = eth_txgbe_dev_init(dev, NULL);

        return ret;
}

static int
txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
        dev_info->min_rx_bufsize = 1024;
        dev_info->max_rx_pktlen = 15872;
        dev_info->max_mac_addrs = hw->mac.num_rar_entries;
        dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
        dev_info->max_vfs = pci_dev->max_vfs;
        dev_info->max_vmdq_pools = ETH_64_POOLS;
        dev_info->vmdq_queue_num = dev_info->max_rx_queues;
        dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
        dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
                                     dev_info->rx_queue_offload_capa);
        dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
        dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
                        .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
                        .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
                .offloads = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
                        .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
                        .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
                .offloads = 0,
        };

        dev_info->rx_desc_lim = rx_desc_lim;
        dev_info->tx_desc_lim = tx_desc_lim;

        dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
        dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
        dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;

        dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
        dev_info->speed_capa |= ETH_LINK_SPEED_100M;

        /* Driver-preferred Rx/Tx parameters */
        dev_info->default_rxportconf.burst_size = 32;
        dev_info->default_txportconf.burst_size = 32;
        dev_info->default_rxportconf.nb_queues = 1;
        dev_info->default_txportconf.nb_queues = 1;
        dev_info->default_rxportconf.ring_size = 256;
        dev_info->default_txportconf.ring_size = 256;

        return 0;
}

const uint32_t *
txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
        if (dev->rx_pkt_burst == txgbe_recv_pkts ||
            dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
            dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
            dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
                return txgbe_get_supported_ptypes();

        return NULL;
}

void
txgbe_dev_setup_link_alarm_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        u32 speed;
        bool autoneg = false;

        speed = hw->phy.autoneg_advertised;
        if (!speed)
                hw->mac.get_link_capabilities(hw, &speed, &autoneg);

        hw->mac.setup_link(hw, speed, true);

        intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
}

/* return 0 means link status changed, -1 means not changed */
int
txgbe_dev_link_update_share(struct rte_eth_dev *dev,
                            int wait_to_complete)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct rte_eth_link link;
        u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        bool link_up;
        int err;
        int wait = 1;

        memset(&link, 0, sizeof(link));
        link.link_status = ETH_LINK_DOWN;
        link.link_speed = ETH_SPEED_NUM_NONE;
        link.link_duplex = ETH_LINK_HALF_DUPLEX;
        link.link_autoneg = ETH_LINK_AUTONEG;

        hw->mac.get_link_status = true;

        if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
                return rte_eth_linkstatus_set(dev, &link);

        /* check if it needs to wait to complete, if lsc interrupt is enabled */
        if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
                wait = 0;

        err = hw->mac.check_link(hw, &link_speed, &link_up, wait);

        if (err != 0) {
                link.link_speed = ETH_SPEED_NUM_100M;
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
                return rte_eth_linkstatus_set(dev, &link);
        }

        if (link_up == 0) {
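                /*
                 * Fiber links may need another setup_link() attempt; defer it
                 * to an alarm callback so this call returns quickly, and let
                 * TXGBE_FLAG_NEED_LINK_CONFIG keep reporting the link as down
                 * until the alarm has run.
                 */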
                if (hw->phy.media_type == txgbe_media_type_fiber) {
                        intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
                        rte_eal_alarm_set(10,
                                txgbe_dev_setup_link_alarm_handler, dev);
                }
                return rte_eth_linkstatus_set(dev, &link);
        }

        intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
        link.link_status = ETH_LINK_UP;
        link.link_duplex = ETH_LINK_FULL_DUPLEX;

        switch (link_speed) {
        default:
        case TXGBE_LINK_SPEED_UNKNOWN:
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
                link.link_speed = ETH_SPEED_NUM_100M;
                break;

        case TXGBE_LINK_SPEED_100M_FULL:
                link.link_speed = ETH_SPEED_NUM_100M;
                break;

        case TXGBE_LINK_SPEED_1GB_FULL:
                link.link_speed = ETH_SPEED_NUM_1G;
                break;

        case TXGBE_LINK_SPEED_2_5GB_FULL:
                link.link_speed = ETH_SPEED_NUM_2_5G;
                break;

        case TXGBE_LINK_SPEED_5GB_FULL:
                link.link_speed = ETH_SPEED_NUM_5G;
                break;

        case TXGBE_LINK_SPEED_10GB_FULL:
                link.link_speed = ETH_SPEED_NUM_10G;
                break;
        }

        return rte_eth_linkstatus_set(dev, &link);
}

static int
txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
        return txgbe_dev_link_update_share(dev, wait_to_complete);
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

        txgbe_dev_link_status_print(dev);
        if (on)
                intr->mask_misc |= TXGBE_ICRMISC_LSC;
        else
                intr->mask_misc &= ~TXGBE_ICRMISC_LSC;

        return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

        intr->mask[0] |= TXGBE_ICR_MASK;
        intr->mask[1] |= TXGBE_ICR_MASK;

        return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

        intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;

        return 0;
}

/*
 * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
        uint32_t eicr;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

        /* clear all cause mask */
        txgbe_disable_intr(hw);

        /* read-on-clear nic registers here */
        eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
        PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

        intr->flags = 0;

        /* set flag for async link update */
        if (eicr & TXGBE_ICRMISC_LSC)
                intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

        if (eicr & TXGBE_ICRMISC_VFMBX)
                intr->flags |= TXGBE_FLAG_MAILBOX;

        if (eicr & TXGBE_ICRMISC_LNKSEC)
                intr->flags |= TXGBE_FLAG_MACSEC;

        if (eicr & TXGBE_ICRMISC_GPIO)
                intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;

        return 0;
}

/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  void
 */
static void
txgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_eth_link link;

        rte_eth_linkstatus_get(dev, &link);

        if (link.link_status) {
                PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
                                        (int)(dev->data->port_id),
                                        (unsigned int)link.link_speed,
                        link.link_duplex == ETH_LINK_FULL_DUPLEX ?
                                        "full-duplex" : "half-duplex");
        } else {
                PMD_INIT_LOG(INFO, " Port %d: Link Down",
                                (int)(dev->data->port_id));
        }
        PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
                                pci_dev->addr.domain,
                                pci_dev->addr.bus,
                                pci_dev->addr.devid,
                                pci_dev->addr.function);
}

/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                           struct rte_intr_handle *intr_handle)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        int64_t timeout;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

        if (intr->flags & TXGBE_FLAG_MAILBOX)
                intr->flags &= ~TXGBE_FLAG_MAILBOX;

        if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
                hw->phy.handle_lasi(hw);
                intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
        }

        if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
                struct rte_eth_link link;

                /* get the link status before link update, for later prediction */
                rte_eth_linkstatus_get(dev, &link);

                txgbe_dev_link_update(dev, 0);

                /* likely to come up */
                if (!link.link_status)
                        /* handle it 1 sec later, waiting for it to stabilize */
                        timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
                /* likely to go down */
                else
                        /* handle it 4 sec later, waiting for it to stabilize */
                        timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;

                txgbe_dev_link_status_print(dev);
                if (rte_eal_alarm_set(timeout * 1000,
                                      txgbe_dev_interrupt_delayed_handler,
                                      (void *)dev) < 0) {
                        PMD_DRV_LOG(ERR, "Error setting alarm");
                } else {
                        /* remember original mask */
                        intr->mask_misc_orig = intr->mask_misc;
                        /* only disable lsc interrupt */
                        intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
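                        /* LSC stays masked until the delayed handler runs
                         * and restores mask_misc_orig, so the link can
                         * settle without re-triggering this handler.
                         */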
                }
        }

        PMD_DRV_LOG(DEBUG, "enable intr immediately");
        txgbe_enable_intr(dev);
        rte_intr_enable(intr_handle);

        return 0;
}

/**
 * Interrupt handler which shall be registered as an alarm callback for
 * delayed handling of a specific interrupt, to wait for the NIC state to
 * become stable. As the interrupt state is not stable for txgbe right
 * after the link goes down, it needs to wait 4 seconds to get a stable
 * status.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_delayed_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        uint32_t eicr;

        txgbe_disable_intr(hw);

        eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];

        if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
                hw->phy.handle_lasi(hw);
                intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
        }

        if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
                txgbe_dev_link_update(dev, 0);
                intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
                txgbe_dev_link_status_print(dev);
                rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
                                              NULL);
        }

        if (intr->flags & TXGBE_FLAG_MACSEC) {
                rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
                                              NULL);
                intr->flags &= ~TXGBE_FLAG_MACSEC;
        }

        /* restore original mask */
        intr->mask_misc = intr->mask_misc_orig;
        intr->mask_misc_orig = 0;

        PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
        txgbe_enable_intr(dev);
        rte_intr_enable(intr_handle);
}

/**
 * Interrupt handler triggered by the NIC for handling a specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

        txgbe_dev_interrupt_get_status(dev);
        txgbe_dev_interrupt_action(dev, dev->intr_handle);
}

static int
txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
                                uint32_t index, uint32_t pool)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        uint32_t enable_addr = 1;

        return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
                             pool, enable_addr);
}

static void
txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        txgbe_clear_rar(hw, index);
}

static int
txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        txgbe_remove_rar(dev, 0);
        txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);

        return 0;
}

static uint32_t
txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
{
        uint32_t vector = 0;

        switch (hw->mac.mc_filter_type) {
        case 0:   /* use bits [47:36] of the address */
                vector = ((uc_addr->addr_bytes[4] >> 4) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 4));
                break;
        case 1:   /* use bits [46:35] of the address */
                vector = ((uc_addr->addr_bytes[4] >> 3) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 5));
                break;
        case 2:   /* use bits [45:34] of the address */
                vector = ((uc_addr->addr_bytes[4] >> 2) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 6));
                break;
        case 3:   /* use bits [43:32] of the address */
                vector = ((uc_addr->addr_bytes[4]) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 8));
                break;
        default:  /* Invalid mc_filter_type */
                break;
        }

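        /*
         * Worked example for filter type 0 (bits [47:36]): an address with
         * addr_bytes[4] = 0xAB and addr_bytes[5] = 0xCD yields
         * vector = (0xAB >> 4) | (0xCD << 4) = 0x0A | 0xCD0 = 0xCDA.
         */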
        /* vector can only be 12 bits wide or the table boundary is exceeded */
        vector &= 0xFFF;
        return vector;
}
1472
1473 static int
1474 txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
1475                         struct rte_ether_addr *mac_addr, uint8_t on)
1476 {
1477         uint32_t vector;
1478         uint32_t uta_idx;
1479         uint32_t reg_val;
1480         uint32_t uta_mask;
1481         uint32_t psrctl;
1482
1483         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1484         struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
1485
1486         /* The UTA table only exists on PF hardware */
1487         if (hw->mac.type < txgbe_mac_raptor)
1488                 return -ENOTSUP;
1489
1490         vector = txgbe_uta_vector(hw, mac_addr);
1491         uta_idx = (vector >> 5) & 0x7F;
1492         uta_mask = 0x1UL << (vector & 0x1F);
1493
1494         if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
1495                 return 0;
1496
1497         reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
1498         if (on) {
1499                 uta_info->uta_in_use++;
1500                 reg_val |= uta_mask;
1501                 uta_info->uta_shadow[uta_idx] |= uta_mask;
1502         } else {
1503                 uta_info->uta_in_use--;
1504                 reg_val &= ~uta_mask;
1505                 uta_info->uta_shadow[uta_idx] &= ~uta_mask;
1506         }
1507
1508         wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);
1509
1510         psrctl = rd32(hw, TXGBE_PSRCTL);
1511         if (uta_info->uta_in_use > 0)
1512                 psrctl |= TXGBE_PSRCTL_UCHFENA;
1513         else
1514                 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
1515
1516         psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
1517         psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
1518         wr32(hw, TXGBE_PSRCTL, psrctl);
1519
1520         return 0;
1521 }
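/*
 * Continuing the worked example from txgbe_uta_vector(): vector 0xCDA
 * selects table word uta_idx = (0xCDA >> 5) & 0x7F = 0x66 and bit
 * 0xCDA & 0x1F = 26 within it, so each address toggles exactly one bit
 * of the 128-word by 32-bit unicast hash table mirrored in uta_shadow.
 */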
1522
1523 static int
1524 txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
1525 {
1526         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1527         struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
1528         uint32_t psrctl;
1529         int i;
1530
1531         /* The UTA table only exists on PF hardware */
1532         if (hw->mac.type < txgbe_mac_raptor)
1533                 return -ENOTSUP;
1534
1535         if (on) {
1536                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
1537                         uta_info->uta_shadow[i] = ~0;
1538                         wr32(hw, TXGBE_UCADDRTBL(i), ~0);
1539                 }
1540         } else {
1541                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
1542                         uta_info->uta_shadow[i] = 0;
1543                         wr32(hw, TXGBE_UCADDRTBL(i), 0);
1544                 }
1545         }
1546
1547         psrctl = rd32(hw, TXGBE_PSRCTL);
1548         if (on)
1549                 psrctl |= TXGBE_PSRCTL_UCHFENA;
1550         else
1551                 psrctl &= ~TXGBE_PSRCTL_UCHFENA;
1552
1553         psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
1554         psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
1555         wr32(hw, TXGBE_PSRCTL, psrctl);
1556
1557         return 0;
1558 }
1559
1560 static int
1561 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1562 {
1563         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1564         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1565         uint32_t mask;
1566         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1567
1568         if (queue_id < 32) {
1569                 mask = rd32(hw, TXGBE_IMS(0));
1570                 mask |= (1 << queue_id);
1571                 wr32(hw, TXGBE_IMS(0), mask);
1572         } else if (queue_id < 64) {
1573                 mask = rd32(hw, TXGBE_IMS(1));
1574                 mask |= (1 << (queue_id - 32));
1575                 wr32(hw, TXGBE_IMS(1), mask);
1576         }
1577         rte_intr_enable(intr_handle);
1578
1579         return 0;
1580 }
1581
1582 static int
1583 txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1584 {
1585         uint32_t mask;
1586         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1587
1588         if (queue_id < 32) {
1589                 mask = rd32(hw, TXGBE_IMS(0));
1590                 mask &= ~(1 << queue_id);
1591                 wr32(hw, TXGBE_IMS(0), mask);
1592         } else if (queue_id < 64) {
1593                 mask = rd32(hw, TXGBE_IMS(1));
1594                 mask &= ~(1 << (queue_id - 32));
1595                 wr32(hw, TXGBE_IMS(1), mask);
1596         }
1597
1598         return 0;
1599 }
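/*
 * Queue-to-mask mapping used by the two helpers above: queues 0..31 map
 * 1:1 onto bits of the first mask register, while e.g. queue 33 lands in
 * the second register as bit 1 (33 - 32). Queues at or beyond 64 are
 * silently ignored, matching the 64-queue span of the hardware masks.
 */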
1600
1601 /**
1602  * Set the IVAR registers, mapping interrupt causes to vectors
1603  * @param hw
1604  *  pointer to txgbe_hw struct
1605  * @param direction
1606  *  0 for Rx, 1 for Tx, -1 for other causes
1607  * @param queue
1608  *  queue to map the corresponding interrupt to
1609  * @param msix_vector
1610  *  the vector to map to the corresponding queue
1611  */
1612 void
1613 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
1614                    uint8_t queue, uint8_t msix_vector)
1615 {
1616         uint32_t tmp, idx;
1617
1618         if (direction == -1) {
1619                 /* other causes */
1620                 msix_vector |= TXGBE_IVARMISC_VLD;
1621                 idx = 0;
1622                 tmp = rd32(hw, TXGBE_IVARMISC);
1623                 tmp &= ~(0xFF << idx);
1624                 tmp |= (msix_vector << idx);
1625                 wr32(hw, TXGBE_IVARMISC, tmp);
1626         } else {
1627                 /* rx or tx causes */
1628                 /* Workaround for ICR lost */
1629                 idx = ((16 * (queue & 1)) + (8 * direction));
1630                 tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
1631                 tmp &= ~(0xFF << idx);
1632                 tmp |= (msix_vector << idx);
1633                 wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
1634         }
1635 }
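/*
 * Worked example: mapping the Rx cause of queue 5 (direction 0) gives
 * idx = 16 * (5 & 1) + 8 * 0 = 16, so the vector is written to bits
 * [23:16] of TXGBE_IVAR(2). Each IVAR register therefore packs four
 * 8-bit entries: the Rx and Tx causes of an even queue in the low half
 * and those of the following odd queue in the high half.
 */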
1636
1637 /**
1638  * Sets up the hardware to properly generate MSI-X interrupts
1639  * @param dev
1640  *  pointer to the rte_eth_dev structure
1641  */
1642 static void
1643 txgbe_configure_msix(struct rte_eth_dev *dev)
1644 {
1645         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1646         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1647         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1648         uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
1649         uint32_t vec = TXGBE_MISC_VEC_ID;
1650         uint32_t gpie;
1651
1652         /* Won't configure the MSI-X registers if no mapping is done
1653          * between intr vector and event fd,
1654          * but if MSI-X has been enabled already, we need to configure
1655          * auto clean, auto mask and throttling.
1656          */
1657         gpie = rd32(hw, TXGBE_GPIE);
1658         if (!rte_intr_dp_is_en(intr_handle) &&
1659             !(gpie & TXGBE_GPIE_MSIX))
1660                 return;
1661
1662         if (rte_intr_allow_others(intr_handle)) {
1663                 base = TXGBE_RX_VEC_START;
1664                 vec = base;
1665         }
1666
1667         /* setup GPIE for MSI-x mode */
1668         gpie = rd32(hw, TXGBE_GPIE);
1669         gpie |= TXGBE_GPIE_MSIX;
1670         wr32(hw, TXGBE_GPIE, gpie);
1671
1672         /* Populate the IVAR table and set the ITR values to the
1673          * corresponding register.
1674          */
1675         if (rte_intr_dp_is_en(intr_handle)) {
1676                 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
1677                         queue_id++) {
1678                         /* by default, 1:1 mapping */
1679                         txgbe_set_ivar_map(hw, 0, queue_id, vec);
1680                         intr_handle->intr_vec[queue_id] = vec;
1681                         if (vec < base + intr_handle->nb_efd - 1)
1682                                 vec++;
1683                 }
1684
1685                 txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
1686         }
1687         wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
1688                         TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
1689                         | TXGBE_ITR_WRDSA);
1690 }
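/*
 * Example of the resulting layout, assuming 4 Rx queues and
 * rte_intr_allow_others() returning true: queue 0 gets vector
 * TXGBE_RX_VEC_START and queues 1..3 the vectors after it, for as long
 * as event fds remain; once vec reaches base + nb_efd - 1 the remaining
 * queues share that last vector, and the misc causes stay on
 * TXGBE_MISC_VEC_ID.
 */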
1691
1692 static u8 *
1693 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
1694                         u8 **mc_addr_ptr, u32 *vmdq)
1695 {
1696         u8 *mc_addr;
1697
1698         *vmdq = 0;
1699         mc_addr = *mc_addr_ptr;
1700         *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
1701         return mc_addr;
1702 }
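/*
 * The iterator above just walks a packed array of rte_ether_addr. A
 * minimal usage sketch (hypothetical variable names):
 *
 *	u8 *pos = (u8 *)mc_addr_set;
 *	u32 vmdq;
 *	u8 *addr = txgbe_dev_addr_list_itr(hw, &pos, &vmdq);
 *
 * after which pos points at the next entry and vmdq is forced to pool 0;
 * txgbe_update_mc_addr_list() invokes it once per address below.
 */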
1703
1704 int
1705 txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1706                           struct rte_ether_addr *mc_addr_set,
1707                           uint32_t nb_mc_addr)
1708 {
1709         struct txgbe_hw *hw;
1710         u8 *mc_addr_list;
1711
1712         hw = TXGBE_DEV_HW(dev);
1713         mc_addr_list = (u8 *)mc_addr_set;
1714         return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
1715                                          txgbe_dev_addr_list_itr, TRUE);
1716 }
1717
1718 static const struct eth_dev_ops txgbe_eth_dev_ops = {
1719         .dev_configure              = txgbe_dev_configure,
1720         .dev_infos_get              = txgbe_dev_info_get,
1721         .dev_start                  = txgbe_dev_start,
1722         .dev_stop                   = txgbe_dev_stop,
1723         .dev_set_link_up            = txgbe_dev_set_link_up,
1724         .dev_set_link_down          = txgbe_dev_set_link_down,
1725         .dev_close                  = txgbe_dev_close,
1726         .dev_reset                  = txgbe_dev_reset,
1727         .link_update                = txgbe_dev_link_update,
1728         .dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
1729         .rx_queue_start             = txgbe_dev_rx_queue_start,
1730         .rx_queue_stop              = txgbe_dev_rx_queue_stop,
1731         .tx_queue_start             = txgbe_dev_tx_queue_start,
1732         .tx_queue_stop              = txgbe_dev_tx_queue_stop,
1733         .rx_queue_setup             = txgbe_dev_rx_queue_setup,
1734         .rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
1735         .rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
1736         .rx_queue_release           = txgbe_dev_rx_queue_release,
1737         .tx_queue_setup             = txgbe_dev_tx_queue_setup,
1738         .tx_queue_release           = txgbe_dev_tx_queue_release,
1739         .mac_addr_add               = txgbe_add_rar,
1740         .mac_addr_remove            = txgbe_remove_rar,
1741         .mac_addr_set               = txgbe_set_default_mac_addr,
1742         .uc_hash_table_set          = txgbe_uc_hash_table_set,
1743         .uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
1744         .set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
1745         .rxq_info_get               = txgbe_rxq_info_get,
1746         .txq_info_get               = txgbe_txq_info_get,
1747 };
1748
1749 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
1750 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
1751 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
1752
1753 RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
1754 RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
1755
1756 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
1757         RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
1758 #endif
1759 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
1760         RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
1761 #endif
1762
1763 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
1764         RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
1765 #endif