net/txgbe: add autoneg control read and write
drivers/net/txgbe/txgbe_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <rte_common.h>
#include <rte_ethdev_pci.h>

#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_alarm.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"

static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int  txgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int txgbe_dev_close(struct rte_eth_dev *dev);
static int txgbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);

static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                                      struct rte_intr_handle *handle);
static void txgbe_dev_interrupt_handler(void *param);
static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_txgbe_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = TXGBE_RING_DESC_MAX,
        .nb_min = TXGBE_RING_DESC_MIN,
        .nb_align = TXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = TXGBE_RING_DESC_MAX,
        .nb_min = TXGBE_RING_DESC_MIN,
        .nb_align = TXGBE_TXD_ALIGN,
        .nb_seg_max = TXGBE_TX_MAX_SEG,
        .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops txgbe_eth_dev_ops;

static inline int
txgbe_is_sfp(struct txgbe_hw *hw)
{
        switch (hw->phy.type) {
        case txgbe_phy_sfp_avago:
        case txgbe_phy_sfp_ftl:
        case txgbe_phy_sfp_intel:
        case txgbe_phy_sfp_unknown:
        case txgbe_phy_sfp_tyco_passive:
        case txgbe_phy_sfp_unknown_passive:
                return 1;
        default:
                return 0;
        }
}

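/*
 * A note on the mask registers used below (inferred from how the code
 * uses them, not from the datasheet): IMC looks like an "interrupt mask
 * clear" register and IMS an "interrupt mask set" register, as on ixgbe.
 * Writing TXGBE_IMC_MASK to IMC therefore unmasks the queue vectors,
 * while writing it to IMS masks them again.
 */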
static inline void
txgbe_enable_intr(struct rte_eth_dev *dev)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        wr32(hw, TXGBE_IENMISC, intr->mask_misc);
        wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
        wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
        txgbe_flush(hw);
}

static void
txgbe_disable_intr(struct txgbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
        wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
        wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
        txgbe_flush(hw);
}

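/*
 * Per-port initialization, run once per probed device: bind the ops
 * table, reserve the interrupt status block, bring up the shared (base)
 * code, reset the MAC, and register the misc interrupt handler.
 */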
static int
eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint16_t csum;
        int err;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &txgbe_eth_dev_ops;

        rte_eth_copy_pci_info(eth_dev, pci_dev);

        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
        hw->allow_unsupported_sfp = 1;

        /* Reserve memory for interrupt status block */
        mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
                16, TXGBE_ALIGN, SOCKET_ID_ANY);
        if (mz == NULL)
                return -ENOMEM;

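        /* The hardware appears to DMA its interrupt cause registers into
         * this block; the interrupt handlers read the causes from
         * hw->isb_mem rather than over PCIe (see
         * txgbe_dev_interrupt_get_status()).
         */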
        hw->isb_dma = TMZ_PADDR(mz);
        hw->isb_mem = TMZ_VADDR(mz);

        /* Initialize the shared code (base driver) */
        err = txgbe_init_shared_code(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
                return -EIO;
        }

        err = hw->rom.init_params(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
                return -EIO;
        }

        /* Make sure we have a good EEPROM before we read from it */
        err = hw->rom.validate_checksum(hw, &csum);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
                return -EIO;
        }

        err = hw->mac.init_hw(hw);

        /*
         * Devices with copper PHYs will fail to initialise if txgbe_init_hw()
         * is called too soon after the kernel driver is unbound and rebound.
         * The failure occurs in txgbe_identify_phy() for all devices,
         * but for non-copper devices, txgbe_identify_sfp_module() is
         * also called. See txgbe_identify_phy(). The reason for the
         * failure is not known, and it only occurs when virtualisation
         * features are disabled in the BIOS. A delay of 200ms was found
         * to be enough by trial-and-error, and is doubled to be safe.
         */
        if (err && hw->phy.media_type == txgbe_media_type_copper) {
                rte_delay_ms(200);
                err = hw->mac.init_hw(hw);
        }

        if (err == TXGBE_ERR_SFP_NOT_PRESENT)
                err = 0;

        if (err == TXGBE_ERR_EEPROM_VERSION) {
                PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
                             "LOM.  Please be aware there may be issues associated "
                             "with your hardware.");
                PMD_INIT_LOG(ERR, "If you are experiencing problems "
                             "please contact your hardware representative "
                             "who provided you with this hardware.");
        } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
                PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
        }
        if (err) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
                return -EIO;
        }

        /* disable interrupt */
        txgbe_disable_intr(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
                                               hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %u bytes needed to store "
                             "MAC addresses",
                             RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        /* Allocate memory for storing hash filter MAC addresses */
        eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
                        RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
        if (eth_dev->data->hash_mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %d bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
                return -ENOMEM;
        }

        if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
                PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
                             (int)hw->mac.type, (int)hw->phy.type,
                             (int)hw->phy.sfp_type);
        else
                PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
                             (int)hw->mac.type, (int)hw->phy.type);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        rte_intr_callback_register(intr_handle,
                                   txgbe_dev_interrupt_handler, eth_dev);

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* enable the supported interrupts */
        txgbe_enable_intr(eth_dev);

        return 0;
}

static int
eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        txgbe_dev_close(eth_dev);

        return 0;
}

static int
eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *pf_ethdev;
        struct rte_eth_devargs eth_da;
        int retval;

        if (pci_dev->device.devargs) {
                retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
                                &eth_da);
                if (retval)
                        return retval;
        } else {
                memset(&eth_da, 0, sizeof(eth_da));
        }

        retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                        sizeof(struct txgbe_adapter),
                        eth_dev_pci_specific_init, pci_dev,
                        eth_txgbe_dev_init, NULL);

        if (retval || eth_da.nb_representor_ports < 1)
                return retval;

        pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (pf_ethdev == NULL)
                return -ENODEV;

        return 0;
}

static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *ethdev;

        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (!ethdev)
                return -ENODEV;

        return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
}

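/*
 * RTE_PCI_DRV_NEED_MAPPING asks the EAL to map the PCI BARs before
 * probing; RTE_PCI_DRV_INTR_LSC advertises link-state-change interrupt
 * support to applications.
 */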
static struct rte_pci_driver rte_txgbe_pmd = {
        .id_table = pci_id_txgbe_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
                     RTE_PCI_DRV_INTR_LSC,
        .probe = eth_txgbe_pci_probe,
        .remove = eth_txgbe_pci_remove,
};

static int
txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        switch (nb_rx_q) {
        case 1:
        case 2:
                RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
                break;
        case 4:
                RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
                break;
        default:
                return -EINVAL;
        }

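        /* Split the hardware queues evenly across the active pools (e.g.
         * with 64 pools each pool owns TXGBE_MAX_RX_QUEUE_NUM / 64
         * queues); the PF's default pool starts right after the queues
         * owned by the VFs.
         */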
        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
                TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
        RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
                pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
        return 0;
}

static int
txgbe_check_mq_mode(struct rte_eth_dev *dev)
{
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;
        uint16_t nb_tx_q = dev->data->nb_tx_queues;

        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_DCB:
                        PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
                        break;
                case ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode is not implemented yet */
                        PMD_INIT_LOG(ERR, "SRIOV active,"
                                        " unsupported mq_mode rx %d.",
                                        dev_conf->rxmode.mq_mode);
                        return -EINVAL;
                case ETH_MQ_RX_RSS:
                case ETH_MQ_RX_VMDQ_RSS:
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
                        if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
                                if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
                                        PMD_INIT_LOG(ERR, "SRIOV is active,"
                                                " invalid queue number"
                                                " for VMDQ RSS, allowed"
                                                " values are 1, 2 or 4.");
                                        return -EINVAL;
                                }
                        break;
                case ETH_MQ_RX_VMDQ_ONLY:
                case ETH_MQ_RX_NONE:
                        /* if no mq mode was configured, use the default scheme */
                        dev->data->dev_conf.rxmode.mq_mode =
                                ETH_MQ_RX_VMDQ_ONLY;
                        break;
                default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
                        /* SRIOV only works in VMDq enable mode */
                        PMD_INIT_LOG(ERR, "SRIOV is active,"
                                        " wrong mq_mode rx %d.",
                                        dev_conf->rxmode.mq_mode);
                        return -EINVAL;
                }

                switch (dev_conf->txmode.mq_mode) {
                case ETH_MQ_TX_VMDQ_DCB:
                        PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
                        dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
                        break;
                default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
                        dev->data->dev_conf.txmode.mq_mode =
                                ETH_MQ_TX_VMDQ_ONLY;
                        break;
                }

                /* check valid queue number */
                if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
                    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
                        PMD_INIT_LOG(ERR, "SRIOV is active,"
                                        " nb_rx_q=%d nb_tx_q=%d queue number"
                                        " must be less than or equal to %d.",
                                        nb_rx_q, nb_tx_q,
                                        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
                        PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
                                          " not supported.");
                        return -EINVAL;
                }
                /* check the configuration for VMDQ+DCB mode */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;

                        if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
                                                TXGBE_VMDQ_DCB_NB_QUEUES);
                                return -EINVAL;
                        }
                        conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
                                                " nb_queue_pools must be %d or %d.",
                                                ETH_16_POOLS, ETH_32_POOLS);
                                return -EINVAL;
                        }
                }
                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_tx_conf *conf;

                        if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
                                                 TXGBE_VMDQ_DCB_NB_QUEUES);
                                return -EINVAL;
                        }
                        conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
                                                " nb_queue_pools != %d and"
                                                " nb_queue_pools != %d.",
                                                ETH_16_POOLS, ETH_32_POOLS);
                                return -EINVAL;
                        }
                }

                /* For DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;

                        conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
                                                " and nb_tcs != %d.",
                                                ETH_4_TCS, ETH_8_TCS);
                                return -EINVAL;
                        }
                }

                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;

                        conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
                                                " and nb_tcs != %d.",
                                                ETH_4_TCS, ETH_8_TCS);
                                return -EINVAL;
                        }
                }
        }
        return 0;
}

static int
txgbe_dev_configure(struct rte_eth_dev *dev)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
        int ret;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
                dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

        /* multiple queue mode checking */
        ret  = txgbe_check_mq_mode(dev);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
                            ret);
                return ret;
        }

        /* set flag to update link status after init */
        intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

        /*
         * Initialize to TRUE. If any Rx queue fails to meet the bulk
         * allocation preconditions, it will be reset.
         */
        adapter->rx_bulk_alloc_allowed = true;

        return 0;
}

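/*
 * GPIO line 6 appears to carry the SFP/PHY interrupt: it is unmasked in
 * GPIOINTEN here and surfaces as TXGBE_ICRMISC_GPIO, which
 * txgbe_dev_interrupt_get_status() translates into
 * TXGBE_FLAG_PHY_INTERRUPT.
 */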
static void
txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        uint32_t gpie;

        gpie = rd32(hw, TXGBE_GPIOINTEN);
        gpie |= TXGBE_GPIOBIT_6;
        wr32(hw, TXGBE_GPIOINTEN, gpie);
        intr->mask_misc |= TXGBE_ICRMISC_GPIO;
}

/*
 * Set device link up: enable tx.
 */
static int
txgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        if (hw->phy.media_type == txgbe_media_type_copper) {
                /* Turn on the copper */
                hw->phy.set_phy_power(hw, true);
        } else {
                /* Turn on the laser */
                hw->mac.enable_tx_laser(hw);
                txgbe_dev_link_update(dev, 0);
        }

        return 0;
}

/*
 * Set device link down: disable tx.
 */
static int
txgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        if (hw->phy.media_type == txgbe_media_type_copper) {
                /* Turn off the copper */
                hw->phy.set_phy_power(hw, false);
        } else {
                /* Turn off the laser */
                hw->mac.disable_tx_laser(hw);
                txgbe_dev_link_update(dev, 0);
        }

        return 0;
}

/*
 * Reset and stop device.
 */
static int
txgbe_dev_close(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        int retries = 0;
        int ret;

        PMD_INIT_FUNC_TRACE();

        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);

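        /* rte_intr_callback_unregister() can return -EAGAIN while the
         * callback is still executing, so keep retrying with a small
         * delay until it detaches or the retry budget runs out.
         */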
        do {
                ret = rte_intr_callback_unregister(intr_handle,
                                txgbe_dev_interrupt_handler, dev);
                if (ret >= 0 || ret == -ENOENT) {
                        break;
                } else if (ret != -EAGAIN) {
                        PMD_INIT_LOG(ERR,
                                "intr callback unregister failed: %d",
                                ret);
                }
                rte_delay_ms(100);
        } while (retries++ < (10 + TXGBE_LINK_UP_TIME));

        /* cancel the delayed handler before removing the device */
        rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);

        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

        rte_free(dev->data->hash_mac_addrs);
        dev->data->hash_mac_addrs = NULL;

        return 0;
}

static int
txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
        dev_info->min_rx_bufsize = 1024;
        dev_info->max_rx_pktlen = 15872;
        dev_info->max_mac_addrs = hw->mac.num_rar_entries;
        dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
        dev_info->max_vfs = pci_dev->max_vfs;
        dev_info->max_vmdq_pools = ETH_64_POOLS;
        dev_info->vmdq_queue_num = dev_info->max_rx_queues;
        dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
        dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
                                     dev_info->rx_queue_offload_capa);
        dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
        dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
                        .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
                        .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
                .offloads = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
                        .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
                        .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
                .offloads = 0,
        };

        dev_info->rx_desc_lim = rx_desc_lim;
        dev_info->tx_desc_lim = tx_desc_lim;

        dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
        dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
        dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;

        dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
        dev_info->speed_capa |= ETH_LINK_SPEED_100M;

        /* Driver-preferred Rx/Tx parameters */
        dev_info->default_rxportconf.burst_size = 32;
        dev_info->default_txportconf.burst_size = 32;
        dev_info->default_rxportconf.nb_queues = 1;
        dev_info->default_txportconf.nb_queues = 1;
        dev_info->default_rxportconf.ring_size = 256;
        dev_info->default_txportconf.ring_size = 256;

        return 0;
}

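/*
 * Alarm callback scheduled from txgbe_dev_link_update_share() when a
 * fiber link is down: retry link setup with the advertised speeds (or,
 * failing that, the device's full capabilities), then clear the
 * NEED_LINK_CONFIG flag.
 */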
void
txgbe_dev_setup_link_alarm_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        u32 speed;
        bool autoneg = false;

        speed = hw->phy.autoneg_advertised;
        if (!speed)
                hw->mac.get_link_capabilities(hw, &speed, &autoneg);

        hw->mac.setup_link(hw, speed, true);

        intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
}

/* return 0 if the link status changed, -1 if it did not change */
int
txgbe_dev_link_update_share(struct rte_eth_dev *dev,
                            int wait_to_complete)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct rte_eth_link link;
        u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        bool link_up;
        int err;
        int wait = 1;

        memset(&link, 0, sizeof(link));
        link.link_status = ETH_LINK_DOWN;
        link.link_speed = ETH_SPEED_NUM_NONE;
        link.link_duplex = ETH_LINK_HALF_DUPLEX;
        link.link_autoneg = ETH_LINK_AUTONEG;

        hw->mac.get_link_status = true;

        if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
                return rte_eth_linkstatus_set(dev, &link);

        /* don't wait for completion if the caller asked not to wait,
         * or if the LSC interrupt is enabled
         */
        if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
                wait = 0;

        err = hw->mac.check_link(hw, &link_speed, &link_up, wait);

        if (err != 0) {
                link.link_speed = ETH_SPEED_NUM_100M;
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
                return rte_eth_linkstatus_set(dev, &link);
        }

        if (link_up == 0) {
                if (hw->phy.media_type == txgbe_media_type_fiber) {
                        intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
                        rte_eal_alarm_set(10,
                                txgbe_dev_setup_link_alarm_handler, dev);
                }
                return rte_eth_linkstatus_set(dev, &link);
        }

        intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
        link.link_status = ETH_LINK_UP;
        link.link_duplex = ETH_LINK_FULL_DUPLEX;

        switch (link_speed) {
        default:
        case TXGBE_LINK_SPEED_UNKNOWN:
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
                link.link_speed = ETH_SPEED_NUM_100M;
                break;

        case TXGBE_LINK_SPEED_100M_FULL:
                link.link_speed = ETH_SPEED_NUM_100M;
                break;

        case TXGBE_LINK_SPEED_1GB_FULL:
                link.link_speed = ETH_SPEED_NUM_1G;
                break;

        case TXGBE_LINK_SPEED_2_5GB_FULL:
                link.link_speed = ETH_SPEED_NUM_2_5G;
                break;

        case TXGBE_LINK_SPEED_5GB_FULL:
                link.link_speed = ETH_SPEED_NUM_5G;
                break;

        case TXGBE_LINK_SPEED_10GB_FULL:
                link.link_speed = ETH_SPEED_NUM_10G;
                break;
        }

        return rte_eth_linkstatus_set(dev, &link);
}

static int
txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
        return txgbe_dev_link_update_share(dev, wait_to_complete);
}

/**
 * Clears the interrupt causes and enables the interrupt.
 * It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

        txgbe_dev_link_status_print(dev);
        if (on)
                intr->mask_misc |= TXGBE_ICRMISC_LSC;
        else
                intr->mask_misc &= ~TXGBE_ICRMISC_LSC;

        return 0;
}

/**
 * Clears the interrupt causes and enables the interrupt.
 * It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

        intr->mask[0] |= TXGBE_ICR_MASK;
        intr->mask[1] |= TXGBE_ICR_MASK;

        return 0;
}

/**
 * Clears the interrupt causes and enables the interrupt.
 * It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

        intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;

        return 0;
}

/*
 * Reads the interrupt causes and sets the flags (e.g.
 * TXGBE_FLAG_NEED_LINK_UPDATE on TXGBE_ICRMISC_LSC) for the deferred
 * link update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
        uint32_t eicr;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

        /* mask all interrupt causes */
        txgbe_disable_intr(hw);

        /* read the read-on-clear causes mirrored in the interrupt status block */
        eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
        PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

        intr->flags = 0;

        /* set flag for async link update */
        if (eicr & TXGBE_ICRMISC_LSC)
                intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

        if (eicr & TXGBE_ICRMISC_VFMBX)
                intr->flags |= TXGBE_FLAG_MAILBOX;

        if (eicr & TXGBE_ICRMISC_LNKSEC)
                intr->flags |= TXGBE_FLAG_MACSEC;

        if (eicr & TXGBE_ICRMISC_GPIO)
                intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;

        return 0;
}

/**
 * Gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  void
 */
static void
txgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_eth_link link;

        rte_eth_linkstatus_get(dev, &link);

        if (link.link_status) {
                PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
                                        (int)(dev->data->port_id),
                                        (unsigned int)link.link_speed,
                        link.link_duplex == ETH_LINK_FULL_DUPLEX ?
                                        "full-duplex" : "half-duplex");
        } else {
                PMD_INIT_LOG(INFO, " Port %d: Link Down",
                                (int)(dev->data->port_id));
        }
        PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
                                pci_dev->addr.domain,
                                pci_dev->addr.bus,
                                pci_dev->addr.devid,
                                pci_dev->addr.function);
}

/*
 * Executes link_update after an interrupt has occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                           struct rte_intr_handle *intr_handle)
{
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        int64_t timeout;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

        if (intr->flags & TXGBE_FLAG_MAILBOX)
                intr->flags &= ~TXGBE_FLAG_MAILBOX;

        if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
                hw->phy.handle_lasi(hw);
                intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
        }

        if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
                struct rte_eth_link link;

                /* get the link status before the update, to predict the transition */
                rte_eth_linkstatus_get(dev, &link);

                txgbe_dev_link_update(dev, 0);

                /* the link is likely coming up */
                if (!link.link_status)
                        /* handle it 1 sec later, waiting for it to stabilize */
                        timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
                /* the link is likely going down */
                else
                        /* handle it 4 sec later, waiting for it to stabilize */
                        timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;

                txgbe_dev_link_status_print(dev);
                if (rte_eal_alarm_set(timeout * 1000,
                                      txgbe_dev_interrupt_delayed_handler,
                                      (void *)dev) < 0) {
                        PMD_DRV_LOG(ERR, "Error setting alarm");
                } else {
                        /* remember original mask */
                        intr->mask_misc_orig = intr->mask_misc;
                        /* only disable lsc interrupt */
                        intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
                }
        }

        PMD_DRV_LOG(DEBUG, "enable intr immediately");
        txgbe_enable_intr(dev);
        rte_intr_enable(intr_handle);

        return 0;
}

/**
 * Interrupt handler to be registered as an alarm callback for the
 * delayed handling of a specific interrupt, waiting for the NIC state
 * to become stable. The txgbe interrupt state is not stable right after
 * the link goes down, so it takes up to 4 seconds to settle.
 *
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_delayed_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        uint32_t eicr;

        txgbe_disable_intr(hw);

        eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];

        if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
                hw->phy.handle_lasi(hw);
                intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
        }

        if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
                txgbe_dev_link_update(dev, 0);
                intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
                txgbe_dev_link_status_print(dev);
                rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
                                              NULL);
        }

        if (intr->flags & TXGBE_FLAG_MACSEC) {
                rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
                                              NULL);
                intr->flags &= ~TXGBE_FLAG_MACSEC;
        }

        /* restore original mask */
        intr->mask_misc = intr->mask_misc_orig;
        intr->mask_misc_orig = 0;

        PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
        txgbe_enable_intr(dev);
        rte_intr_enable(intr_handle);
}

/**
 * Interrupt handler triggered by the NIC for handling a specific
 * interrupt.
 *
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

        txgbe_dev_interrupt_get_status(dev);
        txgbe_dev_interrupt_action(dev, dev->intr_handle);
}

/**
 * set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to txgbe_hw struct
 * @param direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @param queue
 *  queue to map the corresponding interrupt to
 * @param msix_vector
 *  the vector to map to the corresponding queue
 */
void
txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
                   uint8_t queue, uint8_t msix_vector)
{
        uint32_t tmp, idx;

        if (direction == -1) {
                /* other causes */
                msix_vector |= TXGBE_IVARMISC_VLD;
                idx = 0;
                tmp = rd32(hw, TXGBE_IVARMISC);
                tmp &= ~(0xFF << idx);
                tmp |= (msix_vector << idx);
                wr32(hw, TXGBE_IVARMISC, tmp);
        } else {
                /* rx or tx causes */
                /* Workaround for ICR lost */
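                /* Each 32-bit IVAR register holds the Rx and Tx entries
                 * for a pair of queues: byte offset
                 * 16 * (queue & 1) + 8 * direction selects the entry
                 * within IVAR(queue >> 1).
                 */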
                idx = ((16 * (queue & 1)) + (8 * direction));
                tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
                tmp &= ~(0xFF << idx);
                tmp |= (msix_vector << idx);
                wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
        }
}

/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @param dev
 *  pointer to the rte_eth_dev struct
 */
static void
txgbe_configure_msix(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
        uint32_t vec = TXGBE_MISC_VEC_ID;
        uint32_t gpie;

        /* Don't configure the MSI-X registers if no mapping was set up
         * between interrupt vectors and event fds; but if MSI-X has
         * already been enabled, auto clear, auto mask and throttling
         * still need to be configured.
         */
        gpie = rd32(hw, TXGBE_GPIE);
        if (!rte_intr_dp_is_en(intr_handle) &&
            !(gpie & TXGBE_GPIE_MSIX))
                return;

        if (rte_intr_allow_others(intr_handle)) {
                base = TXGBE_RX_VEC_START;
                vec = base;
        }

        /* setup GPIE for MSI-x mode */
        gpie = rd32(hw, TXGBE_GPIE);
        gpie |= TXGBE_GPIE_MSIX;
        wr32(hw, TXGBE_GPIE, gpie);

        /* Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        if (rte_intr_dp_is_en(intr_handle)) {
                for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
                        queue_id++) {
                        /* by default, 1:1 mapping */
                        txgbe_set_ivar_map(hw, 0, queue_id, vec);
                        intr_handle->intr_vec[queue_id] = vec;
                        if (vec < base + intr_handle->nb_efd - 1)
                                vec++;
                }

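                /* route the other (non-queue) causes to the dedicated misc vector */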
                txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
        }
        wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
                        TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
                        | TXGBE_ITR_WRDSA);
}

static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .dev_configure              = txgbe_dev_configure,
        .dev_infos_get              = txgbe_dev_info_get,
        .dev_set_link_up            = txgbe_dev_set_link_up,
        .dev_set_link_down          = txgbe_dev_set_link_down,
};

RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);

#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
        RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
        RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
#endif

#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
        RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
#endif