net/igc: support Rx and Tx
[dpdk.git] / drivers / net / igc / igc_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2020 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <string.h>
7
8 #include <rte_pci.h>
9 #include <rte_bus_pci.h>
10 #include <rte_ethdev_driver.h>
11 #include <rte_ethdev_pci.h>
12 #include <rte_malloc.h>
13
14 #include "igc_logs.h"
15 #include "igc_txrx.h"
16
17 #define IGC_INTEL_VENDOR_ID             0x8086
18
19 /*
20  * The overhead from MTU to max frame size.
21  * Considering VLAN so tag needs to be counted.
22  */
23 #define IGC_ETH_OVERHEAD                (RTE_ETHER_HDR_LEN + \
24                                         RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE)
25
26 #define IGC_FC_PAUSE_TIME               0x0680
27 #define IGC_LINK_UPDATE_CHECK_TIMEOUT   90  /* 9s */
28 #define IGC_LINK_UPDATE_CHECK_INTERVAL  100 /* ms */
29
30 #define IGC_MISC_VEC_ID                 RTE_INTR_VEC_ZERO_OFFSET
31 #define IGC_RX_VEC_START                RTE_INTR_VEC_RXTX_OFFSET
32 #define IGC_MSIX_OTHER_INTR_VEC         0   /* MSI-X other interrupt vector */
33 #define IGC_FLAG_NEED_LINK_UPDATE       (1u << 0)       /* need update link */
34
35 #define IGC_DEFAULT_RX_FREE_THRESH      32
36
37 #define IGC_DEFAULT_RX_PTHRESH          8
38 #define IGC_DEFAULT_RX_HTHRESH          8
39 #define IGC_DEFAULT_RX_WTHRESH          4
40
41 #define IGC_DEFAULT_TX_PTHRESH          8
42 #define IGC_DEFAULT_TX_HTHRESH          1
43 #define IGC_DEFAULT_TX_WTHRESH          16
44
/* MSI-X other interrupt vector */
/* NOTE(review): duplicate of the IGC_MSIX_OTHER_INTR_VEC definition above;
 * harmless because both expand to 0, but one copy should be removed.
 */
#define IGC_MSIX_OTHER_INTR_VEC         0
47
48 /* External VLAN Enable bit mask */
49 #define IGC_CTRL_EXT_EXT_VLAN           (1u << 26)
50
/* Rx descriptor ring limits reported to applications via dev_infos_get() */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = IGC_MAX_RXD,
	.nb_min = IGC_MIN_RXD,
	.nb_align = IGC_RXD_ALIGN,
};
56
/* Tx descriptor ring limits (including per-packet segment limits)
 * reported to applications via dev_infos_get()
 */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = IGC_MAX_TXD,
	.nb_min = IGC_MIN_TXD,
	.nb_align = IGC_TXD_ALIGN,
	.nb_seg_max = IGC_TX_MAX_SEG,
	.nb_mtu_seg_max = IGC_TX_MAX_MTU_SEG,
};
64
/* PCI vendor/device IDs probed by this PMD; table is sentinel-terminated */
static const struct rte_pci_id pci_id_igc_map[] = {
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V)  },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_I)  },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_K)  },
	{ .vendor_id = 0, /* sentinel */ },
};
72
/* Forward declarations of the ethdev callbacks registered in eth_igc_ops */
static int eth_igc_configure(struct rte_eth_dev *dev);
static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static void eth_igc_stop(struct rte_eth_dev *dev);
static int eth_igc_start(struct rte_eth_dev *dev);
static int eth_igc_set_link_up(struct rte_eth_dev *dev);
static int eth_igc_set_link_down(struct rte_eth_dev *dev);
static void eth_igc_close(struct rte_eth_dev *dev);
static int eth_igc_reset(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev);
static int eth_igc_fw_version_get(struct rte_eth_dev *dev,
				char *fw_version, size_t fw_size);
static int eth_igc_infos_get(struct rte_eth_dev *dev,
			struct rte_eth_dev_info *dev_info);
static int eth_igc_led_on(struct rte_eth_dev *dev);
static int eth_igc_led_off(struct rte_eth_dev *dev);
static const uint32_t *eth_igc_supported_ptypes_get(struct rte_eth_dev *dev);
static int eth_igc_rar_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool);
static void eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static int eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *addr);
static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
			 struct rte_ether_addr *mc_addr_set,
			 uint32_t nb_mc_addr);
static int eth_igc_allmulticast_enable(struct rte_eth_dev *dev);
static int eth_igc_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
101
/* Generic ethdev callback table for the igc PMD.
 * Device-level callbacks are implemented in this file; queue/datapath
 * callbacks are declared in igc_txrx.h.
 */
static const struct eth_dev_ops eth_igc_ops = {
	.dev_configure          = eth_igc_configure,
	.link_update            = eth_igc_link_update,
	.dev_stop               = eth_igc_stop,
	.dev_start              = eth_igc_start,
	.dev_close              = eth_igc_close,
	.dev_reset              = eth_igc_reset,
	.dev_set_link_up        = eth_igc_set_link_up,
	.dev_set_link_down      = eth_igc_set_link_down,
	.promiscuous_enable     = eth_igc_promiscuous_enable,
	.promiscuous_disable    = eth_igc_promiscuous_disable,
	.allmulticast_enable    = eth_igc_allmulticast_enable,
	.allmulticast_disable   = eth_igc_allmulticast_disable,
	.fw_version_get         = eth_igc_fw_version_get,
	.dev_infos_get          = eth_igc_infos_get,
	.dev_led_on             = eth_igc_led_on,
	.dev_led_off            = eth_igc_led_off,
	.dev_supported_ptypes_get = eth_igc_supported_ptypes_get,
	.mtu_set                = eth_igc_mtu_set,
	.mac_addr_add           = eth_igc_rar_set,
	.mac_addr_remove        = eth_igc_rar_clear,
	.mac_addr_set           = eth_igc_default_mac_addr_set,
	.set_mc_addr_list       = eth_igc_set_mc_addr_list,

	/* Rx/Tx queue operations (declared in igc_txrx.h) */
	.rx_queue_setup         = eth_igc_rx_queue_setup,
	.rx_queue_release       = eth_igc_rx_queue_release,
	.rx_queue_count         = eth_igc_rx_queue_count,
	.rx_descriptor_done     = eth_igc_rx_descriptor_done,
	.rx_descriptor_status   = eth_igc_rx_descriptor_status,
	.tx_descriptor_status   = eth_igc_tx_descriptor_status,
	.tx_queue_setup         = eth_igc_tx_queue_setup,
	.tx_queue_release       = eth_igc_tx_queue_release,
	.tx_done_cleanup        = eth_igc_tx_done_cleanup,
	.rxq_info_get           = eth_igc_rxq_info_get,
	.txq_info_get           = eth_igc_txq_info_get,
};
138
139 /*
140  * multiple queue mode checking
141  */
142 static int
143 igc_check_mq_mode(struct rte_eth_dev *dev)
144 {
145         enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
146         enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
147
148         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
149                 PMD_INIT_LOG(ERR, "SRIOV is not supported.");
150                 return -EINVAL;
151         }
152
153         if (rx_mq_mode != ETH_MQ_RX_NONE &&
154                 rx_mq_mode != ETH_MQ_RX_RSS) {
155                 /* RSS together with VMDq not supported*/
156                 PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
157                                 rx_mq_mode);
158                 return -EINVAL;
159         }
160
161         /* To no break software that set invalid mode, only display
162          * warning if invalid mode is used.
163          */
164         if (tx_mq_mode != ETH_MQ_TX_NONE)
165                 PMD_INIT_LOG(WARNING,
166                         "TX mode %d is not supported. Due to meaningless in this driver, just ignore",
167                         tx_mq_mode);
168
169         return 0;
170 }
171
172 static int
173 eth_igc_configure(struct rte_eth_dev *dev)
174 {
175         struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
176         int ret;
177
178         PMD_INIT_FUNC_TRACE();
179
180         ret  = igc_check_mq_mode(dev);
181         if (ret != 0)
182                 return ret;
183
184         intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
185         return 0;
186 }
187
188 static int
189 eth_igc_set_link_up(struct rte_eth_dev *dev)
190 {
191         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
192
193         if (hw->phy.media_type == igc_media_type_copper)
194                 igc_power_up_phy(hw);
195         else
196                 igc_power_up_fiber_serdes_link(hw);
197         return 0;
198 }
199
200 static int
201 eth_igc_set_link_down(struct rte_eth_dev *dev)
202 {
203         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
204
205         if (hw->phy.media_type == igc_media_type_copper)
206                 igc_power_down_phy(hw);
207         else
208                 igc_shutdown_fiber_serdes_link(hw);
209         return 0;
210 }
211
212 /*
213  * disable other interrupt
214  */
215 static void
216 igc_intr_other_disable(struct rte_eth_dev *dev)
217 {
218         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
219         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
220         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
221
222         if (rte_intr_allow_others(intr_handle) &&
223                 dev->data->dev_conf.intr_conf.lsc) {
224                 IGC_WRITE_REG(hw, IGC_EIMC, 1u << IGC_MSIX_OTHER_INTR_VEC);
225         }
226
227         IGC_WRITE_REG(hw, IGC_IMC, ~0);
228         IGC_WRITE_FLUSH(hw);
229 }
230
231 /*
232  * enable other interrupt
233  */
234 static inline void
235 igc_intr_other_enable(struct rte_eth_dev *dev)
236 {
237         struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
238         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
239         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
240         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
241
242         if (rte_intr_allow_others(intr_handle) &&
243                 dev->data->dev_conf.intr_conf.lsc) {
244                 IGC_WRITE_REG(hw, IGC_EIMS, 1u << IGC_MSIX_OTHER_INTR_VEC);
245         }
246
247         IGC_WRITE_REG(hw, IGC_IMS, intr->mask);
248         IGC_WRITE_FLUSH(hw);
249 }
250
251 /*
252  * It reads ICR and gets interrupt causes, check it and set a bit flag
253  * to update link status.
254  */
255 static void
256 eth_igc_interrupt_get_status(struct rte_eth_dev *dev)
257 {
258         uint32_t icr;
259         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
260         struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
261
262         /* read-on-clear nic registers here */
263         icr = IGC_READ_REG(hw, IGC_ICR);
264
265         intr->flags = 0;
266         if (icr & IGC_ICR_LSC)
267                 intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
268 }
269
/*
 * Query the PHY/MAC for the current link state and publish it through
 * rte_eth_linkstatus_set().
 *
 * @wait_to_complete
 *  When non-zero, poll for link-up for at most
 *  IGC_LINK_UPDATE_CHECK_TIMEOUT polls of IGC_LINK_UPDATE_CHECK_INTERVAL
 *  milliseconds each (~9 seconds total).
 *
 * return 0 means link status changed, -1 means not changed
 */
static int
eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_eth_link link;
	int link_check, count;

	link_check = 0;
	/* force the next check_for_link() to query the hardware */
	hw->mac.get_link_status = 1;

	/* possible wait-to-complete in up to 9 seconds */
	for (count = 0; count < IGC_LINK_UPDATE_CHECK_TIMEOUT; count++) {
		/* Read the real link status */
		switch (hw->phy.media_type) {
		case igc_media_type_copper:
			/* Do the work to read phy */
			igc_check_for_link(hw);
			/* get_link_status is cleared once link is detected,
			 * hence the inversion for the local flag
			 */
			link_check = !hw->mac.get_link_status;
			break;

		case igc_media_type_fiber:
			igc_check_for_link(hw);
			link_check = (IGC_READ_REG(hw, IGC_STATUS) &
				      IGC_STATUS_LU);
			break;

		case igc_media_type_internal_serdes:
			igc_check_for_link(hw);
			link_check = hw->mac.serdes_has_link;
			break;

		default:
			break;
		}
		if (link_check || wait_to_complete == 0)
			break;
		rte_delay_ms(IGC_LINK_UPDATE_CHECK_INTERVAL);
	}
	memset(&link, 0, sizeof(link));

	/* Now we check if a transition has happened */
	if (link_check) {
		uint16_t duplex, speed;
		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		link.link_duplex = (duplex == FULL_DUPLEX) ?
				ETH_LINK_FULL_DUPLEX :
				ETH_LINK_HALF_DUPLEX;
		link.link_speed = speed;
		link.link_status = ETH_LINK_UP;
		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				ETH_LINK_SPEED_FIXED);

		if (speed == SPEED_2500) {
			/* Force the transmit IPG time to 0x0b at 2.5G.
			 * NOTE(review): 0x0b looks like a hardware
			 * requirement for 2.5G operation — confirm against
			 * the I225 datasheet.
			 */
			uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
			if ((tipg & IGC_TIPG_IPGT_MASK) != 0x0b) {
				tipg &= ~IGC_TIPG_IPGT_MASK;
				tipg |= 0x0b;
				IGC_WRITE_REG(hw, IGC_TIPG, tipg);
			}
		}
	} else {
		/* link down: report zero speed and fixed (no) autoneg */
		link.link_speed = 0;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
		link.link_status = ETH_LINK_DOWN;
		link.link_autoneg = ETH_LINK_FIXED;
	}

	return rte_eth_linkstatus_set(dev, &link);
}
340
/*
 * It executes link_update after knowing an interrupt is present.
 * On a link-status-change request it refreshes the link, logs the new
 * state and fires the RTE_ETH_EVENT_INTR_LSC application callbacks.
 */
static void
eth_igc_interrupt_action(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;
	int ret;

	if (intr->flags & IGC_FLAG_NEED_LINK_UPDATE) {
		/* consume the flag before doing the (slow) update */
		intr->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;

		/* set get_link_status to check register later */
		ret = eth_igc_link_update(dev, 0);

		/* check if link has changed */
		if (ret < 0)
			return;

		rte_eth_linkstatus_get(dev, &link);
		if (link.link_status)
			PMD_DRV_LOG(INFO,
				" Port %d: Link Up - speed %u Mbps - %s",
				dev->data->port_id,
				(unsigned int)link.link_speed,
				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
				"full-duplex" : "half-duplex");
		else
			PMD_DRV_LOG(INFO, " Port %d: Link Down",
				dev->data->port_id);

		PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
		/* notify applications registered for LSC events */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
				NULL);
	}
}
383
/*
 * Interrupt handler which shall be registered at first.
 *
 * @param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
eth_igc_interrupt_handler(void *param)
{
	struct rte_eth_dev *eth_dev = param;

	eth_igc_interrupt_get_status(eth_dev);
	eth_igc_interrupt_action(eth_dev);
}
400
401 /*
402  * rx,tx enable/disable
403  */
404 static void
405 eth_igc_rxtx_control(struct rte_eth_dev *dev, bool enable)
406 {
407         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
408         uint32_t tctl, rctl;
409
410         tctl = IGC_READ_REG(hw, IGC_TCTL);
411         rctl = IGC_READ_REG(hw, IGC_RCTL);
412
413         if (enable) {
414                 /* enable Tx/Rx */
415                 tctl |= IGC_TCTL_EN;
416                 rctl |= IGC_RCTL_EN;
417         } else {
418                 /* disable Tx/Rx */
419                 tctl &= ~IGC_TCTL_EN;
420                 rctl &= ~IGC_RCTL_EN;
421         }
422         IGC_WRITE_REG(hw, IGC_TCTL, tctl);
423         IGC_WRITE_REG(hw, IGC_RCTL, rctl);
424         IGC_WRITE_FLUSH(hw);
425 }
426
/*
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *  Ordering matters: traffic is stopped and interrupts masked before
 *  the MAC reset; the PHY is powered down and queues cleared afterward.
 */
static void
eth_igc_stop(struct rte_eth_dev *dev)
{
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct rte_eth_link link;

	adapter->stopped = 1;

	/* disable receive and transmit */
	eth_igc_rxtx_control(dev, false);

	/* disable all MSI-X interrupts
	 * NOTE(review): 0x1f masks vectors 0-4 — confirm this matches the
	 * number of MSI-X vectors this device uses.
	 */
	IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
	IGC_WRITE_FLUSH(hw);

	/* clear all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EICR, 0x1f);

	igc_intr_other_disable(dev);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	igc_reset_hw(hw);

	/* disable all wake up */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/* Set bit for Go Link disconnect */
	igc_read_reg_check_set_bits(hw, IGC_82580_PHY_POWER_MGMT,
			IGC_82580_PM_GO_LINKD);

	/* Power down the phy. Needed to make the link go Down */
	eth_igc_set_link_down(dev);

	igc_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   eth_igc_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
}
484
/* Sets up the hardware to generate MSI-X interrupts properly
 * @dev
 *  Pointer to struct rte_eth_dev.
 *
 * Only the "other cause" (misc/link) vector is programmed here; the
 * function is a no-op unless both Rx vector mapping and LSC interrupts
 * are enabled.
 */
static void
igc_configure_msix_intr(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	uint32_t intr_mask;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle) ||
		!dev->data->dev_conf.intr_conf.lsc)
		return;

	/* turn on MSI-X capability first */
	IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE |
				IGC_GPIE_PBA | IGC_GPIE_EIAME |
				IGC_GPIE_NSICR);

	intr_mask = (1u << IGC_MSIX_OTHER_INTR_VEC);

	/* enable msix auto-clear */
	igc_read_reg_check_set_bits(hw, IGC_EIAC, intr_mask);

	/* set other cause interrupt vector
	 * (IVAR_MISC bits 15:8 select the vector for the "other" cause)
	 */
	igc_read_reg_check_set_bits(hw, IGC_IVAR_MISC,
		(uint32_t)(IGC_MSIX_OTHER_INTR_VEC | IGC_IVAR_VALID) << 8);

	/* enable auto-mask */
	igc_read_reg_check_set_bits(hw, IGC_EIAM, intr_mask);

	IGC_WRITE_FLUSH(hw);
}
524
525 /**
526  * It enables the interrupt mask and then enable the interrupt.
527  *
528  * @dev
529  *  Pointer to struct rte_eth_dev.
530  * @on
531  *  Enable or Disable
532  */
533 static void
534 igc_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
535 {
536         struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
537
538         if (on)
539                 intr->mask |= IGC_ICR_LSC;
540         else
541                 intr->mask &= ~IGC_ICR_LSC;
542 }
543
544 /*
545  *  Get hardware rx-buffer size.
546  */
547 static inline int
548 igc_get_rx_buffer_size(struct igc_hw *hw)
549 {
550         return (IGC_READ_REG(hw, IGC_RXPBS) & 0x3f) << 10;
551 }
552
553 /*
554  * igc_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
555  * For ASF and Pass Through versions of f/w this means
556  * that the driver is loaded.
557  */
558 static void
559 igc_hw_control_acquire(struct igc_hw *hw)
560 {
561         uint32_t ctrl_ext;
562
563         /* Let firmware know the driver has taken over */
564         ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
565         IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
566 }
567
568 /*
569  * igc_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
570  * For ASF and Pass Through versions of f/w this means that the
571  * driver is no longer loaded.
572  */
573 static void
574 igc_hw_control_release(struct igc_hw *hw)
575 {
576         uint32_t ctrl_ext;
577
578         /* Let firmware taken over control of h/w */
579         ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
580         IGC_WRITE_REG(hw, IGC_CTRL_EXT,
581                         ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
582 }
583
/*
 * Reset and initialize the MAC: take firmware control, issue a global
 * reset, disable wake-up, configure flow-control watermarks and run the
 * shared-code init. Returns 0 on success or the igc_init_hw() error.
 */
static int
igc_hardware_init(struct igc_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igc_hw_control_acquire(hw);

	/* Issue a global reset */
	igc_reset_hw(hw);

	/* disable all wake up */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/*
	 * Hardware flow control
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 */
	rx_buf_size = igc_get_rx_buffer_size(hw);
	/* leave room for two max-size frames above the XOFF threshold */
	hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
	hw->fc.low_water = hw->fc.high_water - 1500;
	hw->fc.pause_time = IGC_FC_PAUSE_TIME;
	hw->fc.send_xon = 1;
	hw->fc.requested_mode = igc_fc_full;

	diag = igc_init_hw(hw);
	if (diag < 0)
		return diag;

	igc_get_phy_info(hw);
	igc_check_for_link(hw);

	return 0;
}
627
/*
 * dev_start callback: bring up the PHY, reset/initialize the MAC,
 * program MSI-X, initialize the Tx/Rx rings, apply the configured link
 * speeds, then enable interrupts and traffic.
 * Returns 0 on success, -EIO on hardware-init failure, the igc_rx_init()
 * error code, or -EINVAL for an invalid speed configuration.
 */
static int
eth_igc_start(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t *speeds;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* disable all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
	IGC_WRITE_FLUSH(hw);

	/* clear all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EICR, 0x1f);

	/* disable uio/vfio intr/eventfd mapping */
	if (!adapter->stopped)
		rte_intr_disable(intr_handle);

	/* Power up the phy. Needed to make the link go Up */
	eth_igc_set_link_up(dev);

	/* Put the address into the Receive Address Array */
	igc_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igc_hardware_init(hw)) {
		PMD_DRV_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}
	adapter->stopped = 0;

	/* configure msix for rx interrupt */
	igc_configure_msix_intr(dev);

	igc_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = igc_rx_init(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to initialize RX hardware");
		igc_dev_clear_queues(dev);
		return ret;
	}

	igc_clear_hw_cntrs_base_generic(hw);

	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
		/* advertise every supported speed up to 2.5G */
		hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
		hw->mac.autoneg = 1;
	} else {
		int num_speeds = 0;
		bool autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;

		/* Reset */
		hw->phy.autoneg_advertised = 0;

		/* reject any bit outside the supported speed set */
		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
				ETH_LINK_SPEED_FIXED)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_2_5G) {
			hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
			num_speeds++;
		}
		/* a fixed-speed config must select exactly one speed */
		if (num_speeds == 0 || (!autoneg && num_speeds > 1))
			goto error_invalid_config;

		/* Set/reset the mac.autoneg based on the link speed,
		 * fixed or not
		 */
		if (!autoneg) {
			hw->mac.autoneg = 0;
			hw->mac.forced_speed_duplex =
					hw->phy.autoneg_advertised;
		} else {
			hw->mac.autoneg = 1;
		}
	}

	igc_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc)
			igc_lsc_interrupt_setup(dev, 1);
		else
			igc_lsc_interrupt_setup(dev, 0);
	} else {
		/* no vector multiplexing: fall back to the default handler
		 * and LSC cannot be delivered
		 */
		rte_intr_callback_unregister(intr_handle,
					     eth_igc_interrupt_handler,
					     (void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc)
			PMD_DRV_LOG(INFO,
				"LSC won't enable because of no intr multiplex");
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	igc_intr_other_enable(dev);

	eth_igc_rxtx_control(dev, true);
	eth_igc_link_update(dev, 0);

	return 0;

error_invalid_config:
	PMD_DRV_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
		     dev->data->dev_conf.link_speeds, dev->data->port_id);
	igc_dev_clear_queues(dev);
	return -EINVAL;
}
771
/*
 * Release any software/firmware semaphores that a previous, improperly
 * exited process may have left held, so this instance can acquire them.
 * Returns IGC_SUCCESS, or the igc_init_mac_params() error code.
 */
static int
igc_reset_swfw_lock(struct igc_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = igc_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (igc_get_hw_semaphore_generic(hw) < 0)
		PMD_DRV_LOG(DEBUG, "SMBI lock released");

	igc_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Phy lock should not fail in this early stage.
		 * If this is the case, it is due to an improper exit of the
		 * application. So force the release of the faulty lock.
		 */
		mask = IGC_SWFW_PHY0_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
				    hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure
		 * that if lock can not be taken it is due to an improper lock
		 * of the semaphore.
		 */
		mask = IGC_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0)
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");

		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return IGC_SUCCESS;
}
825
826 /*
827  * free all rx/tx queues.
828  */
829 static void
830 igc_dev_free_queues(struct rte_eth_dev *dev)
831 {
832         uint16_t i;
833
834         for (i = 0; i < dev->data->nb_rx_queues; i++) {
835                 eth_igc_rx_queue_release(dev->data->rx_queues[i]);
836                 dev->data->rx_queues[i] = NULL;
837         }
838         dev->data->nb_rx_queues = 0;
839
840         for (i = 0; i < dev->data->nb_tx_queues; i++) {
841                 eth_igc_tx_queue_release(dev->data->tx_queues[i]);
842                 dev->data->tx_queues[i] = NULL;
843         }
844         dev->data->nb_tx_queues = 0;
845 }
846
/*
 * Close the port: stop it if still running, disable and unregister the
 * "other cause" interrupt, reset the PHY, hand control back to the
 * firmware, free all queues and clear stale SW/FW semaphore locks.
 */
static void
eth_igc_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	int retry = 0;

	PMD_INIT_FUNC_TRACE();

	if (!adapter->stopped)
		eth_igc_stop(dev);

	igc_intr_other_disable(dev);
	do {
		int ret = rte_intr_callback_unregister(intr_handle,
				eth_igc_interrupt_handler, dev);
		/* success, never-registered and bad-arg results are final;
		 * anything else (e.g. callback still running) is retried
		 * a few times after a short delay.
		 */
		if (ret >= 0 || ret == -ENOENT || ret == -EINVAL)
			break;

		PMD_DRV_LOG(ERR, "intr callback unregister failed: %d", ret);
		DELAY(200 * 1000); /* delay 200ms */
	} while (retry++ < 5);

	igc_phy_hw_reset(hw);
	igc_hw_control_release(hw);
	igc_dev_free_queues(dev);

	/* Reset any pending lock */
	igc_reset_swfw_lock(hw);
}
879
880 static void
881 igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
882 {
883         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
884
885         hw->vendor_id = pci_dev->id.vendor_id;
886         hw->device_id = pci_dev->id.device_id;
887         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
888         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
889 }
890
/*
 * Per-device init entry point invoked from the PCI probe path.
 * Brings the adapter from power-on state to a usable state:
 * identify MAC/PHY, clear stale locks, reset the HW, validate the NVM,
 * read the permanent MAC address and hook up the interrupt handler.
 * Returns 0 on success, a negative errno value on failure.
 */
static int
eth_igc_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	int error = 0;

	PMD_INIT_FUNC_TRACE();
	dev->dev_ops = &eth_igc_ops;

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(dev, pci_dev);

	/* map BAR 0 registers for the shared code */
	hw->back = pci_dev;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* first pass: fill function pointers only, no PHY/NVM access yet */
	igc_identify_hardware(dev, pci_dev);
	if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	igc_get_bus_info(hw);

	/* Reset any pending lock */
	if (igc_reset_swfw_lock(hw) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (igc_setup_init_funcs(hw, true) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* default: autonegotiate all supported speeds, up to 2.5G */
	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;

	/* Copper options */
	if (hw->phy.media_type == igc_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = igc_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	igc_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (igc_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time its a real issue.
		 */
		if (igc_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (igc_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	dev->data->mac_addrs = rte_zmalloc("igc",
		RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes for storing MAC",
				RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	/* Now initialize the hardware */
	if (igc_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(dev->data->mac_addrs);
		dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}

	/* Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	/* force a link-status query on the first link_update() call */
	hw->mac.get_link_status = 1;
	igc->stopped = 0;

	/* Indicate SOL/IDER usage */
	if (igc_check_reset_block(hw) < 0)
		PMD_INIT_LOG(ERR,
			"PHY reset is blocked due to SOL/IDER session.");

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
			dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	rte_intr_callback_register(&pci_dev->intr_handle,
			eth_igc_interrupt_handler, (void *)dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pci_dev->intr_handle);

	/* enable support intr */
	igc_intr_other_enable(dev);

	return 0;

err_late:
	igc_hw_control_release(hw);
	return error;
}
1028
1029 static int
1030 eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
1031 {
1032         PMD_INIT_FUNC_TRACE();
1033
1034         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1035                 return 0;
1036
1037         eth_igc_close(eth_dev);
1038         return 0;
1039 }
1040
/*
 * Reset the port by running the full uninit/init cycle.
 * Returns the uninit error if it fails, otherwise the init result.
 */
static int
eth_igc_reset(struct rte_eth_dev *dev)
{
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = eth_igc_dev_uninit(dev);
	return ret != 0 ? ret : eth_igc_dev_init(dev);
}
1054
1055 static int
1056 eth_igc_promiscuous_enable(struct rte_eth_dev *dev)
1057 {
1058         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1059         uint32_t rctl;
1060
1061         rctl = IGC_READ_REG(hw, IGC_RCTL);
1062         rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
1063         IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1064         return 0;
1065 }
1066
1067 static int
1068 eth_igc_promiscuous_disable(struct rte_eth_dev *dev)
1069 {
1070         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1071         uint32_t rctl;
1072
1073         rctl = IGC_READ_REG(hw, IGC_RCTL);
1074         rctl &= (~IGC_RCTL_UPE);
1075         if (dev->data->all_multicast == 1)
1076                 rctl |= IGC_RCTL_MPE;
1077         else
1078                 rctl &= (~IGC_RCTL_MPE);
1079         IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1080         return 0;
1081 }
1082
1083 static int
1084 eth_igc_allmulticast_enable(struct rte_eth_dev *dev)
1085 {
1086         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1087         uint32_t rctl;
1088
1089         rctl = IGC_READ_REG(hw, IGC_RCTL);
1090         rctl |= IGC_RCTL_MPE;
1091         IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1092         return 0;
1093 }
1094
1095 static int
1096 eth_igc_allmulticast_disable(struct rte_eth_dev *dev)
1097 {
1098         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1099         uint32_t rctl;
1100
1101         if (dev->data->promiscuous == 1)
1102                 return 0;       /* must remain in all_multicast mode */
1103
1104         rctl = IGC_READ_REG(hw, IGC_RCTL);
1105         rctl &= (~IGC_RCTL_MPE);
1106         IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1107         return 0;
1108 }
1109
1110 static int
1111 eth_igc_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
1112                        size_t fw_size)
1113 {
1114         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1115         struct igc_fw_version fw;
1116         int ret;
1117
1118         igc_get_fw_version(hw, &fw);
1119
1120         /* if option rom is valid, display its version too */
1121         if (fw.or_valid) {
1122                 ret = snprintf(fw_version, fw_size,
1123                          "%d.%d, 0x%08x, %d.%d.%d",
1124                          fw.eep_major, fw.eep_minor, fw.etrack_id,
1125                          fw.or_major, fw.or_build, fw.or_patch);
1126         /* no option rom */
1127         } else {
1128                 if (fw.etrack_id != 0X0000) {
1129                         ret = snprintf(fw_version, fw_size,
1130                                  "%d.%d, 0x%08x",
1131                                  fw.eep_major, fw.eep_minor,
1132                                  fw.etrack_id);
1133                 } else {
1134                         ret = snprintf(fw_version, fw_size,
1135                                  "%d.%d.%d",
1136                                  fw.eep_major, fw.eep_minor,
1137                                  fw.eep_build);
1138                 }
1139         }
1140
1141         ret += 1; /* add the size of '\0' */
1142         if (fw_size < (u32)ret)
1143                 return ret;
1144         else
1145                 return 0;
1146 }
1147
1148 static int
1149 eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1150 {
1151         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1152
1153         dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
1154         dev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE;
1155         dev_info->max_mac_addrs = hw->mac.rar_entry_count;
1156         dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
1157         dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
1158
1159         dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
1160         dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
1161         dev_info->max_vmdq_pools = 0;
1162
1163         dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
1164         dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
1165         dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;
1166
1167         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1168                 .rx_thresh = {
1169                         .pthresh = IGC_DEFAULT_RX_PTHRESH,
1170                         .hthresh = IGC_DEFAULT_RX_HTHRESH,
1171                         .wthresh = IGC_DEFAULT_RX_WTHRESH,
1172                 },
1173                 .rx_free_thresh = IGC_DEFAULT_RX_FREE_THRESH,
1174                 .rx_drop_en = 0,
1175                 .offloads = 0,
1176         };
1177
1178         dev_info->default_txconf = (struct rte_eth_txconf) {
1179                 .tx_thresh = {
1180                         .pthresh = IGC_DEFAULT_TX_PTHRESH,
1181                         .hthresh = IGC_DEFAULT_TX_HTHRESH,
1182                         .wthresh = IGC_DEFAULT_TX_WTHRESH,
1183                 },
1184                 .offloads = 0,
1185         };
1186
1187         dev_info->rx_desc_lim = rx_desc_lim;
1188         dev_info->tx_desc_lim = tx_desc_lim;
1189
1190         dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
1191                         ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
1192                         ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;
1193
1194         dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
1195         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
1196         return 0;
1197 }
1198
1199 static int
1200 eth_igc_led_on(struct rte_eth_dev *dev)
1201 {
1202         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1203
1204         return igc_led_on(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
1205 }
1206
1207 static int
1208 eth_igc_led_off(struct rte_eth_dev *dev)
1209 {
1210         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1211
1212         return igc_led_off(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
1213 }
1214
1215 static const uint32_t *
1216 eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev)
1217 {
1218         static const uint32_t ptypes[] = {
1219                 /* refers to rx_desc_pkt_info_to_pkt_type() */
1220                 RTE_PTYPE_L2_ETHER,
1221                 RTE_PTYPE_L3_IPV4,
1222                 RTE_PTYPE_L3_IPV4_EXT,
1223                 RTE_PTYPE_L3_IPV6,
1224                 RTE_PTYPE_L3_IPV6_EXT,
1225                 RTE_PTYPE_L4_TCP,
1226                 RTE_PTYPE_L4_UDP,
1227                 RTE_PTYPE_L4_SCTP,
1228                 RTE_PTYPE_TUNNEL_IP,
1229                 RTE_PTYPE_INNER_L3_IPV6,
1230                 RTE_PTYPE_INNER_L3_IPV6_EXT,
1231                 RTE_PTYPE_INNER_L4_TCP,
1232                 RTE_PTYPE_INNER_L4_UDP,
1233                 RTE_PTYPE_UNKNOWN
1234         };
1235
1236         return ptypes;
1237 }
1238
1239 static int
1240 eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1241 {
1242         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1243         uint32_t frame_size = mtu + IGC_ETH_OVERHEAD;
1244         uint32_t rctl;
1245
1246         /* if extend vlan has been enabled */
1247         if (IGC_READ_REG(hw, IGC_CTRL_EXT) & IGC_CTRL_EXT_EXT_VLAN)
1248                 frame_size += VLAN_TAG_SIZE;
1249
1250         /* check that mtu is within the allowed range */
1251         if (mtu < RTE_ETHER_MIN_MTU ||
1252                 frame_size > MAX_RX_JUMBO_FRAME_SIZE)
1253                 return -EINVAL;
1254
1255         /*
1256          * refuse mtu that requires the support of scattered packets when
1257          * this feature has not been enabled before.
1258          */
1259         if (!dev->data->scattered_rx &&
1260             frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
1261                 return -EINVAL;
1262
1263         rctl = IGC_READ_REG(hw, IGC_RCTL);
1264
1265         /* switch to jumbo mode if needed */
1266         if (mtu > RTE_ETHER_MTU) {
1267                 dev->data->dev_conf.rxmode.offloads |=
1268                         DEV_RX_OFFLOAD_JUMBO_FRAME;
1269                 rctl |= IGC_RCTL_LPE;
1270         } else {
1271                 dev->data->dev_conf.rxmode.offloads &=
1272                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1273                 rctl &= ~IGC_RCTL_LPE;
1274         }
1275         IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1276
1277         /* update max frame size */
1278         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1279
1280         IGC_WRITE_REG(hw, IGC_RLPML,
1281                         dev->data->dev_conf.rxmode.max_rx_pkt_len);
1282
1283         return 0;
1284 }
1285
1286 static int
1287 eth_igc_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1288                 uint32_t index, uint32_t pool)
1289 {
1290         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1291
1292         igc_rar_set(hw, mac_addr->addr_bytes, index);
1293         RTE_SET_USED(pool);
1294         return 0;
1295 }
1296
1297 static void
1298 eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index)
1299 {
1300         uint8_t addr[RTE_ETHER_ADDR_LEN];
1301         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1302
1303         memset(addr, 0, sizeof(addr));
1304         igc_rar_set(hw, addr, index);
1305 }
1306
1307 static int
1308 eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
1309                         struct rte_ether_addr *addr)
1310 {
1311         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1312         igc_rar_set(hw, addr->addr_bytes, 0);
1313         return 0;
1314 }
1315
1316 static int
1317 eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
1318                          struct rte_ether_addr *mc_addr_set,
1319                          uint32_t nb_mc_addr)
1320 {
1321         struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1322         igc_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
1323         return 0;
1324 }
1325
1326 static int
1327 eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1328         struct rte_pci_device *pci_dev)
1329 {
1330         PMD_INIT_FUNC_TRACE();
1331         return rte_eth_dev_pci_generic_probe(pci_dev,
1332                 sizeof(struct igc_adapter), eth_igc_dev_init);
1333 }
1334
1335 static int
1336 eth_igc_pci_remove(struct rte_pci_device *pci_dev)
1337 {
1338         PMD_INIT_FUNC_TRACE();
1339         return rte_eth_dev_pci_generic_remove(pci_dev, eth_igc_dev_uninit);
1340 }
1341
/* PCI driver descriptor: device match table, BAR mapping requirement
 * and link-status-change interrupt support.
 */
static struct rte_pci_driver rte_igc_pmd = {
	.id_table = pci_id_igc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_igc_pci_probe,
	.remove = eth_igc_pci_remove,
};

/* Register the PMD with the PCI bus and export its module metadata. */
RTE_PMD_REGISTER_PCI(net_igc, rte_igc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_igc, pci_id_igc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_igc, "* igb_uio | uio_pci_generic | vfio-pci");