net/e1000: fix Rx error counter for bad length
drivers/net/e1000/em_ethdev.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"

#define EM_EIAC                 0x000DC

#define PMD_ROUNDUP(x,y)        (((x) + (y) - 1)/(y) * (y))
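
/*
 * Editor's note (illustrative, not used by the driver): with the values
 * used in em_hardware_init() below, RTE_ETHER_MAX_LEN is 1518, so
 * PMD_ROUNDUP(1518 * 2, 1024) = ((3036 + 1023) / 1024) * 1024 = 3072,
 * i.e. the macro rounds 3036 up to the next multiple of 1024.
 */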


static int eth_em_configure(struct rte_eth_dev *dev);
static int eth_em_start(struct rte_eth_dev *dev);
static int eth_em_stop(struct rte_eth_dev *dev);
static int eth_em_close(struct rte_eth_dev *dev);
static int eth_em_promiscuous_enable(struct rte_eth_dev *dev);
static int eth_em_promiscuous_disable(struct rte_eth_dev *dev);
static int eth_em_allmulticast_enable(struct rte_eth_dev *dev);
static int eth_em_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_em_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static int eth_em_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *rte_stats);
static int eth_em_stats_reset(struct rte_eth_dev *dev);
static int eth_em_infos_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
static int eth_em_flow_ctrl_get(struct rte_eth_dev *dev,
                                struct rte_eth_fc_conf *fc_conf);
static int eth_em_flow_ctrl_set(struct rte_eth_dev *dev,
                                struct rte_eth_fc_conf *fc_conf);
static int eth_em_interrupt_setup(struct rte_eth_dev *dev);
static int eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_em_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_em_interrupt_action(struct rte_eth_dev *dev,
                                   struct rte_intr_handle *handle);
static void eth_em_interrupt_handler(void *param);

static int em_hw_init(struct e1000_hw *hw);
static int em_hardware_init(struct e1000_hw *hw);
static void em_hw_control_acquire(struct e1000_hw *hw);
static void em_hw_control_release(struct e1000_hw *hw);
static void em_init_manageability(struct e1000_hw *hw);
static void em_release_manageability(struct e1000_hw *hw);

static int eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int eth_em_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
static int eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void em_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void em_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void em_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void em_vlan_hw_strip_disable(struct rte_eth_dev *dev);

/*
static void eth_em_vlan_filter_set(struct rte_eth_dev *dev,
                                        uint16_t vlan_id, int on);
*/

static int eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
static void em_lsc_intr_disable(struct e1000_hw *hw);
static void em_rxq_intr_enable(struct e1000_hw *hw);
static void em_rxq_intr_disable(struct e1000_hw *hw);

static int eth_em_led_on(struct rte_eth_dev *dev);
static int eth_em_led_off(struct rte_eth_dev *dev);

static int em_get_rx_buffer_size(struct e1000_hw *hw);
static int eth_em_rar_set(struct rte_eth_dev *dev,
                        struct rte_ether_addr *mac_addr,
                        uint32_t index, uint32_t pool);
static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static int eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
                                         struct rte_ether_addr *addr);

static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
                                   struct rte_ether_addr *mc_addr_set,
                                   uint32_t nb_mc_addr);

#define EM_FC_PAUSE_TIME 0x0680
#define EM_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define EM_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

static enum e1000_fc_mode em_fc_setting = e1000_fc_full;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_em_map[] = {
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82540EM) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82545EM_COPPER) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82545EM_FIBER) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_COPPER) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_FIBER) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_QUAD_COPPER) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_COPPER) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_FIBER) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES_DUAL) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES_QUAD) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_COPPER) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571PT_QUAD_COPPER) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_FIBER) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_COPPER_LP) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_COPPER) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_FIBER) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_SERDES) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82573L) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574L) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574LA) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82583V) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH2_LV_LM) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_LM) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_V) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_LM) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_V) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM2) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V2) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM3) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V3) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM2) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V2) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LBG_I219_LM3) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM4) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V4) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM5) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V5) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_LM6) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_V6) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_LM7) },
        { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_V7) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops eth_em_ops = {
        .dev_configure        = eth_em_configure,
        .dev_start            = eth_em_start,
        .dev_stop             = eth_em_stop,
        .dev_close            = eth_em_close,
        .promiscuous_enable   = eth_em_promiscuous_enable,
        .promiscuous_disable  = eth_em_promiscuous_disable,
        .allmulticast_enable  = eth_em_allmulticast_enable,
        .allmulticast_disable = eth_em_allmulticast_disable,
        .link_update          = eth_em_link_update,
        .stats_get            = eth_em_stats_get,
        .stats_reset          = eth_em_stats_reset,
        .dev_infos_get        = eth_em_infos_get,
        .mtu_set              = eth_em_mtu_set,
        .vlan_filter_set      = eth_em_vlan_filter_set,
        .vlan_offload_set     = eth_em_vlan_offload_set,
        .rx_queue_setup       = eth_em_rx_queue_setup,
        .rx_queue_release     = eth_em_rx_queue_release,
        .tx_queue_setup       = eth_em_tx_queue_setup,
        .tx_queue_release     = eth_em_tx_queue_release,
        .rx_queue_intr_enable = eth_em_rx_queue_intr_enable,
        .rx_queue_intr_disable = eth_em_rx_queue_intr_disable,
        .dev_led_on           = eth_em_led_on,
        .dev_led_off          = eth_em_led_off,
        .flow_ctrl_get        = eth_em_flow_ctrl_get,
        .flow_ctrl_set        = eth_em_flow_ctrl_set,
        .mac_addr_set         = eth_em_default_mac_addr_set,
        .mac_addr_add         = eth_em_rar_set,
        .mac_addr_remove      = eth_em_rar_clear,
        .set_mc_addr_list     = eth_em_set_mc_addr_list,
        .rxq_info_get         = em_rxq_info_get,
        .txq_info_get         = em_txq_info_get,
};
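
/*
 * Editor's note (illustrative): the generic ethdev API dispatches through
 * this table, e.g. rte_eth_dev_start(port) lands in eth_em_start() and
 * rte_eth_stats_get(port, ...) in eth_em_stats_get(); the Rx/Tx queue
 * handlers referenced here are defined in em_rxtx.c.
 */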


/**
 *  eth_em_dev_is_ich8 - Check for ICH8 device
 *  @hw: pointer to the HW structure
 *
 *  return TRUE for ICH8, otherwise FALSE
 **/
static bool
eth_em_dev_is_ich8(struct e1000_hw *hw)
{
        DEBUGFUNC("eth_em_dev_is_ich8");

        switch (hw->device_id) {
        case E1000_DEV_ID_PCH2_LV_LM:
        case E1000_DEV_ID_PCH_LPT_I217_LM:
        case E1000_DEV_ID_PCH_LPT_I217_V:
        case E1000_DEV_ID_PCH_LPTLP_I218_LM:
        case E1000_DEV_ID_PCH_LPTLP_I218_V:
        case E1000_DEV_ID_PCH_I218_V2:
        case E1000_DEV_ID_PCH_I218_LM2:
        case E1000_DEV_ID_PCH_I218_V3:
        case E1000_DEV_ID_PCH_I218_LM3:
        case E1000_DEV_ID_PCH_SPT_I219_LM:
        case E1000_DEV_ID_PCH_SPT_I219_V:
        case E1000_DEV_ID_PCH_SPT_I219_LM2:
        case E1000_DEV_ID_PCH_SPT_I219_V2:
        case E1000_DEV_ID_PCH_LBG_I219_LM3:
        case E1000_DEV_ID_PCH_SPT_I219_LM4:
        case E1000_DEV_ID_PCH_SPT_I219_V4:
        case E1000_DEV_ID_PCH_SPT_I219_LM5:
        case E1000_DEV_ID_PCH_SPT_I219_V5:
        case E1000_DEV_ID_PCH_CNP_I219_LM6:
        case E1000_DEV_ID_PCH_CNP_I219_V6:
        case E1000_DEV_ID_PCH_CNP_I219_LM7:
        case E1000_DEV_ID_PCH_CNP_I219_V7:
                return 1;
        default:
                return 0;
        }
}

static int
eth_em_dev_init(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct e1000_adapter *adapter =
                E1000_DEV_PRIVATE(eth_dev->data->dev_private);
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct e1000_vfta *shadow_vfta =
                E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);

        eth_dev->dev_ops = &eth_em_ops;
        eth_dev->rx_queue_count = eth_em_rx_queue_count;
        eth_dev->rx_descriptor_done   = eth_em_rx_descriptor_done;
        eth_dev->rx_descriptor_status = eth_em_rx_descriptor_status;
        eth_dev->tx_descriptor_status = eth_em_tx_descriptor_status;
        eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;
        eth_dev->tx_pkt_burst = (eth_tx_burst_t)&eth_em_xmit_pkts;
        eth_dev->tx_pkt_prepare = (eth_tx_prep_t)&eth_em_prep_pkts;

        /* For secondary processes, we don't initialise any further, as the
         * primary process has already done this work. Just check whether a
         * different RX function is needed. */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                if (eth_dev->data->scattered_rx)
                        eth_dev->rx_pkt_burst =
                                (eth_rx_burst_t)&eth_em_recv_scattered_pkts;
                return 0;
        }

        rte_eth_copy_pci_info(eth_dev, pci_dev);
        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
        hw->device_id = pci_dev->id.device_id;
        adapter->stopped = 0;

        /* For ICH8 support we'll need to map the flash memory BAR */
        if (eth_em_dev_is_ich8(hw))
                hw->flash_address = (void *)pci_dev->mem_resource[1].addr;

        if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS ||
                        em_hw_init(hw) != 0) {
                PMD_INIT_LOG(ERR, "port_id %d vendorID=0x%x deviceID=0x%x: "
                        "failed to init HW",
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);
                return -ENODEV;
        }

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("e1000", RTE_ETHER_ADDR_LEN *
                        hw->mac.rar_entry_count, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
                        "store MAC addresses",
                        RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
                eth_dev->data->mac_addrs);

        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));

        PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        rte_intr_callback_register(intr_handle,
                                   eth_em_interrupt_handler, eth_dev);

        return 0;
}
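
/*
 * Illustrative usage sketch (editor's addition, not part of the driver):
 * after rte_eal_init() has probed the device through eth_em_pci_probe()
 * below, a typical application brings the port up as follows (assumes
 * port 0 and an mbuf pool named mb_pool; error handling omitted):
 *
 *   struct rte_eth_conf conf = { 0 };
 *   rte_eth_dev_configure(0, 1, 1, &conf);
 *   rte_eth_rx_queue_setup(0, 0, 256, rte_socket_id(), NULL, mb_pool);
 *   rte_eth_tx_queue_setup(0, 0, 256, rte_socket_id(), NULL);
 *   rte_eth_dev_start(0);
 */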

static int
eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        eth_em_close(eth_dev);

        return 0;
}

static int eth_em_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                sizeof(struct e1000_adapter), eth_em_dev_init);
}

static int eth_em_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, eth_em_dev_uninit);
}

static struct rte_pci_driver rte_em_pmd = {
        .id_table = pci_id_em_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = eth_em_pci_probe,
        .remove = eth_em_pci_remove,
};

static int
em_hw_init(struct e1000_hw *hw)
{
        int diag;

        diag = hw->mac.ops.init_params(hw);
        if (diag != 0) {
                PMD_INIT_LOG(ERR, "MAC Initialization Error");
                return diag;
        }
        diag = hw->nvm.ops.init_params(hw);
        if (diag != 0) {
                PMD_INIT_LOG(ERR, "NVM Initialization Error");
                return diag;
        }
        diag = hw->phy.ops.init_params(hw);
        if (diag != 0) {
                PMD_INIT_LOG(ERR, "PHY Initialization Error");
                return diag;
        }
        (void) e1000_get_bus_info(hw);

        hw->mac.autoneg = 1;
        hw->phy.autoneg_wait_to_complete = 0;
        hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

        e1000_init_script_state_82541(hw, TRUE);
        e1000_set_tbi_compatibility_82543(hw, TRUE);

        /* Copper options */
        if (hw->phy.media_type == e1000_media_type_copper) {
                hw->phy.mdix = 0; /* AUTO_ALL_MODES */
                hw->phy.disable_polarity_correction = 0;
                hw->phy.ms_type = e1000_ms_hw_default;
        }

        /*
         * Start from a known state; this is important for reading the
         * NVM and MAC address.
         */
        e1000_reset_hw(hw);

        /* Make sure we have a good EEPROM before we read from it */
        if (e1000_validate_nvm_checksum(hw) < 0) {
                /*
                 * Some PCI-E parts fail the first check due to
                 * the link being in sleep state; call it again.
                 * If it fails a second time, it's a real issue.
                 */
                diag = e1000_validate_nvm_checksum(hw);
                if (diag < 0) {
                        PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
                        goto error;
                }
        }

        /* Read the permanent MAC address out of the EEPROM */
        diag = e1000_read_mac_addr(hw);
        if (diag != 0) {
                PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
                goto error;
        }

        /* Now initialize the hardware */
        diag = em_hardware_init(hw);
        if (diag != 0) {
                PMD_INIT_LOG(ERR, "Hardware initialization failed");
                goto error;
        }

        hw->mac.get_link_status = 1;

        /* Indicate SOL/IDER usage */
        diag = e1000_check_reset_block(hw);
        if (diag < 0) {
                PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
                        "SOL/IDER session");
        }
        return 0;

error:
        em_hw_control_release(hw);
        return diag;
}

static int
eth_em_configure(struct rte_eth_dev *dev)
{
        struct e1000_interrupt *intr =
                E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();
        intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;

        return 0;
}

static void
em_set_pba(struct e1000_hw *hw)
{
        uint32_t pba;

        /*
         * Packet Buffer Allocation (PBA)
         * Writing PBA sets the receive portion of the buffer;
         * the remainder is used for the transmit buffer.
         * Devices before the 82547 had a Packet Buffer of 64K.
         * After the 82547 the buffer was reduced to 40K.
         */
        switch (hw->mac.type) {
                case e1000_82547:
                case e1000_82547_rev_2:
                /* 82547: Total Packet Buffer is 40K */
                        pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
                        break;
                case e1000_82571:
                case e1000_82572:
                case e1000_80003es2lan:
                        pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
                        break;
                case e1000_82573: /* 82573: Total Packet Buffer is 32K */
                        pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
                        break;
                case e1000_82574:
                case e1000_82583:
                        pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
                        break;
                case e1000_ich8lan:
                        pba = E1000_PBA_8K;
                        break;
                case e1000_ich9lan:
                case e1000_ich10lan:
                        pba = E1000_PBA_10K;
                        break;
                case e1000_pchlan:
                case e1000_pch2lan:
                case e1000_pch_lpt:
                case e1000_pch_spt:
                case e1000_pch_cnp:
                        pba = E1000_PBA_26K;
                        break;
                default:
                        pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
        }

        E1000_WRITE_REG(hw, E1000_PBA, pba);
}
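
/*
 * Worked example (editor's note): for the 82573 the total packet buffer
 * is 32K and E1000_PBA_12K selects a 12K Rx / 20K Tx split, per the
 * comments above. The Rx portion written here is what
 * em_get_rx_buffer_size() later reads back to derive the flow-control
 * watermarks in em_hardware_init().
 */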

static void
eth_em_rxtx_control(struct rte_eth_dev *dev,
                    bool enable)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t tctl, rctl;

        tctl = E1000_READ_REG(hw, E1000_TCTL);
        rctl = E1000_READ_REG(hw, E1000_RCTL);
        if (enable) {
                /* enable Tx/Rx */
                tctl |= E1000_TCTL_EN;
                rctl |= E1000_RCTL_EN;
        } else {
                /* disable Tx/Rx */
                tctl &= ~E1000_TCTL_EN;
                rctl &= ~E1000_RCTL_EN;
        }
        E1000_WRITE_REG(hw, E1000_TCTL, tctl);
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);
        E1000_WRITE_FLUSH(hw);
}

static int
eth_em_start(struct rte_eth_dev *dev)
{
        struct e1000_adapter *adapter =
                E1000_DEV_PRIVATE(dev->data->dev_private);
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        int ret, mask;
        uint32_t intr_vector = 0;
        uint32_t *speeds;
        int num_speeds;
        bool autoneg;

        PMD_INIT_FUNC_TRACE();

        ret = eth_em_stop(dev);
        if (ret != 0)
                return ret;

        e1000_power_up_phy(hw);

        /* Set default PBA value */
        em_set_pba(hw);

        /* Put the address into the Receive Address Array */
        e1000_rar_set(hw, hw->mac.addr, 0);

        /*
         * With the 82571 adapter, RAR[0] may be overwritten
         * when the other port is reset. We make a duplicate
         * in RAR[14] for that eventuality; this ensures
         * the interface continues to function.
         */
        if (hw->mac.type == e1000_82571) {
                e1000_set_laa_state_82571(hw, TRUE);
                e1000_rar_set(hw, hw->mac.addr, E1000_RAR_ENTRIES - 1);
        }

        /* Initialize the hardware */
        if (em_hardware_init(hw)) {
                PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
                return -EIO;
        }

        E1000_WRITE_REG(hw, E1000_VET, RTE_ETHER_TYPE_VLAN);

        /* Configure for OS presence */
        em_init_manageability(hw);

        if (dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
        }

        if (rte_intr_dp_is_en(intr_handle)) {
                intr_handle->intr_vec =
                        rte_zmalloc("intr_vec",
                                        dev->data->nb_rx_queues * sizeof(int), 0);
                if (intr_handle->intr_vec == NULL) {
                        PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
                                                " intr_vec", dev->data->nb_rx_queues);
                        return -ENOMEM;
                }

                /* enable rx interrupt */
                em_rxq_intr_enable(hw);
        }

        eth_em_tx_init(dev);

        ret = eth_em_rx_init(dev);
        if (ret) {
                PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
                em_dev_clear_queues(dev);
                return ret;
        }

        e1000_clear_hw_cntrs_base_generic(hw);

        mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
                        ETH_VLAN_EXTEND_MASK;
        ret = eth_em_vlan_offload_set(dev, mask);
        if (ret) {
                PMD_INIT_LOG(ERR, "Unable to update vlan offload");
                em_dev_clear_queues(dev);
                return ret;
        }

        /* Set Interrupt Throttling Rate to maximum allowed value. */
        E1000_WRITE_REG(hw, E1000_ITR, UINT16_MAX);

        /* Setup link speed and duplex */
        speeds = &dev->data->dev_conf.link_speeds;
        if (*speeds == ETH_LINK_SPEED_AUTONEG) {
                hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
                hw->mac.autoneg = 1;
        } else {
                num_speeds = 0;
                autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;

                /* Reset */
                hw->phy.autoneg_advertised = 0;

                if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
                                ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
                                ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
                        num_speeds = -1;
                        goto error_invalid_config;
                }
                if (*speeds & ETH_LINK_SPEED_10M_HD) {
                        hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
                        num_speeds++;
                }
                if (*speeds & ETH_LINK_SPEED_10M) {
                        hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
                        num_speeds++;
                }
                if (*speeds & ETH_LINK_SPEED_100M_HD) {
                        hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
                        num_speeds++;
                }
                if (*speeds & ETH_LINK_SPEED_100M) {
                        hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
                        num_speeds++;
                }
                if (*speeds & ETH_LINK_SPEED_1G) {
                        hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
                        num_speeds++;
                }
                if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
                        goto error_invalid_config;

                /* Set/reset the mac.autoneg based on the link speed,
                 * fixed or not
                 */
                if (!autoneg) {
                        hw->mac.autoneg = 0;
                        hw->mac.forced_speed_duplex =
                                        hw->phy.autoneg_advertised;
                } else {
                        hw->mac.autoneg = 1;
                }
        }

        e1000_setup_link(hw);

        if (rte_intr_allow_others(intr_handle)) {
                /* check if lsc interrupt is enabled */
                if (dev->data->dev_conf.intr_conf.lsc != 0) {
                        ret = eth_em_interrupt_setup(dev);
                        if (ret) {
                                PMD_INIT_LOG(ERR, "Unable to setup interrupts");
                                em_dev_clear_queues(dev);
                                return ret;
                        }
                }
        } else {
                rte_intr_callback_unregister(intr_handle,
                                                eth_em_interrupt_handler,
                                                (void *)dev);
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        PMD_INIT_LOG(INFO, "lsc won't enable because of"
                                     " no intr multiplex");
        }
        /* check if rxq interrupt is enabled */
        if (dev->data->dev_conf.intr_conf.rxq != 0)
                eth_em_rxq_interrupt_setup(dev);

        rte_intr_enable(intr_handle);

        adapter->stopped = 0;

        eth_em_rxtx_control(dev, true);
        eth_em_link_update(dev, 0);

        PMD_INIT_LOG(DEBUG, "<<");

        return 0;

error_invalid_config:
        PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
                     dev->data->dev_conf.link_speeds, dev->data->port_id);
        em_dev_clear_queues(dev);
        return -EINVAL;
}
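
/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * speed block above maps rte_eth_conf.link_speeds onto the PHY
 * advertisement. For example, to force 100 Mb/s full duplex with autoneg
 * off, an application would configure, before rte_eth_dev_start():
 *
 *   struct rte_eth_conf conf = { 0 };
 *   conf.link_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_FIXED;
 *   rte_eth_dev_configure(0, 1, 1, &conf);
 *
 * With ETH_LINK_SPEED_FIXED set, exactly one speed may be selected;
 * otherwise eth_em_start() fails with -EINVAL via error_invalid_config.
 */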

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static int
eth_em_stop(struct rte_eth_dev *dev)
{
        struct rte_eth_link link;
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

        dev->data->dev_started = 0;

        eth_em_rxtx_control(dev, false);
        em_rxq_intr_disable(hw);
        em_lsc_intr_disable(hw);

        e1000_reset_hw(hw);

        /* Flush desc rings for i219 */
        if (hw->mac.type == e1000_pch_spt || hw->mac.type == e1000_pch_cnp)
                em_flush_desc_rings(dev);

        if (hw->mac.type >= e1000_82544)
                E1000_WRITE_REG(hw, E1000_WUC, 0);

        /* Power down the phy. Needed to make the link go down */
        e1000_power_down_phy(hw);

        em_dev_clear_queues(dev);

        /* clear the recorded link status */
        memset(&link, 0, sizeof(link));
        rte_eth_linkstatus_set(dev, &link);

        if (!rte_intr_allow_others(intr_handle))
                /* resume to the default handler */
                rte_intr_callback_register(intr_handle,
                                           eth_em_interrupt_handler,
                                           (void *)dev);

        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        if (intr_handle->intr_vec != NULL) {
                rte_free(intr_handle->intr_vec);
                intr_handle->intr_vec = NULL;
        }

        return 0;
}

static int
eth_em_close(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_adapter *adapter =
                E1000_DEV_PRIVATE(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        int ret;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        ret = eth_em_stop(dev);
        adapter->stopped = 1;
        em_dev_free_queues(dev);
        e1000_phy_hw_reset(hw);
        em_release_manageability(hw);
        em_hw_control_release(hw);

        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);
        rte_intr_callback_unregister(intr_handle,
                                     eth_em_interrupt_handler, dev);

        return ret;
}

static int
em_get_rx_buffer_size(struct e1000_hw *hw)
{
        uint32_t rx_buf_size;

        rx_buf_size = ((E1000_READ_REG(hw, E1000_PBA) & UINT16_MAX) << 10);
        return rx_buf_size;
}

/*********************************************************************
 *
 *  Initialize the hardware
 *
 **********************************************************************/
static int
em_hardware_init(struct e1000_hw *hw)
{
        uint32_t rx_buf_size;
        int diag;

        /* Issue a global reset */
        e1000_reset_hw(hw);

        /* Let the firmware know the OS is in control */
        em_hw_control_acquire(hw);

        /*
         * These parameters control the automatic generation (Tx) and
         * response (Rx) to Ethernet PAUSE frames.
         * - High water mark should allow for at least two standard size (1518)
         *   frames to be received after sending an XOFF.
         * - Low water mark works best when it is very near the high water mark.
         *   This allows the receiver to restart by sending XON when it has
         *   drained a bit. Here we use an arbitrary value of 1500 which will
         *   restart after one full frame is pulled from the buffer. There
         *   could be several smaller frames in the buffer and if so they will
         *   not trigger the XON until their total number reduces the buffer
         *   by 1500.
         * - The pause time is fairly large at 1000 x 512ns = 512 usec.
         */
        rx_buf_size = em_get_rx_buffer_size(hw);

        hw->fc.high_water = rx_buf_size -
                PMD_ROUNDUP(RTE_ETHER_MAX_LEN * 2, 1024);
        hw->fc.low_water = hw->fc.high_water - 1500;
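
        /*
         * Worked example (editor's note, assuming the PBA low word encodes
         * the Rx allocation in KB, as read back by em_get_rx_buffer_size()):
         * with the default 40K Rx buffer, rx_buf_size = 40 << 10 = 40960, so
         * high_water = 40960 - PMD_ROUNDUP(1518 * 2, 1024)
         *            = 40960 - 3072 = 37888
         * and low_water = 37888 - 1500 = 36388 bytes.
         */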

        if (hw->mac.type == e1000_80003es2lan)
                hw->fc.pause_time = UINT16_MAX;
        else
                hw->fc.pause_time = EM_FC_PAUSE_TIME;

        hw->fc.send_xon = 1;

        /* Set flow control, using the tunable setting if it is sane */
        if (em_fc_setting <= e1000_fc_full)
                hw->fc.requested_mode = em_fc_setting;
        else
                hw->fc.requested_mode = e1000_fc_none;

        /* Workaround: no TX flow ctrl for PCH */
        if (hw->mac.type == e1000_pchlan)
                hw->fc.requested_mode = e1000_fc_rx_pause;

        /* Override settings for PCH2LAN; yes, it's magic :) */
        if (hw->mac.type == e1000_pch2lan) {
                hw->fc.high_water = 0x5C20;
                hw->fc.low_water = 0x5048;
                hw->fc.pause_time = 0x0650;
                hw->fc.refresh_time = 0x0400;
        } else if (hw->mac.type == e1000_pch_lpt ||
                   hw->mac.type == e1000_pch_spt ||
                   hw->mac.type == e1000_pch_cnp) {
                hw->fc.requested_mode = e1000_fc_full;
        }

        diag = e1000_init_hw(hw);
        if (diag < 0)
                return diag;
        e1000_check_for_link(hw);
        return 0;
}

/* This function is based on em_update_stats_counters() in e1000/if_em.c */
static int
eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_hw_stats *stats =
                        E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        int pause_frames;

        if (hw->phy.media_type == e1000_media_type_copper ||
                        (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
                stats->symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
                stats->sec += E1000_READ_REG(hw, E1000_SEC);
        }

        stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
        stats->mpc += E1000_READ_REG(hw, E1000_MPC);
        stats->scc += E1000_READ_REG(hw, E1000_SCC);
        stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

        stats->mcc += E1000_READ_REG(hw, E1000_MCC);
        stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
        stats->colc += E1000_READ_REG(hw, E1000_COLC);
        stats->dc += E1000_READ_REG(hw, E1000_DC);
        stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
        stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
        stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);

        /*
         * For watchdog management we need to know if we have been
         * paused during the last interval, so capture that here.
         */
        pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
        stats->xoffrxc += pause_frames;
        stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
        stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
        stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
        stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
        stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
        stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
        stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
        stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
        stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
        stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
        stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
        stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

        /*
         * For the 64-bit byte counters the low dword must be read first.
         * Both registers clear on the read of the high dword.
         */

        stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
        stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
        stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
        stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);

        stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
        stats->ruc += E1000_READ_REG(hw, E1000_RUC);
        stats->rfc += E1000_READ_REG(hw, E1000_RFC);
        stats->roc += E1000_READ_REG(hw, E1000_ROC);
        stats->rjc += E1000_READ_REG(hw, E1000_RJC);

        stats->tor += E1000_READ_REG(hw, E1000_TORH);
        stats->tot += E1000_READ_REG(hw, E1000_TOTH);

        stats->tpr += E1000_READ_REG(hw, E1000_TPR);
        stats->tpt += E1000_READ_REG(hw, E1000_TPT);
        stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
        stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
        stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
        stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
        stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
        stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
        stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
        stats->bptc += E1000_READ_REG(hw, E1000_BPTC);

        /* Interrupt Counts */

        if (hw->mac.type >= e1000_82571) {
                stats->iac += E1000_READ_REG(hw, E1000_IAC);
                stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
                stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
                stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
                stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
                stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
                stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
                stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
                stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
        }

        if (hw->mac.type >= e1000_82543) {
                stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
                stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
                stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
                stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
                stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
                stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
        }

        if (rte_stats == NULL)
                return -EINVAL;

        /* Rx Errors */
        rte_stats->imissed = stats->mpc;
        rte_stats->ierrors = stats->crcerrs + stats->rlec +
                             stats->rxerrc + stats->algnerrc + stats->cexterr;

        /* Tx Errors */
        rte_stats->oerrors = stats->ecol + stats->latecol;

        rte_stats->ipackets = stats->gprc;
        rte_stats->opackets = stats->gptc;
        rte_stats->ibytes   = stats->gorc;
        rte_stats->obytes   = stats->gotc;
        return 0;
}
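
/*
 * Illustrative usage (editor's addition, not part of the driver): an
 * application reads the accumulated counters through the generic API:
 *
 *   struct rte_eth_stats st;
 *   if (rte_eth_stats_get(0, &st) == 0)
 *       printf("rx %"PRIu64" pkts, %"PRIu64" errors, %"PRIu64" missed\n",
 *              st.ipackets, st.ierrors, st.imissed);
 *
 * ierrors aggregates the CRC, receive-length, Rx, alignment and
 * carrier-extension error counters as computed above; imissed is the
 * missed-packet (MPC) counter.
 */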

static int
eth_em_stats_reset(struct rte_eth_dev *dev)
{
        struct e1000_hw_stats *hw_stats =
                        E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        /* HW registers are cleared on read */
        eth_em_stats_get(dev, NULL);

        /* Reset software totals */
        memset(hw_stats, 0, sizeof(*hw_stats));

        return 0;
}

static int
eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

        em_rxq_intr_enable(hw);
        rte_intr_ack(intr_handle);

        return 0;
}

static int
eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        em_rxq_intr_disable(hw);

        return 0;
}

uint32_t
em_get_max_pktlen(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        switch (hw->mac.type) {
        case e1000_82571:
        case e1000_82572:
        case e1000_ich9lan:
        case e1000_ich10lan:
        case e1000_pch2lan:
        case e1000_pch_lpt:
        case e1000_pch_spt:
        case e1000_pch_cnp:
        case e1000_82574:
        case e1000_80003es2lan: /* 9K Jumbo Frame size */
        case e1000_82583:
                return 0x2412;
        case e1000_pchlan:
                return 0x1000;
        /* Adapters that do not support jumbo frames */
        case e1000_ich8lan:
                return RTE_ETHER_MAX_LEN;
        default:
                return MAX_JUMBO_FRAME_SIZE;
        }
}

static int
eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
        dev_info->max_rx_pktlen = em_get_max_pktlen(dev);
        dev_info->max_mac_addrs = hw->mac.rar_entry_count;

        /*
         * Starting with the 631xESB, the hardware supports two TX/RX queues
         * per port. Unfortunately, all these NICs have just one TX context,
         * so we have a few choices for TX:
         * - Use just one TX queue.
         * - Allow cksum offload only for one TX queue.
         * - Don't allow TX cksum offload at all.
         * For now, option #1 was chosen.
         * To use the second RX queue we would have to use the extended RX
         * descriptor (multiple receive queues are mutually exclusive with UDP
         * fragmentation and are not supported when a legacy receive
         * descriptor format is used), which means separate RX routines, as
         * legacy NICs (82540, 82545) don't support extended RXDs.
         * To avoid that, we support just one RX queue for now (no RSS).
         */

        dev_info->max_rx_queues = 1;
        dev_info->max_tx_queues = 1;

        dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
        dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
                                    dev_info->rx_queue_offload_capa;
        dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev);
        dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |
                                    dev_info->tx_queue_offload_capa;

        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = E1000_MAX_RING_DESC,
                .nb_min = E1000_MIN_RING_DESC,
                .nb_align = EM_RXD_ALIGN,
        };

        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = E1000_MAX_RING_DESC,
                .nb_min = E1000_MIN_RING_DESC,
                .nb_align = EM_TXD_ALIGN,
                .nb_seg_max = EM_TX_MAX_SEG,
                .nb_mtu_seg_max = EM_TX_MAX_MTU_SEG,
        };

        dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
                        ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
                        ETH_LINK_SPEED_1G;

        /* Preferred queue parameters */
        dev_info->default_rxportconf.nb_queues = 1;
        dev_info->default_txportconf.nb_queues = 1;
        dev_info->default_txportconf.ring_size = 256;
        dev_info->default_rxportconf.ring_size = 256;

        return 0;
}
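
/*
 * Illustrative usage (editor's addition, not part of the driver):
 *
 *   struct rte_eth_dev_info info;
 *   rte_eth_dev_info_get(0, &info);
 *   // info.max_rx_queues == 1 and info.max_tx_queues == 1 for this PMD,
 *   // per the single-queue limitation explained above
 */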

/* return 0 means link status changed, -1 means not changed */
static int
eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_link link;
        int link_up, count;

        link_up = 0;
        hw->mac.get_link_status = 1;

        /* possible wait-to-complete in up to 9 seconds */
        for (count = 0; count < EM_LINK_UPDATE_CHECK_TIMEOUT; count++) {
                /* Read the real link status */
                switch (hw->phy.media_type) {
                case e1000_media_type_copper:
                        /* Do the work to read phy */
                        e1000_check_for_link(hw);
                        link_up = !hw->mac.get_link_status;
                        break;

                case e1000_media_type_fiber:
                        e1000_check_for_link(hw);
                        link_up = (E1000_READ_REG(hw, E1000_STATUS) &
                                        E1000_STATUS_LU);
                        break;

                case e1000_media_type_internal_serdes:
                        e1000_check_for_link(hw);
                        link_up = hw->mac.serdes_has_link;
                        break;

                default:
                        break;
                }
                if (link_up || wait_to_complete == 0)
                        break;
                rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL);
        }
        memset(&link, 0, sizeof(link));

        /* Now we check if a transition has happened */
        if (link_up) {
                uint16_t duplex, speed;
                hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
                link.link_duplex = (duplex == FULL_DUPLEX) ?
                                ETH_LINK_FULL_DUPLEX :
                                ETH_LINK_HALF_DUPLEX;
                link.link_speed = speed;
                link.link_status = ETH_LINK_UP;
                link.link_autoneg = !(dev->data->dev_conf.link_speeds &
                                ETH_LINK_SPEED_FIXED);
        } else {
                link.link_speed = ETH_SPEED_NUM_NONE;
                link.link_duplex = ETH_LINK_HALF_DUPLEX;
                link.link_status = ETH_LINK_DOWN;
                link.link_autoneg = ETH_LINK_FIXED;
        }

        return rte_eth_linkstatus_set(dev, &link);
}
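
/*
 * Illustrative usage (editor's addition, not part of the driver):
 * rte_eth_link_get() invokes this handler with wait_to_complete = 1, which
 * may poll for up to EM_LINK_UPDATE_CHECK_TIMEOUT *
 * EM_LINK_UPDATE_CHECK_INTERVAL = 90 * 100 ms = 9 s;
 * rte_eth_link_get_nowait() passes 0 and returns after a single check:
 *
 *   struct rte_eth_link link;
 *   rte_eth_link_get_nowait(0, &link);
 *   if (link.link_status == ETH_LINK_UP)
 *       printf("link up at %u Mb/s\n", link.link_speed);
 */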

/*
 * em_hw_control_acquire sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded. For AMT version type f/w
 * this means that the network i/f is open.
 */
static void
em_hw_control_acquire(struct e1000_hw *hw)
{
        uint32_t ctrl_ext, swsm;

        /* Let firmware know the driver has taken over */
        if (hw->mac.type == e1000_82573) {
                swsm = E1000_READ_REG(hw, E1000_SWSM);
                E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);

        } else {
                ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
                E1000_WRITE_REG(hw, E1000_CTRL_EXT,
                        ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
        }
}

/*
 * em_hw_control_release resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT versions of the
 * f/w this means that the network i/f is closed.
 */
static void
em_hw_control_release(struct e1000_hw *hw)
{
        uint32_t ctrl_ext, swsm;

        /* Let firmware take over control of h/w */
        if (hw->mac.type == e1000_82573) {
                swsm = E1000_READ_REG(hw, E1000_SWSM);
                E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
        } else {
                ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
                E1000_WRITE_REG(hw, E1000_CTRL_EXT,
                        ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
        }
}

/*
 * Bit of a misnomer: what this really means is to enable OS management
 * of the system, i.e. to disable special hardware management features.
 */
static void
em_init_manageability(struct e1000_hw *hw)
{
        if (e1000_enable_mng_pass_thru(hw)) {
                uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
                uint32_t manc = E1000_READ_REG(hw, E1000_MANC);

                /* disable hardware interception of ARP */
                manc &= ~(E1000_MANC_ARP_EN);

                /* enable receiving management packets to the host */
                manc |= E1000_MANC_EN_MNG2HOST;
                manc2h |= 1 << 5;  /* Mng Port 623 */
                manc2h |= 1 << 6;  /* Mng Port 664 */
                E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
                E1000_WRITE_REG(hw, E1000_MANC, manc);
        }
}

/*
 * Give control back to hardware management
 * controller if there is one.
 */
static void
em_release_manageability(struct e1000_hw *hw)
{
        uint32_t manc;

        if (e1000_enable_mng_pass_thru(hw)) {
                manc = E1000_READ_REG(hw, E1000_MANC);

                /* re-enable hardware interception of ARP */
                manc |= E1000_MANC_ARP_EN;
                manc &= ~E1000_MANC_EN_MNG2HOST;

                E1000_WRITE_REG(hw, E1000_MANC, manc);
        }
}

static int
eth_em_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t rctl;

        rctl = E1000_READ_REG(hw, E1000_RCTL);
        rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);

        return 0;
}

static int
eth_em_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t rctl;

        rctl = E1000_READ_REG(hw, E1000_RCTL);
        rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_SBP);
        if (dev->data->all_multicast == 1)
                rctl |= E1000_RCTL_MPE;
        else
                rctl &= (~E1000_RCTL_MPE);
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);

        return 0;
}

static int
eth_em_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t rctl;

        rctl = E1000_READ_REG(hw, E1000_RCTL);
        rctl |= E1000_RCTL_MPE;
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);

        return 0;
}

static int
eth_em_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t rctl;

        if (dev->data->promiscuous == 1)
                return 0; /* must remain in all_multicast mode */
        rctl = E1000_READ_REG(hw, E1000_RCTL);
        rctl &= (~E1000_RCTL_MPE);
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);

        return 0;
}

static int
eth_em_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_vfta *shadow_vfta =
                E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
        uint32_t vfta;
        uint32_t vid_idx;
        uint32_t vid_bit;

        vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
                              E1000_VFTA_ENTRY_MASK);
        vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
        vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
        if (on)
                vfta |= vid_bit;
        else
                vfta &= ~vid_bit;
        E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);

        /* update local VFTA copy */
        shadow_vfta->vfta[vid_idx] = vfta;

        return 0;
}
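
/*
 * Worked example (editor's note, assuming the usual 128 x 32-bit VFTA
 * layout, i.e. entry shift 5 and entry mask 0x7F): for vlan_id = 100,
 * vid_idx = (100 >> 5) & 0x7F = 3 and vid_bit = 1 << (100 & 0x1F) = 1 << 4,
 * so bit 4 of VFTA[3] controls acceptance of VLAN 100. Applications reach
 * this handler through rte_eth_dev_vlan_filter(port_id, 100, 1).
 */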
1360
1361 static void
1362 em_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1363 {
1364         struct e1000_hw *hw =
1365                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1366         uint32_t reg;
1367
1368         /* Filter Table Disable */
1369         reg = E1000_READ_REG(hw, E1000_RCTL);
1370         reg &= ~E1000_RCTL_CFIEN;
1371         reg &= ~E1000_RCTL_VFE;
1372         E1000_WRITE_REG(hw, E1000_RCTL, reg);
1373 }
1374
1375 static void
1376 em_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1377 {
1378         struct e1000_hw *hw =
1379                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_vfta *shadow_vfta =
                E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1382         uint32_t reg;
1383         int i;
1384
1385         /* Filter Table Enable, CFI not used for packet acceptance */
1386         reg = E1000_READ_REG(hw, E1000_RCTL);
1387         reg &= ~E1000_RCTL_CFIEN;
1388         reg |= E1000_RCTL_VFE;
1389         E1000_WRITE_REG(hw, E1000_RCTL, reg);
1390
1391         /* restore vfta from local copy */
1392         for (i = 0; i < IGB_VFTA_SIZE; i++)
1393                 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
1394 }
1395
1396 static void
1397 em_vlan_hw_strip_disable(struct rte_eth_dev *dev)
1398 {
1399         struct e1000_hw *hw =
1400                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1401         uint32_t reg;
1402
1403         /* VLAN Mode Disable */
1404         reg = E1000_READ_REG(hw, E1000_CTRL);
1405         reg &= ~E1000_CTRL_VME;
1406         E1000_WRITE_REG(hw, E1000_CTRL, reg);
}
1409
1410 static void
1411 em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
1412 {
1413         struct e1000_hw *hw =
1414                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1415         uint32_t reg;
1416
1417         /* VLAN Mode Enable */
1418         reg = E1000_READ_REG(hw, E1000_CTRL);
1419         reg |= E1000_CTRL_VME;
1420         E1000_WRITE_REG(hw, E1000_CTRL, reg);
1421 }
1422
1423 static int
1424 eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1425 {
1426         struct rte_eth_rxmode *rxmode;
1427
1428         rxmode = &dev->data->dev_conf.rxmode;
        if (mask & ETH_VLAN_STRIP_MASK) {
1430                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1431                         em_vlan_hw_strip_enable(dev);
1432                 else
1433                         em_vlan_hw_strip_disable(dev);
1434         }
1435
        if (mask & ETH_VLAN_FILTER_MASK) {
1437                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
1438                         em_vlan_hw_filter_enable(dev);
1439                 else
1440                         em_vlan_hw_filter_disable(dev);
1441         }
1442
1443         return 0;
1444 }
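
/*
 * Application-side usage sketch (illustrative only, not part of the
 * driver): request VLAN stripping and filtering at runtime through the
 * generic ethdev API, which updates rxmode.offloads and then invokes
 * the handler above:
 *
 *      ret = rte_eth_dev_set_vlan_offload(port_id,
 *                      ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);
 */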
1445
/*
 * Enable the link status change (LSC) interrupt: clear any pending
 * causes, then set the corresponding bits in the interrupt mask.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
1456 static int
1457 eth_em_interrupt_setup(struct rte_eth_dev *dev)
1458 {
1459         uint32_t regval;
1460         struct e1000_hw *hw =
1461                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1462
1463         /* clear interrupt */
1464         E1000_READ_REG(hw, E1000_ICR);
1465         regval = E1000_READ_REG(hw, E1000_IMS);
1466         E1000_WRITE_REG(hw, E1000_IMS,
1467                         regval | E1000_ICR_LSC | E1000_ICR_OTHER);
1468         return 0;
1469 }
1470
/*
 * Clear the pending interrupt causes and enable the Rx queue interrupt.
 * Called only once, during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
1482 static int
1483 eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev)
1484 {
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1487
1488         E1000_READ_REG(hw, E1000_ICR);
1489         em_rxq_intr_enable(hw);
1490         return 0;
1491 }
1492
/*
 * Enable the receive packet interrupt by setting the Rx timer bit in
 * the interrupt mask set register (IMS).
 *
 * @param hw
 *  Pointer to struct e1000_hw.
 */
1500 static void
1501 em_rxq_intr_enable(struct e1000_hw *hw)
1502 {
1503         E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_RXT0);
1504         E1000_WRITE_FLUSH(hw);
1505 }
1506
/*
 * Disable the link status change (LSC) interrupt by writing its mask
 * bits to the interrupt mask clear register (IMC).
 *
 * @param hw
 *  Pointer to struct e1000_hw.
 */
1514 static void
1515 em_lsc_intr_disable(struct e1000_hw *hw)
1516 {
1517         E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC | E1000_IMS_OTHER);
1518         E1000_WRITE_FLUSH(hw);
1519 }
1520
/*
 * Disable the receive packet interrupt: clear the pending causes by
 * reading ICR, then mask the Rx timer interrupt via IMC.
 *
 * @param hw
 *  Pointer to struct e1000_hw.
 */
1528 static void
1529 em_rxq_intr_disable(struct e1000_hw *hw)
1530 {
1531         E1000_READ_REG(hw, E1000_ICR);
1532         E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
1533         E1000_WRITE_FLUSH(hw);
1534 }
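
/*
 * Note on the interrupt registers used above: writing a 1 to a bit in
 * IMS (interrupt mask set) enables that interrupt source, writing a 1
 * to the same bit in IMC (interrupt mask clear) disables it, and ICR
 * (interrupt cause read) is read-to-clear for pending causes.
 */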
1535
/*
 * Read ICR to get the pending interrupt causes; if a link status
 * change is signalled, set a flag so the link status is updated later.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
1547 static int
1548 eth_em_interrupt_get_status(struct rte_eth_dev *dev)
1549 {
1550         uint32_t icr;
1551         struct e1000_hw *hw =
1552                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1553         struct e1000_interrupt *intr =
1554                 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1555
1556         /* read-on-clear nic registers here */
1557         icr = E1000_READ_REG(hw, E1000_ICR);
1558         if (icr & E1000_ICR_LSC) {
1559                 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
1560         }
1561
1562         return 0;
1563 }
1564
/*
 * Execute link_update once an interrupt is known to be present.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
1575 static int
1576 eth_em_interrupt_action(struct rte_eth_dev *dev,
1577                         struct rte_intr_handle *intr_handle)
1578 {
1579         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1580         struct e1000_hw *hw =
1581                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1582         struct e1000_interrupt *intr =
1583                 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1584         struct rte_eth_link link;
1585         int ret;
1586
1587         if (!(intr->flags & E1000_FLAG_NEED_LINK_UPDATE))
1588                 return -1;
1589
1590         intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
1591         rte_intr_ack(intr_handle);
1592
1593         /* set get_link_status to check register later */
1594         hw->mac.get_link_status = 1;
1595         ret = eth_em_link_update(dev, 0);
1596
1597         /* check if link has changed */
1598         if (ret < 0)
1599                 return 0;
1600
1601         rte_eth_linkstatus_get(dev, &link);
1602
1603         if (link.link_status) {
1604                 PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
1605                              dev->data->port_id, link.link_speed,
1606                              link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1607                              "full-duplex" : "half-duplex");
1608         } else {
1609                 PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
1610         }
1611         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1612                      pci_dev->addr.domain, pci_dev->addr.bus,
1613                      pci_dev->addr.devid, pci_dev->addr.function);
1614
1615         return 0;
1616 }
1617
/**
 * Interrupt handler; must be registered before the interrupt is enabled.
 *
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered
 *  when the callback was set up.
 *
 * @return
 *  void
 */
1629 static void
1630 eth_em_interrupt_handler(void *param)
1631 {
1632         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1633
1634         eth_em_interrupt_get_status(dev);
1635         eth_em_interrupt_action(dev, dev->intr_handle);
1636         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1637 }
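
/*
 * Registration sketch (performed once during device init; shown here
 * only for illustration):
 *
 *      rte_intr_callback_register(intr_handle,
 *                      eth_em_interrupt_handler, (void *)dev);
 */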
1638
1639 static int
1640 eth_em_led_on(struct rte_eth_dev *dev)
1641 {
1642         struct e1000_hw *hw;
1643
1644         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1645         return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
1646 }
1647
1648 static int
1649 eth_em_led_off(struct rte_eth_dev *dev)
1650 {
1651         struct e1000_hw *hw;
1652
1653         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1654         return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
1655 }
1656
1657 static int
1658 eth_em_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1659 {
1660         struct e1000_hw *hw;
1661         uint32_t ctrl;
1662         int tx_pause;
1663         int rx_pause;
1664
1665         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1666         fc_conf->pause_time = hw->fc.pause_time;
1667         fc_conf->high_water = hw->fc.high_water;
1668         fc_conf->low_water = hw->fc.low_water;
1669         fc_conf->send_xon = hw->fc.send_xon;
1670         fc_conf->autoneg = hw->mac.autoneg;
1671
1672         /*
1673          * Return rx_pause and tx_pause status according to actual setting of
1674          * the TFCE and RFCE bits in the CTRL register.
1675          */
1676         ctrl = E1000_READ_REG(hw, E1000_CTRL);
1677         if (ctrl & E1000_CTRL_TFCE)
1678                 tx_pause = 1;
1679         else
1680                 tx_pause = 0;
1681
1682         if (ctrl & E1000_CTRL_RFCE)
1683                 rx_pause = 1;
1684         else
1685                 rx_pause = 0;
1686
1687         if (rx_pause && tx_pause)
1688                 fc_conf->mode = RTE_FC_FULL;
1689         else if (rx_pause)
1690                 fc_conf->mode = RTE_FC_RX_PAUSE;
1691         else if (tx_pause)
1692                 fc_conf->mode = RTE_FC_TX_PAUSE;
1693         else
1694                 fc_conf->mode = RTE_FC_NONE;
1695
1696         return 0;
1697 }
1698
1699 static int
1700 eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1701 {
1702         struct e1000_hw *hw;
1703         int err;
1704         enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
1705                 e1000_fc_none,
1706                 e1000_fc_rx_pause,
1707                 e1000_fc_tx_pause,
1708                 e1000_fc_full
1709         };
1710         uint32_t rx_buf_size;
1711         uint32_t max_high_water;
1712         uint32_t rctl;
1713
1714         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1715         if (fc_conf->autoneg != hw->mac.autoneg)
1716                 return -ENOTSUP;
1717         rx_buf_size = em_get_rx_buffer_size(hw);
1718         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
1719
1720         /* At least reserve one Ethernet frame for watermark */
1721         max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
1722         if ((fc_conf->high_water > max_high_water) ||
1723             (fc_conf->high_water < fc_conf->low_water)) {
1724                 PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
                PMD_INIT_LOG(ERR, "high water must be <= 0x%x", max_high_water);
1726                 return -EINVAL;
1727         }
1728
1729         hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
1730         hw->fc.pause_time     = fc_conf->pause_time;
1731         hw->fc.high_water     = fc_conf->high_water;
1732         hw->fc.low_water      = fc_conf->low_water;
1733         hw->fc.send_xon       = fc_conf->send_xon;
1734
1735         err = e1000_setup_link_generic(hw);
1736         if (err == E1000_SUCCESS) {
                /* Check whether MAC control frames should be forwarded.
                 * The base driver has no native capability for this, so
                 * write the registers ourselves. */
1740
1741                 rctl = E1000_READ_REG(hw, E1000_RCTL);
1742
                /* set or clear RCTL.PMCF bit depending on configuration */
1744                 if (fc_conf->mac_ctrl_frame_fwd != 0)
1745                         rctl |= E1000_RCTL_PMCF;
1746                 else
1747                         rctl &= ~E1000_RCTL_PMCF;
1748
1749                 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1750                 E1000_WRITE_FLUSH(hw);
1751
1752                 return 0;
1753         }
1754
1755         PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
1756         return -EIO;
1757 }
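
/*
 * Application-side usage sketch (illustrative only): read the current
 * settings first so that the autoneg field matches the adapter state
 * (mismatches are rejected with -ENOTSUP above), then request full
 * flow control:
 *
 *      struct rte_eth_fc_conf fc_conf;
 *
 *      rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
 *      fc_conf.mode = RTE_FC_FULL;
 *      ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */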
1758
1759 static int
1760 eth_em_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1761                 uint32_t index, __rte_unused uint32_t pool)
1762 {
1763         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1764
1765         return e1000_rar_set(hw, mac_addr->addr_bytes, index);
1766 }
1767
1768 static void
1769 eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
1770 {
1771         uint8_t addr[RTE_ETHER_ADDR_LEN];
1772         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1773
1774         memset(addr, 0, sizeof(addr));
1775
1776         e1000_rar_set(hw, addr, index);
1777 }
1778
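/*
 * Replace the default MAC address by clearing receive address
 * register 0 (RAR[0]) and re-programming it with the new address.
 */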
1779 static int
1780 eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
1781                             struct rte_ether_addr *addr)
1782 {
1783         eth_em_rar_clear(dev, 0);
1784
1785         return eth_em_rar_set(dev, (void *)addr, 0, 0);
1786 }
1787
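/*
 * Worked example: with the standard 1500-byte MTU, frame_size is
 * 1500 + E1000_ETH_OVERHEAD (Ethernet header + CRC + one VLAN tag,
 * i.e. 22 bytes) = 1522 bytes, which equals E1000_ETH_MAX_LEN; any
 * larger MTU switches the port into jumbo-frame mode below.
 */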
1788 static int
1789 eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1790 {
1791         struct rte_eth_dev_info dev_info;
1792         struct e1000_hw *hw;
1793         uint32_t frame_size;
1794         uint32_t rctl;
1795         int ret;
1796
1797         ret = eth_em_infos_get(dev, &dev_info);
1798         if (ret != 0)
1799                 return ret;
1800
1801         frame_size = mtu + E1000_ETH_OVERHEAD;
1802
1803         /* check that mtu is within the allowed range */
1804         if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
1805                 return -EINVAL;
1806
1807         /*
1808          * If device is started, refuse mtu that requires the support of
1809          * scattered packets when this feature has not been enabled before.
1810          */
1811         if (dev->data->dev_started && !dev->data->scattered_rx &&
1812             frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
1813                 PMD_INIT_LOG(ERR, "Stop port first.");
1814                 return -EINVAL;
1815         }
1816
1817         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1818         rctl = E1000_READ_REG(hw, E1000_RCTL);
1819
1820         /* switch to jumbo mode if needed */
1821         if (frame_size > E1000_ETH_MAX_LEN) {
1822                 dev->data->dev_conf.rxmode.offloads |=
1823                         DEV_RX_OFFLOAD_JUMBO_FRAME;
1824                 rctl |= E1000_RCTL_LPE;
1825         } else {
1826                 dev->data->dev_conf.rxmode.offloads &=
1827                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1828                 rctl &= ~E1000_RCTL_LPE;
1829         }
1830         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1831
1832         /* update max frame size */
1833         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1834         return 0;
1835 }
1836
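/*
 * Replace the entire multicast filter list; the base-code helper
 * programs the hardware multicast table array (MTA) accordingly.
 */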
1837 static int
1838 eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
1839                         struct rte_ether_addr *mc_addr_set,
1840                         uint32_t nb_mc_addr)
1841 {
1842         struct e1000_hw *hw;
1843
1844         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1845         e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
1846         return 0;
1847 }
1848
1849 RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd);
1850 RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);
1851 RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio-pci");