lib/librte_pmd_ixgbe/ixgbe_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ixgbe_logs.h"
#include "ixgbe/ixgbe_api.h"
#include "ixgbe/ixgbe_vf.h"
#include "ixgbe/ixgbe_common.h"
#include "ixgbe_ethdev.h"

/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI    0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO    0x40

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680
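
/*
 * For example: with the values above, XOFF pause frames are sent once
 * 128 KB (0x80 * 1 KB) of the receive packet buffer is in use, and XON
 * frames once it drains back below 64 KB. The pause timer 0x680 (1664)
 * is expressed in IEEE 802.3x pause quanta of 512 bit times, i.e.
 * roughly 85 us at 10 Gb/s.
 */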

#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */

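/*
 * Number of per-queue statistics counters, derived from the length of the
 * qprc[] array in struct ixgbe_hw_stats so it stays in sync with the
 * hardware-stats definition.
 */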
#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))

static int eth_ixgbe_dev_init(struct eth_driver *eth_drv,
                struct rte_eth_dev *eth_dev);
static int  ixgbe_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q,
                                uint16_t nb_tx_q);
static int  ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
static void ixgbe_dev_close(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *stats);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint16_t queue_id,
                                             uint8_t stat_idx,
                                             uint8_t is_rx);
static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
static void ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
                                  uint16_t vlan_id,
                                  int on);
static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int  ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
                                struct rte_eth_fc_conf *fc_conf);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
                                                        void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                                uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);

/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
                struct rte_eth_dev *eth_dev);
static int  ixgbevf_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q,
                uint16_t nb_tx_q);
static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);

/*
 * Define VF stats macros for registers that are not "clear on read".
 */
#define UPDATE_VF_STAT(reg, last, cur)                          \
{                                                               \
        u32 latest = IXGBE_READ_REG(hw, reg);                   \
        cur += latest - last;                                   \
        last = latest;                                          \
}

#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
{                                                                \
        u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
        u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
        u64 latest = ((new_msb << 32) | new_lsb);                \
        cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
        last = latest;                                           \
}
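
/*
 * Example: 36-bit counter wrap-around as handled by UPDATE_VF_STAT_36BIT.
 * If the previous snapshot was 0xFFFFFFFFE and the register has since
 * wrapped around to 0x2, then
 *   (0x1000000000 + 0x2 - 0xFFFFFFFFE) & 0xFFFFFFFFF = 4,
 * the correct delta to accumulate (0x1000000000 is 2^36).
 */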

/*
 * The set of PCI devices this driver supports
 */
static struct rte_pci_id pci_id_ixgbe_map[] = {

#undef RTE_LIBRTE_IGB_PMD
#define RTE_PCI_DEV_ID_DECL(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{ .vendor_id = 0, /* sentinel */ },
};

/*
 * The set of PCI devices this driver supports (for 82599 VF)
 */
static struct rte_pci_id pci_id_ixgbevf_map[] = {
{
        .vendor_id = PCI_VENDOR_ID_INTEL,
        .device_id = IXGBE_DEV_ID_82599_VF,
        .subsystem_vendor_id = PCI_ANY_ID,
        .subsystem_device_id = PCI_ANY_ID,
},
{ .vendor_id = 0, /* sentinel */ },
};

static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .dev_configure        = ixgbe_dev_configure,
        .dev_start            = ixgbe_dev_start,
        .dev_stop             = ixgbe_dev_stop,
        .dev_close            = ixgbe_dev_close,
        .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
        .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
        .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
        .allmulticast_disable = ixgbe_dev_allmulticast_disable,
        .link_update          = ixgbe_dev_link_update,
        .stats_get            = ixgbe_dev_stats_get,
        .stats_reset          = ixgbe_dev_stats_reset,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .dev_infos_get        = ixgbe_dev_info_get,
        .vlan_filter_set      = ixgbe_vlan_filter_set,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .dev_led_on           = ixgbe_dev_led_on,
        .dev_led_off          = ixgbe_dev_led_off,
        .flow_ctrl_set        = ixgbe_flow_ctrl_set,
        .mac_addr_add         = ixgbe_add_rar,
        .mac_addr_remove      = ixgbe_remove_rar,
        .fdir_add_signature_filter    = ixgbe_fdir_add_signature_filter,
        .fdir_update_signature_filter = ixgbe_fdir_update_signature_filter,
        .fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter,
        .fdir_infos_get               = ixgbe_fdir_info_get,
        .fdir_add_perfect_filter      = ixgbe_fdir_add_perfect_filter,
        .fdir_update_perfect_filter   = ixgbe_fdir_update_perfect_filter,
        .fdir_remove_perfect_filter   = ixgbe_fdir_remove_perfect_filter,
        .fdir_set_masks               = ixgbe_fdir_set_masks,
};

/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .dev_configure        = ixgbevf_dev_configure,
        .dev_start            = ixgbevf_dev_start,
        .dev_stop             = ixgbevf_dev_stop,
        .link_update          = ixgbe_dev_link_update,
        .stats_get            = ixgbevf_dev_stats_get,
        .stats_reset          = ixgbevf_dev_stats_reset,
        .dev_close            = ixgbevf_dev_stop,
        .dev_infos_get        = ixgbe_dev_info_get,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
};

/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   Pointer to the buffer where the read link status is saved.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}
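
/*
 * Note: rte_atomic64_cmpset(dst, *dst, *src) is used in the helpers above
 * and below purely as an atomic 64-bit copy. struct rte_eth_link fits in
 * 64 bits (hence the uint64_t casts), so the compare-and-set either
 * replaces the whole link record in one shot, or fails (returns 0) if
 * another thread updated it between the read of *dst and the cmpset.
 */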

/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   Pointer to the buffer holding the link status to write.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &(dev->data->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

/*
 * This function is the same as ixgbe_is_sfp() in ixgbe/ixgbe.h.
 */
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
        switch (hw->phy.type) {
        case ixgbe_phy_sfp_avago:
        case ixgbe_phy_sfp_ftl:
        case ixgbe_phy_sfp_intel:
        case ixgbe_phy_sfp_unknown:
        case ixgbe_phy_sfp_passive_tyco:
        case ixgbe_phy_sfp_passive_unknown:
                return 1;
        default:
                return 0;
        }
}

/*
 * This function is based on ixgbe_disable_intr() in ixgbe/ixgbe.h.
 */
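/*
 * Note: on 82599/X540 the low 16 EICR bits alias the extended queue
 * interrupt registers, so the code below masks them through the two
 * EIMC_EX writes and uses 0xFFFF0000 for the remaining causes; the
 * 82598 has a single EIMC register covering everything.
 */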
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        if (hw->mac.type == ixgbe_mac_82598EB) {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
        } else {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
        }
        IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function resets queue statistics mapping registers.
 * From Niantic datasheet, Initialization of Statistics section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset."
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
        uint32_t i;

        for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
        }
}

static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                  uint16_t queue_id,
                                  uint8_t stat_idx,
                                  uint8_t is_rx)
{
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
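        /*
         * Each 32-bit RQSMR/TQSM register packs four 8-bit queue-to-counter
         * fields. E.g. queue_id 5 lands in register n = 5/4 = 1 at field
         * offset 5 % 4 = 1, i.e. bits 15:8 of RQSMR[1] (or TQSM[1]).
         */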

        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_stat_mapping_registers *stat_mappings =
                IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
        uint32_t qsmr_mask = 0;
        uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
        uint32_t q_map;
        uint8_t n, offset;

        if ((hw->mac.type != ixgbe_mac_82599EB) && (hw->mac.type != ixgbe_mac_X540))
                return -ENOSYS;

        PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx);

        n = queue_id / NB_QMAP_FIELDS_PER_QSM_REG;
        if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
                PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded\n");
                return -EIO;
        }
        offset = queue_id % NB_QMAP_FIELDS_PER_QSM_REG;

        /* Now clear any previous stat_idx set */
        clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] &= ~clearing_mask;
        else
                stat_mappings->rqsmr[n] &= ~clearing_mask;

        q_map = (uint32_t)stat_idx;
        q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
        qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] |= qsmr_mask;
        else
                stat_mappings->rqsmr[n] |= qsmr_mask;

        PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d\n"
                     "%s[%d] = 0x%08x\n",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx,
                     is_rx ? "RQSMR" : "TQSM", n, is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

        /* Now write the mapping in the appropriate register */
        if (is_rx) {
                PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d\n",
                             stat_mappings->rqsmr[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
        } else {
                PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d\n",
                             stat_mappings->tqsm[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
        }
        return 0;
}
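
/*
 * Usage sketch (illustrative; hypothetical port/queue numbers, and assumes
 * the rte_eth_dev_set_rx_queue_stats_mapping() wrapper in librte_ether
 * dispatches to this op). Mapping RX queue 3 of port 0 onto stats slot 1
 * makes its totals appear in rte_eth_stats.q_ipackets[1] / q_ibytes[1]:
 *
 *      rte_eth_dev_set_rx_queue_stats_mapping(0, 3, 1);
 */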

static void
ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
        struct ixgbe_stat_mapping_registers *stat_mappings =
                IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i;

        /* write whatever was in stat mapping table to the NIC */
        for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
                /* rx */
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);

                /* tx */
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
        }
}

/*
 * This function is based on code in ixgbe_attach() in ixgbe/ixgbe.c.
 * It returns 0 on success.
 */
static int
eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                     struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_vfta *shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
        uint32_t ctrl_ext;
        uint16_t csum;
        int diag, i;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &ixgbe_eth_dev_ops;
        eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
        eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;

        /* for secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX function */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                if (eth_dev->data->scattered_rx)
                        eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
                return 0;
        }
        pci_dev = eth_dev->pci_dev;

        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource.addr;

        /* Initialize the shared code */
        diag = ixgbe_init_shared_code(hw);
        if (diag != IXGBE_SUCCESS) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
                return -EIO;
        }

        /* Get Hardware Flow Control setting */
        hw->fc.requested_mode = ixgbe_fc_full;
        hw->fc.current_mode = ixgbe_fc_full;
        hw->fc.pause_time = IXGBE_FC_PAUSE;
        hw->fc.low_water = IXGBE_FC_LO;
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
                hw->fc.high_water[i] = IXGBE_FC_HI;
        hw->fc.send_xon = 1;

        ixgbe_disable_intr(hw);

        /* Make sure we have a good EEPROM before we read from it */
        diag = ixgbe_validate_eeprom_checksum(hw, &csum);
        if (diag != IXGBE_SUCCESS) {
                PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
                return -EIO;
        }

        diag = ixgbe_init_hw(hw);

        /*
         * Devices with copper phys will fail to initialise if ixgbe_init_hw()
         * is called too soon after the kernel driver unbinding/binding occurs.
         * The failure occurs in ixgbe_identify_phy_generic() for all devices,
         * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
         * also called. See ixgbe_identify_phy_82599(). The reason for the
         * failure is not known, and it only occurs when virtualisation
         * features are disabled in the BIOS. A delay of 100ms was found to be
         * enough by trial-and-error, and is doubled to be safe.
         */
        if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
                rte_delay_ms(200);
                diag = ixgbe_init_hw(hw);
        }

        if (diag == IXGBE_ERR_EEPROM_VERSION) {
                PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
                    "LOM.  Please be aware there may be issues associated "
                    "with your hardware.\nIf you are experiencing problems "
                    "please contact your Intel or hardware representative "
                    "who provided you with this hardware.\n");
        } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
                PMD_INIT_LOG(ERR, "Unsupported SFP+ Module\n");
        if (diag) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
                return -EIO;
        }

        /* pick up the PCI bus settings for reporting later */
        ixgbe_get_bus_info(hw);

        /* reset mappings for queue statistics hw counters */
        ixgbe_reset_qstat_mappings(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
                        hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                        "Failed to allocate %d bytes needed to store MAC addresses",
                        ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }
        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));

        /* let hardware know driver is loaded */
        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

        if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
                PMD_INIT_LOG(DEBUG,
                             "MAC: %d, PHY: %d, SFP+: %d\n",
                             (int) hw->mac.type, (int) hw->phy.type,
                             (int) hw->phy.sfp_type);
        else
                PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d\n",
                             (int) hw->mac.type, (int) hw->phy.type);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);

        rte_intr_callback_register(&(pci_dev->intr_handle),
                ixgbe_dev_interrupt_handler, (void *)eth_dev);

        return 0;
}

/*
 * Virtual Function device init
 */
static int
eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                     struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        int diag;

        PMD_INIT_LOG(DEBUG, "eth_ixgbevf_dev_init");

        eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
        pci_dev = eth_dev->pci_dev;

        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource.addr;

        /* Initialize the shared code */
        diag = ixgbe_init_shared_code(hw);
        if (diag != IXGBE_SUCCESS) {
                PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
                return -EIO;
        }

        /* init_mailbox_params */
        hw->mbx.ops.init_params(hw);

        /* Disable the interrupts for VF */
        ixgbevf_intr_disable(hw);

        hw->mac.num_rar_entries = hw->mac.max_rx_queues;
        diag = hw->mac.ops.reset_hw(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
                        hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                        "Failed to allocate %d bytes needed to store MAC addresses",
                        ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }
        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        /* reset the hardware with the new settings */
        diag = hw->mac.ops.start_hw(hw);
        switch (diag) {
                case 0:
                        break;

                default:
                        PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
                        return (diag);
        }

        PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
                         eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id,
                         "ixgbe_mac_82599_vf");

        return 0;
}

static struct eth_driver rte_ixgbe_pmd = {
        {
                .name = "rte_ixgbe_pmd",
                .id_table = pci_id_ixgbe_map,
                .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
        },
        .eth_dev_init = eth_ixgbe_dev_init,
        .dev_private_size = sizeof(struct ixgbe_adapter),
};

/*
 * virtual function driver struct
 */
static struct eth_driver rte_ixgbevf_pmd = {
        {
                .name = "rte_ixgbevf_pmd",
                .id_table = pci_id_ixgbevf_map,
                .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
        },
        .eth_dev_init = eth_ixgbevf_dev_init,
        .dev_private_size = sizeof(struct ixgbe_adapter),
};

/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
 */
int
rte_ixgbe_pmd_init(void)
{
        PMD_INIT_FUNC_TRACE();

        rte_eth_driver_register(&rte_ixgbe_pmd);
        return 0;
}
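
/*
 * Typical call flow (sketch; the application side is illustrative): the
 * application runs rte_eal_init(), then rte_ixgbe_pmd_init() and/or
 * rte_ixgbevf_pmd_init() to register these drivers, after which the EAL
 * PCI probe matches the id_table entries above against the devices bound
 * to igb_uio and invokes eth_dev_init for each match.
 */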

/*
 * VF Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
 */
int
rte_ixgbevf_pmd_init(void)
{
        DEBUGFUNC("rte_ixgbevf_pmd_init");

        rte_eth_driver_register(&rte_ixgbevf_pmd);
        return (0);
}

static void
ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_vfta *shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
        uint32_t vfta;
        uint32_t vid_idx;
        uint32_t vid_bit;

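        /*
         * The 4096 VLAN IDs are tracked in the 128-entry VFTA bit array:
         * bits 11:5 of the VLAN ID select the 32-bit register, bits 4:0
         * the bit within it. E.g. vlan_id 100 -> VFTA[3], bit 4.
         */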
        vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
        vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
        vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
        if (on)
                vfta |= vid_bit;
        else
                vfta &= ~vid_bit;
        IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);

        /* update local VFTA copy */
        shadow_vfta->vfta[vid_idx] = vfta;
}

static void
ixgbe_vlan_hw_support_disable(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t vlnctrl;
        uint32_t rxdctl;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Disable */
        vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
        vlnctrl &= ~IXGBE_VLNCTRL_VFE;

        if (hw->mac.type == ixgbe_mac_82598EB)
                vlnctrl &= ~IXGBE_VLNCTRL_VME;
        else {
                /* On 82599 the VLAN enable is per-queue in RXDCTL */
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
                        rxdctl &= ~IXGBE_RXDCTL_VME;
                        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
                }
        }
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
}

static void
ixgbe_vlan_hw_support_enable(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_vfta *shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
        uint32_t vlnctrl;
        uint32_t rxdctl;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Enable */
        vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
        vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
        vlnctrl |= IXGBE_VLNCTRL_VFE;

        if (hw->mac.type == ixgbe_mac_82598EB)
                vlnctrl |= IXGBE_VLNCTRL_VME;
        else {
                /* On 82599 the VLAN enable is per-queue in RXDCTL */
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
                        rxdctl |= IXGBE_RXDCTL_VME;
                        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
                }
        }
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);

        /* write whatever is in local vfta copy */
        for (i = 0; i < IXGBE_VFTA_SIZE; i++)
                IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
}

static int
ixgbe_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, uint16_t nb_tx_q)
{
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        int diag;

        PMD_INIT_FUNC_TRACE();

        /* Allocate the array of pointers to RX queue structures */
        diag = ixgbe_dev_rx_queue_alloc(dev, nb_rx_q);
        if (diag != 0) {
                PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d "
                             "pointers to RX queues failed", dev->data->port_id,
                             nb_rx_q);
                return diag;
        }

        /* Allocate the array of pointers to TX queue structures */
        diag = ixgbe_dev_tx_queue_alloc(dev, nb_tx_q);
        if (diag != 0) {
                PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d "
                             "pointers to TX queues failed", dev->data->port_id,
                             nb_tx_q);
                return diag;
        }

        /* set flag to update link status after init */
        intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;

        return 0;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ixgbe_dev_start(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err, link_up = 0, negotiate = 0;
        uint32_t speed = 0;

        PMD_INIT_FUNC_TRACE();

        /* IXGBE devices don't support half duplex */
        if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
                        (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
                PMD_INIT_LOG(ERR, "Invalid link_duplex (%u) for port %u\n",
                                dev->data->dev_conf.link_duplex,
                                dev->data->port_id);
                return -EINVAL;
        }

        /* stop adapter */
        hw->adapter_stopped = FALSE;
        ixgbe_stop_adapter(hw);

        /* reinitialize adapter
         * this calls reset and start */
        ixgbe_init_hw(hw);

        /* initialize transmission unit */
        ixgbe_dev_tx_init(dev);

        /* This can fail when allocating mbufs for descriptor rings */
        err = ixgbe_dev_rx_init(dev);
        if (err) {
                PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
                return err;
        }

        ixgbe_dev_rxtx_start(dev);

        if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
                err = hw->mac.ops.setup_sfp(hw);
                if (err)
                        goto error;
        }

        /* Turn on the laser */
        if (hw->phy.multispeed_fiber)
                ixgbe_enable_tx_laser(hw);

        err = ixgbe_check_link(hw, &speed, &link_up, 0);
        if (err)
                goto error;
        err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
        if (err)
                goto error;

        switch (dev->data->dev_conf.link_speed) {
        case ETH_LINK_SPEED_AUTONEG:
                speed = (hw->mac.type != ixgbe_mac_82598EB) ?
                                IXGBE_LINK_SPEED_82599_AUTONEG :
                                IXGBE_LINK_SPEED_82598_AUTONEG;
                break;
        case ETH_LINK_SPEED_100:
                /*
                 * Invalid for 82598 but error will be detected by
                 * ixgbe_setup_link()
                 */
                speed = IXGBE_LINK_SPEED_100_FULL;
                break;
        case ETH_LINK_SPEED_1000:
                speed = IXGBE_LINK_SPEED_1GB_FULL;
                break;
        case ETH_LINK_SPEED_10000:
                speed = IXGBE_LINK_SPEED_10GB_FULL;
                break;
        default:
                PMD_INIT_LOG(ERR, "Invalid link_speed (%u) for port %u\n",
                                dev->data->dev_conf.link_speed, dev->data->port_id);
                return -EINVAL;
        }

        err = ixgbe_setup_link(hw, speed, negotiate, link_up);
        if (err)
                goto error;

        /* check if lsc interrupt is enabled */
        if (dev->data->dev_conf.intr_conf.lsc != 0) {
                err = ixgbe_dev_interrupt_setup(dev);
                if (err)
                        goto error;
        }

        /*
         * If VLAN filtering is enabled, set up VLAN tag offload and filtering
         * and restore VFTA.
         */
        if (dev->data->dev_conf.rxmode.hw_vlan_filter)
                ixgbe_vlan_hw_support_enable(dev);
        else
                ixgbe_vlan_hw_support_disable(dev);

        if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
                err = ixgbe_fdir_configure(dev);
                if (err)
                        goto error;
        }

        ixgbe_restore_statistics_mapping(dev);

        return (0);

error:
        PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
        return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
ixgbe_dev_stop(struct rte_eth_dev *dev)
{
        struct rte_eth_link link;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        /* disable interrupts */
        ixgbe_disable_intr(hw);

        /* reset the NIC */
        ixgbe_reset_hw(hw);
        hw->adapter_stopped = FALSE;

        /* stop adapter */
        ixgbe_stop_adapter(hw);

        /* Turn off the laser */
        if (hw->phy.multispeed_fiber)
                ixgbe_disable_tx_laser(hw);

        ixgbe_dev_clear_queues(dev);

        /* Clear recorded link status */
        memset(&link, 0, sizeof(link));
        rte_ixgbe_dev_atomic_write_link_status(dev, &link);
}

/*
 * Reset and stop the device.
 */
static void
ixgbe_dev_close(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        ixgbe_reset_hw(hw);

        ixgbe_dev_stop(dev);
        hw->adapter_stopped = 1;

        ixgbe_disable_pcie_master(hw);

        /* reprogram the RAR[0] in case user changed it. */
        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}

/*
 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
 */
static void
ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct ixgbe_hw *hw =
                        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_hw_stats *hw_stats =
                        IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        uint32_t bprc, lxon, lxoff, total;
        uint64_t total_missed_rx, total_qbrc, total_qprc;
        unsigned i;

        total_missed_rx = 0;
        total_qbrc = 0;
        total_qprc = 0;

        hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
        hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
        hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
        hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);

        for (i = 0; i < 8; i++) {
                uint32_t mp;
                mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
                /* global total per queue */
                hw_stats->mpc[i] += mp;
                /* Running comprehensive total for stats display */
                total_missed_rx += hw_stats->mpc[i];
                if (hw->mac.type == ixgbe_mac_82598EB)
                        hw_stats->rnbc[i] +=
                            IXGBE_READ_REG(hw, IXGBE_RNBC(i));
                hw_stats->pxontxc[i] +=
                    IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
                hw_stats->pxonrxc[i] +=
                    IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
                hw_stats->pxofftxc[i] +=
                    IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
                hw_stats->pxoffrxc[i] +=
                    IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
                hw_stats->pxon2offc[i] +=
                    IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
        }
        for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
                hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
                hw_stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
                hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
                hw_stats->qbrc[i] +=
                    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
                hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
                hw_stats->qbtc[i] +=
                    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
                hw_stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));

                total_qprc += hw_stats->qprc[i];
                total_qbrc += hw_stats->qbrc[i];
        }
        hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
        hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
        hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

        /* Note that gprc counts missed packets */
        hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);

        if (hw->mac.type != ixgbe_mac_82598EB) {
                hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
                    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
                hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
                    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
                hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
                    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
                hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
                hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
        } else {
                hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
                hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
                /* 82598 only has a counter in the high register */
                hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
                hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
                hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
        }

        /*
         * Workaround: mprc hardware is incorrectly counting
         * broadcasts, so for now we subtract those.
         */
        bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
        hw_stats->bprc += bprc;
        hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
        if (hw->mac.type == ixgbe_mac_82598EB)
                hw_stats->mprc -= bprc;

        hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
        hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
        hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
        hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
        hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
        hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

        lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
        hw_stats->lxontxc += lxon;
        lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
        hw_stats->lxofftxc += lxoff;
        total = lxon + lxoff;

        hw_stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
        hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
        hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
        hw_stats->gptc -= total;
        hw_stats->mptc -= total;
        hw_stats->ptc64 -= total;
        hw_stats->gotc -= total * ETHER_MIN_LEN;

        hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
        hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
        hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
        hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
        hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
        hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
        hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
        hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
        hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
        hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
        hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
        hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
        hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
        hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
        hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
        hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
        hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
        hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
        /* Only read FCOE on 82599 */
        if (hw->mac.type != ixgbe_mac_82598EB) {
                hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
                hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
                hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
                hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
                hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
        }

        if (stats == NULL)
                return;

        /* Fill out the rte_eth_stats statistics structure */
        stats->ipackets = total_qprc;
        stats->ibytes = total_qbrc;
        stats->opackets = hw_stats->gptc;
        stats->obytes = hw_stats->gotc;
        stats->imcasts = hw_stats->mprc;

        for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
                stats->q_ipackets[i] = hw_stats->qprc[i];
                stats->q_opackets[i] = hw_stats->qptc[i];
                stats->q_ibytes[i] = hw_stats->qbrc[i];
                stats->q_obytes[i] = hw_stats->qbtc[i];
                stats->q_errors[i] = hw_stats->qprdc[i];
        }

        /* Rx Errors */
        stats->ierrors = total_missed_rx + hw_stats->crcerrs +
                hw_stats->rlec;

        stats->oerrors  = 0;

        /* Flow Director Stats registers */
        hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
        hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
        stats->fdirmatch = hw_stats->fdirmatch;
        stats->fdirmiss = hw_stats->fdirmiss;
}

static void
ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
        struct ixgbe_hw_stats *stats =
                        IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        /* HW registers are cleared on read */
        ixgbe_dev_stats_get(dev, NULL);

        /* Reset software totals */
        memset(stats, 0, sizeof(*stats));
}

static void
ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
                          IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        /* Good Rx packets, include VF loopback */
        UPDATE_VF_STAT(IXGBE_VFGPRC,
            hw_stats->last_vfgprc, hw_stats->vfgprc);

        /* Good Rx octets, include VF loopback */
        UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
            hw_stats->last_vfgorc, hw_stats->vfgorc);

        /* Good Tx packets, include VF loopback */
        UPDATE_VF_STAT(IXGBE_VFGPTC,
            hw_stats->last_vfgptc, hw_stats->vfgptc);

        /* Good Tx octets, include VF loopback */
        UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
            hw_stats->last_vfgotc, hw_stats->vfgotc);

        /* Rx Multicast packets, include VF loopback */
        UPDATE_VF_STAT(IXGBE_VFMPRC,
            hw_stats->last_vfmprc, hw_stats->vfmprc);

        if (stats == NULL)
                return;

        memset(stats, 0, sizeof(*stats));
        stats->ipackets = hw_stats->vfgprc;
        stats->ibytes = hw_stats->vfgorc;
        stats->opackets = hw_stats->vfgptc;
        stats->obytes = hw_stats->vfgotc;
        stats->imcasts = hw_stats->vfmprc;
}

static void
ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
{
        struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
                        IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        /* Sync HW register to the last stats */
        ixgbevf_dev_stats_get(dev, NULL);

        /* reset HW current stats */
        hw_stats->vfgprc = 0;
        hw_stats->vfgorc = 0;
        hw_stats->vfgptc = 0;
        hw_stats->vfgotc = 0;
        hw_stats->vfmprc = 0;
}

static void
ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        dev_info->max_rx_queues = hw->mac.max_rx_queues;
        dev_info->max_tx_queues = hw->mac.max_tx_queues;
        dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
        dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
        dev_info->max_mac_addrs = hw->mac.num_rar_entries;
}

/* return 0 means link status changed, -1 means not changed */
static int
ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_link link, old;
        ixgbe_link_speed link_speed;
        int link_up;
        int diag;

        link.link_status = 0;
        link.link_speed = 0;
        link.link_duplex = 0;
        memset(&old, 0, sizeof(old));
        rte_ixgbe_dev_atomic_read_link_status(dev, &old);

        /* check if it needs to wait to complete, if lsc interrupt is enabled */
        if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
                diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
        else
                diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
        if (diag != 0) {
                link.link_speed = ETH_LINK_SPEED_100;
                link.link_duplex = ETH_LINK_HALF_DUPLEX;
                rte_ixgbe_dev_atomic_write_link_status(dev, &link);
                if (link.link_status == old.link_status)
                        return -1;
                return 0;
        }

        if (link_up == 0) {
                rte_ixgbe_dev_atomic_write_link_status(dev, &link);
                if (link.link_status == old.link_status)
                        return -1;
                return 0;
        }
        link.link_status = 1;
        link.link_duplex = ETH_LINK_FULL_DUPLEX;

        switch (link_speed) {
        default:
        case IXGBE_LINK_SPEED_UNKNOWN:
                link.link_duplex = ETH_LINK_HALF_DUPLEX;
                link.link_speed = ETH_LINK_SPEED_100;
                break;

        case IXGBE_LINK_SPEED_100_FULL:
                link.link_speed = ETH_LINK_SPEED_100;
                break;

        case IXGBE_LINK_SPEED_1GB_FULL:
                link.link_speed = ETH_LINK_SPEED_1000;
                break;

        case IXGBE_LINK_SPEED_10GB_FULL:
                link.link_speed = ETH_LINK_SPEED_10000;
                break;
        }
        rte_ixgbe_dev_atomic_write_link_status(dev, &link);

        if (link.link_status == old.link_status)
                return -1;

        return 0;
}

static void
ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t fctrl;

        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}

static void
ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t fctrl;

        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        fctrl &= (~IXGBE_FCTRL_UPE);
        if (dev->data->all_multicast == 1)
                fctrl |= IXGBE_FCTRL_MPE;
        else
                fctrl &= (~IXGBE_FCTRL_MPE);
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}

static void
ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t fctrl;

        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        fctrl |= IXGBE_FCTRL_MPE;
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}

static void
ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t fctrl;

        if (dev->data->promiscuous == 1)
                return; /* must remain in all_multicast mode */

        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        fctrl &= (~IXGBE_FCTRL_MPE);
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It will only be called once, during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        ixgbe_dev_link_status_print(dev);
        IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
        IXGBE_WRITE_FLUSH(hw);
        rte_intr_enable(&(dev->pci_dev->intr_handle));

        return 0;
}

/**
 * It reads EICR and, on a link status change, sets the flag
 * (IXGBE_FLAG_NEED_LINK_UPDATE) for the link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
        uint32_t eicr;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

        IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_LSC);
        IXGBE_WRITE_FLUSH(hw);

        /* read-on-clear nic registers here */
        eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
        PMD_INIT_LOG(INFO, "eicr %x", eicr);
        if (eicr & IXGBE_EICR_LSC) {
                /* set flag for async link update */
                intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
        }

        return 0;
}

/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  void
 */
static void
ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
        struct rte_eth_link link;

        memset(&link, 0, sizeof(link));
        rte_ixgbe_dev_atomic_read_link_status(dev, &link);
        if (link.link_status) {
                PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
                                        (int)(dev->data->port_id),
                                        (unsigned)link.link_speed,
                        link.link_duplex == ETH_LINK_FULL_DUPLEX ?
                                        "full-duplex" : "half-duplex");
        } else {
                PMD_INIT_LOG(INFO, " Port %d: Link Down",
                                (int)(dev->data->port_id));
        }
        PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
                                dev->pci_dev->addr.domain,
                                dev->pci_dev->addr.bus,
                                dev->pci_dev->addr.devid,
                                dev->pci_dev->addr.function);
}

/*
 * It executes link_update after an interrupt is known to have occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) {
		return -1;
	}
	ixgbe_dev_link_update(dev, 0);

	return 0;
}

/**
 * Interrupt handler registered as an alarm callback for delayed handling of
 * a specific interrupt, waiting for the NIC state to become stable. The
 * ixgbe interrupt state is not stable immediately after the link goes down,
 * so handling is deferred for 4 seconds to read a stable status.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ixgbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* EICR is read-to-clear: discard any causes raised while waiting */
	IXGBE_READ_REG(hw, IXGBE_EICR);
	ixgbe_dev_interrupt_action(dev);
	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
		rte_intr_enable(&(dev->pci_dev->intr_handle));
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
		IXGBE_WRITE_FLUSH(hw);
		ixgbe_dev_link_status_print(dev);
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
	}
}

/**
 * Interrupt handler triggered by the NIC for handling a specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, void *param)
{
	int64_t timeout;
	struct rte_eth_link link;
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	/* read the last known link status, to predict the new state below */
	memset(&link, 0, sizeof(link));
	rte_ixgbe_dev_atomic_read_link_status(dev, &link);
	ixgbe_dev_interrupt_get_status(dev);
	ixgbe_dev_interrupt_action(dev);

	if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
		return;

	if (!link.link_status)
		/* link was down, likely coming up: recheck 1 sec later,
		 * once the state is stable */
		timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
	else
		/* link was up, likely going down: recheck 4 sec later,
		 * once the state is stable */
		timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;

	ixgbe_dev_link_status_print(dev);
	/* rte_eal_alarm_set() expects microseconds; timeout is in ms */
	if (rte_eal_alarm_set(timeout * 1000,
		ixgbe_dev_interrupt_delayed_handler, param) < 0)
		PMD_INIT_LOG(ERR, "Error setting alarm");
}
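
/*
 * Usage sketch (illustrative only, compiled out): an application consumes
 * the RTE_ETH_EVENT_INTR_LSC events dispatched above by registering a
 * callback with the generic ethdev API; all "example_*" names are
 * placeholders for application code.
 */
#ifdef IXGBE_USAGE_EXAMPLES
static void
example_lsc_event_cb(uint8_t port_id, enum rte_eth_event_type type,
		void *param)
{
	(void)param;
	if (type == RTE_ETH_EVENT_INTR_LSC)
		printf("example: port %u link state changed\n", port_id);
}

static void
example_register_lsc_cb(uint8_t port_id)
{
	/* the callback fires from _rte_eth_dev_callback_process() above */
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
			example_lsc_event_cb, NULL);
}
#endif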

static int
ixgbe_dev_led_on(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
}

static int
ixgbe_dev_led_off(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
}
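
/*
 * Usage sketch (illustrative only, compiled out): the two handlers above
 * back the generic ethdev LED API, which an application can use to blink
 * a port LED for physical identification. "port_id" is a placeholder.
 */
#ifdef IXGBE_USAGE_EXAMPLES
static int
example_identify_port(uint8_t port_id)
{
	if (rte_eth_led_on(port_id) != 0)
		return -1;	/* not supported on this MAC/PHY */
	rte_delay_ms(500);	/* leave the LED lit briefly */
	return rte_eth_led_off(port_id);
}
#endif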

static int
ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ixgbe_hw *hw;
	int err;
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
		ixgbe_fc_none,
		ixgbe_fc_rx_pause,
		ixgbe_fc_tx_pause,
		ixgbe_fc_full
	};

	PMD_INIT_FUNC_TRACE();

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x\n", rx_buf_size);

	/*
	 * Reserve at least one Ethernet frame of headroom above the high
	 * watermark; high_water/low_water are expressed in kilobytes.
	 */
	max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
	if ((fc_conf->high_water > max_high_water) ||
		(fc_conf->high_water < fc_conf->low_water)) {
		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x\n", max_high_water);
		return (-EINVAL);
	}

	hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
	hw->fc.pause_time     = fc_conf->pause_time;
	hw->fc.high_water[0]  = fc_conf->high_water;
	hw->fc.low_water      = fc_conf->low_water;
	hw->fc.send_xon       = fc_conf->send_xon;

	err = ixgbe_fc_enable(hw, 0);
	/* Not negotiated is not an error case */
	if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
		return 0;
	}

	PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x\n", err);
	return -EIO;
}
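
/*
 * Usage sketch (illustrative only, compiled out): an application programs
 * the watermarks through the generic ethdev API, which lands in the handler
 * above. The values below are arbitrary placeholders reusing this file's
 * IXGBE_FC_* defaults, in KB units, subject to the max_high_water check.
 */
#ifdef IXGBE_USAGE_EXAMPLES
static int
example_enable_flow_ctrl(uint8_t port_id)
{
	struct rte_eth_fc_conf fc_conf = {
		.high_water = IXGBE_FC_HI,	/* 0x80 KB */
		.low_water  = IXGBE_FC_LO,	/* 0x40 KB */
		.pause_time = IXGBE_FC_PAUSE,
		.send_xon   = 1,
		.mode       = RTE_FC_FULL,
	};

	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
#endif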

static void
ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t enable_addr = 1;	/* mark the RAR entry as valid */

	ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
}

static void
ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ixgbe_clear_rar(hw, index);
}
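
/*
 * Usage sketch (illustrative only, compiled out): the two handlers above
 * back the generic per-port MAC filter API (assumed here to be
 * rte_eth_dev_mac_addr_add()/_remove()); the address and pool are
 * placeholders.
 */
#ifdef IXGBE_USAGE_EXAMPLES
static int
example_add_secondary_mac(uint8_t port_id)
{
	struct ether_addr mac = {
		.addr_bytes = {0x00, 0x1b, 0x21, 0x00, 0x00, 0x01}
	};

	/* programs a free RAR entry via ixgbe_add_rar(), pool 0 */
	if (rte_eth_dev_mac_addr_add(port_id, &mac, 0) != 0)
		return -1;
	/* clears the entry again via ixgbe_remove_rar() */
	return rte_eth_dev_mac_addr_remove(port_id, &mac);
}
#endif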

/*
 * Virtual Function operations
 */
static void
ixgbevf_intr_disable(struct ixgbe_hw *hw)
{
	PMD_INIT_LOG(DEBUG, "ixgbevf_intr_disable");

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

	IXGBE_WRITE_FLUSH(hw);
}

static int
ixgbevf_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, uint16_t nb_tx_q)
{
	int diag;
	struct rte_eth_conf *conf = &dev->data->dev_conf;

	PMD_INIT_FUNC_TRACE();

	/* Allocate the array of pointers to RX queue structures */
	diag = ixgbe_dev_rx_queue_alloc(dev, nb_rx_q);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d "
			     "pointers to RX queues failed", dev->data->port_id,
			     nb_rx_q);
		return diag;
	}

	/* Allocate the array of pointers to TX queue structures */
	diag = ixgbe_dev_tx_queue_alloc(dev, nb_tx_q);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d "
			     "pointers to TX queues failed", dev->data->port_id,
			     nb_tx_q);
		return diag;
	}

	if (!conf->rxmode.hw_strip_crc) {
		/*
		 * The VF has no ability to enable/disable HW CRC stripping;
		 * keep the behavior consistent with the host PF.
		 */
		PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
		conf->rxmode.hw_strip_crc = 1;
	}

	return 0;
}
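
/*
 * Usage sketch (illustrative only, compiled out): configuring a VF port
 * through the generic ethdev API. Since hw_strip_crc is forced on above,
 * an application should request it explicitly; the single RX/TX queue
 * counts are placeholders.
 */
#ifdef IXGBE_USAGE_EXAMPLES
static int
example_configure_vf_port(uint8_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.hw_strip_crc = 1;	/* the VF cannot disable CRC strip */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
#endif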

static int
ixgbevf_dev_start(struct rte_eth_dev *dev)
{
	int err = 0;

	PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");

	ixgbevf_dev_tx_init(dev);
	err = ixgbevf_dev_rx_init(dev);
	if (err) {
		ixgbe_dev_clear_queues(dev);
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
		return err;
	}
	ixgbevf_dev_rxtx_start(dev);

	return 0;
}

static void
ixgbevf_dev_stop(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_LOG(DEBUG, "ixgbevf_dev_stop");

	ixgbe_reset_hw(hw);
	/* clear the stopped flag so ixgbe_stop_adapter() runs its full path */
	hw->adapter_stopped = 0;
	ixgbe_stop_adapter(hw);
	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}