net/qede: set MTU
[dpdk.git] / drivers / net / qede / qede_ethdev.c
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "qede_ethdev.h"
10
11 /* Globals */
12 static const struct qed_eth_ops *qed_ops;
13 static const char *drivername = "qede pmd";
14
/* Invoke the ecore slowpath deferred procedure call for @p_hwfn. */
static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}
19
20 static void
21 qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
22 {
23         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
24         struct qede_dev *qdev = eth_dev->data->dev_private;
25         struct ecore_dev *edev = &qdev->edev;
26
27         qede_interrupt_action(ECORE_LEADING_HWFN(edev));
28         if (rte_intr_enable(&eth_dev->pci_dev->intr_handle))
29                 DP_ERR(edev, "rte_intr_enable failed\n");
30 }
31
32 static void
33 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
34 {
35         rte_memcpy(&qdev->dev_info, info, sizeof(*info));
36         qdev->num_tc = qdev->dev_info.num_tc;
37         qdev->ops = qed_ops;
38 }
39
40 static void qede_print_adapter_info(struct qede_dev *qdev)
41 {
42         struct ecore_dev *edev = &qdev->edev;
43         struct qed_dev_info *info = &qdev->dev_info.common;
44         static char ver_str[QED_DRV_VER_STR_SIZE];
45
46         DP_INFO(edev, "*********************************\n");
47         DP_INFO(edev, " Chip details : %s%d\n",
48                 ECORE_IS_BB(edev) ? "BB" : "AH",
49                 CHIP_REV_IS_A0(edev) ? 0 : 1);
50
51         sprintf(ver_str, "%s %s_%d.%d.%d.%d", QEDE_PMD_VER_PREFIX,
52                 edev->ver_str, QEDE_PMD_VERSION_MAJOR, QEDE_PMD_VERSION_MINOR,
53                 QEDE_PMD_VERSION_REVISION, QEDE_PMD_VERSION_PATCH);
54         strcpy(qdev->drv_ver, ver_str);
55         DP_INFO(edev, " Driver version : %s\n", ver_str);
56
57         sprintf(ver_str, "%d.%d.%d.%d", info->fw_major, info->fw_minor,
58                 info->fw_rev, info->fw_eng);
59         DP_INFO(edev, " Firmware version : %s\n", ver_str);
60
61         sprintf(ver_str, "%d.%d.%d.%d",
62                 (info->mfw_rev >> 24) & 0xff,
63                 (info->mfw_rev >> 16) & 0xff,
64                 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
65         DP_INFO(edev, " Management firmware version : %s\n", ver_str);
66
67         DP_INFO(edev, " Firmware file : %s\n", fw_file);
68
69         DP_INFO(edev, "*********************************\n");
70 }
71
72 static int
73 qede_set_ucast_rx_mac(struct qede_dev *qdev,
74                       enum qed_filter_xcast_params_type opcode,
75                       uint8_t mac[ETHER_ADDR_LEN])
76 {
77         struct ecore_dev *edev = &qdev->edev;
78         struct qed_filter_params filter_cmd;
79
80         memset(&filter_cmd, 0, sizeof(filter_cmd));
81         filter_cmd.type = QED_FILTER_TYPE_UCAST;
82         filter_cmd.filter.ucast.type = opcode;
83         filter_cmd.filter.ucast.mac_valid = 1;
84         rte_memcpy(&filter_cmd.filter.ucast.mac[0], &mac[0], ETHER_ADDR_LEN);
85         return qdev->ops->filter_config(edev, &filter_cmd);
86 }
87
88 static void
89 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
90                   uint32_t index, __rte_unused uint32_t pool)
91 {
92         struct qede_dev *qdev = eth_dev->data->dev_private;
93         struct ecore_dev *edev = &qdev->edev;
94         int rc;
95
96         PMD_INIT_FUNC_TRACE(edev);
97
98         if (index >= qdev->dev_info.num_mac_addrs) {
99                 DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
100                        index, qdev->dev_info.num_mac_addrs);
101                 return;
102         }
103
104         /* Adding macaddr even though promiscuous mode is set */
105         if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
106                 DP_INFO(edev, "Port is in promisc mode, yet adding it\n");
107
108         /* Add MAC filters according to the unicast secondary macs */
109         rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
110                                    mac_addr->addr_bytes);
111         if (rc)
112                 DP_ERR(edev, "Unable to add macaddr rc=%d\n", rc);
113 }
114
115 static void
116 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
117 {
118         struct qede_dev *qdev = eth_dev->data->dev_private;
119         struct ecore_dev *edev = &qdev->edev;
120         struct ether_addr mac_addr;
121         int rc;
122
123         PMD_INIT_FUNC_TRACE(edev);
124
125         if (index >= qdev->dev_info.num_mac_addrs) {
126                 DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
127                        index, qdev->dev_info.num_mac_addrs);
128                 return;
129         }
130
131         /* Use the index maintained by rte */
132         ether_addr_copy(&eth_dev->data->mac_addrs[index], &mac_addr);
133         rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
134                                    mac_addr.addr_bytes);
135         if (rc)
136                 DP_ERR(edev, "Unable to remove macaddr rc=%d\n", rc);
137 }
138
139 static void
140 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
141 {
142         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
143         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
144         int rc;
145
146         if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
147                                                mac_addr->addr_bytes)) {
148                 DP_ERR(edev, "Setting MAC address is not allowed\n");
149                 ether_addr_copy(&qdev->primary_mac,
150                                 &eth_dev->data->mac_addrs[0]);
151                 return;
152         }
153
154         /* First remove the primary mac */
155         rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
156                                    qdev->primary_mac.addr_bytes);
157
158         if (rc) {
159                 DP_ERR(edev, "Unable to remove current macaddr"
160                              " Reverting to previous default mac\n");
161                 ether_addr_copy(&qdev->primary_mac,
162                                 &eth_dev->data->mac_addrs[0]);
163                 return;
164         }
165
166         /* Add new MAC */
167         rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
168                                    mac_addr->addr_bytes);
169
170         if (rc)
171                 DP_ERR(edev, "Unable to add new default mac\n");
172         else
173                 ether_addr_copy(mac_addr, &qdev->primary_mac);
174 }
175
176
177
178
179 static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
180 {
181         struct ecore_dev *edev = &qdev->edev;
182         struct qed_update_vport_params params = {
183                 .vport_id = 0,
184                 .accept_any_vlan = action,
185                 .update_accept_any_vlan_flg = 1,
186         };
187         int rc;
188
189         /* Proceed only if action actually needs to be performed */
190         if (qdev->accept_any_vlan == action)
191                 return;
192
193         rc = qdev->ops->vport_update(edev, &params);
194         if (rc) {
195                 DP_ERR(edev, "Failed to %s accept-any-vlan\n",
196                        action ? "enable" : "disable");
197         } else {
198                 DP_INFO(edev, "%s accept-any-vlan\n",
199                         action ? "enabled" : "disabled");
200                 qdev->accept_any_vlan = action;
201         }
202 }
203
204 void qede_config_rx_mode(struct rte_eth_dev *eth_dev)
205 {
206         struct qede_dev *qdev = eth_dev->data->dev_private;
207         struct ecore_dev *edev = &qdev->edev;
208         /* TODO: - QED_FILTER_TYPE_UCAST */
209         enum qed_filter_rx_mode_type accept_flags =
210                         QED_FILTER_RX_MODE_TYPE_REGULAR;
211         struct qed_filter_params rx_mode;
212         int rc;
213
214         /* Configure the struct for the Rx mode */
215         memset(&rx_mode, 0, sizeof(struct qed_filter_params));
216         rx_mode.type = QED_FILTER_TYPE_RX_MODE;
217
218         rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_REPLACE,
219                                    eth_dev->data->mac_addrs[0].addr_bytes);
220         if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
221                 accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
222         } else {
223                 rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
224                                            eth_dev->data->
225                                            mac_addrs[0].addr_bytes);
226                 if (rc) {
227                         DP_ERR(edev, "Unable to add filter\n");
228                         return;
229                 }
230         }
231
232         /* take care of VLAN mode */
233         if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
234                 qede_config_accept_any_vlan(qdev, true);
235         } else if (!qdev->non_configured_vlans) {
236                 /* If we dont have non-configured VLANs and promisc
237                  * is not set, then check if we need to disable
238                  * accept_any_vlan mode.
239                  * Because in this case, accept_any_vlan mode is set
240                  * as part of IFF_RPOMISC flag handling.
241                  */
242                 qede_config_accept_any_vlan(qdev, false);
243         }
244         rx_mode.filter.accept_flags = accept_flags;
245         rc = qdev->ops->filter_config(edev, &rx_mode);
246         if (rc)
247                 DP_ERR(edev, "Filter config failed rc=%d\n", rc);
248 }
249
250 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
251 {
252         struct qed_update_vport_params vport_update_params;
253         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
254         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
255         int rc;
256
257         memset(&vport_update_params, 0, sizeof(vport_update_params));
258         vport_update_params.vport_id = 0;
259         vport_update_params.update_inner_vlan_removal_flg = 1;
260         vport_update_params.inner_vlan_removal_flg = set_stripping;
261         rc = qdev->ops->vport_update(edev, &vport_update_params);
262         if (rc) {
263                 DP_ERR(edev, "Update V-PORT failed %d\n", rc);
264                 return rc;
265         }
266
267         return 0;
268 }
269
270 static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
271 {
272         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
273         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
274
275         if (mask & ETH_VLAN_STRIP_MASK) {
276                 if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
277                         (void)qede_vlan_stripping(eth_dev, 1);
278                 else
279                         (void)qede_vlan_stripping(eth_dev, 0);
280         }
281
282         DP_INFO(edev, "vlan offload mask %d vlan-strip %d\n",
283                 mask, eth_dev->data->dev_conf.rxmode.hw_vlan_strip);
284 }
285
286 static int qede_set_ucast_rx_vlan(struct qede_dev *qdev,
287                                   enum qed_filter_xcast_params_type opcode,
288                                   uint16_t vid)
289 {
290         struct qed_filter_params filter_cmd;
291         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
292
293         memset(&filter_cmd, 0, sizeof(filter_cmd));
294         filter_cmd.type = QED_FILTER_TYPE_UCAST;
295         filter_cmd.filter.ucast.type = opcode;
296         filter_cmd.filter.ucast.vlan_valid = 1;
297         filter_cmd.filter.ucast.vlan = vid;
298
299         return qdev->ops->filter_config(edev, &filter_cmd);
300 }
301
302 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
303                                 uint16_t vlan_id, int on)
304 {
305         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
306         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
307         struct qed_dev_eth_info *dev_info = &qdev->dev_info;
308         int rc;
309
310         if (vlan_id != 0 &&
311             qdev->configured_vlans == dev_info->num_vlan_filters) {
312                 DP_NOTICE(edev, false, "Reached max VLAN filter limit"
313                                      " enabling accept_any_vlan\n");
314                 qede_config_accept_any_vlan(qdev, true);
315                 return 0;
316         }
317
318         if (on) {
319                 rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD,
320                                             vlan_id);
321                 if (rc)
322                         DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
323                                rc);
324                 else
325                         if (vlan_id != 0)
326                                 qdev->configured_vlans++;
327         } else {
328                 rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL,
329                                             vlan_id);
330                 if (rc)
331                         DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
332                                vlan_id, rc);
333                 else
334                         if (vlan_id != 0)
335                                 qdev->configured_vlans--;
336         }
337
338         DP_INFO(edev, "vlan_id %u on %u rc %d configured_vlans %u\n",
339                         vlan_id, on, rc, qdev->configured_vlans);
340
341         return rc;
342 }
343
344 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
345 {
346         struct qede_dev *qdev = eth_dev->data->dev_private;
347         struct ecore_dev *edev = &qdev->edev;
348         struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
349
350         PMD_INIT_FUNC_TRACE(edev);
351
352         if (eth_dev->data->nb_rx_queues != eth_dev->data->nb_tx_queues) {
353                 DP_NOTICE(edev, false,
354                           "Unequal number of rx/tx queues "
355                           "is not supported RX=%u TX=%u\n",
356                           eth_dev->data->nb_rx_queues,
357                           eth_dev->data->nb_tx_queues);
358                 return -EINVAL;
359         }
360
361         qdev->num_rss = eth_dev->data->nb_rx_queues;
362
363         /* Initial state */
364         qdev->state = QEDE_CLOSE;
365
366         /* Sanity checks and throw warnings */
367
368         if (rxmode->enable_scatter == 1) {
369                 DP_ERR(edev, "RX scatter packets is not supported\n");
370                 return -EINVAL;
371         }
372
373         if (rxmode->enable_lro == 1) {
374                 DP_INFO(edev, "LRO is not supported\n");
375                 return -EINVAL;
376         }
377
378         if (!rxmode->hw_strip_crc)
379                 DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
380
381         if (!rxmode->hw_ip_checksum)
382                 DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
383                               "in hw\n");
384
385
386         DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
387                 QEDE_RSS_CNT(qdev), qdev->num_tc);
388
389         DP_INFO(edev, "my_id %u rel_pf_id %u abs_pf_id %u"
390                 " port %u first_on_engine %d\n",
391                 edev->hwfns[0].my_id,
392                 edev->hwfns[0].rel_pf_id,
393                 edev->hwfns[0].abs_pf_id,
394                 edev->hwfns[0].port_id, edev->hwfns[0].first_on_engine);
395
396         return 0;
397 }
398
/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
	.nb_max = NUM_RX_BDS_MAX,
	.nb_min = 128,
	.nb_align = 128 /* lowest common multiple */
};

/* TX rings require a larger minimum count and alignment than RX */
static const struct rte_eth_desc_lim qede_tx_desc_lim = {
	.nb_max = NUM_TX_BDS_MAX,
	.nb_min = 256,
	.nb_align = 256
};
411
412 static void
413 qede_dev_info_get(struct rte_eth_dev *eth_dev,
414                   struct rte_eth_dev_info *dev_info)
415 {
416         struct qede_dev *qdev = eth_dev->data->dev_private;
417         struct ecore_dev *edev = &qdev->edev;
418
419         PMD_INIT_FUNC_TRACE(edev);
420
421         dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
422                                               QEDE_ETH_OVERHEAD);
423         dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
424         dev_info->rx_desc_lim = qede_rx_desc_lim;
425         dev_info->tx_desc_lim = qede_tx_desc_lim;
426         dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev);
427         dev_info->max_tx_queues = dev_info->max_rx_queues;
428         dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
429         if (IS_VF(edev))
430                 dev_info->max_vfs = 0;
431         else
432                 dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
433         dev_info->driver_name = qdev->drv_ver;
434         dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
435         dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
436
437         dev_info->default_txconf = (struct rte_eth_txconf) {
438                 .txq_flags = QEDE_TXQ_FLAGS,
439         };
440
441         dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
442                                      DEV_RX_OFFLOAD_IPV4_CKSUM |
443                                      DEV_RX_OFFLOAD_UDP_CKSUM |
444                                      DEV_RX_OFFLOAD_TCP_CKSUM);
445         dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
446                                      DEV_TX_OFFLOAD_IPV4_CKSUM |
447                                      DEV_TX_OFFLOAD_UDP_CKSUM |
448                                      DEV_TX_OFFLOAD_TCP_CKSUM);
449
450         dev_info->speed_capa = ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;
451 }
452
453 /* return 0 means link status changed, -1 means not changed */
454 static int
455 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
456 {
457         struct qede_dev *qdev = eth_dev->data->dev_private;
458         struct ecore_dev *edev = &qdev->edev;
459         uint16_t link_duplex;
460         struct qed_link_output link;
461         struct rte_eth_link *curr = &eth_dev->data->dev_link;
462
463         memset(&link, 0, sizeof(struct qed_link_output));
464         qdev->ops->common->get_link(edev, &link);
465
466         /* Link Speed */
467         curr->link_speed = link.speed;
468
469         /* Link Mode */
470         switch (link.duplex) {
471         case QEDE_DUPLEX_HALF:
472                 link_duplex = ETH_LINK_HALF_DUPLEX;
473                 break;
474         case QEDE_DUPLEX_FULL:
475                 link_duplex = ETH_LINK_FULL_DUPLEX;
476                 break;
477         case QEDE_DUPLEX_UNKNOWN:
478         default:
479                 link_duplex = -1;
480         }
481         curr->link_duplex = link_duplex;
482
483         /* Link Status */
484         curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
485
486         /* AN */
487         curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
488                              ETH_LINK_AUTONEG : ETH_LINK_FIXED;
489
490         DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
491                 curr->link_speed, curr->link_duplex,
492                 curr->link_autoneg, curr->link_status);
493
494         /* return 0 means link status changed, -1 means not changed */
495         return ((curr->link_status == link.link_up) ? -1 : 0);
496 }
497
498 static void
499 qede_rx_mode_setting(struct rte_eth_dev *eth_dev,
500                      enum qed_filter_rx_mode_type accept_flags)
501 {
502         struct qede_dev *qdev = eth_dev->data->dev_private;
503         struct ecore_dev *edev = &qdev->edev;
504         struct qed_filter_params rx_mode;
505
506         DP_INFO(edev, "%s mode %u\n", __func__, accept_flags);
507
508         memset(&rx_mode, 0, sizeof(struct qed_filter_params));
509         rx_mode.type = QED_FILTER_TYPE_RX_MODE;
510         rx_mode.filter.accept_flags = accept_flags;
511         qdev->ops->filter_config(edev, &rx_mode);
512 }
513
514 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
515 {
516         struct qede_dev *qdev = eth_dev->data->dev_private;
517         struct ecore_dev *edev = &qdev->edev;
518
519         PMD_INIT_FUNC_TRACE(edev);
520
521         enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
522
523         if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
524                 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
525
526         qede_rx_mode_setting(eth_dev, type);
527 }
528
529 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
530 {
531         struct qede_dev *qdev = eth_dev->data->dev_private;
532         struct ecore_dev *edev = &qdev->edev;
533
534         PMD_INIT_FUNC_TRACE(edev);
535
536         if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
537                 qede_rx_mode_setting(eth_dev,
538                                      QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
539         else
540                 qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
541 }
542
/*
 * dev_close op: fully tear down the port — stop the datapath, free
 * fastpath memory, drop the link, detach from the qed layer, and
 * unhook the interrupt handler.  The teardown order below is
 * significant; do not reorder these calls.
 */
static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	/* dev_stop() shall cleanup fp resources in hw but without releasing
	 * dma memories and sw structures so that dev_start() can be called
	 * by the app without reconfiguration. However, in dev_close() we
	 * can release all the resources and device can be brought up newly
	 */
	if (qdev->state != QEDE_STOP)
		qede_dev_stop(eth_dev);
	else
		DP_INFO(edev, "Device is already stopped\n");

	/* Release fastpath DMA memory and the SW ring arrays. */
	qede_free_mem_load(qdev);

	qede_free_fp_arrays(qdev);

	/* Bring the physical link down before detaching from HW. */
	qede_dev_set_link_state(eth_dev, false);

	qdev->ops->common->slowpath_stop(edev);

	qdev->ops->common->remove(edev);

	/* Stop servicing interrupts for this port. */
	rte_intr_disable(&eth_dev->pci_dev->intr_handle);

	rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle,
				     qede_interrupt_handler, (void *)eth_dev);

	qdev->state = QEDE_CLOSE;
}
577
578 static void
579 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
580 {
581         struct qede_dev *qdev = eth_dev->data->dev_private;
582         struct ecore_dev *edev = &qdev->edev;
583         struct ecore_eth_stats stats;
584
585         qdev->ops->get_vport_stats(edev, &stats);
586
587         /* RX Stats */
588         eth_stats->ipackets = stats.rx_ucast_pkts +
589             stats.rx_mcast_pkts + stats.rx_bcast_pkts;
590
591         eth_stats->ibytes = stats.rx_ucast_bytes +
592             stats.rx_mcast_bytes + stats.rx_bcast_bytes;
593
594         eth_stats->ierrors = stats.rx_crc_errors +
595             stats.rx_align_errors +
596             stats.rx_carrier_errors +
597             stats.rx_oversize_packets +
598             stats.rx_jabbers + stats.rx_undersize_packets;
599
600         eth_stats->rx_nombuf = stats.no_buff_discards;
601
602         eth_stats->imissed = stats.mftag_filter_discards +
603             stats.mac_filter_discards +
604             stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;
605
606         /* TX stats */
607         eth_stats->opackets = stats.tx_ucast_pkts +
608             stats.tx_mcast_pkts + stats.tx_bcast_pkts;
609
610         eth_stats->obytes = stats.tx_ucast_bytes +
611             stats.tx_mcast_bytes + stats.tx_bcast_bytes;
612
613         eth_stats->oerrors = stats.tx_err_drop_pkts;
614
615         DP_INFO(edev,
616                 "no_buff_discards=%" PRIu64 ""
617                 " mac_filter_discards=%" PRIu64 ""
618                 " brb_truncates=%" PRIu64 ""
619                 " brb_discards=%" PRIu64 "\n",
620                 stats.no_buff_discards,
621                 stats.mac_filter_discards,
622                 stats.brb_truncates, stats.brb_discards);
623 }
624
625 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
626 {
627         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
628         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
629         struct qed_link_params link_params;
630         int rc;
631
632         DP_INFO(edev, "setting link state %d\n", link_up);
633         memset(&link_params, 0, sizeof(link_params));
634         link_params.link_up = link_up;
635         rc = qdev->ops->common->set_link(edev, &link_params);
636         if (rc != ECORE_SUCCESS)
637                 DP_ERR(edev, "Unable to set link state %d\n", link_up);
638
639         return rc;
640 }
641
/* dev op: bring the link up (thin wrapper over qede_dev_set_link_state). */
static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, true);
}
646
/* dev op: bring the link down (thin wrapper over qede_dev_set_link_state). */
static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, false);
}
651
652 static void qede_reset_stats(struct rte_eth_dev *eth_dev)
653 {
654         struct qede_dev *qdev = eth_dev->data->dev_private;
655         struct ecore_dev *edev = &qdev->edev;
656
657         ecore_reset_vport_stats(edev);
658 }
659
660 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
661 {
662         enum qed_filter_rx_mode_type type =
663             QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
664
665         if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
666                 type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
667
668         qede_rx_mode_setting(eth_dev, type);
669 }
670
671 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
672 {
673         if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
674                 qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_PROMISC);
675         else
676                 qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
677 }
678
679 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
680                               struct rte_eth_fc_conf *fc_conf)
681 {
682         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
683         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
684         struct qed_link_output current_link;
685         struct qed_link_params params;
686
687         memset(&current_link, 0, sizeof(current_link));
688         qdev->ops->common->get_link(edev, &current_link);
689
690         memset(&params, 0, sizeof(params));
691         params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
692         if (fc_conf->autoneg) {
693                 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
694                         DP_ERR(edev, "Autoneg not supported\n");
695                         return -EINVAL;
696                 }
697                 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
698         }
699
700         /* Pause is assumed to be supported (SUPPORTED_Pause) */
701         if (fc_conf->mode == RTE_FC_FULL)
702                 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
703                                         QED_LINK_PAUSE_RX_ENABLE);
704         if (fc_conf->mode == RTE_FC_TX_PAUSE)
705                 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
706         if (fc_conf->mode == RTE_FC_RX_PAUSE)
707                 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
708
709         params.link_up = true;
710         (void)qdev->ops->common->set_link(edev, &params);
711
712         return 0;
713 }
714
715 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
716                               struct rte_eth_fc_conf *fc_conf)
717 {
718         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
719         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
720         struct qed_link_output current_link;
721
722         memset(&current_link, 0, sizeof(current_link));
723         qdev->ops->common->get_link(edev, &current_link);
724
725         if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
726                 fc_conf->autoneg = true;
727
728         if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
729                                          QED_LINK_PAUSE_TX_ENABLE))
730                 fc_conf->mode = RTE_FC_FULL;
731         else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
732                 fc_conf->mode = RTE_FC_RX_PAUSE;
733         else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
734                 fc_conf->mode = RTE_FC_TX_PAUSE;
735         else
736                 fc_conf->mode = RTE_FC_NONE;
737
738         return 0;
739 }
740
741 static const uint32_t *
742 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
743 {
744         static const uint32_t ptypes[] = {
745                 RTE_PTYPE_L3_IPV4,
746                 RTE_PTYPE_L3_IPV6,
747                 RTE_PTYPE_UNKNOWN
748         };
749
750         if (eth_dev->rx_pkt_burst == qede_recv_pkts)
751                 return ptypes;
752
753         return NULL;
754 }
755
756 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
757                          struct rte_eth_rss_conf *rss_conf)
758 {
759         struct qed_update_vport_params vport_update_params;
760         struct qede_dev *qdev = eth_dev->data->dev_private;
761         struct ecore_dev *edev = &qdev->edev;
762         uint8_t rss_caps;
763         uint32_t *key = (uint32_t *)rss_conf->rss_key;
764         uint64_t hf = rss_conf->rss_hf;
765         int i;
766
767         if (hf == 0)
768                 DP_ERR(edev, "hash function 0 will disable RSS\n");
769
770         rss_caps = 0;
771         rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
772         rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
773         rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
774         rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
775         rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
776         rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
777
778         /* If the mapping doesn't fit any supported, return */
779         if (rss_caps == 0 && hf != 0)
780                 return -EINVAL;
781
782         memset(&vport_update_params, 0, sizeof(vport_update_params));
783
784         if (key != NULL)
785                 memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
786                        rss_conf->rss_key_len);
787
788         qdev->rss_params.rss_caps = rss_caps;
789         memcpy(&vport_update_params.rss_params, &qdev->rss_params,
790                sizeof(vport_update_params.rss_params));
791         vport_update_params.update_rss_flg = 1;
792         vport_update_params.vport_id = 0;
793
794         return qdev->ops->vport_update(edev, &vport_update_params);
795 }
796
797 int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
798                            struct rte_eth_rss_conf *rss_conf)
799 {
800         struct qede_dev *qdev = eth_dev->data->dev_private;
801         uint64_t hf;
802
803         if (rss_conf->rss_key_len < sizeof(qdev->rss_params.rss_key))
804                 return -EINVAL;
805
806         if (rss_conf->rss_key)
807                 memcpy(rss_conf->rss_key, qdev->rss_params.rss_key,
808                        sizeof(qdev->rss_params.rss_key));
809
810         hf = 0;
811         hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4)     ?
812                         ETH_RSS_IPV4 : 0;
813         hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6)     ?
814                         ETH_RSS_IPV6 : 0;
815         hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6)     ?
816                         ETH_RSS_IPV6_EX : 0;
817         hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4_TCP) ?
818                         ETH_RSS_NONFRAG_IPV4_TCP : 0;
819         hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
820                         ETH_RSS_NONFRAG_IPV6_TCP : 0;
821         hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
822                         ETH_RSS_IPV6_TCP_EX : 0;
823
824         rss_conf->rss_hf = hf;
825
826         return 0;
827 }
828
829 int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
830                          struct rte_eth_rss_reta_entry64 *reta_conf,
831                          uint16_t reta_size)
832 {
833         struct qed_update_vport_params vport_update_params;
834         struct qede_dev *qdev = eth_dev->data->dev_private;
835         struct ecore_dev *edev = &qdev->edev;
836         uint16_t i, idx, shift;
837
838         if (reta_size > ETH_RSS_RETA_SIZE_128) {
839                 DP_ERR(edev, "reta_size %d is not supported by hardware\n",
840                        reta_size);
841                 return -EINVAL;
842         }
843
844         memset(&vport_update_params, 0, sizeof(vport_update_params));
845         memcpy(&vport_update_params.rss_params, &qdev->rss_params,
846                sizeof(vport_update_params.rss_params));
847
848         for (i = 0; i < reta_size; i++) {
849                 idx = i / RTE_RETA_GROUP_SIZE;
850                 shift = i % RTE_RETA_GROUP_SIZE;
851                 if (reta_conf[idx].mask & (1ULL << shift)) {
852                         uint8_t entry = reta_conf[idx].reta[shift];
853                         qdev->rss_params.rss_ind_table[i] = entry;
854                 }
855         }
856
857         vport_update_params.update_rss_flg = 1;
858         vport_update_params.vport_id = 0;
859
860         return qdev->ops->vport_update(edev, &vport_update_params);
861 }
862
863 int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
864                         struct rte_eth_rss_reta_entry64 *reta_conf,
865                         uint16_t reta_size)
866 {
867         struct qede_dev *qdev = eth_dev->data->dev_private;
868         uint16_t i, idx, shift;
869
870         if (reta_size > ETH_RSS_RETA_SIZE_128) {
871                 struct ecore_dev *edev = &qdev->edev;
872                 DP_ERR(edev, "reta_size %d is not supported\n",
873                        reta_size);
874         }
875
876         for (i = 0; i < reta_size; i++) {
877                 idx = i / RTE_RETA_GROUP_SIZE;
878                 shift = i % RTE_RETA_GROUP_SIZE;
879                 if (reta_conf[idx].mask & (1ULL << shift)) {
880                         uint8_t entry = qdev->rss_params.rss_ind_table[i];
881                         reta_conf[idx].reta[shift] = entry;
882                 }
883         }
884
885         return 0;
886 }
887
888 int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
889 {
890         uint32_t frame_size;
891         struct qede_dev *qdev = dev->data->dev_private;
892         struct rte_eth_dev_info dev_info = {0};
893
894         qede_dev_info_get(dev, &dev_info);
895
896         /* VLAN_TAG = 4 */
897         frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;
898
899         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
900                 return -EINVAL;
901
902         if (!dev->data->scattered_rx &&
903             frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
904                 return -EINVAL;
905
906         if (frame_size > ETHER_MAX_LEN)
907                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
908         else
909                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
910
911         /* update max frame size */
912         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
913         qdev->mtu = mtu;
914         qede_dev_stop(dev);
915         qede_dev_start(dev);
916
917         return 0;
918 }
919
/* ethdev callback table for physical-function (PF) ports.  Differs from
 * qede_eth_vf_dev_ops below by additionally exposing MAC filtering and
 * flow-control operations, which a VF cannot perform directly.
 */
static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update  = qede_rss_reta_update,
	.reta_query  = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
};
953
/* ethdev callback table for virtual-function (VF) ports.  A reduced
 * subset of qede_eth_dev_ops: no MAC address management and no flow
 * control, since those are controlled by the PF on behalf of the VF.
 */
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update  = qede_rss_reta_update,
	.reta_query  = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
};
982
983 static void qede_update_pf_params(struct ecore_dev *edev)
984 {
985         struct ecore_pf_params pf_params;
986         /* 32 rx + 32 tx */
987         memset(&pf_params, 0, sizeof(struct ecore_pf_params));
988         pf_params.eth_pf_params.num_cons = 64;
989         qed_ops->common->update_pf_params(edev, &pf_params);
990 }
991
992 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
993 {
994         struct rte_pci_device *pci_dev;
995         struct rte_pci_addr pci_addr;
996         struct qede_dev *adapter;
997         struct ecore_dev *edev;
998         struct qed_dev_eth_info dev_info;
999         struct qed_slowpath_params params;
1000         uint32_t qed_ver;
1001         static bool do_once = true;
1002         uint8_t bulletin_change;
1003         uint8_t vf_mac[ETHER_ADDR_LEN];
1004         uint8_t is_mac_forced;
1005         bool is_mac_exist;
1006         /* Fix up ecore debug level */
1007         uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
1008         uint8_t dp_level = ECORE_LEVEL_VERBOSE;
1009         uint32_t max_mac_addrs;
1010         int rc;
1011
1012         /* Extract key data structures */
1013         adapter = eth_dev->data->dev_private;
1014         edev = &adapter->edev;
1015         pci_addr = eth_dev->pci_dev->addr;
1016
1017         PMD_INIT_FUNC_TRACE(edev);
1018
1019         snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
1020                  pci_addr.bus, pci_addr.devid, pci_addr.function,
1021                  eth_dev->data->port_id);
1022
1023         eth_dev->rx_pkt_burst = qede_recv_pkts;
1024         eth_dev->tx_pkt_burst = qede_xmit_pkts;
1025
1026         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1027                 DP_NOTICE(edev, false,
1028                           "Skipping device init from secondary process\n");
1029                 return 0;
1030         }
1031
1032         pci_dev = eth_dev->pci_dev;
1033
1034         rte_eth_copy_pci_info(eth_dev, pci_dev);
1035
1036         qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
1037
1038         qed_ops = qed_get_eth_ops();
1039         if (!qed_ops) {
1040                 DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
1041                 return -EINVAL;
1042         }
1043
1044         DP_INFO(edev, "Starting qede probe\n");
1045
1046         rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
1047                                     dp_module, dp_level, is_vf);
1048
1049         if (rc != 0) {
1050                 DP_ERR(edev, "qede probe failed rc %d\n", rc);
1051                 return -ENODEV;
1052         }
1053
1054         qede_update_pf_params(edev);
1055
1056         rte_intr_callback_register(&eth_dev->pci_dev->intr_handle,
1057                                    qede_interrupt_handler, (void *)eth_dev);
1058
1059         if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) {
1060                 DP_ERR(edev, "rte_intr_enable() failed\n");
1061                 return -ENODEV;
1062         }
1063
1064         /* Start the Slowpath-process */
1065         memset(&params, 0, sizeof(struct qed_slowpath_params));
1066         params.int_mode = ECORE_INT_MODE_MSIX;
1067         params.drv_major = QEDE_MAJOR_VERSION;
1068         params.drv_minor = QEDE_MINOR_VERSION;
1069         params.drv_rev = QEDE_REVISION_VERSION;
1070         params.drv_eng = QEDE_ENGINEERING_VERSION;
1071         strncpy((char *)params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
1072
1073         rc = qed_ops->common->slowpath_start(edev, &params);
1074         if (rc) {
1075                 DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
1076                 return -ENODEV;
1077         }
1078
1079         rc = qed_ops->fill_dev_info(edev, &dev_info);
1080         if (rc) {
1081                 DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
1082                 qed_ops->common->slowpath_stop(edev);
1083                 qed_ops->common->remove(edev);
1084                 return -ENODEV;
1085         }
1086
1087         qede_alloc_etherdev(adapter, &dev_info);
1088
1089         adapter->ops->common->set_id(edev, edev->name, QEDE_DRV_MODULE_VERSION);
1090
1091         if (!is_vf)
1092                 adapter->dev_info.num_mac_addrs =
1093                         (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
1094                                             ECORE_MAC);
1095         else
1096                 ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
1097                                              &adapter->dev_info.num_mac_addrs);
1098
1099         /* Allocate memory for storing MAC addr */
1100         eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
1101                                         (ETHER_ADDR_LEN *
1102                                         adapter->dev_info.num_mac_addrs),
1103                                         RTE_CACHE_LINE_SIZE);
1104
1105         if (eth_dev->data->mac_addrs == NULL) {
1106                 DP_ERR(edev, "Failed to allocate MAC address\n");
1107                 qed_ops->common->slowpath_stop(edev);
1108                 qed_ops->common->remove(edev);
1109                 return -ENOMEM;
1110         }
1111
1112         if (!is_vf) {
1113                 ether_addr_copy((struct ether_addr *)edev->hwfns[0].
1114                                 hw_info.hw_mac_addr,
1115                                 &eth_dev->data->mac_addrs[0]);
1116                 ether_addr_copy(&eth_dev->data->mac_addrs[0],
1117                                 &adapter->primary_mac);
1118         } else {
1119                 ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
1120                                        &bulletin_change);
1121                 if (bulletin_change) {
1122                         is_mac_exist =
1123                             ecore_vf_bulletin_get_forced_mac(
1124                                                 ECORE_LEADING_HWFN(edev),
1125                                                 vf_mac,
1126                                                 &is_mac_forced);
1127                         if (is_mac_exist && is_mac_forced) {
1128                                 DP_INFO(edev, "VF macaddr received from PF\n");
1129                                 ether_addr_copy((struct ether_addr *)&vf_mac,
1130                                                 &eth_dev->data->mac_addrs[0]);
1131                                 ether_addr_copy(&eth_dev->data->mac_addrs[0],
1132                                                 &adapter->primary_mac);
1133                         } else {
1134                                 DP_NOTICE(edev, false,
1135                                           "No VF macaddr assigned\n");
1136                         }
1137                 }
1138         }
1139
1140         eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
1141
1142         if (do_once) {
1143                 qede_print_adapter_info(adapter);
1144                 do_once = false;
1145         }
1146
1147         DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
1148                   adapter->primary_mac.addr_bytes[0],
1149                   adapter->primary_mac.addr_bytes[1],
1150                   adapter->primary_mac.addr_bytes[2],
1151                   adapter->primary_mac.addr_bytes[3],
1152                   adapter->primary_mac.addr_bytes[4],
1153                   adapter->primary_mac.addr_bytes[5]);
1154
1155         return rc;
1156 }
1157
/* VF ethdev init entry point: delegate to the common path as a VF. */
static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, true);
}
1162
/* PF ethdev init entry point: delegate to the common path as a PF. */
static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, false);
}
1167
1168 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
1169 {
1170         /* only uninitialize in the primary process */
1171         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1172                 return 0;
1173
1174         /* safe to close dev here */
1175         qede_dev_close(eth_dev);
1176
1177         eth_dev->dev_ops = NULL;
1178         eth_dev->rx_pkt_burst = NULL;
1179         eth_dev->tx_pkt_burst = NULL;
1180
1181         if (eth_dev->data->mac_addrs)
1182                 rte_free(eth_dev->data->mac_addrs);
1183
1184         eth_dev->data->mac_addrs = NULL;
1185
1186         return 0;
1187 }
1188
/* PF ethdev uninit entry point; teardown is shared with the VF path. */
static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}
1193
/* VF ethdev uninit entry point; teardown is shared with the PF path. */
static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}
1198
/* PCI device IDs claimed by the VF driver; terminated by a zeroed entry. */
static struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV)
	},
	{.vendor_id = 0,}
};
1209
/* PCI device IDs claimed by the PF driver; terminated by a zeroed entry. */
static struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)
	},
	{.vendor_id = 0,}
};
1226
/* eth_driver descriptor for VF devices: binds the VF PCI ID table to
 * the VF init/uninit callbacks.
 */
static struct eth_driver rte_qedevf_pmd = {
	.pci_drv = {
		    .name = "rte_qedevf_pmd",
		    .id_table = pci_id_qedevf_map,
		    .drv_flags =
		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		    },
	.eth_dev_init = qedevf_eth_dev_init,
	.eth_dev_uninit = qedevf_eth_dev_uninit,
	.dev_private_size = sizeof(struct qede_dev),
};
1238
/* eth_driver descriptor for PF devices: binds the PF PCI ID table to
 * the PF init/uninit callbacks.
 */
static struct eth_driver rte_qede_pmd = {
	.pci_drv = {
		    .name = "rte_qede_pmd",
		    .id_table = pci_id_qede_map,
		    .drv_flags =
		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		    },
	.eth_dev_init = qede_eth_dev_init,
	.eth_dev_uninit = qede_eth_dev_uninit,
	.dev_private_size = sizeof(struct qede_dev),
};
1250
1251 static int
1252 rte_qedevf_pmd_init(const char *name __rte_unused,
1253                     const char *params __rte_unused)
1254 {
1255         rte_eth_driver_register(&rte_qedevf_pmd);
1256
1257         return 0;
1258 }
1259
1260 static int
1261 rte_qede_pmd_init(const char *name __rte_unused,
1262                   const char *params __rte_unused)
1263 {
1264         rte_eth_driver_register(&rte_qede_pmd);
1265
1266         return 0;
1267 }
1268
1269 static struct rte_driver rte_qedevf_driver = {
1270         .type = PMD_PDEV,
1271         .init = rte_qede_pmd_init
1272 };
1273
1274 static struct rte_driver rte_qede_driver = {
1275         .type = PMD_PDEV,
1276         .init = rte_qedevf_pmd_init
1277 };
1278
1279 PMD_REGISTER_DRIVER(rte_qede_driver);
1280 PMD_REGISTER_DRIVER(rte_qedevf_driver);