/*
 * Copyright (c) 2016 QLogic Corporation.
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_ethdev.h"
#include <rte_alarm.h>

/* Globals */
static const struct qed_eth_ops *qed_ops;
static const char *drivername = "qede pmd";
static int64_t timer_period = 1;

static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}

static void
qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	if (rte_intr_enable(&eth_dev->pci_dev->intr_handle))
		DP_ERR(edev, "rte_intr_enable failed\n");
}

static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
	qdev->num_tc = qdev->dev_info.num_tc;
}

static void qede_print_adapter_info(struct qede_dev *qdev)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_dev_info *info = &qdev->dev_info.common;
	static char ver_str[QED_DRV_VER_STR_SIZE];

	DP_INFO(edev, "*********************************\n");
	DP_INFO(edev, " Chip details : %s%d\n",
		ECORE_IS_BB(edev) ? "BB" : "AH",
		CHIP_REV_IS_A0(edev) ? 0 : 1);

	sprintf(ver_str, "%s %s_%d.%d.%d.%d", QEDE_PMD_VER_PREFIX,
		edev->ver_str, QEDE_PMD_VERSION_MAJOR, QEDE_PMD_VERSION_MINOR,
		QEDE_PMD_VERSION_REVISION, QEDE_PMD_VERSION_PATCH);
	strcpy(qdev->drv_ver, ver_str);
	DP_INFO(edev, " Driver version : %s\n", ver_str);

	sprintf(ver_str, "%d.%d.%d.%d", info->fw_major, info->fw_minor,
		info->fw_rev, info->fw_eng);
	DP_INFO(edev, " Firmware version : %s\n", ver_str);

	sprintf(ver_str, "%d.%d.%d.%d",
		(info->mfw_rev >> 24) & 0xff,
		(info->mfw_rev >> 16) & 0xff,
		(info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
	DP_INFO(edev, " Management firmware version : %s\n", ver_str);

	DP_INFO(edev, " Firmware file : %s\n", fw_file);
	DP_INFO(edev, "*********************************\n");
}

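/*
 * Example of the resulting log output (all values illustrative only, not
 * taken from real hardware):
 *
 *	 Chip details : BB1
 *	 Driver version : QEDE PMD 8.7.9.0_1.0.0.0
 *	 Firmware version : 8.7.7.0
 *	 Management firmware version : 8.7.8.0
 */
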
static int
qede_set_ucast_rx_mac(struct qede_dev *qdev,
		      enum qed_filter_xcast_params_type opcode,
		      uint8_t mac[ETHER_ADDR_LEN])
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_filter_params filter_cmd;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.mac_valid = 1;
	rte_memcpy(&filter_cmd.filter.ucast.mac[0], &mac[0], ETHER_ADDR_LEN);

	return qdev->ops->filter_config(edev, &filter_cmd);
}

static void
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
		  uint32_t index, __rte_unused uint32_t pool)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	if (index >= qdev->dev_info.num_mac_addrs) {
		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
		       index, qdev->dev_info.num_mac_addrs);
		return;
	}

	/* The MAC address is added even though promiscuous mode is set */
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		DP_INFO(edev, "Port is in promisc mode, yet adding it\n");

	/* Add MAC filters according to the unicast secondary MACs */
	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
				   mac_addr->addr_bytes);
	if (rc)
		DP_ERR(edev, "Unable to add macaddr rc=%d\n", rc);
}

static void
qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ether_addr mac_addr;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	if (index >= qdev->dev_info.num_mac_addrs) {
		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
		       index, qdev->dev_info.num_mac_addrs);
		return;
	}

	/* Use the index maintained by rte */
	ether_addr_copy(&eth_dev->data->mac_addrs[index], &mac_addr);
	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
				   mac_addr.addr_bytes);
	if (rc)
		DP_ERR(edev, "Unable to remove macaddr rc=%d\n", rc);
}

static void
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
					       mac_addr->addr_bytes)) {
		DP_ERR(edev, "Setting MAC address is not allowed\n");
		ether_addr_copy(&qdev->primary_mac,
				&eth_dev->data->mac_addrs[0]);
		return;
	}

	/* First remove the primary MAC */
	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
				   qdev->primary_mac.addr_bytes);
	if (rc) {
		DP_ERR(edev, "Unable to remove current macaddr,"
			     " reverting to previous default mac\n");
		ether_addr_copy(&qdev->primary_mac,
				&eth_dev->data->mac_addrs[0]);
		return;
	}

	/* Add the new MAC as the default one */
	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
				   mac_addr->addr_bytes);
	if (rc)
		DP_ERR(edev, "Unable to add new default mac\n");
	else
		ether_addr_copy(mac_addr, &qdev->primary_mac);
}

static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_update_vport_params params = {
		.accept_any_vlan = action,
		.update_accept_any_vlan_flg = 1,
	};
	int rc;

	/* Proceed only if action actually needs to be performed */
	if (qdev->accept_any_vlan == action)
		return;

	rc = qdev->ops->vport_update(edev, &params);
	if (rc) {
		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
		       action ? "enable" : "disable");
	} else {
		DP_INFO(edev, "%s accept-any-vlan\n",
			action ? "enabled" : "disabled");
		qdev->accept_any_vlan = action;
	}
}

void qede_config_rx_mode(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	/* TODO: - QED_FILTER_TYPE_UCAST */
	enum qed_filter_rx_mode_type accept_flags =
			QED_FILTER_RX_MODE_TYPE_REGULAR;
	struct qed_filter_params rx_mode;
	int rc;

	/* Configure the struct for the Rx mode */
	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
	rx_mode.type = QED_FILTER_TYPE_RX_MODE;

	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_REPLACE,
				   eth_dev->data->mac_addrs[0].addr_bytes);
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	} else {
		rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
					   eth_dev->data->
					   mac_addrs[0].addr_bytes);
		if (rc) {
			DP_ERR(edev, "Unable to add filter\n");
			return;
		}
	}

	/* Take care of VLAN mode */
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
		qede_config_accept_any_vlan(qdev, true);
	} else if (!qdev->non_configured_vlans) {
		/* If we don't have non-configured VLANs and promisc
		 * is not set, then check if we need to disable
		 * accept_any_vlan mode: in that case accept_any_vlan
		 * mode is only set as part of IFF_PROMISC flag handling.
		 */
		qede_config_accept_any_vlan(qdev, false);
	}

	rx_mode.filter.accept_flags = accept_flags;
	rc = qdev->ops->filter_config(edev, &rx_mode);
	if (rc)
		DP_ERR(edev, "Filter config failed rc=%d\n", rc);
}

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
{
	struct qed_update_vport_params vport_update_params;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = 0;
	vport_update_params.update_inner_vlan_removal_flg = 1;
	vport_update_params.inner_vlan_removal_flg = set_stripping;
	rc = qdev->ops->vport_update(edev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
		return rc;
	}

	return 0;
}

static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
			(void)qede_vlan_stripping(eth_dev, 1);
		else
			(void)qede_vlan_stripping(eth_dev, 0);
	}

	DP_INFO(edev, "vlan offload mask %d vlan-strip %d\n",
		mask, eth_dev->data->dev_conf.rxmode.hw_vlan_strip);
}

static int qede_set_ucast_rx_vlan(struct qede_dev *qdev,
				  enum qed_filter_xcast_params_type opcode,
				  uint16_t vid)
{
	struct qed_filter_params filter_cmd;
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.vlan_valid = 1;
	filter_cmd.filter.ucast.vlan = vid;

	return qdev->ops->filter_config(edev, &filter_cmd);
}

static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
				uint16_t vlan_id, int on)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
	int rc = 0;

	if (on &&
	    qdev->configured_vlans == dev_info->num_vlan_filters) {
		DP_NOTICE(edev, false, "Reached max VLAN filter limit,"
				       " enabling accept_any_vlan\n");
		qede_config_accept_any_vlan(qdev, true);
		return 0;
	}

	if (on) {
		rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD,
					    vlan_id);
		if (rc)
			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
			       rc);
		else
			qdev->configured_vlans++;
	} else {
		rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL,
					    vlan_id);
		if (rc)
			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
			       vlan_id, rc);
		else
			qdev->configured_vlans--;
	}

	DP_INFO(edev, "vlan_id %u on %u rc %d configured_vlans %u\n",
		vlan_id, on, rc, qdev->configured_vlans);

	return rc;
}

static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

	PMD_INIT_FUNC_TRACE(edev);

	if (eth_dev->data->nb_rx_queues != eth_dev->data->nb_tx_queues) {
		DP_NOTICE(edev, false,
			  "Unequal number of rx/tx queues "
			  "is not supported RX=%u TX=%u\n",
			  eth_dev->data->nb_rx_queues,
			  eth_dev->data->nb_tx_queues);
		return -EINVAL;
	}

	/* Check requirements for 100G mode */
	if (edev->num_hwfns > 1) {
		if (eth_dev->data->nb_rx_queues < 2) {
			DP_NOTICE(edev, false,
				  "100G mode requires minimum two queues\n");
			return -EINVAL;
		}

		if ((eth_dev->data->nb_rx_queues % 2) != 0) {
			DP_NOTICE(edev, false,
				  "100G mode requires even number of queues\n");
			return -EINVAL;
		}
	}

	qdev->num_rss = eth_dev->data->nb_rx_queues;

	/* Initial state */
	qdev->state = QEDE_CLOSE;

	/* Sanity checks and throw warnings */
	if (rxmode->enable_scatter == 1) {
		DP_ERR(edev, "RX scatter packets is not supported\n");
		return -EINVAL;
	}

	if (rxmode->enable_lro == 1) {
		DP_INFO(edev, "LRO is not supported\n");
		return -EINVAL;
	}

	if (!rxmode->hw_strip_crc)
		DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");

	if (!rxmode->hw_ip_checksum)
		DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
			      "in hw\n");

	DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
		QEDE_RSS_CNT(qdev), qdev->num_tc);

	DP_INFO(edev, "my_id %u rel_pf_id %u abs_pf_id %u"
		" port %u first_on_engine %d\n",
		edev->hwfns[0].my_id,
		edev->hwfns[0].rel_pf_id,
		edev->hwfns[0].abs_pf_id,
		edev->hwfns[0].port_id, edev->hwfns[0].first_on_engine);

	return 0;
}

/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
	.nb_max = NUM_RX_BDS_MAX,
	.nb_align = 128 /* lowest common multiple */
};

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
	.nb_max = NUM_TX_BDS_MAX,
};

static void
qede_dev_info_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_dev_info *dev_info)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
					      QEDE_ETH_OVERHEAD);
	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
	dev_info->rx_desc_lim = qede_rx_desc_lim;
	dev_info->tx_desc_lim = qede_tx_desc_lim;
	dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev);
	dev_info->max_tx_queues = dev_info->max_rx_queues;
	dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
	if (IS_VF(edev))
		dev_info->max_vfs = 0;
	else
		dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
	dev_info->driver_name = qdev->drv_ver;
	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.txq_flags = QEDE_TXQ_FLAGS,
	};

	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
				     DEV_RX_OFFLOAD_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_UDP_CKSUM |
				     DEV_RX_OFFLOAD_TCP_CKSUM);
	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
				     DEV_TX_OFFLOAD_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_UDP_CKSUM |
				     DEV_TX_OFFLOAD_TCP_CKSUM);

	dev_info->speed_capa = ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;
}

/* Returns 0 when the link status changed, -1 when it did not */
static int
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t link_duplex;
	struct qed_link_output link;
	struct rte_eth_link *curr = &eth_dev->data->dev_link;

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);

	/* Link Speed */
	curr->link_speed = link.speed;

	/* Link Mode */
	switch (link.duplex) {
	case QEDE_DUPLEX_HALF:
		link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	case QEDE_DUPLEX_FULL:
		link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case QEDE_DUPLEX_UNKNOWN:
	default:
		link_duplex = -1;
	}
	curr->link_duplex = link_duplex;

	/* Link Status */
	curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;

	/* Autoneg */
	curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;

	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
		curr->link_speed, curr->link_duplex,
		curr->link_autoneg, curr->link_status);

	/* 0 means link status changed, -1 means not changed */
	return ((curr->link_status == link.link_up) ? -1 : 0);
}

static void
qede_rx_mode_setting(struct rte_eth_dev *eth_dev,
		     enum qed_filter_rx_mode_type accept_flags)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_filter_params rx_mode;

	DP_INFO(edev, "%s mode %u\n", __func__, accept_flags);

	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
	rx_mode.type = QED_FILTER_TYPE_RX_MODE;
	rx_mode.filter.accept_flags = accept_flags;
	qdev->ops->filter_config(edev, &rx_mode);
}

static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;

	PMD_INIT_FUNC_TRACE(edev);

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	qede_rx_mode_setting(eth_dev, type);
}

static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		qede_rx_mode_setting(eth_dev,
				     QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
	else
		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static void qede_poll_sp_sb_cb(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	qede_interrupt_action(&edev->hwfns[1]);

	rc = rte_eal_alarm_set(timer_period * US_PER_S,
			       qede_poll_sp_sb_cb,
			       (void *)eth_dev);
	if (rc != 0) {
		DP_ERR(edev, "Unable to start periodic"
			     " timer rc %d\n", rc);
		assert(false && "Unable to start periodic timer");
	}
}

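/* Note: this callback is armed initially from qede_common_dev_init() when
 * edev->num_hwfns > 1 (CMT / 100G mode) and then re-arms itself above, so
 * slowpath events on both engines keep being polled every timer_period
 * second(s) until the alarm is cancelled in qede_dev_close().
 */
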
static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	/* dev_stop() shall cleanup fp resources in hw but without releasing
	 * dma memories and sw structures so that dev_start() can be called
	 * by the app without reconfiguration. However, in dev_close() we
	 * can release all the resources and the device can be brought up
	 * afresh.
	 */
	if (qdev->state != QEDE_STOP)
		qede_dev_stop(eth_dev);
	else
		DP_INFO(edev, "Device is already stopped\n");

	qede_free_mem_load(qdev);

	qede_free_fp_arrays(qdev);

	qede_dev_set_link_state(eth_dev, false);

	qdev->ops->common->slowpath_stop(edev);

	qdev->ops->common->remove(edev);

	rte_intr_disable(&eth_dev->pci_dev->intr_handle);

	rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle,
				     qede_interrupt_handler, (void *)eth_dev);

	if (edev->num_hwfns > 1)
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);

	qdev->state = QEDE_CLOSE;
}

static void
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;

	qdev->ops->get_vport_stats(edev, &stats);

	/* RX Stats */
	eth_stats->ipackets = stats.rx_ucast_pkts +
	    stats.rx_mcast_pkts + stats.rx_bcast_pkts;

	eth_stats->ibytes = stats.rx_ucast_bytes +
	    stats.rx_mcast_bytes + stats.rx_bcast_bytes;

	eth_stats->ierrors = stats.rx_crc_errors +
	    stats.rx_align_errors +
	    stats.rx_carrier_errors +
	    stats.rx_oversize_packets +
	    stats.rx_jabbers + stats.rx_undersize_packets;

	eth_stats->rx_nombuf = stats.no_buff_discards;

	eth_stats->imissed = stats.mftag_filter_discards +
	    stats.mac_filter_discards +
	    stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;

	/* TX Stats */
	eth_stats->opackets = stats.tx_ucast_pkts +
	    stats.tx_mcast_pkts + stats.tx_bcast_pkts;

	eth_stats->obytes = stats.tx_ucast_bytes +
	    stats.tx_mcast_bytes + stats.tx_bcast_bytes;

	eth_stats->oerrors = stats.tx_err_drop_pkts;

	DP_INFO(edev,
		"no_buff_discards=%" PRIu64
		" mac_filter_discards=%" PRIu64
		" brb_truncates=%" PRIu64
		" brb_discards=%" PRIu64 "\n",
		stats.no_buff_discards,
		stats.mac_filter_discards,
		stats.brb_truncates, stats.brb_discards);
}

int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "setting link state %d\n", link_up);
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = link_up;
	rc = qdev->ops->common->set_link(edev, &link_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(edev, "Unable to set link state %d\n", link_up);

	return rc;
}

static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, true);
}

static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, false);
}

static void qede_reset_stats(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
}

static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	enum qed_filter_rx_mode_type type =
	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

	qede_rx_mode_setting(eth_dev, type);
}

static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_PROMISC);
	else
		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;
	struct qed_link_params params;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
	if (fc_conf->autoneg) {
		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
			DP_ERR(edev, "Autoneg not supported\n");
			return -EINVAL;
		}
		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	}

	/* Pause is assumed to be supported (SUPPORTED_Pause) */
	if (fc_conf->mode == RTE_FC_FULL)
		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
					QED_LINK_PAUSE_RX_ENABLE);
	if (fc_conf->mode == RTE_FC_TX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
	if (fc_conf->mode == RTE_FC_RX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;

	params.link_up = true;
	(void)qdev->ops->common->set_link(edev, &params);

	return 0;
}

static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		fc_conf->autoneg = true;

	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
					 QED_LINK_PAUSE_TX_ENABLE))
		fc_conf->mode = RTE_FC_FULL;
	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static const uint32_t *
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_UNKNOWN
	};

	if (eth_dev->rx_pkt_burst == qede_recv_pkts)
		return ptypes;

	return NULL;
}

int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_conf *rss_conf)
{
	struct qed_update_vport_params vport_update_params;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint8_t rss_caps = 0;
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint64_t hf = rss_conf->rss_hf;

	if (hf == 0)
		DP_ERR(edev, "hash function 0 will disable RSS\n");

	rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
	rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
	rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
	rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
	rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
	rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;

	/* If the mapping doesn't fit any supported hash type, return */
	if (rss_caps == 0 && hf != 0)
		return -EINVAL;

	memset(&vport_update_params, 0, sizeof(vport_update_params));

	if (key)
		memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
		       rss_conf->rss_key_len);

	qdev->rss_params.rss_caps = rss_caps;
	memcpy(&vport_update_params.rss_params, &qdev->rss_params,
	       sizeof(vport_update_params.rss_params));
	vport_update_params.update_rss_flg = 1;
	vport_update_params.vport_id = 0;

	return qdev->ops->vport_update(edev, &vport_update_params);
}

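/*
 * Worked example of the hf -> rss_caps mapping above (illustrative): an
 * application passing hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP ends up
 * with rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV4_TCP, while an hf made up
 * solely of unsupported bits (e.g. ETH_RSS_L2_PAYLOAD) leaves rss_caps == 0,
 * so the function bails out before touching the vport.
 */
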
int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
			   struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	uint64_t hf = 0;

	if (rss_conf->rss_key_len < sizeof(qdev->rss_params.rss_key))
		return -EINVAL;

	if (rss_conf->rss_key)
		memcpy(rss_conf->rss_key, qdev->rss_params.rss_key,
		       sizeof(qdev->rss_params.rss_key));

	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4) ?
	    ETH_RSS_IPV4 : 0;
	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
	    ETH_RSS_IPV6 : 0;
	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
	    ETH_RSS_IPV6_EX : 0;
	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4_TCP) ?
	    ETH_RSS_NONFRAG_IPV4_TCP : 0;
	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
	    ETH_RSS_NONFRAG_IPV6_TCP : 0;
	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
	    ETH_RSS_IPV6_TCP_EX : 0;

	rss_conf->rss_hf = hf;

	return 0;
}

int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct qed_update_vport_params vport_update_params;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t i, idx, shift;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
		       reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			uint8_t entry = reta_conf[idx].reta[shift];
			qdev->rss_params.rss_ind_table[i] = entry;
		}
	}

	/* Copy the RSS config only after the indirection table was updated,
	 * so the new table is what actually gets sent to the vport.
	 */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	memcpy(&vport_update_params.rss_params, &qdev->rss_params,
	       sizeof(vport_update_params.rss_params));
	vport_update_params.update_rss_flg = 1;
	vport_update_params.vport_id = 0;

	return qdev->ops->vport_update(edev, &vport_update_params);
}

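/*
 * Worked example of the idx/shift arithmetic above (illustrative): with
 * RTE_RETA_GROUP_SIZE == 64, redirection-table entry i = 70 lives in
 * reta_conf[1] (70 / 64) at slot 6 (70 % 64), and is only applied when
 * bit 6 of reta_conf[1].mask is set.
 */
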
int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	uint16_t i, idx, shift;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		struct ecore_dev *edev = &qdev->edev;

		DP_ERR(edev, "reta_size %d is not supported\n",
		       reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			uint8_t entry = qdev->rss_params.rss_ind_table[i];
			reta_conf[idx].reta[shift] = entry;
		}
	}

	return 0;
}

int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	uint32_t frame_size;
	struct qede_dev *qdev = dev->data->dev_private;
	struct rte_eth_dev_info dev_info = {0};

	qede_dev_info_get(dev, &dev_info);

	/* VLAN_TAG = 4 */
	frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;

	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
		return -EINVAL;

	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	/* Update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
	qdev->mtu = mtu;

	return 0;
}

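/*
 * Worked example of the frame-size arithmetic above (illustrative): for
 * mtu = 1500, frame_size = 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4)
 * + 4 (VLAN tag) = 1522. Since 1522 > ETHER_MAX_LEN (1518), jumbo_frame
 * is enabled so that a maximally-sized tagged frame still fits.
 */
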
static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
};

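/*
 * Minimal application-side sketch of how these ops get invoked through the
 * generic rte_ethdev API (assumes the standard rte_ethdev API of this DPDK
 * release; port_id and mbuf_pool setup omitted, error checks elided):
 *
 *	struct rte_eth_conf port_conf;
 *
 *	memset(&port_conf, 0, sizeof(port_conf));
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf); // qede_dev_configure
 *	rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(),
 *			       NULL, mbuf_pool);          // qede_rx_queue_setup
 *	rte_eth_tx_queue_setup(port_id, 0, 256, rte_socket_id(),
 *			       NULL);                     // qede_tx_queue_setup
 *	rte_eth_dev_start(port_id);                       // qede_dev_start
 *	rte_eth_promiscuous_enable(port_id);              // qede_promiscuous_enable
 */
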
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
};

static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = 64;
	qed_ops->common->update_pf_params(edev, &pf_params);
}

static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	uint32_t qed_ver;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	uint32_t max_mac_addrs;
	int rc;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	edev = &adapter->edev;
	pci_addr = eth_dev->pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);

	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	eth_dev->rx_pkt_burst = qede_recv_pkts;
	eth_dev->tx_pkt_burst = qede_xmit_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_NOTICE(edev, false,
			  "Skipping device init from secondary process\n");
		return 0;
	}

	pci_dev = eth_dev->pci_dev;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		return -EINVAL;
	}

	DP_INFO(edev, "Starting qede probe\n");

	rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
				    dp_module, dp_level, is_vf);
	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		return -ENODEV;
	}

	qede_update_pf_params(edev);

	rte_intr_callback_register(&eth_dev->pci_dev->intr_handle,
				   qede_interrupt_handler, (void *)eth_dev);

	if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		return -ENODEV;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));
	params.int_mode = ECORE_INT_MODE_MSIX;
	params.drv_major = QEDE_MAJOR_VERSION;
	params.drv_minor = QEDE_MINOR_VERSION;
	params.drv_rev = QEDE_REVISION_VERSION;
	params.drv_eng = QEDE_ENGINEERING_VERSION;
	strncpy((char *)params.name, "qede LAN", QED_DRV_VER_STR_SIZE);

	/* For CMT mode device do periodic polling for slowpath events.
	 * This is required since uio device uses only one MSI-x
	 * interrupt vector but we need one for each engine.
	 */
	if (edev->num_hwfns > 1) {
		rc = rte_eal_alarm_set(timer_period * US_PER_S,
				       qede_poll_sp_sb_cb,
				       (void *)eth_dev);
		if (rc != 0) {
			DP_ERR(edev, "Unable to start periodic"
				     " timer rc %d\n", rc);
			return -EINVAL;
		}
	}

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	qede_alloc_etherdev(adapter, &dev_info);

	adapter->ops->common->set_id(edev, edev->name, QEDE_DRV_MODULE_VERSION);

	if (!is_vf)
		adapter->dev_info.num_mac_addrs =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					   ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
					     &adapter->dev_info.num_mac_addrs);

	/* Allocate memory for storing MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					       (ETHER_ADDR_LEN *
						adapter->dev_info.num_mac_addrs),
					       RTE_CACHE_LINE_SIZE);

	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENOMEM;
	}

	if (!is_vf) {
		ether_addr_copy((struct ether_addr *)edev->hwfns[0].
				hw_info.hw_mac_addr,
				&eth_dev->data->mac_addrs[0]);
		ether_addr_copy(&eth_dev->data->mac_addrs[0],
				&adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist && is_mac_forced) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				ether_addr_copy((struct ether_addr *)&vf_mac,
						&eth_dev->data->mac_addrs[0]);
				ether_addr_copy(&eth_dev->data->mac_addrs[0],
						&adapter->primary_mac);
			} else {
				DP_NOTICE(edev, false,
					  "No VF macaddr assigned\n");
			}
		}
	}

	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;

	if (do_once) {
		qede_print_adapter_info(adapter);
		do_once = false;
	}

	DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
		  adapter->primary_mac.addr_bytes[0],
		  adapter->primary_mac.addr_bytes[1],
		  adapter->primary_mac.addr_bytes[2],
		  adapter->primary_mac.addr_bytes[3],
		  adapter->primary_mac.addr_bytes[4],
		  adapter->primary_mac.addr_bytes[5]);

	return rc;
}

static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 0);
}

static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
	/* Only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Safe to close dev here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	if (eth_dev->data->mac_addrs)
		rte_free(eth_dev->data->mac_addrs);

	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV)
	},
	{.vendor_id = 0,}
};

static struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_100)
	},
	{.vendor_id = 0,}
};

static struct eth_driver rte_qedevf_pmd = {
	.pci_drv = {
		    .name = "rte_qedevf_pmd",
		    .id_table = pci_id_qedevf_map,
		    .drv_flags =
		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		    },
	.eth_dev_init = qedevf_eth_dev_init,
	.eth_dev_uninit = qedevf_eth_dev_uninit,
	.dev_private_size = sizeof(struct qede_dev),
};

static struct eth_driver rte_qede_pmd = {
	.pci_drv = {
		    .name = "rte_qede_pmd",
		    .id_table = pci_id_qede_map,
		    .drv_flags =
		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		    },
	.eth_dev_init = qede_eth_dev_init,
	.eth_dev_uninit = qede_eth_dev_uninit,
	.dev_private_size = sizeof(struct qede_dev),
};

static int
rte_qedevf_pmd_init(const char *name __rte_unused,
		    const char *params __rte_unused)
{
	rte_eth_driver_register(&rte_qedevf_pmd);

	return 0;
}

static int
rte_qede_pmd_init(const char *name __rte_unused,
		  const char *params __rte_unused)
{
	rte_eth_driver_register(&rte_qede_pmd);

	return 0;
}

static struct rte_driver rte_qedevf_driver = {
	.type = PMD_PDEV,
	.init = rte_qedevf_pmd_init
};

static struct rte_driver rte_qede_driver = {
	.type = PMD_PDEV,
	.init = rte_qede_pmd_init
};

PMD_REGISTER_DRIVER(rte_qede_driver);
PMD_REGISTER_DRIVER(rte_qedevf_driver);