/*
 * Copyright (c) 2016 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_ethdev.h"
#include <rte_alarm.h>

static const struct qed_eth_ops *qed_ops;
static const char *drivername = "qede pmd";
static int64_t timer_period = 1;

struct rte_qede_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        uint64_t offset;
};
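
/*
 * Per-xstat display names mapped to byte offsets inside struct
 * ecore_eth_stats. qede_get_xstats() reads each value generically
 * through the stored offset, so extending the list is a one-line change.
 */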
static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
        {"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
        {"rx_multicast_bytes",
                offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
        {"rx_broadcast_bytes",
                offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
        {"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
        {"rx_multicast_packets",
                offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
        {"rx_broadcast_packets",
                offsetof(struct ecore_eth_stats, rx_bcast_pkts)},

        {"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
        {"tx_multicast_bytes",
                offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
        {"tx_broadcast_bytes",
                offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
        {"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
        {"tx_multicast_packets",
                offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
        {"tx_broadcast_packets",
                offsetof(struct ecore_eth_stats, tx_bcast_pkts)},

        {"rx_64_byte_packets",
                offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
        {"rx_65_to_127_byte_packets",
                offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
        {"rx_128_to_255_byte_packets",
                offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
        {"rx_256_to_511_byte_packets",
                offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
        {"rx_512_to_1023_byte_packets",
                offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
        {"rx_1024_to_1518_byte_packets",
                offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
        {"rx_1519_to_1522_byte_packets",
                offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
        {"rx_1519_to_2047_byte_packets",
                offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
        {"rx_2048_to_4095_byte_packets",
                offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
        {"rx_4096_to_9216_byte_packets",
                offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
        {"rx_9217_to_16383_byte_packets",
                offsetof(struct ecore_eth_stats,
                         rx_9217_to_16383_byte_packets)},
        {"tx_64_byte_packets",
                offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
        {"tx_65_to_127_byte_packets",
                offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
        {"tx_128_to_255_byte_packets",
                offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
        {"tx_256_to_511_byte_packets",
                offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
        {"tx_512_to_1023_byte_packets",
                offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
        {"tx_1024_to_1518_byte_packets",
                offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
        {"tx_1519_to_2047_byte_packets",
                offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
        {"tx_2048_to_4095_byte_packets",
                offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
        {"tx_4096_to_9216_byte_packets",
                offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
        {"tx_9217_to_16383_byte_packets",
                offsetof(struct ecore_eth_stats,
                         tx_9217_to_16383_byte_packets)},

        /* The ecore struct field name carries a "crtl" typo; only the
         * display name is spelled out correctly here.
         */
        {"rx_mac_control_frames",
                offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
        {"tx_mac_control_frames",
                offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
        {"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
        {"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
        {"rx_priority_flow_control_frames",
                offsetof(struct ecore_eth_stats, rx_pfc_frames)},
        {"tx_priority_flow_control_frames",
                offsetof(struct ecore_eth_stats, tx_pfc_frames)},

        {"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
        {"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
        {"rx_carrier_errors",
                offsetof(struct ecore_eth_stats, rx_carrier_errors)},
        {"rx_oversize_packet_errors",
                offsetof(struct ecore_eth_stats, rx_oversize_packets)},
        {"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
        {"rx_undersize_packet_errors",
                offsetof(struct ecore_eth_stats, rx_undersize_packets)},
        {"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
        {"rx_host_buffer_not_available",
                offsetof(struct ecore_eth_stats, no_buff_discards)},
        /* Number of packets discarded because they are bigger than MTU */
        {"rx_packet_too_big_discards",
                offsetof(struct ecore_eth_stats, packet_too_big_discard)},
        {"rx_ttl_zero_discards",
                offsetof(struct ecore_eth_stats, ttl0_discard)},
        {"rx_multi_function_tag_filter_discards",
                offsetof(struct ecore_eth_stats, mftag_filter_discards)},
        {"rx_mac_filter_discards",
                offsetof(struct ecore_eth_stats, mac_filter_discards)},
        {"rx_hw_buffer_truncates",
                offsetof(struct ecore_eth_stats, brb_truncates)},
        {"rx_hw_buffer_discards",
                offsetof(struct ecore_eth_stats, brb_discards)},
        {"tx_lpi_entry_count",
                offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
        {"tx_total_collisions",
                offsetof(struct ecore_eth_stats, tx_total_collisions)},
        {"tx_error_drop_packets",
                offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},

        {"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
        {"rx_mac_unicast_packets",
                offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
        {"rx_mac_multicast_packets",
                offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
        {"rx_mac_broadcast_packets",
                offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
        {"rx_mac_frames_ok",
                offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
        {"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
        {"tx_mac_unicast_packets",
                offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
        {"tx_mac_multicast_packets",
                offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
        {"tx_mac_broadcast_packets",
                offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},

        {"lro_coalesced_packets",
                offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
        {"lro_coalesced_events",
                offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
        {"lro_aborts_num",
                offsetof(struct ecore_eth_stats, tpa_aborts_num)},
        {"lro_not_coalesced_packets",
                offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
        {"lro_coalesced_bytes",
                offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
};
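
/*
 * Values are later fetched generically, e.g. in qede_get_xstats():
 *
 *   xstats[i].value = *(u64 *)((char *)&stats +
 *                              qede_xstats_strings[i].offset);
 */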

static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
        ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}

static void
qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
                       void *param)
{
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        /* Service slowpath events on the leading hwfn, then re-arm the
         * interrupt line.
         */
        qede_interrupt_action(ECORE_LEADING_HWFN(edev));
        if (rte_intr_enable(&eth_dev->pci_dev->intr_handle))
                DP_ERR(edev, "rte_intr_enable failed\n");
}

static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
        rte_memcpy(&qdev->dev_info, info, sizeof(*info));
        qdev->num_tc = qdev->dev_info.num_tc;
        qdev->ops = qed_ops;
}

static void qede_print_adapter_info(struct qede_dev *qdev)
{
        struct ecore_dev *edev = &qdev->edev;
        struct qed_dev_info *info = &qdev->dev_info.common;
        static char ver_str[QED_DRV_VER_STR_SIZE];

        DP_INFO(edev, "*********************************\n");
        DP_INFO(edev, " Chip details : %s%d\n",
                ECORE_IS_BB(edev) ? "BB" : "AH",
                CHIP_REV_IS_A0(edev) ? 0 : 1);

        sprintf(ver_str, "%s %s_%d.%d.%d.%d", QEDE_PMD_VER_PREFIX,
                edev->ver_str, QEDE_PMD_VERSION_MAJOR, QEDE_PMD_VERSION_MINOR,
                QEDE_PMD_VERSION_REVISION, QEDE_PMD_VERSION_PATCH);
        strcpy(qdev->drv_ver, ver_str);
        DP_INFO(edev, " Driver version : %s\n", ver_str);

        sprintf(ver_str, "%d.%d.%d.%d", info->fw_major, info->fw_minor,
                info->fw_rev, info->fw_eng);
        DP_INFO(edev, " Firmware version : %s\n", ver_str);

        sprintf(ver_str, "%d.%d.%d.%d",
                (info->mfw_rev >> 24) & 0xff,
                (info->mfw_rev >> 16) & 0xff,
                (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
        DP_INFO(edev, " Management firmware version : %s\n", ver_str);

        DP_INFO(edev, " Firmware file : %s\n", fw_file);
        DP_INFO(edev, "*********************************\n");
}

static int
qede_set_ucast_rx_mac(struct qede_dev *qdev,
                      enum qed_filter_xcast_params_type opcode,
                      uint8_t mac[ETHER_ADDR_LEN])
{
        struct ecore_dev *edev = &qdev->edev;
        struct qed_filter_params filter_cmd;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_UCAST;
        filter_cmd.filter.ucast.type = opcode;
        filter_cmd.filter.ucast.mac_valid = 1;
        rte_memcpy(&filter_cmd.filter.ucast.mac[0], &mac[0], ETHER_ADDR_LEN);
        return qdev->ops->filter_config(edev, &filter_cmd);
}

static void
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
                  uint32_t index, __rte_unused uint32_t pool)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        if (index >= qdev->dev_info.num_mac_addrs) {
                DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
                       index, qdev->dev_info.num_mac_addrs);
                return;
        }

        /* Adding macaddr even though promiscuous mode is set */
        if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
                DP_INFO(edev, "Port is in promisc mode, yet adding it\n");

        /* Add MAC filters according to the unicast secondary macs */
        rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
                                   mac_addr->addr_bytes);
        if (rc)
                DP_ERR(edev, "Unable to add macaddr rc=%d\n", rc);
}

static void
qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct ether_addr mac_addr;
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        if (index >= qdev->dev_info.num_mac_addrs) {
                DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
                       index, qdev->dev_info.num_mac_addrs);
                return;
        }

        /* Use the index maintained by rte */
        ether_addr_copy(&eth_dev->data->mac_addrs[index], &mac_addr);
        rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
                                   mac_addr.addr_bytes);
        if (rc)
                DP_ERR(edev, "Unable to remove macaddr rc=%d\n", rc);
}

static void
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        int rc;

        if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
                                               mac_addr->addr_bytes)) {
                DP_ERR(edev, "Setting MAC address is not allowed\n");
                ether_addr_copy(&qdev->primary_mac,
                                &eth_dev->data->mac_addrs[0]);
                return;
        }

        /* First remove the primary mac */
        rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
                                   qdev->primary_mac.addr_bytes);
        if (rc) {
                DP_ERR(edev, "Unable to remove current macaddr."
                             " Reverting to previous default mac\n");
                ether_addr_copy(&qdev->primary_mac,
                                &eth_dev->data->mac_addrs[0]);
                return;
        }

        /* Add the new default MAC */
        rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
                                   mac_addr->addr_bytes);
        if (rc)
                DP_ERR(edev, "Unable to add new default mac\n");
        else
                ether_addr_copy(mac_addr, &qdev->primary_mac);
}

static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
{
        struct ecore_dev *edev = &qdev->edev;
        struct qed_update_vport_params params = {
                .vport_id = 0,
                .accept_any_vlan = action,
                .update_accept_any_vlan_flg = 1,
        };
        int rc;

        /* Proceed only if action actually needs to be performed */
        if (qdev->accept_any_vlan == action)
                return;

        rc = qdev->ops->vport_update(edev, &params);
        if (rc) {
                DP_ERR(edev, "Failed to %s accept-any-vlan\n",
                       action ? "enable" : "disable");
        } else {
                DP_INFO(edev, "%s accept-any-vlan\n",
                        action ? "enabled" : "disabled");
                qdev->accept_any_vlan = action;
        }
}
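
/*
 * Rx-mode configuration: resolves the accept flags (regular vs promisc)
 * and keeps accept-any-vlan in sync, since promiscuous mode implies
 * accepting all VLANs as well.
 */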
void qede_config_rx_mode(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        /* TODO: - QED_FILTER_TYPE_UCAST */
        enum qed_filter_rx_mode_type accept_flags =
                        QED_FILTER_RX_MODE_TYPE_REGULAR;
        struct qed_filter_params rx_mode;
        int rc;

        /* Configure the struct for the Rx mode */
        memset(&rx_mode, 0, sizeof(struct qed_filter_params));
        rx_mode.type = QED_FILTER_TYPE_RX_MODE;

        rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_REPLACE,
                                   eth_dev->data->mac_addrs[0].addr_bytes);
        if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
                accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
        } else {
                rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
                                           eth_dev->data->
                                           mac_addrs[0].addr_bytes);
                if (rc) {
                        DP_ERR(edev, "Unable to add filter\n");
                        return;
                }
        }

        /* take care of VLAN mode */
        if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
                qede_config_accept_any_vlan(qdev, true);
        } else if (!qdev->non_configured_vlans) {
                /* If we don't have non-configured VLANs and promisc
                 * is not set, then check if we need to disable
                 * accept_any_vlan mode.
                 * Because in this case, accept_any_vlan mode is set
                 * as part of IFF_PROMISC flag handling.
                 */
                qede_config_accept_any_vlan(qdev, false);
        }

        rx_mode.filter.accept_flags = accept_flags;
        rc = qdev->ops->filter_config(edev, &rx_mode);
        if (rc)
                DP_ERR(edev, "Filter config failed rc=%d\n", rc);
}

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
{
        struct qed_update_vport_params vport_update_params;
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        int rc;

        memset(&vport_update_params, 0, sizeof(vport_update_params));
        vport_update_params.vport_id = 0;
        vport_update_params.update_inner_vlan_removal_flg = 1;
        vport_update_params.inner_vlan_removal_flg = set_stripping;
        rc = qdev->ops->vport_update(edev, &vport_update_params);
        if (rc) {
                DP_ERR(edev, "Update V-PORT failed %d\n", rc);
                return rc;
        }

        return 0;
}

static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        if (mask & ETH_VLAN_STRIP_MASK) {
                if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
                        (void)qede_vlan_stripping(eth_dev, 1);
                else
                        (void)qede_vlan_stripping(eth_dev, 0);
        }

        DP_INFO(edev, "vlan offload mask %d vlan-strip %d\n",
                mask, eth_dev->data->dev_conf.rxmode.hw_vlan_strip);
}

static int qede_set_ucast_rx_vlan(struct qede_dev *qdev,
                                  enum qed_filter_xcast_params_type opcode,
                                  uint16_t vid)
{
        struct qed_filter_params filter_cmd;
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_UCAST;
        filter_cmd.filter.ucast.type = opcode;
        filter_cmd.filter.ucast.vlan_valid = 1;
        filter_cmd.filter.ucast.vlan = vid;

        return qdev->ops->filter_config(edev, &filter_cmd);
}
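
/*
 * Hedged usage sketch (assumption: port 0 is bound to this PMD): the
 * generic ethdev call below dispatches into qede_vlan_filter_set().
 *
 *   rte_eth_dev_vlan_filter(0, 100, 1);  // add VLAN 100 to the filter
 */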
static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
                                uint16_t vlan_id, int on)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qed_dev_eth_info *dev_info = &qdev->dev_info;
        int rc = 0;

        if (on &&
            qdev->configured_vlans == dev_info->num_vlan_filters) {
                /* No room left in the HW filter table; fall back to
                 * accepting any VLAN instead of failing the request.
                 */
                DP_NOTICE(edev, false, "Reached max VLAN filter limit"
                                       " enabling accept_any_vlan\n");
                qede_config_accept_any_vlan(qdev, true);
                return 0;
        }

        if (on) {
                rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD,
                                            vlan_id);
                if (rc)
                        DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
                               rc);
                else
                        qdev->configured_vlans++;
        } else {
                rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL,
                                            vlan_id);
                if (rc)
                        DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
                               vlan_id, rc);
                else
                        qdev->configured_vlans--;
        }

        DP_INFO(edev, "vlan_id %u on %u rc %d configured_vlans %u\n",
                vlan_id, on, rc, qdev->configured_vlans);

        return rc;
}
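
/*
 * Configure-time sanity checks. On CMT (100G) parts there are two HW
 * functions per port and queues are spread across both engines; that is
 * why the queue count must be even and at least two.
 */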
static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

        PMD_INIT_FUNC_TRACE(edev);

        if (eth_dev->data->nb_rx_queues != eth_dev->data->nb_tx_queues) {
                DP_NOTICE(edev, false,
                          "Unequal number of rx/tx queues "
                          "is not supported RX=%u TX=%u\n",
                          eth_dev->data->nb_rx_queues,
                          eth_dev->data->nb_tx_queues);
                return -EINVAL;
        }

        /* Check requirements for 100G mode */
        if (edev->num_hwfns > 1) {
                if (eth_dev->data->nb_rx_queues < 2) {
                        DP_NOTICE(edev, false,
                                  "100G mode requires minimum two queues\n");
                        return -EINVAL;
                }

                if ((eth_dev->data->nb_rx_queues % 2) != 0) {
                        DP_NOTICE(edev, false,
                                  "100G mode requires even number of queues\n");
                        return -EINVAL;
                }
        }

        qdev->num_rss = eth_dev->data->nb_rx_queues;

        /* Initial state */
        qdev->state = QEDE_CLOSE;

        /* Sanity checks and throw warnings */
        if (rxmode->enable_scatter == 1) {
                DP_ERR(edev, "RX scatter packets is not supported\n");
                return -EINVAL;
        }

        if (rxmode->enable_lro == 1) {
                DP_INFO(edev, "LRO is not supported\n");
                return -EINVAL;
        }

        if (!rxmode->hw_strip_crc)
                DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");

        if (!rxmode->hw_ip_checksum)
                DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
                              "in hw\n");

        DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
                QEDE_RSS_CNT(qdev), qdev->num_tc);

        DP_INFO(edev, "my_id %u rel_pf_id %u abs_pf_id %u"
                " port %u first_on_engine %d\n",
                edev->hwfns[0].my_id,
                edev->hwfns[0].rel_pf_id,
                edev->hwfns[0].abs_pf_id,
                edev->hwfns[0].port_id, edev->hwfns[0].first_on_engine);

        return 0;
}

/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
        .nb_max = NUM_RX_BDS_MAX,
        .nb_min = 128,
        .nb_align = 128 /* lowest common multiple */
};

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
        .nb_max = NUM_TX_BDS_MAX,
        .nb_min = 256,
        .nb_align = 256
};

static void
qede_dev_info_get(struct rte_eth_dev *eth_dev,
                  struct rte_eth_dev_info *dev_info)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        PMD_INIT_FUNC_TRACE(edev);

        dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
                                              QEDE_ETH_OVERHEAD);
        dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
        dev_info->rx_desc_lim = qede_rx_desc_lim;
        dev_info->tx_desc_lim = qede_tx_desc_lim;
        dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev);
        dev_info->max_tx_queues = dev_info->max_rx_queues;
        dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
        if (IS_VF(edev))
                dev_info->max_vfs = 0;
        else
                dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
        dev_info->driver_name = qdev->drv_ver;
        dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
        dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .txq_flags = QEDE_TXQ_FLAGS,
        };

        dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
                                     DEV_RX_OFFLOAD_IPV4_CKSUM |
                                     DEV_RX_OFFLOAD_UDP_CKSUM |
                                     DEV_RX_OFFLOAD_TCP_CKSUM);
        dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
                                     DEV_TX_OFFLOAD_IPV4_CKSUM |
                                     DEV_TX_OFFLOAD_UDP_CKSUM |
                                     DEV_TX_OFFLOAD_TCP_CKSUM);

        dev_info->speed_capa = ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;
}

/* return 0 means link status changed, -1 means not changed */
static int
qede_link_update(struct rte_eth_dev *eth_dev,
                 __rte_unused int wait_to_complete)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        uint16_t link_duplex;
        struct qed_link_output link;
        struct rte_eth_link *curr = &eth_dev->data->dev_link;

        memset(&link, 0, sizeof(struct qed_link_output));
        qdev->ops->common->get_link(edev, &link);

        /* Link Speed */
        curr->link_speed = link.speed;

        /* Link Mode */
        switch (link.duplex) {
        case QEDE_DUPLEX_HALF:
                link_duplex = ETH_LINK_HALF_DUPLEX;
                break;
        case QEDE_DUPLEX_FULL:
                link_duplex = ETH_LINK_FULL_DUPLEX;
                break;
        case QEDE_DUPLEX_UNKNOWN:
        default:
                link_duplex = -1;
        }
        curr->link_duplex = link_duplex;

        /* Link Status */
        curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;

        /* Autoneg */
        curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
                             ETH_LINK_AUTONEG : ETH_LINK_FIXED;

        DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
                curr->link_speed, curr->link_duplex,
                curr->link_autoneg, curr->link_status);

        /* return 0 means link status changed, -1 means not changed */
        return ((curr->link_status == link.link_up) ? -1 : 0);
}

static void
qede_rx_mode_setting(struct rte_eth_dev *eth_dev,
                     enum qed_filter_rx_mode_type accept_flags)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct qed_filter_params rx_mode;

        DP_INFO(edev, "%s mode %u\n", __func__, accept_flags);

        memset(&rx_mode, 0, sizeof(struct qed_filter_params));
        rx_mode.type = QED_FILTER_TYPE_RX_MODE;
        rx_mode.filter.accept_flags = accept_flags;
        qdev->ops->filter_config(edev, &rx_mode);
}

static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;

        PMD_INIT_FUNC_TRACE(edev);

        if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
                type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

        qede_rx_mode_setting(eth_dev, type);
}

static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        PMD_INIT_FUNC_TRACE(edev);

        if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
                qede_rx_mode_setting(eth_dev,
                                     QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
        else
                qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static void qede_poll_sp_sb_cb(void *param)
{
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        int rc;

        qede_interrupt_action(ECORE_LEADING_HWFN(edev));
        qede_interrupt_action(&edev->hwfns[1]);

        /* Re-arm the periodic alarm that stands in for the second
         * engine's MSI-X vector.
         */
        rc = rte_eal_alarm_set(timer_period * US_PER_S,
                               qede_poll_sp_sb_cb,
                               (void *)eth_dev);
        if (rc != 0) {
                DP_ERR(edev, "Unable to start periodic"
                             " timer rc %d\n", rc);
                assert(false && "Unable to start periodic timer");
        }
}

static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        PMD_INIT_FUNC_TRACE(edev);

        /* dev_stop() shall cleanup fp resources in hw but without releasing
         * dma memories and sw structures so that dev_start() can be called
         * by the app without reconfiguration. However, in dev_close() we
         * can release all the resources and the device can be brought up
         * afresh.
         */
        if (qdev->state != QEDE_STOP)
                qede_dev_stop(eth_dev);
        else
                DP_INFO(edev, "Device is already stopped\n");

        qede_free_mem_load(qdev);

        qede_free_fp_arrays(qdev);

        qede_dev_set_link_state(eth_dev, false);

        qdev->ops->common->slowpath_stop(edev);

        qdev->ops->common->remove(edev);

        rte_intr_disable(&eth_dev->pci_dev->intr_handle);

        rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle,
                                     qede_interrupt_handler, (void *)eth_dev);

        if (edev->num_hwfns > 1)
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);

        qdev->state = QEDE_CLOSE;
}
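
/*
 * Basic stats aggregate the per-class HW counters into the generic
 * rte_eth_stats fields. Hedged usage sketch (assumption: port 0):
 *
 *   struct rte_eth_stats st;
 *   rte_eth_stats_get(0, &st);   // dispatches to qede_get_stats()
 */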
static void
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct ecore_eth_stats stats;

        qdev->ops->get_vport_stats(edev, &stats);

        /* RX Stats */
        eth_stats->ipackets = stats.rx_ucast_pkts +
            stats.rx_mcast_pkts + stats.rx_bcast_pkts;

        eth_stats->ibytes = stats.rx_ucast_bytes +
            stats.rx_mcast_bytes + stats.rx_bcast_bytes;

        eth_stats->ierrors = stats.rx_crc_errors +
            stats.rx_align_errors +
            stats.rx_carrier_errors +
            stats.rx_oversize_packets +
            stats.rx_jabbers + stats.rx_undersize_packets;

        eth_stats->rx_nombuf = stats.no_buff_discards;

        eth_stats->imissed = stats.mftag_filter_discards +
            stats.mac_filter_discards +
            stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;

        /* TX stats */
        eth_stats->opackets = stats.tx_ucast_pkts +
            stats.tx_mcast_pkts + stats.tx_bcast_pkts;

        eth_stats->obytes = stats.tx_ucast_bytes +
            stats.tx_mcast_bytes + stats.tx_bcast_bytes;

        eth_stats->oerrors = stats.tx_err_drop_pkts;
}
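
/*
 * xstats protocol: qede_get_xstats_names() fills display names (or just
 * reports the count when the array is NULL), and qede_get_xstats()
 * returns the required size when the caller's array is too small.
 */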
static int
qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
                      struct rte_eth_xstat_name *xstats_names,
                      __rte_unused unsigned limit)
{
        unsigned int i, stat_cnt = RTE_DIM(qede_xstats_strings);

        if (xstats_names != NULL)
                for (i = 0; i < stat_cnt; i++)
                        snprintf(xstats_names[i].name,
                                 sizeof(xstats_names[i].name),
                                 "%s",
                                 qede_xstats_strings[i].name);

        return stat_cnt;
}

static int
qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                unsigned n)
{
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct ecore_eth_stats stats;
        unsigned int i, num = RTE_DIM(qede_xstats_strings);

        /* Tell the caller the required array size if it is too small */
        if (n < num)
                return num;

        qdev->ops->get_vport_stats(edev, &stats);

        /* Iterate over the stat table, not over n, so a larger caller
         * array cannot read past the end of qede_xstats_strings[].
         */
        for (i = 0; i < num; i++)
                xstats[i].value = *(u64 *)(((char *)&stats) +
                                           qede_xstats_strings[i].offset);

        return num;
}

static void
qede_reset_xstats(struct rte_eth_dev *dev)
{
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        ecore_reset_vport_stats(edev);
}

int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qed_link_params link_params;
        int rc;

        DP_INFO(edev, "setting link state %d\n", link_up);
        memset(&link_params, 0, sizeof(link_params));
        link_params.link_up = link_up;
        rc = qdev->ops->common->set_link(edev, &link_params);
        if (rc != ECORE_SUCCESS)
                DP_ERR(edev, "Unable to set link state %d\n", link_up);

        return rc;
}

static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
        return qede_dev_set_link_state(eth_dev, true);
}

static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
        return qede_dev_set_link_state(eth_dev, false);
}

static void qede_reset_stats(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        ecore_reset_vport_stats(edev);
}

static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
        enum qed_filter_rx_mode_type type =
            QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

        if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
                type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

        qede_rx_mode_setting(eth_dev, type);
}

static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
        if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
                qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_PROMISC);
        else
                qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
                              struct rte_eth_fc_conf *fc_conf)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qed_link_output current_link;
        struct qed_link_params params;

        memset(&current_link, 0, sizeof(current_link));
        qdev->ops->common->get_link(edev, &current_link);

        memset(&params, 0, sizeof(params));
        params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
        if (fc_conf->autoneg) {
                if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
                        DP_ERR(edev, "Autoneg not supported\n");
                        return -EINVAL;
                }
                params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
        }

        /* Pause is assumed to be supported (SUPPORTED_Pause) */
        if (fc_conf->mode == RTE_FC_FULL)
                params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
                                        QED_LINK_PAUSE_RX_ENABLE);
        if (fc_conf->mode == RTE_FC_TX_PAUSE)
                params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
        if (fc_conf->mode == RTE_FC_RX_PAUSE)
                params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;

        params.link_up = true;
        (void)qdev->ops->common->set_link(edev, &params);

        return 0;
}

static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
                              struct rte_eth_fc_conf *fc_conf)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qed_link_output current_link;

        memset(&current_link, 0, sizeof(current_link));
        qdev->ops->common->get_link(edev, &current_link);

        if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
                fc_conf->autoneg = true;

        /* Report FULL only when both directions are enabled; a plain
         * bitwise test against (RX | TX) would misreport one-sided pause.
         */
        if ((current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE) &&
            (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE))
                fc_conf->mode = RTE_FC_FULL;
        else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
                fc_conf->mode = RTE_FC_RX_PAUSE;
        else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
                fc_conf->mode = RTE_FC_TX_PAUSE;
        else
                fc_conf->mode = RTE_FC_NONE;

        return 0;
}

static const uint32_t *
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
        /* IPv4/IPv6 entries reconstructed from the Rx checksum offloads
         * this driver advertises; the list must end in RTE_PTYPE_UNKNOWN.
         */
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_UNKNOWN
        };

        if (eth_dev->rx_pkt_burst == qede_recv_pkts)
                return ptypes;

        return NULL;
}

int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
                         struct rte_eth_rss_conf *rss_conf)
{
        struct qed_update_vport_params vport_update_params;
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        uint8_t rss_caps = 0;
        uint32_t *key = (uint32_t *)rss_conf->rss_key;
        uint64_t hf = rss_conf->rss_hf;

        if (hf == 0)
                DP_ERR(edev, "hash function 0 will disable RSS\n");

        rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
        rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
        rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
        rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
        rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
        rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;

        /* If the mapping doesn't fit any supported hash type, return */
        if (rss_caps == 0 && hf != 0)
                return -EINVAL;

        memset(&vport_update_params, 0, sizeof(vport_update_params));

        if (key != NULL)
                memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
                       rss_conf->rss_key_len);

        qdev->rss_params.rss_caps = rss_caps;
        memcpy(&vport_update_params.rss_params, &qdev->rss_params,
               sizeof(vport_update_params.rss_params));
        vport_update_params.update_rss_flg = 1;
        vport_update_params.vport_id = 0;

        return qdev->ops->vport_update(edev, &vport_update_params);
}

int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
                           struct rte_eth_rss_conf *rss_conf)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        uint64_t hf = 0;

        if (rss_conf->rss_key_len < sizeof(qdev->rss_params.rss_key))
                return -EINVAL;

        if (rss_conf->rss_key)
                memcpy(rss_conf->rss_key, qdev->rss_params.rss_key,
                       sizeof(qdev->rss_params.rss_key));

        hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4) ?
            ETH_RSS_IPV4 : 0;
        hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
            ETH_RSS_IPV6 : 0;
        hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
            ETH_RSS_IPV6_EX : 0;
        hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4_TCP) ?
            ETH_RSS_NONFRAG_IPV4_TCP : 0;
        hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
            ETH_RSS_NONFRAG_IPV6_TCP : 0;
        hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
            ETH_RSS_IPV6_TCP_EX : 0;

        rss_conf->rss_hf = hf;

        return 0;
}
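
/*
 * RETA entries arrive packed in 64-entry groups: entry i lives at
 * reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] and
 * is valid only when the matching bit of that group's mask is set.
 */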
int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
                         struct rte_eth_rss_reta_entry64 *reta_conf,
                         uint16_t reta_size)
{
        struct qed_update_vport_params vport_update_params;
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        uint16_t i, idx, shift;

        if (reta_size > ETH_RSS_RETA_SIZE_128) {
                DP_ERR(edev, "reta_size %d is not supported by hardware\n",
                       reta_size);
                return -EINVAL;
        }

        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift)) {
                        uint8_t entry = reta_conf[idx].reta[shift];
                        qdev->rss_params.rss_ind_table[i] = entry;
                }
        }

        /* Copy the cached params only after the indirection table has
         * been updated, so the new entries actually reach the vport.
         */
        memset(&vport_update_params, 0, sizeof(vport_update_params));
        memcpy(&vport_update_params.rss_params, &qdev->rss_params,
               sizeof(vport_update_params.rss_params));
        vport_update_params.update_rss_flg = 1;
        vport_update_params.vport_id = 0;

        return qdev->ops->vport_update(edev, &vport_update_params);
}

int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        uint16_t i, idx, shift;

        if (reta_size > ETH_RSS_RETA_SIZE_128) {
                struct ecore_dev *edev = &qdev->edev;

                DP_ERR(edev, "reta_size %d is not supported\n",
                       reta_size);
                return -EINVAL;
        }

        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift)) {
                        uint8_t entry = qdev->rss_params.rss_ind_table[i];
                        reta_conf[idx].reta[shift] = entry;
                }
        }

        return 0;
}
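
/*
 * Frame-size arithmetic used below: MTU 1500 gives
 * 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes,
 * which already exceeds ETHER_MAX_LEN (1518), so jumbo_frame is set
 * whenever a VLAN-tagged frame no longer fits the standard maximum.
 */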
int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
        uint32_t frame_size;
        struct qede_dev *qdev = dev->data->dev_private;
        struct rte_eth_dev_info dev_info = {0};

        qede_dev_info_get(dev, &dev_info);

        /* + 4 bytes for the 802.1Q VLAN tag */
        frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;

        if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
                return -EINVAL;

        if (!dev->data->scattered_rx &&
            frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
                return -EINVAL;

        if (frame_size > ETHER_MAX_LEN)
                dev->data->dev_conf.rxmode.jumbo_frame = 1;
        else
                dev->data->dev_conf.rxmode.jumbo_frame = 0;

        /* update max frame size */
        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

        /* Restart the port so the new frame size takes effect */
        qede_dev_stop(dev);
        qede_dev_start(dev);

        return 0;
}

static const struct eth_dev_ops qede_eth_dev_ops = {
        .dev_configure = qede_dev_configure,
        .dev_infos_get = qede_dev_info_get,
        .rx_queue_setup = qede_rx_queue_setup,
        .rx_queue_release = qede_rx_queue_release,
        .tx_queue_setup = qede_tx_queue_setup,
        .tx_queue_release = qede_tx_queue_release,
        .dev_start = qede_dev_start,
        .dev_set_link_up = qede_dev_set_link_up,
        .dev_set_link_down = qede_dev_set_link_down,
        .link_update = qede_link_update,
        .promiscuous_enable = qede_promiscuous_enable,
        .promiscuous_disable = qede_promiscuous_disable,
        .allmulticast_enable = qede_allmulticast_enable,
        .allmulticast_disable = qede_allmulticast_disable,
        .dev_stop = qede_dev_stop,
        .dev_close = qede_dev_close,
        .stats_get = qede_get_stats,
        .stats_reset = qede_reset_stats,
        .xstats_get = qede_get_xstats,
        .xstats_reset = qede_reset_xstats,
        .xstats_get_names = qede_get_xstats_names,
        .mac_addr_add = qede_mac_addr_add,
        .mac_addr_remove = qede_mac_addr_remove,
        .mac_addr_set = qede_mac_addr_set,
        .vlan_offload_set = qede_vlan_offload_set,
        .vlan_filter_set = qede_vlan_filter_set,
        .flow_ctrl_set = qede_flow_ctrl_set,
        .flow_ctrl_get = qede_flow_ctrl_get,
        .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
        .rss_hash_update = qede_rss_hash_update,
        .rss_hash_conf_get = qede_rss_hash_conf_get,
        .reta_update = qede_rss_reta_update,
        .reta_query = qede_rss_reta_query,
        .mtu_set = qede_set_mtu,
};
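
/*
 * The VF table below mirrors the PF one but drops the MAC-address and
 * flow-control ops: those settings are owned by the PF on SR-IOV parts.
 */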
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
        .dev_configure = qede_dev_configure,
        .dev_infos_get = qede_dev_info_get,
        .rx_queue_setup = qede_rx_queue_setup,
        .rx_queue_release = qede_rx_queue_release,
        .tx_queue_setup = qede_tx_queue_setup,
        .tx_queue_release = qede_tx_queue_release,
        .dev_start = qede_dev_start,
        .dev_set_link_up = qede_dev_set_link_up,
        .dev_set_link_down = qede_dev_set_link_down,
        .link_update = qede_link_update,
        .promiscuous_enable = qede_promiscuous_enable,
        .promiscuous_disable = qede_promiscuous_disable,
        .allmulticast_enable = qede_allmulticast_enable,
        .allmulticast_disable = qede_allmulticast_disable,
        .dev_stop = qede_dev_stop,
        .dev_close = qede_dev_close,
        .stats_get = qede_get_stats,
        .stats_reset = qede_reset_stats,
        .xstats_get = qede_get_xstats,
        .xstats_reset = qede_reset_xstats,
        .xstats_get_names = qede_get_xstats_names,
        .vlan_offload_set = qede_vlan_offload_set,
        .vlan_filter_set = qede_vlan_filter_set,
        .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
        .rss_hash_update = qede_rss_hash_update,
        .rss_hash_conf_get = qede_rss_hash_conf_get,
        .reta_update = qede_rss_reta_update,
        .reta_query = qede_rss_reta_query,
        .mtu_set = qede_set_mtu,
};

static void qede_update_pf_params(struct ecore_dev *edev)
{
        struct ecore_pf_params pf_params;

        memset(&pf_params, 0, sizeof(struct ecore_pf_params));
        pf_params.eth_pf_params.num_cons = 64;
        qed_ops->common->update_pf_params(edev, &pf_params);
}
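
/*
 * Common probe path for PF and VF ports. Rough sequence: map the PCI
 * device, register the slowpath interrupt, start the slowpath process,
 * query device info, allocate the MAC table, and finally install the
 * matching eth_dev_ops table.
 */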
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
        struct rte_pci_device *pci_dev;
        struct rte_pci_addr pci_addr;
        struct qede_dev *adapter;
        struct ecore_dev *edev;
        struct qed_dev_eth_info dev_info;
        struct qed_slowpath_params params;
        static bool do_once = true;
        uint8_t bulletin_change;
        uint8_t vf_mac[ETHER_ADDR_LEN];
        uint8_t is_mac_forced;
        bool is_mac_exist;
        /* Fix up ecore debug level */
        uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
        uint8_t dp_level = ECORE_LEVEL_VERBOSE;
        uint32_t max_mac_addrs;
        uint32_t qed_ver;
        int rc;

        /* Extract key data structures */
        adapter = eth_dev->data->dev_private;
        edev = &adapter->edev;
        pci_addr = eth_dev->pci_dev->addr;

        PMD_INIT_FUNC_TRACE(edev);

        snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
                 pci_addr.bus, pci_addr.devid, pci_addr.function,
                 eth_dev->data->port_id);

        eth_dev->rx_pkt_burst = qede_recv_pkts;
        eth_dev->tx_pkt_burst = qede_xmit_pkts;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                DP_NOTICE(edev, false,
                          "Skipping device init from secondary process\n");
                return 0;
        }

        pci_dev = eth_dev->pci_dev;

        rte_eth_copy_pci_info(eth_dev, pci_dev);

        qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);

        qed_ops = qed_get_eth_ops();
        if (!qed_ops) {
                DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
                return -EINVAL;
        }

        DP_INFO(edev, "Starting qede probe\n");

        rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
                                    dp_module, dp_level, is_vf);
        if (rc != 0) {
                DP_ERR(edev, "qede probe failed rc %d\n", rc);
                return -ENODEV;
        }

        qede_update_pf_params(edev);

        rte_intr_callback_register(&eth_dev->pci_dev->intr_handle,
                                   qede_interrupt_handler, (void *)eth_dev);

        if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) {
                DP_ERR(edev, "rte_intr_enable() failed\n");
                return -ENODEV;
        }

        /* Start the Slowpath-process */
        memset(&params, 0, sizeof(struct qed_slowpath_params));
        params.int_mode = ECORE_INT_MODE_MSIX;
        params.drv_major = QEDE_MAJOR_VERSION;
        params.drv_minor = QEDE_MINOR_VERSION;
        params.drv_rev = QEDE_REVISION_VERSION;
        params.drv_eng = QEDE_ENGINEERING_VERSION;
        strncpy((char *)params.name, "qede LAN", QED_DRV_VER_STR_SIZE);

        /* For CMT mode device do periodic polling for slowpath events.
         * This is required since uio device uses only one MSI-x
         * interrupt vector but we need one for each engine.
         */
        if (edev->num_hwfns > 1) {
                rc = rte_eal_alarm_set(timer_period * US_PER_S,
                                       qede_poll_sp_sb_cb,
                                       (void *)eth_dev);
                if (rc != 0) {
                        DP_ERR(edev, "Unable to start periodic"
                                     " timer rc %d\n", rc);
                        return -EINVAL;
                }
        }

        rc = qed_ops->common->slowpath_start(edev, &params);
        if (rc) {
                DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
                                     (void *)eth_dev);
                return -ENODEV;
        }

        rc = qed_ops->fill_dev_info(edev, &dev_info);
        if (rc) {
                DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
                qed_ops->common->slowpath_stop(edev);
                qed_ops->common->remove(edev);
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
                                     (void *)eth_dev);
                return -ENODEV;
        }

        qede_alloc_etherdev(adapter, &dev_info);

        adapter->ops->common->set_id(edev, edev->name, QEDE_DRV_MODULE_VERSION);

        if (!is_vf)
                adapter->dev_info.num_mac_addrs =
                    (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
                                       ECORE_MAC);
        else
                ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
                                             &adapter->dev_info.num_mac_addrs);

        /* Allocate memory for storing MAC addr */
        eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
                                               (ETHER_ADDR_LEN *
                                                adapter->dev_info.num_mac_addrs),
                                               RTE_CACHE_LINE_SIZE);

        if (eth_dev->data->mac_addrs == NULL) {
                DP_ERR(edev, "Failed to allocate MAC address\n");
                qed_ops->common->slowpath_stop(edev);
                qed_ops->common->remove(edev);
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
                                     (void *)eth_dev);
                return -ENOMEM;
        }

        if (!is_vf) {
                ether_addr_copy((struct ether_addr *)edev->hwfns[0].
                                hw_info.hw_mac_addr,
                                &eth_dev->data->mac_addrs[0]);
                ether_addr_copy(&eth_dev->data->mac_addrs[0],
                                &adapter->primary_mac);
        } else {
                ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
                                       &bulletin_change);
                if (bulletin_change) {
                        is_mac_exist =
                            ecore_vf_bulletin_get_forced_mac(
                                                ECORE_LEADING_HWFN(edev),
                                                vf_mac,
                                                &is_mac_forced);
                        if (is_mac_exist && is_mac_forced) {
                                DP_INFO(edev, "VF macaddr received from PF\n");
                                ether_addr_copy((struct ether_addr *)&vf_mac,
                                                &eth_dev->data->mac_addrs[0]);
                                ether_addr_copy(&eth_dev->data->mac_addrs[0],
                                                &adapter->primary_mac);
                        } else {
                                DP_NOTICE(edev, false,
                                          "No VF macaddr assigned\n");
                        }
                }
        }

        eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;

        if (do_once) {
                qede_print_adapter_info(adapter);
                do_once = false;
        }

        DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
                  adapter->primary_mac.addr_bytes[0],
                  adapter->primary_mac.addr_bytes[1],
                  adapter->primary_mac.addr_bytes[2],
                  adapter->primary_mac.addr_bytes[3],
                  adapter->primary_mac.addr_bytes[4],
                  adapter->primary_mac.addr_bytes[5]);

        return rc;
}

static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
        return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
        return qede_common_dev_init(eth_dev, 0);
}

static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
        /* only uninitialize in the primary process */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        /* safe to close dev here */
        qede_dev_close(eth_dev);

        eth_dev->dev_ops = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;

        if (eth_dev->data->mac_addrs)
                rte_free(eth_dev->data->mac_addrs);

        eth_dev->data->mac_addrs = NULL;

        return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
        return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
        return qede_dev_common_uninit(eth_dev);
}

static struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
        {
                QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF)
        },
        {
                QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV)
        },
        {.vendor_id = 0,}
};

static struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
        {
                QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E)
        },
        {
                QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S)
        },
        {
                QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40)
        },
        {
                QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)
        },
        {
                QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_100)
        },
        {.vendor_id = 0,}
};

static struct eth_driver rte_qedevf_pmd = {
        .pci_drv = {
                    .id_table = pci_id_qedevf_map,
                    .drv_flags =
                    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
                    .probe = rte_eth_dev_pci_probe,
                    .remove = rte_eth_dev_pci_remove,
                   },
        .eth_dev_init = qedevf_eth_dev_init,
        .eth_dev_uninit = qedevf_eth_dev_uninit,
        .dev_private_size = sizeof(struct qede_dev),
};

static struct eth_driver rte_qede_pmd = {
        .pci_drv = {
                    .id_table = pci_id_qede_map,
                    .drv_flags =
                    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
                    .probe = rte_eth_dev_pci_probe,
                    .remove = rte_eth_dev_pci_remove,
                   },
        .eth_dev_init = qede_eth_dev_init,
        .eth_dev_uninit = qede_eth_dev_uninit,
        .dev_private_size = sizeof(struct qede_dev),
};

DRIVER_REGISTER_PCI(net_qede, rte_qede_pmd.pci_drv);
DRIVER_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
DRIVER_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd.pci_drv);
DRIVER_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);