1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2016 - 2018 Cavium Inc.
7 #include "qede_ethdev.h"
9 #include <rte_version.h>
10 #include <rte_kvargs.h>
13 int qede_logtype_init;
14 int qede_logtype_driver;
16 static const struct qed_eth_ops *qed_ops;
17 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev);
18 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev);
20 #define QEDE_SP_TIMER_PERIOD 10000 /* 10ms, in microseconds */
22 struct rte_qede_xstats_name_off {
23 char name[RTE_ETH_XSTATS_NAME_SIZE];
27 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
29 offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
30 {"rx_multicast_bytes",
31 offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
32 {"rx_broadcast_bytes",
33 offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
34 {"rx_unicast_packets",
35 offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
36 {"rx_multicast_packets",
37 offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
38 {"rx_broadcast_packets",
39 offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},
42 offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
43 {"tx_multicast_bytes",
44 offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
45 {"tx_broadcast_bytes",
46 offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
47 {"tx_unicast_packets",
48 offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
49 {"tx_multicast_packets",
50 offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
51 {"tx_broadcast_packets",
52 offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},
54 {"rx_64_byte_packets",
55 offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
56 {"rx_65_to_127_byte_packets",
57 offsetof(struct ecore_eth_stats_common,
58 rx_65_to_127_byte_packets)},
59 {"rx_128_to_255_byte_packets",
60 offsetof(struct ecore_eth_stats_common,
61 rx_128_to_255_byte_packets)},
62 {"rx_256_to_511_byte_packets",
63 offsetof(struct ecore_eth_stats_common,
64 rx_256_to_511_byte_packets)},
65 {"rx_512_to_1023_byte_packets",
66 offsetof(struct ecore_eth_stats_common,
67 rx_512_to_1023_byte_packets)},
68 {"rx_1024_to_1518_byte_packets",
69 offsetof(struct ecore_eth_stats_common,
70 rx_1024_to_1518_byte_packets)},
71 {"tx_64_byte_packets",
72 offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
73 {"tx_65_to_127_byte_packets",
74 offsetof(struct ecore_eth_stats_common,
75 tx_65_to_127_byte_packets)},
76 {"tx_128_to_255_byte_packets",
77 offsetof(struct ecore_eth_stats_common,
78 tx_128_to_255_byte_packets)},
79 {"tx_256_to_511_byte_packets",
80 offsetof(struct ecore_eth_stats_common,
81 tx_256_to_511_byte_packets)},
82 {"tx_512_to_1023_byte_packets",
83 offsetof(struct ecore_eth_stats_common,
84 tx_512_to_1023_byte_packets)},
85 {"tx_1024_to_1518_byte_packets",
86 offsetof(struct ecore_eth_stats_common,
87 tx_1024_to_1518_byte_packets)},
89 {"rx_mac_crtl_frames",
90 offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
91 {"tx_mac_control_frames",
92 offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
94 offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
96 offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
97 {"rx_priority_flow_control_frames",
98 offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
99 {"tx_priority_flow_control_frames",
100 offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},
103 offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
105 offsetof(struct ecore_eth_stats_common, rx_align_errors)},
106 {"rx_carrier_errors",
107 offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
108 {"rx_oversize_packet_errors",
109 offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
111 offsetof(struct ecore_eth_stats_common, rx_jabbers)},
112 {"rx_undersize_packet_errors",
113 offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
114 {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
115 {"rx_host_buffer_not_available",
116 offsetof(struct ecore_eth_stats_common, no_buff_discards)},
117 /* Number of packets discarded because they are bigger than MTU */
118 {"rx_packet_too_big_discards",
119 offsetof(struct ecore_eth_stats_common,
120 packet_too_big_discard)},
121 {"rx_ttl_zero_discards",
122 offsetof(struct ecore_eth_stats_common, ttl0_discard)},
123 {"rx_multi_function_tag_filter_discards",
124 offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
125 {"rx_mac_filter_discards",
126 offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
127 {"rx_hw_buffer_truncates",
128 offsetof(struct ecore_eth_stats_common, brb_truncates)},
129 {"rx_hw_buffer_discards",
130 offsetof(struct ecore_eth_stats_common, brb_discards)},
131 {"tx_error_drop_packets",
132 offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},
134 {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
135 {"rx_mac_unicast_packets",
136 offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
137 {"rx_mac_multicast_packets",
138 offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
139 {"rx_mac_broadcast_packets",
140 offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
142 offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
143 {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
144 {"tx_mac_unicast_packets",
145 offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
146 {"tx_mac_multicast_packets",
147 offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
148 {"tx_mac_broadcast_packets",
149 offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},
151 {"lro_coalesced_packets",
152 offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
153 {"lro_coalesced_events",
154 offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
156 offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
157 {"lro_not_coalesced_packets",
158 offsetof(struct ecore_eth_stats_common,
159 tpa_not_coalesced_pkts)},
160 {"lro_coalesced_bytes",
161 offsetof(struct ecore_eth_stats_common,
162 tpa_coalesced_bytes)},
165 static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
166 {"rx_1519_to_1522_byte_packets",
167 offsetof(struct ecore_eth_stats, bb) +
168 offsetof(struct ecore_eth_stats_bb,
169 rx_1519_to_1522_byte_packets)},
170 {"rx_1519_to_2047_byte_packets",
171 offsetof(struct ecore_eth_stats, bb) +
172 offsetof(struct ecore_eth_stats_bb,
173 rx_1519_to_2047_byte_packets)},
174 {"rx_2048_to_4095_byte_packets",
175 offsetof(struct ecore_eth_stats, bb) +
176 offsetof(struct ecore_eth_stats_bb,
177 rx_2048_to_4095_byte_packets)},
178 {"rx_4096_to_9216_byte_packets",
179 offsetof(struct ecore_eth_stats, bb) +
180 offsetof(struct ecore_eth_stats_bb,
181 rx_4096_to_9216_byte_packets)},
182 {"rx_9217_to_16383_byte_packets",
183 offsetof(struct ecore_eth_stats, bb) +
184 offsetof(struct ecore_eth_stats_bb,
185 rx_9217_to_16383_byte_packets)},
187 {"tx_1519_to_2047_byte_packets",
188 offsetof(struct ecore_eth_stats, bb) +
189 offsetof(struct ecore_eth_stats_bb,
190 tx_1519_to_2047_byte_packets)},
191 {"tx_2048_to_4095_byte_packets",
192 offsetof(struct ecore_eth_stats, bb) +
193 offsetof(struct ecore_eth_stats_bb,
194 tx_2048_to_4095_byte_packets)},
195 {"tx_4096_to_9216_byte_packets",
196 offsetof(struct ecore_eth_stats, bb) +
197 offsetof(struct ecore_eth_stats_bb,
198 tx_4096_to_9216_byte_packets)},
199 {"tx_9217_to_16383_byte_packets",
200 offsetof(struct ecore_eth_stats, bb) +
201 offsetof(struct ecore_eth_stats_bb,
202 tx_9217_to_16383_byte_packets)},
204 {"tx_lpi_entry_count",
205 offsetof(struct ecore_eth_stats, bb) +
206 offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
207 {"tx_total_collisions",
208 offsetof(struct ecore_eth_stats, bb) +
209 offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
212 static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
213 {"rx_1519_to_max_byte_packets",
214 offsetof(struct ecore_eth_stats, ah) +
215 offsetof(struct ecore_eth_stats_ah,
216 rx_1519_to_max_byte_packets)},
217 {"tx_1519_to_max_byte_packets",
218 offsetof(struct ecore_eth_stats, ah) +
219 offsetof(struct ecore_eth_stats_ah,
220 tx_1519_to_max_byte_packets)},
223 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
225 offsetof(struct qede_rx_queue, rx_segs)},
227 offsetof(struct qede_rx_queue, rx_hw_errors)},
228 {"rx_q_allocation_errors",
229 offsetof(struct qede_rx_queue, rx_alloc_errors)}
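/* Illustrative sketch (assumes a populated 'struct ecore_eth_stats stats'
 * and an index 'i' into the table): each entry above pairs an xstat name
 * with the byte offset of its counter, so a value can be fetched
 * generically:
 *
 *	uint64_t val = *(uint64_t *)((char *)&stats +
 *				     qede_xstats_strings[i].offset);
 *
 * qede_get_xstats() later in this file uses exactly this pattern, with the
 * rxq-based table applied relative to each qede_rx_queue instead.
 */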
232 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
234 ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
238 qede_interrupt_handler_intx(void *param)
240 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
241 struct qede_dev *qdev = eth_dev->data->dev_private;
242 struct ecore_dev *edev = &qdev->edev;
245 /* Check if our device actually raised an interrupt */
246 status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev));
248 qede_interrupt_action(ECORE_LEADING_HWFN(edev));
250 if (rte_intr_enable(eth_dev->intr_handle))
251 DP_ERR(edev, "rte_intr_enable failed\n");
256 qede_interrupt_handler(void *param)
258 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
259 struct qede_dev *qdev = eth_dev->data->dev_private;
260 struct ecore_dev *edev = &qdev->edev;
262 qede_interrupt_action(ECORE_LEADING_HWFN(edev));
263 if (rte_intr_enable(eth_dev->intr_handle))
264 DP_ERR(edev, "rte_intr_enable failed\n");
268 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
270 rte_memcpy(&qdev->dev_info, info, sizeof(*info));
274 static void qede_print_adapter_info(struct qede_dev *qdev)
276 struct ecore_dev *edev = &qdev->edev;
277 struct qed_dev_info *info = &qdev->dev_info.common;
278 static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
279 static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
281 DP_INFO(edev, "*********************************\n");
282 DP_INFO(edev, " DPDK version : %s\n", rte_version());
283 DP_INFO(edev, " Chip details : %s %c%d\n",
284 ECORE_IS_BB(edev) ? "BB" : "AH",
285 'A' + edev->chip_rev,
286 (int)edev->chip_metal);
287 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
288 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
289 snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
290 ver_str, QEDE_PMD_VERSION);
291 DP_INFO(edev, " Driver version : %s\n", drv_ver);
292 DP_INFO(edev, " Firmware version : %s\n", ver_str);
294 snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
296 (info->mfw_rev >> 24) & 0xff,
297 (info->mfw_rev >> 16) & 0xff,
298 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
299 DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
300 DP_INFO(edev, " Firmware file : %s\n", qede_fw_file);
301 DP_INFO(edev, "*********************************\n");
304 static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
306 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
307 unsigned int i = 0, j = 0, qid;
308 unsigned int rxq_stat_cntrs, txq_stat_cntrs;
309 struct qede_tx_queue *txq;
311 DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
313 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
314 RTE_ETHDEV_QUEUE_STAT_CNTRS);
315 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
316 RTE_ETHDEV_QUEUE_STAT_CNTRS);
319 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
320 offsetof(struct qede_rx_queue, rcv_pkts), 0,
322 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
323 offsetof(struct qede_rx_queue, rx_hw_errors), 0,
325 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
326 offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
330 for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
331 OSAL_MEMSET((((char *)
332 (qdev->fp_array[qid].rxq)) +
333 qede_rxq_xstats_strings[j].offset),
338 if (i == rxq_stat_cntrs)
345 txq = qdev->fp_array[qid].txq;
347 OSAL_MEMSET((uint64_t *)(uintptr_t)
348 (((uint64_t)(uintptr_t)(txq)) +
349 offsetof(struct qede_tx_queue, xmit_pkts)), 0,
353 if (i == txq_stat_cntrs)
359 qede_stop_vport(struct ecore_dev *edev)
361 struct ecore_hwfn *p_hwfn;
367 for_each_hwfn(edev, i) {
368 p_hwfn = &edev->hwfns[i];
369 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
371 if (rc != ECORE_SUCCESS) {
372 DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
377 DP_INFO(edev, "vport stopped\n");
383 qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
385 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
386 struct ecore_sp_vport_start_params params;
387 struct ecore_hwfn *p_hwfn;
391 if (qdev->vport_started)
392 qede_stop_vport(edev);
394 memset(&params, 0, sizeof(params));
397 /* @DPDK - Disable FW placement */
398 params.zero_placement_offset = 1;
399 for_each_hwfn(edev, i) {
400 p_hwfn = &edev->hwfns[i];
401 params.concrete_fid = p_hwfn->hw_info.concrete_fid;
402 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
403 rc = ecore_sp_vport_start(p_hwfn, &params);
404 if (rc != ECORE_SUCCESS) {
405 DP_ERR(edev, "Start V-PORT failed %d\n", rc);
409 ecore_reset_vport_stats(edev);
410 qdev->vport_started = true;
411 DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
416 #define QEDE_NPAR_TX_SWITCHING "npar_tx_switching"
417 #define QEDE_VF_TX_SWITCHING "vf_tx_switching"
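/* Example usage (a sketch; assumes the standard EAL PCI whitelist syntax
 * of this DPDK era, and the PCI addresses are made up): both keys take an
 * integer and are parsed by qede_args_check() below.
 *
 *	testpmd -w 05:00.0,npar_tx_switching=0 ...
 *	testpmd -w 05:00.2,vf_tx_switching=0 ...
 *
 * A value of 0 disables tx-switching; the VF key is honored only when the
 * device is a VF.
 */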
419 /* Activate or deactivate vport via vport-update */
420 int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
422 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
423 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
424 struct ecore_sp_vport_update_params params;
425 struct ecore_hwfn *p_hwfn;
429 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
431 params.update_vport_active_rx_flg = 1;
432 params.update_vport_active_tx_flg = 1;
433 params.vport_active_rx_flg = flg;
434 params.vport_active_tx_flg = flg;
435 if (~qdev->enable_tx_switching & flg) {
436 params.update_tx_switching_flg = 1;
437 params.tx_switching_flg = !flg;
439 for_each_hwfn(edev, i) {
440 p_hwfn = &edev->hwfns[i];
441 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
442 rc = ecore_sp_vport_update(p_hwfn, &params,
443 ECORE_SPQ_MODE_EBLOCK, NULL);
444 if (rc != ECORE_SUCCESS) {
445 DP_ERR(edev, "Failed to update vport\n");
449 DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");
455 qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
456 uint16_t mtu, bool enable)
458 /* Enable LRO in split mode */
459 sge_tpa_params->tpa_ipv4_en_flg = enable;
460 sge_tpa_params->tpa_ipv6_en_flg = enable;
461 sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
462 sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
463 /* set if tpa enable changes */
464 sge_tpa_params->update_tpa_en_flg = 1;
465 /* set if tpa parameters should be handled */
466 sge_tpa_params->update_tpa_param_flg = enable;
468 sge_tpa_params->max_buffers_per_cqe = 20;
469 /* Enable TPA in split mode. In this mode each TPA segment
470 * starts on the new BD, so there is one BD per segment.
472 sge_tpa_params->tpa_pkt_split_flg = 1;
473 sge_tpa_params->tpa_hdr_data_split_flg = 0;
474 sge_tpa_params->tpa_gro_consistent_flg = 0;
475 sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
476 sge_tpa_params->tpa_max_size = 0x7FFF;
477 sge_tpa_params->tpa_min_size_to_start = mtu / 2;
478 sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
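/* How this path is reached (a sketch of the application side, using the
 * standard rte_ethdev API): requesting the LRO offload at configure time
 * makes qede_dev_start() call qede_enable_tpa() with the parameters set
 * up above.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_LRO;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	rte_eth_dev_start(port_id);
 *
 * 'port_id', 'nb_rxq' and 'nb_txq' are assumed to be provided by the
 * caller.
 */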
481 /* Enable/disable LRO via vport-update */
482 int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
484 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
485 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
486 struct ecore_sp_vport_update_params params;
487 struct ecore_sge_tpa_params tpa_params;
488 struct ecore_hwfn *p_hwfn;
492 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
493 memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
494 qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
496 params.sge_tpa_params = &tpa_params;
497 for_each_hwfn(edev, i) {
498 p_hwfn = &edev->hwfns[i];
499 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
500 rc = ecore_sp_vport_update(p_hwfn, &params,
501 ECORE_SPQ_MODE_EBLOCK, NULL);
502 if (rc != ECORE_SUCCESS) {
503 DP_ERR(edev, "Failed to update LRO\n");
507 qdev->enable_lro = flg;
508 eth_dev->data->lro = flg;
510 DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
516 qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
517 enum qed_filter_rx_mode_type type)
519 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
520 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
521 struct ecore_filter_accept_flags flags;
523 memset(&flags, 0, sizeof(flags));
525 flags.update_rx_mode_config = 1;
526 flags.update_tx_mode_config = 1;
527 flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
528 ECORE_ACCEPT_MCAST_MATCHED |
531 flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
532 ECORE_ACCEPT_MCAST_MATCHED |
535 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
536 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
538 flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
539 DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
541 } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
542 flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
543 } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
544 QED_FILTER_RX_MODE_TYPE_PROMISC)) {
545 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
546 ECORE_ACCEPT_MCAST_UNMATCHED;
549 return ecore_filter_accept_cmd(edev, 0, flags, false, false,
550 ECORE_SPQ_MODE_CB, NULL);
554 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
557 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
558 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
559 struct qede_ucast_entry *tmp = NULL;
560 struct qede_ucast_entry *u;
561 struct ether_addr *mac_addr;
563 mac_addr = (struct ether_addr *)ucast->mac;
565 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
566 if ((memcmp(mac_addr, &tmp->mac,
567 ETHER_ADDR_LEN) == 0) &&
568 ucast->vni == tmp->vni &&
569 ucast->vlan == tmp->vlan) {
570 DP_INFO(edev, "Unicast MAC is already added"
571 " with vlan = %u, vni = %u\n",
572 ucast->vlan, ucast->vni);
576 u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
577 RTE_CACHE_LINE_SIZE);
579 DP_ERR(edev, "Did not allocate memory for ucast\n");
582 ether_addr_copy(mac_addr, &u->mac);
583 u->vlan = ucast->vlan;
585 SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
588 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
589 if ((memcmp(mac_addr, &tmp->mac,
590 ETHER_ADDR_LEN) == 0) &&
591 ucast->vlan == tmp->vlan &&
592 ucast->vni == tmp->vni)
596 DP_INFO(edev, "Unicast MAC is not found\n");
599 SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
607 qede_add_mcast_filters(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
608 uint32_t mc_addrs_num)
610 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
611 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
612 struct ecore_filter_mcast mcast;
613 struct qede_mcast_entry *m = NULL;
617 for (i = 0; i < mc_addrs_num; i++) {
618 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
619 RTE_CACHE_LINE_SIZE);
621 DP_ERR(edev, "Did not allocate memory for mcast\n");
624 ether_addr_copy(&mc_addrs[i], &m->mac);
625 SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
627 memset(&mcast, 0, sizeof(mcast));
628 mcast.num_mc_addrs = mc_addrs_num;
629 mcast.opcode = ECORE_FILTER_ADD;
630 for (i = 0; i < mc_addrs_num; i++)
631 ether_addr_copy(&mc_addrs[i], (struct ether_addr *)
633 rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
634 if (rc != ECORE_SUCCESS) {
635 DP_ERR(edev, "Failed to add multicast filter (rc = %d\n)", rc);
642 static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
644 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
645 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
646 struct qede_mcast_entry *tmp = NULL;
647 struct ecore_filter_mcast mcast;
651 memset(&mcast, 0, sizeof(mcast));
652 mcast.num_mc_addrs = qdev->num_mc_addr;
653 mcast.opcode = ECORE_FILTER_REMOVE;
655 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
656 ether_addr_copy(&tmp->mac, (struct ether_addr *)&mcast.mac[j]);
659 rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
660 if (rc != ECORE_SUCCESS) {
661 DP_ERR(edev, "Failed to delete multicast filter\n");
665 while (!SLIST_EMPTY(&qdev->mc_list_head)) {
666 tmp = SLIST_FIRST(&qdev->mc_list_head);
667 SLIST_REMOVE_HEAD(&qdev->mc_list_head, list);
669 SLIST_INIT(&qdev->mc_list_head);
675 qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
678 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
679 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
680 enum _ecore_status_t rc = ECORE_INVAL;
682 if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) {
683 DP_ERR(edev, "Ucast filter table limit exceeded,"
684 " Please enable promisc mode\n");
688 rc = qede_ucast_filter(eth_dev, ucast, add);
690 rc = ecore_filter_ucast_cmd(edev, ucast,
691 ECORE_SPQ_MODE_CB, NULL);
692 /* Indicate error only for add filter operation.
693 * Delete filter operations are not severe.
695 if ((rc != ECORE_SUCCESS) && add)
696 DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
703 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
704 __rte_unused uint32_t index, __rte_unused uint32_t pool)
706 struct ecore_filter_ucast ucast;
709 if (!is_valid_assigned_ether_addr(mac_addr))
712 qede_set_ucast_cmn_params(&ucast);
713 ucast.opcode = ECORE_FILTER_ADD;
714 ucast.type = ECORE_FILTER_MAC;
715 ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
716 re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
721 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
723 struct qede_dev *qdev = eth_dev->data->dev_private;
724 struct ecore_dev *edev = &qdev->edev;
725 struct ecore_filter_ucast ucast;
727 PMD_INIT_FUNC_TRACE(edev);
729 if (index >= qdev->dev_info.num_mac_filters) {
730 DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
731 index, qdev->dev_info.num_mac_filters);
735 if (!is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
738 qede_set_ucast_cmn_params(&ucast);
739 ucast.opcode = ECORE_FILTER_REMOVE;
740 ucast.type = ECORE_FILTER_MAC;
742 /* Use the index maintained by rte */
743 ether_addr_copy(&eth_dev->data->mac_addrs[index],
744 (struct ether_addr *)&ucast.mac);
746 qede_mac_int_ops(eth_dev, &ucast, false);
750 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
752 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
753 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
755 if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
756 mac_addr->addr_bytes)) {
757 DP_ERR(edev, "Setting MAC address is not allowed\n");
761 qede_mac_addr_remove(eth_dev, 0);
763 return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
766 void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
768 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
769 struct ecore_sp_vport_update_params params;
770 struct ecore_hwfn *p_hwfn;
774 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
776 params.update_accept_any_vlan_flg = 1;
777 params.accept_any_vlan = flg;
778 for_each_hwfn(edev, i) {
779 p_hwfn = &edev->hwfns[i];
780 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
781 rc = ecore_sp_vport_update(p_hwfn, &params,
782 ECORE_SPQ_MODE_EBLOCK, NULL);
783 if (rc != ECORE_SUCCESS) {
784 DP_ERR(edev, "Failed to configure accept-any-vlan\n");
789 DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
792 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
794 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
795 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
796 struct ecore_sp_vport_update_params params;
797 struct ecore_hwfn *p_hwfn;
801 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
803 params.update_inner_vlan_removal_flg = 1;
804 params.inner_vlan_removal_flg = flg;
805 for_each_hwfn(edev, i) {
806 p_hwfn = &edev->hwfns[i];
807 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
808 rc = ecore_sp_vport_update(p_hwfn, &params,
809 ECORE_SPQ_MODE_EBLOCK, NULL);
810 if (rc != ECORE_SUCCESS) {
811 DP_ERR(edev, "Failed to update vport\n");
816 DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
820 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
821 uint16_t vlan_id, int on)
823 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
824 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
825 struct qed_dev_eth_info *dev_info = &qdev->dev_info;
826 struct qede_vlan_entry *tmp = NULL;
827 struct qede_vlan_entry *vlan;
828 struct ecore_filter_ucast ucast;
832 if (qdev->configured_vlans == dev_info->num_vlan_filters) {
833 DP_ERR(edev, "Reached max VLAN filter limit;"
834 " enabling accept_any_vlan\n");
835 qede_config_accept_any_vlan(qdev, true);
839 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
840 if (tmp->vid == vlan_id) {
841 DP_INFO(edev, "VLAN %u already configured\n",
847 vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
848 RTE_CACHE_LINE_SIZE);
851 DP_ERR(edev, "Did not allocate memory for VLAN\n");
855 qede_set_ucast_cmn_params(&ucast);
856 ucast.opcode = ECORE_FILTER_ADD;
857 ucast.type = ECORE_FILTER_VLAN;
858 ucast.vlan = vlan_id;
859 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
862 DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
867 SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
868 qdev->configured_vlans++;
869 DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
870 vlan_id, qdev->configured_vlans);
873 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
874 if (tmp->vid == vlan_id)
879 if (qdev->configured_vlans == 0) {
881 "No VLAN filters configured yet\n");
885 DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
889 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);
891 qede_set_ucast_cmn_params(&ucast);
892 ucast.opcode = ECORE_FILTER_REMOVE;
893 ucast.type = ECORE_FILTER_VLAN;
894 ucast.vlan = vlan_id;
895 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
898 DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
901 qdev->configured_vlans--;
902 DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
903 vlan_id, qdev->configured_vlans);
910 static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
912 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
913 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
914 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
916 if (mask & ETH_VLAN_STRIP_MASK) {
917 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
918 (void)qede_vlan_stripping(eth_dev, 1);
920 (void)qede_vlan_stripping(eth_dev, 0);
923 if (mask & ETH_VLAN_FILTER_MASK) {
924 /* VLAN filtering kicks in when a VLAN is added */
925 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
926 qede_vlan_filter_set(eth_dev, 0, 1);
928 if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
930 " Please remove existing VLAN filters"
931 " before disabling VLAN filtering\n");
932 /* Signal app that VLAN filtering is still
935 eth_dev->data->dev_conf.rxmode.offloads |=
936 DEV_RX_OFFLOAD_VLAN_FILTER;
938 qede_vlan_filter_set(eth_dev, 0, 0);
943 if (mask & ETH_VLAN_EXTEND_MASK)
944 DP_ERR(edev, "Extend VLAN not supported\n");
946 qdev->vlan_offload_mask = mask;
948 DP_INFO(edev, "VLAN offload mask %d\n", mask);
953 static void qede_prandom_bytes(uint32_t *buff)
957 srand((unsigned int)time(NULL));
958 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
962 int qede_config_rss(struct rte_eth_dev *eth_dev)
964 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
965 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
966 uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
967 struct rte_eth_rss_reta_entry64 reta_conf[2];
968 struct rte_eth_rss_conf rss_conf;
969 uint32_t i, id, pos, q;
971 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
972 if (!rss_conf.rss_key) {
973 DP_INFO(edev, "Applying driver default key\n");
974 rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
975 qede_prandom_bytes(&def_rss_key[0]);
976 rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
979 /* Configure RSS hash */
980 if (qede_rss_hash_update(eth_dev, &rss_conf))
983 /* Configure default RETA */
984 memset(reta_conf, 0, sizeof(reta_conf));
985 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
986 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
988 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
989 id = i / RTE_RETA_GROUP_SIZE;
990 pos = i % RTE_RETA_GROUP_SIZE;
991 q = i % QEDE_RSS_COUNT(qdev);
992 reta_conf[id].reta[pos] = q;
994 if (qede_rss_reta_update(eth_dev, &reta_conf[0],
995 ECORE_RSS_IND_TABLE_SIZE))
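/* Worked example for the default RETA above (assuming the 128-entry
 * indirection table and, say, 4 RX queues): entry i is placed in group
 * i / RTE_RETA_GROUP_SIZE at position i % RTE_RETA_GROUP_SIZE and mapped
 * to queue i % 4, i.e. queues 0,1,2,3,0,1,2,3,... so traffic is spread
 * round-robin across all configured queues.
 */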
1001 static void qede_fastpath_start(struct ecore_dev *edev)
1003 struct ecore_hwfn *p_hwfn;
1006 for_each_hwfn(edev, i) {
1007 p_hwfn = &edev->hwfns[i];
1008 ecore_hw_start_fastpath(p_hwfn);
1012 static int qede_dev_start(struct rte_eth_dev *eth_dev)
1014 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1015 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1016 struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1018 PMD_INIT_FUNC_TRACE(edev);
1020 /* Update MTU only if it has changed */
1021 if (eth_dev->data->mtu != qdev->mtu) {
1022 if (qede_update_mtu(eth_dev, qdev->mtu))
1026 /* Configure TPA parameters */
1027 if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1028 if (qede_enable_tpa(eth_dev, true))
1030 /* Enable scatter mode for LRO */
1031 if (!eth_dev->data->scattered_rx)
1032 rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
1036 if (qede_start_queues(eth_dev))
1040 qede_reset_queue_stats(qdev, true);
1042 /* Newer SR-IOV PF drivers expect RX/TX queues to be started before
1043 * enabling RSS; hence RSS configuration is deferred up to this point.
1044 * Also, we would like to retain similar behavior in the PF case, so we
1045 * don't do a PF/VF-specific check here.
1047 if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
1048 if (qede_config_rss(eth_dev))
1052 if (qede_activate_vport(eth_dev, true))
1055 /* Update link status */
1056 qede_link_update(eth_dev, 0);
1058 /* Start/resume traffic */
1059 qede_fastpath_start(edev);
1061 DP_INFO(edev, "Device started\n");
1065 DP_ERR(edev, "Device start fails\n");
1066 return -1; /* common error code is < 0 */
1069 static void qede_dev_stop(struct rte_eth_dev *eth_dev)
1071 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1072 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1074 PMD_INIT_FUNC_TRACE(edev);
1077 if (qede_activate_vport(eth_dev, false))
1080 if (qdev->enable_lro)
1081 qede_enable_tpa(eth_dev, false);
1084 qede_stop_queues(eth_dev);
1086 /* Disable traffic */
1087 ecore_hw_stop_fastpath(edev); /* TBD - loop */
1089 DP_INFO(edev, "Device is stopped\n");
1092 static const char * const valid_args[] = {
1093 QEDE_NPAR_TX_SWITCHING,
1094 QEDE_VF_TX_SWITCHING,
1098 static int qede_args_check(const char *key, const char *val, void *opaque)
1102 struct rte_eth_dev *eth_dev = opaque;
1103 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1104 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1107 tmp = strtoul(val, NULL, 0);
1109 DP_INFO(edev, "%s: \"%s\" is not a valid integer\n", key, val);
1113 if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
1114 ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) {
1115 qdev->enable_tx_switching = !!tmp;
1116 DP_INFO(edev, "Disabling %s tx-switching\n",
1117 strcmp(QEDE_NPAR_TX_SWITCHING, key) ?
1124 static int qede_args(struct rte_eth_dev *eth_dev)
1126 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
1127 struct rte_kvargs *kvlist;
1128 struct rte_devargs *devargs;
1132 devargs = pci_dev->device.devargs;
1134 return 0; /* return success */
1136 kvlist = rte_kvargs_parse(devargs->args, valid_args);
1140 /* Process parameters. */
1141 for (i = 0; (valid_args[i] != NULL); ++i) {
1142 if (rte_kvargs_count(kvlist, valid_args[i])) {
1143 ret = rte_kvargs_process(kvlist, valid_args[i],
1144 qede_args_check, eth_dev);
1145 if (ret != ECORE_SUCCESS) {
1146 rte_kvargs_free(kvlist);
1151 rte_kvargs_free(kvlist);
1156 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
1158 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1159 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1160 struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1163 PMD_INIT_FUNC_TRACE(edev);
1165 /* Check requirements for 100G mode */
1166 if (ECORE_IS_CMT(edev)) {
1167 if (eth_dev->data->nb_rx_queues < 2 ||
1168 eth_dev->data->nb_tx_queues < 2) {
1169 DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
1173 if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
1174 (eth_dev->data->nb_tx_queues % 2 != 0)) {
1176 "100G mode needs even no. of RX/TX queues\n");
1181 /* We need at least one RX queue. There is no minimum check in
1182 * rte_eth_dev_configure(), so we are checking it here.
1184 if (eth_dev->data->nb_rx_queues == 0) {
1185 DP_ERR(edev, "Minimum one RX queue is required\n");
1189 /* Enable Tx switching by default */
1190 qdev->enable_tx_switching = 1;
1192 /* Parse devargs and fix up rxmode */
1193 if (qede_args(eth_dev))
1194 DP_NOTICE(edev, false,
1195 "Invalid devargs supplied, requested change will not take effect\n");
1197 if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
1198 rxmode->mq_mode == ETH_MQ_RX_RSS)) {
1199 DP_ERR(edev, "Unsupported multi-queue mode\n");
1202 /* Flow director mode check */
1203 if (qede_check_fdir_support(eth_dev))
1206 qede_dealloc_fp_resc(eth_dev);
1207 qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
1208 qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
1209 if (qede_alloc_fp_resc(qdev))
1212 /* If jumbo enabled adjust MTU */
1213 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
1214 eth_dev->data->mtu =
1215 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
1216 ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
1218 if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
1219 eth_dev->data->scattered_rx = 1;
1221 if (qede_start_vport(qdev, eth_dev->data->mtu))
1224 qdev->mtu = eth_dev->data->mtu;
1226 /* Enable VLAN offloads by default */
1227 ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
1228 ETH_VLAN_FILTER_MASK);
1232 DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
1233 QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
1238 /* Info about HW descriptor ring limitations */
1239 static const struct rte_eth_desc_lim qede_rx_desc_lim = {
1240 .nb_max = 0x8000, /* 32K */
1242 .nb_align = 128 /* lowest common multiple */
1245 static const struct rte_eth_desc_lim qede_tx_desc_lim = {
1246 .nb_max = 0x8000, /* 32K */
1249 .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
1250 .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
1254 qede_dev_info_get(struct rte_eth_dev *eth_dev,
1255 struct rte_eth_dev_info *dev_info)
1257 struct qede_dev *qdev = eth_dev->data->dev_private;
1258 struct ecore_dev *edev = &qdev->edev;
1259 struct qed_link_output link;
1260 uint32_t speed_cap = 0;
1262 PMD_INIT_FUNC_TRACE(edev);
1264 dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
1265 dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
1266 dev_info->rx_desc_lim = qede_rx_desc_lim;
1267 dev_info->tx_desc_lim = qede_tx_desc_lim;
1270 dev_info->max_rx_queues = (uint16_t)RTE_MIN(
1271 QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
1273 dev_info->max_rx_queues = (uint16_t)RTE_MIN(
1274 QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
1275 dev_info->max_tx_queues = dev_info->max_rx_queues;
1277 dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
1278 dev_info->max_vfs = 0;
1279 dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
1280 dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
1281 dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
1282 dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
1283 DEV_RX_OFFLOAD_UDP_CKSUM |
1284 DEV_RX_OFFLOAD_TCP_CKSUM |
1285 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1286 DEV_RX_OFFLOAD_TCP_LRO |
1287 DEV_RX_OFFLOAD_KEEP_CRC |
1288 DEV_RX_OFFLOAD_SCATTER |
1289 DEV_RX_OFFLOAD_JUMBO_FRAME |
1290 DEV_RX_OFFLOAD_VLAN_FILTER |
1291 DEV_RX_OFFLOAD_VLAN_STRIP);
1292 dev_info->rx_queue_offload_capa = 0;
1294 /* TX offloads are on a per-packet basis, so they are applicable
1295 * at both the port and queue levels.
1297 dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
1298 DEV_TX_OFFLOAD_IPV4_CKSUM |
1299 DEV_TX_OFFLOAD_UDP_CKSUM |
1300 DEV_TX_OFFLOAD_TCP_CKSUM |
1301 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1302 DEV_TX_OFFLOAD_MULTI_SEGS |
1303 DEV_TX_OFFLOAD_TCP_TSO |
1304 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1305 DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
1306 dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
1308 dev_info->default_txconf = (struct rte_eth_txconf) {
1309 .offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
1312 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1313 /* Packets are always dropped if no descriptors are available */
1318 memset(&link, 0, sizeof(struct qed_link_output));
1319 qdev->ops->common->get_link(edev, &link);
1320 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1321 speed_cap |= ETH_LINK_SPEED_1G;
1322 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1323 speed_cap |= ETH_LINK_SPEED_10G;
1324 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1325 speed_cap |= ETH_LINK_SPEED_25G;
1326 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1327 speed_cap |= ETH_LINK_SPEED_40G;
1328 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1329 speed_cap |= ETH_LINK_SPEED_50G;
1330 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1331 speed_cap |= ETH_LINK_SPEED_100G;
1332 dev_info->speed_capa = speed_cap;
1335 /* return 0 means link status changed, -1 means not changed */
1337 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
1339 struct qede_dev *qdev = eth_dev->data->dev_private;
1340 struct ecore_dev *edev = &qdev->edev;
1341 struct qed_link_output q_link;
1342 struct rte_eth_link link;
1343 uint16_t link_duplex;
1345 memset(&q_link, 0, sizeof(q_link));
1346 memset(&link, 0, sizeof(link));
1348 qdev->ops->common->get_link(edev, &q_link);
1351 link.link_speed = q_link.speed;
1354 switch (q_link.duplex) {
1355 case QEDE_DUPLEX_HALF:
1356 link_duplex = ETH_LINK_HALF_DUPLEX;
1358 case QEDE_DUPLEX_FULL:
1359 link_duplex = ETH_LINK_FULL_DUPLEX;
1361 case QEDE_DUPLEX_UNKNOWN:
1365 link.link_duplex = link_duplex;
1368 link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
1371 link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
1372 ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1374 DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
1375 link.link_speed, link.link_duplex,
1376 link.link_autoneg, link.link_status);
1378 return rte_eth_linkstatus_set(eth_dev, &link);
1381 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
1383 struct qede_dev *qdev = eth_dev->data->dev_private;
1384 struct ecore_dev *edev = &qdev->edev;
1385 enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
1387 PMD_INIT_FUNC_TRACE(edev);
1389 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1390 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1392 qed_configure_filter_rx_mode(eth_dev, type);
1395 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
1397 struct qede_dev *qdev = eth_dev->data->dev_private;
1398 struct ecore_dev *edev = &qdev->edev;
1400 PMD_INIT_FUNC_TRACE(edev);
1402 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1403 qed_configure_filter_rx_mode(eth_dev,
1404 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
1406 qed_configure_filter_rx_mode(eth_dev,
1407 QED_FILTER_RX_MODE_TYPE_REGULAR);
1410 static void qede_poll_sp_sb_cb(void *param)
1412 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1413 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1414 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1417 qede_interrupt_action(ECORE_LEADING_HWFN(edev));
1418 qede_interrupt_action(&edev->hwfns[1]);
1420 rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
1424 DP_ERR(edev, "Unable to start periodic"
1425 " timer rc %d\n", rc);
1426 assert(false && "Unable to start periodic timer");
1430 static void qede_dev_close(struct rte_eth_dev *eth_dev)
1432 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1433 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1434 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1436 PMD_INIT_FUNC_TRACE(edev);
1438 /* dev_stop() shall clean up fp resources in hw, but without releasing
1439 * dma memories and sw structures, so that dev_start() can be called
1440 * by the app without reconfiguration. However, in dev_close() we
1441 * can release all the resources, and the device can be brought up anew
1443 if (eth_dev->data->dev_started)
1444 qede_dev_stop(eth_dev);
1446 qede_stop_vport(edev);
1447 qdev->vport_started = false;
1448 qede_fdir_dealloc_resc(eth_dev);
1449 qede_dealloc_fp_resc(eth_dev);
1451 eth_dev->data->nb_rx_queues = 0;
1452 eth_dev->data->nb_tx_queues = 0;
1454 /* Bring the link down */
1455 qede_dev_set_link_state(eth_dev, false);
1456 qdev->ops->common->slowpath_stop(edev);
1457 qdev->ops->common->remove(edev);
1458 rte_intr_disable(&pci_dev->intr_handle);
1460 switch (pci_dev->intr_handle.type) {
1461 case RTE_INTR_HANDLE_UIO_INTX:
1462 case RTE_INTR_HANDLE_VFIO_LEGACY:
1463 rte_intr_callback_unregister(&pci_dev->intr_handle,
1464 qede_interrupt_handler_intx,
1468 rte_intr_callback_unregister(&pci_dev->intr_handle,
1469 qede_interrupt_handler,
1473 if (ECORE_IS_CMT(edev))
1474 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
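/* Typical lifecycle around the handlers above (a sketch of the
 * application side, standard rte_ethdev API):
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	... rx/tx queue setup ...
 *	rte_eth_dev_start(port_id);	-> qede_dev_start()
 *	rte_eth_dev_stop(port_id);	-> qede_dev_stop(), restartable
 *	rte_eth_dev_close(port_id);	-> qede_dev_close(), frees all
 */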
1478 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
1480 struct qede_dev *qdev = eth_dev->data->dev_private;
1481 struct ecore_dev *edev = &qdev->edev;
1482 struct ecore_eth_stats stats;
1483 unsigned int i = 0, j = 0, qid;
1484 unsigned int rxq_stat_cntrs, txq_stat_cntrs;
1485 struct qede_tx_queue *txq;
1487 ecore_get_vport_stats(edev, &stats);
1490 eth_stats->ipackets = stats.common.rx_ucast_pkts +
1491 stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;
1493 eth_stats->ibytes = stats.common.rx_ucast_bytes +
1494 stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;
1496 eth_stats->ierrors = stats.common.rx_crc_errors +
1497 stats.common.rx_align_errors +
1498 stats.common.rx_carrier_errors +
1499 stats.common.rx_oversize_packets +
1500 stats.common.rx_jabbers + stats.common.rx_undersize_packets;
1502 eth_stats->rx_nombuf = stats.common.no_buff_discards;
1504 eth_stats->imissed = stats.common.mftag_filter_discards +
1505 stats.common.mac_filter_discards +
1506 stats.common.no_buff_discards +
1507 stats.common.brb_truncates + stats.common.brb_discards;
1510 eth_stats->opackets = stats.common.tx_ucast_pkts +
1511 stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;
1513 eth_stats->obytes = stats.common.tx_ucast_bytes +
1514 stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;
1516 eth_stats->oerrors = stats.common.tx_err_drop_pkts;
1519 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1520 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1521 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
1522 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1523 if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
1524 (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
1525 DP_VERBOSE(edev, ECORE_MSG_DEBUG,
1526 "Not all the queue stats will be displayed. Set"
1527 " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
1528 " appropriately and retry.\n");
1531 eth_stats->q_ipackets[i] =
1533 ((char *)(qdev->fp_array[qid].rxq)) +
1534 offsetof(struct qede_rx_queue,
1536 eth_stats->q_errors[i] =
1538 ((char *)(qdev->fp_array[qid].rxq)) +
1539 offsetof(struct qede_rx_queue,
1542 ((char *)(qdev->fp_array[qid].rxq)) +
1543 offsetof(struct qede_rx_queue,
1546 if (i == rxq_stat_cntrs)
1551 txq = qdev->fp_array[qid].txq;
1552 eth_stats->q_opackets[j] =
1553 *((uint64_t *)(uintptr_t)
1554 (((uint64_t)(uintptr_t)(txq)) +
1555 offsetof(struct qede_tx_queue,
1558 if (j == txq_stat_cntrs)
1566 qede_get_xstats_count(struct qede_dev *qdev) {
1567 if (ECORE_IS_BB(&qdev->edev))
1568 return RTE_DIM(qede_xstats_strings) +
1569 RTE_DIM(qede_bb_xstats_strings) +
1570 (RTE_DIM(qede_rxq_xstats_strings) *
1571 RTE_MIN(QEDE_RSS_COUNT(qdev),
1572 RTE_ETHDEV_QUEUE_STAT_CNTRS));
1574 return RTE_DIM(qede_xstats_strings) +
1575 RTE_DIM(qede_ah_xstats_strings) +
1576 (RTE_DIM(qede_rxq_xstats_strings) *
1577 RTE_MIN(QEDE_RSS_COUNT(qdev),
1578 RTE_ETHDEV_QUEUE_STAT_CNTRS));
1582 qede_get_xstats_names(struct rte_eth_dev *dev,
1583 struct rte_eth_xstat_name *xstats_names,
1584 __rte_unused unsigned int limit)
1586 struct qede_dev *qdev = dev->data->dev_private;
1587 struct ecore_dev *edev = &qdev->edev;
1588 const unsigned int stat_cnt = qede_get_xstats_count(qdev);
1589 unsigned int i, qid, stat_idx = 0;
1590 unsigned int rxq_stat_cntrs;
1592 if (xstats_names != NULL) {
1593 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1594 snprintf(xstats_names[stat_idx].name,
1595 sizeof(xstats_names[stat_idx].name),
1597 qede_xstats_strings[i].name);
1601 if (ECORE_IS_BB(edev)) {
1602 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
1603 snprintf(xstats_names[stat_idx].name,
1604 sizeof(xstats_names[stat_idx].name),
1606 qede_bb_xstats_strings[i].name);
1610 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
1611 snprintf(xstats_names[stat_idx].name,
1612 sizeof(xstats_names[stat_idx].name),
1614 qede_ah_xstats_strings[i].name);
1619 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1620 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1621 for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1622 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1623 snprintf(xstats_names[stat_idx].name,
1624 sizeof(xstats_names[stat_idx].name),
1626 qede_rxq_xstats_strings[i].name, qid,
1627 qede_rxq_xstats_strings[i].name + 4);
1637 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1640 struct qede_dev *qdev = dev->data->dev_private;
1641 struct ecore_dev *edev = &qdev->edev;
1642 struct ecore_eth_stats stats;
1643 const unsigned int num = qede_get_xstats_count(qdev);
1644 unsigned int i, qid, stat_idx = 0;
1645 unsigned int rxq_stat_cntrs;
1650 ecore_get_vport_stats(edev, &stats);
1652 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1653 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
1654 qede_xstats_strings[i].offset);
1655 xstats[stat_idx].id = stat_idx;
1659 if (ECORE_IS_BB(edev)) {
1660 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
1661 xstats[stat_idx].value =
1662 *(uint64_t *)(((char *)&stats) +
1663 qede_bb_xstats_strings[i].offset);
1664 xstats[stat_idx].id = stat_idx;
1668 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
1669 xstats[stat_idx].value =
1670 *(uint64_t *)(((char *)&stats) +
1671 qede_ah_xstats_strings[i].offset);
1672 xstats[stat_idx].id = stat_idx;
1677 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1678 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1679 for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1681 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1682 xstats[stat_idx].value = *(uint64_t *)(
1683 ((char *)(qdev->fp_array[qid].rxq)) +
1684 qede_rxq_xstats_strings[i].offset);
1685 xstats[stat_idx].id = stat_idx;
1695 qede_reset_xstats(struct rte_eth_dev *dev)
1697 struct qede_dev *qdev = dev->data->dev_private;
1698 struct ecore_dev *edev = &qdev->edev;
1700 ecore_reset_vport_stats(edev);
1701 qede_reset_queue_stats(qdev, true);
1704 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
1706 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1707 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1708 struct qed_link_params link_params;
1711 DP_INFO(edev, "setting link state %d\n", link_up);
1712 memset(&link_params, 0, sizeof(link_params));
1713 link_params.link_up = link_up;
1714 rc = qdev->ops->common->set_link(edev, &link_params);
1715 if (rc != ECORE_SUCCESS)
1716 DP_ERR(edev, "Unable to set link state %d\n", link_up);
1721 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
1723 return qede_dev_set_link_state(eth_dev, true);
1726 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
1728 return qede_dev_set_link_state(eth_dev, false);
1731 static void qede_reset_stats(struct rte_eth_dev *eth_dev)
1733 struct qede_dev *qdev = eth_dev->data->dev_private;
1734 struct ecore_dev *edev = &qdev->edev;
1736 ecore_reset_vport_stats(edev);
1737 qede_reset_queue_stats(qdev, false);
1740 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
1742 enum qed_filter_rx_mode_type type =
1743 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1745 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1746 type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
1748 qed_configure_filter_rx_mode(eth_dev, type);
1751 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
1753 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1754 qed_configure_filter_rx_mode(eth_dev,
1755 QED_FILTER_RX_MODE_TYPE_PROMISC);
1757 qed_configure_filter_rx_mode(eth_dev,
1758 QED_FILTER_RX_MODE_TYPE_REGULAR);
1762 qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
1763 uint32_t mc_addrs_num)
1765 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1766 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1769 if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
1770 DP_ERR(edev, "Reached max multicast filters limit,"
1771 "Please enable multicast promisc mode\n");
1775 for (i = 0; i < mc_addrs_num; i++) {
1776 if (!is_multicast_ether_addr(&mc_addrs[i])) {
1777 DP_ERR(edev, "Not a valid multicast MAC\n");
1782 /* Flush all existing entries */
1783 if (qede_del_mcast_filters(eth_dev))
1786 /* Set new mcast list */
1787 return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
1790 /* Update MTU via vport-update without doing port restart.
1791 * The vport must be deactivated before calling this API.
1793 int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
1795 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1796 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1797 struct ecore_hwfn *p_hwfn;
1802 struct ecore_sp_vport_update_params params;
1804 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
1805 params.vport_id = 0;
1807 params.vport_id = 0;
1808 for_each_hwfn(edev, i) {
1809 p_hwfn = &edev->hwfns[i];
1810 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1811 rc = ecore_sp_vport_update(p_hwfn, &params,
1812 ECORE_SPQ_MODE_EBLOCK, NULL);
1813 if (rc != ECORE_SUCCESS)
1817 for_each_hwfn(edev, i) {
1818 p_hwfn = &edev->hwfns[i];
1819 rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
1820 if (rc == ECORE_INVAL) {
1821 DP_INFO(edev, "VF MTU Update TLV not supported\n");
1822 /* Recreate vport */
1823 rc = qede_start_vport(qdev, mtu);
1824 if (rc != ECORE_SUCCESS)
1827 /* Restore config lost due to vport stop */
1828 if (eth_dev->data->promiscuous)
1829 qede_promiscuous_enable(eth_dev);
1831 qede_promiscuous_disable(eth_dev);
1833 if (eth_dev->data->all_multicast)
1834 qede_allmulticast_enable(eth_dev);
1836 qede_allmulticast_disable(eth_dev);
1838 qede_vlan_offload_set(eth_dev,
1839 qdev->vlan_offload_mask);
1840 } else if (rc != ECORE_SUCCESS) {
1845 DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);
1850 DP_ERR(edev, "Failed to update MTU\n");
1854 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
1855 struct rte_eth_fc_conf *fc_conf)
1857 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1858 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1859 struct qed_link_output current_link;
1860 struct qed_link_params params;
1862 memset(&current_link, 0, sizeof(current_link));
1863 qdev->ops->common->get_link(edev, &current_link);
1865 memset(&params, 0, sizeof(params));
1866 params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
1867 if (fc_conf->autoneg) {
1868 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
1869 DP_ERR(edev, "Autoneg not supported\n");
1872 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
1875 /* Pause is assumed to be supported (SUPPORTED_Pause) */
1876 if (fc_conf->mode == RTE_FC_FULL)
1877 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
1878 QED_LINK_PAUSE_RX_ENABLE);
1879 if (fc_conf->mode == RTE_FC_TX_PAUSE)
1880 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1881 if (fc_conf->mode == RTE_FC_RX_PAUSE)
1882 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1884 params.link_up = true;
1885 (void)qdev->ops->common->set_link(edev, &params);
1890 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
1891 struct rte_eth_fc_conf *fc_conf)
1893 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1894 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1895 struct qed_link_output current_link;
1897 memset(&current_link, 0, sizeof(current_link));
1898 qdev->ops->common->get_link(edev, &current_link);
1900 if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1901 fc_conf->autoneg = true;
1903 if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
1904 QED_LINK_PAUSE_TX_ENABLE))
1905 fc_conf->mode = RTE_FC_FULL;
1906 else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
1907 fc_conf->mode = RTE_FC_RX_PAUSE;
1908 else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
1909 fc_conf->mode = RTE_FC_TX_PAUSE;
1911 fc_conf->mode = RTE_FC_NONE;
1916 static const uint32_t *
1917 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
1919 static const uint32_t ptypes[] = {
1921 RTE_PTYPE_L2_ETHER_VLAN,
1926 RTE_PTYPE_TUNNEL_VXLAN,
1928 RTE_PTYPE_TUNNEL_GENEVE,
1929 RTE_PTYPE_TUNNEL_GRE,
1931 RTE_PTYPE_INNER_L2_ETHER,
1932 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1933 RTE_PTYPE_INNER_L3_IPV4,
1934 RTE_PTYPE_INNER_L3_IPV6,
1935 RTE_PTYPE_INNER_L4_TCP,
1936 RTE_PTYPE_INNER_L4_UDP,
1937 RTE_PTYPE_INNER_L4_FRAG,
1941 if (eth_dev->rx_pkt_burst == qede_recv_pkts)
1947 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
1950 *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
1951 *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
1952 *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
1953 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
1954 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
1955 *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
1956 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
1957 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
1960 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
1961 struct rte_eth_rss_conf *rss_conf)
1963 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1964 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1965 struct ecore_sp_vport_update_params vport_update_params;
1966 struct ecore_rss_params rss_params;
1967 struct ecore_hwfn *p_hwfn;
1968 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1969 uint64_t hf = rss_conf->rss_hf;
1970 uint8_t len = rss_conf->rss_key_len;
1975 memset(&vport_update_params, 0, sizeof(vport_update_params));
1976 memset(&rss_params, 0, sizeof(rss_params));
1978 DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
1979 (unsigned long)hf, len, key);
1983 DP_INFO(edev, "Enabling rss\n");
1986 qede_init_rss_caps(&rss_params.rss_caps, hf);
1987 rss_params.update_rss_capabilities = 1;
1991 if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
1992 DP_ERR(edev, "RSS key length exceeds limit\n");
1995 DP_INFO(edev, "Applying user supplied hash key\n");
1996 rss_params.update_rss_key = 1;
1997 memcpy(&rss_params.rss_key, key, len);
1999 rss_params.rss_enable = 1;
2002 rss_params.update_rss_config = 1;
2003 /* tbl_size has to be set with capabilities */
2004 rss_params.rss_table_size_log = 7;
2005 vport_update_params.vport_id = 0;
2006 /* pass the L2 handles instead of qids */
2007 for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
2008 idx = i % QEDE_RSS_COUNT(qdev);
2009 rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
2011 vport_update_params.rss_params = &rss_params;
2013 for_each_hwfn(edev, i) {
2014 p_hwfn = &edev->hwfns[i];
2015 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2016 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
2017 ECORE_SPQ_MODE_EBLOCK, NULL);
2019 DP_ERR(edev, "vport-update for RSS failed\n");
2023 qdev->rss_enable = rss_params.rss_enable;
2025 /* Update local structure for hash query */
2026 qdev->rss_conf.rss_hf = hf;
2027 qdev->rss_conf.rss_key_len = len;
2028 if (qdev->rss_enable) {
2029 if (qdev->rss_conf.rss_key == NULL) {
2030 qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
2031 if (qdev->rss_conf.rss_key == NULL) {
2032 DP_ERR(edev, "No memory to store RSS key\n");
2037 DP_INFO(edev, "Storing RSS key\n");
2038 memcpy(qdev->rss_conf.rss_key, key, len);
2040 } else if (!qdev->rss_enable && len == 0) {
2041 if (qdev->rss_conf.rss_key) {
2042 free(qdev->rss_conf.rss_key);
2043 qdev->rss_conf.rss_key = NULL;
2044 DP_INFO(edev, "Free RSS key\n");
static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
				  struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);

	rss_conf->rss_hf = qdev->rss_conf.rss_hf;
	rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;

	if (rss_conf->rss_key && qdev->rss_conf.rss_key)
		memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
		       rss_conf->rss_key_len);

	return 0;
}
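
/* For a CMT (two-engine) device the single logical indirection table must
 * be split into one table per hwfn: entry i belongs to the engine that
 * owns its queue cid and lands at slot i / num_hwfns of that engine's
 * table. RSS is then disabled again if each engine ends up with only one
 * distinct queue.
 */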
static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
				     struct ecore_rss_params *rss)
{
	int i, fn;
	bool rss_mode = 1; /* enable */
	struct ecore_queue_cid *cid;
	struct ecore_rss_params *t_rss;

	/* In regular scenario, we'd simply need to take input handlers.
	 * But in CMT, we'd have to split the handlers according to the
	 * engine they were configured on. We'd then have to understand
	 * whether RSS is really required, since 2-queues on CMT doesn't
	 * require RSS.
	 */

	/* CMT should be round-robin */
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		cid = rss->rss_ind_table[i];
		if (cid->p_owner == ECORE_LEADING_HWFN(edev))
			t_rss = &rss[0];
		else
			t_rss = &rss[1];
		t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
	}

	t_rss = &rss[1];
	t_rss->update_rss_ind_table = 1;
	t_rss->rss_table_size_log = 7;
	t_rss->update_rss_config = 1;

	/* Make sure RSS is actually required */
	for_each_hwfn(edev, fn) {
		for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
		     i++) {
			if (rss[fn].rss_ind_table[i] !=
			    rss[fn].rss_ind_table[0])
				break;
		}
		if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
			DP_INFO(edev,
				"CMT - 1 queue per-hwfn; Disabling RSS\n");
			rss_mode = 0;
			goto out;
		}
	}

out:
	t_rss->rss_enable = rss_mode;
	return rss_mode;
}
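
/* Each rte_eth_rss_reta_entry64 group covers RTE_RETA_GROUP_SIZE (64)
 * entries, so RETA entry i lives at reta_conf[i / 64].reta[i % 64] and is
 * only applied when bit (i % 64) is set in that group's mask; e.g. entry
 * 70 is reta_conf[1].reta[6].
 */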
int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params *params;
	struct ecore_hwfn *p_hwfn;
	uint16_t i, idx, shift;
	uint8_t entry;
	int rc = 0;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
		       reta_size);
		return -EINVAL;
	}

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
			     RTE_CACHE_LINE_SIZE);
	if (params == NULL) {
		DP_ERR(edev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = reta_conf[idx].reta[shift];
			/* Pass rxq handles to ecore */
			params->rss_ind_table[i] =
				qdev->fp_array[entry].rxq->handle;
			/* Update the local copy for RETA query command */
			qdev->rss_ind_table[i] = entry;
		}
	}

	params->update_rss_ind_table = 1;
	params->rss_table_size_log = 7;
	params->update_rss_config = 1;

	/* Fix up RETA for CMT mode device */
	if (ECORE_IS_CMT(edev))
		qdev->rss_enable = qede_update_rss_parm_cmt(edev,
							    params);
	vport_update_params.vport_id = 0;
	/* Use the current value of rss_enable */
	params->rss_enable = qdev->rss_enable;
	vport_update_params.rss_params = params;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			goto out;
		}
	}

out:
	rte_free(params);
	return rc;
}

static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t i, idx, shift;
	uint8_t entry;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported\n",
		       reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = qdev->rss_ind_table[i];
			reta_conf[idx].reta[shift] = entry;
		}
	}

	return 0;
}
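
/* Changing the MTU requires re-sizing the Rx buffers, so the port is
 * quiesced first: the burst handlers are parked on a dummy function, the
 * port is stopped if it was running, every Rx queue's buffer size is
 * recomputed for the new frame size, and then the port is restarted.
 */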
static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_dev_info dev_info = {0};
	struct qede_fastpath *fp;
	uint32_t max_rx_pkt_len;
	uint32_t frame_size;
	uint16_t bufsz;
	bool restart = false;
	int i, rc;

	PMD_INIT_FUNC_TRACE(edev);
	qede_dev_info_get(dev, &dev_info);
	max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
	frame_size = max_rx_pkt_len;
	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
		DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
		       mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
		       QEDE_ETH_OVERHEAD);
		return -EINVAL;
	}
	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
			dev->data->min_rx_buf_size);
		return -EINVAL;
	}
	/* Temporarily replace I/O functions with dummy ones. It cannot
	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
	 */
	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
	if (dev->data->dev_started) {
		dev->data->dev_started = 0;
		qede_dev_stop(dev);
		restart = true;
	}
	rte_delay_ms(1000);
	qdev->mtu = mtu;

	/* Fix up RX buf size for all queues of the port */
	for (i = 0; i < qdev->num_rx_queues; i++) {
		fp = &qdev->fp_array[i];
		if (fp->rxq != NULL) {
			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
			/* cache align the mbuf size to simplify rx_buf_size
			 * calculation
			 */
			bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
			rc = qede_calc_rx_buf_size(dev, bufsz, frame_size);
			if (rc < 0)
				return rc;

			fp->rxq->rx_buf_size = rc;
		}
	}
	if (max_rx_pkt_len > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	if (!dev->data->dev_started && restart) {
		qede_dev_start(dev);
		dev->data->dev_started = 1;
	}

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;

	dev->rx_pkt_burst = qede_recv_pkts;
	dev->tx_pkt_burst = qede_xmit_pkts;

	return 0;
}
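
/* dev_reset is implemented as a full uninit/init cycle of the port. */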
static int
qede_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = qede_eth_dev_uninit(dev);
	if (ret)
		return ret;

	return qede_eth_dev_init(dev);
}

static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.rx_descriptor_status = qede_rx_descriptor_status,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_reset = qede_dev_reset,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.filter_ctrl = qede_dev_filter_ctrl,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
};
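
/* VF flavour of the ops table: the same entries minus flow control and
 * filter_ctrl, which are not wired up for VFs here.
 */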
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.rx_descriptor_status = qede_rx_descriptor_status,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_reset = qede_dev_reset,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
};

static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(edev, &pf_params);
}
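
/* Common probe-time initialization shared by the PF and VF init paths;
 * is_vf selects the VF ops table, VF MAC retrieval from the PF bulletin
 * board, and the default tunnel-offload state.
 */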
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	uint32_t int_mode;
	int rc;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	adapter->ethdev = eth_dev;
	edev = &adapter->edev;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	pci_addr = pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);

	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	eth_dev->rx_pkt_burst = qede_recv_pkts;
	eth_dev->tx_pkt_burst = qede_xmit_pkts;
	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_ERR(edev, "Skipping device init from secondary process\n");
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* @DPDK */
	edev->vendor_id = pci_dev->id.vendor_id;
	edev->device_id = pci_dev->id.device_id;

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		return -EINVAL;
	}

	DP_INFO(edev, "Starting qede probe\n");
	rc = qed_ops->common->probe(edev, pci_dev, dp_module,
				    dp_level, is_vf);
	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		return -ENODEV;
	}
	qede_update_pf_params(edev);

	switch (pci_dev->intr_handle.type) {
	case RTE_INTR_HANDLE_UIO_INTX:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		int_mode = ECORE_INT_MODE_INTA;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler_intx,
					   (void *)eth_dev);
		break;
	default:
		int_mode = ECORE_INT_MODE_MSIX;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler,
					   (void *)eth_dev);
	}

	if (rte_intr_enable(&pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		return -ENODEV;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));
	params.int_mode = int_mode;
	params.drv_major = QEDE_PMD_VERSION_MAJOR;
	params.drv_minor = QEDE_PMD_VERSION_MINOR;
	params.drv_rev = QEDE_PMD_VERSION_REVISION;
	params.drv_eng = QEDE_PMD_VERSION_PATCH;
	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
		QEDE_PMD_DRV_VER_STR_SIZE);

	/* For CMT mode device do periodic polling for slowpath events.
	 * This is required since uio device uses only one MSI-x
	 * interrupt vector but we need one for each engine.
	 */
	if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
		rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
				       qede_poll_sp_sb_cb,
				       (void *)eth_dev);
		if (rc != 0) {
			DP_ERR(edev, "Unable to start periodic"
				     " timer rc %d\n", rc);
			return -EINVAL;
		}
	}

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	qede_alloc_etherdev(adapter, &dev_info);

	adapter->ops->common->set_name(edev, edev->name);

	if (!is_vf)
		adapter->dev_info.num_mac_filters =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					   ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
			(uint32_t *)&adapter->dev_info.num_mac_filters);

	/* Allocate memory for storing MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					(ETHER_ADDR_LEN *
					adapter->dev_info.num_mac_filters),
					RTE_CACHE_LINE_SIZE);

	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENOMEM;
	}

	if (!is_vf) {
		ether_addr_copy((struct ether_addr *)edev->hwfns[0].
				hw_info.hw_mac_addr,
				&eth_dev->data->mac_addrs[0]);
		ether_addr_copy(&eth_dev->data->mac_addrs[0],
				&adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				ether_addr_copy((struct ether_addr *)&vf_mac,
						&eth_dev->data->mac_addrs[0]);
				ether_addr_copy(&eth_dev->data->mac_addrs[0],
						&adapter->primary_mac);
			} else {
				DP_ERR(edev, "No VF macaddr assigned\n");
			}
		}
	}

	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;

	if (do_once) {
		qede_print_adapter_info(adapter);
		do_once = false;
	}

	/* Bring-up the link */
	qede_dev_set_link_state(eth_dev, true);

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	SLIST_INIT(&adapter->arfs_info.arfs_list_head);
	SLIST_INIT(&adapter->vlan_list_head);
	SLIST_INIT(&adapter->uc_list_head);
	SLIST_INIT(&adapter->mc_list_head);
	adapter->mtu = ETHER_MTU;
	adapter->vport_started = false;

	/* VF tunnel offloads are enabled by default in the PF driver */
	adapter->vxlan.num_filters = 0;
	adapter->geneve.num_filters = 0;
	adapter->ipgre.num_filters = 0;
	if (is_vf) {
		adapter->vxlan.enable = true;
		adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
					     ETH_TUNNEL_FILTER_IVLAN;
		adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
		adapter->geneve.enable = true;
		adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
					      ETH_TUNNEL_FILTER_IVLAN;
		adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
		adapter->ipgre.enable = true;
		adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
					     ETH_TUNNEL_FILTER_IVLAN;
	} else {
		adapter->vxlan.enable = false;
		adapter->geneve.enable = false;
		adapter->ipgre.enable = false;
	}

	DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
		adapter->primary_mac.addr_bytes[0],
		adapter->primary_mac.addr_bytes[1],
		adapter->primary_mac.addr_bytes[2],
		adapter->primary_mac.addr_bytes[3],
		adapter->primary_mac.addr_bytes[4],
		adapter->primary_mac.addr_bytes[5]);

	DP_INFO(edev, "Device initialized\n");

	return 0;
}
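
/* Thin PF/VF wrappers so the PCI probe glue below gets distinct init hooks. */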
static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 0);
}

static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF) },
	{ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV) },
	{ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV) },
	{ .vendor_id = 0, }
};

static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G) },
	{ .vendor_id = 0, }
};
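
/* PCI probe/remove glue: the rte_eth_dev_pci_generic_* helpers allocate
 * and free the ethdev with sizeof(struct qede_dev) bytes of private data
 * and invoke the init/uninit hooks defined above.
 */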
static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				    struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct qede_dev),
					     qedevf_eth_dev_init);
}

static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
}

static struct rte_pci_driver rte_qedevf_pmd = {
	.id_table = pci_id_qedevf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = qedevf_eth_dev_pci_probe,
	.remove = qedevf_eth_dev_pci_remove,
};

static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				  struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct qede_dev),
					     qede_eth_dev_init);
}

static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
}

static struct rte_pci_driver rte_qede_pmd = {
	.id_table = pci_id_qede_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = qede_eth_dev_pci_probe,
	.remove = qede_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
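
/* Register the two dynamic log types at constructor time; both default to
 * NOTICE level unless overridden (e.g. via the EAL --log-level option).
 */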
RTE_INIT(qede_init_log)
{
	qede_logtype_init = rte_log_register("pmd.net.qede.init");
	if (qede_logtype_init >= 0)
		rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
	qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
	if (qede_logtype_driver >= 0)
		rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
}