1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2016 - 2018 Cavium Inc.
7 #include "qede_ethdev.h"
8 #include <rte_string_fns.h>
10 #include <rte_version.h>
11 #include <rte_kvargs.h>
14 int qede_logtype_init;
15 int qede_logtype_driver;
17 static const struct qed_eth_ops *qed_ops;
18 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev);
19 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev);
21 #define QEDE_SP_TIMER_PERIOD 10000 /* 10ms; rte_eal_alarm_set() period is in us */
23 struct rte_qede_xstats_name_off {
24 char name[RTE_ETH_XSTATS_NAME_SIZE];
25 uint64_t offset;
26 };
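/*
 * Illustrative sketch (compiled out): each entry in the tables below names
 * one 64-bit counter by its byte offset inside the stats struct, and
 * qede_get_xstats() resolves it exactly this way. qede_xstat_read() is a
 * hypothetical helper, not driver code.
 */
#if 0
static uint64_t
qede_xstat_read(const void *stats_base,
		const struct rte_qede_xstats_name_off *entry)
{
	/* the counter lives entry->offset bytes past the stats base */
	return *(const uint64_t *)((const char *)stats_base + entry->offset);
}
#endif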
28 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
29 {"rx_unicast_bytes",
30 offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
31 {"rx_multicast_bytes",
32 offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
33 {"rx_broadcast_bytes",
34 offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
35 {"rx_unicast_packets",
36 offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
37 {"rx_multicast_packets",
38 offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
39 {"rx_broadcast_packets",
40 offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},
42 {"tx_unicast_bytes",
43 offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
44 {"tx_multicast_bytes",
45 offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
46 {"tx_broadcast_bytes",
47 offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
48 {"tx_unicast_packets",
49 offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
50 {"tx_multicast_packets",
51 offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
52 {"tx_broadcast_packets",
53 offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},
55 {"rx_64_byte_packets",
56 offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
57 {"rx_65_to_127_byte_packets",
58 offsetof(struct ecore_eth_stats_common,
59 rx_65_to_127_byte_packets)},
60 {"rx_128_to_255_byte_packets",
61 offsetof(struct ecore_eth_stats_common,
62 rx_128_to_255_byte_packets)},
63 {"rx_256_to_511_byte_packets",
64 offsetof(struct ecore_eth_stats_common,
65 rx_256_to_511_byte_packets)},
66 {"rx_512_to_1023_byte_packets",
67 offsetof(struct ecore_eth_stats_common,
68 rx_512_to_1023_byte_packets)},
69 {"rx_1024_to_1518_byte_packets",
70 offsetof(struct ecore_eth_stats_common,
71 rx_1024_to_1518_byte_packets)},
72 {"tx_64_byte_packets",
73 offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
74 {"tx_65_to_127_byte_packets",
75 offsetof(struct ecore_eth_stats_common,
76 tx_65_to_127_byte_packets)},
77 {"tx_128_to_255_byte_packets",
78 offsetof(struct ecore_eth_stats_common,
79 tx_128_to_255_byte_packets)},
80 {"tx_256_to_511_byte_packets",
81 offsetof(struct ecore_eth_stats_common,
82 tx_256_to_511_byte_packets)},
83 {"tx_512_to_1023_byte_packets",
84 offsetof(struct ecore_eth_stats_common,
85 tx_512_to_1023_byte_packets)},
86 {"tx_1024_to_1518_byte_packets",
87 offsetof(struct ecore_eth_stats_common,
88 tx_1024_to_1518_byte_packets)},
90 {"rx_mac_control_frames", /* the ecore field keeps the historical "crtl" spelling */
91 offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
92 {"tx_mac_control_frames",
93 offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
94 {"rx_pause_frames",
95 offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
96 {"tx_pause_frames",
97 offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
98 {"rx_priority_flow_control_frames",
99 offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
100 {"tx_priority_flow_control_frames",
101 offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},
103 {"rx_crc_errors",
104 offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
105 {"rx_align_errors",
106 offsetof(struct ecore_eth_stats_common, rx_align_errors)},
107 {"rx_carrier_errors",
108 offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
109 {"rx_oversize_packet_errors",
110 offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
111 {"rx_jabbers",
112 offsetof(struct ecore_eth_stats_common, rx_jabbers)},
113 {"rx_undersize_packet_errors",
114 offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
115 {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
116 {"rx_host_buffer_not_available",
117 offsetof(struct ecore_eth_stats_common, no_buff_discards)},
118 /* Number of packets discarded because they are bigger than MTU */
119 {"rx_packet_too_big_discards",
120 offsetof(struct ecore_eth_stats_common,
121 packet_too_big_discard)},
122 {"rx_ttl_zero_discards",
123 offsetof(struct ecore_eth_stats_common, ttl0_discard)},
124 {"rx_multi_function_tag_filter_discards",
125 offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
126 {"rx_mac_filter_discards",
127 offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
128 {"rx_gft_filter_drop",
129 offsetof(struct ecore_eth_stats_common, gft_filter_drop)},
130 {"rx_hw_buffer_truncates",
131 offsetof(struct ecore_eth_stats_common, brb_truncates)},
132 {"rx_hw_buffer_discards",
133 offsetof(struct ecore_eth_stats_common, brb_discards)},
134 {"tx_error_drop_packets",
135 offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},
137 {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
138 {"rx_mac_unicast_packets",
139 offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
140 {"rx_mac_multicast_packets",
141 offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
142 {"rx_mac_broadcast_packets",
143 offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
144 {"rx_mac_frames_ok",
145 offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
146 {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
147 {"tx_mac_unicast_packets",
148 offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
149 {"tx_mac_multicast_packets",
150 offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
151 {"tx_mac_broadcast_packets",
152 offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},
154 {"lro_coalesced_packets",
155 offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
156 {"lro_coalesced_events",
157 offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
158 {"lro_aborts_num",
159 offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
160 {"lro_not_coalesced_packets",
161 offsetof(struct ecore_eth_stats_common,
162 tpa_not_coalesced_pkts)},
163 {"lro_coalesced_bytes",
164 offsetof(struct ecore_eth_stats_common,
165 tpa_coalesced_bytes)},
166 };
168 static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
169 {"rx_1519_to_1522_byte_packets",
170 offsetof(struct ecore_eth_stats, bb) +
171 offsetof(struct ecore_eth_stats_bb,
172 rx_1519_to_1522_byte_packets)},
173 {"rx_1519_to_2047_byte_packets",
174 offsetof(struct ecore_eth_stats, bb) +
175 offsetof(struct ecore_eth_stats_bb,
176 rx_1519_to_2047_byte_packets)},
177 {"rx_2048_to_4095_byte_packets",
178 offsetof(struct ecore_eth_stats, bb) +
179 offsetof(struct ecore_eth_stats_bb,
180 rx_2048_to_4095_byte_packets)},
181 {"rx_4096_to_9216_byte_packets",
182 offsetof(struct ecore_eth_stats, bb) +
183 offsetof(struct ecore_eth_stats_bb,
184 rx_4096_to_9216_byte_packets)},
185 {"rx_9217_to_16383_byte_packets",
186 offsetof(struct ecore_eth_stats, bb) +
187 offsetof(struct ecore_eth_stats_bb,
188 rx_9217_to_16383_byte_packets)},
190 {"tx_1519_to_2047_byte_packets",
191 offsetof(struct ecore_eth_stats, bb) +
192 offsetof(struct ecore_eth_stats_bb,
193 tx_1519_to_2047_byte_packets)},
194 {"tx_2048_to_4095_byte_packets",
195 offsetof(struct ecore_eth_stats, bb) +
196 offsetof(struct ecore_eth_stats_bb,
197 tx_2048_to_4095_byte_packets)},
198 {"tx_4096_to_9216_byte_packets",
199 offsetof(struct ecore_eth_stats, bb) +
200 offsetof(struct ecore_eth_stats_bb,
201 tx_4096_to_9216_byte_packets)},
202 {"tx_9217_to_16383_byte_packets",
203 offsetof(struct ecore_eth_stats, bb) +
204 offsetof(struct ecore_eth_stats_bb,
205 tx_9217_to_16383_byte_packets)},
207 {"tx_lpi_entry_count",
208 offsetof(struct ecore_eth_stats, bb) +
209 offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
210 {"tx_total_collisions",
211 offsetof(struct ecore_eth_stats, bb) +
212 offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
213 };
215 static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
216 {"rx_1519_to_max_byte_packets",
217 offsetof(struct ecore_eth_stats, ah) +
218 offsetof(struct ecore_eth_stats_ah,
219 rx_1519_to_max_byte_packets)},
220 {"tx_1519_to_max_byte_packets",
221 offsetof(struct ecore_eth_stats, ah) +
222 offsetof(struct ecore_eth_stats_ah,
223 tx_1519_to_max_byte_packets)},
224 };
226 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
227 {"rx_q_segments",
228 offsetof(struct qede_rx_queue, rx_segs)},
229 {"rx_q_hw_errors",
230 offsetof(struct qede_rx_queue, rx_hw_errors)},
231 {"rx_q_allocation_errors",
232 offsetof(struct qede_rx_queue, rx_alloc_errors)}
233 };
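/*
 * Note: the per-queue entries above are name templates. For display,
 * qede_get_xstats_names() expands each template once per Rx queue,
 * splicing the queue index (and the engine index on CMT devices) into
 * the "rx_q" prefix.
 */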
235 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
237 ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
241 qede_interrupt_handler_intx(void *param)
243 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
244 struct qede_dev *qdev = eth_dev->data->dev_private;
245 struct ecore_dev *edev = &qdev->edev;
248 /* Check if our device actually raised an interrupt */
249 status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev));
250 if (status & 0x1) {
251 qede_interrupt_action(ECORE_LEADING_HWFN(edev));
253 if (rte_intr_ack(eth_dev->intr_handle))
254 DP_ERR(edev, "rte_intr_ack failed\n");
259 qede_interrupt_handler(void *param)
261 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
262 struct qede_dev *qdev = eth_dev->data->dev_private;
263 struct ecore_dev *edev = &qdev->edev;
265 qede_interrupt_action(ECORE_LEADING_HWFN(edev));
266 if (rte_intr_ack(eth_dev->intr_handle))
267 DP_ERR(edev, "rte_intr_ack failed\n");
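/*
 * Illustrative sketch (compiled out): qede_eth_dev_init() pairs these
 * handlers with the interrupt type roughly as below, mirroring the
 * unregister calls in qede_dev_close(). Legacy INTx needs the SISR read
 * above to confirm the shared line was ours; MSI/MSI-X does not.
 */
#if 0
	if (pci_dev->intr_handle.type == RTE_INTR_HANDLE_UIO_INTX ||
	    pci_dev->intr_handle.type == RTE_INTR_HANDLE_VFIO_LEGACY)
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler_intx,
					   (void *)eth_dev);
	else
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler,
					   (void *)eth_dev);
	rte_intr_enable(&pci_dev->intr_handle);
#endif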
271 qede_assign_rxtx_handlers(struct rte_eth_dev *dev)
273 struct qede_dev *qdev = dev->data->dev_private;
274 struct ecore_dev *edev = &qdev->edev;
276 if (ECORE_IS_CMT(edev)) {
277 dev->rx_pkt_burst = qede_recv_pkts_cmt;
278 dev->tx_pkt_burst = qede_xmit_pkts_cmt;
279 return;
280 }
282 if (dev->data->lro || dev->data->scattered_rx) {
283 DP_INFO(edev, "Assigning qede_recv_pkts\n");
284 dev->rx_pkt_burst = qede_recv_pkts;
285 } else {
286 DP_INFO(edev, "Assigning qede_recv_pkts_regular\n");
287 dev->rx_pkt_burst = qede_recv_pkts_regular;
290 dev->tx_pkt_burst = qede_xmit_pkts;
294 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
296 rte_memcpy(&qdev->dev_info, info, sizeof(*info));
300 static void qede_print_adapter_info(struct qede_dev *qdev)
302 struct ecore_dev *edev = &qdev->edev;
303 struct qed_dev_info *info = &qdev->dev_info.common;
304 static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
306 DP_INFO(edev, "**************************************************\n");
307 DP_INFO(edev, " DPDK version\t\t\t: %s\n", rte_version());
308 DP_INFO(edev, " Chip details\t\t\t: %s %c%d\n",
309 ECORE_IS_BB(edev) ? "BB" : "AH",
310 'A' + edev->chip_rev,
311 (int)edev->chip_metal);
312 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
313 QEDE_PMD_DRV_VERSION);
314 DP_INFO(edev, " Driver version\t\t\t: %s\n", ver_str);
316 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
317 QEDE_PMD_BASE_VERSION);
318 DP_INFO(edev, " Base version\t\t\t: %s\n", ver_str);
321 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
322 QEDE_PMD_FW_VERSION);
324 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
325 info->fw_major, info->fw_minor,
326 info->fw_rev, info->fw_eng);
327 DP_INFO(edev, " Firmware version\t\t\t: %s\n", ver_str);
329 snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
330 "%d.%d.%d.%d",
331 (info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
332 QED_MFW_VERSION_3_OFFSET,
333 (info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
334 QED_MFW_VERSION_2_OFFSET,
335 (info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
336 QED_MFW_VERSION_1_OFFSET,
337 (info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
338 QED_MFW_VERSION_0_OFFSET);
339 DP_INFO(edev, " Management Firmware version\t: %s\n", ver_str);
340 DP_INFO(edev, " Firmware file\t\t\t: %s\n", qede_fw_file);
341 DP_INFO(edev, "**************************************************\n");
344 static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
346 struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
347 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
348 unsigned int i = 0, j = 0, qid;
349 unsigned int rxq_stat_cntrs, txq_stat_cntrs;
350 struct qede_tx_queue *txq;
352 DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
354 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev),
355 RTE_ETHDEV_QUEUE_STAT_CNTRS);
356 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(dev),
357 RTE_ETHDEV_QUEUE_STAT_CNTRS);
359 for (qid = 0; qid < qdev->num_rx_queues; qid++) {
360 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
361 offsetof(struct qede_rx_queue, rcv_pkts), 0,
363 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
364 offsetof(struct qede_rx_queue, rx_hw_errors), 0,
366 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
367 offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
371 for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
372 OSAL_MEMSET((((char *)
373 (qdev->fp_array[qid].rxq)) +
374 qede_rxq_xstats_strings[j].offset),
379 if (i == rxq_stat_cntrs)
380 break;
385 for (qid = 0; qid < qdev->num_tx_queues; qid++) {
386 txq = qdev->fp_array[qid].txq;
388 OSAL_MEMSET((uint64_t *)(uintptr_t)
389 (((uint64_t)(uintptr_t)(txq)) +
390 offsetof(struct qede_tx_queue, xmit_pkts)), 0,
394 if (i == txq_stat_cntrs)
395 break;
400 qede_stop_vport(struct ecore_dev *edev)
402 struct ecore_hwfn *p_hwfn;
408 for_each_hwfn(edev, i) {
409 p_hwfn = &edev->hwfns[i];
410 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
412 if (rc != ECORE_SUCCESS) {
413 DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
418 DP_INFO(edev, "vport stopped\n");
424 qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
426 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
427 struct ecore_sp_vport_start_params params;
428 struct ecore_hwfn *p_hwfn;
432 if (qdev->vport_started)
433 qede_stop_vport(edev);
435 memset(&params, 0, sizeof(params));
438 /* @DPDK - Disable FW placement */
439 params.zero_placement_offset = 1;
440 for_each_hwfn(edev, i) {
441 p_hwfn = &edev->hwfns[i];
442 params.concrete_fid = p_hwfn->hw_info.concrete_fid;
443 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
444 rc = ecore_sp_vport_start(p_hwfn, &params);
445 if (rc != ECORE_SUCCESS) {
446 DP_ERR(edev, "Start V-PORT failed %d\n", rc);
450 ecore_reset_vport_stats(edev);
451 qdev->vport_started = true;
452 DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
457 #define QEDE_NPAR_TX_SWITCHING "npar_tx_switching"
458 #define QEDE_VF_TX_SWITCHING "vf_tx_switching"
460 /* Activate or deactivate vport via vport-update */
461 int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
463 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
464 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
465 struct ecore_sp_vport_update_params params;
466 struct ecore_hwfn *p_hwfn;
470 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
472 params.update_vport_active_rx_flg = 1;
473 params.update_vport_active_tx_flg = 1;
474 params.vport_active_rx_flg = flg;
475 params.vport_active_tx_flg = flg;
476 if (!qdev->enable_tx_switching && flg) {
477 params.update_tx_switching_flg = 1;
478 params.tx_switching_flg = !flg;
480 for_each_hwfn(edev, i) {
481 p_hwfn = &edev->hwfns[i];
482 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
483 rc = ecore_sp_vport_update(p_hwfn, &params,
484 ECORE_SPQ_MODE_EBLOCK, NULL);
485 if (rc != ECORE_SUCCESS) {
486 DP_ERR(edev, "Failed to update vport\n");
490 DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");
496 qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
497 uint16_t mtu, bool enable)
499 /* Enable LRO in split mode */
500 sge_tpa_params->tpa_ipv4_en_flg = enable;
501 sge_tpa_params->tpa_ipv6_en_flg = enable;
502 sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
503 sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
504 /* set if tpa enable changes */
505 sge_tpa_params->update_tpa_en_flg = 1;
506 /* set if tpa parameters should be handled */
507 sge_tpa_params->update_tpa_param_flg = enable;
509 sge_tpa_params->max_buffers_per_cqe = 20;
510 /* Enable TPA in split mode. In this mode each TPA segment
511  * starts on a new BD, so there is one BD per segment.
512  */
513 sge_tpa_params->tpa_pkt_split_flg = 1;
514 sge_tpa_params->tpa_hdr_data_split_flg = 0;
515 sge_tpa_params->tpa_gro_consistent_flg = 0;
516 sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
517 sge_tpa_params->tpa_max_size = 0x7FFF;
518 sge_tpa_params->tpa_min_size_to_start = mtu / 2;
519 sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
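/*
 * Worked example of the thresholds above: with an MTU of 1500, an
 * aggregation starts and continues once 750 bytes (mtu / 2) have been
 * buffered, and is capped at tpa_max_size = 0x7FFF (32767) bytes.
 */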
522 /* Enable/disable LRO via vport-update */
523 int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
525 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
526 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
527 struct ecore_sp_vport_update_params params;
528 struct ecore_sge_tpa_params tpa_params;
529 struct ecore_hwfn *p_hwfn;
533 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
534 memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
535 qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
537 params.sge_tpa_params = &tpa_params;
538 for_each_hwfn(edev, i) {
539 p_hwfn = &edev->hwfns[i];
540 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
541 rc = ecore_sp_vport_update(p_hwfn, &params,
542 ECORE_SPQ_MODE_EBLOCK, NULL);
543 if (rc != ECORE_SUCCESS) {
544 DP_ERR(edev, "Failed to update LRO\n");
548 qdev->enable_lro = flg;
549 eth_dev->data->lro = flg;
551 DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
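/*
 * Illustrative application-side sketch (compiled out): LRO is requested
 * through the ethdev Rx offload flags, which routes into qede_enable_tpa()
 * above. port_id and the queue counts are hypothetical.
 */
#if 0
	struct rte_eth_conf port_conf = {0};

	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_LRO;
	if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) != 0)
		rte_exit(EXIT_FAILURE, "Cannot configure port for LRO\n");
#endif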
557 qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
558 enum qed_filter_rx_mode_type type)
560 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
561 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
562 struct ecore_filter_accept_flags flags;
564 memset(&flags, 0, sizeof(flags));
566 flags.update_rx_mode_config = 1;
567 flags.update_tx_mode_config = 1;
568 flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
569 ECORE_ACCEPT_MCAST_MATCHED |
570 ECORE_ACCEPT_BCAST;
572 flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
573 ECORE_ACCEPT_MCAST_MATCHED |
574 ECORE_ACCEPT_BCAST;
576 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
577 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
579 flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
580 DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
582 } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
583 flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
584 } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
585 QED_FILTER_RX_MODE_TYPE_PROMISC)) {
586 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
587 ECORE_ACCEPT_MCAST_UNMATCHED;
590 return ecore_filter_accept_cmd(edev, 0, flags, false, false,
591 ECORE_SPQ_MODE_CB, NULL);
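/*
 * Summary of the accept-filter combinations built above (matched
 * unicast/multicast and broadcast are always accepted):
 *
 *   REGULAR                 : matched UCAST + matched MCAST + BCAST
 *   PROMISC                 : adds unmatched UCAST (Rx and, for VFs, Tx)
 *   MULTI_PROMISC           : adds unmatched MCAST
 *   PROMISC | MULTI_PROMISC : adds unmatched UCAST and unmatched MCAST
 */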
595 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
598 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
599 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
600 struct qede_ucast_entry *tmp = NULL;
601 struct qede_ucast_entry *u;
602 struct rte_ether_addr *mac_addr;
604 mac_addr = (struct rte_ether_addr *)ucast->mac;
606 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
607 if ((memcmp(mac_addr, &tmp->mac,
608 RTE_ETHER_ADDR_LEN) == 0) &&
609 ucast->vni == tmp->vni &&
610 ucast->vlan == tmp->vlan) {
611 DP_INFO(edev, "Unicast MAC is already added"
612 " with vlan = %u, vni = %u\n",
613 ucast->vlan, ucast->vni);
617 u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
618 RTE_CACHE_LINE_SIZE);
619 if (!u) {
620 DP_ERR(edev, "Failed to allocate memory for unicast MAC entry\n");
623 rte_ether_addr_copy(mac_addr, &u->mac);
624 u->vlan = ucast->vlan;
626 SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
629 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
630 if ((memcmp(mac_addr, &tmp->mac,
631 RTE_ETHER_ADDR_LEN) == 0) &&
632 ucast->vlan == tmp->vlan &&
633 ucast->vni == tmp->vni)
634 break;
635 }
636 if (tmp == NULL) {
637 DP_INFO(edev, "Unicast MAC not found\n");
640 SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
648 qede_add_mcast_filters(struct rte_eth_dev *eth_dev,
649 struct rte_ether_addr *mc_addrs,
650 uint32_t mc_addrs_num)
652 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
653 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
654 struct ecore_filter_mcast mcast;
655 struct qede_mcast_entry *m = NULL;
659 for (i = 0; i < mc_addrs_num; i++) {
660 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
661 RTE_CACHE_LINE_SIZE);
662 if (!m) {
663 DP_ERR(edev, "Failed to allocate memory for multicast MAC entry\n");
666 rte_ether_addr_copy(&mc_addrs[i], &m->mac);
667 SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
669 memset(&mcast, 0, sizeof(mcast));
670 mcast.num_mc_addrs = mc_addrs_num;
671 mcast.opcode = ECORE_FILTER_ADD;
672 for (i = 0; i < mc_addrs_num; i++)
673 rte_ether_addr_copy(&mc_addrs[i], (struct rte_ether_addr *)
674 &mcast.mac[i]);
675 rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
676 if (rc != ECORE_SUCCESS) {
677 DP_ERR(edev, "Failed to add multicast filter (rc = %d)\n", rc);
684 static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
686 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
687 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
688 struct qede_mcast_entry *tmp = NULL;
689 struct ecore_filter_mcast mcast;
693 memset(&mcast, 0, sizeof(mcast));
694 mcast.num_mc_addrs = qdev->num_mc_addr;
695 mcast.opcode = ECORE_FILTER_REMOVE;
697 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
698 rte_ether_addr_copy(&tmp->mac,
699 (struct rte_ether_addr *)&mcast.mac[j]);
700 j++;
701 }
702 rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
703 if (rc != ECORE_SUCCESS) {
704 DP_ERR(edev, "Failed to delete multicast filter\n");
708 while (!SLIST_EMPTY(&qdev->mc_list_head)) {
709 tmp = SLIST_FIRST(&qdev->mc_list_head);
710 SLIST_REMOVE_HEAD(&qdev->mc_list_head, list);
712 SLIST_INIT(&qdev->mc_list_head);
718 qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
721 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
722 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
723 enum _ecore_status_t rc = ECORE_INVAL;
725 if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) {
726 DP_ERR(edev, "Ucast filter table limit exceeded,"
727 " please enable promisc mode\n");
731 rc = qede_ucast_filter(eth_dev, ucast, add);
733 rc = ecore_filter_ucast_cmd(edev, ucast,
734 ECORE_SPQ_MODE_CB, NULL);
735 /* Indicate error only for add filter operation.
736 * Delete filter operations are not severe.
738 if ((rc != ECORE_SUCCESS) && add)
739 DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
746 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr,
747 __rte_unused uint32_t index, __rte_unused uint32_t pool)
749 struct ecore_filter_ucast ucast;
752 if (!rte_is_valid_assigned_ether_addr(mac_addr))
753 return -EINVAL;
755 qede_set_ucast_cmn_params(&ucast);
756 ucast.opcode = ECORE_FILTER_ADD;
757 ucast.type = ECORE_FILTER_MAC;
758 rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)&ucast.mac);
759 re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
760 return re;
764 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
766 struct qede_dev *qdev = eth_dev->data->dev_private;
767 struct ecore_dev *edev = &qdev->edev;
768 struct ecore_filter_ucast ucast;
770 PMD_INIT_FUNC_TRACE(edev);
772 if (index >= qdev->dev_info.num_mac_filters) {
773 DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
774 index, qdev->dev_info.num_mac_filters);
778 if (!rte_is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
779 return;
781 qede_set_ucast_cmn_params(&ucast);
782 ucast.opcode = ECORE_FILTER_REMOVE;
783 ucast.type = ECORE_FILTER_MAC;
785 /* Use the index maintained by rte */
786 rte_ether_addr_copy(&eth_dev->data->mac_addrs[index],
787 (struct rte_ether_addr *)&ucast.mac);
789 qede_mac_int_ops(eth_dev, &ucast, false);
793 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr)
795 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
796 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
798 if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
799 mac_addr->addr_bytes)) {
800 DP_ERR(edev, "Setting MAC address is not allowed\n");
804 qede_mac_addr_remove(eth_dev, 0);
806 return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
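/*
 * Illustrative application-side sketch (compiled out): this op backs
 * rte_eth_dev_default_mac_addr_set(). The address below is a hypothetical
 * locally administered unicast MAC.
 */
#if 0
	struct rte_ether_addr new_mac = {
		.addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01}
	};

	rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
#endif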
809 void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
811 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
812 struct ecore_sp_vport_update_params params;
813 struct ecore_hwfn *p_hwfn;
817 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
819 params.update_accept_any_vlan_flg = 1;
820 params.accept_any_vlan = flg;
821 for_each_hwfn(edev, i) {
822 p_hwfn = &edev->hwfns[i];
823 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
824 rc = ecore_sp_vport_update(p_hwfn, &params,
825 ECORE_SPQ_MODE_EBLOCK, NULL);
826 if (rc != ECORE_SUCCESS) {
827 DP_ERR(edev, "Failed to configure accept-any-vlan\n");
832 DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
835 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
837 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
838 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
839 struct ecore_sp_vport_update_params params;
840 struct ecore_hwfn *p_hwfn;
844 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
846 params.update_inner_vlan_removal_flg = 1;
847 params.inner_vlan_removal_flg = flg;
848 for_each_hwfn(edev, i) {
849 p_hwfn = &edev->hwfns[i];
850 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
851 rc = ecore_sp_vport_update(p_hwfn, &params,
852 ECORE_SPQ_MODE_EBLOCK, NULL);
853 if (rc != ECORE_SUCCESS) {
854 DP_ERR(edev, "Failed to update vport\n");
859 qdev->vlan_strip_flg = flg;
861 DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
865 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
866 uint16_t vlan_id, int on)
868 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
869 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
870 struct qed_dev_eth_info *dev_info = &qdev->dev_info;
871 struct qede_vlan_entry *tmp = NULL;
872 struct qede_vlan_entry *vlan;
873 struct ecore_filter_ucast ucast;
877 if (qdev->configured_vlans == dev_info->num_vlan_filters) {
878 DP_ERR(edev, "Reached max VLAN filter limit,"
879 " enabling accept_any_vlan\n");
880 qede_config_accept_any_vlan(qdev, true);
884 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
885 if (tmp->vid == vlan_id) {
886 DP_INFO(edev, "VLAN %u already configured\n",
892 vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
893 RTE_CACHE_LINE_SIZE);
895 if (!vlan) {
896 DP_ERR(edev, "Failed to allocate memory for VLAN entry\n");
900 qede_set_ucast_cmn_params(&ucast);
901 ucast.opcode = ECORE_FILTER_ADD;
902 ucast.type = ECORE_FILTER_VLAN;
903 ucast.vlan = vlan_id;
904 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
905 NULL);
906 if (rc != 0) {
907 DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
912 SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
913 qdev->configured_vlans++;
914 DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
915 vlan_id, qdev->configured_vlans);
918 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
919 if (tmp->vid == vlan_id)
924 if (qdev->configured_vlans == 0) {
925 DP_INFO(edev,
926 "No VLAN filters configured yet\n");
930 DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
934 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);
936 qede_set_ucast_cmn_params(&ucast);
937 ucast.opcode = ECORE_FILTER_REMOVE;
938 ucast.type = ECORE_FILTER_VLAN;
939 ucast.vlan = vlan_id;
940 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
941 NULL);
942 if (rc != 0) {
943 DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
946 qdev->configured_vlans--;
947 DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
948 vlan_id, qdev->configured_vlans);
955 static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
957 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
958 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
959 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
961 if (mask & ETH_VLAN_STRIP_MASK) {
962 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
963 (void)qede_vlan_stripping(eth_dev, 1);
964 else
965 (void)qede_vlan_stripping(eth_dev, 0);
968 if (mask & ETH_VLAN_FILTER_MASK) {
969 /* VLAN filtering kicks in when a VLAN is added */
970 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
971 qede_vlan_filter_set(eth_dev, 0, 1);
972 } else {
973 if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
974 DP_ERR(edev,
975 " Please remove existing VLAN filters"
976 " before disabling VLAN filtering\n");
977 /* Signal app that VLAN filtering is still
978  * enabled
979  */
980 eth_dev->data->dev_conf.rxmode.offloads |=
981 DEV_RX_OFFLOAD_VLAN_FILTER;
982 } else {
983 qede_vlan_filter_set(eth_dev, 0, 0);
988 if (mask & ETH_VLAN_EXTEND_MASK)
989 DP_ERR(edev, "Extend VLAN not supported\n");
991 qdev->vlan_offload_mask = mask;
993 DP_INFO(edev, "VLAN offload mask %d\n", mask);
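/*
 * Illustrative application-side sketch (compiled out): the ethdev layer
 * derives the changed-bits mask from the current settings and lands in
 * qede_vlan_offload_set() above. port_id is hypothetical.
 */
#if 0
	int cur = rte_eth_dev_get_vlan_offload(port_id);

	rte_eth_dev_set_vlan_offload(port_id, cur | ETH_VLAN_STRIP_OFFLOAD);
#endif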
998 static void qede_prandom_bytes(uint32_t *buff)
999 {
1000 uint8_t i;
1002 srand((unsigned int)time(NULL));
1003 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
1004 buff[i] = rand();
1005 }
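/*
 * A minimal alternative sketch (compiled out), assuming libc rand() is
 * considered too weak for the default RSS key: rte_rand() (rte_random.h)
 * is seeded by the EAL and needs no srand() call. Not the driver's code.
 */
#if 0
static void qede_prandom_bytes_alt(uint32_t *buff)
{
	uint8_t i;

	for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
		buff[i] = (uint32_t)rte_rand();
}
#endif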
1007 int qede_config_rss(struct rte_eth_dev *eth_dev)
1009 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1010 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1011 uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
1012 struct rte_eth_rss_reta_entry64 reta_conf[2];
1013 struct rte_eth_rss_conf rss_conf;
1014 uint32_t i, id, pos, q;
1016 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
1017 if (!rss_conf.rss_key) {
1018 DP_INFO(edev, "Applying driver default key\n");
1019 rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
1020 qede_prandom_bytes(&def_rss_key[0]);
1021 rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
1024 /* Configure RSS hash */
1025 if (qede_rss_hash_update(eth_dev, &rss_conf))
1028 /* Configure default RETA */
1029 memset(reta_conf, 0, sizeof(reta_conf));
1030 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
1031 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
1033 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
1034 id = i / RTE_RETA_GROUP_SIZE;
1035 pos = i % RTE_RETA_GROUP_SIZE;
1036 q = i % QEDE_RSS_COUNT(eth_dev);
1037 reta_conf[id].reta[pos] = q;
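/*
 * Worked example of the spread above: with ECORE_RSS_IND_TABLE_SIZE = 128
 * and 4 Rx queues, entry i maps to queue i % 4, i.e. 0,1,2,3,0,1,2,3,...
 * so flows are balanced round-robin across all queues.
 */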
1039 if (qede_rss_reta_update(eth_dev, &reta_conf[0],
1040 ECORE_RSS_IND_TABLE_SIZE))
1046 static void qede_fastpath_start(struct ecore_dev *edev)
1048 struct ecore_hwfn *p_hwfn;
1051 for_each_hwfn(edev, i) {
1052 p_hwfn = &edev->hwfns[i];
1053 ecore_hw_start_fastpath(p_hwfn);
1057 static int qede_dev_start(struct rte_eth_dev *eth_dev)
1059 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1060 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1061 struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1063 PMD_INIT_FUNC_TRACE(edev);
1065 /* Update MTU only if it has changed */
1066 if (qdev->new_mtu && qdev->new_mtu != qdev->mtu) {
1067 if (qede_update_mtu(eth_dev, qdev->new_mtu))
1068 goto err;
1069 qdev->mtu = qdev->new_mtu;
1073 /* Configure TPA parameters */
1074 if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1075 if (qede_enable_tpa(eth_dev, true))
1076 return -EINVAL;
1077 /* Enable scatter mode for LRO */
1078 if (!eth_dev->data->scattered_rx)
1079 rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
1083 if (qede_start_queues(eth_dev))
1084 goto err;
1087 qede_reset_queue_stats(qdev, true);
1089 /* Newer SR-IOV PF driver expects RX/TX queues to be started before
1090 * enabling RSS. Hence RSS configuration is deferred up to this point.
1091 * Also, we would like to retain similar behavior in PF case, so we
1092 * don't do PF/VF specific check here.
1094 if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
1095 if (qede_config_rss(eth_dev))
1096 goto err;
1099 if (qede_activate_vport(eth_dev, true))
1100 goto err;
1102 /* Update link status */
1103 qede_link_update(eth_dev, 0);
1105 /* Start/resume traffic */
1106 qede_fastpath_start(edev);
1108 qede_assign_rxtx_handlers(eth_dev);
1109 DP_INFO(edev, "Device started\n");
1110 return 0;
1112 err:
1113 DP_ERR(edev, "Device start failed\n");
1114 return -1; /* common error code is < 0 */
1117 static void qede_dev_stop(struct rte_eth_dev *eth_dev)
1119 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1120 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1122 PMD_INIT_FUNC_TRACE(edev);
1125 if (qede_activate_vport(eth_dev, false))
1126 return;
1128 if (qdev->enable_lro)
1129 qede_enable_tpa(eth_dev, false);
1132 qede_stop_queues(eth_dev);
1134 /* Disable traffic */
1135 ecore_hw_stop_fastpath(edev); /* TBD - loop */
1137 DP_INFO(edev, "Device is stopped\n");
1140 static const char * const valid_args[] = {
1141 QEDE_NPAR_TX_SWITCHING,
1142 QEDE_VF_TX_SWITCHING,
1146 static int qede_args_check(const char *key, const char *val, void *opaque)
1150 struct rte_eth_dev *eth_dev = opaque;
1151 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1152 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1155 tmp = strtoul(val, NULL, 0);
1157 DP_INFO(edev, "%s: \"%s\" is not a valid integer\n", key, val);
1161 if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
1162 ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) {
1163 qdev->enable_tx_switching = !!tmp;
1164 DP_INFO(edev, "%s tx-switching %s\n",
1165 strcmp(QEDE_NPAR_TX_SWITCHING, key) ? "VF" : "NPAR",
1166 tmp ? "enabled" : "disabled");
1172 static int qede_args(struct rte_eth_dev *eth_dev)
1174 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
1175 struct rte_kvargs *kvlist;
1176 struct rte_devargs *devargs;
1180 devargs = pci_dev->device.devargs;
1181 if (devargs == NULL)
1182 return 0; /* return success */
1184 kvlist = rte_kvargs_parse(devargs->args, valid_args);
1188 /* Process parameters. */
1189 for (i = 0; (valid_args[i] != NULL); ++i) {
1190 if (rte_kvargs_count(kvlist, valid_args[i])) {
1191 ret = rte_kvargs_process(kvlist, valid_args[i],
1192 qede_args_check, eth_dev);
1193 if (ret != ECORE_SUCCESS) {
1194 rte_kvargs_free(kvlist);
1199 rte_kvargs_free(kvlist);
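/*
 * Illustrative sketch of where these kvargs come from (the BDF is
 * hypothetical): passing "-w 0000:05:00.0,vf_tx_switching=0" to the EAL
 * attaches the text after the comma as devargs, which qede_args() feeds
 * through rte_kvargs_parse()/rte_kvargs_process() into qede_args_check().
 */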
1204 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
1206 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1207 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1208 struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1211 PMD_INIT_FUNC_TRACE(edev);
1213 if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
1214 rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1216 /* We need to have min 1 RX queue. There is no min check in
1217 * rte_eth_dev_configure(), so we are checking it here.
1219 if (eth_dev->data->nb_rx_queues == 0) {
1220 DP_ERR(edev, "Minimum one RX queue is required\n");
1221 return -EINVAL;
1224 /* Enable Tx switching by default */
1225 qdev->enable_tx_switching = 1;
1227 /* Parse devargs and fix up rxmode */
1228 if (qede_args(eth_dev))
1229 DP_NOTICE(edev, false,
1230 "Invalid devargs supplied, requested change will not take effect\n");
1232 if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
1233 rxmode->mq_mode == ETH_MQ_RX_RSS)) {
1234 DP_ERR(edev, "Unsupported multi-queue mode\n");
1235 return -ENOTSUP;
1237 /* Flow director mode check */
1238 if (qede_check_fdir_support(eth_dev))
1239 return -ENOTSUP;
1241 qede_dealloc_fp_resc(eth_dev);
1242 qdev->num_tx_queues = eth_dev->data->nb_tx_queues * edev->num_hwfns;
1243 qdev->num_rx_queues = eth_dev->data->nb_rx_queues * edev->num_hwfns;
1245 if (qede_alloc_fp_resc(qdev))
1246 return -ENOMEM;
1248 /* If jumbo enabled adjust MTU */
1249 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
1250 eth_dev->data->mtu =
1251 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
1252 RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
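/*
 * Example of the arithmetic above: with max_rx_pkt_len = 9018 the stored
 * MTU is 9018 - RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD, i.e. the frame
 * length minus the L2 header and the driver's per-frame overhead
 * (VLAN tags and the like).
 */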
1254 if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
1255 eth_dev->data->scattered_rx = 1;
1257 if (qede_start_vport(qdev, eth_dev->data->mtu))
1258 return -1;
1260 qdev->mtu = eth_dev->data->mtu;
1262 /* Enable VLAN offloads by default */
1263 ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
1264 ETH_VLAN_FILTER_MASK);
1265 if (ret)
1266 return ret;
1268 DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
1269 QEDE_RSS_COUNT(eth_dev), QEDE_TSS_COUNT(eth_dev));
1271 if (ECORE_IS_CMT(edev))
1272 DP_INFO(edev, "Actual HW queues for CMT mode - RX = %d TX = %d\n",
1273 qdev->num_rx_queues, qdev->num_tx_queues);
1279 /* Info about HW descriptor ring limitations */
1280 static const struct rte_eth_desc_lim qede_rx_desc_lim = {
1281 .nb_max = 0x8000, /* 32K */
1283 .nb_align = 128 /* lowest common multiple */
1286 static const struct rte_eth_desc_lim qede_tx_desc_lim = {
1287 .nb_max = 0x8000, /* 32K */
1290 .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
1291 .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
1295 qede_dev_info_get(struct rte_eth_dev *eth_dev,
1296 struct rte_eth_dev_info *dev_info)
1298 struct qede_dev *qdev = eth_dev->data->dev_private;
1299 struct ecore_dev *edev = &qdev->edev;
1300 struct qed_link_output link;
1301 uint32_t speed_cap = 0;
1303 PMD_INIT_FUNC_TRACE(edev);
1305 dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
1306 dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
1307 dev_info->rx_desc_lim = qede_rx_desc_lim;
1308 dev_info->tx_desc_lim = qede_tx_desc_lim;
1311 dev_info->max_rx_queues = (uint16_t)RTE_MIN(
1312 QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
1314 dev_info->max_rx_queues = (uint16_t)RTE_MIN(
1315 QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
1316 /* Since CMT mode internally doubles the number of queues */
1317 if (ECORE_IS_CMT(edev))
1318 dev_info->max_rx_queues = dev_info->max_rx_queues / 2;
1320 dev_info->max_tx_queues = dev_info->max_rx_queues;
1322 dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
1323 dev_info->max_vfs = 0;
1324 dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
1325 dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
1326 dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
1327 dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
1328 DEV_RX_OFFLOAD_UDP_CKSUM |
1329 DEV_RX_OFFLOAD_TCP_CKSUM |
1330 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1331 DEV_RX_OFFLOAD_TCP_LRO |
1332 DEV_RX_OFFLOAD_KEEP_CRC |
1333 DEV_RX_OFFLOAD_SCATTER |
1334 DEV_RX_OFFLOAD_JUMBO_FRAME |
1335 DEV_RX_OFFLOAD_VLAN_FILTER |
1336 DEV_RX_OFFLOAD_VLAN_STRIP |
1337 DEV_RX_OFFLOAD_RSS_HASH);
1338 dev_info->rx_queue_offload_capa = 0;
1340 /* TX offloads are set on a per-packet basis, so they can be
1341  * advertised at both the port and the queue level.
1342  */
1343 dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
1344 DEV_TX_OFFLOAD_IPV4_CKSUM |
1345 DEV_TX_OFFLOAD_UDP_CKSUM |
1346 DEV_TX_OFFLOAD_TCP_CKSUM |
1347 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1348 DEV_TX_OFFLOAD_MULTI_SEGS |
1349 DEV_TX_OFFLOAD_TCP_TSO |
1350 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1351 DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
1352 dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
1354 dev_info->default_txconf = (struct rte_eth_txconf) {
1355 .offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
1358 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1359 /* Packets are always dropped if no descriptors are available */
1364 memset(&link, 0, sizeof(struct qed_link_output));
1365 qdev->ops->common->get_link(edev, &link);
1366 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1367 speed_cap |= ETH_LINK_SPEED_1G;
1368 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1369 speed_cap |= ETH_LINK_SPEED_10G;
1370 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1371 speed_cap |= ETH_LINK_SPEED_25G;
1372 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1373 speed_cap |= ETH_LINK_SPEED_40G;
1374 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1375 speed_cap |= ETH_LINK_SPEED_50G;
1376 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1377 speed_cap |= ETH_LINK_SPEED_100G;
1378 dev_info->speed_capa = speed_cap;
1383 /* return 0 means link status changed, -1 means not changed */
1385 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
1387 struct qede_dev *qdev = eth_dev->data->dev_private;
1388 struct ecore_dev *edev = &qdev->edev;
1389 struct qed_link_output q_link;
1390 struct rte_eth_link link;
1391 uint16_t link_duplex;
1393 memset(&q_link, 0, sizeof(q_link));
1394 memset(&link, 0, sizeof(link));
1396 qdev->ops->common->get_link(edev, &q_link);
1399 link.link_speed = q_link.speed;
1402 switch (q_link.duplex) {
1403 case QEDE_DUPLEX_HALF:
1404 link_duplex = ETH_LINK_HALF_DUPLEX;
1405 break;
1406 case QEDE_DUPLEX_FULL:
1407 link_duplex = ETH_LINK_FULL_DUPLEX;
1408 break;
1409 case QEDE_DUPLEX_UNKNOWN:
1410 default:
1411 link_duplex = -1;
1412 }
1413 link.link_duplex = link_duplex;
1416 link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
1419 link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
1420 ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1422 DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
1423 link.link_speed, link.link_duplex,
1424 link.link_autoneg, link.link_status);
1426 return rte_eth_linkstatus_set(eth_dev, &link);
1429 static int qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
1431 struct qede_dev *qdev = eth_dev->data->dev_private;
1432 struct ecore_dev *edev = &qdev->edev;
1433 enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
1434 enum _ecore_status_t ecore_status;
1436 PMD_INIT_FUNC_TRACE(edev);
1438 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1439 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1441 ecore_status = qed_configure_filter_rx_mode(eth_dev, type);
1443 return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
1446 static int qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
1448 struct qede_dev *qdev = eth_dev->data->dev_private;
1449 struct ecore_dev *edev = &qdev->edev;
1450 enum _ecore_status_t ecore_status;
1452 PMD_INIT_FUNC_TRACE(edev);
1454 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1455 ecore_status = qed_configure_filter_rx_mode(eth_dev,
1456 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
1458 ecore_status = qed_configure_filter_rx_mode(eth_dev,
1459 QED_FILTER_RX_MODE_TYPE_REGULAR);
1461 return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
1464 static void qede_poll_sp_sb_cb(void *param)
1466 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1467 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1468 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1471 qede_interrupt_action(ECORE_LEADING_HWFN(edev));
1472 qede_interrupt_action(&edev->hwfns[1]);
1474 rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
1475 qede_poll_sp_sb_cb,
1476 (void *)eth_dev);
1477 if (rc != 0) {
1478 DP_ERR(edev, "Unable to start periodic"
1479 " timer rc %d\n", rc);
1483 static void qede_dev_close(struct rte_eth_dev *eth_dev)
1485 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1486 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1487 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1489 PMD_INIT_FUNC_TRACE(edev);
1491 /* dev_stop() cleans up fastpath resources in HW without releasing
1492  * DMA memories and SW structures, so that dev_start() can be called
1493  * by the app without reconfiguration. In dev_close(), by contrast,
1494  * all resources are released so the device can be brought up fresh.
1495  */
1496 if (eth_dev->data->dev_started)
1497 qede_dev_stop(eth_dev);
1499 qede_stop_vport(edev);
1500 qdev->vport_started = false;
1501 qede_fdir_dealloc_resc(eth_dev);
1502 qede_dealloc_fp_resc(eth_dev);
1504 eth_dev->data->nb_rx_queues = 0;
1505 eth_dev->data->nb_tx_queues = 0;
1507 /* Bring the link down */
1508 qede_dev_set_link_state(eth_dev, false);
1509 qdev->ops->common->slowpath_stop(edev);
1510 qdev->ops->common->remove(edev);
1511 rte_intr_disable(&pci_dev->intr_handle);
1513 switch (pci_dev->intr_handle.type) {
1514 case RTE_INTR_HANDLE_UIO_INTX:
1515 case RTE_INTR_HANDLE_VFIO_LEGACY:
1516 rte_intr_callback_unregister(&pci_dev->intr_handle,
1517 qede_interrupt_handler_intx,
1518 (void *)eth_dev);
1519 break;
1520 default:
1521 rte_intr_callback_unregister(&pci_dev->intr_handle,
1522 qede_interrupt_handler,
1523 (void *)eth_dev);
1524 }
1526 if (ECORE_IS_CMT(edev))
1527 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
1531 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
1533 struct qede_dev *qdev = eth_dev->data->dev_private;
1534 struct ecore_dev *edev = &qdev->edev;
1535 struct ecore_eth_stats stats;
1536 unsigned int i = 0, j = 0, qid, idx, hw_fn;
1537 unsigned int rxq_stat_cntrs, txq_stat_cntrs;
1538 struct qede_tx_queue *txq;
1540 ecore_get_vport_stats(edev, &stats);
1543 eth_stats->ipackets = stats.common.rx_ucast_pkts +
1544 stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;
1546 eth_stats->ibytes = stats.common.rx_ucast_bytes +
1547 stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;
1549 eth_stats->ierrors = stats.common.rx_crc_errors +
1550 stats.common.rx_align_errors +
1551 stats.common.rx_carrier_errors +
1552 stats.common.rx_oversize_packets +
1553 stats.common.rx_jabbers + stats.common.rx_undersize_packets;
1555 eth_stats->rx_nombuf = stats.common.no_buff_discards;
1557 eth_stats->imissed = stats.common.mftag_filter_discards +
1558 stats.common.mac_filter_discards +
1559 stats.common.no_buff_discards +
1560 stats.common.brb_truncates + stats.common.brb_discards;
1563 eth_stats->opackets = stats.common.tx_ucast_pkts +
1564 stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;
1566 eth_stats->obytes = stats.common.tx_ucast_bytes +
1567 stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;
1569 eth_stats->oerrors = stats.common.tx_err_drop_pkts;
1572 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(eth_dev),
1573 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1574 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(eth_dev),
1575 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1576 if (rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(eth_dev) ||
1577 txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(eth_dev))
1578 DP_VERBOSE(edev, ECORE_MSG_DEBUG,
1579 "Not all the queue stats will be displayed. Set"
1580 " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
1581 " appropriately and retry.\n");
1583 for (qid = 0; qid < eth_dev->data->nb_rx_queues; qid++) {
1584 eth_stats->q_ipackets[i] = 0;
1585 eth_stats->q_errors[i] = 0;
1587 for_each_hwfn(edev, hw_fn) {
1588 idx = qid * edev->num_hwfns + hw_fn;
1590 eth_stats->q_ipackets[i] +=
1592 (((char *)(qdev->fp_array[idx].rxq)) +
1593 offsetof(struct qede_rx_queue,
1595 eth_stats->q_errors[i] +=
1597 (((char *)(qdev->fp_array[idx].rxq)) +
1598 offsetof(struct qede_rx_queue,
1601 (((char *)(qdev->fp_array[idx].rxq)) +
1602 offsetof(struct qede_rx_queue,
1607 if (i == rxq_stat_cntrs)
1608 break;
1611 for (qid = 0; qid < eth_dev->data->nb_tx_queues; qid++) {
1612 eth_stats->q_opackets[j] = 0;
1614 for_each_hwfn(edev, hw_fn) {
1615 idx = qid * edev->num_hwfns + hw_fn;
1617 txq = qdev->fp_array[idx].txq;
1618 eth_stats->q_opackets[j] +=
1619 *((uint64_t *)(uintptr_t)
1620 (((uint64_t)(uintptr_t)(txq)) +
1621 offsetof(struct qede_tx_queue,
1626 if (j == txq_stat_cntrs)
1627 break;
1634 qede_get_xstats_count(struct qede_dev *qdev) {
1635 struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
1637 if (ECORE_IS_BB(&qdev->edev))
1638 return RTE_DIM(qede_xstats_strings) +
1639 RTE_DIM(qede_bb_xstats_strings) +
1640 (RTE_DIM(qede_rxq_xstats_strings) *
1641 QEDE_RSS_COUNT(dev) * qdev->edev.num_hwfns);
1643 return RTE_DIM(qede_xstats_strings) +
1644 RTE_DIM(qede_ah_xstats_strings) +
1645 (RTE_DIM(qede_rxq_xstats_strings) *
1646 QEDE_RSS_COUNT(dev));
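/*
 * Example: an AH adapter with 4 Rx queues on a single-hwfn device reports
 * RTE_DIM(qede_xstats_strings) port-wide counters, plus
 * RTE_DIM(qede_ah_xstats_strings) AH counters, plus
 * RTE_DIM(qede_rxq_xstats_strings) * 4 = 12 per-queue counters.
 */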
1650 qede_get_xstats_names(struct rte_eth_dev *dev,
1651 struct rte_eth_xstat_name *xstats_names,
1652 __rte_unused unsigned int limit)
1654 struct qede_dev *qdev = dev->data->dev_private;
1655 struct ecore_dev *edev = &qdev->edev;
1656 const unsigned int stat_cnt = qede_get_xstats_count(qdev);
1657 unsigned int i, qid, hw_fn, stat_idx = 0;
1659 if (xstats_names == NULL)
1660 return stat_cnt;
1662 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1663 strlcpy(xstats_names[stat_idx].name,
1664 qede_xstats_strings[i].name,
1665 sizeof(xstats_names[stat_idx].name));
1669 if (ECORE_IS_BB(edev)) {
1670 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
1671 strlcpy(xstats_names[stat_idx].name,
1672 qede_bb_xstats_strings[i].name,
1673 sizeof(xstats_names[stat_idx].name));
1677 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
1678 strlcpy(xstats_names[stat_idx].name,
1679 qede_ah_xstats_strings[i].name,
1680 sizeof(xstats_names[stat_idx].name));
1685 for (qid = 0; qid < QEDE_RSS_COUNT(dev); qid++) {
1686 for_each_hwfn(edev, hw_fn) {
1687 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1688 snprintf(xstats_names[stat_idx].name,
1689 RTE_ETH_XSTATS_NAME_SIZE,
1690 "%.4s%d.%d%s",
1691 qede_rxq_xstats_strings[i].name,
1692 qid, hw_fn,
1693 qede_rxq_xstats_strings[i].name + 4);
1703 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1706 struct qede_dev *qdev = dev->data->dev_private;
1707 struct ecore_dev *edev = &qdev->edev;
1708 struct ecore_eth_stats stats;
1709 const unsigned int num = qede_get_xstats_count(qdev);
1710 unsigned int i, qid, hw_fn, fpidx, stat_idx = 0;
1715 ecore_get_vport_stats(edev, &stats);
1717 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1718 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
1719 qede_xstats_strings[i].offset);
1720 xstats[stat_idx].id = stat_idx;
1724 if (ECORE_IS_BB(edev)) {
1725 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
1726 xstats[stat_idx].value =
1727 *(uint64_t *)(((char *)&stats) +
1728 qede_bb_xstats_strings[i].offset);
1729 xstats[stat_idx].id = stat_idx;
1733 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
1734 xstats[stat_idx].value =
1735 *(uint64_t *)(((char *)&stats) +
1736 qede_ah_xstats_strings[i].offset);
1737 xstats[stat_idx].id = stat_idx;
1742 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
1743 for_each_hwfn(edev, hw_fn) {
1744 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1745 fpidx = qid * edev->num_hwfns + hw_fn;
1746 xstats[stat_idx].value = *(uint64_t *)
1747 (((char *)(qdev->fp_array[fpidx].rxq)) +
1748 qede_rxq_xstats_strings[i].offset);
1749 xstats[stat_idx].id = stat_idx;
1760 qede_reset_xstats(struct rte_eth_dev *dev)
1762 struct qede_dev *qdev = dev->data->dev_private;
1763 struct ecore_dev *edev = &qdev->edev;
1765 ecore_reset_vport_stats(edev);
1766 qede_reset_queue_stats(qdev, true);
1771 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
1773 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1774 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1775 struct qed_link_params link_params;
1778 DP_INFO(edev, "setting link state %d\n", link_up);
1779 memset(&link_params, 0, sizeof(link_params));
1780 link_params.link_up = link_up;
1781 rc = qdev->ops->common->set_link(edev, &link_params);
1782 if (rc != ECORE_SUCCESS)
1783 DP_ERR(edev, "Unable to set link state %d\n", link_up);
1788 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
1790 return qede_dev_set_link_state(eth_dev, true);
1793 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
1795 return qede_dev_set_link_state(eth_dev, false);
1798 static int qede_reset_stats(struct rte_eth_dev *eth_dev)
1800 struct qede_dev *qdev = eth_dev->data->dev_private;
1801 struct ecore_dev *edev = &qdev->edev;
1803 ecore_reset_vport_stats(edev);
1804 qede_reset_queue_stats(qdev, false);
1809 static int qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
1811 enum qed_filter_rx_mode_type type =
1812 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1813 enum _ecore_status_t ecore_status;
1815 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1816 type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
1818 ecore_status = qed_configure_filter_rx_mode(eth_dev, type);
1820 return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
1823 static int qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
1825 enum _ecore_status_t ecore_status;
1827 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1828 ecore_status = qed_configure_filter_rx_mode(eth_dev,
1829 QED_FILTER_RX_MODE_TYPE_PROMISC);
1831 ecore_status = qed_configure_filter_rx_mode(eth_dev,
1832 QED_FILTER_RX_MODE_TYPE_REGULAR);
1834 return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
1838 qede_set_mc_addr_list(struct rte_eth_dev *eth_dev,
1839 struct rte_ether_addr *mc_addrs,
1840 uint32_t mc_addrs_num)
1842 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1843 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1846 if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
1847 DP_ERR(edev, "Reached max multicast filters limit,"
1848 " please enable multicast promisc mode\n");
1852 for (i = 0; i < mc_addrs_num; i++) {
1853 if (!rte_is_multicast_ether_addr(&mc_addrs[i])) {
1854 DP_ERR(edev, "Not a valid multicast MAC\n");
1859 /* Flush all existing entries */
1860 if (qede_del_mcast_filters(eth_dev))
1863 /* Set new mcast list */
1864 return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
1867 /* Update MTU via vport-update without doing port restart.
1868 * The vport must be deactivated before calling this API.
1870 int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
1872 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1873 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1874 struct ecore_hwfn *p_hwfn;
1879 struct ecore_sp_vport_update_params params;
1881 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
1882 params.vport_id = 0;
1883 params.mtu = mtu;
1885 for_each_hwfn(edev, i) {
1886 p_hwfn = &edev->hwfns[i];
1887 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1888 rc = ecore_sp_vport_update(p_hwfn, &params,
1889 ECORE_SPQ_MODE_EBLOCK, NULL);
1890 if (rc != ECORE_SUCCESS)
1891 goto err;
1894 for_each_hwfn(edev, i) {
1895 p_hwfn = &edev->hwfns[i];
1896 rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
1897 if (rc == ECORE_INVAL) {
1898 DP_INFO(edev, "VF MTU Update TLV not supported\n");
1899 /* Recreate vport */
1900 rc = qede_start_vport(qdev, mtu);
1901 if (rc != ECORE_SUCCESS)
1902 goto err;
1904 /* Restore config lost due to vport stop */
1905 if (eth_dev->data->promiscuous)
1906 qede_promiscuous_enable(eth_dev);
1908 qede_promiscuous_disable(eth_dev);
1910 if (eth_dev->data->all_multicast)
1911 qede_allmulticast_enable(eth_dev);
1913 qede_allmulticast_disable(eth_dev);
1915 qede_vlan_offload_set(eth_dev,
1916 qdev->vlan_offload_mask);
1917 } else if (rc != ECORE_SUCCESS) {
1918 goto err;
1919 }
1922 DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);
1924 return 0;
1926 err:
1927 DP_ERR(edev, "Failed to update MTU\n");
1931 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
1932 struct rte_eth_fc_conf *fc_conf)
1934 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1935 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1936 struct qed_link_output current_link;
1937 struct qed_link_params params;
1939 memset(&current_link, 0, sizeof(current_link));
1940 qdev->ops->common->get_link(edev, &current_link);
1942 memset(&params, 0, sizeof(params));
1943 params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
1944 if (fc_conf->autoneg) {
1945 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
1946 DP_ERR(edev, "Autoneg not supported\n");
1949 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
1952 /* Pause is assumed to be supported (SUPPORTED_Pause) */
1953 if (fc_conf->mode == RTE_FC_FULL)
1954 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
1955 QED_LINK_PAUSE_RX_ENABLE);
1956 if (fc_conf->mode == RTE_FC_TX_PAUSE)
1957 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1958 if (fc_conf->mode == RTE_FC_RX_PAUSE)
1959 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1961 params.link_up = true;
1962 (void)qdev->ops->common->set_link(edev, &params);
1967 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
1968 struct rte_eth_fc_conf *fc_conf)
1970 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1971 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1972 struct qed_link_output current_link;
1974 memset(&current_link, 0, sizeof(current_link));
1975 qdev->ops->common->get_link(edev, &current_link);
1977 if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1978 fc_conf->autoneg = true;
1980 if ((current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE) &&
1981 (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE))
1982 fc_conf->mode = RTE_FC_FULL;
1983 else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
1984 fc_conf->mode = RTE_FC_RX_PAUSE;
1985 else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
1986 fc_conf->mode = RTE_FC_TX_PAUSE;
1988 fc_conf->mode = RTE_FC_NONE;
1993 static const uint32_t *
1994 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
1996 static const uint32_t ptypes[] = {
1998 RTE_PTYPE_L2_ETHER_VLAN,
2003 RTE_PTYPE_TUNNEL_VXLAN,
2005 RTE_PTYPE_TUNNEL_GENEVE,
2006 RTE_PTYPE_TUNNEL_GRE,
2008 RTE_PTYPE_INNER_L2_ETHER,
2009 RTE_PTYPE_INNER_L2_ETHER_VLAN,
2010 RTE_PTYPE_INNER_L3_IPV4,
2011 RTE_PTYPE_INNER_L3_IPV6,
2012 RTE_PTYPE_INNER_L4_TCP,
2013 RTE_PTYPE_INNER_L4_UDP,
2014 RTE_PTYPE_INNER_L4_FRAG,
2018 if (eth_dev->rx_pkt_burst == qede_recv_pkts ||
2019 eth_dev->rx_pkt_burst == qede_recv_pkts_regular ||
2020 eth_dev->rx_pkt_burst == qede_recv_pkts_cmt)
2021 return ptypes;
2026 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
2029 *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
2030 *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
2031 *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
2032 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
2033 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
2034 *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
2035 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
2036 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
                         struct rte_eth_rss_conf *rss_conf)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params vport_update_params;
        struct ecore_rss_params rss_params;
        struct ecore_hwfn *p_hwfn;
        uint32_t *key = (uint32_t *)rss_conf->rss_key;
        uint64_t hf = rss_conf->rss_hf;
        uint8_t len = rss_conf->rss_key_len;
        uint8_t idx, i, j, fpidx;
        int rc = 0;

        memset(&vport_update_params, 0, sizeof(vport_update_params));
        memset(&rss_params, 0, sizeof(rss_params));

        DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
                (unsigned long)hf, len, key);

        if (hf != 0) {
                /* Enabling RSS */
                DP_INFO(edev, "Enabling rss\n");

                /* RSS caps */
                qede_init_rss_caps(&rss_params.rss_caps, hf);
                rss_params.update_rss_capabilities = 1;

                /* RSS hash key */
                if (key) {
                        if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
                                DP_ERR(edev, "RSS key length exceeds limit\n");
                                return -EINVAL;
                        }
                        DP_INFO(edev, "Applying user supplied hash key\n");
                        rss_params.update_rss_key = 1;
                        memcpy(&rss_params.rss_key, key, len);
                }
                rss_params.rss_enable = 1;
        }

        rss_params.update_rss_config = 1;
        /* tbl_size has to be set with capabilities */
        rss_params.rss_table_size_log = 7;
        vport_update_params.vport_id = 0;

        for_each_hwfn(edev, i) {
                /* pass the L2 handles instead of qids */
                for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
                        idx = j % QEDE_RSS_COUNT(eth_dev);
                        fpidx = idx * edev->num_hwfns + i;
                        rss_params.rss_ind_table[j] =
                                qdev->fp_array[fpidx].rxq->handle;
                }
                vport_update_params.rss_params = &rss_params;

                p_hwfn = &edev->hwfns[i];
                vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
                                           ECORE_SPQ_MODE_EBLOCK, NULL);
                if (rc) {
                        DP_ERR(edev, "vport-update for RSS failed\n");
                        return rc;
                }
        }
        qdev->rss_enable = rss_params.rss_enable;

        /* Update local structure for hash query */
        qdev->rss_conf.rss_hf = hf;
        qdev->rss_conf.rss_key_len = len;
        if (qdev->rss_enable) {
                if (qdev->rss_conf.rss_key == NULL) {
                        qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
                        if (qdev->rss_conf.rss_key == NULL) {
                                DP_ERR(edev, "No memory to store RSS key\n");
                                return -ENOMEM;
                        }
                }
                if (key && len) {
                        DP_INFO(edev, "Storing RSS key\n");
                        memcpy(qdev->rss_conf.rss_key, key, len);
                }
        } else if (!qdev->rss_enable && len == 0) {
                if (qdev->rss_conf.rss_key) {
                        free(qdev->rss_conf.rss_key);
                        qdev->rss_conf.rss_key = NULL;
                        DP_INFO(edev, "Free RSS key\n");
                }
        }

        return 0;
}

static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
                                  struct rte_eth_rss_conf *rss_conf)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);

        rss_conf->rss_hf = qdev->rss_conf.rss_hf;
        rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;

        if (rss_conf->rss_key && qdev->rss_conf.rss_key)
                memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
                       rss_conf->rss_key_len);

        return 0;
}

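/*
 * Update the RSS indirection table. In CMT (100G) mode the queues are
 * split across both engines, so each RETA entry is mapped to a fastpath
 * index of entry * num_hwfns + hwfn-index, and the table is handed to
 * ecore as rxq handles rather than raw queue ids.
 */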
int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
                         struct rte_eth_rss_reta_entry64 *reta_conf,
                         uint16_t reta_size)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params vport_update_params;
        struct ecore_rss_params *params;
        uint16_t i, j, idx, fid, shift;
        struct ecore_hwfn *p_hwfn;
        uint8_t entry;
        int rc = 0;

        if (reta_size > ETH_RSS_RETA_SIZE_128) {
                DP_ERR(edev, "reta_size %d is not supported by hardware\n",
                       reta_size);
                return -EINVAL;
        }

        memset(&vport_update_params, 0, sizeof(vport_update_params));
        params = rte_zmalloc("qede_rss", sizeof(*params), RTE_CACHE_LINE_SIZE);
        if (params == NULL) {
                DP_ERR(edev, "failed to allocate memory\n");
                return -ENOMEM;
        }

        params->update_rss_ind_table = 1;
        params->rss_table_size_log = 7;
        params->update_rss_config = 1;

        vport_update_params.vport_id = 0;
        /* Use the current value of rss_enable */
        params->rss_enable = qdev->rss_enable;
        vport_update_params.rss_params = params;

        for_each_hwfn(edev, i) {
                for (j = 0; j < reta_size; j++) {
                        idx = j / RTE_RETA_GROUP_SIZE;
                        shift = j % RTE_RETA_GROUP_SIZE;
                        if (reta_conf[idx].mask & (1ULL << shift)) {
                                entry = reta_conf[idx].reta[shift];
                                fid = entry * edev->num_hwfns + i;
                                /* Pass rxq handles to ecore */
                                params->rss_ind_table[j] =
                                        qdev->fp_array[fid].rxq->handle;
                                /* Update the local copy for RETA query cmd */
                                qdev->rss_ind_table[j] = entry;
                        }
                }

                p_hwfn = &edev->hwfns[i];
                vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
                                           ECORE_SPQ_MODE_EBLOCK, NULL);
                if (rc) {
                        DP_ERR(edev, "vport-update for RSS failed\n");
                        goto out;
                }
        }

out:
        rte_free(params);
        return rc;
}

static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
                               struct rte_eth_rss_reta_entry64 *reta_conf,
                               uint16_t reta_size)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        uint16_t i, idx, shift;
        uint8_t entry;

        if (reta_size > ETH_RSS_RETA_SIZE_128) {
                DP_ERR(edev, "reta_size %d is not supported\n",
                       reta_size);
                return -EINVAL;
        }

        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift)) {
                        entry = qdev->rss_ind_table[i];
                        reta_conf[idx].reta[shift] = entry;
                }
        }

        return 0;
}

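/*
 * Changing the MTU requires quiescing the port: RX/TX handlers are
 * temporarily replaced with dummies, the device is stopped if it was
 * running, the RX buffer size of every queue is recalculated for the
 * new frame size, and the port is then restarted with the burst
 * handlers restored.
 */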
static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_dev_info dev_info = {0};
        struct qede_fastpath *fp;
        uint32_t max_rx_pkt_len;
        uint32_t frame_size;
        uint16_t bufsz;
        bool restart = false;
        int i, rc;

        PMD_INIT_FUNC_TRACE(edev);
        rc = qede_dev_info_get(dev, &dev_info);
        if (rc != 0) {
                DP_ERR(edev, "Error during getting ethernet device info\n");
                return rc;
        }
        max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
        frame_size = max_rx_pkt_len;
        if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {
                DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
                       mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN -
                       QEDE_ETH_OVERHEAD);
                return -EINVAL;
        }
        if (!dev->data->scattered_rx &&
            frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
                DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
                        dev->data->min_rx_buf_size);
                return -EINVAL;
        }
        /* Temporarily replace I/O functions with dummy ones. It cannot
         * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
         */
        dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
        dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
        if (dev->data->dev_started) {
                dev->data->dev_started = 0;
                qede_dev_stop(dev);
                restart = true;
        }
        rte_delay_ms(1000);
        qdev->new_mtu = mtu;

        /* Fix up RX buf size for all queues of the port */
        for (i = 0; i < qdev->num_rx_queues; i++) {
                fp = &qdev->fp_array[i];
                if (fp->rxq != NULL) {
                        bufsz = (uint16_t)rte_pktmbuf_data_room_size(
                                fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
                        /* cache align the mbuf size to simplify rx_buf_size
                         * calculation
                         */
                        bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
                        rc = qede_calc_rx_buf_size(dev, bufsz, frame_size);
                        if (rc < 0)
                                return rc;

                        fp->rxq->rx_buf_size = rc;
                }
        }
        if (max_rx_pkt_len > RTE_ETHER_MAX_LEN)
                dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        if (!dev->data->dev_started && restart) {
                qede_dev_start(dev);
                dev->data->dev_started = 1;
        }

        /* update max frame size */
        dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;

        /* Reassign back the RX/TX handlers */
        qede_assign_rxtx_handlers(dev);
        if (ECORE_IS_CMT(edev)) {
                dev->rx_pkt_burst = qede_recv_pkts_cmt;
                dev->tx_pkt_burst = qede_xmit_pkts_cmt;
        } else {
                dev->rx_pkt_burst = qede_recv_pkts;
                dev->tx_pkt_burst = qede_xmit_pkts;
        }
        return 0;
}

static int
qede_dev_reset(struct rte_eth_dev *dev)
{
        int ret;

        ret = qede_eth_dev_uninit(dev);
        if (ret)
                return ret;

        return qede_eth_dev_init(dev);
}

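/*
 * PF dev_ops. The VF table below is essentially identical except that
 * the flow-control and filter_ctrl callbacks are PF-only and therefore
 * omitted from it.
 */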
static const struct eth_dev_ops qede_eth_dev_ops = {
        .dev_configure = qede_dev_configure,
        .dev_infos_get = qede_dev_info_get,
        .rx_queue_setup = qede_rx_queue_setup,
        .rx_queue_release = qede_rx_queue_release,
        .rx_descriptor_status = qede_rx_descriptor_status,
        .tx_queue_setup = qede_tx_queue_setup,
        .tx_queue_release = qede_tx_queue_release,
        .dev_start = qede_dev_start,
        .dev_reset = qede_dev_reset,
        .dev_set_link_up = qede_dev_set_link_up,
        .dev_set_link_down = qede_dev_set_link_down,
        .link_update = qede_link_update,
        .promiscuous_enable = qede_promiscuous_enable,
        .promiscuous_disable = qede_promiscuous_disable,
        .allmulticast_enable = qede_allmulticast_enable,
        .allmulticast_disable = qede_allmulticast_disable,
        .set_mc_addr_list = qede_set_mc_addr_list,
        .dev_stop = qede_dev_stop,
        .dev_close = qede_dev_close,
        .stats_get = qede_get_stats,
        .stats_reset = qede_reset_stats,
        .xstats_get = qede_get_xstats,
        .xstats_reset = qede_reset_xstats,
        .xstats_get_names = qede_get_xstats_names,
        .mac_addr_add = qede_mac_addr_add,
        .mac_addr_remove = qede_mac_addr_remove,
        .mac_addr_set = qede_mac_addr_set,
        .vlan_offload_set = qede_vlan_offload_set,
        .vlan_filter_set = qede_vlan_filter_set,
        .flow_ctrl_set = qede_flow_ctrl_set,
        .flow_ctrl_get = qede_flow_ctrl_get,
        .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
        .rss_hash_update = qede_rss_hash_update,
        .rss_hash_conf_get = qede_rss_hash_conf_get,
        .reta_update = qede_rss_reta_update,
        .reta_query = qede_rss_reta_query,
        .mtu_set = qede_set_mtu,
        .filter_ctrl = qede_dev_filter_ctrl,
        .udp_tunnel_port_add = qede_udp_dst_port_add,
        .udp_tunnel_port_del = qede_udp_dst_port_del,
};

static const struct eth_dev_ops qede_eth_vf_dev_ops = {
        .dev_configure = qede_dev_configure,
        .dev_infos_get = qede_dev_info_get,
        .rx_queue_setup = qede_rx_queue_setup,
        .rx_queue_release = qede_rx_queue_release,
        .rx_descriptor_status = qede_rx_descriptor_status,
        .tx_queue_setup = qede_tx_queue_setup,
        .tx_queue_release = qede_tx_queue_release,
        .dev_start = qede_dev_start,
        .dev_reset = qede_dev_reset,
        .dev_set_link_up = qede_dev_set_link_up,
        .dev_set_link_down = qede_dev_set_link_down,
        .link_update = qede_link_update,
        .promiscuous_enable = qede_promiscuous_enable,
        .promiscuous_disable = qede_promiscuous_disable,
        .allmulticast_enable = qede_allmulticast_enable,
        .allmulticast_disable = qede_allmulticast_disable,
        .set_mc_addr_list = qede_set_mc_addr_list,
        .dev_stop = qede_dev_stop,
        .dev_close = qede_dev_close,
        .stats_get = qede_get_stats,
        .stats_reset = qede_reset_stats,
        .xstats_get = qede_get_xstats,
        .xstats_reset = qede_reset_xstats,
        .xstats_get_names = qede_get_xstats_names,
        .vlan_offload_set = qede_vlan_offload_set,
        .vlan_filter_set = qede_vlan_filter_set,
        .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
        .rss_hash_update = qede_rss_hash_update,
        .rss_hash_conf_get = qede_rss_hash_conf_get,
        .reta_update = qede_rss_reta_update,
        .reta_query = qede_rss_reta_query,
        .mtu_set = qede_set_mtu,
        .udp_tunnel_port_add = qede_udp_dst_port_add,
        .udp_tunnel_port_del = qede_udp_dst_port_del,
        .mac_addr_add = qede_mac_addr_add,
        .mac_addr_remove = qede_mac_addr_remove,
        .mac_addr_set = qede_mac_addr_set,
};

static void qede_update_pf_params(struct ecore_dev *edev)
{
        struct ecore_pf_params pf_params;

        memset(&pf_params, 0, sizeof(struct ecore_pf_params));
        pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
        pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
        qed_ops->common->update_pf_params(edev, &pf_params);
}

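/*
 * Init path shared by the PF and VF personalities: probe the device,
 * register the interrupt handler, start the slowpath, fetch device
 * info, set up the MAC address table and finally install the matching
 * dev_ops.
 */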
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
        struct rte_pci_device *pci_dev;
        struct rte_pci_addr pci_addr;
        struct qede_dev *adapter;
        struct ecore_dev *edev;
        struct qed_dev_eth_info dev_info;
        struct qed_slowpath_params params;
        static bool do_once = true;
        uint8_t bulletin_change;
        uint8_t vf_mac[RTE_ETHER_ADDR_LEN];
        uint8_t is_mac_forced;
        bool is_mac_exist;
        /* Fix up ecore debug level */
        uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
        uint8_t dp_level = ECORE_LEVEL_VERBOSE;
        uint32_t int_mode;
        int rc;

        /* Extract key data structures */
        adapter = eth_dev->data->dev_private;
        adapter->ethdev = eth_dev;
        edev = &adapter->edev;
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        pci_addr = pci_dev->addr;

        PMD_INIT_FUNC_TRACE(edev);

        snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
                 pci_addr.bus, pci_addr.devid, pci_addr.function,
                 eth_dev->data->port_id);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                DP_ERR(edev, "Skipping device init from secondary process\n");
                return 0;
        }

        rte_eth_copy_pci_info(eth_dev, pci_dev);

        edev->vendor_id = pci_dev->id.vendor_id;
        edev->device_id = pci_dev->id.device_id;

        qed_ops = qed_get_eth_ops();
        if (!qed_ops) {
                DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
                rc = -EINVAL;
                goto err;
        }

        DP_INFO(edev, "Starting qede probe\n");
        rc = qed_ops->common->probe(edev, pci_dev, dp_module,
                                    dp_level, is_vf);
        if (rc != 0) {
                DP_ERR(edev, "qede probe failed rc %d\n", rc);
                rc = -ENODEV;
                goto err;
        }
        qede_update_pf_params(edev);

        switch (pci_dev->intr_handle.type) {
        case RTE_INTR_HANDLE_UIO_INTX:
        case RTE_INTR_HANDLE_VFIO_LEGACY:
                int_mode = ECORE_INT_MODE_INTA;
                rte_intr_callback_register(&pci_dev->intr_handle,
                                           qede_interrupt_handler_intx,
                                           (void *)eth_dev);
                break;
        default:
                int_mode = ECORE_INT_MODE_MSIX;
                rte_intr_callback_register(&pci_dev->intr_handle,
                                           qede_interrupt_handler,
                                           (void *)eth_dev);
        }

        if (rte_intr_enable(&pci_dev->intr_handle)) {
                DP_ERR(edev, "rte_intr_enable() failed\n");
                rc = -ENODEV;
                goto err;
        }

        /* Start the Slowpath-process */
        memset(&params, 0, sizeof(struct qed_slowpath_params));
        params.int_mode = int_mode;
        params.drv_major = QEDE_PMD_VERSION_MAJOR;
        params.drv_minor = QEDE_PMD_VERSION_MINOR;
        params.drv_rev = QEDE_PMD_VERSION_REVISION;
        params.drv_eng = QEDE_PMD_VERSION_PATCH;
        strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
                QEDE_PMD_DRV_VER_STR_SIZE);

        qede_assign_rxtx_handlers(eth_dev);
        eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;

        /* For CMT mode device do periodic polling for slowpath events.
         * This is required since uio device uses only one MSI-x
         * interrupt vector but we need one for each engine.
         */
        if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
                rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
                                       qede_poll_sp_sb_cb,
                                       (void *)eth_dev);
                if (rc != 0) {
                        DP_ERR(edev, "Unable to start periodic"
                                     " timer rc %d\n", rc);
                        rc = -EINVAL;
                        goto err;
                }
        }

        rc = qed_ops->common->slowpath_start(edev, &params);
        if (rc) {
                DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
                                     (void *)eth_dev);
                rc = -ENODEV;
                goto err;
        }

        rc = qed_ops->fill_dev_info(edev, &dev_info);
        if (rc) {
                DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
                qed_ops->common->slowpath_stop(edev);
                qed_ops->common->remove(edev);
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
                                     (void *)eth_dev);
                rc = -ENODEV;
                goto err;
        }

        qede_alloc_etherdev(adapter, &dev_info);

        if (do_once) {
                qede_print_adapter_info(adapter);
                do_once = false;
        }

        adapter->ops->common->set_name(edev, edev->name);

        if (!is_vf)
                adapter->dev_info.num_mac_filters =
                        (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
                                           ECORE_MAC);
        else
                ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
                        (uint32_t *)&adapter->dev_info.num_mac_filters);

        /* Allocate memory for storing MAC addr */
        eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
                                               (RTE_ETHER_ADDR_LEN *
                                               adapter->dev_info.num_mac_filters),
                                               RTE_CACHE_LINE_SIZE);

        if (eth_dev->data->mac_addrs == NULL) {
                DP_ERR(edev, "Failed to allocate MAC address\n");
                qed_ops->common->slowpath_stop(edev);
                qed_ops->common->remove(edev);
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
                                     (void *)eth_dev);
                return -ENOMEM;
        }

        if (!is_vf) {
                rte_ether_addr_copy((struct rte_ether_addr *)edev->hwfns[0].
                                    hw_info.hw_mac_addr,
                                    &eth_dev->data->mac_addrs[0]);
                rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
                                    &adapter->primary_mac);
        } else {
                ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
                                       &bulletin_change);
                if (bulletin_change) {
                        is_mac_exist =
                            ecore_vf_bulletin_get_forced_mac(
                                                ECORE_LEADING_HWFN(edev),
                                                vf_mac,
                                                &is_mac_forced);
                        if (is_mac_exist) {
                                DP_INFO(edev, "VF macaddr received from PF\n");
                                rte_ether_addr_copy(
                                        (struct rte_ether_addr *)&vf_mac,
                                        &eth_dev->data->mac_addrs[0]);
                                rte_ether_addr_copy(
                                        &eth_dev->data->mac_addrs[0],
                                        &adapter->primary_mac);
                        } else {
                                DP_ERR(edev, "No VF macaddr assigned\n");
                        }
                }
        }

        eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;

        /* Bring-up the link */
        qede_dev_set_link_state(eth_dev, true);

        adapter->num_tx_queues = 0;
        adapter->num_rx_queues = 0;
        SLIST_INIT(&adapter->arfs_info.arfs_list_head);
        SLIST_INIT(&adapter->vlan_list_head);
        SLIST_INIT(&adapter->uc_list_head);
        SLIST_INIT(&adapter->mc_list_head);
        adapter->mtu = RTE_ETHER_MTU;
        adapter->vport_started = false;

        /* VF tunnel offloads are enabled by default in the PF driver */
        adapter->vxlan.num_filters = 0;
        adapter->geneve.num_filters = 0;
        adapter->ipgre.num_filters = 0;
        if (is_vf) {
                adapter->vxlan.enable = true;
                adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
                                             ETH_TUNNEL_FILTER_IVLAN;
                adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
                adapter->geneve.enable = true;
                adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
                                              ETH_TUNNEL_FILTER_IVLAN;
                adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
                adapter->ipgre.enable = true;
                adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
                                             ETH_TUNNEL_FILTER_IVLAN;
        } else {
                adapter->vxlan.enable = false;
                adapter->geneve.enable = false;
                adapter->ipgre.enable = false;
        }

        DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
                adapter->primary_mac.addr_bytes[0],
                adapter->primary_mac.addr_bytes[1],
                adapter->primary_mac.addr_bytes[2],
                adapter->primary_mac.addr_bytes[3],
                adapter->primary_mac.addr_bytes[4],
                adapter->primary_mac.addr_bytes[5]);

        DP_INFO(edev, "Device initialized\n");

        return 0;

err:
        if (do_once) {
                qede_print_adapter_info(adapter);
                do_once = false;
        }
        return rc;
}

static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
        return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
        return qede_common_dev_init(eth_dev, 0);
}

static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        PMD_INIT_FUNC_TRACE(edev);

        /* only uninitialize in the primary process */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        /* safe to close dev here */
        qede_dev_close(eth_dev);

        eth_dev->dev_ops = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;

        return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
        return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
        return qede_dev_common_uninit(eth_dev);
}

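/*
 * PCI IDs of the supported devices. Each table is terminated by a
 * zeroed sentinel entry, as the rte_pci_id convention requires.
 */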
static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
        { QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF) },
        { QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV) },
        { QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV) },
        { .vendor_id = 0, }
};

static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
        { QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E) },
        { QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S) },
        { QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40) },
        { QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25) },
        { QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100) },
        { QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50) },
        { QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G) },
        { QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G) },
        { QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G) },
        { QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G) },
        { .vendor_id = 0, }
};

static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                                    struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                sizeof(struct qede_dev), qedevf_eth_dev_init);
}

static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
}

static struct rte_pci_driver rte_qedevf_pmd = {
        .id_table = pci_id_qedevf_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = qedevf_eth_dev_pci_probe,
        .remove = qedevf_eth_dev_pci_remove,
};

static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                                  struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                sizeof(struct qede_dev), qede_eth_dev_init);
}

static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
}

static struct rte_pci_driver rte_qede_pmd = {
        .id_table = pci_id_qede_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = qede_eth_dev_pci_probe,
        .remove = qede_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");

RTE_INIT(qede_init_log)
{
        qede_logtype_init = rte_log_register("pmd.net.qede.init");
        if (qede_logtype_init >= 0)
                rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
        qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
        if (qede_logtype_driver >= 0)
                rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
}