/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 */

#include "qede_ethdev.h"
#include <rte_string_fns.h>
#include <rte_kvargs.h>

static const struct qed_eth_ops *qed_ops;
static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev);
static int qede_eth_dev_init(struct rte_eth_dev *eth_dev);

#define QEDE_SP_TIMER_PERIOD	10000 /* 10ms (rte_eal_alarm_set() takes microseconds) */

struct rte_qede_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint64_t offset;
};

static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
	{"rx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
	{"rx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
	{"rx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
	{"rx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
	{"rx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
	{"rx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},
	{"tx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
	{"tx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
	{"tx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
	{"tx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
	{"tx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
	{"tx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},
50 {"rx_64_byte_packets",
51 offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
52 {"rx_65_to_127_byte_packets",
53 offsetof(struct ecore_eth_stats_common,
54 rx_65_to_127_byte_packets)},
55 {"rx_128_to_255_byte_packets",
56 offsetof(struct ecore_eth_stats_common,
57 rx_128_to_255_byte_packets)},
58 {"rx_256_to_511_byte_packets",
59 offsetof(struct ecore_eth_stats_common,
60 rx_256_to_511_byte_packets)},
61 {"rx_512_to_1023_byte_packets",
62 offsetof(struct ecore_eth_stats_common,
63 rx_512_to_1023_byte_packets)},
64 {"rx_1024_to_1518_byte_packets",
65 offsetof(struct ecore_eth_stats_common,
66 rx_1024_to_1518_byte_packets)},
67 {"tx_64_byte_packets",
68 offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
69 {"tx_65_to_127_byte_packets",
70 offsetof(struct ecore_eth_stats_common,
71 tx_65_to_127_byte_packets)},
72 {"tx_128_to_255_byte_packets",
73 offsetof(struct ecore_eth_stats_common,
74 tx_128_to_255_byte_packets)},
75 {"tx_256_to_511_byte_packets",
76 offsetof(struct ecore_eth_stats_common,
77 tx_256_to_511_byte_packets)},
78 {"tx_512_to_1023_byte_packets",
79 offsetof(struct ecore_eth_stats_common,
80 tx_512_to_1023_byte_packets)},
81 {"tx_1024_to_1518_byte_packets",
82 offsetof(struct ecore_eth_stats_common,
83 tx_1024_to_1518_byte_packets)},
85 {"rx_mac_crtl_frames",
86 offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
87 {"tx_mac_control_frames",
88 offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
90 offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
92 offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
93 {"rx_priority_flow_control_frames",
94 offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
95 {"tx_priority_flow_control_frames",
96 offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},
	{"rx_crc_errors",
		offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
	{"rx_align_errors",
		offsetof(struct ecore_eth_stats_common, rx_align_errors)},
	{"rx_carrier_errors",
		offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
	{"rx_oversize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
	{"rx_jabbers",
		offsetof(struct ecore_eth_stats_common, rx_jabbers)},
	{"rx_undersize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
	{"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
	{"rx_host_buffer_not_available",
		offsetof(struct ecore_eth_stats_common, no_buff_discards)},
	/* Number of packets discarded because they are bigger than MTU */
	{"rx_packet_too_big_discards",
		offsetof(struct ecore_eth_stats_common,
			 packet_too_big_discard)},
	{"rx_ttl_zero_discards",
		offsetof(struct ecore_eth_stats_common, ttl0_discard)},
	{"rx_multi_function_tag_filter_discards",
		offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
	{"rx_mac_filter_discards",
		offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
	{"rx_gft_filter_drop",
		offsetof(struct ecore_eth_stats_common, gft_filter_drop)},
	{"rx_hw_buffer_truncates",
		offsetof(struct ecore_eth_stats_common, brb_truncates)},
	{"rx_hw_buffer_discards",
		offsetof(struct ecore_eth_stats_common, brb_discards)},
	{"tx_error_drop_packets",
		offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},
132 {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
133 {"rx_mac_unicast_packets",
134 offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
135 {"rx_mac_multicast_packets",
136 offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
137 {"rx_mac_broadcast_packets",
138 offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
140 offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
141 {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
142 {"tx_mac_unicast_packets",
143 offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
144 {"tx_mac_multicast_packets",
145 offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
146 {"tx_mac_broadcast_packets",
147 offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},
149 {"lro_coalesced_packets",
150 offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
151 {"lro_coalesced_events",
152 offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
154 offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
155 {"lro_not_coalesced_packets",
156 offsetof(struct ecore_eth_stats_common,
157 tpa_not_coalesced_pkts)},
158 {"lro_coalesced_bytes",
159 offsetof(struct ecore_eth_stats_common,
160 tpa_coalesced_bytes)},
static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
	{"rx_1519_to_1522_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_1522_byte_packets)},
	{"rx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_2047_byte_packets)},
	{"rx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_2048_to_4095_byte_packets)},
	{"rx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_4096_to_9216_byte_packets)},
	{"rx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_9217_to_16383_byte_packets)},
	{"tx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_1519_to_2047_byte_packets)},
	{"tx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_2048_to_4095_byte_packets)},
	{"tx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_4096_to_9216_byte_packets)},
	{"tx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_9217_to_16383_byte_packets)},
	{"tx_lpi_entry_count",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
	{"tx_total_collisions",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
};

static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
	{"rx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 rx_1519_to_max_byte_packets)},
	{"tx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 tx_1519_to_max_byte_packets)},
};

static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
	{"rx_q_segments",
		offsetof(struct qede_rx_queue, rx_segs)},
	{"rx_q_hw_errors",
		offsetof(struct qede_rx_queue, rx_hw_errors)},
	{"rx_q_allocation_errors",
		offsetof(struct qede_rx_queue, rx_alloc_errors)}
};

/* Get FW version string based on fw_size */
static int
qede_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_dev_info *info = &qdev->dev_info.common;
	static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
	size_t size;

	if (fw_ver == NULL)
		return 0;

	if (IS_PF(edev))
		snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
			 QEDE_PMD_FW_VERSION);
	else
		snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
			 info->fw_major, info->fw_minor,
			 info->fw_rev, info->fw_eng);
	size = strlen(ver_str);
	if (size + 1 <= fw_size) /* Add 1 byte for "\0" */
		strlcpy(fw_ver, ver_str, fw_size);

	snprintf(ver_str + size, (QEDE_PMD_DRV_VER_STR_SIZE - size),
		 " MFW: %d.%d.%d.%d",
		 GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_3),
		 GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_2),
		 GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_1),
		 GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_0));
	size = strlen(ver_str);
	if (size + 1 <= fw_size)
		strlcpy(fw_ver, ver_str, fw_size);

	snprintf(ver_str + size, (QEDE_PMD_DRV_VER_STR_SIZE - size),
		 " MBI: %d.%d.%d",
		 GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_2),
		 GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_1),
		 GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_0));
	size = strlen(ver_str);
	if (size + 1 <= fw_size)
		strlcpy(fw_ver, ver_str, fw_size);

	return 0;
}

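/*
 * Application-side usage sketch (the port id is a placeholder); the
 * ethdev layer dispatches rte_eth_dev_fw_version_get() to the handler
 * above, truncating the string to the caller-supplied buffer size:
 *
 *	char fw[128];
 *
 *	if (rte_eth_dev_fw_version_get(0, fw, sizeof(fw)) == 0)
 *		printf("FW version: %s\n", fw);
 */
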
static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
	OSAL_SPIN_LOCK(&p_hwfn->spq_lock);
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
	OSAL_SPIN_UNLOCK(&p_hwfn->spq_lock);
}

static void
qede_interrupt_handler_intx(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	u64 status;

	/* Check if our device actually raised an interrupt */
	status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev));
	if (status & 0x1) {
		qede_interrupt_action(ECORE_LEADING_HWFN(edev));

		if (rte_intr_ack(eth_dev->intr_handle))
			DP_ERR(edev, "rte_intr_ack failed\n");
	}
}

static void
qede_interrupt_handler(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	if (rte_intr_ack(eth_dev->intr_handle))
		DP_ERR(edev, "rte_intr_ack failed\n");
}

static void
qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy)
{
	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	bool use_tx_offload = false;

	if (is_dummy) {
		dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
		dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
		return;
	}

	if (ECORE_IS_CMT(edev)) {
		dev->rx_pkt_burst = qede_recv_pkts_cmt;
		dev->tx_pkt_burst = qede_xmit_pkts_cmt;
		return;
	}

	if (dev->data->lro || dev->data->scattered_rx) {
		DP_INFO(edev, "Assigning qede_recv_pkts\n");
		dev->rx_pkt_burst = qede_recv_pkts;
	} else {
		DP_INFO(edev, "Assigning qede_recv_pkts_regular\n");
		dev->rx_pkt_burst = qede_recv_pkts_regular;
	}

	use_tx_offload = !!(tx_offloads &
			    (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
			     DEV_TX_OFFLOAD_TCP_TSO | /* tso */
			     DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */

	if (use_tx_offload) {
		DP_INFO(edev, "Assigning qede_xmit_pkts\n");
		dev->tx_pkt_burst = qede_xmit_pkts;
	} else {
		DP_INFO(edev, "Assigning qede_xmit_pkts_regular\n");
		dev->tx_pkt_burst = qede_xmit_pkts_regular;
	}
}

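/*
 * Selection order above: dummy stubs while the port is stopped, the
 * combined CMT handlers on two-hwfn (100G BB) devices, the scatter/LRO
 * aware Rx path when either feature is active, and the offload-capable
 * Tx path only when a Tx offload that needs it (tunnel checksum, TSO or
 * VLAN insertion) is enabled; everything else uses the leaner "regular"
 * burst functions.
 */
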
static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
	qdev->ops = qed_ops;
}

static void qede_print_adapter_info(struct rte_eth_dev *dev)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];

	DP_INFO(edev, "**************************************************\n");
	DP_INFO(edev, " %-20s: %s\n", "DPDK version", rte_version());
	DP_INFO(edev, " %-20s: %s %c%d\n", "Chip details",
		ECORE_IS_BB(edev) ? "BB" : "AH",
		'A' + edev->chip_rev,
		(int)edev->chip_metal);
	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
		 QEDE_PMD_DRV_VERSION);
	DP_INFO(edev, " %-20s: %s\n", "Driver version", ver_str);
	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
		 QEDE_PMD_BASE_VERSION);
	DP_INFO(edev, " %-20s: %s\n", "Base version", ver_str);
	qede_fw_version_get(dev, ver_str, sizeof(ver_str));
	DP_INFO(edev, " %-20s: %s\n", "Firmware version", ver_str);
	DP_INFO(edev, " %-20s: %s\n", "Firmware file", qede_fw_file);
	DP_INFO(edev, "**************************************************\n");
}

static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	unsigned int i = 0, j = 0, qid;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");

	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(dev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);

	for (qid = 0; qid < qdev->num_rx_queues; qid++) {
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			     offsetof(struct qede_rx_queue, rcv_pkts), 0,
			    sizeof(uint64_t));
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			     offsetof(struct qede_rx_queue, rx_hw_errors), 0,
			    sizeof(uint64_t));
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			     offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
			    sizeof(uint64_t));

		if (xstats)
			for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
				OSAL_MEMSET((((char *)
					      (qdev->fp_array[qid].rxq)) +
					     qede_rxq_xstats_strings[j].offset),
					    0, sizeof(uint64_t));

		i++;
		if (i == rxq_stat_cntrs)
			break;
	}

	i = 0;

	for (qid = 0; qid < qdev->num_tx_queues; qid++) {
		txq = qdev->fp_array[qid].txq;

		OSAL_MEMSET((uint64_t *)(uintptr_t)
			    (((uint64_t)(uintptr_t)(txq)) +
			     offsetof(struct qede_tx_queue, xmit_pkts)), 0,
			    sizeof(uint64_t));

		i++;
		if (i == txq_stat_cntrs)
			break;
	}
}

static int
qede_stop_vport(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	uint8_t vport_id;
	int rc;
	int i;

	vport_id = 0;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
					 vport_id);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
			return rc;
		}
	}

	DP_INFO(edev, "vport stopped\n");

	return 0;
}

static int
qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_start_params params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	if (qdev->vport_started)
		qede_stop_vport(edev);

	memset(&params, 0, sizeof(params));
	params.vport_id = 0;
	params.mtu = mtu;
	/* @DPDK - Disable FW placement */
	params.zero_placement_offset = 1;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.concrete_fid = p_hwfn->hw_info.concrete_fid;
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_start(p_hwfn, &params);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Start V-PORT failed %d\n", rc);
			return rc;
		}
	}
	ecore_reset_vport_stats(edev);
	qdev->vport_started = true;
	DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);

	return 0;
}

#define QEDE_NPAR_TX_SWITCHING		"npar_tx_switching"
#define QEDE_VF_TX_SWITCHING		"vf_tx_switching"

/* Activate or deactivate vport via vport-update */
int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc = -1;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_vport_active_rx_flg = 1;
	params.update_vport_active_tx_flg = 1;
	params.vport_active_rx_flg = flg;
	params.vport_active_tx_flg = flg;
	if (!qdev->enable_tx_switching && flg) {
		params.update_tx_switching_flg = 1;
		params.tx_switching_flg = !flg;
	}
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");
			break;
		}
	}
	DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");

	return rc;
}

static void
qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
			   uint16_t mtu, bool enable)
{
	/* Enable LRO in split mode */
	sge_tpa_params->tpa_ipv4_en_flg = enable;
	sge_tpa_params->tpa_ipv6_en_flg = enable;
	sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
	sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
	/* set if tpa enable changes */
	sge_tpa_params->update_tpa_en_flg = 1;
	/* set if tpa parameters should be handled */
	sge_tpa_params->update_tpa_param_flg = enable;

	sge_tpa_params->max_buffers_per_cqe = 20;
	/* Enable TPA in split mode. In this mode each TPA segment
	 * starts on the new BD, so there is one BD per segment.
	 */
	sge_tpa_params->tpa_pkt_split_flg = 1;
	sge_tpa_params->tpa_hdr_data_split_flg = 0;
	sge_tpa_params->tpa_gro_consistent_flg = 0;
	sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
	sge_tpa_params->tpa_max_size = 0x7FFF;
	sge_tpa_params->tpa_min_size_to_start = mtu / 2;
	sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
}

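/*
 * Application-side sketch for exercising this LRO/TPA path (port id and
 * queue counts are placeholders):
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_LRO;
 *	rte_eth_dev_configure(0, 1, 1, &conf);
 *
 * qede_dev_start() then calls qede_enable_tpa(), which applies these
 * SGE/TPA parameters through a vport-update ramrod.
 */
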
/* Enable/disable LRO via vport-update */
int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_sge_tpa_params tpa_params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
	qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
	params.vport_id = 0;
	params.sge_tpa_params = &tpa_params;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update LRO\n");
			return -1;
		}
	}
	qdev->enable_lro = flg;
	eth_dev->data->lro = flg;

	DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");

	return 0;
}

static enum _ecore_status_t
qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
			     enum qed_filter_rx_mode_type type)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_accept_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.update_rx_mode_config = 1;
	flags.update_tx_mode_config = 1;
	flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
		ECORE_ACCEPT_MCAST_MATCHED |
		ECORE_ACCEPT_BCAST;

	flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
		ECORE_ACCEPT_MCAST_MATCHED |
		ECORE_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
		flags.rx_accept_filter |= (ECORE_ACCEPT_UCAST_UNMATCHED |
					   ECORE_ACCEPT_MCAST_UNMATCHED);
		if (IS_VF(edev)) {
			flags.tx_accept_filter |=
					(ECORE_ACCEPT_UCAST_UNMATCHED |
					 ECORE_ACCEPT_MCAST_UNMATCHED);
			DP_INFO(edev, "Enabling Tx unmatched flags for VF\n");
		}
	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	}

	return ecore_filter_accept_cmd(edev, 0, flags, false, false,
				       ECORE_SPQ_MODE_CB, NULL);
}

static int
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		  bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_ucast_entry *tmp = NULL;
	struct qede_ucast_entry *u;
	struct rte_ether_addr *mac_addr;

	mac_addr = (struct rte_ether_addr *)ucast->mac;
	if (add) {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    RTE_ETHER_ADDR_LEN) == 0) &&
			    ucast->vni == tmp->vni &&
			    ucast->vlan == tmp->vlan) {
				DP_INFO(edev, "Unicast MAC is already added"
					" with vlan = %u, vni = %u\n",
					ucast->vlan, ucast->vni);
				return 0;
			}
		}
		u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
			       RTE_CACHE_LINE_SIZE);
		if (!u) {
			DP_ERR(edev, "Did not allocate memory for ucast\n");
			return -ENOMEM;
		}
		rte_ether_addr_copy(mac_addr, &u->mac);
		u->vlan = ucast->vlan;
		u->vni = ucast->vni;
		SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
		qdev->num_uc_addr++;
	} else {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    RTE_ETHER_ADDR_LEN) == 0) &&
			    ucast->vlan == tmp->vlan &&
			    ucast->vni == tmp->vni)
				break;
		}
		if (tmp == NULL) {
			DP_INFO(edev, "Unicast MAC is not found\n");
			return -EINVAL;
		}
		SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
		qdev->num_uc_addr--;
	}

	return 0;
}

static int
qede_add_mcast_filters(struct rte_eth_dev *eth_dev,
		       struct rte_ether_addr *mc_addrs,
		       uint32_t mc_addrs_num)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_mcast mcast;
	struct qede_mcast_entry *m = NULL;
	uint8_t i;
	int rc;

	for (i = 0; i < mc_addrs_num; i++) {
		m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
			       RTE_CACHE_LINE_SIZE);
		if (!m) {
			DP_ERR(edev, "Did not allocate memory for mcast\n");
			return -ENOMEM;
		}
		rte_ether_addr_copy(&mc_addrs[i], &m->mac);
		SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
	}
	memset(&mcast, 0, sizeof(mcast));
	mcast.num_mc_addrs = mc_addrs_num;
	mcast.opcode = ECORE_FILTER_ADD;
	for (i = 0; i < mc_addrs_num; i++)
		rte_ether_addr_copy(&mc_addrs[i], (struct rte_ether_addr *)
						  &mcast.mac[i]);
	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Failed to add multicast filter (rc = %d)\n", rc);
		return -1;
	}

	return 0;
}

static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_mcast_entry *tmp = NULL;
	struct ecore_filter_mcast mcast;
	int j;
	int rc;

	memset(&mcast, 0, sizeof(mcast));
	mcast.num_mc_addrs = qdev->num_mc_addr;
	mcast.opcode = ECORE_FILTER_REMOVE;
	j = 0;
	SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
		rte_ether_addr_copy(&tmp->mac,
				    (struct rte_ether_addr *)&mcast.mac[j]);
		j++;
	}
	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Failed to delete multicast filter\n");
		return -1;
	}
	/* Init the list */
	while (!SLIST_EMPTY(&qdev->mc_list_head)) {
		tmp = SLIST_FIRST(&qdev->mc_list_head);
		SLIST_REMOVE_HEAD(&qdev->mc_list_head, list);
	}
	SLIST_INIT(&qdev->mc_list_head);

	return 0;
}

static enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		 bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;

	if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) {
		DP_ERR(edev, "Ucast filter table limit exceeded,"
			     " Please enable promisc mode\n");
		return ECORE_INVAL;
	}

	rc = qede_ucast_filter(eth_dev, ucast, add);
	if (rc == 0)
		rc = ecore_filter_ucast_cmd(edev, ucast,
					    ECORE_SPQ_MODE_CB, NULL);
	/* Indicate error only for add filter operation.
	 * Delete filter operations are not severe.
	 */
	if ((rc != ECORE_SUCCESS) && add)
		DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
		       rc, add);

	return rc;
}

static int
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr,
		  __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct ecore_filter_ucast ucast;
	int re;

	if (!rte_is_valid_assigned_ether_addr(mac_addr))
		return -EINVAL;

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_ADD;
	ucast.type = ECORE_FILTER_MAC;
	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)&ucast.mac);
	re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
	return re;
}

static void
qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_filter_ucast ucast;

	PMD_INIT_FUNC_TRACE(edev);

	if (index >= qdev->dev_info.num_mac_filters) {
		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
		       index, qdev->dev_info.num_mac_filters);
		return;
	}

	if (!rte_is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
		return;

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_REMOVE;
	ucast.type = ECORE_FILTER_MAC;

	/* Use the index maintained by rte */
	rte_ether_addr_copy(&eth_dev->data->mac_addrs[index],
			    (struct rte_ether_addr *)&ucast.mac);

	qede_mac_int_ops(eth_dev, &ucast, false);
}

static int
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
					       mac_addr->addr_bytes)) {
		DP_ERR(edev, "Setting MAC address is not allowed\n");
		return -EPERM;
	}

	qede_mac_addr_remove(eth_dev, 0);

	return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}

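/*
 * Application-side sketch (placeholder port id and locally administered
 * address); rte_eth_dev_default_mac_addr_set() dispatches to
 * qede_mac_addr_set() above:
 *
 *	struct rte_ether_addr addr = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	rte_eth_dev_default_mac_addr_set(0, &addr);
 */
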
void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_accept_any_vlan_flg = 1;
	params.accept_any_vlan = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to configure accept-any-vlan\n");
			return;
		}
	}

	DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
}

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_inner_vlan_removal_flg = 1;
	params.inner_vlan_removal_flg = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");
			return -1;
		}
	}
	qdev->vlan_strip_flg = flg;

	DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
	return 0;
}

static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
				uint16_t vlan_id, int on)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
	struct qede_vlan_entry *tmp = NULL;
	struct qede_vlan_entry *vlan;
	struct ecore_filter_ucast ucast;
	int rc;

	if (on) {
		if (qdev->configured_vlans == dev_info->num_vlan_filters) {
			DP_ERR(edev, "Reached max VLAN filter limit,"
				     " enabling accept_any_vlan\n");
			qede_config_accept_any_vlan(qdev, true);
			return 0;
		}

		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id) {
				DP_INFO(edev, "VLAN %u already configured\n",
					vlan_id);
				return 0;
			}
		}

		vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
				  RTE_CACHE_LINE_SIZE);

		if (!vlan) {
			DP_ERR(edev, "Did not allocate memory for VLAN\n");
			return -ENOMEM;
		}

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_ADD;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
			       rc);
			rte_free(vlan);
		} else {
			vlan->vid = vlan_id;
			SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
			qdev->configured_vlans++;
			DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	} else {
		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id)
				break;
		}

		if (!tmp) {
			if (qdev->configured_vlans == 0) {
				DP_INFO(edev,
					"No VLAN filters configured yet\n");
				return 0;
			}

			DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
			return -EINVAL;
		}

		SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_REMOVE;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
			       vlan_id, rc);
		} else {
			qdev->configured_vlans--;
			DP_INFO(edev, "VLAN %u removed, configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	}

	return rc;
}

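/*
 * Application-side sketch (placeholder port id); this is the path taken
 * by rte_eth_dev_vlan_filter(), which lands in qede_vlan_filter_set():
 *
 *	rte_eth_dev_vlan_filter(0, 100, 1);	// add a filter for VLAN 100
 *	rte_eth_dev_vlan_filter(0, 100, 0);	// remove it again
 */
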
static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			(void)qede_vlan_stripping(eth_dev, 1);
		else
			(void)qede_vlan_stripping(eth_dev, 0);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN filtering kicks in when a VLAN is added */
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
			qede_vlan_filter_set(eth_dev, 0, 1);
		} else {
			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
				DP_ERR(edev,
				       " Please remove existing VLAN filters"
				       " before disabling VLAN filtering\n");
				/* Signal app that VLAN filtering is still
				 * enabled
				 */
				eth_dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_VLAN_FILTER;
			} else {
				qede_vlan_filter_set(eth_dev, 0, 0);
			}
		}
	}

	qdev->vlan_offload_mask = mask;

	DP_INFO(edev, "VLAN offload mask %d\n", mask);

	return 0;
}

static void qede_prandom_bytes(uint32_t *buff)
{
	uint8_t i;

	srand((unsigned int)time(NULL));
	for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
		buff[i] = rand();
}

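/*
 * Note: rand() seeded with wall-clock time is adequate for spreading
 * flows across queues but is not a cryptographically strong source; an
 * application needing a deterministic or stronger key should supply its
 * own via rte_eth_rss_conf.rss_key (see qede_config_rss() below, which
 * only falls back to this generator when no key is given).
 */
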
int qede_config_rss(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
	struct rte_eth_rss_reta_entry64 reta_conf[2];
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, id, pos, q;

	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	if (!rss_conf.rss_key) {
		DP_INFO(edev, "Applying driver default key\n");
		rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
		qede_prandom_bytes(&def_rss_key[0]);
		rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
	}

	/* Configure RSS hash */
	if (qede_rss_hash_update(eth_dev, &rss_conf))
		return -EINVAL;

	/* Configure default RETA */
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		id = i / RTE_RETA_GROUP_SIZE;
		pos = i % RTE_RETA_GROUP_SIZE;
		q = i % QEDE_RSS_COUNT(eth_dev);
		reta_conf[id].reta[pos] = q;
	}
	if (qede_rss_reta_update(eth_dev, &reta_conf[0],
				 ECORE_RSS_IND_TABLE_SIZE))
		return -EINVAL;

	return 0;
}

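/*
 * Worked example of the default RETA fill above, assuming a 128-entry
 * indirection table (ECORE_RSS_IND_TABLE_SIZE) and 4 Rx queues: entry i
 * maps to queue i % 4, i.e. 0,1,2,3,0,1,2,3,...; entries 0-63 land in
 * reta_conf[0] and 64-127 in reta_conf[1], since RTE_RETA_GROUP_SIZE
 * is 64.
 */
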
static void qede_fastpath_start(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		ecore_hw_start_fastpath(p_hwfn);
	}
}

static int qede_dev_start(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

	PMD_INIT_FUNC_TRACE(edev);

	/* Update MTU only if it has changed */
	if (qdev->new_mtu && qdev->new_mtu != qdev->mtu) {
		if (qede_update_mtu(eth_dev, qdev->new_mtu))
			goto err;
		qdev->mtu = qdev->new_mtu;
	}

	/* Configure TPA parameters */
	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		if (qede_enable_tpa(eth_dev, true))
			return -EINVAL;
		/* Enable scatter mode for LRO */
		if (!eth_dev->data->scattered_rx)
			rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
	}

	/* Start queues */
	if (qede_start_queues(eth_dev))
		goto err;

	if (IS_PF(edev))
		qede_reset_queue_stats(qdev, true);

	/* Newer SR-IOV PF driver expects RX/TX queues to be started before
	 * enabling RSS. Hence RSS configuration is deferred up to this point.
	 * Also, we would like to retain similar behavior in PF case, so we
	 * don't do PF/VF specific check here.
	 */
	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		if (qede_config_rss(eth_dev))
			goto err;

	/* Enable vport */
	if (qede_activate_vport(eth_dev, true))
		goto err;

	/* Bring-up the link */
	qede_dev_set_link_state(eth_dev, true);

	/* Update link status */
	qede_link_update(eth_dev, 0);

	/* Start/resume traffic */
	qede_fastpath_start(edev);

	/* Assign I/O handlers */
	qede_assign_rxtx_handlers(eth_dev, false);

	DP_INFO(edev, "Device started\n");

	return 0;
err:
	DP_ERR(edev, "Device start fails\n");
	return -1; /* common error code is < 0 */
}

static void qede_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* Bring the link down */
	qede_dev_set_link_state(eth_dev, false);

	/* Update link status */
	qede_link_update(eth_dev, 0);

	/* Replace I/O functions with dummy ones. It cannot
	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
	 */
	qede_assign_rxtx_handlers(eth_dev, true);

	/* Disable vport */
	if (qede_activate_vport(eth_dev, false))
		return;

	if (qdev->enable_lro)
		qede_enable_tpa(eth_dev, false);

	/* Stop queues */
	qede_stop_queues(eth_dev);

	/* Disable traffic */
	ecore_hw_stop_fastpath(edev); /* TBD - loop */

	DP_INFO(edev, "Device is stopped\n");
}

static const char * const valid_args[] = {
	QEDE_NPAR_TX_SWITCHING,
	QEDE_VF_TX_SWITCHING,
	NULL,
};

static int qede_args_check(const char *key, const char *val, void *opaque)
{
	unsigned long tmp;
	int ret = 0;
	struct rte_eth_dev *eth_dev = opaque;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		DP_INFO(edev, "%s: \"%s\" is not a valid integer", key, val);
		return errno;
	}

	if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
	    ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) {
		qdev->enable_tx_switching = !!tmp;
		DP_INFO(edev, "%s tx-switching %s\n",
			strcmp(QEDE_NPAR_TX_SWITCHING, key) ? "VF" : "NPAR",
			tmp ? "enabled" : "disabled");
	}

	return ret;
}

static int qede_args(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	struct rte_kvargs *kvlist;
	struct rte_devargs *devargs;
	int ret;
	int i;

	devargs = pci_dev->device.devargs;
	if (!devargs)
		return 0; /* return success */

	kvlist = rte_kvargs_parse(devargs->args, valid_args);
	if (kvlist == NULL)
		return -EINVAL;

	/* Process parameters. */
	for (i = 0; (valid_args[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, valid_args[i])) {
			ret = rte_kvargs_process(kvlist, valid_args[i],
						 qede_args_check, eth_dev);
			if (ret != ECORE_SUCCESS) {
				rte_kvargs_free(kvlist);
				return ret;
			}
		}
	}
	rte_kvargs_free(kvlist);

	return 0;
}

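/*
 * Example devargs string consuming the keys above (the PCI address is a
 * placeholder for an actual qede device):
 *
 *	dpdk-testpmd -a 0000:05:00.0,npar_tx_switching=0 -- -i
 *
 * Values are parsed with strtoul(), so 0 disables and any non-zero
 * value enables the corresponding tx-switching knob.
 */
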
static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
	uint8_t num_rxqs;
	uint8_t num_txqs;
	int ret;

	PMD_INIT_FUNC_TRACE(edev);

	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* We need to have min 1 RX queue. There is no min check in
	 * rte_eth_dev_configure(), so we are checking it here.
	 */
	if (eth_dev->data->nb_rx_queues == 0) {
		DP_ERR(edev, "Minimum one RX queue is required\n");
		return -EINVAL;
	}

	/* Enable Tx switching by default */
	qdev->enable_tx_switching = 1;

	/* Parse devargs and fix up rxmode */
	if (qede_args(eth_dev))
		DP_NOTICE(edev, false,
			  "Invalid devargs supplied, requested change will not take effect\n");

	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
	      rxmode->mq_mode == ETH_MQ_RX_RSS)) {
		DP_ERR(edev, "Unsupported multi-queue mode\n");
		return -ENOTSUP;
	}
	/* Flow director mode check */
	if (qede_check_fdir_support(eth_dev))
		return -ENOTSUP;

	/* Allocate/reallocate fastpath resources only for new queue config */
	num_txqs = eth_dev->data->nb_tx_queues * edev->num_hwfns;
	num_rxqs = eth_dev->data->nb_rx_queues * edev->num_hwfns;
	if (qdev->num_tx_queues != num_txqs ||
	    qdev->num_rx_queues != num_rxqs) {
		qede_dealloc_fp_resc(eth_dev);
		qdev->num_tx_queues = num_txqs;
		qdev->num_rx_queues = num_rxqs;
		if (qede_alloc_fp_resc(qdev))
			return -ENOMEM;
	}

	/* If jumbo enabled adjust MTU */
	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;

	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
		eth_dev->data->scattered_rx = 1;

	if (qede_start_vport(qdev, eth_dev->data->mtu))
		return -1;

	qdev->mtu = eth_dev->data->mtu;

	/* Enable VLAN offloads by default */
	ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
					     ETH_VLAN_FILTER_MASK);
	if (ret)
		return ret;

	DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
		QEDE_RSS_COUNT(eth_dev), QEDE_TSS_COUNT(eth_dev));

	if (ECORE_IS_CMT(edev))
		DP_INFO(edev, "Actual HW queues for CMT mode - RX = %d TX = %d\n",
			qdev->num_rx_queues, qdev->num_tx_queues);

	return 0;
}

/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 128,
	.nb_align = 128 /* lowest common multiple */
};

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 256,
	.nb_align = 256,
	.nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
	.nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
};

static int
qede_dev_info_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_dev_info *dev_info)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output link;
	uint32_t speed_cap = 0;

	PMD_INIT_FUNC_TRACE(edev);

	dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
	dev_info->rx_desc_lim = qede_rx_desc_lim;
	dev_info->tx_desc_lim = qede_tx_desc_lim;

	if (IS_PF(edev))
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
	else
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
	/* Since CMT mode internally doubles the number of queues */
	if (ECORE_IS_CMT(edev))
		dev_info->max_rx_queues = dev_info->max_rx_queues / 2;

	dev_info->max_tx_queues = dev_info->max_rx_queues;

	dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
	dev_info->max_vfs = 0;
	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_UDP_CKSUM |
				     DEV_RX_OFFLOAD_TCP_CKSUM |
				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_TCP_LRO |
				     DEV_RX_OFFLOAD_KEEP_CRC |
				     DEV_RX_OFFLOAD_SCATTER |
				     DEV_RX_OFFLOAD_JUMBO_FRAME |
				     DEV_RX_OFFLOAD_VLAN_FILTER |
				     DEV_RX_OFFLOAD_VLAN_STRIP |
				     DEV_RX_OFFLOAD_RSS_HASH);
	dev_info->rx_queue_offload_capa = 0;

	/* TX offloads are on a per-packet basis, so it is applicable
	 * to both at port and queue levels.
	 */
	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
				     DEV_TX_OFFLOAD_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_UDP_CKSUM |
				     DEV_TX_OFFLOAD_TCP_CKSUM |
				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_MULTI_SEGS |
				     DEV_TX_OFFLOAD_TCP_TSO |
				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
	dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
	};

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		/* Packets are always dropped if no descriptors are available */
		.rx_drop_en = 1,
		.offloads = 0,
	};

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		speed_cap |= ETH_LINK_SPEED_1G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		speed_cap |= ETH_LINK_SPEED_10G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		speed_cap |= ETH_LINK_SPEED_25G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		speed_cap |= ETH_LINK_SPEED_40G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		speed_cap |= ETH_LINK_SPEED_50G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		speed_cap |= ETH_LINK_SPEED_100G;
	dev_info->speed_capa = speed_cap;

	return 0;
}

/* return 0 means link status changed, -1 means not changed */
int
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output q_link;
	struct rte_eth_link link;
	uint16_t link_duplex;

	memset(&q_link, 0, sizeof(q_link));
	memset(&link, 0, sizeof(link));

	qdev->ops->common->get_link(edev, &q_link);

	/* Link Speed */
	link.link_speed = q_link.speed;

	/* Link Mode */
	switch (q_link.duplex) {
	case QEDE_DUPLEX_HALF:
		link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	case QEDE_DUPLEX_FULL:
		link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case QEDE_DUPLEX_UNKNOWN:
	default:
		link_duplex = -1;
	}
	link.link_duplex = link_duplex;

	/* Link Status */
	link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;

	/* AN */
	link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
			    ETH_LINK_AUTONEG : ETH_LINK_FIXED;

	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
		link.link_speed, link.link_duplex,
		link.link_autoneg, link.link_status);

	return rte_eth_linkstatus_set(eth_dev, &link);
}

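/*
 * Application-side sketch (placeholder port id);
 * rte_eth_link_get_nowait() reaches the handler above through
 * dev_ops->link_update with wait_to_complete == 0:
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(0, &link);
 *	printf("link %s, %u Mbps\n",
 *	       link.link_status == ETH_LINK_UP ? "up" : "down",
 *	       link.link_speed);
 */
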
static int qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	enum _ecore_status_t ecore_status;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;

	PMD_INIT_FUNC_TRACE(edev);

	ecore_status = qed_configure_filter_rx_mode(eth_dev, type);

	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}

static int qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	enum _ecore_status_t ecore_status;

	PMD_INIT_FUNC_TRACE(edev);

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		ecore_status = qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
	else
		ecore_status = qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);

	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}

static void qede_poll_sp_sb_cb(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	qede_interrupt_action(&edev->hwfns[1]);

	rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
			       qede_poll_sp_sb_cb,
			       (void *)eth_dev);
	if (rc != 0) {
		DP_ERR(edev, "Unable to start periodic"
			     " timer rc %d\n", rc);
	}
}

static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* dev_stop() shall cleanup fp resources in hw but without releasing
	 * dma memories and sw structures so that dev_start() can be called
	 * by the app without reconfiguration. However, in dev_close() we
	 * can release all the resources and device can be brought up newly
	 */
	if (eth_dev->data->dev_started)
		qede_dev_stop(eth_dev);

	if (qdev->vport_started)
		qede_stop_vport(edev);
	qdev->vport_started = false;
	qede_fdir_dealloc_resc(eth_dev);
	qede_dealloc_fp_resc(eth_dev);

	eth_dev->data->nb_rx_queues = 0;
	eth_dev->data->nb_tx_queues = 0;

	qdev->ops->common->slowpath_stop(edev);
	qdev->ops->common->remove(edev);
	rte_intr_disable(&pci_dev->intr_handle);

	switch (pci_dev->intr_handle.type) {
	case RTE_INTR_HANDLE_UIO_INTX:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		rte_intr_callback_unregister(&pci_dev->intr_handle,
					     qede_interrupt_handler_intx,
					     (void *)eth_dev);
		break;
	default:
		rte_intr_callback_unregister(&pci_dev->intr_handle,
					     qede_interrupt_handler,
					     (void *)eth_dev);
	}

	if (ECORE_IS_CMT(edev))
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
}

static int
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	unsigned int i = 0, j = 0, qid, idx, hw_fn;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	ecore_get_vport_stats(edev, &stats);

	/* RX Stats */
	eth_stats->ipackets = stats.common.rx_ucast_pkts +
	    stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;

	eth_stats->ibytes = stats.common.rx_ucast_bytes +
	    stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;

	eth_stats->ierrors = stats.common.rx_crc_errors +
	    stats.common.rx_align_errors +
	    stats.common.rx_carrier_errors +
	    stats.common.rx_oversize_packets +
	    stats.common.rx_jabbers + stats.common.rx_undersize_packets;

	eth_stats->rx_nombuf = stats.common.no_buff_discards;

	eth_stats->imissed = stats.common.mftag_filter_discards +
	    stats.common.mac_filter_discards +
	    stats.common.no_buff_discards +
	    stats.common.brb_truncates + stats.common.brb_discards;

	/* TX stats */
	eth_stats->opackets = stats.common.tx_ucast_pkts +
	    stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;

	eth_stats->obytes = stats.common.tx_ucast_bytes +
	    stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;

	eth_stats->oerrors = stats.common.tx_err_drop_pkts;

	/* Queue stats */
	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(eth_dev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(eth_dev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	if (rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(eth_dev) ||
	    txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(eth_dev))
		DP_VERBOSE(edev, ECORE_MSG_DEBUG,
			   "Not all the queue stats will be displayed. Set"
			   " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
			   " appropriately and retry.\n");

	for (qid = 0; qid < eth_dev->data->nb_rx_queues; qid++) {
		eth_stats->q_ipackets[i] = 0;
		eth_stats->q_errors[i] = 0;

		for_each_hwfn(edev, hw_fn) {
			idx = qid * edev->num_hwfns + hw_fn;

			eth_stats->q_ipackets[i] +=
				*(uint64_t *)
					(((char *)(qdev->fp_array[idx].rxq)) +
					 offsetof(struct qede_rx_queue,
						  rcv_pkts));
			eth_stats->q_errors[i] +=
				*(uint64_t *)
					(((char *)(qdev->fp_array[idx].rxq)) +
					 offsetof(struct qede_rx_queue,
						  rx_hw_errors)) +
				*(uint64_t *)
					(((char *)(qdev->fp_array[idx].rxq)) +
					 offsetof(struct qede_rx_queue,
						  rx_alloc_errors));
		}

		i++;
		if (i == rxq_stat_cntrs)
			break;
	}

	for (qid = 0; qid < eth_dev->data->nb_tx_queues; qid++) {
		eth_stats->q_opackets[j] = 0;

		for_each_hwfn(edev, hw_fn) {
			idx = qid * edev->num_hwfns + hw_fn;

			txq = qdev->fp_array[idx].txq;
			eth_stats->q_opackets[j] +=
				*((uint64_t *)(uintptr_t)
					(((uint64_t)(uintptr_t)(txq)) +
					 offsetof(struct qede_tx_queue,
						  xmit_pkts)));
		}

		j++;
		if (j == txq_stat_cntrs)
			break;
	}

	return 0;
}

static unsigned int
qede_get_xstats_count(struct qede_dev *qdev) {
	struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;

	if (ECORE_IS_BB(&qdev->edev))
		return RTE_DIM(qede_xstats_strings) +
		       RTE_DIM(qede_bb_xstats_strings) +
		       (RTE_DIM(qede_rxq_xstats_strings) *
			QEDE_RSS_COUNT(dev) * qdev->edev.num_hwfns);
	else
		return RTE_DIM(qede_xstats_strings) +
		       RTE_DIM(qede_ah_xstats_strings) +
		       (RTE_DIM(qede_rxq_xstats_strings) *
			QEDE_RSS_COUNT(dev));
}

static int
qede_get_xstats_names(struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      __rte_unused unsigned int limit)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	const unsigned int stat_cnt = qede_get_xstats_count(qdev);
	unsigned int i, qid, hw_fn, stat_idx = 0;

	if (xstats_names == NULL)
		return stat_cnt;

	for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
		strlcpy(xstats_names[stat_idx].name,
			qede_xstats_strings[i].name,
			sizeof(xstats_names[stat_idx].name));
		stat_idx++;
	}

	if (ECORE_IS_BB(edev)) {
		for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
			strlcpy(xstats_names[stat_idx].name,
				qede_bb_xstats_strings[i].name,
				sizeof(xstats_names[stat_idx].name));
			stat_idx++;
		}
	} else {
		for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
			strlcpy(xstats_names[stat_idx].name,
				qede_ah_xstats_strings[i].name,
				sizeof(xstats_names[stat_idx].name));
			stat_idx++;
		}
	}

	for (qid = 0; qid < QEDE_RSS_COUNT(dev); qid++) {
		for_each_hwfn(edev, hw_fn) {
			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					 RTE_ETH_XSTATS_NAME_SIZE,
					 "%.4s%d.%d%s",
					 qede_rxq_xstats_strings[i].name, qid,
					 hw_fn,
					 qede_rxq_xstats_strings[i].name + 4);
				stat_idx++;
			}
		}
	}

	return stat_idx;
}

static int
qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		unsigned int n)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	const unsigned int num = qede_get_xstats_count(qdev);
	unsigned int i, qid, hw_fn, fpidx, stat_idx = 0;

	if (n < num)
		return num;

	ecore_get_vport_stats(edev, &stats);

	for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
		xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
					 qede_xstats_strings[i].offset);
		xstats[stat_idx].id = stat_idx;
		stat_idx++;
	}

	if (ECORE_IS_BB(edev)) {
		for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
			xstats[stat_idx].value =
				*(uint64_t *)(((char *)&stats) +
					      qede_bb_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	} else {
		for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
			xstats[stat_idx].value =
				*(uint64_t *)(((char *)&stats) +
					      qede_ah_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	}

	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
		for_each_hwfn(edev, hw_fn) {
			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
				fpidx = qid * edev->num_hwfns + hw_fn;
				xstats[stat_idx].value = *(uint64_t *)
					(((char *)(qdev->fp_array[fpidx].rxq)) +
					 qede_rxq_xstats_strings[i].offset);
				xstats[stat_idx].id = stat_idx;
				stat_idx++;
			}
		}
	}

	return stat_idx;
}

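/*
 * Application-side sketch for reading these counters (placeholder port
 * id; a real application would size the buffer from the first call):
 *
 *	int n = rte_eth_xstats_get(0, NULL, 0);	// query required count
 *	struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *
 *	if (xs != NULL && rte_eth_xstats_get(0, xs, n) == n)
 *		;	// xs[i].id indexes the names from xstats_get_names
 */
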
static int
qede_reset_xstats(struct rte_eth_dev *dev)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, true);

	return 0;
}

int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "setting link state %d\n", link_up);
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = link_up;
	rc = qdev->ops->common->set_link(edev, &link_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(edev, "Unable to set link state %d\n", link_up);

	return rc;
}

static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, true);
}

static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, false);
}

static int qede_reset_stats(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, false);

	return 0;
}

static int qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	enum qed_filter_rx_mode_type type =
	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
	enum _ecore_status_t ecore_status;

	ecore_status = qed_configure_filter_rx_mode(eth_dev, type);

	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}

static int qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	enum _ecore_status_t ecore_status;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		ecore_status = qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_PROMISC);
	else
		ecore_status = qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);

	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}

static int
qede_set_mc_addr_list(struct rte_eth_dev *eth_dev,
		      struct rte_ether_addr *mc_addrs,
		      uint32_t mc_addrs_num)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint8_t i;

	if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
		DP_ERR(edev, "Reached max multicast filters limit,"
			     " Please enable multicast promisc mode\n");
		return -ENOSPC;
	}

	for (i = 0; i < mc_addrs_num; i++) {
		if (!rte_is_multicast_ether_addr(&mc_addrs[i])) {
			DP_ERR(edev, "Not a valid multicast MAC\n");
			return -EINVAL;
		}
	}

	/* Flush all existing entries */
	if (qede_del_mcast_filters(eth_dev))
		return -1;

	/* Set new mcast list */
	return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
}

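/*
 * Application-side sketch (placeholder port id; 01:00:5e:00:00:01 is the
 * all-hosts group). rte_eth_dev_set_mc_addr_list() replaces the whole
 * filter set in one call, which is why the old entries are flushed above:
 *
 *	struct rte_ether_addr mc = {
 *		.addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
 *	};
 *
 *	rte_eth_dev_set_mc_addr_list(0, &mc, 1);
 */
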
/* Update MTU via vport-update without doing port restart.
 * The vport must be deactivated before calling this API.
 */
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	if (IS_PF(edev)) {
		struct ecore_sp_vport_update_params params;

		memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
		params.vport_id = 0;
		params.mtu = mtu;
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			params.opaque_fid = p_hwfn->hw_info.opaque_fid;
			rc = ecore_sp_vport_update(p_hwfn, &params,
						   ECORE_SPQ_MODE_EBLOCK, NULL);
			if (rc != ECORE_SUCCESS)
				goto err;
		}
	} else {
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
			if (rc == ECORE_INVAL) {
				DP_INFO(edev, "VF MTU Update TLV not supported\n");
				/* Recreate vport */
				rc = qede_start_vport(qdev, mtu);
				if (rc != ECORE_SUCCESS)
					goto err;

				/* Restore config lost due to vport stop */
				if (eth_dev->data->promiscuous)
					qede_promiscuous_enable(eth_dev);
				else
					qede_promiscuous_disable(eth_dev);

				if (eth_dev->data->all_multicast)
					qede_allmulticast_enable(eth_dev);
				else
					qede_allmulticast_disable(eth_dev);

				qede_vlan_offload_set(eth_dev,
						      qdev->vlan_offload_mask);
			} else if (rc != ECORE_SUCCESS) {
				goto err;
			}
		}
	}
	DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);

	return 0;

err:
	DP_ERR(edev, "Failed to update MTU\n");
	return -1;
}

static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;
	struct qed_link_params params;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
	if (fc_conf->autoneg) {
		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
			DP_ERR(edev, "Autoneg not supported\n");
			return -EINVAL;
		}
		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	}

	/* Pause is assumed to be supported (SUPPORTED_Pause) */
	if (fc_conf->mode == RTE_FC_FULL)
		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
					QED_LINK_PAUSE_RX_ENABLE);
	if (fc_conf->mode == RTE_FC_TX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
	if (fc_conf->mode == RTE_FC_RX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;

	params.link_up = true;
	(void)qdev->ops->common->set_link(edev, &params);

	return 0;
}

static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		fc_conf->autoneg = true;

	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
					 QED_LINK_PAUSE_TX_ENABLE))
		fc_conf->mode = RTE_FC_FULL;
	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

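/*
 * Application-side sketch pairing the two handlers above (placeholder
 * port id): read the current pause state, request full Rx/Tx pause, and
 * write it back:
 *
 *	struct rte_eth_fc_conf fc;
 *
 *	rte_eth_dev_flow_ctrl_get(0, &fc);
 *	fc.mode = RTE_FC_FULL;
 *	fc.autoneg = 0;
 *	rte_eth_dev_flow_ctrl_set(0, &fc);
 */
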
static const uint32_t *
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_TUNNEL_GENEVE,
		RTE_PTYPE_TUNNEL_GRE,
		/* Inner */
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (eth_dev->rx_pkt_burst == qede_recv_pkts ||
	    eth_dev->rx_pkt_burst == qede_recv_pkts_regular ||
	    eth_dev->rx_pkt_burst == qede_recv_pkts_cmt)
		return ptypes;

	return NULL;
}

static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
	*rss_caps = 0;
	*rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
}
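
/* For instance, hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP yields
 * rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV4_TCP. The IPV6 and IPV6_EX
 * flavors deliberately map onto the same ECORE capability bits.
 */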
int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params rss_params;
	struct ecore_hwfn *p_hwfn;
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint64_t hf = rss_conf->rss_hf;
	uint8_t len = rss_conf->rss_key_len;
	uint8_t idx, i, j, fpidx;
	int rc;

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	memset(&rss_params, 0, sizeof(rss_params));

	DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
		(unsigned long)hf, len, key);

	if (hf != 0) {
		/* Enabling RSS */
		DP_INFO(edev, "Enabling rss\n");

		/* RSS caps */
		qede_init_rss_caps(&rss_params.rss_caps, hf);
		rss_params.update_rss_capabilities = 1;

		/* RSS hash key */
		if (key) {
			if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
				DP_ERR(edev, "RSS key length exceeds limit\n");
				return -EINVAL;
			}
			DP_INFO(edev, "Applying user supplied hash key\n");
			rss_params.update_rss_key = 1;
			memcpy(&rss_params.rss_key, key, len);
		}
		rss_params.rss_enable = 1;
	}

	rss_params.update_rss_config = 1;
	/* tbl_size has to be set with capabilities */
	rss_params.rss_table_size_log = 7;
	vport_update_params.vport_id = 0;

	for_each_hwfn(edev, i) {
		/* pass the L2 handles instead of qids */
		for (j = 0 ; j < ECORE_RSS_IND_TABLE_SIZE ; j++) {
			idx = j % QEDE_RSS_COUNT(eth_dev);
			fpidx = idx * edev->num_hwfns + i;
			rss_params.rss_ind_table[j] =
				qdev->fp_array[fpidx].rxq->handle;
		}

		vport_update_params.rss_params = &rss_params;

		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			return rc;
		}
	}
	qdev->rss_enable = rss_params.rss_enable;

	/* Update local structure for hash query */
	qdev->rss_conf.rss_hf = hf;
	qdev->rss_conf.rss_key_len = len;
	if (qdev->rss_enable) {
		if (qdev->rss_conf.rss_key == NULL) {
			qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
			if (qdev->rss_conf.rss_key == NULL) {
				DP_ERR(edev, "No memory to store RSS key\n");
				return -ENOMEM;
			}
		}
		if (key && len) {
			DP_INFO(edev, "Storing RSS key\n");
			memcpy(qdev->rss_conf.rss_key, key, len);
		}
	} else if (!qdev->rss_enable && len == 0) {
		if (qdev->rss_conf.rss_key) {
			free(qdev->rss_conf.rss_key);
			qdev->rss_conf.rss_key = NULL;
			DP_INFO(edev, "Free RSS key\n");
		}
	}

	return 0;
}
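
/* Example (illustrative sketch): a typical application-side configuration
 * that lands in qede_rss_hash_update(); the all-zero key is a placeholder,
 * real deployments should use a random key.
 *
 *	uint8_t key[40] = {0};	(at most ECORE_RSS_KEY_SIZE * 4 bytes)
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = key,
 *		.rss_key_len = sizeof(key),
 *		.rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
 *	};
 *	ret = rte_eth_dev_rss_hash_update(port_id, &conf);
 *
 * Passing rss_hf == 0 with rss_key_len == 0 disables RSS and frees the
 * locally cached key.
 */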
static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
				  struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);

	rss_conf->rss_hf = qdev->rss_conf.rss_hf;
	rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;

	if (rss_conf->rss_key && qdev->rss_conf.rss_key)
		memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
		       rss_conf->rss_key_len);

	return 0;
}
int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params *params;
	uint16_t i, j, idx, fid, shift;
	struct ecore_hwfn *p_hwfn;
	uint8_t entry;
	int rc = 0;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
		       reta_size);
		return -EINVAL;
	}

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	params = rte_zmalloc("qede_rss", sizeof(*params), RTE_CACHE_LINE_SIZE);
	if (params == NULL) {
		DP_ERR(edev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	params->update_rss_ind_table = 1;
	params->rss_table_size_log = 7;
	params->update_rss_config = 1;

	vport_update_params.vport_id = 0;
	/* Use the current value of rss_enable */
	params->rss_enable = qdev->rss_enable;
	vport_update_params.rss_params = params;

	for_each_hwfn(edev, i) {
		for (j = 0; j < reta_size; j++) {
			idx = j / RTE_RETA_GROUP_SIZE;
			shift = j % RTE_RETA_GROUP_SIZE;
			if (reta_conf[idx].mask & (1ULL << shift)) {
				entry = reta_conf[idx].reta[shift];
				fid = entry * edev->num_hwfns + i;
				/* Pass rxq handles to ecore */
				params->rss_ind_table[j] =
					qdev->fp_array[fid].rxq->handle;
				/* Update the local copy for RETA query cmd */
				qdev->rss_ind_table[j] = entry;
			}
		}

		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			goto out;
		}
	}

out:
	rte_free(params);
	return rc;
}
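
/* Example (illustrative sketch): steering all 128 table entries to RX
 * queue 0 through the generic API; reta_conf is organized in groups of
 * RTE_RETA_GROUP_SIZE (64) entries, selected per-entry via the mask.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[2] = {0};
 *	for (i = 0; i < 2; i++) {
 *		reta_conf[i].mask = UINT64_MAX;
 *		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
 *			reta_conf[i].reta[j] = 0;	(target RX queue id)
 *	}
 *	ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
 */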
static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t i, idx, shift;
	uint8_t entry;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported\n",
		       reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = qdev->rss_ind_table[i];
			reta_conf[idx].reta[shift] = entry;
		}
	}

	return 0;
}
static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_dev_info dev_info = {0};
	struct qede_fastpath *fp;
	uint32_t max_rx_pkt_len;
	uint32_t frame_size;
	uint16_t bufsz;
	bool restart = false;
	int i, rc;

	PMD_INIT_FUNC_TRACE(edev);
	rc = qede_dev_info_get(dev, &dev_info);
	if (rc != 0) {
		DP_ERR(edev, "Error during getting ethernet device info\n");
		return rc;
	}
	max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
	frame_size = max_rx_pkt_len;
	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {
		DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
		       mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN -
		       QEDE_ETH_OVERHEAD);
		return -EINVAL;
	}
	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
			dev->data->min_rx_buf_size);
		return -EINVAL;
	}
	if (dev->data->dev_started) {
		dev->data->dev_started = 0;
		qede_dev_stop(dev);
		restart = true;
	}
	rte_delay_ms(1000);
	qdev->new_mtu = mtu;

	/* Fix up RX buf size for all queues of the port */
	for (i = 0; i < qdev->num_rx_queues; i++) {
		fp = &qdev->fp_array[i];
		if (fp->rxq != NULL) {
			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
			/* cache align the mbuf size to simplify rx_buf_size
			 * calculation
			 */
			bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
			rc = qede_calc_rx_buf_size(dev, bufsz, frame_size);
			if (rc < 0)
				return rc;

			fp->rxq->rx_buf_size = rc;
		}
	}
	if (max_rx_pkt_len > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	if (!dev->data->dev_started && restart) {
		qede_dev_start(dev);
		dev->data->dev_started = 1;
	}

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;

	return 0;
}
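
/* Example (illustrative): MTU changes arrive here via the generic API, e.g.
 *
 *	ret = rte_eth_dev_set_mtu(port_id, 9000);
 *
 * When the port is already started, this handler stops it, fixes up the RX
 * buffer size of every queue, and restarts it, so a brief traffic
 * interruption is expected.
 */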
static int
qede_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = qede_eth_dev_uninit(dev);
	if (ret)
		return ret;

	return qede_eth_dev_init(dev);
}
static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_reset = qede_dev_reset,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.filter_ctrl = qede_dev_filter_ctrl,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
	.fw_version_get = qede_fw_version_get,
	.get_reg = qede_get_regs,
};
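
/* VF ops table: same as the PF table above except for PF-only entries such
 * as flow_ctrl_set/get, filter_ctrl and get_reg.
 */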
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_reset = qede_dev_reset,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.fw_version_get = qede_fw_version_get,
};
static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(edev, &pf_params);
}
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[RTE_ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist = false;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	uint32_t int_mode;
	int rc;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	adapter->ethdev = eth_dev;
	edev = &adapter->edev;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	pci_addr = pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);

	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_ERR(edev, "Skipping device init from secondary process\n");
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* @DPDK */
	edev->vendor_id = pci_dev->id.vendor_id;
	edev->device_id = pci_dev->id.device_id;

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		rc = -EINVAL;
		goto err;
	}

	DP_INFO(edev, "Starting qede probe\n");
	rc = qed_ops->common->probe(edev, pci_dev, dp_module,
				    dp_level, is_vf);
	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		rc = -ENODEV;
		goto err;
	}
	qede_update_pf_params(edev);

	switch (pci_dev->intr_handle.type) {
	case RTE_INTR_HANDLE_UIO_INTX:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		int_mode = ECORE_INT_MODE_INTA;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler_intx,
					   (void *)eth_dev);
		break;
	default:
		int_mode = ECORE_INT_MODE_MSIX;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler,
					   (void *)eth_dev);
	}

	if (rte_intr_enable(&pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		rc = -ENODEV;
		goto err;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));

	params.int_mode = int_mode;
	params.drv_major = QEDE_PMD_VERSION_MAJOR;
	params.drv_minor = QEDE_PMD_VERSION_MINOR;
	params.drv_rev = QEDE_PMD_VERSION_REVISION;
	params.drv_eng = QEDE_PMD_VERSION_PATCH;
	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
		QEDE_PMD_DRV_VER_STR_SIZE);

	qede_assign_rxtx_handlers(eth_dev, true);
	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;

	/* For CMT mode device do periodic polling for slowpath events.
	 * This is required since uio device uses only one MSI-x
	 * interrupt vector but we need one for each engine.
	 */
	if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
		rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
				       qede_poll_sp_sb_cb,
				       (void *)eth_dev);
		if (rc != 0) {
			DP_ERR(edev, "Unable to start periodic"
				     " timer rc %d\n", rc);
			rc = -EINVAL;
			goto err;
		}
	}

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		rc = -ENODEV;
		goto err;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		rc = -ENODEV;
		goto err;
	}

	qede_alloc_etherdev(adapter, &dev_info);

	if (do_once) {
		qede_print_adapter_info(eth_dev);
		do_once = false;
	}

	adapter->ops->common->set_name(edev, edev->name);

	if (!is_vf)
		adapter->dev_info.num_mac_filters =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					   ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
				(uint32_t *)&adapter->dev_info.num_mac_filters);

	/* Allocate memory for storing MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					       (RTE_ETHER_ADDR_LEN *
						adapter->dev_info.num_mac_filters),
					       RTE_CACHE_LINE_SIZE);

	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENOMEM;
	}

	if (!is_vf) {
		rte_ether_addr_copy((struct rte_ether_addr *)edev->hwfns[0].
				    hw_info.hw_mac_addr,
				    &eth_dev->data->mac_addrs[0]);
		rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
				    &adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				rte_ether_addr_copy(
					(struct rte_ether_addr *)&vf_mac,
					&eth_dev->data->mac_addrs[0]);
				rte_ether_addr_copy(
					&eth_dev->data->mac_addrs[0],
					&adapter->primary_mac);
			} else {
				DP_ERR(edev, "No VF macaddr assigned\n");
			}
		}
	}

	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
	eth_dev->rx_descriptor_status = qede_rx_descriptor_status;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	SLIST_INIT(&adapter->arfs_info.arfs_list_head);
	SLIST_INIT(&adapter->vlan_list_head);
	SLIST_INIT(&adapter->uc_list_head);
	SLIST_INIT(&adapter->mc_list_head);
	adapter->mtu = RTE_ETHER_MTU;
	adapter->vport_started = false;

	/* VF tunnel offloads are enabled by default in the PF driver */
	adapter->vxlan.num_filters = 0;
	adapter->geneve.num_filters = 0;
	adapter->ipgre.num_filters = 0;
	if (is_vf) {
		adapter->vxlan.enable = true;
		adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
					     ETH_TUNNEL_FILTER_IVLAN;
		adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
		adapter->geneve.enable = true;
		adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
					      ETH_TUNNEL_FILTER_IVLAN;
		adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
		adapter->ipgre.enable = true;
		adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
					     ETH_TUNNEL_FILTER_IVLAN;
	} else {
		adapter->vxlan.enable = false;
		adapter->geneve.enable = false;
		adapter->ipgre.enable = false;
		qed_ops->sriov_configure(edev, pci_dev->max_vfs);
	}

	DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
		adapter->primary_mac.addr_bytes[0],
		adapter->primary_mac.addr_bytes[1],
		adapter->primary_mac.addr_bytes[2],
		adapter->primary_mac.addr_bytes[3],
		adapter->primary_mac.addr_bytes[4],
		adapter->primary_mac.addr_bytes[5]);

	DP_INFO(edev, "Device initialized\n");

	return 0;

err:
	if (do_once) {
		qede_print_adapter_info(eth_dev);
		do_once = false;
	}
	return rc;
}
static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 0);
}
static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}
static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
	},
	{.vendor_id = 0,}
};

static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
	},
	{.vendor_id = 0,}
};
static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qedevf_eth_dev_init);
}

static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
}

static struct rte_pci_driver rte_qedevf_pmd = {
	.id_table = pci_id_qedevf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qedevf_eth_dev_pci_probe,
	.remove = qedevf_eth_dev_pci_remove,
};

static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qede_eth_dev_init);
}

static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
}

static struct rte_pci_driver rte_qede_pmd = {
	.id_table = pci_id_qede_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qede_eth_dev_pci_probe,
	.remove = qede_eth_dev_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
RTE_LOG_REGISTER(qede_logtype_init, pmd.net.qede.init, NOTICE);
RTE_LOG_REGISTER(qede_logtype_driver, pmd.net.qede.driver, NOTICE);