2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
9 #include "qede_ethdev.h"
10 #include <rte_alarm.h>
11 #include <rte_version.h>
12 #include <rte_kvargs.h>
15 int qede_logtype_init;
16 int qede_logtype_driver;
18 static const struct qed_eth_ops *qed_ops;
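/* Period, in seconds, of the slowpath status-block poll timer; it is
 * re-armed from qede_poll_sp_sb_cb() and cancelled in qede_dev_close()
 * on CMT (dual-hwfn) devices.
 */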
19 static int64_t timer_period = 1;
21 /* VXLAN tunnel classification mapping */
22 const struct _qede_udp_tunn_types {
23 uint16_t rte_filter_type;
24 enum ecore_filter_ucast_type qede_type;
25 enum ecore_tunn_clss qede_tunn_clss;
27 } qede_tunn_types[] = {
29 ETH_TUNNEL_FILTER_OMAC,
31 ECORE_TUNN_CLSS_MAC_VLAN,
35 ETH_TUNNEL_FILTER_TENID,
37 ECORE_TUNN_CLSS_MAC_VNI,
41 ETH_TUNNEL_FILTER_IMAC,
42 ECORE_FILTER_INNER_MAC,
43 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
47 ETH_TUNNEL_FILTER_IVLAN,
48 ECORE_FILTER_INNER_VLAN,
49 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
53 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
54 ECORE_FILTER_MAC_VNI_PAIR,
55 ECORE_TUNN_CLSS_MAC_VNI,
59 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
62 "outer-mac and inner-mac"
65 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
68 "outer-mac and inner-vlan"
71 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
72 ECORE_FILTER_INNER_MAC_VNI_PAIR,
73 ECORE_TUNN_CLSS_INNER_MAC_VNI,
77 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
83 ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
84 ECORE_FILTER_INNER_PAIR,
85 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
86 "inner-mac and inner-vlan",
89 ETH_TUNNEL_FILTER_OIP,
95 ETH_TUNNEL_FILTER_IIP,
101 RTE_TUNNEL_FILTER_IMAC_IVLAN,
107 RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
113 RTE_TUNNEL_FILTER_IMAC_TENID,
119 RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
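/* Illustrative sketch (not part of the driver): a tunnel-filter handler would
 * typically scan the table above to translate an rte filter_type bit-mask into
 * the matching ecore ucast type and tunnel classification; "conf" here is a
 * hypothetical struct rte_eth_tunnel_filter_conf pointer:
 *
 *	for (i = 0; i < RTE_DIM(qede_tunn_types); i++) {
 *		if (conf->filter_type == qede_tunn_types[i].rte_filter_type) {
 *			type = qede_tunn_types[i].qede_type;
 *			clss = qede_tunn_types[i].qede_tunn_clss;
 *			break;
 *		}
 *	}
 */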
126 struct rte_qede_xstats_name_off {
127 char name[RTE_ETH_XSTATS_NAME_SIZE];
131 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
133 offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
134 {"rx_multicast_bytes",
135 offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
136 {"rx_broadcast_bytes",
137 offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
138 {"rx_unicast_packets",
139 offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
140 {"rx_multicast_packets",
141 offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
142 {"rx_broadcast_packets",
143 offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},
146 offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
147 {"tx_multicast_bytes",
148 offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
149 {"tx_broadcast_bytes",
150 offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
151 {"tx_unicast_packets",
152 offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
153 {"tx_multicast_packets",
154 offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
155 {"tx_broadcast_packets",
156 offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},
158 {"rx_64_byte_packets",
159 offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
160 {"rx_65_to_127_byte_packets",
161 offsetof(struct ecore_eth_stats_common,
162 rx_65_to_127_byte_packets)},
163 {"rx_128_to_255_byte_packets",
164 offsetof(struct ecore_eth_stats_common,
165 rx_128_to_255_byte_packets)},
166 {"rx_256_to_511_byte_packets",
167 offsetof(struct ecore_eth_stats_common,
168 rx_256_to_511_byte_packets)},
169 {"rx_512_to_1023_byte_packets",
170 offsetof(struct ecore_eth_stats_common,
171 rx_512_to_1023_byte_packets)},
172 {"rx_1024_to_1518_byte_packets",
173 offsetof(struct ecore_eth_stats_common,
174 rx_1024_to_1518_byte_packets)},
175 {"tx_64_byte_packets",
176 offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
177 {"tx_65_to_127_byte_packets",
178 offsetof(struct ecore_eth_stats_common,
179 tx_65_to_127_byte_packets)},
180 {"tx_128_to_255_byte_packets",
181 offsetof(struct ecore_eth_stats_common,
182 tx_128_to_255_byte_packets)},
183 {"tx_256_to_511_byte_packets",
184 offsetof(struct ecore_eth_stats_common,
185 tx_256_to_511_byte_packets)},
186 {"tx_512_to_1023_byte_packets",
187 offsetof(struct ecore_eth_stats_common,
188 tx_512_to_1023_byte_packets)},
189 {"tx_1024_to_1518_byte_packets",
190 offsetof(struct ecore_eth_stats_common,
191 tx_1024_to_1518_byte_packets)},
193 {"rx_mac_crtl_frames",
194 offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
195 {"tx_mac_control_frames",
196 offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
198 offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
200 offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
201 {"rx_priority_flow_control_frames",
202 offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
203 {"tx_priority_flow_control_frames",
204 offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},
207 offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
209 offsetof(struct ecore_eth_stats_common, rx_align_errors)},
210 {"rx_carrier_errors",
211 offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
212 {"rx_oversize_packet_errors",
213 offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
215 offsetof(struct ecore_eth_stats_common, rx_jabbers)},
216 {"rx_undersize_packet_errors",
217 offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
218 {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
219 {"rx_host_buffer_not_available",
220 offsetof(struct ecore_eth_stats_common, no_buff_discards)},
221 /* Number of packets discarded because they are bigger than MTU */
222 {"rx_packet_too_big_discards",
223 offsetof(struct ecore_eth_stats_common,
224 packet_too_big_discard)},
225 {"rx_ttl_zero_discards",
226 offsetof(struct ecore_eth_stats_common, ttl0_discard)},
227 {"rx_multi_function_tag_filter_discards",
228 offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
229 {"rx_mac_filter_discards",
230 offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
231 {"rx_hw_buffer_truncates",
232 offsetof(struct ecore_eth_stats_common, brb_truncates)},
233 {"rx_hw_buffer_discards",
234 offsetof(struct ecore_eth_stats_common, brb_discards)},
235 {"tx_error_drop_packets",
236 offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},
238 {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
239 {"rx_mac_unicast_packets",
240 offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
241 {"rx_mac_multicast_packets",
242 offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
243 {"rx_mac_broadcast_packets",
244 offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
246 offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
247 {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
248 {"tx_mac_unicast_packets",
249 offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
250 {"tx_mac_multicast_packets",
251 offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
252 {"tx_mac_broadcast_packets",
253 offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},
255 {"lro_coalesced_packets",
256 offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
257 {"lro_coalesced_events",
258 offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
260 offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
261 {"lro_not_coalesced_packets",
262 offsetof(struct ecore_eth_stats_common,
263 tpa_not_coalesced_pkts)},
264 {"lro_coalesced_bytes",
265 offsetof(struct ecore_eth_stats_common,
266 tpa_coalesced_bytes)},
269 static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
270 {"rx_1519_to_1522_byte_packets",
271 offsetof(struct ecore_eth_stats, bb) +
272 offsetof(struct ecore_eth_stats_bb,
273 rx_1519_to_1522_byte_packets)},
274 {"rx_1519_to_2047_byte_packets",
275 offsetof(struct ecore_eth_stats, bb) +
276 offsetof(struct ecore_eth_stats_bb,
277 rx_1519_to_2047_byte_packets)},
278 {"rx_2048_to_4095_byte_packets",
279 offsetof(struct ecore_eth_stats, bb) +
280 offsetof(struct ecore_eth_stats_bb,
281 rx_2048_to_4095_byte_packets)},
282 {"rx_4096_to_9216_byte_packets",
283 offsetof(struct ecore_eth_stats, bb) +
284 offsetof(struct ecore_eth_stats_bb,
285 rx_4096_to_9216_byte_packets)},
286 {"rx_9217_to_16383_byte_packets",
287 offsetof(struct ecore_eth_stats, bb) +
288 offsetof(struct ecore_eth_stats_bb,
289 rx_9217_to_16383_byte_packets)},
291 {"tx_1519_to_2047_byte_packets",
292 offsetof(struct ecore_eth_stats, bb) +
293 offsetof(struct ecore_eth_stats_bb,
294 tx_1519_to_2047_byte_packets)},
295 {"tx_2048_to_4095_byte_packets",
296 offsetof(struct ecore_eth_stats, bb) +
297 offsetof(struct ecore_eth_stats_bb,
298 tx_2048_to_4095_byte_packets)},
299 {"tx_4096_to_9216_byte_packets",
300 offsetof(struct ecore_eth_stats, bb) +
301 offsetof(struct ecore_eth_stats_bb,
302 tx_4096_to_9216_byte_packets)},
303 {"tx_9217_to_16383_byte_packets",
304 offsetof(struct ecore_eth_stats, bb) +
305 offsetof(struct ecore_eth_stats_bb,
306 tx_9217_to_16383_byte_packets)},
308 {"tx_lpi_entry_count",
309 offsetof(struct ecore_eth_stats, bb) +
310 offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
311 {"tx_total_collisions",
312 offsetof(struct ecore_eth_stats, bb) +
313 offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
316 static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
317 {"rx_1519_to_max_byte_packets",
318 offsetof(struct ecore_eth_stats, ah) +
319 offsetof(struct ecore_eth_stats_ah,
320 rx_1519_to_max_byte_packets)},
321 {"tx_1519_to_max_byte_packets",
322 offsetof(struct ecore_eth_stats, ah) +
323 offsetof(struct ecore_eth_stats_ah,
324 tx_1519_to_max_byte_packets)},
327 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
329 offsetof(struct qede_rx_queue, rx_segs)},
331 offsetof(struct qede_rx_queue, rx_hw_errors)},
332 {"rx_q_allocation_errors",
333 offsetof(struct qede_rx_queue, rx_alloc_errors)}
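/* Each entry above pairs a display name with a byte offset into the stats
 * struct, so an xstat value can be fetched generically, as qede_get_xstats()
 * does later in this file:
 *
 *	value = *(uint64_t *)((char *)&stats + qede_xstats_strings[i].offset);
 */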
336 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
338 ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
342 qede_interrupt_handler(void *param)
344 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
345 struct qede_dev *qdev = eth_dev->data->dev_private;
346 struct ecore_dev *edev = &qdev->edev;
348 qede_interrupt_action(ECORE_LEADING_HWFN(edev));
349 if (rte_intr_enable(eth_dev->intr_handle))
350 DP_ERR(edev, "rte_intr_enable failed\n");
354 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
356 rte_memcpy(&qdev->dev_info, info, sizeof(*info));
360 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
361 static void qede_print_adapter_info(struct qede_dev *qdev)
363 struct ecore_dev *edev = &qdev->edev;
364 struct qed_dev_info *info = &qdev->dev_info.common;
365 static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
366 static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
368 DP_INFO(edev, "*********************************\n");
369 DP_INFO(edev, " DPDK version:%s\n", rte_version());
370 DP_INFO(edev, " Chip details : %s %c%d\n",
371 ECORE_IS_BB(edev) ? "BB" : "AH",
372 'A' + edev->chip_rev,
373 (int)edev->chip_metal);
374 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
375 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
376 snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
377 ver_str, QEDE_PMD_VERSION);
378 DP_INFO(edev, " Driver version : %s\n", drv_ver);
379 DP_INFO(edev, " Firmware version : %s\n", ver_str);
381 snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
383 (info->mfw_rev >> 24) & 0xff,
384 (info->mfw_rev >> 16) & 0xff,
385 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
386 DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
387 DP_INFO(edev, " Firmware file : %s\n", fw_file);
388 DP_INFO(edev, "*********************************\n");
393 qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
395 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
396 struct ecore_sp_vport_start_params params;
397 struct ecore_hwfn *p_hwfn;
401 memset(&params, 0, sizeof(params));
404 /* @DPDK - Disable FW placement */
405 params.zero_placement_offset = 1;
406 for_each_hwfn(edev, i) {
407 p_hwfn = &edev->hwfns[i];
408 params.concrete_fid = p_hwfn->hw_info.concrete_fid;
409 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
410 rc = ecore_sp_vport_start(p_hwfn, &params);
411 if (rc != ECORE_SUCCESS) {
412 DP_ERR(edev, "Start V-PORT failed %d\n", rc);
416 ecore_reset_vport_stats(edev);
417 DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
423 qede_stop_vport(struct ecore_dev *edev)
425 struct ecore_hwfn *p_hwfn;
431 for_each_hwfn(edev, i) {
432 p_hwfn = &edev->hwfns[i];
433 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
435 if (rc != ECORE_SUCCESS) {
436 DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
444 /* Activate or deactivate vport via vport-update */
445 int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
447 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
448 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
449 struct ecore_sp_vport_update_params params;
450 struct ecore_hwfn *p_hwfn;
454 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
456 params.update_vport_active_rx_flg = 1;
457 params.update_vport_active_tx_flg = 1;
458 params.vport_active_rx_flg = flg;
459 params.vport_active_tx_flg = flg;
460 if (!qdev->enable_tx_switching) {
462 params.update_tx_switching_flg = 1;
463 params.tx_switching_flg = !flg;
464 DP_INFO(edev, "VF tx-switching is disabled\n");
467 for_each_hwfn(edev, i) {
468 p_hwfn = &edev->hwfns[i];
469 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
470 rc = ecore_sp_vport_update(p_hwfn, &params,
471 ECORE_SPQ_MODE_EBLOCK, NULL);
472 if (rc != ECORE_SUCCESS) {
473 DP_ERR(edev, "Failed to update vport\n");
477 DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");
483 qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
484 uint16_t mtu, bool enable)
486 /* Enable LRO in split mode */
487 sge_tpa_params->tpa_ipv4_en_flg = enable;
488 sge_tpa_params->tpa_ipv6_en_flg = enable;
489 sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
490 sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
491 /* set if tpa enable changes */
492 sge_tpa_params->update_tpa_en_flg = 1;
493 /* set if tpa parameters should be handled */
494 sge_tpa_params->update_tpa_param_flg = enable;
496 sge_tpa_params->max_buffers_per_cqe = 20;
497 /* Enable TPA in split mode. In this mode each TPA segment
498 * starts on a new BD, so there is one BD per segment.
500 sge_tpa_params->tpa_pkt_split_flg = 1;
501 sge_tpa_params->tpa_hdr_data_split_flg = 0;
502 sge_tpa_params->tpa_gro_consistent_flg = 0;
503 sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
504 sge_tpa_params->tpa_max_size = 0x7FFF;
505 sge_tpa_params->tpa_min_size_to_start = mtu / 2;
506 sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
509 /* Enable/disable LRO via vport-update */
510 int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
512 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
513 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
514 struct ecore_sp_vport_update_params params;
515 struct ecore_sge_tpa_params tpa_params;
516 struct ecore_hwfn *p_hwfn;
520 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
521 memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
522 qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
524 params.sge_tpa_params = &tpa_params;
525 for_each_hwfn(edev, i) {
526 p_hwfn = &edev->hwfns[i];
527 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
528 rc = ecore_sp_vport_update(p_hwfn, &params,
529 ECORE_SPQ_MODE_EBLOCK, NULL);
530 if (rc != ECORE_SUCCESS) {
531 DP_ERR(edev, "Failed to update LRO\n");
535 qdev->enable_lro = flg;
536 DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
541 /* Update MTU via vport-update without doing port restart.
542 * The vport must be deactivated before calling this API.
544 int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
546 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
547 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
548 struct ecore_sp_vport_update_params params;
549 struct ecore_hwfn *p_hwfn;
553 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
557 for_each_hwfn(edev, i) {
558 p_hwfn = &edev->hwfns[i];
559 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
560 rc = ecore_sp_vport_update(p_hwfn, &params,
561 ECORE_SPQ_MODE_EBLOCK, NULL);
562 if (rc != ECORE_SUCCESS) {
563 DP_ERR(edev, "Failed to update MTU\n");
567 DP_INFO(edev, "MTU updated to %u\n", mtu);
572 static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
574 memset(ucast, 0, sizeof(struct ecore_filter_ucast));
575 ucast->is_rx_filter = true;
576 ucast->is_tx_filter = true;
577 /* ucast->assert_on_error = true; - For debug */
581 qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
582 enum qed_filter_rx_mode_type type)
584 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
585 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
586 struct ecore_filter_accept_flags flags;
588 memset(&flags, 0, sizeof(flags));
590 flags.update_rx_mode_config = 1;
591 flags.update_tx_mode_config = 1;
592 flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
593 ECORE_ACCEPT_MCAST_MATCHED |
596 flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
597 ECORE_ACCEPT_MCAST_MATCHED |
600 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
601 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
603 flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
604 DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
606 } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
607 flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
608 } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
609 QED_FILTER_RX_MODE_TYPE_PROMISC)) {
610 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
611 ECORE_ACCEPT_MCAST_UNMATCHED;
614 return ecore_filter_accept_cmd(edev, 0, flags, false, false,
615 ECORE_SPQ_MODE_CB, NULL);
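/* Usage sketch: the promiscuous/allmulticast dev_ops below compose the rx-mode
 * type from the current port state before calling this helper, e.g. enabling
 * promiscuous mode while allmulticast is already on:
 *
 *	type = QED_FILTER_RX_MODE_TYPE_PROMISC |
 *	       QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
 *	qed_configure_filter_rx_mode(eth_dev, type);
 */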
619 qede_tunnel_update(struct qede_dev *qdev,
620 struct ecore_tunnel_info *tunn_info)
622 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
623 enum _ecore_status_t rc = ECORE_INVAL;
624 struct ecore_hwfn *p_hwfn;
625 struct ecore_ptt *p_ptt;
628 for_each_hwfn(edev, i) {
629 p_hwfn = &edev->hwfns[i];
630 p_ptt = IS_PF(edev) ? ecore_ptt_acquire(p_hwfn) : NULL;
631 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
632 tunn_info, ECORE_SPQ_MODE_CB, NULL);
634 ecore_ptt_release(p_hwfn, p_ptt);
636 if (rc != ECORE_SUCCESS)
644 qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
647 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
648 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
649 enum _ecore_status_t rc = ECORE_INVAL;
650 struct ecore_tunnel_info tunn;
652 if (qdev->vxlan.enable == enable)
653 return ECORE_SUCCESS;
655 memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
656 tunn.vxlan.b_update_mode = true;
657 tunn.vxlan.b_mode_enabled = enable;
658 tunn.b_update_rx_cls = true;
659 tunn.b_update_tx_cls = true;
660 tunn.vxlan.tun_cls = clss;
662 tunn.vxlan_port.b_update_port = true;
663 tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;
665 rc = qede_tunnel_update(qdev, &tunn);
666 if (rc == ECORE_SUCCESS) {
667 qdev->vxlan.enable = enable;
668 qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
669 DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
670 enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
672 DP_ERR(edev, "Failed to update tunn_clss %u\n",
680 qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
683 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
684 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
685 enum _ecore_status_t rc = ECORE_INVAL;
686 struct ecore_tunnel_info tunn;
688 memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
689 tunn.l2_geneve.b_update_mode = true;
690 tunn.l2_geneve.b_mode_enabled = enable;
691 tunn.ip_geneve.b_update_mode = true;
692 tunn.ip_geneve.b_mode_enabled = enable;
693 tunn.l2_geneve.tun_cls = clss;
694 tunn.ip_geneve.tun_cls = clss;
695 tunn.b_update_rx_cls = true;
696 tunn.b_update_tx_cls = true;
698 tunn.geneve_port.b_update_port = true;
699 tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;
701 rc = qede_tunnel_update(qdev, &tunn);
702 if (rc == ECORE_SUCCESS) {
703 qdev->geneve.enable = enable;
704 qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
705 DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
706 enable ? "enabled" : "disabled", qdev->geneve.udp_port);
708 DP_ERR(edev, "Failed to update tunn_clss %u\n",
716 qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
717 enum rte_eth_tunnel_type tunn_type, bool enable)
722 case RTE_TUNNEL_TYPE_VXLAN:
723 rc = qede_vxlan_enable(eth_dev, clss, enable);
725 case RTE_TUNNEL_TYPE_GENEVE:
726 rc = qede_geneve_enable(eth_dev, clss, enable);
737 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
740 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
741 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
742 struct qede_ucast_entry *tmp = NULL;
743 struct qede_ucast_entry *u;
744 struct ether_addr *mac_addr;
746 mac_addr = (struct ether_addr *)ucast->mac;
748 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
749 if ((memcmp(mac_addr, &tmp->mac,
750 ETHER_ADDR_LEN) == 0) &&
751 ucast->vni == tmp->vni &&
752 ucast->vlan == tmp->vlan) {
753 DP_ERR(edev, "Unicast MAC is already added"
754 " with vlan = %u, vni = %u\n",
755 ucast->vlan, ucast->vni);
759 u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
760 RTE_CACHE_LINE_SIZE);
762 DP_ERR(edev, "Failed to allocate memory for ucast\n");
765 ether_addr_copy(mac_addr, &u->mac);
766 u->vlan = ucast->vlan;
768 SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
771 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
772 if ((memcmp(mac_addr, &tmp->mac,
773 ETHER_ADDR_LEN) == 0) &&
774 ucast->vlan == tmp->vlan &&
775 ucast->vni == tmp->vni)
779 DP_INFO(edev, "Unicast MAC is not found\n");
782 SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
790 qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
793 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
794 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
795 struct ether_addr *mac_addr;
796 struct qede_mcast_entry *tmp = NULL;
797 struct qede_mcast_entry *m;
799 mac_addr = (struct ether_addr *)mcast->mac;
801 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
802 if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
804 "Multicast MAC is already added\n");
808 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
809 RTE_CACHE_LINE_SIZE);
812 "Did not allocate memory for mcast\n");
815 ether_addr_copy(mac_addr, &m->mac);
816 SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
819 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
820 if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
824 DP_INFO(edev, "Multicast MAC is not found\n");
827 SLIST_REMOVE(&qdev->mc_list_head, tmp,
828 qede_mcast_entry, list);
835 static enum _ecore_status_t
836 qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
839 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
840 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
841 enum _ecore_status_t rc;
842 struct ecore_filter_mcast mcast;
843 struct qede_mcast_entry *tmp;
847 if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
849 if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
851 "Mcast filter table limit exceeded, "
852 "Please enable mcast promisc mode\n");
856 rc = qede_mcast_filter(eth_dev, ucast, add);
858 DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
859 memset(&mcast, 0, sizeof(mcast));
860 mcast.num_mc_addrs = qdev->num_mc_addr;
861 mcast.opcode = ECORE_FILTER_ADD;
862 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
863 ether_addr_copy(&tmp->mac,
864 (struct ether_addr *)&mcast.mac[j]);
867 rc = ecore_filter_mcast_cmd(edev, &mcast,
868 ECORE_SPQ_MODE_CB, NULL);
870 if (rc != ECORE_SUCCESS) {
871 DP_ERR(edev, "Failed to add multicast filter"
872 " rc = %d, op = %d\n", rc, add);
874 } else { /* Unicast */
876 if (qdev->num_uc_addr >=
877 qdev->dev_info.num_mac_filters) {
879 "Ucast filter table limit exceeded,"
880 " Please enable promisc mode\n");
884 rc = qede_ucast_filter(eth_dev, ucast, add);
886 rc = ecore_filter_ucast_cmd(edev, ucast,
887 ECORE_SPQ_MODE_CB, NULL);
888 if (rc != ECORE_SUCCESS) {
889 DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
898 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
899 __rte_unused uint32_t index, __rte_unused uint32_t pool)
901 struct ecore_filter_ucast ucast;
904 qede_set_ucast_cmn_params(&ucast);
905 ucast.type = ECORE_FILTER_MAC;
906 ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
907 re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
912 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
914 struct qede_dev *qdev = eth_dev->data->dev_private;
915 struct ecore_dev *edev = &qdev->edev;
916 struct ecore_filter_ucast ucast;
918 PMD_INIT_FUNC_TRACE(edev);
920 if (index >= qdev->dev_info.num_mac_filters) {
921 DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
922 index, qdev->dev_info.num_mac_filters);
926 qede_set_ucast_cmn_params(&ucast);
927 ucast.opcode = ECORE_FILTER_REMOVE;
928 ucast.type = ECORE_FILTER_MAC;
930 /* Use the index maintained by rte */
931 ether_addr_copy(&eth_dev->data->mac_addrs[index],
932 (struct ether_addr *)&ucast.mac);
934 ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
938 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
940 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
941 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
943 if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
944 mac_addr->addr_bytes)) {
945 DP_ERR(edev, "Setting MAC address is not allowed\n");
946 ether_addr_copy(&qdev->primary_mac,
947 &eth_dev->data->mac_addrs[0]);
951 qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
954 static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
956 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
957 struct ecore_sp_vport_update_params params;
958 struct ecore_hwfn *p_hwfn;
962 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
964 params.update_accept_any_vlan_flg = 1;
965 params.accept_any_vlan = flg;
966 for_each_hwfn(edev, i) {
967 p_hwfn = &edev->hwfns[i];
968 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
969 rc = ecore_sp_vport_update(p_hwfn, &params,
970 ECORE_SPQ_MODE_EBLOCK, NULL);
971 if (rc != ECORE_SUCCESS) {
972 DP_ERR(edev, "Failed to configure accept-any-vlan\n");
977 DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
980 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
982 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
983 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
984 struct ecore_sp_vport_update_params params;
985 struct ecore_hwfn *p_hwfn;
989 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
991 params.update_inner_vlan_removal_flg = 1;
992 params.inner_vlan_removal_flg = flg;
993 for_each_hwfn(edev, i) {
994 p_hwfn = &edev->hwfns[i];
995 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
996 rc = ecore_sp_vport_update(p_hwfn, &params,
997 ECORE_SPQ_MODE_EBLOCK, NULL);
998 if (rc != ECORE_SUCCESS) {
999 DP_ERR(edev, "Failed to update vport\n");
1004 DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
1008 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
1009 uint16_t vlan_id, int on)
1011 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1012 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1013 struct qed_dev_eth_info *dev_info = &qdev->dev_info;
1014 struct qede_vlan_entry *tmp = NULL;
1015 struct qede_vlan_entry *vlan;
1016 struct ecore_filter_ucast ucast;
1020 if (qdev->configured_vlans == dev_info->num_vlan_filters) {
1021 DP_ERR(edev, "Reached max VLAN filter limit,"
1022 " enabling accept_any_vlan\n");
1023 qede_config_accept_any_vlan(qdev, true);
1027 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
1028 if (tmp->vid == vlan_id) {
1029 DP_ERR(edev, "VLAN %u already configured\n",
1035 vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
1036 RTE_CACHE_LINE_SIZE);
1039 DP_ERR(edev, "Failed to allocate memory for VLAN\n");
1043 qede_set_ucast_cmn_params(&ucast);
1044 ucast.opcode = ECORE_FILTER_ADD;
1045 ucast.type = ECORE_FILTER_VLAN;
1046 ucast.vlan = vlan_id;
1047 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
1050 DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
1054 vlan->vid = vlan_id;
1055 SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
1056 qdev->configured_vlans++;
1057 DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
1058 vlan_id, qdev->configured_vlans);
1061 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
1062 if (tmp->vid == vlan_id)
1067 if (qdev->configured_vlans == 0) {
1069 "No VLAN filters configured yet\n");
1073 DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
1077 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);
1079 qede_set_ucast_cmn_params(&ucast);
1080 ucast.opcode = ECORE_FILTER_REMOVE;
1081 ucast.type = ECORE_FILTER_VLAN;
1082 ucast.vlan = vlan_id;
1083 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
1086 DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
1089 qdev->configured_vlans--;
1090 DP_INFO(edev, "VLAN %u removed, configured_vlans %u\n",
1091 vlan_id, qdev->configured_vlans);
1098 static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
1100 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1101 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1102 struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1104 if (mask & ETH_VLAN_STRIP_MASK) {
1105 if (rxmode->hw_vlan_strip)
1106 (void)qede_vlan_stripping(eth_dev, 1);
1108 (void)qede_vlan_stripping(eth_dev, 0);
1111 if (mask & ETH_VLAN_FILTER_MASK) {
1112 /* VLAN filtering kicks in when a VLAN is added */
1113 if (rxmode->hw_vlan_filter) {
1114 qede_vlan_filter_set(eth_dev, 0, 1);
1116 if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
1118 " Please remove existing VLAN filters"
1119 " before disabling VLAN filtering\n");
1120 /* Signal app that VLAN filtering is still
1123 rxmode->hw_vlan_filter = true;
1125 qede_vlan_filter_set(eth_dev, 0, 0);
1130 if (mask & ETH_VLAN_EXTEND_MASK)
1131 DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
1132 " and classification is based on outer tag only\n");
1134 DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
1135 mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
1140 static void qede_prandom_bytes(uint32_t *buff)
1144 srand((unsigned int)time(NULL));
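/* The driver-default RSS key is thus pseudo-random, seeded from the wall
 * clock; applications needing a deterministic key should supply
 * rss_conf.rss_key (see qede_config_rss() below).
 */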
1145 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
1149 int qede_config_rss(struct rte_eth_dev *eth_dev)
1151 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1152 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
1153 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1155 uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
1156 struct rte_eth_rss_reta_entry64 reta_conf[2];
1157 struct rte_eth_rss_conf rss_conf;
1158 uint32_t i, id, pos, q;
1160 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
1161 if (!rss_conf.rss_key) {
1162 DP_INFO(edev, "Applying driver default key\n");
1163 rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
1164 qede_prandom_bytes(&def_rss_key[0]);
1165 rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
1168 /* Configure RSS hash */
1169 if (qede_rss_hash_update(eth_dev, &rss_conf))
1172 /* Configure default RETA */
1173 memset(reta_conf, 0, sizeof(reta_conf));
1174 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
1175 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
1177 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
1178 id = i / RTE_RETA_GROUP_SIZE;
1179 pos = i % RTE_RETA_GROUP_SIZE;
1180 q = i % QEDE_RSS_COUNT(qdev);
1181 reta_conf[id].reta[pos] = q;
1183 if (qede_rss_reta_update(eth_dev, &reta_conf[0],
1184 ECORE_RSS_IND_TABLE_SIZE))
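/* Worked example: with QEDE_RSS_COUNT() == 4, the default RETA built above
 * maps indirection entries 0, 1, 2, 3, 4, 5, ... to queues 0, 1, 2, 3, 0, 1,
 * ... i.e. a plain round-robin spread of i % 4 across the whole table.
 */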
1190 static void qede_fastpath_start(struct ecore_dev *edev)
1192 struct ecore_hwfn *p_hwfn;
1195 for_each_hwfn(edev, i) {
1196 p_hwfn = &edev->hwfns[i];
1197 ecore_hw_start_fastpath(p_hwfn);
1201 static int qede_dev_start(struct rte_eth_dev *eth_dev)
1203 struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1204 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1205 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1207 PMD_INIT_FUNC_TRACE(edev);
1209 /* Update MTU only if it has changed */
1210 if (qdev->mtu != qdev->new_mtu) {
1211 if (qede_update_mtu(eth_dev, qdev->new_mtu))
1213 qdev->mtu = qdev->new_mtu;
1216 /* Configure TPA parameters */
1217 if (rxmode->enable_lro) {
1218 if (qede_enable_tpa(eth_dev, true))
1220 /* Enable scatter mode for LRO */
1221 if (!rxmode->enable_scatter)
1222 eth_dev->data->scattered_rx = 1;
1226 if (qede_start_queues(eth_dev))
1229 /* Newer SR-IOV PF driver expects RX/TX queues to be started before
1230 * enabling RSS. Hence RSS configuration is deferred up to this point.
1231 * Also, we would like to retain similar behavior in PF case, so we
1232 * don't do PF/VF specific check here.
1234 if (rxmode->mq_mode == ETH_MQ_RX_RSS)
1235 if (qede_config_rss(eth_dev))
1239 if (qede_activate_vport(eth_dev, true))
1242 /* Bring-up the link */
1243 qede_dev_set_link_state(eth_dev, true);
1245 /* Update link status */
1246 qede_link_update(eth_dev, 0);
1248 /* Start/resume traffic */
1249 qede_fastpath_start(edev);
1251 DP_INFO(edev, "Device started\n");
1255 DP_ERR(edev, "Device start failed\n");
1256 return -1; /* common error code is < 0 */
1259 static void qede_dev_stop(struct rte_eth_dev *eth_dev)
1261 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1262 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1264 PMD_INIT_FUNC_TRACE(edev);
1267 if (qede_activate_vport(eth_dev, false))
1270 if (qdev->enable_lro)
1271 qede_enable_tpa(eth_dev, false);
1274 qede_stop_queues(eth_dev);
1276 /* Disable traffic */
1277 ecore_hw_stop_fastpath(edev); /* TBD - loop */
1279 /* Bring the link down */
1280 qede_dev_set_link_state(eth_dev, false);
1282 DP_INFO(edev, "Device is stopped\n");
1285 #define QEDE_TX_SWITCHING "vf_txswitch"
1287 const char *valid_args[] = {
1292 static int qede_args_check(const char *key, const char *val, void *opaque)
1296 struct rte_eth_dev *eth_dev = opaque;
1297 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1298 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
1299 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1303 tmp = strtoul(val, NULL, 0);
1305 DP_INFO(edev, "%s: \"%s\" is not a valid integer", key, val);
1309 if (strcmp(QEDE_TX_SWITCHING, key) == 0)
1310 qdev->enable_tx_switching = !!tmp;
1315 static int qede_args(struct rte_eth_dev *eth_dev)
1317 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
1318 struct rte_kvargs *kvlist;
1319 struct rte_devargs *devargs;
1323 devargs = pci_dev->device.devargs;
1325 return 0; /* return success */
1327 kvlist = rte_kvargs_parse(devargs->args, valid_args);
1331 /* Process parameters. */
1332 for (i = 0; (valid_args[i] != NULL); ++i) {
1333 if (rte_kvargs_count(kvlist, valid_args[i])) {
1334 ret = rte_kvargs_process(kvlist, valid_args[i],
1335 qede_args_check, eth_dev);
1336 if (ret != ECORE_SUCCESS) {
1337 rte_kvargs_free(kvlist);
1342 rte_kvargs_free(kvlist);
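/* Usage sketch (assuming the EAL PCI whitelist devargs syntax and a
 * hypothetical BDF): Tx switching can be disabled on a VF from the command
 * line; qede_args_check() parses the value and clears enable_tx_switching:
 *
 *	testpmd -w 05:00.0,vf_txswitch=0 -- -i
 */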
1347 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
1349 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1350 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1351 struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1354 PMD_INIT_FUNC_TRACE(edev);
1356 /* Check requirements for 100G mode */
1357 if (ECORE_IS_CMT(edev)) {
1358 if (eth_dev->data->nb_rx_queues < 2 ||
1359 eth_dev->data->nb_tx_queues < 2) {
1360 DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
1364 if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
1365 (eth_dev->data->nb_tx_queues % 2 != 0)) {
1367 "100G mode needs even no. of RX/TX queues\n");
1372 /* We need to have a minimum of 1 RX queue. There is no min check in
1373 * rte_eth_dev_configure(), so we are checking it here.
1375 if (eth_dev->data->nb_rx_queues == 0) {
1376 DP_ERR(edev, "Minimum one RX queue is required\n");
1380 /* Enable Tx switching by default */
1381 qdev->enable_tx_switching = 1;
1383 /* Parse devargs and fix up rxmode */
1384 if (qede_args(eth_dev))
1387 /* Sanity checks and throw warnings */
1388 if (rxmode->enable_scatter)
1389 eth_dev->data->scattered_rx = 1;
1391 if (!rxmode->hw_strip_crc)
1392 DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
1394 if (!rxmode->hw_ip_checksum)
1395 DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
1397 if (rxmode->header_split)
1398 DP_INFO(edev, "Header split enable is not supported\n");
1399 if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
1401 DP_ERR(edev, "Unsupported multi-queue mode\n");
1404 /* Flow director mode check */
1405 if (qede_check_fdir_support(eth_dev))
1408 /* Deallocate resources if held previously. It is needed only if the
1409 * queue count has been changed from the previous configuration. If it's
1410 * going to change then it means RX/TX queue setup will be called
1411 * again and the fastpath pointers will be reinitialized there.
1413 if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
1414 qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
1415 qede_dealloc_fp_resc(eth_dev);
1416 /* Proceed with updated queue count */
1417 qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
1418 qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
1419 if (qede_alloc_fp_resc(qdev))
1423 /* VF's MTU has to be set using vport-start, whereas
1424 * PF's MTU can be updated via vport-update.
1427 if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
1430 if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
1434 qdev->mtu = rxmode->max_rx_pkt_len;
1435 qdev->new_mtu = qdev->mtu;
1437 /* Enable VLAN offloads by default */
1438 ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
1439 ETH_VLAN_FILTER_MASK |
1440 ETH_VLAN_EXTEND_MASK);
1444 DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
1445 QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
1450 /* Info about HW descriptor ring limitations */
1451 static const struct rte_eth_desc_lim qede_rx_desc_lim = {
1452 .nb_max = 0x8000, /* 32K */
1454 .nb_align = 128 /* lowest common multiple */
1457 static const struct rte_eth_desc_lim qede_tx_desc_lim = {
1458 .nb_max = 0x8000, /* 32K */
1461 .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
1462 .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
1466 qede_dev_info_get(struct rte_eth_dev *eth_dev,
1467 struct rte_eth_dev_info *dev_info)
1469 struct qede_dev *qdev = eth_dev->data->dev_private;
1470 struct ecore_dev *edev = &qdev->edev;
1471 struct qed_link_output link;
1472 uint32_t speed_cap = 0;
1474 PMD_INIT_FUNC_TRACE(edev);
1476 dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1477 dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
1478 dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
1479 dev_info->rx_desc_lim = qede_rx_desc_lim;
1480 dev_info->tx_desc_lim = qede_tx_desc_lim;
1483 dev_info->max_rx_queues = (uint16_t)RTE_MIN(
1484 QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
1486 dev_info->max_rx_queues = (uint16_t)RTE_MIN(
1487 QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
1488 dev_info->max_tx_queues = dev_info->max_rx_queues;
1490 dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
1491 dev_info->max_vfs = 0;
1492 dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
1493 dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
1494 dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
1496 dev_info->default_txconf = (struct rte_eth_txconf) {
1497 .txq_flags = QEDE_TXQ_FLAGS,
1500 dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
1501 DEV_RX_OFFLOAD_IPV4_CKSUM |
1502 DEV_RX_OFFLOAD_UDP_CKSUM |
1503 DEV_RX_OFFLOAD_TCP_CKSUM |
1504 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1505 DEV_RX_OFFLOAD_TCP_LRO);
1507 dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
1508 DEV_TX_OFFLOAD_IPV4_CKSUM |
1509 DEV_TX_OFFLOAD_UDP_CKSUM |
1510 DEV_TX_OFFLOAD_TCP_CKSUM |
1511 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1512 DEV_TX_OFFLOAD_TCP_TSO |
1513 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1514 DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
1516 memset(&link, 0, sizeof(struct qed_link_output));
1517 qdev->ops->common->get_link(edev, &link);
1518 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1519 speed_cap |= ETH_LINK_SPEED_1G;
1520 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1521 speed_cap |= ETH_LINK_SPEED_10G;
1522 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1523 speed_cap |= ETH_LINK_SPEED_25G;
1524 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1525 speed_cap |= ETH_LINK_SPEED_40G;
1526 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1527 speed_cap |= ETH_LINK_SPEED_50G;
1528 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1529 speed_cap |= ETH_LINK_SPEED_100G;
1530 dev_info->speed_capa = speed_cap;
1533 /* return 0 means link status changed, -1 means not changed */
1535 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
1537 struct qede_dev *qdev = eth_dev->data->dev_private;
1538 struct ecore_dev *edev = &qdev->edev;
1539 uint16_t link_duplex;
1540 struct qed_link_output link;
1541 struct rte_eth_link *curr = &eth_dev->data->dev_link;
1543 memset(&link, 0, sizeof(struct qed_link_output));
1544 qdev->ops->common->get_link(edev, &link);
1547 curr->link_speed = link.speed;
1550 switch (link.duplex) {
1551 case QEDE_DUPLEX_HALF:
1552 link_duplex = ETH_LINK_HALF_DUPLEX;
1554 case QEDE_DUPLEX_FULL:
1555 link_duplex = ETH_LINK_FULL_DUPLEX;
1557 case QEDE_DUPLEX_UNKNOWN:
1561 curr->link_duplex = link_duplex;
1564 curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
1567 curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
1568 ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1570 DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
1571 curr->link_speed, curr->link_duplex,
1572 curr->link_autoneg, curr->link_status);
1574 /* return 0 means link status changed, -1 means not changed */
1575 return ((curr->link_status == link.link_up) ? -1 : 0);
1578 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
1580 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
1581 struct qede_dev *qdev = eth_dev->data->dev_private;
1582 struct ecore_dev *edev = &qdev->edev;
1584 PMD_INIT_FUNC_TRACE(edev);
1587 enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
1589 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1590 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1592 qed_configure_filter_rx_mode(eth_dev, type);
1595 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
1597 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
1598 struct qede_dev *qdev = eth_dev->data->dev_private;
1599 struct ecore_dev *edev = &qdev->edev;
1601 PMD_INIT_FUNC_TRACE(edev);
1604 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1605 qed_configure_filter_rx_mode(eth_dev,
1606 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
1608 qed_configure_filter_rx_mode(eth_dev,
1609 QED_FILTER_RX_MODE_TYPE_REGULAR);
1612 static void qede_poll_sp_sb_cb(void *param)
1614 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1615 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1616 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
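/* CMT (100G) devices have two hwfns; service the slowpath status block of
 * each engine.
 */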
1619 qede_interrupt_action(ECORE_LEADING_HWFN(edev));
1620 qede_interrupt_action(&edev->hwfns[1]);
1622 rc = rte_eal_alarm_set(timer_period * US_PER_S,
1626 DP_ERR(edev, "Unable to start periodic"
1627 " timer rc %d\n", rc);
1628 assert(false && "Unable to start periodic timer");
1632 static void qede_dev_close(struct rte_eth_dev *eth_dev)
1634 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1635 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1636 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1638 PMD_INIT_FUNC_TRACE(edev);
1640 /* dev_stop() shall clean up fp resources in hw but without releasing
1641 * dma memories and sw structures so that dev_start() can be called
1642 * by the app without reconfiguration. However, in dev_close() we
1643 * can release all the resources and the device can be brought up anew.
1645 if (eth_dev->data->dev_started)
1646 qede_dev_stop(eth_dev);
1648 qede_stop_vport(edev);
1649 qede_fdir_dealloc_resc(eth_dev);
1650 qede_dealloc_fp_resc(eth_dev);
1652 eth_dev->data->nb_rx_queues = 0;
1653 eth_dev->data->nb_tx_queues = 0;
1655 qdev->ops->common->slowpath_stop(edev);
1656 qdev->ops->common->remove(edev);
1657 rte_intr_disable(&pci_dev->intr_handle);
1658 rte_intr_callback_unregister(&pci_dev->intr_handle,
1659 qede_interrupt_handler, (void *)eth_dev);
1660 if (ECORE_IS_CMT(edev))
1661 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
1665 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
1667 struct qede_dev *qdev = eth_dev->data->dev_private;
1668 struct ecore_dev *edev = &qdev->edev;
1669 struct ecore_eth_stats stats;
1670 unsigned int i = 0, j = 0, qid;
1671 unsigned int rxq_stat_cntrs, txq_stat_cntrs;
1672 struct qede_tx_queue *txq;
1674 ecore_get_vport_stats(edev, &stats);
1677 eth_stats->ipackets = stats.common.rx_ucast_pkts +
1678 stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;
1680 eth_stats->ibytes = stats.common.rx_ucast_bytes +
1681 stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;
1683 eth_stats->ierrors = stats.common.rx_crc_errors +
1684 stats.common.rx_align_errors +
1685 stats.common.rx_carrier_errors +
1686 stats.common.rx_oversize_packets +
1687 stats.common.rx_jabbers + stats.common.rx_undersize_packets;
1689 eth_stats->rx_nombuf = stats.common.no_buff_discards;
1691 eth_stats->imissed = stats.common.mftag_filter_discards +
1692 stats.common.mac_filter_discards +
1693 stats.common.no_buff_discards +
1694 stats.common.brb_truncates + stats.common.brb_discards;
1697 eth_stats->opackets = stats.common.tx_ucast_pkts +
1698 stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;
1700 eth_stats->obytes = stats.common.tx_ucast_bytes +
1701 stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;
1703 eth_stats->oerrors = stats.common.tx_err_drop_pkts;
1706 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1707 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1708 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
1709 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1710 if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
1711 (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
1712 DP_VERBOSE(edev, ECORE_MSG_DEBUG,
1713 "Not all the queue stats will be displayed. Set"
1714 " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
1715 " appropriately and retry.\n");
1718 eth_stats->q_ipackets[i] =
1720 ((char *)(qdev->fp_array[qid].rxq)) +
1721 offsetof(struct qede_rx_queue,
1723 eth_stats->q_errors[i] =
1725 ((char *)(qdev->fp_array[qid].rxq)) +
1726 offsetof(struct qede_rx_queue,
1729 ((char *)(qdev->fp_array[qid].rxq)) +
1730 offsetof(struct qede_rx_queue,
1733 if (i == rxq_stat_cntrs)
1738 txq = qdev->fp_array[qid].txq;
1739 eth_stats->q_opackets[j] =
1740 *((uint64_t *)(uintptr_t)
1741 (((uint64_t)(uintptr_t)(txq)) +
1742 offsetof(struct qede_tx_queue,
1745 if (j == txq_stat_cntrs)
1753 qede_get_xstats_count(struct qede_dev *qdev) {
1754 if (ECORE_IS_BB(&qdev->edev))
1755 return RTE_DIM(qede_xstats_strings) +
1756 RTE_DIM(qede_bb_xstats_strings) +
1757 (RTE_DIM(qede_rxq_xstats_strings) *
1758 RTE_MIN(QEDE_RSS_COUNT(qdev),
1759 RTE_ETHDEV_QUEUE_STAT_CNTRS));
1761 return RTE_DIM(qede_xstats_strings) +
1762 RTE_DIM(qede_ah_xstats_strings) +
1763 (RTE_DIM(qede_rxq_xstats_strings) *
1764 RTE_MIN(QEDE_RSS_COUNT(qdev),
1765 RTE_ETHDEV_QUEUE_STAT_CNTRS));
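/* Worked example: a BB adapter exposing 6 RX queues with
 * RTE_ETHDEV_QUEUE_STAT_CNTRS >= 6 reports RTE_DIM(qede_xstats_strings) +
 * RTE_DIM(qede_bb_xstats_strings) + RTE_DIM(qede_rxq_xstats_strings) * 6
 * extended stats.
 */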
1769 qede_get_xstats_names(struct rte_eth_dev *dev,
1770 struct rte_eth_xstat_name *xstats_names,
1771 __rte_unused unsigned int limit)
1773 struct qede_dev *qdev = dev->data->dev_private;
1774 struct ecore_dev *edev = &qdev->edev;
1775 const unsigned int stat_cnt = qede_get_xstats_count(qdev);
1776 unsigned int i, qid, stat_idx = 0;
1777 unsigned int rxq_stat_cntrs;
1779 if (xstats_names != NULL) {
1780 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1781 snprintf(xstats_names[stat_idx].name,
1782 sizeof(xstats_names[stat_idx].name),
1784 qede_xstats_strings[i].name);
1788 if (ECORE_IS_BB(edev)) {
1789 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
1790 snprintf(xstats_names[stat_idx].name,
1791 sizeof(xstats_names[stat_idx].name),
1793 qede_bb_xstats_strings[i].name);
1797 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
1798 snprintf(xstats_names[stat_idx].name,
1799 sizeof(xstats_names[stat_idx].name),
1801 qede_ah_xstats_strings[i].name);
1806 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1807 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1808 for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1809 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1810 snprintf(xstats_names[stat_idx].name,
1811 sizeof(xstats_names[stat_idx].name),
1813 qede_rxq_xstats_strings[i].name, qid,
1814 qede_rxq_xstats_strings[i].name + 4);
1824 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1827 struct qede_dev *qdev = dev->data->dev_private;
1828 struct ecore_dev *edev = &qdev->edev;
1829 struct ecore_eth_stats stats;
1830 const unsigned int num = qede_get_xstats_count(qdev);
1831 unsigned int i, qid, stat_idx = 0;
1832 unsigned int rxq_stat_cntrs;
1837 ecore_get_vport_stats(edev, &stats);
1839 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1840 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
1841 qede_xstats_strings[i].offset);
1842 xstats[stat_idx].id = stat_idx;
1846 if (ECORE_IS_BB(edev)) {
1847 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
1848 xstats[stat_idx].value =
1849 *(uint64_t *)(((char *)&stats) +
1850 qede_bb_xstats_strings[i].offset);
1851 xstats[stat_idx].id = stat_idx;
1855 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
1856 xstats[stat_idx].value =
1857 *(uint64_t *)(((char *)&stats) +
1858 qede_ah_xstats_strings[i].offset);
1859 xstats[stat_idx].id = stat_idx;
1864 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1865 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1866 for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1868 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1869 xstats[stat_idx].value = *(uint64_t *)(
1870 ((char *)(qdev->fp_array[qid].rxq)) +
1871 qede_rxq_xstats_strings[i].offset);
1872 xstats[stat_idx].id = stat_idx;
1882 qede_reset_xstats(struct rte_eth_dev *dev)
1884 struct qede_dev *qdev = dev->data->dev_private;
1885 struct ecore_dev *edev = &qdev->edev;
1887 ecore_reset_vport_stats(edev);
1890 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
1892 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1893 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1894 struct qed_link_params link_params;
1897 DP_INFO(edev, "setting link state %d\n", link_up);
1898 memset(&link_params, 0, sizeof(link_params));
1899 link_params.link_up = link_up;
1900 rc = qdev->ops->common->set_link(edev, &link_params);
1901 if (rc != ECORE_SUCCESS)
1902 DP_ERR(edev, "Unable to set link state %d\n", link_up);
1907 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
1909 return qede_dev_set_link_state(eth_dev, true);
1912 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
1914 return qede_dev_set_link_state(eth_dev, false);
1917 static void qede_reset_stats(struct rte_eth_dev *eth_dev)
1919 struct qede_dev *qdev = eth_dev->data->dev_private;
1920 struct ecore_dev *edev = &qdev->edev;
1922 ecore_reset_vport_stats(edev);
1925 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
1927 enum qed_filter_rx_mode_type type =
1928 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1930 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1931 type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
1933 qed_configure_filter_rx_mode(eth_dev, type);
1936 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
1938 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1939 qed_configure_filter_rx_mode(eth_dev,
1940 QED_FILTER_RX_MODE_TYPE_PROMISC);
1942 qed_configure_filter_rx_mode(eth_dev,
1943 QED_FILTER_RX_MODE_TYPE_REGULAR);
1946 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
1947 struct rte_eth_fc_conf *fc_conf)
1949 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1950 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1951 struct qed_link_output current_link;
1952 struct qed_link_params params;
1954 memset(&current_link, 0, sizeof(current_link));
1955 qdev->ops->common->get_link(edev, &current_link);
1957 memset(&params, 0, sizeof(params));
1958 params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
1959 if (fc_conf->autoneg) {
1960 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
1961 DP_ERR(edev, "Autoneg not supported\n");
1964 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
1967 /* Pause is assumed to be supported (SUPPORTED_Pause) */
1968 if (fc_conf->mode == RTE_FC_FULL)
1969 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
1970 QED_LINK_PAUSE_RX_ENABLE);
1971 if (fc_conf->mode == RTE_FC_TX_PAUSE)
1972 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1973 if (fc_conf->mode == RTE_FC_RX_PAUSE)
1974 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1976 params.link_up = true;
1977 (void)qdev->ops->common->set_link(edev, &params);
1982 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
1983 struct rte_eth_fc_conf *fc_conf)
1985 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1986 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1987 struct qed_link_output current_link;
1989 memset(&current_link, 0, sizeof(current_link));
1990 qdev->ops->common->get_link(edev, &current_link);
1992 if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1993 fc_conf->autoneg = true;
1995 if ((current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE) &&
1996 (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE))
1997 fc_conf->mode = RTE_FC_FULL;
1998 else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
1999 fc_conf->mode = RTE_FC_RX_PAUSE;
2000 else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
2001 fc_conf->mode = RTE_FC_TX_PAUSE;
2003 fc_conf->mode = RTE_FC_NONE;
2008 static const uint32_t *
2009 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
2011 static const uint32_t ptypes[] = {
2013 RTE_PTYPE_L2_ETHER_VLAN,
2018 RTE_PTYPE_TUNNEL_VXLAN,
2020 RTE_PTYPE_TUNNEL_GENEVE,
2022 RTE_PTYPE_INNER_L2_ETHER,
2023 RTE_PTYPE_INNER_L2_ETHER_VLAN,
2024 RTE_PTYPE_INNER_L3_IPV4,
2025 RTE_PTYPE_INNER_L3_IPV6,
2026 RTE_PTYPE_INNER_L4_TCP,
2027 RTE_PTYPE_INNER_L4_UDP,
2028 RTE_PTYPE_INNER_L4_FRAG,
2032 if (eth_dev->rx_pkt_burst == qede_recv_pkts)
2038 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
2041 *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
2042 *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
2043 *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
2044 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
2045 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
2046 *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
2047 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
2048 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
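/* Usage sketch:
 *
 *	uint8_t caps = 0;
 *	qede_init_rss_caps(&caps, ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP);
 *
 * caps now equals (ECORE_RSS_IPV4 | ECORE_RSS_IPV4_TCP).
 */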
2051 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
2052 struct rte_eth_rss_conf *rss_conf)
2054 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2055 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2056 struct ecore_sp_vport_update_params vport_update_params;
2057 struct ecore_rss_params rss_params;
2058 struct ecore_hwfn *p_hwfn;
2059 uint32_t *key = (uint32_t *)rss_conf->rss_key;
2060 uint64_t hf = rss_conf->rss_hf;
2061 uint8_t len = rss_conf->rss_key_len;
2066 memset(&vport_update_params, 0, sizeof(vport_update_params));
2067 memset(&rss_params, 0, sizeof(rss_params));
2069 DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
2070 (unsigned long)hf, len, key);
2074 DP_INFO(edev, "Enabling rss\n");
2077 qede_init_rss_caps(&rss_params.rss_caps, hf);
2078 rss_params.update_rss_capabilities = 1;
2082 if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
2083 DP_ERR(edev, "RSS key length exceeds limit\n");
2086 DP_INFO(edev, "Applying user supplied hash key\n");
2087 rss_params.update_rss_key = 1;
2088 memcpy(&rss_params.rss_key, key, len);
2090 rss_params.rss_enable = 1;
2093 rss_params.update_rss_config = 1;
2094 /* tbl_size has to be set with capabilities */
2095 rss_params.rss_table_size_log = 7;
2096 vport_update_params.vport_id = 0;
2097 /* pass the L2 handles instead of qids */
2098 for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
2099 idx = qdev->rss_ind_table[i];
2100 rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
2102 vport_update_params.rss_params = &rss_params;
2104 for_each_hwfn(edev, i) {
2105 p_hwfn = &edev->hwfns[i];
2106 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2107 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
2108 ECORE_SPQ_MODE_EBLOCK, NULL);
2110 DP_ERR(edev, "vport-update for RSS failed\n");
2114 qdev->rss_enable = rss_params.rss_enable;
2116 /* Update local structure for hash query */
2117 qdev->rss_conf.rss_hf = hf;
2118 qdev->rss_conf.rss_key_len = len;
2119 if (qdev->rss_enable) {
2120 if (qdev->rss_conf.rss_key == NULL) {
2121 qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
2122 if (qdev->rss_conf.rss_key == NULL) {
2123 DP_ERR(edev, "No memory to store RSS key\n");
2128 DP_INFO(edev, "Storing RSS key\n");
2129 memcpy(qdev->rss_conf.rss_key, key, len);
2131 } else if (!qdev->rss_enable && len == 0) {
2132 if (qdev->rss_conf.rss_key) {
2133 free(qdev->rss_conf.rss_key);
2134 qdev->rss_conf.rss_key = NULL;
2135 DP_INFO(edev, "Free RSS key\n");
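/*
 * Usage sketch (illustrative, application side): programming a hash key
 * and enabling TCP/IPv4 RSS through the ethdev API. The 40-byte key
 * length and port id 0 are assumptions for the example.
 *
 *	uint8_t key[40] = {0};
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = key,
 *		.rss_key_len = sizeof(key),
 *		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
 *	};
 *
 *	rte_eth_dev_rss_hash_update(0, &conf);
 */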
2142 static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
2143 struct rte_eth_rss_conf *rss_conf)
2145 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2147 rss_conf->rss_hf = qdev->rss_conf.rss_hf;
2148 rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;
2150 if (rss_conf->rss_key && qdev->rss_conf.rss_key)
2151 memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
2152 rss_conf->rss_key_len);
2156 static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
2157 struct ecore_rss_params *rss)
2160 bool rss_mode = true; /* enable */
2161 struct ecore_queue_cid *cid;
2162 struct ecore_rss_params *t_rss;
2164 /* In a regular scenario, we'd simply need to take the input handlers.
2165 * But in CMT, we'd have to split the handlers according to the
2166 * engine they were configured on. We'd then have to understand
2167 * whether RSS is really required, since two queues on CMT don't require RSS.
2171 /* CMT should be round-robin */
2172 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
2173 cid = rss->rss_ind_table[i];
2175 if (cid->p_owner == ECORE_LEADING_HWFN(edev))
2180 t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
2184 t_rss->update_rss_ind_table = 1;
2185 t_rss->rss_table_size_log = 7;
2186 t_rss->update_rss_config = 1;
2188 /* Make sure RSS is actually required */
2189 for_each_hwfn(edev, fn) {
2190 for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
2192 if (rss[fn].rss_ind_table[i] !=
2193 rss[fn].rss_ind_table[0])
2197 if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
2199 "CMT - 1 queue per-hwfn; Disabling RSS\n");
2206 t_rss->rss_enable = rss_mode;
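/*
 * Worked example (illustrative): on a 2-engine (CMT) adapter the loop
 * above places input handler i into per-hwfn slot i / num_hwfns of the
 * engine that owns its queue, halving the 128-entry table per engine.
 * If every per-hwfn entry then points at the same queue (one Rx queue
 * per engine), spreading is pointless and rss_mode is cleared.
 */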
2211 int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
2212 struct rte_eth_rss_reta_entry64 *reta_conf,
2215 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2216 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2217 struct ecore_sp_vport_update_params vport_update_params;
2218 struct ecore_rss_params *params;
2219 struct ecore_hwfn *p_hwfn;
2220 uint16_t i, idx, shift;
2224 if (reta_size > ETH_RSS_RETA_SIZE_128) {
2225 DP_ERR(edev, "reta_size %d is not supported by hardware\n",
2230 memset(&vport_update_params, 0, sizeof(vport_update_params));
2231 params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
2232 RTE_CACHE_LINE_SIZE);
2233 if (params == NULL) {
2234 DP_ERR(edev, "failed to allocate memory\n");
2238 for (i = 0; i < reta_size; i++) {
2239 idx = i / RTE_RETA_GROUP_SIZE;
2240 shift = i % RTE_RETA_GROUP_SIZE;
2241 if (reta_conf[idx].mask & (1ULL << shift)) {
2242 entry = reta_conf[idx].reta[shift];
2243 /* Pass rxq handles to ecore */
2244 params->rss_ind_table[i] =
2245 qdev->fp_array[entry].rxq->handle;
2246 /* Update the local copy for RETA query command */
2247 qdev->rss_ind_table[i] = entry;
2251 params->update_rss_ind_table = 1;
2252 params->rss_table_size_log = 7;
2253 params->update_rss_config = 1;
2255 /* Fix up RETA for CMT mode device */
2256 if (ECORE_IS_CMT(edev))
2257 qdev->rss_enable = qede_update_rss_parm_cmt(edev,
2259 vport_update_params.vport_id = 0;
2260 /* Use the current value of rss_enable */
2261 params->rss_enable = qdev->rss_enable;
2262 vport_update_params.rss_params = params;
2264 for_each_hwfn(edev, i) {
2265 p_hwfn = &edev->hwfns[i];
2266 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2267 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
2268 ECORE_SPQ_MODE_EBLOCK, NULL);
2270 DP_ERR(edev, "vport-update for RSS failed\n");
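/*
 * Usage sketch (illustrative, application side): steering RETA entry
 * 100 to Rx queue 3. Entry 100 falls into group
 * 100 / RTE_RETA_GROUP_SIZE == 1 at bit 100 % RTE_RETA_GROUP_SIZE == 36,
 * matching the idx/shift arithmetic above. Port id and queue number
 * are placeholders.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[2];
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	reta_conf[1].mask = 1ULL << 36;
 *	reta_conf[1].reta[36] = 3;
 *	rte_eth_dev_rss_reta_update(0, reta_conf, 128);
 */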
2280 static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
2281 struct rte_eth_rss_reta_entry64 *reta_conf,
2284 struct qede_dev *qdev = eth_dev->data->dev_private;
2285 struct ecore_dev *edev = &qdev->edev;
2286 uint16_t i, idx, shift;
2289 if (reta_size > ETH_RSS_RETA_SIZE_128) {
2290 DP_ERR(edev, "reta_size %d is not supported\n",
2295 for (i = 0; i < reta_size; i++) {
2296 idx = i / RTE_RETA_GROUP_SIZE;
2297 shift = i % RTE_RETA_GROUP_SIZE;
2298 if (reta_conf[idx].mask & (1ULL << shift)) {
2299 entry = qdev->rss_ind_table[i];
2300 reta_conf[idx].reta[shift] = entry;
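/*
 * Usage sketch (illustrative, application side): reading the table back
 * with all 128 entries selected; only entries whose mask bit is set are
 * filled in, mirroring the update path above.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[2];
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	reta_conf[0].mask = UINT64_MAX;
 *	reta_conf[1].mask = UINT64_MAX;
 *	rte_eth_dev_rss_reta_query(0, reta_conf, 128);
 */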
2309 static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
2311 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
2312 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2313 struct rte_eth_dev_info dev_info = {0};
2314 struct qede_fastpath *fp;
2315 uint32_t frame_size;
2316 uint16_t rx_buf_size;
2320 PMD_INIT_FUNC_TRACE(edev);
2321 qede_dev_info_get(dev, &dev_info);
2322 frame_size = mtu + QEDE_ETH_OVERHEAD;
2323 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
2324 DP_ERR(edev, "MTU %u out of range\n", mtu);
2327 if (!dev->data->scattered_rx &&
2328 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
2329 DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
2330 dev->data->min_rx_buf_size);
2333 /* Temporarily replace I/O functions with dummy ones. It cannot
2334 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
2336 dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
2337 dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
2341 /* Fix up RX buf size for all queues of the port */
2343 fp = &qdev->fp_array[i];
2344 bufsz = (uint16_t)rte_pktmbuf_data_room_size(
2345 fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
2346 if (dev->data->scattered_rx)
2347 rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
2349 rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
2350 rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
2351 fp->rxq->rx_buf_size = rx_buf_size;
2352 DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
2354 qede_dev_start(dev);
2355 if (frame_size > ETHER_MAX_LEN)
2356 dev->data->dev_conf.rxmode.jumbo_frame = 1;
2358 dev->data->dev_conf.rxmode.jumbo_frame = 0;
2359 /* update max frame size */
2360 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
2362 dev->rx_pkt_burst = qede_recv_pkts;
2363 dev->tx_pkt_burst = qede_xmit_pkts;
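/*
 * Worked example (illustrative): with QEDE_ETH_OVERHEAD accounting for
 * the Ethernet header, CRC and related overhead, an MTU of 9000 gives a
 * frame_size above ETHER_MAX_LEN, so the jumbo_frame flag is set and
 * max_rx_pkt_len is updated accordingly:
 *
 *	rte_eth_dev_set_mtu(0, 9000);
 *
 * The port id is a placeholder; the call fails if the resulting frame
 * size exceeds the max_rx_pktlen reported by qede_dev_info_get().
 */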
2369 qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
2370 struct rte_eth_udp_tunnel *tunnel_udp)
2372 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2373 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2374 struct ecore_tunnel_info tunn; /* @DPDK */
2378 PMD_INIT_FUNC_TRACE(edev);
2380 memset(&tunn, 0, sizeof(tunn));
2382 switch (tunnel_udp->prot_type) {
2383 case RTE_TUNNEL_TYPE_VXLAN:
2384 if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
2385 DP_ERR(edev, "UDP port %u doesn't exist\n",
2386 tunnel_udp->udp_port);
2391 tunn.vxlan_port.b_update_port = true;
2392 tunn.vxlan_port.port = udp_port;
2394 rc = qede_tunnel_update(qdev, &tunn);
2395 if (rc != ECORE_SUCCESS) {
2396 DP_ERR(edev, "Unable to config UDP port %u\n",
2397 tunn.vxlan_port.port);
2401 qdev->vxlan.udp_port = udp_port;
2402 /* If the request is to delete the UDP port and the number of
2403 * VXLAN filters has reached 0, then VXLAN offload can be disabled.
2406 if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
2407 return qede_vxlan_enable(eth_dev,
2408 ECORE_TUNN_CLSS_MAC_VLAN, false);
2412 case RTE_TUNNEL_TYPE_GENEVE:
2413 if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
2414 DP_ERR(edev, "UDP port %u doesn't exist\n",
2415 tunnel_udp->udp_port);
2421 tunn.geneve_port.b_update_port = true;
2422 tunn.geneve_port.port = udp_port;
2424 rc = qede_tunnel_update(qdev, &tunn);
2425 if (rc != ECORE_SUCCESS) {
2426 DP_ERR(edev, "Unable to config UDP port %u\n",
2427 tunn.geneve_port.port);
2431 qdev->geneve.udp_port = udp_port;
2432 /* If the request is to delete the UDP port and the number of
2433 * GENEVE filters has reached 0, then GENEVE offload can be disabled.
2436 if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
2437 return qede_geneve_enable(eth_dev,
2438 ECORE_TUNN_CLSS_MAC_VLAN, false);
2450 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
2451 struct rte_eth_udp_tunnel *tunnel_udp)
2453 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2454 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2455 struct ecore_tunnel_info tunn; /* @DPDK */
2459 PMD_INIT_FUNC_TRACE(edev);
2461 memset(&tunn, 0, sizeof(tunn));
2463 switch (tunnel_udp->prot_type) {
2464 case RTE_TUNNEL_TYPE_VXLAN:
2465 if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
2467 "UDP port %u for VXLAN was already configured\n",
2468 tunnel_udp->udp_port);
2469 return ECORE_SUCCESS;
2472 /* Enable VxLAN tunnel with default MAC/VLAN classification if
2473 * it was not enabled while adding VXLAN filter before UDP port
2476 if (!qdev->vxlan.enable) {
2477 rc = qede_vxlan_enable(eth_dev,
2478 ECORE_TUNN_CLSS_MAC_VLAN, true);
2479 if (rc != ECORE_SUCCESS) {
2480 DP_ERR(edev, "Failed to enable VXLAN "
2481 "prior to updating UDP port\n");
2485 udp_port = tunnel_udp->udp_port;
2487 tunn.vxlan_port.b_update_port = true;
2488 tunn.vxlan_port.port = udp_port;
2490 rc = qede_tunnel_update(qdev, &tunn);
2491 if (rc != ECORE_SUCCESS) {
2492 DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
2497 DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);
2499 qdev->vxlan.udp_port = udp_port;
2502 case RTE_TUNNEL_TYPE_GENEVE:
2503 if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
2505 "UDP port %u for GENEVE was already configured\n",
2506 tunnel_udp->udp_port);
2507 return ECORE_SUCCESS;
2510 /* Enable GENEVE tunnel with default MAC/VLAN classification if
2511 * it was not enabled while adding GENEVE filter before UDP port
2514 if (!qdev->geneve.enable) {
2515 rc = qede_geneve_enable(eth_dev,
2516 ECORE_TUNN_CLSS_MAC_VLAN, true);
2517 if (rc != ECORE_SUCCESS) {
2518 DP_ERR(edev, "Failed to enable GENEVE "
2519 "prior to updating UDP port\n");
2523 udp_port = tunnel_udp->udp_port;
2525 tunn.geneve_port.b_update_port = true;
2526 tunn.geneve_port.port = udp_port;
2528 rc = qede_tunnel_update(qdev, &tunn);
2529 if (rc != ECORE_SUCCESS) {
2530 DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
2535 DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);
2537 qdev->geneve.udp_port = udp_port;
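/*
 * Usage sketch (illustrative, application side): registering the IANA
 * VXLAN port for tunnel classification; deletion is symmetric via
 * rte_eth_dev_udp_tunnel_port_delete(). Port id 0 is a placeholder.
 *
 *	struct rte_eth_udp_tunnel tunnel_udp = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	rte_eth_dev_udp_tunnel_port_add(0, &tunnel_udp);
 */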
2547 static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
2548 uint32_t *clss, char *str)
2551 *clss = MAX_ECORE_TUNN_CLSS;
2553 for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
2554 if (filter == qede_tunn_types[j].rte_filter_type) {
2555 *type = qede_tunn_types[j].qede_type;
2556 *clss = qede_tunn_types[j].qede_tunn_clss;
2557 strcpy(str, qede_tunn_types[j].string);
2564 qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
2565 const struct rte_eth_tunnel_filter_conf *conf,
2568 /* Init common ucast params first */
2569 qede_set_ucast_cmn_params(ucast);
2571 /* Copy out the required fields based on classification type */
2575 case ECORE_FILTER_VNI:
2576 ucast->vni = conf->tenant_id;
2578 case ECORE_FILTER_INNER_VLAN:
2579 ucast->vlan = conf->inner_vlan;
2581 case ECORE_FILTER_MAC:
2582 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
2585 case ECORE_FILTER_INNER_MAC:
2586 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2589 case ECORE_FILTER_MAC_VNI_PAIR:
2590 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
2592 ucast->vni = conf->tenant_id;
2594 case ECORE_FILTER_INNER_MAC_VNI_PAIR:
2595 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2597 ucast->vni = conf->tenant_id;
2599 case ECORE_FILTER_INNER_PAIR:
2600 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2602 ucast->vlan = conf->inner_vlan;
2608 return ECORE_SUCCESS;
2612 _qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
2613 const struct rte_eth_tunnel_filter_conf *conf,
2614 __attribute__((unused)) enum rte_filter_op filter_op,
2615 enum ecore_tunn_clss *clss,
2618 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2619 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2620 struct ecore_filter_ucast ucast = {0};
2621 enum ecore_filter_ucast_type type;
2622 uint16_t filter_type = 0;
2626 filter_type = conf->filter_type;
2627 /* Determine if the given filter classification is supported */
2628 qede_get_ecore_tunn_params(filter_type, &type, clss, str);
2629 if (*clss == MAX_ECORE_TUNN_CLSS) {
2630 DP_ERR(edev, "Unsupported filter type\n");
2633 /* Init tunnel ucast params */
2634 rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
2635 if (rc != ECORE_SUCCESS) {
2636 DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
2640 DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
2641 str, filter_op, ucast.type);
2643 ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
2645 /* Skip MAC/VLAN if filter is based on VNI */
2646 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
2647 rc = qede_mac_int_ops(eth_dev, &ucast, add);
2648 if ((rc == 0) && add) {
2649 /* Enable accept anyvlan */
2650 qede_config_accept_any_vlan(qdev, true);
2653 rc = qede_ucast_filter(eth_dev, &ucast, add);
2655 rc = ecore_filter_ucast_cmd(edev, &ucast,
2656 ECORE_SPQ_MODE_CB, NULL);
2663 qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
2664 enum rte_filter_op filter_op,
2665 const struct rte_eth_tunnel_filter_conf *conf)
2667 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2668 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2669 enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
2673 PMD_INIT_FUNC_TRACE(edev);
2675 switch (filter_op) {
2676 case RTE_ETH_FILTER_ADD:
2679 case RTE_ETH_FILTER_DELETE:
2683 DP_ERR(edev, "Unsupported operation %d\n", filter_op);
2688 return qede_tunn_enable(eth_dev,
2689 ECORE_TUNN_CLSS_MAC_VLAN,
2690 conf->tunnel_type, add);
2692 rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
2693 if (rc != ECORE_SUCCESS)
2697 if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
2698 qdev->vxlan.num_filters++;
2699 qdev->vxlan.filter_type = conf->filter_type;
2700 } else { /* GENEVE */
2701 qdev->geneve.num_filters++;
2702 qdev->geneve.filter_type = conf->filter_type;
2705 if (!qdev->vxlan.enable || !qdev->geneve.enable)
2706 return qede_tunn_enable(eth_dev, clss,
2710 if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
2711 qdev->vxlan.num_filters--;
2713 qdev->geneve.num_filters--;
2715 /* Disable tunnel offload if the filter count drops to 0 */
2716 if ((qdev->vxlan.num_filters == 0) ||
2717 (qdev->geneve.num_filters == 0))
2718 return qede_tunn_enable(eth_dev, clss,
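/*
 * Usage sketch (illustrative, application side): adding an
 * inner-MAC/inner-VLAN VXLAN filter through the generic filter API.
 * All field values below are placeholders; inner_mac would also be
 * filled in by a real caller.
 *
 *	struct rte_eth_tunnel_filter_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
 *	conf.filter_type = ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN;
 *	conf.inner_vlan = 100;
 *	rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_TUNNEL,
 *				RTE_ETH_FILTER_ADD, &conf);
 */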
2726 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
2727 enum rte_filter_type filter_type,
2728 enum rte_filter_op filter_op,
2731 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2732 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2733 struct rte_eth_tunnel_filter_conf *filter_conf =
2734 (struct rte_eth_tunnel_filter_conf *)arg;
2736 switch (filter_type) {
2737 case RTE_ETH_FILTER_TUNNEL:
2738 switch (filter_conf->tunnel_type) {
2739 case RTE_TUNNEL_TYPE_VXLAN:
2740 case RTE_TUNNEL_TYPE_GENEVE:
2742 "Packet steering to the specified Rx queue"
2743 " is not supported with UDP tunneling");
2744 return(qede_tunn_filter_config(eth_dev, filter_op,
2746 /* Placeholders for future tunneling support */
2747 case RTE_TUNNEL_TYPE_TEREDO:
2748 case RTE_TUNNEL_TYPE_NVGRE:
2749 case RTE_TUNNEL_TYPE_IP_IN_GRE:
2750 case RTE_L2_TUNNEL_TYPE_E_TAG:
2751 DP_ERR(edev, "Unsupported tunnel type %d\n",
2752 filter_conf->tunnel_type);
2754 case RTE_TUNNEL_TYPE_NONE:
2759 case RTE_ETH_FILTER_FDIR:
2760 return qede_fdir_filter_conf(eth_dev, filter_op, arg);
2761 case RTE_ETH_FILTER_NTUPLE:
2762 return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
2763 case RTE_ETH_FILTER_MACVLAN:
2764 case RTE_ETH_FILTER_ETHERTYPE:
2765 case RTE_ETH_FILTER_FLEXIBLE:
2766 case RTE_ETH_FILTER_SYN:
2767 case RTE_ETH_FILTER_HASH:
2768 case RTE_ETH_FILTER_L2_TUNNEL:
2769 case RTE_ETH_FILTER_MAX:
2771 DP_ERR(edev, "Unsupported filter type %d\n",
2779 static const struct eth_dev_ops qede_eth_dev_ops = {
2780 .dev_configure = qede_dev_configure,
2781 .dev_infos_get = qede_dev_info_get,
2782 .rx_queue_setup = qede_rx_queue_setup,
2783 .rx_queue_release = qede_rx_queue_release,
2784 .tx_queue_setup = qede_tx_queue_setup,
2785 .tx_queue_release = qede_tx_queue_release,
2786 .dev_start = qede_dev_start,
2787 .dev_set_link_up = qede_dev_set_link_up,
2788 .dev_set_link_down = qede_dev_set_link_down,
2789 .link_update = qede_link_update,
2790 .promiscuous_enable = qede_promiscuous_enable,
2791 .promiscuous_disable = qede_promiscuous_disable,
2792 .allmulticast_enable = qede_allmulticast_enable,
2793 .allmulticast_disable = qede_allmulticast_disable,
2794 .dev_stop = qede_dev_stop,
2795 .dev_close = qede_dev_close,
2796 .stats_get = qede_get_stats,
2797 .stats_reset = qede_reset_stats,
2798 .xstats_get = qede_get_xstats,
2799 .xstats_reset = qede_reset_xstats,
2800 .xstats_get_names = qede_get_xstats_names,
2801 .mac_addr_add = qede_mac_addr_add,
2802 .mac_addr_remove = qede_mac_addr_remove,
2803 .mac_addr_set = qede_mac_addr_set,
2804 .vlan_offload_set = qede_vlan_offload_set,
2805 .vlan_filter_set = qede_vlan_filter_set,
2806 .flow_ctrl_set = qede_flow_ctrl_set,
2807 .flow_ctrl_get = qede_flow_ctrl_get,
2808 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
2809 .rss_hash_update = qede_rss_hash_update,
2810 .rss_hash_conf_get = qede_rss_hash_conf_get,
2811 .reta_update = qede_rss_reta_update,
2812 .reta_query = qede_rss_reta_query,
2813 .mtu_set = qede_set_mtu,
2814 .filter_ctrl = qede_dev_filter_ctrl,
2815 .udp_tunnel_port_add = qede_udp_dst_port_add,
2816 .udp_tunnel_port_del = qede_udp_dst_port_del,
2819 static const struct eth_dev_ops qede_eth_vf_dev_ops = {
2820 .dev_configure = qede_dev_configure,
2821 .dev_infos_get = qede_dev_info_get,
2822 .rx_queue_setup = qede_rx_queue_setup,
2823 .rx_queue_release = qede_rx_queue_release,
2824 .tx_queue_setup = qede_tx_queue_setup,
2825 .tx_queue_release = qede_tx_queue_release,
2826 .dev_start = qede_dev_start,
2827 .dev_set_link_up = qede_dev_set_link_up,
2828 .dev_set_link_down = qede_dev_set_link_down,
2829 .link_update = qede_link_update,
2830 .promiscuous_enable = qede_promiscuous_enable,
2831 .promiscuous_disable = qede_promiscuous_disable,
2832 .allmulticast_enable = qede_allmulticast_enable,
2833 .allmulticast_disable = qede_allmulticast_disable,
2834 .dev_stop = qede_dev_stop,
2835 .dev_close = qede_dev_close,
2836 .stats_get = qede_get_stats,
2837 .stats_reset = qede_reset_stats,
2838 .xstats_get = qede_get_xstats,
2839 .xstats_reset = qede_reset_xstats,
2840 .xstats_get_names = qede_get_xstats_names,
2841 .vlan_offload_set = qede_vlan_offload_set,
2842 .vlan_filter_set = qede_vlan_filter_set,
2843 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
2844 .rss_hash_update = qede_rss_hash_update,
2845 .rss_hash_conf_get = qede_rss_hash_conf_get,
2846 .reta_update = qede_rss_reta_update,
2847 .reta_query = qede_rss_reta_query,
2848 .mtu_set = qede_set_mtu,
2849 .udp_tunnel_port_add = qede_udp_dst_port_add,
2850 .udp_tunnel_port_del = qede_udp_dst_port_del,
2853 static void qede_update_pf_params(struct ecore_dev *edev)
2855 struct ecore_pf_params pf_params;
2857 memset(&pf_params, 0, sizeof(struct ecore_pf_params));
2858 pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
2859 pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
2860 qed_ops->common->update_pf_params(edev, &pf_params);
2863 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
2865 struct rte_pci_device *pci_dev;
2866 struct rte_pci_addr pci_addr;
2867 struct qede_dev *adapter;
2868 struct ecore_dev *edev;
2869 struct qed_dev_eth_info dev_info;
2870 struct qed_slowpath_params params;
2871 static bool do_once = true;
2872 uint8_t bulletin_change;
2873 uint8_t vf_mac[ETHER_ADDR_LEN];
2874 uint8_t is_mac_forced;
2876 /* Fix up ecore debug level */
2877 uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
2878 uint8_t dp_level = ECORE_LEVEL_VERBOSE;
2881 /* Extract key data structures */
2882 adapter = eth_dev->data->dev_private;
2883 adapter->ethdev = eth_dev;
2884 edev = &adapter->edev;
2885 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2886 pci_addr = pci_dev->addr;
2888 PMD_INIT_FUNC_TRACE(edev);
2890 snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
2891 pci_addr.bus, pci_addr.devid, pci_addr.function,
2892 eth_dev->data->port_id);
2894 eth_dev->rx_pkt_burst = qede_recv_pkts;
2895 eth_dev->tx_pkt_burst = qede_xmit_pkts;
2896 eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
2898 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2899 DP_ERR(edev, "Skipping device init from secondary process\n");
2903 rte_eth_copy_pci_info(eth_dev, pci_dev);
2906 edev->vendor_id = pci_dev->id.vendor_id;
2907 edev->device_id = pci_dev->id.device_id;
2909 qed_ops = qed_get_eth_ops();
2911 DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
2915 DP_INFO(edev, "Starting qede probe\n");
2916 rc = qed_ops->common->probe(edev, pci_dev, dp_module,
2919 DP_ERR(edev, "qede probe failed rc %d\n", rc);
2922 qede_update_pf_params(edev);
2923 rte_intr_callback_register(&pci_dev->intr_handle,
2924 qede_interrupt_handler, (void *)eth_dev);
2925 if (rte_intr_enable(&pci_dev->intr_handle)) {
2926 DP_ERR(edev, "rte_intr_enable() failed\n");
2930 /* Start the slowpath process */
2931 memset(&params, 0, sizeof(struct qed_slowpath_params));
2932 params.int_mode = ECORE_INT_MODE_MSIX;
2933 params.drv_major = QEDE_PMD_VERSION_MAJOR;
2934 params.drv_minor = QEDE_PMD_VERSION_MINOR;
2935 params.drv_rev = QEDE_PMD_VERSION_REVISION;
2936 params.drv_eng = QEDE_PMD_VERSION_PATCH;
2937 strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
2938 QEDE_PMD_DRV_VER_STR_SIZE);
2940 /* For a CMT mode device, do periodic polling for slowpath events.
2941 * This is required since a uio device uses only one MSI-X
2942 * interrupt vector but we need one for each engine.
2944 if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
2945 rc = rte_eal_alarm_set(timer_period * US_PER_S,
2949 DP_ERR(edev, "Unable to start periodic"
2950 " timer rc %d\n", rc);
2955 rc = qed_ops->common->slowpath_start(edev, &params);
2957 DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
2958 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2963 rc = qed_ops->fill_dev_info(edev, &dev_info);
2965 DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
2966 qed_ops->common->slowpath_stop(edev);
2967 qed_ops->common->remove(edev);
2968 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2973 qede_alloc_etherdev(adapter, &dev_info);
2975 adapter->ops->common->set_name(edev, edev->name);
2978 adapter->dev_info.num_mac_filters =
2979 (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
2982 ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
2983 (uint32_t *)&adapter->dev_info.num_mac_filters);
2985 /* Allocate memory for storing MAC addr */
2986 eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
2988 adapter->dev_info.num_mac_filters),
2989 RTE_CACHE_LINE_SIZE);
2991 if (eth_dev->data->mac_addrs == NULL) {
2992 DP_ERR(edev, "Failed to allocate MAC address\n");
2993 qed_ops->common->slowpath_stop(edev);
2994 qed_ops->common->remove(edev);
2995 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
3001 ether_addr_copy((struct ether_addr *)edev->hwfns[0].
3002 hw_info.hw_mac_addr,
3003 &eth_dev->data->mac_addrs[0]);
3004 ether_addr_copy(&eth_dev->data->mac_addrs[0],
3005 &adapter->primary_mac);
3007 ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
3009 if (bulletin_change) {
3011 ecore_vf_bulletin_get_forced_mac(
3012 ECORE_LEADING_HWFN(edev),
3015 if (is_mac_exist && is_mac_forced) {
3016 DP_INFO(edev, "VF macaddr received from PF\n");
3017 ether_addr_copy((struct ether_addr *)&vf_mac,
3018 &eth_dev->data->mac_addrs[0]);
3019 ether_addr_copy(&eth_dev->data->mac_addrs[0],
3020 &adapter->primary_mac);
3022 DP_ERR(edev, "No VF macaddr assigned\n");
3027 eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
3030 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
3031 qede_print_adapter_info(adapter);
3036 adapter->num_tx_queues = 0;
3037 adapter->num_rx_queues = 0;
3038 SLIST_INIT(&adapter->fdir_info.fdir_list_head);
3039 SLIST_INIT(&adapter->vlan_list_head);
3040 SLIST_INIT(&adapter->uc_list_head);
3041 adapter->mtu = ETHER_MTU;
3042 adapter->new_mtu = ETHER_MTU;
3044 if (qede_start_vport(adapter, adapter->mtu))
3047 DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
3048 adapter->primary_mac.addr_bytes[0],
3049 adapter->primary_mac.addr_bytes[1],
3050 adapter->primary_mac.addr_bytes[2],
3051 adapter->primary_mac.addr_bytes[3],
3052 adapter->primary_mac.addr_bytes[4],
3053 adapter->primary_mac.addr_bytes[5]);
3055 DP_INFO(edev, "Device initialized\n");
3060 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
3062 return qede_common_dev_init(eth_dev, 1);
3065 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
3067 return qede_common_dev_init(eth_dev, 0);
3070 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
3072 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
3073 struct qede_dev *qdev = eth_dev->data->dev_private;
3074 struct ecore_dev *edev = &qdev->edev;
3076 PMD_INIT_FUNC_TRACE(edev);
3079 /* only uninitialize in the primary process */
3080 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3083 /* safe to close dev here */
3084 qede_dev_close(eth_dev);
3086 eth_dev->dev_ops = NULL;
3087 eth_dev->rx_pkt_burst = NULL;
3088 eth_dev->tx_pkt_burst = NULL;
3090 if (eth_dev->data->mac_addrs)
3091 rte_free(eth_dev->data->mac_addrs);
3093 eth_dev->data->mac_addrs = NULL;
3098 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
3100 return qede_dev_common_uninit(eth_dev);
3103 static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
3105 return qede_dev_common_uninit(eth_dev);
3108 static const struct rte_pci_id pci_id_qedevf_map[] = {
3109 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
3111 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
3114 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
3117 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
3122 static const struct rte_pci_id pci_id_qede_map[] = {
3123 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
3125 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
3128 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
3131 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
3134 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
3137 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
3140 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
3143 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
3146 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
3149 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
3152 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
3157 static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3158 struct rte_pci_device *pci_dev)
3160 return rte_eth_dev_pci_generic_probe(pci_dev,
3161 sizeof(struct qede_dev), qedevf_eth_dev_init);
3164 static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
3166 return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
3169 static struct rte_pci_driver rte_qedevf_pmd = {
3170 .id_table = pci_id_qedevf_map,
3171 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3172 .probe = qedevf_eth_dev_pci_probe,
3173 .remove = qedevf_eth_dev_pci_remove,
3176 static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3177 struct rte_pci_device *pci_dev)
3179 return rte_eth_dev_pci_generic_probe(pci_dev,
3180 sizeof(struct qede_dev), qede_eth_dev_init);
3183 static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
3185 return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
3188 static struct rte_pci_driver rte_qede_pmd = {
3189 .id_table = pci_id_qede_map,
3190 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3191 .probe = qede_eth_dev_pci_probe,
3192 .remove = qede_eth_dev_pci_remove,
3195 RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
3196 RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
3197 RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
3198 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
3199 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
3200 RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
3202 RTE_INIT(qede_init_log);
3206 qede_logtype_init = rte_log_register("pmd.qede.init");
3207 if (qede_logtype_init >= 0)
3208 rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
3209 qede_logtype_driver = rte_log_register("pmd.qede.driver");
3210 if (qede_logtype_driver >= 0)
3211 rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
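/*
 * Usage sketch (illustrative): the two logtypes registered above default
 * to NOTICE and can be raised at runtime, either programmatically or via
 * the EAL --log-level option (exact option syntax depends on the DPDK
 * release):
 *
 *	rte_log_set_level(qede_logtype_driver, RTE_LOG_DEBUG);
 */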