/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_ethdev.h"
#include <rte_alarm.h>
#include <rte_version.h>

static const struct qed_eth_ops *qed_ops;
static int64_t timer_period = 1;

/* VXLAN tunnel classification mapping */
const struct _qede_udp_tunn_types {
	uint16_t rte_filter_type;
	enum ecore_filter_ucast_type qede_type;
	enum ecore_tunn_clss qede_tunn_clss;
	const char *string;
} qede_tunn_types[] = {
	{
		ETH_TUNNEL_FILTER_OMAC,
		ECORE_FILTER_MAC,
		ECORE_TUNN_CLSS_MAC_VLAN,
		"outer-mac"
	},
	{
		ETH_TUNNEL_FILTER_TENID,
		ECORE_FILTER_VNI,
		ECORE_TUNN_CLSS_MAC_VNI,
		"vni"
	},
	{
		ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_INNER_MAC,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_INNER_VLAN,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
		ECORE_FILTER_MAC_VNI_PAIR,
		ECORE_TUNN_CLSS_MAC_VNI,
		"outer-mac and vni"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-mac and inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-mac and inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_INNER_MAC_VNI_PAIR,
		ECORE_TUNN_CLSS_INNER_MAC_VNI,
		"vni and inner-mac",
	},
	{
		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"vni and inner-vlan",
	},
	{
		ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_INNER_PAIR,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-mac and inner-vlan",
	},
	{
		ETH_TUNNEL_FILTER_OIP,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-IP"
	},
	{
		ETH_TUNNEL_FILTER_IIP,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"inner-IP"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_IVLAN"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_IVLAN_TENID"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_TENID,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_TENID"
	},
	{
		RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"OMAC_TENID_IMAC"
	},
};
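
/* Entries whose qede_type is ECORE_FILTER_UNUSED (paired with
 * MAX_ECORE_TUNN_CLSS) mark rte_eth tunnel-filter combinations the
 * firmware cannot classify on; the string field exists so the rejection
 * can be logged by name when such a combination is requested.
 */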

struct rte_qede_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint64_t offset;
};
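
/* Each xstat is described by a display name plus the byte offset of its
 * counter within the stats structure, so one generic loop can read every
 * value, roughly:
 *   value = *(uint64_t *)((char *)&stats + qede_xstats_strings[i].offset);
 * (see qede_get_xstats() below).
 */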

static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
	{"rx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
	{"rx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
	{"rx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
	{"rx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
	{"rx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
	{"rx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},

	{"tx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
	{"tx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
	{"tx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
	{"tx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
	{"tx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
	{"tx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},

	{"rx_64_byte_packets",
		offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
	{"rx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_65_to_127_byte_packets)},
	{"rx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_128_to_255_byte_packets)},
	{"rx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_256_to_511_byte_packets)},
	{"rx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_512_to_1023_byte_packets)},
	{"rx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_1024_to_1518_byte_packets)},
	{"tx_64_byte_packets",
		offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
	{"tx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_65_to_127_byte_packets)},
	{"tx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_128_to_255_byte_packets)},
	{"tx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_256_to_511_byte_packets)},
	{"tx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_512_to_1023_byte_packets)},
	{"tx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_1024_to_1518_byte_packets)},

	{"rx_mac_crtl_frames",
		offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
	{"tx_mac_control_frames",
		offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
	{"rx_pause_frames",
		offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
	{"tx_pause_frames",
		offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
	{"rx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
	{"tx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},

	{"rx_crc_errors",
		offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
	{"rx_align_errors",
		offsetof(struct ecore_eth_stats_common, rx_align_errors)},
	{"rx_carrier_errors",
		offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
	{"rx_oversize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
	{"rx_jabbers",
		offsetof(struct ecore_eth_stats_common, rx_jabbers)},
	{"rx_undersize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
	{"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
	{"rx_host_buffer_not_available",
		offsetof(struct ecore_eth_stats_common, no_buff_discards)},
	/* Number of packets discarded because they are bigger than MTU */
	{"rx_packet_too_big_discards",
		offsetof(struct ecore_eth_stats_common,
			 packet_too_big_discard)},
	{"rx_ttl_zero_discards",
		offsetof(struct ecore_eth_stats_common, ttl0_discard)},
	{"rx_multi_function_tag_filter_discards",
		offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
	{"rx_mac_filter_discards",
		offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
	{"rx_hw_buffer_truncates",
		offsetof(struct ecore_eth_stats_common, brb_truncates)},
	{"rx_hw_buffer_discards",
		offsetof(struct ecore_eth_stats_common, brb_discards)},
	{"tx_error_drop_packets",
		offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},

	{"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
	{"rx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
	{"rx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
	{"rx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
	{"rx_mac_frames_ok",
		offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
	{"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
	{"tx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
	{"tx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
	{"tx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},

	{"lro_coalesced_packets",
		offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
	{"lro_coalesced_events",
		offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
	{"lro_aborts_num",
		offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
	{"lro_not_coalesced_packets",
		offsetof(struct ecore_eth_stats_common,
			 tpa_not_coalesced_pkts)},
	{"lro_coalesced_bytes",
		offsetof(struct ecore_eth_stats_common,
			 tpa_coalesced_bytes)},
};

static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
	{"rx_1519_to_1522_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_1522_byte_packets)},
	{"rx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_2047_byte_packets)},
	{"rx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_2048_to_4095_byte_packets)},
	{"rx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_4096_to_9216_byte_packets)},
	{"rx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_9217_to_16383_byte_packets)},

	{"tx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_1519_to_2047_byte_packets)},
	{"tx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_2048_to_4095_byte_packets)},
	{"tx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_4096_to_9216_byte_packets)},
	{"tx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_9217_to_16383_byte_packets)},

	{"tx_lpi_entry_count",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
	{"tx_total_collisions",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
};

static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
	{"rx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 rx_1519_to_max_byte_packets)},
	{"tx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 tx_1519_to_max_byte_packets)},
};

static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
	{"rx_q_segments",
		offsetof(struct qede_rx_queue, rx_segs)},
	{"rx_q_hw_errors",
		offsetof(struct qede_rx_queue, rx_hw_errors)},
	{"rx_q_allocation_errors",
		offsetof(struct qede_rx_queue, rx_alloc_errors)}
};

static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}

static void
qede_interrupt_handler(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	if (rte_intr_enable(eth_dev->intr_handle))
		DP_ERR(edev, "rte_intr_enable failed\n");
}

static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
	qdev->ops = qed_ops;
}

#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
static void qede_print_adapter_info(struct qede_dev *qdev)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_dev_info *info = &qdev->dev_info.common;
	static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
	static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];

	DP_INFO(edev, "*********************************\n");
	DP_INFO(edev, " DPDK version:%s\n", rte_version());
	DP_INFO(edev, " Chip details : %s %c%d\n",
		  ECORE_IS_BB(edev) ? "BB" : "AH",
		  'A' + edev->chip_rev,
		  (int)edev->chip_metal);
	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
		 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
	snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
		 ver_str, QEDE_PMD_VERSION);
	DP_INFO(edev, " Driver version : %s\n", drv_ver);
	DP_INFO(edev, " Firmware version : %s\n", ver_str);

	snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
		 "%d.%d.%d.%d",
		 (info->mfw_rev >> 24) & 0xff,
		 (info->mfw_rev >> 16) & 0xff,
		 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
	DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
	DP_INFO(edev, " Firmware file : %s\n", fw_file);
	DP_INFO(edev, "*********************************\n");
}
#endif

static int
qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_start_params params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	memset(&params, 0, sizeof(params));
	params.vport_id = 0;
	params.mtu = mtu;
	/* @DPDK - Disable FW placement */
	params.zero_placement_offset = 1;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.concrete_fid = p_hwfn->hw_info.concrete_fid;
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_start(p_hwfn, &params);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Start V-PORT failed %d\n", rc);
			return rc;
		}
	}
	ecore_reset_vport_stats(edev);
	DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);

	return 0;
}

static void
qede_stop_vport(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	uint8_t vport_id;
	int rc;
	int i;

	vport_id = 0;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
					 vport_id);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
			break;
		}
	}
}
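
/* Note: every vport ramrod above is issued once per hwfn. On CMT (100G)
 * devices the adapter exposes two engines, so for_each_hwfn() iterates
 * twice and the vport must be started/stopped on both.
 */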

/* Activate or deactivate vport via vport-update */
int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc = -1;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_vport_active_rx_flg = 1;
	params.update_vport_active_tx_flg = 1;
	params.vport_active_rx_flg = flg;
	params.vport_active_tx_flg = flg;
#ifndef RTE_LIBRTE_QEDE_VF_TX_SWITCH
	if (IS_VF(edev)) {
		params.update_tx_switching_flg = 1;
		params.tx_switching_flg = !flg;
		DP_INFO(edev, "VF tx-switching is disabled\n");
	}
#endif
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");
			break;
		}
	}
	DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");

	return rc;
}

static void
qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
			   uint16_t mtu, bool enable)
{
	/* Enable LRO in split mode */
	sge_tpa_params->tpa_ipv4_en_flg = enable;
	sge_tpa_params->tpa_ipv6_en_flg = enable;
	sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
	sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
	/* set if tpa enable changes */
	sge_tpa_params->update_tpa_en_flg = 1;
	/* set if tpa parameters should be handled */
	sge_tpa_params->update_tpa_param_flg = enable;

	sge_tpa_params->max_buffers_per_cqe = 20;
	/* Enable TPA in split mode. In this mode each TPA segment
	 * starts on the new BD, so there is one BD per segment.
	 */
	sge_tpa_params->tpa_pkt_split_flg = 1;
	sge_tpa_params->tpa_hdr_data_split_flg = 0;
	sge_tpa_params->tpa_gro_consistent_flg = 0;
	sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
	sge_tpa_params->tpa_max_size = 0x7FFF;
	sge_tpa_params->tpa_min_size_to_start = mtu / 2;
	sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
}
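
/* With pkt_split enabled each aggregated segment lands on its own BD.
 * The mtu/2 start/continue thresholds mean the firmware only begins (or
 * continues) aggregating once payloads exceed half the MTU, so small
 * packets are delivered unaggregated.
 */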

/* Enable/disable LRO via vport-update */
int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_sge_tpa_params tpa_params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
	qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
	params.vport_id = 0;
	params.sge_tpa_params = &tpa_params;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update LRO\n");
			return -1;
		}
	}
	qdev->enable_lro = flg;
	DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");

	return 0;
}

/* Update MTU via vport-update without doing port restart.
 * The vport must be deactivated before calling this API.
 */
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.mtu = mtu;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update MTU\n");
			return -1;
		}
	}
	DP_INFO(edev, "MTU updated to %u\n", mtu);

	return 0;
}

static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
{
	memset(ucast, 0, sizeof(struct ecore_filter_ucast));
	ucast->is_rx_filter = true;
	ucast->is_tx_filter = true;
	/* ucast->assert_on_error = true; - For debug */
}

static int
qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
			     enum qed_filter_rx_mode_type type)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_accept_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.update_rx_mode_config = 1;
	flags.update_tx_mode_config = 1;
	flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
		ECORE_ACCEPT_MCAST_MATCHED |
		ECORE_ACCEPT_BCAST;

	flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
		ECORE_ACCEPT_MCAST_MATCHED |
		ECORE_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
		if (IS_VF(edev)) {
			flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
			DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
		}
	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	} else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
			    QED_FILTER_RX_MODE_TYPE_PROMISC)) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
			ECORE_ACCEPT_MCAST_UNMATCHED;
	}

	return ecore_filter_accept_cmd(edev, 0, flags, false, false,
				       ECORE_SPQ_MODE_CB, NULL);
}
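
/* The MATCHED flags accept only traffic that hits a configured filter;
 * the UNMATCHED variants widen that to everything else. Promiscuous
 * (ucast) and allmulti (mcast) modes are expressed to the firmware by
 * OR-ing in the corresponding UNMATCHED bits above.
 */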

static int
qede_tunnel_update(struct qede_dev *qdev,
		   struct ecore_tunnel_info *tunn_info)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		p_ptt = IS_PF(edev) ? ecore_ptt_acquire(p_hwfn) : NULL;
		rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
				tunn_info, ECORE_SPQ_MODE_CB, NULL);
		if (IS_PF(edev))
			ecore_ptt_release(p_hwfn, p_ptt);

		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}

static int
qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		  bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	if (qdev->vxlan.enable == enable)
		return ECORE_SUCCESS;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.vxlan.b_update_mode = true;
	tunn.vxlan.b_mode_enabled = enable;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;
	tunn.vxlan.tun_cls = clss;

	tunn.vxlan_port.b_update_port = true;
	tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->vxlan.enable = enable;
		qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
		DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
			enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       tunn.vxlan.tun_cls);
	}

	return rc;
}

static int
qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		   bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.l2_geneve.b_update_mode = true;
	tunn.l2_geneve.b_mode_enabled = enable;
	tunn.ip_geneve.b_update_mode = true;
	tunn.ip_geneve.b_mode_enabled = enable;
	tunn.l2_geneve.tun_cls = clss;
	tunn.ip_geneve.tun_cls = clss;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;

	tunn.geneve_port.b_update_port = true;
	tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->geneve.enable = enable;
		qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
		DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
			enable ? "enabled" : "disabled", qdev->geneve.udp_port);
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       tunn.l2_geneve.tun_cls);
	}

	return rc;
}

static int
qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		 enum rte_eth_tunnel_type tunn_type, bool enable)
{
	int rc = -EINVAL;

	switch (tunn_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		rc = qede_vxlan_enable(eth_dev, clss, enable);
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		rc = qede_geneve_enable(eth_dev, clss, enable);
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}

static int
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		  bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_ucast_entry *tmp = NULL;
	struct qede_ucast_entry *u;
	struct ether_addr *mac_addr;

	mac_addr = (struct ether_addr *)ucast->mac;
	if (add) {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    ETHER_ADDR_LEN) == 0) &&
			     ucast->vni == tmp->vni &&
			     ucast->vlan == tmp->vlan) {
				DP_ERR(edev, "Unicast MAC is already added"
				       " with vlan = %u, vni = %u\n",
				       ucast->vlan, ucast->vni);
				return -EEXIST;
			}
		}
		u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
			       RTE_CACHE_LINE_SIZE);
		if (!u) {
			DP_ERR(edev, "Did not allocate memory for ucast\n");
			return -ENOMEM;
		}
		ether_addr_copy(mac_addr, &u->mac);
		u->vlan = ucast->vlan;
		u->vni = ucast->vni;
		SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
		qdev->num_uc_addr++;
	} else {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    ETHER_ADDR_LEN) == 0) &&
			    ucast->vlan == tmp->vlan &&
			    ucast->vni == tmp->vni)
				break;
		}
		if (tmp == NULL) {
			DP_INFO(edev, "Unicast MAC is not found\n");
			return -EINVAL;
		}
		SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
		qdev->num_uc_addr--;
	}

	return 0;
}

static int
qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
		  bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ether_addr *mac_addr;
	struct qede_mcast_entry *tmp = NULL;
	struct qede_mcast_entry *m;

	mac_addr = (struct ether_addr *)mcast->mac;
	if (add) {
		SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
			if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
				DP_ERR(edev,
				       "Multicast MAC is already added\n");
				return -EEXIST;
			}
		}
		m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
			       RTE_CACHE_LINE_SIZE);
		if (!m) {
			DP_ERR(edev,
			       "Did not allocate memory for mcast\n");
			return -ENOMEM;
		}
		ether_addr_copy(mac_addr, &m->mac);
		SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
		qdev->num_mc_addr++;
	} else {
		SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
			if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
				break;
		}
		if (tmp == NULL) {
			DP_INFO(edev, "Multicast mac is not found\n");
			return -EINVAL;
		}
		SLIST_REMOVE(&qdev->mc_list_head, tmp,
			     qede_mcast_entry, list);
		qdev->num_mc_addr--;
	}

	return 0;
}

static enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		 bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc;
	struct ecore_filter_mcast mcast;
	struct qede_mcast_entry *tmp;
	uint16_t j = 0;

	/* Multicast */
	if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
		if (add) {
			if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
				DP_ERR(edev,
				       "Mcast filter table limit exceeded, "
				       "Please enable mcast promisc mode\n");
				return -ECORE_INVAL;
			}
		}
		rc = qede_mcast_filter(eth_dev, ucast, add);
		if (rc == 0) {
			DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
			memset(&mcast, 0, sizeof(mcast));
			mcast.num_mc_addrs = qdev->num_mc_addr;
			mcast.opcode = ECORE_FILTER_ADD;
			SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
				ether_addr_copy(&tmp->mac,
					(struct ether_addr *)&mcast.mac[j]);
				j++;
			}
			rc = ecore_filter_mcast_cmd(edev, &mcast,
						    ECORE_SPQ_MODE_CB, NULL);
		}
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to add multicast filter"
			       " rc = %d, op = %d\n", rc, add);
		}
	} else { /* Unicast */
		if (add) {
			if (qdev->num_uc_addr >=
			    qdev->dev_info.num_mac_filters) {
				DP_ERR(edev,
				       "Ucast filter table limit exceeded,"
				       " Please enable promisc mode\n");
				return -ECORE_INVAL;
			}
		}
		rc = qede_ucast_filter(eth_dev, ucast, add);
		if (rc == 0)
			rc = ecore_filter_ucast_cmd(edev, ucast,
						    ECORE_SPQ_MODE_CB, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
			       rc, add);
		}
	}

	return rc;
}
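
/* ecore_filter_mcast_cmd() takes the complete multicast table rather than
 * a delta, which is why the shadow mc_list_head list is replayed in full
 * (mcast.num_mc_addrs entries) on every multicast add or remove above.
 */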

static int
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
		  __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct ecore_filter_ucast ucast;
	int re;

	qede_set_ucast_cmn_params(&ucast);
	ucast.type = ECORE_FILTER_MAC;
	ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
	re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
	return re;
}

static void
qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_filter_ucast ucast;

	PMD_INIT_FUNC_TRACE(edev);

	if (index >= qdev->dev_info.num_mac_filters) {
		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
		       index, qdev->dev_info.num_mac_filters);
		return;
	}

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_REMOVE;
	ucast.type = ECORE_FILTER_MAC;

	/* Use the index maintained by rte */
	ether_addr_copy(&eth_dev->data->mac_addrs[index],
			(struct ether_addr *)&ucast.mac);

	ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
}

static void
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
					       mac_addr->addr_bytes)) {
		DP_ERR(edev, "Setting MAC address is not allowed\n");
		ether_addr_copy(&qdev->primary_mac,
				&eth_dev->data->mac_addrs[0]);
		return;
	}

	qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}

static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_accept_any_vlan_flg = 1;
	params.accept_any_vlan = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to configure accept-any-vlan\n");
			return;
		}
	}

	DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
}

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_inner_vlan_removal_flg = 1;
	params.inner_vlan_removal_flg = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");
			return -1;
		}
	}

	DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
	return 0;
}

static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
				uint16_t vlan_id, int on)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
	struct qede_vlan_entry *tmp = NULL;
	struct qede_vlan_entry *vlan;
	struct ecore_filter_ucast ucast;
	int rc;

	if (on) {
		if (qdev->configured_vlans == dev_info->num_vlan_filters) {
			DP_ERR(edev, "Reached max VLAN filter limit"
				     " enabling accept_any_vlan\n");
			qede_config_accept_any_vlan(qdev, true);
			return 0;
		}

		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id) {
				DP_ERR(edev, "VLAN %u already configured\n",
				       vlan_id);
				return -EEXIST;
			}
		}

		vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
				  RTE_CACHE_LINE_SIZE);

		if (!vlan) {
			DP_ERR(edev, "Did not allocate memory for VLAN\n");
			return -ENOMEM;
		}

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_ADD;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
			       rc);
			rte_free(vlan);
		} else {
			vlan->vid = vlan_id;
			SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
			qdev->configured_vlans++;
			DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	} else {
		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id)
				break;
		}

		if (!tmp) {
			if (qdev->configured_vlans == 0) {
				DP_INFO(edev,
					"No VLAN filters configured yet\n");
				return 0;
			}

			DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
			return -EINVAL;
		}

		SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_REMOVE;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
			       vlan_id, rc);
		} else {
			qdev->configured_vlans--;
			DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	}

	return rc;
}

static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->hw_vlan_strip)
			(void)qede_vlan_stripping(eth_dev, 1);
		else
			(void)qede_vlan_stripping(eth_dev, 0);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN filtering kicks in when a VLAN is added */
		if (rxmode->hw_vlan_filter) {
			qede_vlan_filter_set(eth_dev, 0, 1);
		} else {
			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
				DP_ERR(edev,
				       " Please remove existing VLAN filters"
				       " before disabling VLAN filtering\n");
				/* Signal app that VLAN filtering is still
				 * enabled
				 */
				rxmode->hw_vlan_filter = true;
			} else {
				qede_vlan_filter_set(eth_dev, 0, 0);
			}
		}
	}

	if (mask & ETH_VLAN_EXTEND_MASK)
		DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
			" and classification is based on outer tag only\n");

	DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
		mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);

	return 0;
}

static void qede_prandom_bytes(uint32_t *buff)
{
	uint8_t i;

	srand((unsigned int)time(NULL));
	for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
		buff[i] = rand();
}
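
/* The default RSS key only needs to spread flows across queues; it has
 * no cryptographic role here, so seeding libc rand() with the current
 * time is adequate for generating it.
 */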

int qede_config_rss(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
#endif
	uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
	struct rte_eth_rss_reta_entry64 reta_conf[2];
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, id, pos, q;

	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	if (!rss_conf.rss_key) {
		DP_INFO(edev, "Applying driver default key\n");
		rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
		qede_prandom_bytes(&def_rss_key[0]);
		rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
	}

	/* Configure RSS hash */
	if (qede_rss_hash_update(eth_dev, &rss_conf))
		return -EINVAL;

	/* Configure default RETA */
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		id = i / RTE_RETA_GROUP_SIZE;
		pos = i % RTE_RETA_GROUP_SIZE;
		q = i % QEDE_RSS_COUNT(qdev);
		reta_conf[id].reta[pos] = q;
	}
	if (qede_rss_reta_update(eth_dev, &reta_conf[0],
				 ECORE_RSS_IND_TABLE_SIZE))
		return -EINVAL;

	return 0;
}
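
/* The default RETA above is plain round-robin over the configured Rx
 * queues (q = i % QEDE_RSS_COUNT): with 4 queues the 128 indirection
 * entries become 0,1,2,3,0,1,2,3,... so each queue owns every 4th
 * hash bucket.
 */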

static void qede_fastpath_start(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		ecore_hw_start_fastpath(p_hwfn);
	}
}

static int qede_dev_start(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* Update MTU only if it has changed */
	if (qdev->mtu != qdev->new_mtu) {
		if (qede_update_mtu(eth_dev, qdev->new_mtu))
			goto err;
		qdev->mtu = qdev->new_mtu;
	}

	/* Configure TPA parameters */
	if (rxmode->enable_lro) {
		if (qede_enable_tpa(eth_dev, true))
			return -EINVAL;
		/* Enable scatter mode for LRO */
		if (!rxmode->enable_scatter)
			eth_dev->data->scattered_rx = 1;
	}

	/* Start queues */
	if (qede_start_queues(eth_dev))
		goto err;

	/* Newer SR-IOV PF driver expects RX/TX queues to be started before
	 * enabling RSS. Hence RSS configuration is deferred upto this point.
	 * Also, we would like to retain similar behavior in PF case, so we
	 * don't do PF/VF specific check here.
	 */
	if (rxmode->mq_mode == ETH_MQ_RX_RSS)
		if (qede_config_rss(eth_dev))
			goto err;

	/* Enable vport */
	if (qede_activate_vport(eth_dev, true))
		goto err;

	/* Bring-up the link */
	qede_dev_set_link_state(eth_dev, true);

	/* Update link status */
	qede_link_update(eth_dev, 0);

	/* Start/resume traffic */
	qede_fastpath_start(edev);

	DP_INFO(edev, "Device started\n");

	return 0;
err:
	DP_ERR(edev, "Device start fails\n");
	return -1; /* common error code is < 0 */
}
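
/* Note the start-up ordering above: queues first, then RSS (see the
 * SR-IOV comment), then vport activation, and finally
 * qede_fastpath_start() to resume traffic.
 */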

static void qede_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* Disable vport */
	if (qede_activate_vport(eth_dev, false))
		return;

	if (qdev->enable_lro)
		qede_enable_tpa(eth_dev, false);

	/* Stop queues */
	qede_stop_queues(eth_dev);

	/* Disable traffic */
	ecore_hw_stop_fastpath(edev); /* TBD - loop */

	/* Bring the link down */
	qede_dev_set_link_state(eth_dev, false);

	DP_INFO(edev, "Device is stopped\n");
}

static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
	int ret;

	PMD_INIT_FUNC_TRACE(edev);

	/* Check requirements for 100G mode */
	if (ECORE_IS_CMT(edev)) {
		if (eth_dev->data->nb_rx_queues < 2 ||
		    eth_dev->data->nb_tx_queues < 2) {
			DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
			return -EINVAL;
		}

		if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
		    (eth_dev->data->nb_tx_queues % 2 != 0)) {
			DP_ERR(edev,
			       "100G mode needs even no. of RX/TX queues\n");
			return -EINVAL;
		}
	}

	/* We need to have min 1 RX queue.There is no min check in
	 * rte_eth_dev_configure(), so we are checking it here.
	 */
	if (eth_dev->data->nb_rx_queues == 0) {
		DP_ERR(edev, "Minimum one RX queue is required\n");
		return -EINVAL;
	}

	/* Sanity checks and throw warnings */
	if (rxmode->enable_scatter)
		eth_dev->data->scattered_rx = 1;

	if (!rxmode->hw_strip_crc)
		DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");

	if (!rxmode->hw_ip_checksum)
		DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
			      "in hw\n");
	if (rxmode->header_split)
		DP_INFO(edev, "Header split enable is not supported\n");
	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
				ETH_MQ_RX_RSS)) {
		DP_ERR(edev, "Unsupported multi-queue mode\n");
		return -ENOTSUP;
	}
	/* Flow director mode check */
	if (qede_check_fdir_support(eth_dev))
		return -ENOTSUP;

	/* Deallocate resources if held previously. It is needed only if the
	 * queue count has been changed from previous configuration. If its
	 * going to change then it means RX/TX queue setup will be called
	 * again and the fastpath pointers will be reinitialized there.
	 */
	if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
	    qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
		qede_dealloc_fp_resc(eth_dev);
		/* Proceed with updated queue count */
		qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
		qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
		if (qede_alloc_fp_resc(qdev))
			return -ENOMEM;
	}

	/* VF's MTU has to be set using vport-start where as
	 * PF's MTU can be updated via vport-update.
	 */
	if (IS_VF(edev)) {
		if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
			return -1;
	} else {
		if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
			return -1;
	}

	qdev->mtu = rxmode->max_rx_pkt_len;
	qdev->new_mtu = qdev->mtu;

	/* Enable VLAN offloads by default */
	ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
				    ETH_VLAN_FILTER_MASK |
				    ETH_VLAN_EXTEND_MASK);
	if (ret)
		return ret;

	DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
		QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));

	return 0;
}
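
/* The even-queue requirement for 100G follows from CMT: the port is
 * backed by two engines and queues are split between them, so an odd
 * count would leave the engines unbalanced.
 */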

/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 128,
	.nb_align = 128 /* lowest common multiple */
};

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 256,
	.nb_align = 256,
	.nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
	.nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
};

static void
qede_dev_info_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_dev_info *dev_info)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output link;
	uint32_t speed_cap = 0;

	PMD_INIT_FUNC_TRACE(edev);

	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
	dev_info->rx_desc_lim = qede_rx_desc_lim;
	dev_info->tx_desc_lim = qede_tx_desc_lim;

	if (IS_PF(edev))
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
	else
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
	dev_info->max_tx_queues = dev_info->max_rx_queues;

	dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
	dev_info->max_vfs = 0;
	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.txq_flags = QEDE_TXQ_FLAGS,
	};

	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
				     DEV_RX_OFFLOAD_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_UDP_CKSUM |
				     DEV_RX_OFFLOAD_TCP_CKSUM |
				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_TCP_LRO);

	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
				     DEV_TX_OFFLOAD_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_UDP_CKSUM |
				     DEV_TX_OFFLOAD_TCP_CKSUM |
				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_TCP_TSO |
				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		speed_cap |= ETH_LINK_SPEED_1G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		speed_cap |= ETH_LINK_SPEED_10G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		speed_cap |= ETH_LINK_SPEED_25G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		speed_cap |= ETH_LINK_SPEED_40G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		speed_cap |= ETH_LINK_SPEED_50G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		speed_cap |= ETH_LINK_SPEED_100G;
	dev_info->speed_capa = speed_cap;
}

/* return 0 means link status changed, -1 means not changed */
int
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t link_duplex;
	struct qed_link_output link;
	struct rte_eth_link *curr = &eth_dev->data->dev_link;

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);

	/* Link Speed */
	curr->link_speed = link.speed;

	/* Link Mode */
	switch (link.duplex) {
	case QEDE_DUPLEX_HALF:
		link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	case QEDE_DUPLEX_FULL:
		link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case QEDE_DUPLEX_UNKNOWN:
	default:
		link_duplex = -1;
	}
	curr->link_duplex = link_duplex;

	/* Link Status */
	curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;

	/* AN */
	curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;

	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
		curr->link_speed, curr->link_duplex,
		curr->link_autoneg, curr->link_status);

	/* return 0 means link status changed, -1 means not changed */
	return ((curr->link_status == link.link_up) ? -1 : 0);
}

static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);
#endif

	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	qed_configure_filter_rx_mode(eth_dev, type);
}

static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);
#endif

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
	else
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static void qede_poll_sp_sb_cb(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	qede_interrupt_action(&edev->hwfns[1]);

	rc = rte_eal_alarm_set(timer_period * US_PER_S,
			       qede_poll_sp_sb_cb,
			       (void *)eth_dev);
	if (rc != 0) {
		DP_ERR(edev, "Unable to start periodic"
			     " timer rc %d\n", rc);
		assert(false && "Unable to start periodic timer");
	}
}

static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* dev_stop() shall cleanup fp resources in hw but without releasing
	 * dma memories and sw structures so that dev_start() can be called
	 * by the app without reconfiguration. However, in dev_close() we
	 * can release all the resources and device can be brought up newly
	 */
	if (eth_dev->data->dev_started)
		qede_dev_stop(eth_dev);

	qede_stop_vport(edev);
	qede_fdir_dealloc_resc(eth_dev);
	qede_dealloc_fp_resc(eth_dev);

	eth_dev->data->nb_rx_queues = 0;
	eth_dev->data->nb_tx_queues = 0;

	qdev->ops->common->slowpath_stop(edev);
	qdev->ops->common->remove(edev);
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     qede_interrupt_handler, (void *)eth_dev);
	if (ECORE_IS_CMT(edev))
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
}

static int
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	unsigned int i = 0, j = 0, qid;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	ecore_get_vport_stats(edev, &stats);

	/* RX Stats */
	eth_stats->ipackets = stats.common.rx_ucast_pkts +
	    stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;

	eth_stats->ibytes = stats.common.rx_ucast_bytes +
	    stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;

	eth_stats->ierrors = stats.common.rx_crc_errors +
	    stats.common.rx_align_errors +
	    stats.common.rx_carrier_errors +
	    stats.common.rx_oversize_packets +
	    stats.common.rx_jabbers + stats.common.rx_undersize_packets;

	eth_stats->rx_nombuf = stats.common.no_buff_discards;

	eth_stats->imissed = stats.common.mftag_filter_discards +
	    stats.common.mac_filter_discards +
	    stats.common.no_buff_discards +
	    stats.common.brb_truncates + stats.common.brb_discards;

	/* TX stats */
	eth_stats->opackets = stats.common.tx_ucast_pkts +
	    stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;

	eth_stats->obytes = stats.common.tx_ucast_bytes +
	    stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;

	eth_stats->oerrors = stats.common.tx_err_drop_pkts;

	/* Queue stats */
	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
	    (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
		DP_VERBOSE(edev, ECORE_MSG_DEBUG,
		       "Not all the queue stats will be displayed. Set"
		       " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
		       " appropriately and retry.\n");

	for_each_rss(qid) {
		eth_stats->q_ipackets[i] =
			*(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				offsetof(struct qede_rx_queue,
					 rcv_pkts));
		eth_stats->q_errors[i] =
			*(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				offsetof(struct qede_rx_queue,
					 rx_hw_errors)) +
			*(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				offsetof(struct qede_rx_queue,
					 rx_alloc_errors));
		i++;
		if (i == rxq_stat_cntrs)
			break;
	}

	for_each_tss(qid) {
		txq = qdev->fp_array[qid].txq;
		eth_stats->q_opackets[j] =
			*((uint64_t *)(uintptr_t)
				(((uint64_t)(uintptr_t)(txq)) +
				 offsetof(struct qede_tx_queue,
					  xmit_pkts)));
		j++;
		if (j == txq_stat_cntrs)
			break;
	}

	return 0;
}

static unsigned int
qede_get_xstats_count(struct qede_dev *qdev) {
	if (ECORE_IS_BB(&qdev->edev))
		return RTE_DIM(qede_xstats_strings) +
		       RTE_DIM(qede_bb_xstats_strings) +
		       (RTE_DIM(qede_rxq_xstats_strings) *
			RTE_MIN(QEDE_RSS_COUNT(qdev),
				RTE_ETHDEV_QUEUE_STAT_CNTRS));
	else
		return RTE_DIM(qede_xstats_strings) +
		       RTE_DIM(qede_ah_xstats_strings) +
		       (RTE_DIM(qede_rxq_xstats_strings) *
			RTE_MIN(QEDE_RSS_COUNT(qdev),
				RTE_ETHDEV_QUEUE_STAT_CNTRS));
}

static int
qede_get_xstats_names(struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      __rte_unused unsigned int limit)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	const unsigned int stat_cnt = qede_get_xstats_count(qdev);
	unsigned int i, qid, stat_idx = 0;
	unsigned int rxq_stat_cntrs;

	if (xstats_names != NULL) {
		for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
			snprintf(xstats_names[stat_idx].name,
				sizeof(xstats_names[stat_idx].name),
				"%s",
				qede_xstats_strings[i].name);
			stat_idx++;
		}

		if (ECORE_IS_BB(edev)) {
			for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					sizeof(xstats_names[stat_idx].name),
					"%s",
					qede_bb_xstats_strings[i].name);
				stat_idx++;
			}
		} else {
			for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					sizeof(xstats_names[stat_idx].name),
					"%s",
					qede_ah_xstats_strings[i].name);
				stat_idx++;
			}
		}

		rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
					 RTE_ETHDEV_QUEUE_STAT_CNTRS);
		for (qid = 0; qid < rxq_stat_cntrs; qid++) {
			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					sizeof(xstats_names[stat_idx].name),
					"%.4s%d%s",
					qede_rxq_xstats_strings[i].name, qid,
					qede_rxq_xstats_strings[i].name + 4);
				stat_idx++;
			}
		}
	}

	return stat_cnt;
}
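
/* The "%.4s%d%s" format splices the queue id into the template name:
 * "rx_q_segments" for queue 0 becomes "rx_q0_segments".
 */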

static int
qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		unsigned int n)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	const unsigned int num = qede_get_xstats_count(qdev);
	unsigned int i, qid, stat_idx = 0;
	unsigned int rxq_stat_cntrs;

	if (n < num)
		return num;

	ecore_get_vport_stats(edev, &stats);

	for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
		xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
					     qede_xstats_strings[i].offset);
		xstats[stat_idx].id = stat_idx;
		stat_idx++;
	}

	if (ECORE_IS_BB(edev)) {
		for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
			xstats[stat_idx].value =
					*(uint64_t *)(((char *)&stats) +
					qede_bb_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	} else {
		for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
			xstats[stat_idx].value =
					*(uint64_t *)(((char *)&stats) +
					qede_ah_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	}

	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (qid = 0; qid < rxq_stat_cntrs; qid++) {
		for_each_rss(qid) {
			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
				xstats[stat_idx].value = *(uint64_t *)(
					((char *)(qdev->fp_array[qid].rxq)) +
					 qede_rxq_xstats_strings[i].offset);
				xstats[stat_idx].id = stat_idx;
				stat_idx++;
			}
		}
	}

	return stat_idx;
}

static void
qede_reset_xstats(struct rte_eth_dev *dev)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
}

int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "setting link state %d\n", link_up);
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = link_up;
	rc = qdev->ops->common->set_link(edev, &link_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(edev, "Unable to set link state %d\n", link_up);

	return rc;
}

static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, true);
}

static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, false);
}

static void qede_reset_stats(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
}

static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	enum qed_filter_rx_mode_type type =
	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

	qed_configure_filter_rx_mode(eth_dev, type);
}

static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_PROMISC);
	else
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;
	struct qed_link_params params;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
	if (fc_conf->autoneg) {
		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
			DP_ERR(edev, "Autoneg not supported\n");
			return -EINVAL;
		}
		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	}

	/* Pause is assumed to be supported (SUPPORTED_Pause) */
	if (fc_conf->mode == RTE_FC_FULL)
		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
					QED_LINK_PAUSE_RX_ENABLE);
	if (fc_conf->mode == RTE_FC_TX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
	if (fc_conf->mode == RTE_FC_RX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;

	params.link_up = true;
	(void)qdev->ops->common->set_link(edev, &params);

	return 0;
}

static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		fc_conf->autoneg = true;

	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
					 QED_LINK_PAUSE_TX_ENABLE))
		fc_conf->mode = RTE_FC_FULL;
	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static const uint32_t *
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_TUNNEL_GENEVE,
		/* Inner */
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (eth_dev->rx_pkt_burst == qede_recv_pkts)
		return ptypes;

	return NULL;
}

static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
	*rss_caps = 0;
	*rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
}

int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params rss_params;
	struct ecore_hwfn *p_hwfn;
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint64_t hf = rss_conf->rss_hf;
	uint8_t len = rss_conf->rss_key_len;
	uint8_t idx;
	uint8_t i;
	int rc;

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	memset(&rss_params, 0, sizeof(rss_params));

	DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
		(unsigned long)hf, len, key);

	if (hf != 0) {
		/* Enabling RSS */
		DP_INFO(edev, "Enabling rss\n");

		/* RSS caps */
		qede_init_rss_caps(&rss_params.rss_caps, hf);
		rss_params.update_rss_capabilities = 1;

		/* RSS hash key */
		if (key) {
			if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
				DP_ERR(edev, "RSS key length exceeds limit\n");
				return -EINVAL;
			}
			DP_INFO(edev, "Applying user supplied hash key\n");
			rss_params.update_rss_key = 1;
			memcpy(&rss_params.rss_key, key, len);
		}
		rss_params.rss_enable = 1;
	}

	rss_params.update_rss_config = 1;
	/* tbl_size has to be set with capabilities */
	rss_params.rss_table_size_log = 7;
	vport_update_params.vport_id = 0;
	/* pass the L2 handles instead of qids */
	for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
		idx = qdev->rss_ind_table[i];
		rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
	}
	vport_update_params.rss_params = &rss_params;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			return rc;
		}
	}
	qdev->rss_enable = rss_params.rss_enable;

	/* Update local structure for hash query */
	qdev->rss_conf.rss_hf = hf;
	qdev->rss_conf.rss_key_len = len;
	if (qdev->rss_enable) {
		if (qdev->rss_conf.rss_key == NULL) {
			qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
			if (qdev->rss_conf.rss_key == NULL) {
				DP_ERR(edev, "No memory to store RSS key\n");
				return -ENOMEM;
			}
		}
		if (key && len) {
			DP_INFO(edev, "Storing RSS key\n");
			memcpy(qdev->rss_conf.rss_key, key, len);
		}
	} else if (!qdev->rss_enable && len == 0) {
		if (qdev->rss_conf.rss_key) {
			free(qdev->rss_conf.rss_key);
			qdev->rss_conf.rss_key = NULL;
			DP_INFO(edev, "Free RSS key\n");
		}
	}

	return 0;
}

static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
				  struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);

	rss_conf->rss_hf = qdev->rss_conf.rss_hf;
	rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;

	if (rss_conf->rss_key && qdev->rss_conf.rss_key)
		memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
		       rss_conf->rss_key_len);

	return 0;
}
static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
				     struct ecore_rss_params *rss)
{
	int i, fn;
	bool rss_mode = 1; /* enable */
	struct ecore_queue_cid *cid;
	struct ecore_rss_params *t_rss;

	/* In the regular scenario, we'd simply need to take input handlers.
	 * But in CMT, we'd have to split the handlers according to the
	 * engine they were configured on. We'd then have to understand
	 * whether RSS is really required, since 2-queues on CMT doesn't
	 * require RSS.
	 */

	/* CMT should be round-robin */
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		cid = rss->rss_ind_table[i];

		if (cid->p_owner == ECORE_LEADING_HWFN(edev))
			t_rss = &rss[0];
		else
			t_rss = &rss[1];

		t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
	}

	t_rss = &rss[1];
	t_rss->update_rss_ind_table = 1;
	t_rss->rss_table_size_log = 7;
	t_rss->update_rss_config = 1;

	/* Make sure RSS is actually required */
	for_each_hwfn(edev, fn) {
		for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
		     i++) {
			if (rss[fn].rss_ind_table[i] !=
			    rss[fn].rss_ind_table[0])
				break;
		}

		if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
			DP_INFO(edev,
				"CMT - 1 queue per-hwfn; Disabling RSS\n");
			rss_mode = 0;
			goto out;
		}
	}

out:
	t_rss->rss_enable = rss_mode;

	return rss_mode;
}
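/* Worked example (illustrative): on a 2-engine CMT device with four Rx
 * queues, an indirection table [q0, q1, q2, q3, q0, q1, ...] is split by
 * owning hwfn, so engine 0 ends up with [q0, q2, q0, ...] and engine 1
 * with [q1, q3, q1, ...] at index i / num_hwfns. If every entry of an
 * engine's split table then points at the same queue, there is nothing
 * left to spread and RSS is disabled above.
 */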
int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params *params;
	struct ecore_hwfn *p_hwfn;
	uint16_t i, idx, shift;
	uint8_t entry;
	int rc = 0;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
		       reta_size);
		return -EINVAL;
	}

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
			     RTE_CACHE_LINE_SIZE);
	if (params == NULL) {
		DP_ERR(edev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = reta_conf[idx].reta[shift];
			/* Pass rxq handles to ecore */
			params->rss_ind_table[i] =
				qdev->fp_array[entry].rxq->handle;
			/* Update the local copy for RETA query command */
			qdev->rss_ind_table[i] = entry;
		}
	}

	params->update_rss_ind_table = 1;
	params->rss_table_size_log = 7;
	params->update_rss_config = 1;

	/* Fix up RETA for CMT mode device */
	if (ECORE_IS_CMT(edev))
		qdev->rss_enable = qede_update_rss_parm_cmt(edev, params);

	vport_update_params.vport_id = 0;
	/* Use the current value of rss_enable */
	params->rss_enable = qdev->rss_enable;
	vport_update_params.rss_params = params;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			goto out;
		}
	}

out:
	rte_free(params);
	return rc;
}
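/* Usage sketch (hypothetical application code): each
 * rte_eth_rss_reta_entry64 group covers RTE_RETA_GROUP_SIZE (64) entries
 * and only entries whose mask bit is set are applied. Spreading a
 * 128-entry table across two queues:
 *
 *	struct rte_eth_rss_reta_entry64 reta[2];
 *	uint16_t i;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < 128; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % 2;  // alternate queue 0 / queue 1
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, 128);
 */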
static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t i, idx, shift;
	uint8_t entry;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported\n",
		       reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = qdev->rss_ind_table[i];
			reta_conf[idx].reta[shift] = entry;
		}
	}

	return 0;
}
static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_dev_info dev_info = {0};
	struct qede_fastpath *fp;
	uint32_t frame_size;
	uint16_t rx_buf_size;
	uint16_t bufsz;
	int i;

	PMD_INIT_FUNC_TRACE(edev);
	qede_dev_info_get(dev, &dev_info);
	frame_size = mtu + QEDE_ETH_OVERHEAD;
	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
		DP_ERR(edev, "MTU %u out of range\n", mtu);
		return -EINVAL;
	}
	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
			dev->data->min_rx_buf_size);
		return -EINVAL;
	}
	/* Temporarily replace I/O functions with dummy ones. It cannot
	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
	 */
	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;

	qede_dev_stop(dev);
	qdev->mtu = mtu;

	/* Fix up RX buf size for all queues of the port */
	for (i = 0; i < qdev->num_rx_queues; i++) {
		fp = &qdev->fp_array[i];
		bufsz = (uint16_t)rte_pktmbuf_data_room_size(
			fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
		if (dev->data->scattered_rx)
			rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
		else
			rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
		rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
		fp->rxq->rx_buf_size = rx_buf_size;
		DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
	}

	qede_dev_start(dev);
	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;
	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
	/* Reassign the real I/O functions */
	dev->rx_pkt_burst = qede_recv_pkts;
	dev->tx_pkt_burst = qede_xmit_pkts;

	return 0;
}
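/* Usage sketch (hypothetical application code): the handler above stops
 * and restarts the port internally, so call it from a context where a
 * brief traffic outage is acceptable.
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *		printf("MTU change rejected (out of range?)\n");
 */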
static int
qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn; /* @DPDK */
	uint16_t udp_port;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	memset(&tunn, 0, sizeof(tunn));

	switch (tunnel_udp->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
			DP_ERR(edev, "UDP port %u doesn't exist\n",
			       tunnel_udp->udp_port);
			return ECORE_INVAL;
		}
		udp_port = 0;

		tunn.vxlan_port.b_update_port = true;
		tunn.vxlan_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u\n",
			       tunn.vxlan_port.port);
			return rc;
		}

		qdev->vxlan.udp_port = udp_port;
		/* If the request is to delete the UDP port and the number of
		 * VXLAN filters has reached 0, then VXLAN offload can be
		 * disabled.
		 */
		if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
			return qede_vxlan_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN, false);

		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
			DP_ERR(edev, "UDP port %u doesn't exist\n",
			       tunnel_udp->udp_port);
			return ECORE_INVAL;
		}
		udp_port = 0;

		tunn.geneve_port.b_update_port = true;
		tunn.geneve_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u\n",
			       tunn.geneve_port.port);
			return rc;
		}

		qdev->geneve.udp_port = udp_port;
		/* If the request is to delete the UDP port and the number of
		 * GENEVE filters has reached 0, then GENEVE offload can be
		 * disabled.
		 */
		if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
			return qede_geneve_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN, false);

		break;
	default:
		return ECORE_INVAL;
	}

	return 0;
}
static int
qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn; /* @DPDK */
	uint16_t udp_port;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	memset(&tunn, 0, sizeof(tunn));

	switch (tunnel_udp->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
			DP_INFO(edev,
				"UDP port %u for VXLAN was already configured\n",
				tunnel_udp->udp_port);
			return ECORE_SUCCESS;
		}

		/* Enable VXLAN tunnel with default MAC/VLAN classification if
		 * it was not enabled while adding a VXLAN filter before the
		 * UDP port update.
		 */
		if (!qdev->vxlan.enable) {
			rc = qede_vxlan_enable(eth_dev,
				ECORE_TUNN_CLSS_MAC_VLAN, true);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Failed to enable VXLAN "
				       "prior to updating UDP port\n");
				return rc;
			}
		}
		udp_port = tunnel_udp->udp_port;

		tunn.vxlan_port.b_update_port = true;
		tunn.vxlan_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
			       udp_port);
			return rc;
		}

		DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);

		qdev->vxlan.udp_port = udp_port;
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
			DP_INFO(edev,
				"UDP port %u for GENEVE was already configured\n",
				tunnel_udp->udp_port);
			return ECORE_SUCCESS;
		}

		/* Enable GENEVE tunnel with default MAC/VLAN classification if
		 * it was not enabled while adding a GENEVE filter before the
		 * UDP port update.
		 */
		if (!qdev->geneve.enable) {
			rc = qede_geneve_enable(eth_dev,
				ECORE_TUNN_CLSS_MAC_VLAN, true);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Failed to enable GENEVE "
				       "prior to updating UDP port\n");
				return rc;
			}
		}
		udp_port = tunnel_udp->udp_port;

		tunn.geneve_port.b_update_port = true;
		tunn.geneve_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
			       udp_port);
			return rc;
		}

		DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);

		qdev->geneve.udp_port = udp_port;
		break;
	default:
		return ECORE_INVAL;
	}

	return 0;
}
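/* Usage sketch (hypothetical application code): register/unregister a
 * non-default VXLAN UDP port through the generic ethdev tunnel API,
 * which lands in the two handlers above.
 *
 *	struct rte_eth_udp_tunnel tunnel_udp = {
 *		.udp_port = 4790,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
 *	...
 *	rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel_udp);
 */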
static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
				       uint32_t *clss, char *str)
{
	uint16_t j;

	*clss = MAX_ECORE_TUNN_CLSS;

	for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
		if (filter == qede_tunn_types[j].rte_filter_type) {
			*type = qede_tunn_types[j].qede_type;
			*clss = qede_tunn_types[j].qede_tunn_clss;
			strcpy(str, qede_tunn_types[j].string);
			return;
		}
	}
}
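/* Behavior note (illustrative, not in the original sources):
 * _qede_tunn_filter_config() below passes conf->filter_type in here and
 * checks the returned classification; when no qede_tunn_types[] entry
 * matches, *clss is left at MAX_ECORE_TUNN_CLSS and the filter request
 * is rejected as unsupported.
 */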
static int
qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
			      const struct rte_eth_tunnel_filter_conf *conf,
			      uint32_t type)
{
	/* Init common ucast params first */
	qede_set_ucast_cmn_params(ucast);

	/* Copy out the required fields based on classification type */
	ucast->type = type;

	switch (type) {
	case ECORE_FILTER_VNI:
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_VLAN:
		ucast->vlan = conf->inner_vlan;
		break;
	case ECORE_FILTER_MAC:
		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		break;
	case ECORE_FILTER_INNER_MAC:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		break;
	case ECORE_FILTER_MAC_VNI_PAIR:
		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_MAC_VNI_PAIR:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_PAIR:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vlan = conf->inner_vlan;
		break;
	default:
		return -EINVAL;
	}

	return ECORE_SUCCESS;
}
static int
_qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
			 const struct rte_eth_tunnel_filter_conf *conf,
			 __attribute__((unused)) enum rte_filter_op filter_op,
			 enum ecore_tunn_clss *clss,
			 bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_ucast ucast = {0};
	enum ecore_filter_ucast_type type;
	uint16_t filter_type = 0;
	char str[80];
	int rc;

	filter_type = conf->filter_type;
	/* Determine if the given filter classification is supported */
	qede_get_ecore_tunn_params(filter_type, &type, clss, str);
	if (*clss == MAX_ECORE_TUNN_CLSS) {
		DP_ERR(edev, "Unsupported filter type\n");
		return -EINVAL;
	}
	/* Init tunnel ucast params */
	rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
		       conf->filter_type);
		return rc;
	}
	DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
		str, filter_op, ucast.type);

	ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;

	/* Skip MAC/VLAN if filter is based on VNI */
	if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
		rc = qede_mac_int_ops(eth_dev, &ucast, add);
		if ((rc == 0) && add) {
			/* Enable accept anyvlan */
			qede_config_accept_any_vlan(qdev, true);
		}
	} else {
		rc = qede_ucast_filter(eth_dev, &ucast, add);
		if (rc == 0)
			rc = ecore_filter_ucast_cmd(edev, &ucast,
						    ECORE_SPQ_MODE_CB, NULL);
	}

	return rc;
}
static int
qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
			enum rte_filter_op filter_op,
			const struct rte_eth_tunnel_filter_conf *conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
	bool add;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		add = true;
		break;
	case RTE_ETH_FILTER_DELETE:
		add = false;
		break;
	default:
		DP_ERR(edev, "Unsupported operation %d\n", filter_op);
		return -EINVAL;
	}

	if (IS_VF(edev))
		return qede_tunn_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN,
					conf->tunnel_type, add);

	rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (add) {
		if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
			qdev->vxlan.num_filters++;
			qdev->vxlan.filter_type = conf->filter_type;
		} else { /* GENEVE */
			qdev->geneve.num_filters++;
			qdev->geneve.filter_type = conf->filter_type;
		}

		if (!qdev->vxlan.enable || !qdev->geneve.enable)
			return qede_tunn_enable(eth_dev, clss,
						conf->tunnel_type, true);
	} else {
		if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
			qdev->vxlan.num_filters--;
		else /* GENEVE */
			qdev->geneve.num_filters--;

		/* Disable the tunnel offload once its filter count drops to 0 */
		if ((qdev->vxlan.num_filters == 0) ||
		    (qdev->geneve.num_filters == 0))
			return qede_tunn_enable(eth_dev, clss,
						conf->tunnel_type, false);
	}

	return 0;
}
int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
			 enum rte_filter_type filter_type,
			 enum rte_filter_op filter_op,
			 void *arg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_tunnel_filter_conf *filter_conf =
			(struct rte_eth_tunnel_filter_conf *)arg;

	switch (filter_type) {
	case RTE_ETH_FILTER_TUNNEL:
		switch (filter_conf->tunnel_type) {
		case RTE_TUNNEL_TYPE_VXLAN:
		case RTE_TUNNEL_TYPE_GENEVE:
			DP_INFO(edev,
				"Packet steering to the specified Rx queue"
				" is not supported with UDP tunneling\n");
			return qede_tunn_filter_config(eth_dev, filter_op,
						       filter_conf);
		/* Placeholders for future tunneling support */
		case RTE_TUNNEL_TYPE_TEREDO:
		case RTE_TUNNEL_TYPE_NVGRE:
		case RTE_TUNNEL_TYPE_IP_IN_GRE:
		case RTE_L2_TUNNEL_TYPE_E_TAG:
			DP_ERR(edev, "Unsupported tunnel type %d\n",
			       filter_conf->tunnel_type);
			return -EINVAL;
		case RTE_TUNNEL_TYPE_NONE:
		default:
			return 0;
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		return qede_fdir_filter_conf(eth_dev, filter_op, arg);
	case RTE_ETH_FILTER_NTUPLE:
		return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
	case RTE_ETH_FILTER_MACVLAN:
	case RTE_ETH_FILTER_ETHERTYPE:
	case RTE_ETH_FILTER_FLEXIBLE:
	case RTE_ETH_FILTER_SYN:
	case RTE_ETH_FILTER_HASH:
	case RTE_ETH_FILTER_L2_TUNNEL:
	case RTE_ETH_FILTER_MAX:
	default:
		DP_ERR(edev, "Unsupported filter type %d\n",
		       filter_type);
		return -EINVAL;
	}

	return 0;
}
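/* Usage sketch (hypothetical application code): adding an inner-MAC/VLAN
 * VXLAN filter through the legacy filter-ctrl API handled above; port_id
 * and mac are assumed to be defined by the application.
 *
 *	struct rte_eth_tunnel_filter_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
 *	conf.filter_type = ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN;
 *	conf.inner_vlan = 100;
 *	memcpy(conf.inner_mac.addr_bytes, mac, ETHER_ADDR_LEN);
 *
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
 *				RTE_ETH_FILTER_ADD, &conf);
 */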
static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.filter_ctrl = qede_dev_filter_ctrl,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
};
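/* Dispatch note (illustrative): once eth_dev->dev_ops points at this
 * table, generic ethdev calls resolve to the qede handlers, e.g.:
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf); // .dev_configure
 *	rte_eth_rx_queue_setup(port_id, 0, 512,
 *			       rte_eth_dev_socket_id(port_id),
 *			       NULL, mb_pool);            // .rx_queue_setup
 *	rte_eth_dev_start(port_id);                        // .dev_start
 */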
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
};
static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(edev, &pf_params);
}
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	int rc;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	adapter->ethdev = eth_dev;
	edev = &adapter->edev;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	pci_addr = pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);

	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	eth_dev->rx_pkt_burst = qede_recv_pkts;
	eth_dev->tx_pkt_burst = qede_xmit_pkts;
	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_ERR(edev, "Skipping device init from secondary process\n");
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* @DPDK */
	edev->vendor_id = pci_dev->id.vendor_id;
	edev->device_id = pci_dev->id.device_id;

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		return -EINVAL;
	}

	DP_INFO(edev, "Starting qede probe\n");
	rc = qed_ops->common->probe(edev, pci_dev, dp_module,
				    dp_level, is_vf);
	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		return -ENODEV;
	}
	qede_update_pf_params(edev);
	rte_intr_callback_register(&pci_dev->intr_handle,
				   qede_interrupt_handler, (void *)eth_dev);
	if (rte_intr_enable(&pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		return -ENODEV;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));
	params.int_mode = ECORE_INT_MODE_MSIX;
	params.drv_major = QEDE_PMD_VERSION_MAJOR;
	params.drv_minor = QEDE_PMD_VERSION_MINOR;
	params.drv_rev = QEDE_PMD_VERSION_REVISION;
	params.drv_eng = QEDE_PMD_VERSION_PATCH;
	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
		QEDE_PMD_DRV_VER_STR_SIZE);

	/* For CMT mode device do periodic polling for slowpath events.
	 * This is required since uio device uses only one MSI-x
	 * interrupt vector but we need one for each engine.
	 */
	if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
		rc = rte_eal_alarm_set(timer_period * US_PER_S,
				       qede_poll_sp_sb_cb,
				       (void *)eth_dev);
		if (rc != 0) {
			DP_ERR(edev, "Unable to start periodic"
				     " timer rc %d\n", rc);
			return -EINVAL;
		}
	}

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	qede_alloc_etherdev(adapter, &dev_info);

	adapter->ops->common->set_name(edev, edev->name);

	if (!is_vf)
		adapter->dev_info.num_mac_filters =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					   ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
				(uint32_t *)&adapter->dev_info.num_mac_filters);

	/* Allocate memory for storing MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					(ETHER_ADDR_LEN *
					adapter->dev_info.num_mac_filters),
					RTE_CACHE_LINE_SIZE);

	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENOMEM;
	}

	if (!is_vf) {
		ether_addr_copy((struct ether_addr *)edev->hwfns[0].
				hw_info.hw_mac_addr,
				&eth_dev->data->mac_addrs[0]);
		ether_addr_copy(&eth_dev->data->mac_addrs[0],
				&adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist && is_mac_forced) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				ether_addr_copy((struct ether_addr *)&vf_mac,
						&eth_dev->data->mac_addrs[0]);
				ether_addr_copy(&eth_dev->data->mac_addrs[0],
						&adapter->primary_mac);
			} else {
				DP_ERR(edev, "No VF macaddr assigned\n");
			}
		}
	}

	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;

	if (do_once) {
#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
		qede_print_adapter_info(adapter);
#endif
		do_once = false;
	}

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	SLIST_INIT(&adapter->fdir_info.fdir_list_head);
	SLIST_INIT(&adapter->vlan_list_head);
	SLIST_INIT(&adapter->uc_list_head);
	adapter->mtu = ETHER_MTU;
	adapter->new_mtu = ETHER_MTU;
	if (!is_vf)
		if (qede_start_vport(adapter, adapter->mtu))
			return -1;

	DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
		adapter->primary_mac.addr_bytes[0],
		adapter->primary_mac.addr_bytes[1],
		adapter->primary_mac.addr_bytes[2],
		adapter->primary_mac.addr_bytes[3],
		adapter->primary_mac.addr_bytes[4],
		adapter->primary_mac.addr_bytes[5]);

	DP_INFO(edev, "Device initialized\n");

	return 0;
}
static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 0);
}
static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);
#endif

	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	if (eth_dev->data->mac_addrs)
		rte_free(eth_dev->data->mac_addrs);

	eth_dev->data->mac_addrs = NULL;

	return 0;
}
static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}
static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
	},
	{.vendor_id = 0,}
};
static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
	},
	{.vendor_id = 0,}
};
static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				    struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qedevf_eth_dev_init);
}

static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
}

static struct rte_pci_driver rte_qedevf_pmd = {
	.id_table = pci_id_qedevf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qedevf_eth_dev_pci_probe,
	.remove = qedevf_eth_dev_pci_remove,
};
static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				  struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qede_eth_dev_init);
}

static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
}

static struct rte_pci_driver rte_qede_pmd = {
	.id_table = pci_id_qede_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qede_eth_dev_pci_probe,
	.remove = qede_eth_dev_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");