 * Copyright (c) 2016 - 2018 Cavium Inc.
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_ethdev.h"
#include <rte_alarm.h>
#include <rte_version.h>
#include <rte_kvargs.h>

int qede_logtype_init;
int qede_logtype_driver;

static const struct qed_eth_ops *qed_ops;
#define QEDE_SP_TIMER_PERIOD	10000 /* 10ms, in microseconds */

/* VXLAN tunnel classification mapping */
const struct _qede_udp_tunn_types {
	uint16_t rte_filter_type;
	enum ecore_filter_ucast_type qede_type;
	enum ecore_tunn_clss qede_tunn_clss;
	const char *string;
} qede_tunn_types[] = {
	{
		ETH_TUNNEL_FILTER_OMAC,
		ECORE_FILTER_MAC,
		ECORE_TUNN_CLSS_MAC_VLAN,
		"outer-mac"
	},
	{
		ETH_TUNNEL_FILTER_TENID,
		ECORE_FILTER_VNI,
		ECORE_TUNN_CLSS_MAC_VNI,
		"vni"
	},
	{
		ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_INNER_MAC,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_INNER_VLAN,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
		ECORE_FILTER_MAC_VNI_PAIR,
		ECORE_TUNN_CLSS_MAC_VNI,
		"outer-mac and vni"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-mac and inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-mac and inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_INNER_MAC_VNI_PAIR,
		ECORE_TUNN_CLSS_INNER_MAC_VNI,
		"vni and inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"vni and inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_INNER_PAIR,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-mac and inner-vlan",
	},
	{
		ETH_TUNNEL_FILTER_OIP,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-IP"
	},
	{
		ETH_TUNNEL_FILTER_IIP,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"inner-IP"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_IVLAN"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_IVLAN_TENID"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_TENID,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_TENID"
	},
	{
		RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"OMAC_TENID_IMAC"
	},
};
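
/*
 * Illustrative sketch, not part of the driver: how a filter-type value can
 * be resolved against qede_tunn_types above. The helper name
 * qede_tunn_clss_lookup_example is an assumption made up for this example.
 */
static __rte_unused enum ecore_tunn_clss
qede_tunn_clss_lookup_example(uint16_t rte_filter_type)
{
	uint32_t i;

	for (i = 0; i < RTE_DIM(qede_tunn_types); i++)
		if (qede_tunn_types[i].rte_filter_type == rte_filter_type)
			return qede_tunn_types[i].qede_tunn_clss;

	return MAX_ECORE_TUNN_CLSS; /* no matching classification */
}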

struct rte_qede_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint64_t offset;
};

static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
	{"rx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
	{"rx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
	{"rx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
	{"rx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
	{"rx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
	{"rx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},

	{"tx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
	{"tx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
	{"tx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
	{"tx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
	{"tx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
	{"tx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},

	{"rx_64_byte_packets",
		offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
	{"rx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_65_to_127_byte_packets)},
	{"rx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_128_to_255_byte_packets)},
	{"rx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_256_to_511_byte_packets)},
	{"rx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_512_to_1023_byte_packets)},
	{"rx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_1024_to_1518_byte_packets)},
	{"tx_64_byte_packets",
		offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
	{"tx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_65_to_127_byte_packets)},
	{"tx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_128_to_255_byte_packets)},
	{"tx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_256_to_511_byte_packets)},
	{"tx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_512_to_1023_byte_packets)},
	{"tx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_1024_to_1518_byte_packets)},

	{"rx_mac_crtl_frames",
		offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
	{"tx_mac_control_frames",
		offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
	{"rx_pause_frames",
		offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
	{"tx_pause_frames",
		offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
	{"rx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
	{"tx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},

	{"rx_crc_errors",
		offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
	{"rx_align_errors",
		offsetof(struct ecore_eth_stats_common, rx_align_errors)},
	{"rx_carrier_errors",
		offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
	{"rx_oversize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
	{"rx_jabbers",
		offsetof(struct ecore_eth_stats_common, rx_jabbers)},
	{"rx_undersize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
	{"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
	{"rx_host_buffer_not_available",
		offsetof(struct ecore_eth_stats_common, no_buff_discards)},
	/* Number of packets discarded because they are bigger than MTU */
	{"rx_packet_too_big_discards",
		offsetof(struct ecore_eth_stats_common,
			 packet_too_big_discard)},
	{"rx_ttl_zero_discards",
		offsetof(struct ecore_eth_stats_common, ttl0_discard)},
	{"rx_multi_function_tag_filter_discards",
		offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
	{"rx_mac_filter_discards",
		offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
	{"rx_hw_buffer_truncates",
		offsetof(struct ecore_eth_stats_common, brb_truncates)},
	{"rx_hw_buffer_discards",
		offsetof(struct ecore_eth_stats_common, brb_discards)},
	{"tx_error_drop_packets",
		offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},

	{"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
	{"rx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
	{"rx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
	{"rx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
	{"rx_mac_frames_ok",
		offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
	{"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
	{"tx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
	{"tx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
	{"tx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},

	{"lro_coalesced_packets",
		offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
	{"lro_coalesced_events",
		offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
	{"lro_aborts_num",
		offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
	{"lro_not_coalesced_packets",
		offsetof(struct ecore_eth_stats_common,
			 tpa_not_coalesced_pkts)},
	{"lro_coalesced_bytes",
		offsetof(struct ecore_eth_stats_common,
			 tpa_coalesced_bytes)},
};

static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
	{"rx_1519_to_1522_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_1522_byte_packets)},
	{"rx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_2047_byte_packets)},
	{"rx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_2048_to_4095_byte_packets)},
	{"rx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_4096_to_9216_byte_packets)},
	{"rx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_9217_to_16383_byte_packets)},

	{"tx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_1519_to_2047_byte_packets)},
	{"tx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_2048_to_4095_byte_packets)},
	{"tx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_4096_to_9216_byte_packets)},
	{"tx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_9217_to_16383_byte_packets)},

	{"tx_lpi_entry_count",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
	{"tx_total_collisions",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
};

static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
	{"rx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 rx_1519_to_max_byte_packets)},
	{"tx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 tx_1519_to_max_byte_packets)},
};

static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
	{"rx_q_segments",
		offsetof(struct qede_rx_queue, rx_segs)},
	{"rx_q_hw_errors",
		offsetof(struct qede_rx_queue, rx_hw_errors)},
	{"rx_q_allocation_errors",
		offsetof(struct qede_rx_queue, rx_alloc_errors)}
};
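
/*
 * Illustrative sketch, not part of the driver: how the offsetof-based
 * tables above are consumed. A counter is read as a 64-bit value at
 * (stats base + entry offset), mirroring the pointer arithmetic used in
 * qede_get_xstats() below; the helper name is made up for this example.
 */
static __rte_unused uint64_t
qede_xstat_read_example(const struct ecore_eth_stats *stats,
			const struct rte_qede_xstats_name_off *entry)
{
	return *(const uint64_t *)((const char *)stats + entry->offset);
}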

static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}

static void
qede_interrupt_handler_intx(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	u64 status;

	/* Check if our device actually raised an interrupt */
	status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev));
	if (status & 0x1) {
		qede_interrupt_action(ECORE_LEADING_HWFN(edev));

		if (rte_intr_enable(eth_dev->intr_handle))
			DP_ERR(edev, "rte_intr_enable failed\n");
	}
}

static void
qede_interrupt_handler(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	if (rte_intr_enable(eth_dev->intr_handle))
		DP_ERR(edev, "rte_intr_enable failed\n");
}

static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
	qdev->ops = qed_ops;
}

static void qede_print_adapter_info(struct qede_dev *qdev)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_dev_info *info = &qdev->dev_info.common;
	static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
	static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];

	DP_INFO(edev, "*********************************\n");
	DP_INFO(edev, " DPDK version:%s\n", rte_version());
	DP_INFO(edev, " Chip details : %s %c%d\n",
		ECORE_IS_BB(edev) ? "BB" : "AH",
		'A' + edev->chip_rev,
		(int)edev->chip_metal);
	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
		 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
	snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
		 ver_str, QEDE_PMD_VERSION);
	DP_INFO(edev, " Driver version : %s\n", drv_ver);
	DP_INFO(edev, " Firmware version : %s\n", ver_str);

	snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
		 "%d.%d.%d.%d",
		 (info->mfw_rev >> 24) & 0xff,
		 (info->mfw_rev >> 16) & 0xff,
		 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
	DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
	DP_INFO(edev, " Firmware file : %s\n", fw_file);
	DP_INFO(edev, "*********************************\n");
}
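
/*
 * Worked example (illustrative): mfw_rev packs one version component per
 * byte with the major version in the top byte, so mfw_rev == 0x08200000
 * prints as "8.32.0.0".
 */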

static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	unsigned int i = 0, j = 0, qid;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");

	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);

	for_each_rss(qid) {
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			     offsetof(struct qede_rx_queue, rcv_pkts), 0,
			    sizeof(uint64_t));
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			     offsetof(struct qede_rx_queue, rx_hw_errors), 0,
			    sizeof(uint64_t));
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			     offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
			    sizeof(uint64_t));

		if (xstats)
			for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
				OSAL_MEMSET((((char *)
					      (qdev->fp_array[qid].rxq)) +
					     qede_rxq_xstats_strings[j].offset),
					    0,
					    sizeof(uint64_t));

		i++;
		if (i == rxq_stat_cntrs)
			break;
	}

	i = 0;

	for_each_tss(qid) {
		txq = qdev->fp_array[qid].txq;

		OSAL_MEMSET((uint64_t *)(uintptr_t)
				(((uint64_t)(uintptr_t)(txq)) +
				 offsetof(struct qede_tx_queue, xmit_pkts)), 0,
			    sizeof(uint64_t));

		i++;
		if (i == txq_stat_cntrs)
			break;
	}
}

static int
qede_stop_vport(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	uint8_t vport_id;
	int rc;
	int i;

	vport_id = 0;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
					 vport_id);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
			return rc;
		}
	}

	DP_INFO(edev, "vport stopped\n");

	return 0;
}

static int
qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_start_params params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	if (qdev->vport_started)
		qede_stop_vport(edev);

	memset(&params, 0, sizeof(params));
	params.vport_id = 0;
	params.mtu = mtu;
	/* @DPDK - Disable FW placement */
	params.zero_placement_offset = 1;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.concrete_fid = p_hwfn->hw_info.concrete_fid;
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_start(p_hwfn, &params);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Start V-PORT failed %d\n", rc);
			return rc;
		}
	}
	ecore_reset_vport_stats(edev);
	qdev->vport_started = true;
	DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);

	return 0;
}

#define QEDE_NPAR_TX_SWITCHING		"npar_tx_switching"
#define QEDE_VF_TX_SWITCHING		"vf_tx_switching"

/* Activate or deactivate vport via vport-update */
int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc = -1;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_vport_active_rx_flg = 1;
	params.update_vport_active_tx_flg = 1;
	params.vport_active_rx_flg = flg;
	params.vport_active_tx_flg = flg;
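	/* Request an explicit tx_switching_flg = 0 update only when the
	 * vport is being activated (flg == 1) while tx-switching is
	 * configured off; the bitwise '~' on the bool-like
	 * enable_tx_switching field assumes it holds 0 or 1.
	 */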
	if (~qdev->enable_tx_switching & flg) {
		params.update_tx_switching_flg = 1;
		params.tx_switching_flg = !flg;
	}
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");
			break;
		}
	}
	DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");

	return rc;
}

static void
qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
			   uint16_t mtu, bool enable)
{
	/* Enable LRO in split mode */
	sge_tpa_params->tpa_ipv4_en_flg = enable;
	sge_tpa_params->tpa_ipv6_en_flg = enable;
	sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
	sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
	/* set if tpa enable changes */
	sge_tpa_params->update_tpa_en_flg = 1;
	/* set if tpa parameters should be handled */
	sge_tpa_params->update_tpa_param_flg = enable;

	sge_tpa_params->max_buffers_per_cqe = 20;
	/* Enable TPA in split mode. In this mode each TPA segment
	 * starts on the new BD, so there is one BD per segment.
	 */
	sge_tpa_params->tpa_pkt_split_flg = 1;
	sge_tpa_params->tpa_hdr_data_split_flg = 0;
	sge_tpa_params->tpa_gro_consistent_flg = 0;
	sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
	sge_tpa_params->tpa_max_size = 0x7FFF;
	sge_tpa_params->tpa_min_size_to_start = mtu / 2;
	sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
}
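
/*
 * Worked example (illustrative): with mtu == 1500 the thresholds above
 * become tpa_min_size_to_start == tpa_min_size_to_cont == 750, i.e.
 * aggregation only starts and continues for payloads of at least mtu / 2.
 */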

/* Enable/disable LRO via vport-update */
int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_sge_tpa_params tpa_params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
	qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
	params.vport_id = 0;
	params.sge_tpa_params = &tpa_params;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update LRO\n");
			return -1;
		}
	}
	qdev->enable_lro = flg;
	eth_dev->data->lro = flg;

	DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");

	return 0;
}

static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
{
	memset(ucast, 0, sizeof(struct ecore_filter_ucast));
	ucast->is_rx_filter = true;
	ucast->is_tx_filter = true;
	/* ucast->assert_on_error = true; - For debug */
}

static int
qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
			     enum qed_filter_rx_mode_type type)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_accept_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.update_rx_mode_config = 1;
	flags.update_tx_mode_config = 1;
	flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
				 ECORE_ACCEPT_MCAST_MATCHED |
				 ECORE_ACCEPT_BCAST;

	flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
				 ECORE_ACCEPT_MCAST_MATCHED |
				 ECORE_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
		if (IS_VF(edev)) {
			flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
			DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
		}
	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	} else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
			    QED_FILTER_RX_MODE_TYPE_PROMISC)) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
					  ECORE_ACCEPT_MCAST_UNMATCHED;
	}

	return ecore_filter_accept_cmd(edev, 0, flags, false, false,
				       ECORE_SPQ_MODE_CB, NULL);
}
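
/*
 * Illustrative example: a port in promiscuous plus allmulticast mode ends
 * up accepting matched and unmatched unicast and multicast, i.e.
 * rx_accept_filter == ECORE_ACCEPT_UCAST_MATCHED |
 *	ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST |
 *	ECORE_ACCEPT_UCAST_UNMATCHED | ECORE_ACCEPT_MCAST_UNMATCHED.
 */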

static enum _ecore_status_t
qede_tunnel_update(struct qede_dev *qdev,
		   struct ecore_tunnel_info *tunn_info)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		if (IS_PF(edev)) {
			p_ptt = ecore_ptt_acquire(p_hwfn);
			if (!p_ptt) {
				DP_ERR(p_hwfn, "Can't acquire PTT\n");
				return ECORE_AGAIN;
			}
		} else {
			p_ptt = NULL;
		}

		rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
				tunn_info, ECORE_SPQ_MODE_CB, NULL);
		if (IS_PF(edev))
			ecore_ptt_release(p_hwfn, p_ptt);

		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}

static int
qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		  bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	if (qdev->vxlan.enable == enable)
		return ECORE_SUCCESS;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.vxlan.b_update_mode = true;
	tunn.vxlan.b_mode_enabled = enable;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;
	tunn.vxlan.tun_cls = clss;

	tunn.vxlan_port.b_update_port = true;
	tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->vxlan.enable = enable;
		qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
		DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
			enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       tunn.vxlan.tun_cls);
	}

	return rc;
}

static int
qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		   bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.l2_geneve.b_update_mode = true;
	tunn.l2_geneve.b_mode_enabled = enable;
	tunn.ip_geneve.b_update_mode = true;
	tunn.ip_geneve.b_mode_enabled = enable;
	tunn.l2_geneve.tun_cls = clss;
	tunn.ip_geneve.tun_cls = clss;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;

	tunn.geneve_port.b_update_port = true;
	tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->geneve.enable = enable;
		qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
		DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
			enable ? "enabled" : "disabled", qdev->geneve.udp_port);
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       tunn.l2_geneve.tun_cls);
	}

	return rc;
}

static int
qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		  bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.ip_gre.b_update_mode = true;
	tunn.ip_gre.b_mode_enabled = enable;
	tunn.ip_gre.tun_cls = clss;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->ipgre.enable = enable;
		DP_INFO(edev, "IPGRE is %s\n",
			enable ? "enabled" : "disabled");
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       tunn.ip_gre.tun_cls);
	}

	return rc;
}

static int
qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		 enum rte_eth_tunnel_type tunn_type, bool enable)
{
	int rc = -EINVAL;

	switch (tunn_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		rc = qede_vxlan_enable(eth_dev, clss, enable);
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		rc = qede_geneve_enable(eth_dev, clss, enable);
		break;
	case RTE_TUNNEL_TYPE_IP_IN_GRE:
		rc = qede_ipgre_enable(eth_dev, clss, enable);
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}

static int
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		  bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_ucast_entry *tmp = NULL;
	struct qede_ucast_entry *u;
	struct ether_addr *mac_addr;

	mac_addr = (struct ether_addr *)ucast->mac;
	if (add) {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    ETHER_ADDR_LEN) == 0) &&
			    ucast->vni == tmp->vni &&
			    ucast->vlan == tmp->vlan) {
				DP_INFO(edev, "Unicast MAC is already added"
					" with vlan = %u, vni = %u\n",
					ucast->vlan, ucast->vni);
				return 0;
			}
		}
		u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
			       RTE_CACHE_LINE_SIZE);
		if (!u) {
			DP_ERR(edev, "Did not allocate memory for ucast\n");
			return -ENOMEM;
		}
		ether_addr_copy(mac_addr, &u->mac);
		u->vlan = ucast->vlan;
		u->vni = ucast->vni;
		SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
		qdev->num_uc_addr++;
	} else {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    ETHER_ADDR_LEN) == 0) &&
			    ucast->vlan == tmp->vlan &&
			    ucast->vni == tmp->vni)
				break;
		}
		if (tmp == NULL) {
			DP_INFO(edev, "Unicast MAC is not found\n");
			return -EINVAL;
		}
		SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
		qdev->num_uc_addr--;
	}

	return 0;
}

static int
qede_add_mcast_filters(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
		       uint32_t mc_addrs_num)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_mcast mcast;
	struct qede_mcast_entry *m = NULL;
	uint8_t i;
	int rc;

	for (i = 0; i < mc_addrs_num; i++) {
		m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
			       RTE_CACHE_LINE_SIZE);
		if (!m) {
			DP_ERR(edev, "Did not allocate memory for mcast\n");
			return -ENOMEM;
		}
		ether_addr_copy(&mc_addrs[i], &m->mac);
		SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
	}
	memset(&mcast, 0, sizeof(mcast));
	mcast.num_mc_addrs = mc_addrs_num;
	mcast.opcode = ECORE_FILTER_ADD;
	for (i = 0; i < mc_addrs_num; i++)
		ether_addr_copy(&mc_addrs[i], (struct ether_addr *)
						&mcast.mac[i]);
	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Failed to add multicast filter (rc = %d)\n", rc);
		return -1;
	}

	return 0;
}

static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_mcast_entry *tmp = NULL;
	struct ecore_filter_mcast mcast;
	int j;
	int rc;

	memset(&mcast, 0, sizeof(mcast));
	mcast.num_mc_addrs = qdev->num_mc_addr;
	mcast.opcode = ECORE_FILTER_REMOVE;
	j = 0;
	SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
		ether_addr_copy(&tmp->mac, (struct ether_addr *)&mcast.mac[j]);
		j++;
	}
	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Failed to delete multicast filter\n");
		return -1;
	}

	while (!SLIST_EMPTY(&qdev->mc_list_head)) {
		tmp = SLIST_FIRST(&qdev->mc_list_head);
		SLIST_REMOVE_HEAD(&qdev->mc_list_head, list);
	}
	SLIST_INIT(&qdev->mc_list_head);

	return 0;
}

static enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		 bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;

	if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) {
		DP_ERR(edev, "Ucast filter table limit exceeded,"
			     " Please enable promisc mode\n");
		return ECORE_INVAL;
	}

	rc = qede_ucast_filter(eth_dev, ucast, add);
	if (rc == 0)
		rc = ecore_filter_ucast_cmd(edev, ucast,
					    ECORE_SPQ_MODE_CB, NULL);
	if (rc != ECORE_SUCCESS)
		DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
		       rc, add);

	return rc;
}

static int
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
		  __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct ecore_filter_ucast ucast;
	int re;

	if (!is_valid_assigned_ether_addr(mac_addr))
		return -EINVAL;

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_ADD;
	ucast.type = ECORE_FILTER_MAC;
	ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
	re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
	return re;
}

static void
qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_filter_ucast ucast;

	PMD_INIT_FUNC_TRACE(edev);

	if (index >= qdev->dev_info.num_mac_filters) {
		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
		       index, qdev->dev_info.num_mac_filters);
		return;
	}

	if (!is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
		return;

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_REMOVE;
	ucast.type = ECORE_FILTER_MAC;

	/* Use the index maintained by rte */
	ether_addr_copy(&eth_dev->data->mac_addrs[index],
			(struct ether_addr *)&ucast.mac);

	qede_mac_int_ops(eth_dev, &ucast, false);
}

static int
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
					       mac_addr->addr_bytes)) {
		DP_ERR(edev, "Setting MAC address is not allowed\n");
		return -EPERM;
	}

	qede_mac_addr_remove(eth_dev, 0);

	return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}

static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_accept_any_vlan_flg = 1;
	params.accept_any_vlan = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to configure accept-any-vlan\n");
			return;
		}
	}

	DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
}

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_inner_vlan_removal_flg = 1;
	params.inner_vlan_removal_flg = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");
			return -1;
		}
	}

	DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
	return rc;
}

static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
				uint16_t vlan_id, int on)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
	struct qede_vlan_entry *tmp = NULL;
	struct qede_vlan_entry *vlan;
	struct ecore_filter_ucast ucast;
	int rc;

	if (on) {
		if (qdev->configured_vlans == dev_info->num_vlan_filters) {
			DP_ERR(edev, "Reached max VLAN filter limit,"
				     " enabling accept_any_vlan\n");
			qede_config_accept_any_vlan(qdev, true);
			return 0;
		}

		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id) {
				DP_INFO(edev, "VLAN %u already configured\n",
					vlan_id);
				return 0;
			}
		}

		vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
				  RTE_CACHE_LINE_SIZE);

		if (!vlan) {
			DP_ERR(edev, "Did not allocate memory for VLAN\n");
			return -ENOMEM;
		}

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_ADD;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
			       rc);
			rte_free(vlan);
		} else {
			vlan->vid = vlan_id;
			SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
			qdev->configured_vlans++;
			DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	} else {
		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id)
				break;
		}

		if (tmp == NULL) {
			if (qdev->configured_vlans == 0) {
				DP_INFO(edev,
					"No VLAN filters configured yet\n");
				return 0;
			}

			DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
			return -EINVAL;
		}

		SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_REMOVE;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
			       vlan_id, rc);
		} else {
			qdev->configured_vlans--;
			DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	}

	return rc;
}
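
/*
 * Usage sketch (illustrative, values made up): the ethdev VLAN filter op
 * resolves here, so adding and later removing VLAN 100 amounts to
 *	qede_vlan_filter_set(eth_dev, 100, 1);
 *	qede_vlan_filter_set(eth_dev, 100, 0);
 */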

static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			(void)qede_vlan_stripping(eth_dev, 1);
		else
			(void)qede_vlan_stripping(eth_dev, 0);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN filtering kicks in when a VLAN is added */
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
			qede_vlan_filter_set(eth_dev, 0, 1);
		} else {
			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
				DP_ERR(edev,
				       " Please remove existing VLAN filters"
				       " before disabling VLAN filtering\n");
				/* Signal app that VLAN filtering is still
				 * enabled
				 */
				eth_dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_VLAN_FILTER;
			} else {
				qede_vlan_filter_set(eth_dev, 0, 0);
			}
		}
	}

	if (mask & ETH_VLAN_EXTEND_MASK)
		DP_ERR(edev, "Extend VLAN not supported\n");

	qdev->vlan_offload_mask = mask;

	DP_INFO(edev, "VLAN offload mask %d\n", mask);

	return 0;
}

static void qede_prandom_bytes(uint32_t *buff)
{
	uint8_t i;

	srand((unsigned int)time(NULL));
	for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
		buff[i] = rand();
}

int qede_config_rss(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
	struct rte_eth_rss_reta_entry64 reta_conf[2];
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, id, pos, q;

	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	if (!rss_conf.rss_key) {
		DP_INFO(edev, "Applying driver default key\n");
		rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
		qede_prandom_bytes(&def_rss_key[0]);
		rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
	}

	/* Configure RSS hash */
	if (qede_rss_hash_update(eth_dev, &rss_conf))
		return -EINVAL;

	/* Configure default RETA */
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		id = i / RTE_RETA_GROUP_SIZE;
		pos = i % RTE_RETA_GROUP_SIZE;
		q = i % QEDE_RSS_COUNT(qdev);
		reta_conf[id].reta[pos] = q;
	}
	if (qede_rss_reta_update(eth_dev, &reta_conf[0],
				 ECORE_RSS_IND_TABLE_SIZE))
		return -EINVAL;

	return 0;
}
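
/*
 * Illustrative example (assumes 4 Rx queues): the RETA loop above fills
 * the ECORE_RSS_IND_TABLE_SIZE entries round-robin, so indirection
 * entries 0,1,2,3,4,5,... map to queues 0,1,2,3,0,1,...
 */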

static void qede_fastpath_start(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		ecore_hw_start_fastpath(p_hwfn);
	}
}

static int qede_dev_start(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

	PMD_INIT_FUNC_TRACE(edev);

	/* Update MTU only if it has changed */
	if (eth_dev->data->mtu != qdev->mtu) {
		if (qede_update_mtu(eth_dev, qdev->mtu))
			goto err;
	}

	/* Configure TPA parameters */
	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		if (qede_enable_tpa(eth_dev, true))
			return -EINVAL;
		/* Enable scatter mode for LRO */
		if (!eth_dev->data->scattered_rx)
			rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
	}

	/* Start queues */
	if (qede_start_queues(eth_dev))
		goto err;

	if (IS_PF(edev))
		qede_reset_queue_stats(qdev, true);

	/* Newer SR-IOV PF driver expects RX/TX queues to be started before
	 * enabling RSS. Hence RSS configuration is deferred up to this point.
	 * Also, we would like to retain similar behavior in PF case, so we
	 * don't do PF/VF specific check here.
	 */
	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		if (qede_config_rss(eth_dev))
			goto err;

	/* Enable vport */
	if (qede_activate_vport(eth_dev, true))
		goto err;

	/* Update link status */
	qede_link_update(eth_dev, 0);

	/* Start/resume traffic */
	qede_fastpath_start(edev);

	DP_INFO(edev, "Device started\n");

	return 0;
err:
	DP_ERR(edev, "Device start failed\n");
	return -1; /* common error code is < 0 */
}

static void qede_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* Disable vport */
	if (qede_activate_vport(eth_dev, false))
		return;

	if (qdev->enable_lro)
		qede_enable_tpa(eth_dev, false);

	/* Stop queues */
	qede_stop_queues(eth_dev);

	/* Disable traffic */
	ecore_hw_stop_fastpath(edev); /* TBD - loop */

	DP_INFO(edev, "Device is stopped\n");
}

const char *valid_args[] = {
	QEDE_NPAR_TX_SWITCHING,
	QEDE_VF_TX_SWITCHING,
	NULL,
};

static int qede_args_check(const char *key, const char *val, void *opaque)
{
	unsigned long tmp;
	int ret = 0;
	struct rte_eth_dev *eth_dev = opaque;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		DP_INFO(edev, "%s: \"%s\" is not a valid integer", key, val);
		return errno;
	}

	if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
	    ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) {
		qdev->enable_tx_switching = !!tmp;
		DP_INFO(edev, "Disabling %s tx-switching\n",
			strcmp(QEDE_NPAR_TX_SWITCHING, key) ?
			"VF" : "NPAR");
	}

	return ret;
}

static int qede_args(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	struct rte_kvargs *kvlist;
	struct rte_devargs *devargs;
	int ret;
	int i;

	devargs = pci_dev->device.devargs;
	if (!devargs)
		return 0;	/* return success */

	kvlist = rte_kvargs_parse(devargs->args, valid_args);
	if (kvlist == NULL)
		return -EINVAL;

	/* Process parameters. */
	for (i = 0; (valid_args[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, valid_args[i])) {
			ret = rte_kvargs_process(kvlist, valid_args[i],
						 qede_args_check, eth_dev);
			if (ret != ECORE_SUCCESS) {
				rte_kvargs_free(kvlist);
				return ret;
			}
		}
	}
	rte_kvargs_free(kvlist);

	return 0;
}

static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
	int ret;

	PMD_INIT_FUNC_TRACE(edev);

	/* Check requirements for 100G mode */
	if (ECORE_IS_CMT(edev)) {
		if (eth_dev->data->nb_rx_queues < 2 ||
		    eth_dev->data->nb_tx_queues < 2) {
			DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
			return -EINVAL;
		}

		if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
		    (eth_dev->data->nb_tx_queues % 2 != 0)) {
			DP_ERR(edev,
			       "100G mode needs even no. of RX/TX queues\n");
			return -EINVAL;
		}
	}

	/* We need to have min 1 RX queue. There is no min check in
	 * rte_eth_dev_configure(), so we are checking it here.
	 */
	if (eth_dev->data->nb_rx_queues == 0) {
		DP_ERR(edev, "Minimum one RX queue is required\n");
		return -EINVAL;
	}

	/* Enable Tx switching by default */
	qdev->enable_tx_switching = 1;

	/* Parse devargs and fix up rxmode */
	if (qede_args(eth_dev))
		DP_NOTICE(edev, false,
			  "Invalid devargs supplied, requested change will not take effect\n");

	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
	      rxmode->mq_mode == ETH_MQ_RX_RSS)) {
		DP_ERR(edev, "Unsupported multi-queue mode\n");
		return -ENOTSUP;
	}
	/* Flow director mode check */
	if (qede_check_fdir_support(eth_dev))
		return -ENOTSUP;

	qede_dealloc_fp_resc(eth_dev);
	qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
	qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
	if (qede_alloc_fp_resc(qdev))
		return -ENOMEM;

	/* If jumbo enabled adjust MTU */
	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			ETHER_HDR_LEN - ETHER_CRC_LEN;

	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
		eth_dev->data->scattered_rx = 1;

	if (qede_start_vport(qdev, eth_dev->data->mtu))
		return -1;

	qdev->mtu = eth_dev->data->mtu;

	/* Enable VLAN offloads by default */
	ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
				    ETH_VLAN_FILTER_MASK |
				    ETH_VLAN_EXTEND_MASK);
	if (ret)
		return ret;

	DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
		QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));

	return 0;
}

/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 128,
	.nb_align = 128 /* lowest common multiple */
};

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 256,
	.nb_align = 256,
	.nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
	.nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
};

static void
qede_dev_info_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_dev_info *dev_info)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output link;
	uint32_t speed_cap = 0;

	PMD_INIT_FUNC_TRACE(edev);

	dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
	dev_info->rx_desc_lim = qede_rx_desc_lim;
	dev_info->tx_desc_lim = qede_tx_desc_lim;

	if (IS_PF(edev))
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
	else
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
	dev_info->max_tx_queues = dev_info->max_rx_queues;

	dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
	dev_info->max_vfs = 0;
	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_UDP_CKSUM |
				     DEV_RX_OFFLOAD_TCP_CKSUM |
				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_TCP_LRO |
				     DEV_RX_OFFLOAD_CRC_STRIP |
				     DEV_RX_OFFLOAD_SCATTER |
				     DEV_RX_OFFLOAD_JUMBO_FRAME |
				     DEV_RX_OFFLOAD_VLAN_FILTER |
				     DEV_RX_OFFLOAD_VLAN_STRIP);
	dev_info->rx_queue_offload_capa = 0;

	/* TX offloads are on a per-packet basis, so it is applicable
	 * to both at port and queue levels.
	 */
	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
				     DEV_TX_OFFLOAD_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_UDP_CKSUM |
				     DEV_TX_OFFLOAD_TCP_CKSUM |
				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_QINQ_INSERT |
				     DEV_TX_OFFLOAD_MULTI_SEGS |
				     DEV_TX_OFFLOAD_TCP_TSO |
				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
	dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
	};

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		/* Packets are always dropped if no descriptors are available */
		.rx_drop_en = 1,
		/* The below RX offloads are always enabled */
		.offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
			     DEV_RX_OFFLOAD_IPV4_CKSUM |
			     DEV_RX_OFFLOAD_TCP_CKSUM |
			     DEV_RX_OFFLOAD_UDP_CKSUM),
	};

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		speed_cap |= ETH_LINK_SPEED_1G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		speed_cap |= ETH_LINK_SPEED_10G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		speed_cap |= ETH_LINK_SPEED_25G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		speed_cap |= ETH_LINK_SPEED_40G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		speed_cap |= ETH_LINK_SPEED_50G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		speed_cap |= ETH_LINK_SPEED_100G;
	dev_info->speed_capa = speed_cap;
}

/* return 0 means link status changed, -1 means not changed */
int
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output q_link;
	struct rte_eth_link link;
	uint16_t link_duplex;

	memset(&q_link, 0, sizeof(q_link));
	memset(&link, 0, sizeof(link));

	qdev->ops->common->get_link(edev, &q_link);

	/* Link Speed */
	link.link_speed = q_link.speed;

	/* Link Mode */
	switch (q_link.duplex) {
	case QEDE_DUPLEX_HALF:
		link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	case QEDE_DUPLEX_FULL:
		link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case QEDE_DUPLEX_UNKNOWN:
	default:
		link_duplex = -1;
	}
	link.link_duplex = link_duplex;

	/* Link Status */
	link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;

	/* AN */
	link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
			    ETH_LINK_AUTONEG : ETH_LINK_FIXED;

	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
		link.link_speed, link.link_duplex,
		link.link_autoneg, link.link_status);

	return rte_eth_linkstatus_set(eth_dev, &link);
}

static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);
#endif

	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	qed_configure_filter_rx_mode(eth_dev, type);
}

static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);
#endif

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
	else
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static void qede_poll_sp_sb_cb(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	qede_interrupt_action(&edev->hwfns[1]);

	rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
			       qede_poll_sp_sb_cb,
			       (void *)eth_dev);
	if (rc != 0) {
		DP_ERR(edev, "Unable to start periodic"
			     " timer rc %d\n", rc);
		assert(false && "Unable to start periodic timer");
	}
}

static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* dev_stop() shall cleanup fp resources in hw but without releasing
	 * dma memories and sw structures so that dev_start() can be called
	 * by the app without reconfiguration. However, in dev_close() we
	 * can release all the resources and the device can be brought up
	 * anew.
	 */
	if (eth_dev->data->dev_started)
		qede_dev_stop(eth_dev);

	qede_stop_vport(edev);
	qdev->vport_started = false;
	qede_fdir_dealloc_resc(eth_dev);
	qede_dealloc_fp_resc(eth_dev);

	eth_dev->data->nb_rx_queues = 0;
	eth_dev->data->nb_tx_queues = 0;

	/* Bring the link down */
	qede_dev_set_link_state(eth_dev, false);
	qdev->ops->common->slowpath_stop(edev);
	qdev->ops->common->remove(edev);
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     qede_interrupt_handler, (void *)eth_dev);
	if (ECORE_IS_CMT(edev))
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
}

static int
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	unsigned int i = 0, j = 0, qid;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	ecore_get_vport_stats(edev, &stats);

	/* RX Stats */
	eth_stats->ipackets = stats.common.rx_ucast_pkts +
	    stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;

	eth_stats->ibytes = stats.common.rx_ucast_bytes +
	    stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;

	eth_stats->ierrors = stats.common.rx_crc_errors +
	    stats.common.rx_align_errors +
	    stats.common.rx_carrier_errors +
	    stats.common.rx_oversize_packets +
	    stats.common.rx_jabbers + stats.common.rx_undersize_packets;

	eth_stats->rx_nombuf = stats.common.no_buff_discards;

	eth_stats->imissed = stats.common.mftag_filter_discards +
	    stats.common.mac_filter_discards +
	    stats.common.no_buff_discards +
	    stats.common.brb_truncates + stats.common.brb_discards;

	/* TX stats */
	eth_stats->opackets = stats.common.tx_ucast_pkts +
	    stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;

	eth_stats->obytes = stats.common.tx_ucast_bytes +
	    stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;

	eth_stats->oerrors = stats.common.tx_err_drop_pkts;

	/* Queue stats */
	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
	    (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
		DP_VERBOSE(edev, ECORE_MSG_DEBUG,
			   "Not all the queue stats will be displayed. Set"
			   " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
			   " appropriately and retry.\n");

	for_each_rss(qid) {
		eth_stats->q_ipackets[i] =
			*(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				offsetof(struct qede_rx_queue,
				rcv_pkts));
		eth_stats->q_errors[i] =
			*(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				offsetof(struct qede_rx_queue,
				rx_hw_errors)) +
			*(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				offsetof(struct qede_rx_queue,
				rx_alloc_errors));
		i++;
		if (i == rxq_stat_cntrs)
			break;
	}

	for_each_tss(qid) {
		txq = qdev->fp_array[qid].txq;
		eth_stats->q_opackets[j] =
			*((uint64_t *)(uintptr_t)
				(((uint64_t)(uintptr_t)(txq)) +
				 offsetof(struct qede_tx_queue,
					  xmit_pkts)));
		j++;
		if (j == txq_stat_cntrs)
			break;
	}

	return 0;
}

static unsigned int
qede_get_xstats_count(struct qede_dev *qdev) {
	if (ECORE_IS_BB(&qdev->edev))
		return RTE_DIM(qede_xstats_strings) +
		       RTE_DIM(qede_bb_xstats_strings) +
		       (RTE_DIM(qede_rxq_xstats_strings) *
			RTE_MIN(QEDE_RSS_COUNT(qdev),
				RTE_ETHDEV_QUEUE_STAT_CNTRS));
	else
		return RTE_DIM(qede_xstats_strings) +
		       RTE_DIM(qede_ah_xstats_strings) +
		       (RTE_DIM(qede_rxq_xstats_strings) *
			RTE_MIN(QEDE_RSS_COUNT(qdev),
				RTE_ETHDEV_QUEUE_STAT_CNTRS));
}
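
/*
 * Worked example (hypothetical queue count): on a BB adapter with 4 Rx
 * queue stat counters the count is RTE_DIM(qede_xstats_strings) +
 * RTE_DIM(qede_bb_xstats_strings) + 4 * RTE_DIM(qede_rxq_xstats_strings).
 */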

static int
qede_get_xstats_names(struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      __rte_unused unsigned int limit)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	const unsigned int stat_cnt = qede_get_xstats_count(qdev);
	unsigned int i, qid, stat_idx = 0;
	unsigned int rxq_stat_cntrs;

	if (xstats_names != NULL) {
		for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
			snprintf(xstats_names[stat_idx].name,
				 sizeof(xstats_names[stat_idx].name),
				 "%s",
				 qede_xstats_strings[i].name);
			stat_idx++;
		}

		if (ECORE_IS_BB(edev)) {
			for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					 sizeof(xstats_names[stat_idx].name),
					 "%s",
					 qede_bb_xstats_strings[i].name);
				stat_idx++;
			}
		} else {
			for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					 sizeof(xstats_names[stat_idx].name),
					 "%s",
					 qede_ah_xstats_strings[i].name);
				stat_idx++;
			}
		}

		rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
					 RTE_ETHDEV_QUEUE_STAT_CNTRS);
		for (qid = 0; qid < rxq_stat_cntrs; qid++) {
			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					 sizeof(xstats_names[stat_idx].name),
					 "%.4s%d%s",
					 qede_rxq_xstats_strings[i].name, qid,
					 qede_rxq_xstats_strings[i].name + 4);
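				/* The "%.4s%d%s" format above splices the
				 * queue id into the template name: the first
				 * four chars ("rx_q"), then qid, then the
				 * remainder (name + 4), e.g.
				 * "rx_q1_segments".
				 */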
				stat_idx++;
			}
		}
	}

	return stat_cnt;
}

static int
qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		unsigned int n)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	const unsigned int num = qede_get_xstats_count(qdev);
	unsigned int i, qid, stat_idx = 0;
	unsigned int rxq_stat_cntrs;

	if (n < num)
		return num;

	ecore_get_vport_stats(edev, &stats);

	for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
		xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
					     qede_xstats_strings[i].offset);
		xstats[stat_idx].id = stat_idx;
		stat_idx++;
	}

	if (ECORE_IS_BB(edev)) {
		for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
			xstats[stat_idx].value =
					*(uint64_t *)(((char *)&stats) +
					qede_bb_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	} else {
		for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
			xstats[stat_idx].value =
					*(uint64_t *)(((char *)&stats) +
					qede_ah_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	}

	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (qid = 0; qid < rxq_stat_cntrs; qid++) {
		for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
			xstats[stat_idx].value = *(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				 qede_rxq_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	}

	return stat_idx;
}

static void
qede_reset_xstats(struct rte_eth_dev *dev)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, true);
}

int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "setting link state %d\n", link_up);
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = link_up;
	rc = qdev->ops->common->set_link(edev, &link_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(edev, "Unable to set link state %d\n", link_up);

	return rc;
}

static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, true);
}

static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, false);
}

static void qede_reset_stats(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, false);
}

static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	enum qed_filter_rx_mode_type type =
	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

	qed_configure_filter_rx_mode(eth_dev, type);
}

static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_PROMISC);
	else
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static int
qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
		      uint32_t mc_addrs_num)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint8_t i;

	if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
		DP_ERR(edev, "Reached max multicast filters limit,"
			     " Please enable multicast promisc mode\n");
		return -ENOSPC;
	}

	for (i = 0; i < mc_addrs_num; i++) {
		if (!is_multicast_ether_addr(&mc_addrs[i])) {
			DP_ERR(edev, "Not a valid multicast MAC\n");
			return -EINVAL;
		}
	}

	/* Flush all existing entries */
	if (qede_del_mcast_filters(eth_dev))
		return -1;

	/* Set new mcast list */
	return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
}

/* Update MTU via vport-update without doing port restart.
 * The vport must be deactivated before calling this API.
 */
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_hwfn *p_hwfn;
	int rc = -1;
	int i;

	if (IS_PF(edev)) {
		struct ecore_sp_vport_update_params params;

		memset(&params, 0,
		       sizeof(struct ecore_sp_vport_update_params));
		params.vport_id = 0;
		params.mtu = mtu;
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			params.opaque_fid = p_hwfn->hw_info.opaque_fid;
			rc = ecore_sp_vport_update(p_hwfn, &params,
					ECORE_SPQ_MODE_EBLOCK, NULL);
			if (rc != ECORE_SUCCESS)
				goto err;
		}
	} else {
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
			if (rc == ECORE_INVAL) {
				DP_INFO(edev, "VF MTU Update TLV not supported\n");
				/* Recreate vport */
				rc = qede_start_vport(qdev, mtu);
				if (rc != ECORE_SUCCESS)
					goto err;

				/* Restore config lost due to vport stop */
				if (eth_dev->data->promiscuous)
					qede_promiscuous_enable(eth_dev);
				else
					qede_promiscuous_disable(eth_dev);

				if (eth_dev->data->all_multicast)
					qede_allmulticast_enable(eth_dev);
				else
					qede_allmulticast_disable(eth_dev);

				qede_vlan_offload_set(eth_dev,
						      qdev->vlan_offload_mask);
			} else if (rc != ECORE_SUCCESS) {
				goto err;
			}
		}
	}
	DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);

	return 0;

err:
	DP_ERR(edev, "Failed to update MTU\n");
	return -1;
}
2122 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
2123 struct rte_eth_fc_conf *fc_conf)
2125 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2126 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2127 struct qed_link_output current_link;
2128 struct qed_link_params params;
2130 memset(¤t_link, 0, sizeof(current_link));
2131 qdev->ops->common->get_link(edev, ¤t_link);
2133 memset(¶ms, 0, sizeof(params));
2134 params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
2135 if (fc_conf->autoneg) {
2136 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
2137 DP_ERR(edev, "Autoneg not supported\n");
2140 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
2143 /* Pause is assumed to be supported (SUPPORTED_Pause) */
2144 if (fc_conf->mode == RTE_FC_FULL)
2145 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
2146 QED_LINK_PAUSE_RX_ENABLE);
2147 if (fc_conf->mode == RTE_FC_TX_PAUSE)
2148 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
2149 if (fc_conf->mode == RTE_FC_RX_PAUSE)
2150 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
2152 params.link_up = true;
	(void)qdev->ops->common->set_link(edev, &params);
2158 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
2159 struct rte_eth_fc_conf *fc_conf)
2161 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2162 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2163 struct qed_link_output current_link;
	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);
2168 if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
2169 fc_conf->autoneg = true;
2171 if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
2172 QED_LINK_PAUSE_TX_ENABLE))
2173 fc_conf->mode = RTE_FC_FULL;
2174 else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
2175 fc_conf->mode = RTE_FC_RX_PAUSE;
2176 else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
2177 fc_conf->mode = RTE_FC_TX_PAUSE;
2179 fc_conf->mode = RTE_FC_NONE;
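/*
 * Illustrative usage sketch (not driver code): requesting full RX/TX pause
 * without autonegotiation, then reading back the applied configuration.
 *
 * @code
 *	struct rte_eth_fc_conf fc = { .mode = RTE_FC_FULL, .autoneg = 0 };
 *
 *	if (rte_eth_dev_flow_ctrl_set(port_id, &fc) == 0) {
 *		rte_eth_dev_flow_ctrl_get(port_id, &fc);
 *		printf("fc mode %d\n", fc.mode);
 *	}
 * @endcode
 */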
2184 static const uint32_t *
2185 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
2187 static const uint32_t ptypes[] = {
2189 RTE_PTYPE_L2_ETHER_VLAN,
2194 RTE_PTYPE_TUNNEL_VXLAN,
2196 RTE_PTYPE_TUNNEL_GENEVE,
2197 RTE_PTYPE_TUNNEL_GRE,
2199 RTE_PTYPE_INNER_L2_ETHER,
2200 RTE_PTYPE_INNER_L2_ETHER_VLAN,
2201 RTE_PTYPE_INNER_L3_IPV4,
2202 RTE_PTYPE_INNER_L3_IPV6,
2203 RTE_PTYPE_INNER_L4_TCP,
2204 RTE_PTYPE_INNER_L4_UDP,
2205 RTE_PTYPE_INNER_L4_FRAG,
	if (eth_dev->rx_pkt_burst == qede_recv_pkts)
		return ptypes;

	return NULL;
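/*
 * Illustrative usage sketch (not driver code): querying the packet types the
 * RX burst handler above can report.
 *
 * @code
 *	uint32_t ptypes[16];
 *	int i, num;
 *
 *	num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
 *					       ptypes, RTE_DIM(ptypes));
 *	for (i = 0; i < num; i++)
 *		printf("ptype 0x%x\n", ptypes[i]);
 * @endcode
 */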
2215 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
2218 *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
2219 *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
2220 *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
2221 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
2222 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
2223 *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
2224 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
2225 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
2228 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
2229 struct rte_eth_rss_conf *rss_conf)
2231 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2232 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2233 struct ecore_sp_vport_update_params vport_update_params;
2234 struct ecore_rss_params rss_params;
2235 struct ecore_hwfn *p_hwfn;
2236 uint32_t *key = (uint32_t *)rss_conf->rss_key;
2237 uint64_t hf = rss_conf->rss_hf;
2238 uint8_t len = rss_conf->rss_key_len;
2243 memset(&vport_update_params, 0, sizeof(vport_update_params));
2244 memset(&rss_params, 0, sizeof(rss_params));
2246 DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
2247 (unsigned long)hf, len, key);
2251 DP_INFO(edev, "Enabling rss\n");
2254 qede_init_rss_caps(&rss_params.rss_caps, hf);
2255 rss_params.update_rss_capabilities = 1;
2259 if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
2260 DP_ERR(edev, "RSS key length exceeds limit\n");
2263 DP_INFO(edev, "Applying user supplied hash key\n");
2264 rss_params.update_rss_key = 1;
2265 memcpy(&rss_params.rss_key, key, len);
2267 rss_params.rss_enable = 1;
2270 rss_params.update_rss_config = 1;
2271 /* tbl_size has to be set with capabilities */
2272 rss_params.rss_table_size_log = 7;
2273 vport_update_params.vport_id = 0;
2274 /* pass the L2 handles instead of qids */
2275 for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
2276 idx = i % QEDE_RSS_COUNT(qdev);
2277 rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
2279 vport_update_params.rss_params = &rss_params;
2281 for_each_hwfn(edev, i) {
2282 p_hwfn = &edev->hwfns[i];
2283 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2284 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
2285 ECORE_SPQ_MODE_EBLOCK, NULL);
2287 DP_ERR(edev, "vport-update for RSS failed\n");
2291 qdev->rss_enable = rss_params.rss_enable;
2293 /* Update local structure for hash query */
2294 qdev->rss_conf.rss_hf = hf;
2295 qdev->rss_conf.rss_key_len = len;
2296 if (qdev->rss_enable) {
2297 if (qdev->rss_conf.rss_key == NULL) {
2298 qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
2299 if (qdev->rss_conf.rss_key == NULL) {
2300 DP_ERR(edev, "No memory to store RSS key\n");
2305 DP_INFO(edev, "Storing RSS key\n");
2306 memcpy(qdev->rss_conf.rss_key, key, len);
2308 } else if (!qdev->rss_enable && len == 0) {
2309 if (qdev->rss_conf.rss_key) {
2310 free(qdev->rss_conf.rss_key);
2311 qdev->rss_conf.rss_key = NULL;
2312 DP_INFO(edev, "Free RSS key\n");
2319 static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
2320 struct rte_eth_rss_conf *rss_conf)
2322 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2324 rss_conf->rss_hf = qdev->rss_conf.rss_hf;
2325 rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;
2327 if (rss_conf->rss_key && qdev->rss_conf.rss_key)
2328 memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
2329 rss_conf->rss_key_len);
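/*
 * Illustrative usage sketch (not driver code): updating the RSS hash types
 * and key, then reading the stored configuration back. The 40-byte key
 * length matches ECORE_RSS_KEY_SIZE * sizeof(uint32_t) checked above;
 * the first two key bytes are example values, the rest default to zero.
 *
 * @code
 *	uint8_t key[40] = { 0x6d, 0x5a };
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = key,
 *		.rss_key_len = sizeof(key),
 *		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
 *	};
 *
 *	if (rte_eth_dev_rss_hash_update(port_id, &conf) == 0)
 *		rte_eth_dev_rss_hash_conf_get(port_id, &conf);
 * @endcode
 */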
2333 static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
2334 struct ecore_rss_params *rss)
	bool rss_mode = true; /* enable */
2338 struct ecore_queue_cid *cid;
2339 struct ecore_rss_params *t_rss;
	/* In a regular scenario, we'd simply need to take the input handlers.
	 * But in CMT, we'd have to split the handlers according to the
	 * engine they were configured on. We'd then have to understand
	 * whether RSS is really required, since 2 queues on CMT don't
	 * require RSS.
	 */
2348 /* CMT should be round-robin */
2349 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
2350 cid = rss->rss_ind_table[i];
2352 if (cid->p_owner == ECORE_LEADING_HWFN(edev))
2357 t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
2361 t_rss->update_rss_ind_table = 1;
2362 t_rss->rss_table_size_log = 7;
2363 t_rss->update_rss_config = 1;
2365 /* Make sure RSS is actually required */
2366 for_each_hwfn(edev, fn) {
2367 for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
2369 if (rss[fn].rss_ind_table[i] !=
2370 rss[fn].rss_ind_table[0])
2374 if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
2376 "CMT - 1 queue per-hwfn; Disabling RSS\n");
2383 t_rss->rss_enable = rss_mode;
2388 int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
2389 struct rte_eth_rss_reta_entry64 *reta_conf,
2392 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2393 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2394 struct ecore_sp_vport_update_params vport_update_params;
2395 struct ecore_rss_params *params;
2396 struct ecore_hwfn *p_hwfn;
2397 uint16_t i, idx, shift;
2401 if (reta_size > ETH_RSS_RETA_SIZE_128) {
2402 DP_ERR(edev, "reta_size %d is not supported by hardware\n",
2407 memset(&vport_update_params, 0, sizeof(vport_update_params));
2408 params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
2409 RTE_CACHE_LINE_SIZE);
2410 if (params == NULL) {
2411 DP_ERR(edev, "failed to allocate memory\n");
2415 for (i = 0; i < reta_size; i++) {
2416 idx = i / RTE_RETA_GROUP_SIZE;
2417 shift = i % RTE_RETA_GROUP_SIZE;
2418 if (reta_conf[idx].mask & (1ULL << shift)) {
2419 entry = reta_conf[idx].reta[shift];
2420 /* Pass rxq handles to ecore */
2421 params->rss_ind_table[i] =
2422 qdev->fp_array[entry].rxq->handle;
2423 /* Update the local copy for RETA query command */
2424 qdev->rss_ind_table[i] = entry;
2428 params->update_rss_ind_table = 1;
2429 params->rss_table_size_log = 7;
2430 params->update_rss_config = 1;
2432 /* Fix up RETA for CMT mode device */
2433 if (ECORE_IS_CMT(edev))
		qdev->rss_enable = qede_update_rss_parm_cmt(edev, params);
2436 vport_update_params.vport_id = 0;
2437 /* Use the current value of rss_enable */
2438 params->rss_enable = qdev->rss_enable;
2439 vport_update_params.rss_params = params;
2441 for_each_hwfn(edev, i) {
2442 p_hwfn = &edev->hwfns[i];
2443 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2444 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
2445 ECORE_SPQ_MODE_EBLOCK, NULL);
2447 DP_ERR(edev, "vport-update for RSS failed\n");
2457 static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
2458 struct rte_eth_rss_reta_entry64 *reta_conf,
2461 struct qede_dev *qdev = eth_dev->data->dev_private;
2462 struct ecore_dev *edev = &qdev->edev;
2463 uint16_t i, idx, shift;
2466 if (reta_size > ETH_RSS_RETA_SIZE_128) {
2467 DP_ERR(edev, "reta_size %d is not supported\n",
2472 for (i = 0; i < reta_size; i++) {
2473 idx = i / RTE_RETA_GROUP_SIZE;
2474 shift = i % RTE_RETA_GROUP_SIZE;
2475 if (reta_conf[idx].mask & (1ULL << shift)) {
2476 entry = qdev->rss_ind_table[i];
2477 reta_conf[idx].reta[shift] = entry;
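/*
 * Illustrative usage sketch (not driver code): spreading a 128-entry RETA
 * across two RX queues. RTE_RETA_GROUP_SIZE is 64, so 128 entries span two
 * rte_eth_rss_reta_entry64 groups, as the update/query handlers above walk.
 *
 * @code
 *	struct rte_eth_rss_reta_entry64 reta[2];
 *	uint16_t i;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < 128; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % 2;	// alternate between queue 0 and queue 1
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, 128);
 * @endcode
 */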
2486 static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
2488 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
2489 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2490 struct rte_eth_dev_info dev_info = {0};
2491 struct qede_fastpath *fp;
2492 uint32_t max_rx_pkt_len;
2493 uint32_t frame_size;
2494 uint16_t rx_buf_size;
2496 bool restart = false;
2499 PMD_INIT_FUNC_TRACE(edev);
2500 qede_dev_info_get(dev, &dev_info);
2501 max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2502 frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
2503 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
2504 DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
2505 mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
2506 ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
2509 if (!dev->data->scattered_rx &&
2510 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
2511 DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
2512 dev->data->min_rx_buf_size);
2515 /* Temporarily replace I/O functions with dummy ones. It cannot
2516 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
2518 dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
2519 dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
2520 if (dev->data->dev_started) {
2521 dev->data->dev_started = 0;
2528 /* Fix up RX buf size for all queues of the port */
2530 fp = &qdev->fp_array[i];
2531 if (fp->rxq != NULL) {
2532 bufsz = (uint16_t)rte_pktmbuf_data_room_size(
2533 fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
2534 if (dev->data->scattered_rx)
2535 rx_buf_size = bufsz + ETHER_HDR_LEN +
2536 ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
2538 rx_buf_size = frame_size;
2539 rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
2540 fp->rxq->rx_buf_size = rx_buf_size;
2541 DP_INFO(edev, "RX buffer size %u\n", rx_buf_size);
2544 if (max_rx_pkt_len > ETHER_MAX_LEN)
2545 dev->data->dev_conf.rxmode.jumbo_frame = 1;
2547 dev->data->dev_conf.rxmode.jumbo_frame = 0;
2549 if (!dev->data->dev_started && restart) {
2550 qede_dev_start(dev);
2551 dev->data->dev_started = 1;
2554 /* update max frame size */
2555 dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
2557 dev->rx_pkt_burst = qede_recv_pkts;
2558 dev->tx_pkt_burst = qede_xmit_pkts;
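/*
 * Illustrative usage sketch (not driver code): changing the MTU at runtime.
 * As implemented above, the port is briefly stopped and restarted with dummy
 * burst functions installed, so in-flight polling never sees NULL callbacks.
 *
 * @code
 *	if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *		printf("MTU rejected (out of range for this port)\n");
 * @endcode
 */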
2564 qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
2565 struct rte_eth_udp_tunnel *tunnel_udp)
2567 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2568 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2569 struct ecore_tunnel_info tunn; /* @DPDK */
2573 PMD_INIT_FUNC_TRACE(edev);
2575 memset(&tunn, 0, sizeof(tunn));
2577 switch (tunnel_udp->prot_type) {
2578 case RTE_TUNNEL_TYPE_VXLAN:
2579 if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
2580 DP_ERR(edev, "UDP port %u doesn't exist\n",
2581 tunnel_udp->udp_port);
2586 tunn.vxlan_port.b_update_port = true;
2587 tunn.vxlan_port.port = udp_port;
2589 rc = qede_tunnel_update(qdev, &tunn);
2590 if (rc != ECORE_SUCCESS) {
2591 DP_ERR(edev, "Unable to config UDP port %u\n",
2592 tunn.vxlan_port.port);
2596 qdev->vxlan.udp_port = udp_port;
		/* If the request is to delete the UDP port and the number of
		 * VXLAN filters has reached 0, then VXLAN offload can be
		 * disabled.
		 */
2602 return qede_vxlan_enable(eth_dev,
2603 ECORE_TUNN_CLSS_MAC_VLAN, false);
2606 case RTE_TUNNEL_TYPE_GENEVE:
2607 if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
2608 DP_ERR(edev, "UDP port %u doesn't exist\n",
2609 tunnel_udp->udp_port);
2615 tunn.geneve_port.b_update_port = true;
2616 tunn.geneve_port.port = udp_port;
2618 rc = qede_tunnel_update(qdev, &tunn);
2619 if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u\n",
			       tunn.geneve_port.port);

		qdev->geneve.udp_port = udp_port;
		/* If the request is to delete the UDP port and the number of
		 * GENEVE filters has reached 0, then GENEVE offload can be
		 * disabled.
		 */
2631 return qede_geneve_enable(eth_dev,
2632 ECORE_TUNN_CLSS_MAC_VLAN, false);
2644 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
2645 struct rte_eth_udp_tunnel *tunnel_udp)
2647 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2648 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2649 struct ecore_tunnel_info tunn; /* @DPDK */
2653 PMD_INIT_FUNC_TRACE(edev);
2655 memset(&tunn, 0, sizeof(tunn));
2657 switch (tunnel_udp->prot_type) {
2658 case RTE_TUNNEL_TYPE_VXLAN:
2659 if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
2661 "UDP port %u for VXLAN was already configured\n",
2662 tunnel_udp->udp_port);
2663 return ECORE_SUCCESS;
		/* Enable VXLAN tunnel with default MAC/VLAN classification if
		 * it was not enabled while adding a VXLAN filter before the
		 * UDP port update.
		 */
2670 if (!qdev->vxlan.enable) {
2671 rc = qede_vxlan_enable(eth_dev,
2672 ECORE_TUNN_CLSS_MAC_VLAN, true);
2673 if (rc != ECORE_SUCCESS) {
2674 DP_ERR(edev, "Failed to enable VXLAN "
2675 "prior to updating UDP port\n");
2679 udp_port = tunnel_udp->udp_port;
2681 tunn.vxlan_port.b_update_port = true;
2682 tunn.vxlan_port.port = udp_port;
2684 rc = qede_tunnel_update(qdev, &tunn);
2685 if (rc != ECORE_SUCCESS) {
2686 DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
2691 DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);
2693 qdev->vxlan.udp_port = udp_port;
2695 case RTE_TUNNEL_TYPE_GENEVE:
2696 if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
2698 "UDP port %u for GENEVE was already configured\n",
2699 tunnel_udp->udp_port);
2700 return ECORE_SUCCESS;
		/* Enable GENEVE tunnel with default MAC/VLAN classification if
		 * it was not enabled while adding a GENEVE filter before the
		 * UDP port update.
		 */
2707 if (!qdev->geneve.enable) {
2708 rc = qede_geneve_enable(eth_dev,
2709 ECORE_TUNN_CLSS_MAC_VLAN, true);
2710 if (rc != ECORE_SUCCESS) {
2711 DP_ERR(edev, "Failed to enable GENEVE "
2712 "prior to updating UDP port\n");
2716 udp_port = tunnel_udp->udp_port;
2718 tunn.geneve_port.b_update_port = true;
2719 tunn.geneve_port.port = udp_port;
2721 rc = qede_tunnel_update(qdev, &tunn);
2722 if (rc != ECORE_SUCCESS) {
2723 DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
2728 DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);
2730 qdev->geneve.udp_port = udp_port;
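/*
 * Illustrative usage sketch (not driver code): registering a non-default
 * VXLAN UDP port. If VXLAN offload is not yet enabled, the handler above
 * first enables it with MAC/VLAN classification.
 *
 * @code
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 8472,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 *	// ... later, tear it down again:
 *	rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
 * @endcode
 */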
2739 static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
2740 uint32_t *clss, char *str)
2743 *clss = MAX_ECORE_TUNN_CLSS;
2745 for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
2746 if (filter == qede_tunn_types[j].rte_filter_type) {
2747 *type = qede_tunn_types[j].qede_type;
2748 *clss = qede_tunn_types[j].qede_tunn_clss;
2749 strcpy(str, qede_tunn_types[j].string);
2756 qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
2757 const struct rte_eth_tunnel_filter_conf *conf,
	/* Init common ucast params first */
2761 qede_set_ucast_cmn_params(ucast);
2763 /* Copy out the required fields based on classification type */
2767 case ECORE_FILTER_VNI:
2768 ucast->vni = conf->tenant_id;
2770 case ECORE_FILTER_INNER_VLAN:
2771 ucast->vlan = conf->inner_vlan;
2773 case ECORE_FILTER_MAC:
2774 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
2777 case ECORE_FILTER_INNER_MAC:
2778 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2781 case ECORE_FILTER_MAC_VNI_PAIR:
2782 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
2784 ucast->vni = conf->tenant_id;
2786 case ECORE_FILTER_INNER_MAC_VNI_PAIR:
2787 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2789 ucast->vni = conf->tenant_id;
2791 case ECORE_FILTER_INNER_PAIR:
2792 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2794 ucast->vlan = conf->inner_vlan;
2800 return ECORE_SUCCESS;
2804 _qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
2805 const struct rte_eth_tunnel_filter_conf *conf,
2806 __attribute__((unused)) enum rte_filter_op filter_op,
2807 enum ecore_tunn_clss *clss,
2810 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2811 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2812 struct ecore_filter_ucast ucast = {0};
2813 enum ecore_filter_ucast_type type;
2814 uint16_t filter_type = 0;
2818 filter_type = conf->filter_type;
2819 /* Determine if the given filter classification is supported */
2820 qede_get_ecore_tunn_params(filter_type, &type, clss, str);
2821 if (*clss == MAX_ECORE_TUNN_CLSS) {
2822 DP_ERR(edev, "Unsupported filter type\n");
2825 /* Init tunnel ucast params */
2826 rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
2827 if (rc != ECORE_SUCCESS) {
2828 DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
2832 DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
2833 str, filter_op, ucast.type);
2835 ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
2837 /* Skip MAC/VLAN if filter is based on VNI */
2838 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
2839 rc = qede_mac_int_ops(eth_dev, &ucast, add);
2840 if ((rc == 0) && add) {
2841 /* Enable accept anyvlan */
2842 qede_config_accept_any_vlan(qdev, true);
2845 rc = qede_ucast_filter(eth_dev, &ucast, add);
2847 rc = ecore_filter_ucast_cmd(edev, &ucast,
2848 ECORE_SPQ_MODE_CB, NULL);
2855 qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
2856 enum rte_filter_op filter_op,
2857 const struct rte_eth_tunnel_filter_conf *conf)
2859 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2860 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2861 enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
2865 PMD_INIT_FUNC_TRACE(edev);
2867 switch (filter_op) {
2868 case RTE_ETH_FILTER_ADD:
2871 case RTE_ETH_FILTER_DELETE:
2875 DP_ERR(edev, "Unsupported operation %d\n", filter_op);
2880 return qede_tunn_enable(eth_dev,
2881 ECORE_TUNN_CLSS_MAC_VLAN,
2882 conf->tunnel_type, add);
2884 rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
2885 if (rc != ECORE_SUCCESS)
2889 if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
2890 qdev->vxlan.num_filters++;
2891 qdev->vxlan.filter_type = conf->filter_type;
2892 } else { /* GENEVE */
2893 qdev->geneve.num_filters++;
2894 qdev->geneve.filter_type = conf->filter_type;
2897 if (!qdev->vxlan.enable || !qdev->geneve.enable ||
2898 !qdev->ipgre.enable)
2899 return qede_tunn_enable(eth_dev, clss,
2903 if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
2904 qdev->vxlan.num_filters--;
2906 qdev->geneve.num_filters--;
		/* Disable tunnel offload if the VXLAN or GENEVE filter
		 * count drops to 0.
		 */
2909 if ((qdev->vxlan.num_filters == 0) ||
2910 (qdev->geneve.num_filters == 0))
2911 return qede_tunn_enable(eth_dev, clss,
2919 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
2920 enum rte_filter_type filter_type,
2921 enum rte_filter_op filter_op,
2924 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2925 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2926 struct rte_eth_tunnel_filter_conf *filter_conf =
2927 (struct rte_eth_tunnel_filter_conf *)arg;
2929 switch (filter_type) {
2930 case RTE_ETH_FILTER_TUNNEL:
2931 switch (filter_conf->tunnel_type) {
2932 case RTE_TUNNEL_TYPE_VXLAN:
2933 case RTE_TUNNEL_TYPE_GENEVE:
2934 case RTE_TUNNEL_TYPE_IP_IN_GRE:
2936 "Packet steering to the specified Rx queue"
2937 " is not supported with UDP tunneling");
			return qede_tunn_filter_config(eth_dev, filter_op,
						       filter_conf);
2940 case RTE_TUNNEL_TYPE_TEREDO:
2941 case RTE_TUNNEL_TYPE_NVGRE:
2942 case RTE_L2_TUNNEL_TYPE_E_TAG:
2943 DP_ERR(edev, "Unsupported tunnel type %d\n",
2944 filter_conf->tunnel_type);
2946 case RTE_TUNNEL_TYPE_NONE:
2951 case RTE_ETH_FILTER_FDIR:
2952 return qede_fdir_filter_conf(eth_dev, filter_op, arg);
2953 case RTE_ETH_FILTER_NTUPLE:
2954 return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
2955 case RTE_ETH_FILTER_MACVLAN:
2956 case RTE_ETH_FILTER_ETHERTYPE:
2957 case RTE_ETH_FILTER_FLEXIBLE:
2958 case RTE_ETH_FILTER_SYN:
2959 case RTE_ETH_FILTER_HASH:
2960 case RTE_ETH_FILTER_L2_TUNNEL:
2961 case RTE_ETH_FILTER_MAX:
		DP_ERR(edev, "Unsupported filter type %d\n",
			filter_type);
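/*
 * Illustrative usage sketch (not driver code): adding a VXLAN tunnel filter
 * keyed on inner MAC + inner VLAN through the legacy filter_ctrl API
 * dispatched above. Field values are example placeholders.
 *
 * @code
 *	struct rte_eth_tunnel_filter_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
 *	conf.filter_type = ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN;
 *	conf.inner_vlan = 100;
 *	// conf.inner_mac to be filled in by the application
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
 *				RTE_ETH_FILTER_ADD, &conf);
 * @endcode
 */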
2971 static const struct eth_dev_ops qede_eth_dev_ops = {
2972 .dev_configure = qede_dev_configure,
2973 .dev_infos_get = qede_dev_info_get,
2974 .rx_queue_setup = qede_rx_queue_setup,
2975 .rx_queue_release = qede_rx_queue_release,
2976 .tx_queue_setup = qede_tx_queue_setup,
2977 .tx_queue_release = qede_tx_queue_release,
2978 .dev_start = qede_dev_start,
2979 .dev_set_link_up = qede_dev_set_link_up,
2980 .dev_set_link_down = qede_dev_set_link_down,
2981 .link_update = qede_link_update,
2982 .promiscuous_enable = qede_promiscuous_enable,
2983 .promiscuous_disable = qede_promiscuous_disable,
2984 .allmulticast_enable = qede_allmulticast_enable,
2985 .allmulticast_disable = qede_allmulticast_disable,
2986 .set_mc_addr_list = qede_set_mc_addr_list,
2987 .dev_stop = qede_dev_stop,
2988 .dev_close = qede_dev_close,
2989 .stats_get = qede_get_stats,
2990 .stats_reset = qede_reset_stats,
2991 .xstats_get = qede_get_xstats,
2992 .xstats_reset = qede_reset_xstats,
2993 .xstats_get_names = qede_get_xstats_names,
2994 .mac_addr_add = qede_mac_addr_add,
2995 .mac_addr_remove = qede_mac_addr_remove,
2996 .mac_addr_set = qede_mac_addr_set,
2997 .vlan_offload_set = qede_vlan_offload_set,
2998 .vlan_filter_set = qede_vlan_filter_set,
2999 .flow_ctrl_set = qede_flow_ctrl_set,
3000 .flow_ctrl_get = qede_flow_ctrl_get,
3001 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
3002 .rss_hash_update = qede_rss_hash_update,
3003 .rss_hash_conf_get = qede_rss_hash_conf_get,
3004 .reta_update = qede_rss_reta_update,
3005 .reta_query = qede_rss_reta_query,
3006 .mtu_set = qede_set_mtu,
3007 .filter_ctrl = qede_dev_filter_ctrl,
3008 .udp_tunnel_port_add = qede_udp_dst_port_add,
3009 .udp_tunnel_port_del = qede_udp_dst_port_del,
3012 static const struct eth_dev_ops qede_eth_vf_dev_ops = {
3013 .dev_configure = qede_dev_configure,
3014 .dev_infos_get = qede_dev_info_get,
3015 .rx_queue_setup = qede_rx_queue_setup,
3016 .rx_queue_release = qede_rx_queue_release,
3017 .tx_queue_setup = qede_tx_queue_setup,
3018 .tx_queue_release = qede_tx_queue_release,
3019 .dev_start = qede_dev_start,
3020 .dev_set_link_up = qede_dev_set_link_up,
3021 .dev_set_link_down = qede_dev_set_link_down,
3022 .link_update = qede_link_update,
3023 .promiscuous_enable = qede_promiscuous_enable,
3024 .promiscuous_disable = qede_promiscuous_disable,
3025 .allmulticast_enable = qede_allmulticast_enable,
3026 .allmulticast_disable = qede_allmulticast_disable,
3027 .set_mc_addr_list = qede_set_mc_addr_list,
3028 .dev_stop = qede_dev_stop,
3029 .dev_close = qede_dev_close,
3030 .stats_get = qede_get_stats,
3031 .stats_reset = qede_reset_stats,
3032 .xstats_get = qede_get_xstats,
3033 .xstats_reset = qede_reset_xstats,
3034 .xstats_get_names = qede_get_xstats_names,
3035 .vlan_offload_set = qede_vlan_offload_set,
3036 .vlan_filter_set = qede_vlan_filter_set,
3037 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
3038 .rss_hash_update = qede_rss_hash_update,
3039 .rss_hash_conf_get = qede_rss_hash_conf_get,
3040 .reta_update = qede_rss_reta_update,
3041 .reta_query = qede_rss_reta_query,
3042 .mtu_set = qede_set_mtu,
3043 .udp_tunnel_port_add = qede_udp_dst_port_add,
3044 .udp_tunnel_port_del = qede_udp_dst_port_del,
3045 .mac_addr_add = qede_mac_addr_add,
3046 .mac_addr_remove = qede_mac_addr_remove,
3047 .mac_addr_set = qede_mac_addr_set,
3050 static void qede_update_pf_params(struct ecore_dev *edev)
3052 struct ecore_pf_params pf_params;
3054 memset(&pf_params, 0, sizeof(struct ecore_pf_params));
3055 pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
3056 pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
3057 qed_ops->common->update_pf_params(edev, &pf_params);
3060 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
3062 struct rte_pci_device *pci_dev;
3063 struct rte_pci_addr pci_addr;
3064 struct qede_dev *adapter;
3065 struct ecore_dev *edev;
3066 struct qed_dev_eth_info dev_info;
3067 struct qed_slowpath_params params;
3068 static bool do_once = true;
3069 uint8_t bulletin_change;
3070 uint8_t vf_mac[ETHER_ADDR_LEN];
3071 uint8_t is_mac_forced;
3073 /* Fix up ecore debug level */
3074 uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
3075 uint8_t dp_level = ECORE_LEVEL_VERBOSE;
3079 /* Extract key data structures */
3080 adapter = eth_dev->data->dev_private;
3081 adapter->ethdev = eth_dev;
3082 edev = &adapter->edev;
3083 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3084 pci_addr = pci_dev->addr;
3086 PMD_INIT_FUNC_TRACE(edev);
3088 snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
3089 pci_addr.bus, pci_addr.devid, pci_addr.function,
3090 eth_dev->data->port_id);
3092 eth_dev->rx_pkt_burst = qede_recv_pkts;
3093 eth_dev->tx_pkt_burst = qede_xmit_pkts;
3094 eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
3096 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3097 DP_ERR(edev, "Skipping device init from secondary process\n");
3101 rte_eth_copy_pci_info(eth_dev, pci_dev);
3104 edev->vendor_id = pci_dev->id.vendor_id;
3105 edev->device_id = pci_dev->id.device_id;
3107 qed_ops = qed_get_eth_ops();
3109 DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
3113 DP_INFO(edev, "Starting qede probe\n");
3114 rc = qed_ops->common->probe(edev, pci_dev, dp_module,
3117 DP_ERR(edev, "qede probe failed rc %d\n", rc);
3120 qede_update_pf_params(edev);
3122 switch (pci_dev->intr_handle.type) {
3123 case RTE_INTR_HANDLE_UIO_INTX:
3124 case RTE_INTR_HANDLE_VFIO_LEGACY:
3125 int_mode = ECORE_INT_MODE_INTA;
3126 rte_intr_callback_register(&pci_dev->intr_handle,
3127 qede_interrupt_handler_intx,
3131 int_mode = ECORE_INT_MODE_MSIX;
3132 rte_intr_callback_register(&pci_dev->intr_handle,
3133 qede_interrupt_handler,
3137 if (rte_intr_enable(&pci_dev->intr_handle)) {
3138 DP_ERR(edev, "rte_intr_enable() failed\n");
3142 /* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));
3145 params.int_mode = int_mode;
3146 params.drv_major = QEDE_PMD_VERSION_MAJOR;
3147 params.drv_minor = QEDE_PMD_VERSION_MINOR;
3148 params.drv_rev = QEDE_PMD_VERSION_REVISION;
3149 params.drv_eng = QEDE_PMD_VERSION_PATCH;
3150 strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
3151 QEDE_PMD_DRV_VER_STR_SIZE);
3153 /* For CMT mode device do periodic polling for slowpath events.
3154 * This is required since uio device uses only one MSI-x
3155 * interrupt vector but we need one for each engine.
3157 if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
3158 rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
3162 DP_ERR(edev, "Unable to start periodic"
3163 " timer rc %d\n", rc);
	rc = qed_ops->common->slowpath_start(edev, &params);
3170 DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
3171 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
3176 rc = qed_ops->fill_dev_info(edev, &dev_info);
3178 DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
3179 qed_ops->common->slowpath_stop(edev);
3180 qed_ops->common->remove(edev);
3181 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
3186 qede_alloc_etherdev(adapter, &dev_info);
3188 adapter->ops->common->set_name(edev, edev->name);
3191 adapter->dev_info.num_mac_filters =
3192 (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
3195 ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
3196 (uint32_t *)&adapter->dev_info.num_mac_filters);
3198 /* Allocate memory for storing MAC addr */
3199 eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
3201 adapter->dev_info.num_mac_filters),
3202 RTE_CACHE_LINE_SIZE);
3204 if (eth_dev->data->mac_addrs == NULL) {
3205 DP_ERR(edev, "Failed to allocate MAC address\n");
3206 qed_ops->common->slowpath_stop(edev);
3207 qed_ops->common->remove(edev);
3208 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
3214 ether_addr_copy((struct ether_addr *)edev->hwfns[0].
3215 hw_info.hw_mac_addr,
				&eth_dev->data->mac_addrs[0]);
		ether_addr_copy(&eth_dev->data->mac_addrs[0],
3218 &adapter->primary_mac);
3220 ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
3222 if (bulletin_change) {
3224 ecore_vf_bulletin_get_forced_mac(
3225 ECORE_LEADING_HWFN(edev),
3229 DP_INFO(edev, "VF macaddr received from PF\n");
3230 ether_addr_copy((struct ether_addr *)&vf_mac,
					&eth_dev->data->mac_addrs[0]);
			ether_addr_copy(&eth_dev->data->mac_addrs[0],
3233 &adapter->primary_mac);
3235 DP_ERR(edev, "No VF macaddr assigned\n");
3240 eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
3243 qede_print_adapter_info(adapter);
3247 /* Bring-up the link */
3248 qede_dev_set_link_state(eth_dev, true);
3250 adapter->num_tx_queues = 0;
3251 adapter->num_rx_queues = 0;
3252 SLIST_INIT(&adapter->fdir_info.fdir_list_head);
3253 SLIST_INIT(&adapter->vlan_list_head);
3254 SLIST_INIT(&adapter->uc_list_head);
3255 SLIST_INIT(&adapter->mc_list_head);
3256 adapter->mtu = ETHER_MTU;
3257 adapter->vport_started = false;
	/* VF tunnel offloads are enabled by default in the PF driver */
3260 adapter->vxlan.num_filters = 0;
3261 adapter->geneve.num_filters = 0;
3262 adapter->ipgre.num_filters = 0;
3264 adapter->vxlan.enable = true;
3265 adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
3266 ETH_TUNNEL_FILTER_IVLAN;
3267 adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
3268 adapter->geneve.enable = true;
3269 adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
3270 ETH_TUNNEL_FILTER_IVLAN;
3271 adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
3272 adapter->ipgre.enable = true;
3273 adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
3274 ETH_TUNNEL_FILTER_IVLAN;
3276 adapter->vxlan.enable = false;
3277 adapter->geneve.enable = false;
3278 adapter->ipgre.enable = false;
3281 DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
3282 adapter->primary_mac.addr_bytes[0],
3283 adapter->primary_mac.addr_bytes[1],
3284 adapter->primary_mac.addr_bytes[2],
3285 adapter->primary_mac.addr_bytes[3],
3286 adapter->primary_mac.addr_bytes[4],
3287 adapter->primary_mac.addr_bytes[5]);
3289 DP_INFO(edev, "Device initialized\n");
3294 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
3296 return qede_common_dev_init(eth_dev, 1);
3299 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
3301 return qede_common_dev_init(eth_dev, 0);
3304 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
3306 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
3307 struct qede_dev *qdev = eth_dev->data->dev_private;
3308 struct ecore_dev *edev = &qdev->edev;
3310 PMD_INIT_FUNC_TRACE(edev);
3313 /* only uninitialize in the primary process */
3314 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3317 /* safe to close dev here */
3318 qede_dev_close(eth_dev);
3320 eth_dev->dev_ops = NULL;
3321 eth_dev->rx_pkt_burst = NULL;
3322 eth_dev->tx_pkt_burst = NULL;
3324 if (eth_dev->data->mac_addrs)
3325 rte_free(eth_dev->data->mac_addrs);
3327 eth_dev->data->mac_addrs = NULL;
3332 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
3334 return qede_dev_common_uninit(eth_dev);
3337 static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
3339 return qede_dev_common_uninit(eth_dev);
3342 static const struct rte_pci_id pci_id_qedevf_map[] = {
3343 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
3345 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
3348 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
3351 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
3356 static const struct rte_pci_id pci_id_qede_map[] = {
3357 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
3359 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
3362 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
3365 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
3368 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
3371 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
3374 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
3377 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
3380 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
3383 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
3386 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
3391 static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3392 struct rte_pci_device *pci_dev)
3394 return rte_eth_dev_pci_generic_probe(pci_dev,
3395 sizeof(struct qede_dev), qedevf_eth_dev_init);
3398 static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
3400 return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
3403 static struct rte_pci_driver rte_qedevf_pmd = {
3404 .id_table = pci_id_qedevf_map,
3405 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3406 .probe = qedevf_eth_dev_pci_probe,
3407 .remove = qedevf_eth_dev_pci_remove,
3410 static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3411 struct rte_pci_device *pci_dev)
3413 return rte_eth_dev_pci_generic_probe(pci_dev,
3414 sizeof(struct qede_dev), qede_eth_dev_init);
3417 static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
3419 return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
3422 static struct rte_pci_driver rte_qede_pmd = {
3423 .id_table = pci_id_qede_map,
3424 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3425 .probe = qede_eth_dev_pci_probe,
3426 .remove = qede_eth_dev_pci_remove,
3429 RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
3430 RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
3431 RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
3432 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
3433 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
3434 RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
RTE_INIT(qede_init_log);
static void
qede_init_log(void)
{
3440 qede_logtype_init = rte_log_register("pmd.net.qede.init");
3441 if (qede_logtype_init >= 0)
3442 rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
3443 qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
3444 if (qede_logtype_driver >= 0)
3445 rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
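/*
 * Illustrative usage sketch (not driver code): raising the driver log level
 * at runtime; the same effect is available from the EAL command line via the
 * --log-level option naming "pmd.net.qede.driver".
 *
 * @code
 *	rte_log_set_level(qede_logtype_driver, RTE_LOG_DEBUG);
 * @endcode
 */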