/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 */

#include "qede_ethdev.h"
#include <rte_string_fns.h>
#include <rte_version.h>
#include <rte_kvargs.h>

int qede_logtype_init;
int qede_logtype_driver;

static const struct qed_eth_ops *qed_ops;
static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev);
static int qede_eth_dev_init(struct rte_eth_dev *eth_dev);

#define QEDE_SP_TIMER_PERIOD	10000 /* 10ms (rte_eal_alarm_set() takes microseconds) */

struct rte_qede_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];

static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
	{"rx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
	{"rx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
	{"rx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
	{"rx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
	{"rx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
	{"rx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},
	{"tx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
	{"tx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
	{"tx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
	{"tx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
	{"tx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
	{"tx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},

	{"rx_64_byte_packets",
		offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
	{"rx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_65_to_127_byte_packets)},
	{"rx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_128_to_255_byte_packets)},
	{"rx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_256_to_511_byte_packets)},
	{"rx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_512_to_1023_byte_packets)},
	{"rx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_1024_to_1518_byte_packets)},
	{"tx_64_byte_packets",
		offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
	{"tx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_65_to_127_byte_packets)},
	{"tx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_128_to_255_byte_packets)},
	{"tx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_256_to_511_byte_packets)},
	{"tx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_512_to_1023_byte_packets)},
	{"tx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_1024_to_1518_byte_packets)},

	/* Note: the ecore field keeps its historical "crtl" spelling */
	{"rx_mac_control_frames",
		offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
	{"tx_mac_control_frames",
		offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
	{"rx_pause_frames",
		offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
	{"tx_pause_frames",
		offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
	{"rx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
	{"tx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},

	{"rx_crc_errors",
		offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
	{"rx_align_errors",
		offsetof(struct ecore_eth_stats_common, rx_align_errors)},
	{"rx_carrier_errors",
		offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
	{"rx_oversize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
	{"rx_jabbers",
		offsetof(struct ecore_eth_stats_common, rx_jabbers)},
	{"rx_undersize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
	{"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
	{"rx_host_buffer_not_available",
		offsetof(struct ecore_eth_stats_common, no_buff_discards)},
	/* Number of packets discarded because they are bigger than MTU */
	{"rx_packet_too_big_discards",
		offsetof(struct ecore_eth_stats_common,
			 packet_too_big_discard)},
	{"rx_ttl_zero_discards",
		offsetof(struct ecore_eth_stats_common, ttl0_discard)},
	{"rx_multi_function_tag_filter_discards",
		offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
	{"rx_mac_filter_discards",
		offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
	{"rx_hw_buffer_truncates",
		offsetof(struct ecore_eth_stats_common, brb_truncates)},
	{"rx_hw_buffer_discards",
		offsetof(struct ecore_eth_stats_common, brb_discards)},
	{"tx_error_drop_packets",
		offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},

	{"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
	{"rx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
	{"rx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
	{"rx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
	{"rx_mac_frames_ok",
		offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
	{"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
	{"tx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
	{"tx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
	{"tx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},

	{"lro_coalesced_packets",
		offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
	{"lro_coalesced_events",
		offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
	{"lro_aborts_num",
		offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
	{"lro_not_coalesced_packets",
		offsetof(struct ecore_eth_stats_common,
			 tpa_not_coalesced_pkts)},
	{"lro_coalesced_bytes",
		offsetof(struct ecore_eth_stats_common,
			 tpa_coalesced_bytes)},

static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
	{"rx_1519_to_1522_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_1522_byte_packets)},
	{"rx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_2047_byte_packets)},
	{"rx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_2048_to_4095_byte_packets)},
	{"rx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_4096_to_9216_byte_packets)},
	{"rx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_9217_to_16383_byte_packets)},
	{"tx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_1519_to_2047_byte_packets)},
	{"tx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_2048_to_4095_byte_packets)},
	{"tx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_4096_to_9216_byte_packets)},
	{"tx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_9217_to_16383_byte_packets)},
	{"tx_lpi_entry_count",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
	{"tx_total_collisions",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},

static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
	{"rx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 rx_1519_to_max_byte_packets)},
	{"tx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 tx_1519_to_max_byte_packets)},

static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
	{"rx_q_segments",
		offsetof(struct qede_rx_queue, rx_segs)},
	{"rx_q_hw_errors",
		offsetof(struct qede_rx_queue, rx_hw_errors)},
	{"rx_q_allocation_errors",
		offsetof(struct qede_rx_queue, rx_alloc_errors)}
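
/* Illustrative note: the tables above are consumed generically. Given a
 * filled ecore stats structure, each extended stat is read as
 *
 *   *(uint64_t *)((char *)&stats + qede_xstats_strings[i].offset);
 *
 * so adding a counter only needs a display name plus an offsetof() into
 * the ecore stats layout; qede_get_xstats() below does exactly this.
 */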
static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));

qede_interrupt_handler_intx(void *param)
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	/* Check if our device actually raised an interrupt */
	status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev));
		qede_interrupt_action(ECORE_LEADING_HWFN(edev));
		if (rte_intr_ack(eth_dev->intr_handle))
			DP_ERR(edev, "rte_intr_ack failed\n");
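
/* Note: legacy INTx is level-triggered and may be shared, hence the SISR
 * register read above to confirm the interrupt is really ours before
 * scheduling the slowpath DPC; the MSI-X handler below can skip that check.
 */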
qede_interrupt_handler(void *param)
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	if (rte_intr_ack(eth_dev->intr_handle))
		DP_ERR(edev, "rte_intr_ack failed\n");

qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
	rte_memcpy(&qdev->dev_info, info, sizeof(*info));

static void qede_print_adapter_info(struct qede_dev *qdev)
	struct ecore_dev *edev = &qdev->edev;
	struct qed_dev_info *info = &qdev->dev_info.common;
	static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
	static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];

	DP_INFO(edev, "*********************************\n");
	DP_INFO(edev, " DPDK version : %s\n", rte_version());
	DP_INFO(edev, " Chip details : %s %c%d\n",
		ECORE_IS_BB(edev) ? "BB" : "AH",
		'A' + edev->chip_rev,
		(int)edev->chip_metal);
	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
		 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
	snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
		 ver_str, QEDE_PMD_VERSION);
	DP_INFO(edev, " Driver version : %s\n", drv_ver);
	DP_INFO(edev, " Firmware version : %s\n", ver_str);

	snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
		 (info->mfw_rev >> 24) & 0xff,
		 (info->mfw_rev >> 16) & 0xff,
		 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
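	/* Byte-wise decode example: an mfw_rev word of 0x081e1a00 prints as
	 * management FW version "8.30.26.0" (major.minor.rev.eng).
	 */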
	DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
	DP_INFO(edev, " Firmware file : %s\n", qede_fw_file);
	DP_INFO(edev, "*********************************\n");

static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
	struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	unsigned int i = 0, j = 0, qid;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");

	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(dev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);

	for (qid = 0; qid < qdev->num_rx_queues; qid++) {
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			    offsetof(struct qede_rx_queue, rcv_pkts), 0,
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			    offsetof(struct qede_rx_queue, rx_hw_errors), 0,
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			    offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
			for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
				OSAL_MEMSET((((char *)
					      (qdev->fp_array[qid].rxq)) +
					     qede_rxq_xstats_strings[j].offset),
		if (i == rxq_stat_cntrs)

	for (qid = 0; qid < qdev->num_tx_queues; qid++) {
		txq = qdev->fp_array[qid].txq;
		OSAL_MEMSET((uint64_t *)(uintptr_t)
			    (((uint64_t)(uintptr_t)(txq)) +
			     offsetof(struct qede_tx_queue, xmit_pkts)), 0,
		if (i == txq_stat_cntrs)

qede_stop_vport(struct ecore_dev *edev)
	struct ecore_hwfn *p_hwfn;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);

	DP_INFO(edev, "vport stopped\n");

qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_start_params params;
	struct ecore_hwfn *p_hwfn;

	if (qdev->vport_started)
		qede_stop_vport(edev);

	memset(&params, 0, sizeof(params));
	/* @DPDK - Disable FW placement */
	params.zero_placement_offset = 1;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.concrete_fid = p_hwfn->hw_info.concrete_fid;
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_start(p_hwfn, &params);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Start V-PORT failed %d\n", rc);

	ecore_reset_vport_stats(edev);
	qdev->vport_started = true;
	DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);

#define QEDE_NPAR_TX_SWITCHING "npar_tx_switching"
#define QEDE_VF_TX_SWITCHING "vf_tx_switching"
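
/* Usage sketch (assumed testpmd invocation; -w is the PCI devargs syntax
 * of this DPDK era):
 *   testpmd -w 05:00.0,vf_tx_switching=0 -- -i
 * qede_args_check() below parses the value with strtoul(), so 0/1 work.
 */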
/* Activate or deactivate vport via vport-update */
int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.update_vport_active_rx_flg = 1;
	params.update_vport_active_tx_flg = 1;
	params.vport_active_rx_flg = flg;
	params.vport_active_tx_flg = flg;
	/* Keep TX switching off while activating when it was not requested */
	if (!qdev->enable_tx_switching && flg) {
		params.update_tx_switching_flg = 1;
		params.tx_switching_flg = !flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");

	DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");

qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
			   uint16_t mtu, bool enable)
	/* Enable LRO in split mode */
	sge_tpa_params->tpa_ipv4_en_flg = enable;
	sge_tpa_params->tpa_ipv6_en_flg = enable;
	sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
	sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
	/* set if tpa enable changes */
	sge_tpa_params->update_tpa_en_flg = 1;
	/* set if tpa parameters should be handled */
	sge_tpa_params->update_tpa_param_flg = enable;

	sge_tpa_params->max_buffers_per_cqe = 20;
	/* Enable TPA in split mode. In this mode each TPA segment
	 * starts on a new BD, so there is one BD per segment.
	 */
	sge_tpa_params->tpa_pkt_split_flg = 1;
	sge_tpa_params->tpa_hdr_data_split_flg = 0;
	sge_tpa_params->tpa_gro_consistent_flg = 0;
	sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
	sge_tpa_params->tpa_max_size = 0x7FFF;
	sge_tpa_params->tpa_min_size_to_start = mtu / 2;
	sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
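	/* Worked example: with an MTU of 1500 an aggregation only starts or
	 * continues for payloads of at least 750 bytes, and is capped at
	 * tpa_max_size = 0x7FFF (32767) bytes per aggregated packet.
	 */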
/* Enable/disable LRO via vport-update */
int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_sge_tpa_params tpa_params;
	struct ecore_hwfn *p_hwfn;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
	qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
	params.sge_tpa_params = &tpa_params;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update LRO\n");

	qdev->enable_lro = flg;
	eth_dev->data->lro = flg;

	DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");

qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
			     enum qed_filter_rx_mode_type type)
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_accept_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.update_rx_mode_config = 1;
	flags.update_tx_mode_config = 1;
	flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
		ECORE_ACCEPT_MCAST_MATCHED |
	flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
		ECORE_ACCEPT_MCAST_MATCHED |

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
			flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
			DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	} else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
			    QED_FILTER_RX_MODE_TYPE_PROMISC)) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
			ECORE_ACCEPT_MCAST_UNMATCHED;

	return ecore_filter_accept_cmd(edev, 0, flags, false, false,
				       ECORE_SPQ_MODE_CB, NULL);

qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_ucast_entry *tmp = NULL;
	struct qede_ucast_entry *u;
	struct rte_ether_addr *mac_addr;

	mac_addr = (struct rte_ether_addr *)ucast->mac;
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    RTE_ETHER_ADDR_LEN) == 0) &&
			     ucast->vni == tmp->vni &&
			     ucast->vlan == tmp->vlan) {
				DP_INFO(edev, "Unicast MAC is already added"
					" with vlan = %u, vni = %u\n",
					ucast->vlan, ucast->vni);
		u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
			       RTE_CACHE_LINE_SIZE);
			DP_ERR(edev, "Did not allocate memory for ucast\n");
		rte_ether_addr_copy(mac_addr, &u->mac);
		u->vlan = ucast->vlan;
		SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    RTE_ETHER_ADDR_LEN) == 0) &&
			    ucast->vlan == tmp->vlan &&
			    ucast->vni == tmp->vni)
			DP_INFO(edev, "Unicast MAC not found\n");
		SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);

qede_add_mcast_filters(struct rte_eth_dev *eth_dev,
		       struct rte_ether_addr *mc_addrs,
		       uint32_t mc_addrs_num)
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_mcast mcast;
	struct qede_mcast_entry *m = NULL;

	for (i = 0; i < mc_addrs_num; i++) {
		m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
			       RTE_CACHE_LINE_SIZE);
			DP_ERR(edev, "Did not allocate memory for mcast\n");
		rte_ether_addr_copy(&mc_addrs[i], &m->mac);
		SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
	memset(&mcast, 0, sizeof(mcast));
	mcast.num_mc_addrs = mc_addrs_num;
	mcast.opcode = ECORE_FILTER_ADD;
	for (i = 0; i < mc_addrs_num; i++)
		rte_ether_addr_copy(&mc_addrs[i], (struct rte_ether_addr *)
	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Failed to add multicast filter (rc = %d)\n", rc);

static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_mcast_entry *tmp = NULL;
	struct ecore_filter_mcast mcast;

	memset(&mcast, 0, sizeof(mcast));
	mcast.num_mc_addrs = qdev->num_mc_addr;
	mcast.opcode = ECORE_FILTER_REMOVE;
	SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
		rte_ether_addr_copy(&tmp->mac,
				    (struct rte_ether_addr *)&mcast.mac[j]);
	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Failed to delete multicast filter\n");
	while (!SLIST_EMPTY(&qdev->mc_list_head)) {
		tmp = SLIST_FIRST(&qdev->mc_list_head);
		SLIST_REMOVE_HEAD(&qdev->mc_list_head, list);
	SLIST_INIT(&qdev->mc_list_head);

qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;

	if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) {
		DP_ERR(edev, "Ucast filter table limit exceeded,"
			     " please enable promisc mode\n");

	rc = qede_ucast_filter(eth_dev, ucast, add);
		rc = ecore_filter_ucast_cmd(edev, ucast,
					    ECORE_SPQ_MODE_CB, NULL);
	/* Indicate error only for add filter operation.
	 * Delete filter operations are not severe.
	 */
	if ((rc != ECORE_SUCCESS) && add)
		DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",

qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr,
		  __rte_unused uint32_t index, __rte_unused uint32_t pool)
	struct ecore_filter_ucast ucast;

	if (!rte_is_valid_assigned_ether_addr(mac_addr))

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_ADD;
	ucast.type = ECORE_FILTER_MAC;
	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)&ucast.mac);
	re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);

qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_filter_ucast ucast;

	PMD_INIT_FUNC_TRACE(edev);

	if (index >= qdev->dev_info.num_mac_filters) {
		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
		       index, qdev->dev_info.num_mac_filters);

	if (!rte_is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_REMOVE;
	ucast.type = ECORE_FILTER_MAC;

	/* Use the index maintained by rte */
	rte_ether_addr_copy(&eth_dev->data->mac_addrs[index],
			    (struct rte_ether_addr *)&ucast.mac);

	qede_mac_int_ops(eth_dev, &ucast, false);

qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr)
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
					       mac_addr->addr_bytes)) {
		DP_ERR(edev, "Setting MAC address is not allowed\n");

	qede_mac_addr_remove(eth_dev, 0);

	return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);

void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.update_accept_any_vlan_flg = 1;
	params.accept_any_vlan = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to configure accept-any-vlan\n");

	DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.update_inner_vlan_removal_flg = 1;
	params.inner_vlan_removal_flg = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");

	DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");

static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
				uint16_t vlan_id, int on)
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
	struct qede_vlan_entry *tmp = NULL;
	struct qede_vlan_entry *vlan;
	struct ecore_filter_ucast ucast;

		if (qdev->configured_vlans == dev_info->num_vlan_filters) {
			DP_ERR(edev, "Reached max VLAN filter limit,"
				     " enabling accept_any_vlan\n");
			qede_config_accept_any_vlan(qdev, true);

		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id) {
				DP_INFO(edev, "VLAN %u already configured\n",

		vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
				  RTE_CACHE_LINE_SIZE);
			DP_ERR(edev, "Did not allocate memory for VLAN\n");

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_ADD;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,

		SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
		qdev->configured_vlans++;
		DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
			vlan_id, qdev->configured_vlans);
		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id)

		if (qdev->configured_vlans == 0) {
				"No VLAN filters configured yet\n");

			DP_ERR(edev, "VLAN %u not configured\n", vlan_id);

		SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_REMOVE;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",

		qdev->configured_vlans--;
		DP_INFO(edev, "VLAN %u removed, configured_vlans %u\n",
			vlan_id, qdev->configured_vlans);

static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			(void)qede_vlan_stripping(eth_dev, 1);
			(void)qede_vlan_stripping(eth_dev, 0);

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN filtering kicks in when a VLAN is added */
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
			qede_vlan_filter_set(eth_dev, 0, 1);
			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
					" Please remove existing VLAN filters"
					" before disabling VLAN filtering\n");
				/* Signal app that VLAN filtering is still
				 * enabled
				 */
				eth_dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_VLAN_FILTER;
				qede_vlan_filter_set(eth_dev, 0, 0);

	if (mask & ETH_VLAN_EXTEND_MASK)
		DP_ERR(edev, "Extend VLAN not supported\n");

	qdev->vlan_offload_mask = mask;

	DP_INFO(edev, "VLAN offload mask %d\n", mask);

static void qede_prandom_bytes(uint32_t *buff)
	srand((unsigned int)time(NULL));
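	/* Seeding with time() is sufficient here: the generated default key
	 * only needs to spread flows across queues, it is not a security
	 * boundary, and an application can always supply its own key via
	 * rx_adv_conf.rss_conf (see qede_config_rss() below).
	 */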
	for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)

int qede_config_rss(struct rte_eth_dev *eth_dev)
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
	struct rte_eth_rss_reta_entry64 reta_conf[2];
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, id, pos, q;

	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	if (!rss_conf.rss_key) {
		DP_INFO(edev, "Applying driver default key\n");
		rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
		qede_prandom_bytes(&def_rss_key[0]);
		rss_conf.rss_key = (uint8_t *)&def_rss_key[0];

	/* Configure RSS hash */
	if (qede_rss_hash_update(eth_dev, &rss_conf))

	/* Configure default RETA */
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		id = i / RTE_RETA_GROUP_SIZE;
		pos = i % RTE_RETA_GROUP_SIZE;
		q = i % QEDE_RSS_COUNT(eth_dev);
		reta_conf[id].reta[pos] = q;
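		/* e.g. with 4 RX queues, entry i = 70 maps to
		 * reta_conf[1].reta[6] = 70 % 4 = 2, spreading all 128
		 * indirection entries round-robin over the queues.
		 */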
	if (qede_rss_reta_update(eth_dev, &reta_conf[0],
				 ECORE_RSS_IND_TABLE_SIZE))

static void qede_fastpath_start(struct ecore_dev *edev)
	struct ecore_hwfn *p_hwfn;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		ecore_hw_start_fastpath(p_hwfn);

static int qede_dev_start(struct rte_eth_dev *eth_dev)
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

	PMD_INIT_FUNC_TRACE(edev);

	/* Update MTU only if it has changed */
	if (eth_dev->data->mtu != qdev->mtu) {
		if (qede_update_mtu(eth_dev, qdev->mtu))

	/* Configure TPA parameters */
	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		if (qede_enable_tpa(eth_dev, true))
		/* Enable scatter mode for LRO */
		if (!eth_dev->data->scattered_rx)
			rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;

	if (qede_start_queues(eth_dev))

	qede_reset_queue_stats(qdev, true);

	/* Newer SR-IOV PF driver expects RX/TX queues to be started before
	 * enabling RSS. Hence RSS configuration is deferred up to this point.
	 * Also, we would like to retain similar behavior in the PF case, so we
	 * don't do a PF/VF specific check here.
	 */
	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		if (qede_config_rss(eth_dev))

	if (qede_activate_vport(eth_dev, true))

	/* Update link status */
	qede_link_update(eth_dev, 0);

	/* Start/resume traffic */
	qede_fastpath_start(edev);

	DP_INFO(edev, "Device started\n");

	DP_ERR(edev, "Device start failed\n");
	return -1; /* common error code is < 0 */

static void qede_dev_stop(struct rte_eth_dev *eth_dev)
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	if (qede_activate_vport(eth_dev, false))

	if (qdev->enable_lro)
		qede_enable_tpa(eth_dev, false);

	qede_stop_queues(eth_dev);

	/* Disable traffic */
	ecore_hw_stop_fastpath(edev); /* TBD - loop */

	DP_INFO(edev, "Device is stopped\n");

static const char * const valid_args[] = {
	QEDE_NPAR_TX_SWITCHING,
	QEDE_VF_TX_SWITCHING,

static int qede_args_check(const char *key, const char *val, void *opaque)
	struct rte_eth_dev *eth_dev = opaque;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	tmp = strtoul(val, NULL, 0);
		DP_INFO(edev, "%s: \"%s\" is not a valid integer\n", key, val);

	if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
	    ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) {
		qdev->enable_tx_switching = !!tmp;
		DP_INFO(edev, "Configuring %s tx-switching\n",
			strcmp(QEDE_NPAR_TX_SWITCHING, key) ?

static int qede_args(struct rte_eth_dev *eth_dev)
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	struct rte_kvargs *kvlist;
	struct rte_devargs *devargs;

	devargs = pci_dev->device.devargs;
		return 0; /* return success */

	kvlist = rte_kvargs_parse(devargs->args, valid_args);

	/* Process parameters. */
	for (i = 0; (valid_args[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, valid_args[i])) {
			ret = rte_kvargs_process(kvlist, valid_args[i],
						 qede_args_check, eth_dev);
			if (ret != ECORE_SUCCESS) {
				rte_kvargs_free(kvlist);

	rte_kvargs_free(kvlist);

static int qede_dev_configure(struct rte_eth_dev *eth_dev)
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

	PMD_INIT_FUNC_TRACE(edev);

	/* We need to have min 1 RX queue. There is no min check in
	 * rte_eth_dev_configure(), so we are checking it here.
	 */
	if (eth_dev->data->nb_rx_queues == 0) {
		DP_ERR(edev, "At least one RX queue is required\n");

	/* Enable Tx switching by default */
	qdev->enable_tx_switching = 1;

	/* Parse devargs and fix up rxmode */
	if (qede_args(eth_dev))
		DP_NOTICE(edev, false,
			  "Invalid devargs supplied, requested change will not take effect\n");

	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
	      rxmode->mq_mode == ETH_MQ_RX_RSS)) {
		DP_ERR(edev, "Unsupported multi-queue mode\n");

	/* Flow director mode check */
	if (qede_check_fdir_support(eth_dev))

	qede_dealloc_fp_resc(eth_dev);
	qdev->num_tx_queues = eth_dev->data->nb_tx_queues * edev->num_hwfns;
	qdev->num_rx_queues = eth_dev->data->nb_rx_queues * edev->num_hwfns;

	if (qede_alloc_fp_resc(qdev))

	/* If jumbo enabled adjust MTU */
	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;

	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
		eth_dev->data->scattered_rx = 1;

	if (qede_start_vport(qdev, eth_dev->data->mtu))

	qdev->mtu = eth_dev->data->mtu;

	/* Enable VLAN offloads by default */
	ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
					     ETH_VLAN_FILTER_MASK);

	DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
		QEDE_RSS_COUNT(eth_dev), QEDE_TSS_COUNT(eth_dev));

	if (ECORE_IS_CMT(edev))
		DP_INFO(edev, "Actual HW queues for CMT mode - RX = %d TX = %d\n",
			qdev->num_rx_queues, qdev->num_tx_queues);
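	/* e.g. on a CMT (two-hwfn 100G) adapter, an application asking for
	 * 4 RX queues ends up with qdev->num_rx_queues = 8, one HW queue
	 * per hwfn behind each ethdev queue.
	 */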
/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_align = 128 /* lowest common multiple */

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
	.nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET

qede_dev_info_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_dev_info *dev_info)
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output link;
	uint32_t speed_cap = 0;

	PMD_INIT_FUNC_TRACE(edev);

	dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
	dev_info->rx_desc_lim = qede_rx_desc_lim;
	dev_info->tx_desc_lim = qede_tx_desc_lim;

		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
	/* Since CMT mode internally doubles the number of queues */
	if (ECORE_IS_CMT(edev))
		dev_info->max_rx_queues = dev_info->max_rx_queues / 2;

	dev_info->max_tx_queues = dev_info->max_rx_queues;

	dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
	dev_info->max_vfs = 0;
	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_UDP_CKSUM |
				     DEV_RX_OFFLOAD_TCP_CKSUM |
				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_TCP_LRO |
				     DEV_RX_OFFLOAD_KEEP_CRC |
				     DEV_RX_OFFLOAD_SCATTER |
				     DEV_RX_OFFLOAD_JUMBO_FRAME |
				     DEV_RX_OFFLOAD_VLAN_FILTER |
				     DEV_RX_OFFLOAD_VLAN_STRIP);
	dev_info->rx_queue_offload_capa = 0;

	/* TX offloads are on a per-packet basis, so they are applicable
	 * at both the port and queue levels.
	 */
	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
				     DEV_TX_OFFLOAD_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_UDP_CKSUM |
				     DEV_TX_OFFLOAD_TCP_CKSUM |
				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_MULTI_SEGS |
				     DEV_TX_OFFLOAD_TCP_TSO |
				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
	dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		/* Packets are always dropped if no descriptors are available */

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		speed_cap |= ETH_LINK_SPEED_1G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		speed_cap |= ETH_LINK_SPEED_10G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		speed_cap |= ETH_LINK_SPEED_25G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		speed_cap |= ETH_LINK_SPEED_40G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		speed_cap |= ETH_LINK_SPEED_50G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		speed_cap |= ETH_LINK_SPEED_100G;
	dev_info->speed_capa = speed_cap;

/* return 0 means link status changed, -1 means not changed */
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output q_link;
	struct rte_eth_link link;
	uint16_t link_duplex;

	memset(&q_link, 0, sizeof(q_link));
	memset(&link, 0, sizeof(link));

	qdev->ops->common->get_link(edev, &q_link);

	link.link_speed = q_link.speed;

	switch (q_link.duplex) {
	case QEDE_DUPLEX_HALF:
		link_duplex = ETH_LINK_HALF_DUPLEX;
	case QEDE_DUPLEX_FULL:
		link_duplex = ETH_LINK_FULL_DUPLEX;
	case QEDE_DUPLEX_UNKNOWN:
	link.link_duplex = link_duplex;

	link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;

	link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
			    ETH_LINK_AUTONEG : ETH_LINK_FIXED;

	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
		link.link_speed, link.link_duplex,
		link.link_autoneg, link.link_status);

	return rte_eth_linkstatus_set(eth_dev, &link);

static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;

	PMD_INIT_FUNC_TRACE(edev);

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	qed_configure_filter_rx_mode(eth_dev, type);

static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);

static void qede_poll_sp_sb_cb(void *param)
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	qede_interrupt_action(&edev->hwfns[1]);

	rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
		DP_ERR(edev, "Unable to start periodic"
			     " timer rc %d\n", rc);

static void qede_dev_close(struct rte_eth_dev *eth_dev)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* dev_stop() shall clean up fp resources in hw, but without releasing
	 * DMA memories and sw structures, so that dev_start() can be called
	 * by the app without reconfiguration. However, in dev_close() we can
	 * release all the resources and the device can be brought up afresh.
	 */
	if (eth_dev->data->dev_started)
		qede_dev_stop(eth_dev);

	qede_stop_vport(edev);
	qdev->vport_started = false;
	qede_fdir_dealloc_resc(eth_dev);
	qede_dealloc_fp_resc(eth_dev);

	eth_dev->data->nb_rx_queues = 0;
	eth_dev->data->nb_tx_queues = 0;

	/* Bring the link down */
	qede_dev_set_link_state(eth_dev, false);
	qdev->ops->common->slowpath_stop(edev);
	qdev->ops->common->remove(edev);
	rte_intr_disable(&pci_dev->intr_handle);

	switch (pci_dev->intr_handle.type) {
	case RTE_INTR_HANDLE_UIO_INTX:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		rte_intr_callback_unregister(&pci_dev->intr_handle,
					     qede_interrupt_handler_intx,
		rte_intr_callback_unregister(&pci_dev->intr_handle,
					     qede_interrupt_handler,

	if (ECORE_IS_CMT(edev))
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);

qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	unsigned int i = 0, j = 0, qid, idx, hw_fn;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	ecore_get_vport_stats(edev, &stats);

	eth_stats->ipackets = stats.common.rx_ucast_pkts +
	    stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;

	eth_stats->ibytes = stats.common.rx_ucast_bytes +
	    stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;

	eth_stats->ierrors = stats.common.rx_crc_errors +
	    stats.common.rx_align_errors +
	    stats.common.rx_carrier_errors +
	    stats.common.rx_oversize_packets +
	    stats.common.rx_jabbers + stats.common.rx_undersize_packets;

	eth_stats->rx_nombuf = stats.common.no_buff_discards;

	eth_stats->imissed = stats.common.mftag_filter_discards +
	    stats.common.mac_filter_discards +
	    stats.common.no_buff_discards +
	    stats.common.brb_truncates + stats.common.brb_discards;

	eth_stats->opackets = stats.common.tx_ucast_pkts +
	    stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;

	eth_stats->obytes = stats.common.tx_ucast_bytes +
	    stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;

	eth_stats->oerrors = stats.common.tx_err_drop_pkts;

	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(eth_dev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(eth_dev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	if (rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(eth_dev) ||
	    txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(eth_dev))
		DP_VERBOSE(edev, ECORE_MSG_DEBUG,
			   "Not all the queue stats will be displayed. Set"
			   " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
			   " appropriately and retry.\n");
	for (qid = 0; qid < eth_dev->data->nb_rx_queues; qid++) {
		eth_stats->q_ipackets[i] = 0;
		eth_stats->q_errors[i] = 0;

		for_each_hwfn(edev, hw_fn) {
			idx = qid * edev->num_hwfns + hw_fn;

			eth_stats->q_ipackets[i] +=
				(((char *)(qdev->fp_array[idx].rxq)) +
				 offsetof(struct qede_rx_queue,
			eth_stats->q_errors[i] +=
				(((char *)(qdev->fp_array[idx].rxq)) +
				 offsetof(struct qede_rx_queue,
				(((char *)(qdev->fp_array[idx].rxq)) +
				 offsetof(struct qede_rx_queue,

		if (i == rxq_stat_cntrs)

	for (qid = 0; qid < eth_dev->data->nb_tx_queues; qid++) {
		eth_stats->q_opackets[j] = 0;

		for_each_hwfn(edev, hw_fn) {
			idx = qid * edev->num_hwfns + hw_fn;

			txq = qdev->fp_array[idx].txq;
			eth_stats->q_opackets[j] +=
				*((uint64_t *)(uintptr_t)
				  (((uint64_t)(uintptr_t)(txq)) +
				   offsetof(struct qede_tx_queue,

		if (j == txq_stat_cntrs)

qede_get_xstats_count(struct qede_dev *qdev)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;

	if (ECORE_IS_BB(&qdev->edev))
		return RTE_DIM(qede_xstats_strings) +
		       RTE_DIM(qede_bb_xstats_strings) +
		       (RTE_DIM(qede_rxq_xstats_strings) *
			QEDE_RSS_COUNT(dev) * qdev->edev.num_hwfns);

	return RTE_DIM(qede_xstats_strings) +
	       RTE_DIM(qede_ah_xstats_strings) +
	       (RTE_DIM(qede_rxq_xstats_strings) *
		QEDE_RSS_COUNT(dev));

qede_get_xstats_names(struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      __rte_unused unsigned int limit)
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	const unsigned int stat_cnt = qede_get_xstats_count(qdev);
	unsigned int i, qid, hw_fn, stat_idx = 0;

	if (xstats_names == NULL)

	for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
		strlcpy(xstats_names[stat_idx].name,
			qede_xstats_strings[i].name,
			sizeof(xstats_names[stat_idx].name));

	if (ECORE_IS_BB(edev)) {
		for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
			strlcpy(xstats_names[stat_idx].name,
				qede_bb_xstats_strings[i].name,
				sizeof(xstats_names[stat_idx].name));
		for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
			strlcpy(xstats_names[stat_idx].name,
				qede_ah_xstats_strings[i].name,
				sizeof(xstats_names[stat_idx].name));

	for (qid = 0; qid < QEDE_RSS_COUNT(dev); qid++) {
		for_each_hwfn(edev, hw_fn) {
			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					 RTE_ETH_XSTATS_NAME_SIZE,
					 qede_rxq_xstats_strings[i].name,
					 qede_rxq_xstats_strings[i].name + 4);

qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	const unsigned int num = qede_get_xstats_count(qdev);
	unsigned int i, qid, hw_fn, fpidx, stat_idx = 0;

	ecore_get_vport_stats(edev, &stats);

	for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
		xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
						       qede_xstats_strings[i].offset);
		xstats[stat_idx].id = stat_idx;

	if (ECORE_IS_BB(edev)) {
		for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
			xstats[stat_idx].value =
				*(uint64_t *)(((char *)&stats) +
					      qede_bb_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
		for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
			xstats[stat_idx].value =
				*(uint64_t *)(((char *)&stats) +
					      qede_ah_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;

	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
		for_each_hwfn(edev, hw_fn) {
			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
				fpidx = qid * edev->num_hwfns + hw_fn;
				xstats[stat_idx].value = *(uint64_t *)
					(((char *)(qdev->fp_array[fpidx].rxq)) +
					 qede_rxq_xstats_strings[i].offset);
				xstats[stat_idx].id = stat_idx;

qede_reset_xstats(struct rte_eth_dev *dev)
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, true);

int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_params link_params;

	DP_INFO(edev, "setting link state %d\n", link_up);
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = link_up;
	rc = qdev->ops->common->set_link(edev, &link_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(edev, "Unable to set link state %d\n", link_up);

static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
	return qede_dev_set_link_state(eth_dev, true);

static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
	return qede_dev_set_link_state(eth_dev, false);

static void qede_reset_stats(struct rte_eth_dev *eth_dev)
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, false);

static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
	enum qed_filter_rx_mode_type type =
	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

	qed_configure_filter_rx_mode(eth_dev, type);

static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_PROMISC);
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);

qede_set_mc_addr_list(struct rte_eth_dev *eth_dev,
		      struct rte_ether_addr *mc_addrs,
		      uint32_t mc_addrs_num)
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
		DP_ERR(edev, "Reached max multicast filters limit,"
			     " please enable multicast promisc mode\n");

	for (i = 0; i < mc_addrs_num; i++) {
		if (!rte_is_multicast_ether_addr(&mc_addrs[i])) {
			DP_ERR(edev, "Not a valid multicast MAC\n");

	/* Flush all existing entries */
	if (qede_del_mcast_filters(eth_dev))

	/* Set new mcast list */
	return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);

/* Update MTU via vport-update without doing port restart.
 * The vport must be deactivated before calling this API.
 */
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_hwfn *p_hwfn;

		struct ecore_sp_vport_update_params params;

		memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
		params.vport_id = 0;
		params.mtu = mtu;
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			params.opaque_fid = p_hwfn->hw_info.opaque_fid;
			rc = ecore_sp_vport_update(p_hwfn, &params,
					ECORE_SPQ_MODE_EBLOCK, NULL);
			if (rc != ECORE_SUCCESS)

		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
			if (rc == ECORE_INVAL) {
				DP_INFO(edev, "VF MTU Update TLV not supported\n");
				/* Recreate vport */
				rc = qede_start_vport(qdev, mtu);
				if (rc != ECORE_SUCCESS)

				/* Restore config lost due to vport stop */
				if (eth_dev->data->promiscuous)
					qede_promiscuous_enable(eth_dev);
					qede_promiscuous_disable(eth_dev);

				if (eth_dev->data->all_multicast)
					qede_allmulticast_enable(eth_dev);
					qede_allmulticast_disable(eth_dev);

				qede_vlan_offload_set(eth_dev,
						      qdev->vlan_offload_mask);
			} else if (rc != ECORE_SUCCESS) {

	DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);

	DP_ERR(edev, "Failed to update MTU\n");

static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;
	struct qed_link_params params;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
	if (fc_conf->autoneg) {
		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
			DP_ERR(edev, "Autoneg not supported\n");
		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;

	/* Pause is assumed to be supported (SUPPORTED_Pause) */
	if (fc_conf->mode == RTE_FC_FULL)
		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
					QED_LINK_PAUSE_RX_ENABLE);
	if (fc_conf->mode == RTE_FC_TX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
	if (fc_conf->mode == RTE_FC_RX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;

	params.link_up = true;
	(void)qdev->ops->common->set_link(edev, &params);

static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		fc_conf->autoneg = true;

	/* Report FULL only when both directions are enabled */
	if ((current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
					  QED_LINK_PAUSE_TX_ENABLE)) ==
	    (QED_LINK_PAUSE_RX_ENABLE | QED_LINK_PAUSE_TX_ENABLE))
		fc_conf->mode = RTE_FC_FULL;
	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		fc_conf->mode = RTE_FC_TX_PAUSE;
		fc_conf->mode = RTE_FC_NONE;

static const uint32_t *
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_TUNNEL_GENEVE,
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,

	if (eth_dev->rx_pkt_burst == qede_recv_pkts ||
	    eth_dev->rx_pkt_burst == qede_recv_pkts_cmt)

static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
	*rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;

int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_conf *rss_conf)
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params rss_params;
	struct ecore_hwfn *p_hwfn;
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint64_t hf = rss_conf->rss_hf;
	uint8_t len = rss_conf->rss_key_len;
	uint8_t idx, i, j, fpidx;

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	memset(&rss_params, 0, sizeof(rss_params));

	DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
		(unsigned long)hf, len, key);

		DP_INFO(edev, "Enabling RSS\n");

		qede_init_rss_caps(&rss_params.rss_caps, hf);
		rss_params.update_rss_capabilities = 1;

			if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
				DP_ERR(edev, "RSS key length exceeds limit\n");
			DP_INFO(edev, "Applying user supplied hash key\n");
			rss_params.update_rss_key = 1;
			memcpy(&rss_params.rss_key, key, len);
		rss_params.rss_enable = 1;

	rss_params.update_rss_config = 1;
	/* tbl_size has to be set with capabilities */
	rss_params.rss_table_size_log = 7;
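	/* i.e. 2^7 = 128 entries, matching ECORE_RSS_IND_TABLE_SIZE */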
2015 vport_update_params.vport_id = 0;
2017 for_each_hwfn(edev, i) {
2018 /* pass the L2 handles instead of qids */
2019 for (j = 0 ; j < ECORE_RSS_IND_TABLE_SIZE ; j++) {
2020 idx = j % QEDE_RSS_COUNT(eth_dev);
2021 fpidx = idx * edev->num_hwfns + i;
2022 rss_params.rss_ind_table[j] =
2023 qdev->fp_array[fpidx].rxq->handle;
2026 vport_update_params.rss_params = &rss_params;
2028 p_hwfn = &edev->hwfns[i];
2029 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2030 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
2031 ECORE_SPQ_MODE_EBLOCK, NULL);
2033 DP_ERR(edev, "vport-update for RSS failed\n");
2037 qdev->rss_enable = rss_params.rss_enable;
2039 /* Update local structure for hash query */
2040 qdev->rss_conf.rss_hf = hf;
2041 qdev->rss_conf.rss_key_len = len;
2042 if (qdev->rss_enable) {
2043 if (qdev->rss_conf.rss_key == NULL) {
2044 qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
2045 if (qdev->rss_conf.rss_key == NULL) {
2046 DP_ERR(edev, "No memory to store RSS key\n");
2051 DP_INFO(edev, "Storing RSS key\n");
2052 memcpy(qdev->rss_conf.rss_key, key, len);
2054 } else if (!qdev->rss_enable && len == 0) {
2055 if (qdev->rss_conf.rss_key) {
2056 free(qdev->rss_conf.rss_key);
2057 qdev->rss_conf.rss_key = NULL;
2058 DP_INFO(edev, "Free RSS key\n");
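/*
 * Illustrative application-side usage (a hedged sketch, not driver code):
 * the handler above is reached through the ethdev API, e.g.
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = NULL,	// keep the current key
 *		.rss_key_len = 0,
 *		.rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
 *	};
 *	rte_eth_dev_rss_hash_update(port_id, &conf);
 *
 * "port_id" is assumed to identify a port bound to this PMD.
 */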
static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
				  struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);

	rss_conf->rss_hf = qdev->rss_conf.rss_hf;
	rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;

	if (rss_conf->rss_key && qdev->rss_conf.rss_key)
		memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
		       rss_conf->rss_key_len);

	return 0;
}
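/*
 * Editorial note: the query simply mirrors the locally cached rss_conf;
 * callers must supply an rss_key buffer of at least rss_key_len bytes, or
 * a NULL key to fetch only the hash flags and key length.
 */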
int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params *params;
	uint16_t i, j, idx, fid, shift;
	struct ecore_hwfn *p_hwfn;
	uint8_t entry;
	int rc = 0;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
		       reta_size);
		return -EINVAL;
	}

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	params = rte_zmalloc("qede_rss", sizeof(*params), RTE_CACHE_LINE_SIZE);
	if (params == NULL) {
		DP_ERR(edev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	params->update_rss_ind_table = 1;
	params->rss_table_size_log = 7;
	params->update_rss_config = 1;

	vport_update_params.vport_id = 0;
	/* Use the current value of rss_enable */
	params->rss_enable = qdev->rss_enable;
	vport_update_params.rss_params = params;

	for_each_hwfn(edev, i) {
		for (j = 0; j < reta_size; j++) {
			idx = j / RTE_RETA_GROUP_SIZE;
			shift = j % RTE_RETA_GROUP_SIZE;
			if (reta_conf[idx].mask & (1ULL << shift)) {
				entry = reta_conf[idx].reta[shift];
				fid = entry * edev->num_hwfns + i;
				/* Pass rxq handles to ecore */
				params->rss_ind_table[j] =
					qdev->fp_array[fid].rxq->handle;
				/* Update the local copy for RETA query cmd */
				qdev->rss_ind_table[j] = entry;
			}
		}

		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			goto out;
		}
	}

out:
	rte_free(params);
	return rc;
}
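/*
 * Illustrative application-side usage (a hedged sketch, not driver code):
 * spread a 128-entry redirection table across the first two Rx queues of
 * "port_id" (assumed valid).
 *
 *	struct rte_eth_rss_reta_entry64 reta[2];
 *	uint16_t i;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < 128; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % 2;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, 128);
 */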
static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t i, idx, shift;
	uint8_t entry;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported\n",
		       reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = qdev->rss_ind_table[i];
			reta_conf[idx].reta[shift] = entry;
		}
	}

	return 0;
}
static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_dev_info dev_info = {0};
	struct qede_fastpath *fp;
	uint32_t max_rx_pkt_len;
	uint32_t frame_size;
	uint16_t bufsz;
	bool restart = false;
	int i, rc;

	PMD_INIT_FUNC_TRACE(edev);
	qede_dev_info_get(dev, &dev_info);
	max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
	frame_size = max_rx_pkt_len;
	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {
		DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
		       mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN -
		       QEDE_ETH_OVERHEAD);
		return -EINVAL;
	}
	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
			dev->data->min_rx_buf_size);
		return -EINVAL;
	}
	/* Temporarily replace I/O functions with dummy ones. It cannot
	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
	 */
	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
	if (dev->data->dev_started) {
		dev->data->dev_started = 0;
		qede_dev_stop(dev);
		restart = true;
	}
	rte_delay_ms(1000);
	qdev->new_mtu = mtu;

	/* Fix up RX buf size for all queues of the port */
	for (i = 0; i < qdev->num_rx_queues; i++) {
		fp = &qdev->fp_array[i];
		if (fp->rxq != NULL) {
			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
			/* cache align the mbuf size to simplify rx_buf_size
			 * calculation
			 */
			bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
			rc = qede_calc_rx_buf_size(dev, bufsz, frame_size);
			if (rc < 0)
				return rc;

			fp->rxq->rx_buf_size = rc;
		}
	}
	if (max_rx_pkt_len > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	if (!dev->data->dev_started && restart) {
		qede_dev_start(dev);
		dev->data->dev_started = 1;
	}

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;

	/* Reinstate the real Rx/Tx handlers */
	if (ECORE_IS_CMT(edev)) {
		dev->rx_pkt_burst = qede_recv_pkts_cmt;
		dev->tx_pkt_burst = qede_xmit_pkts_cmt;
	} else {
		dev->rx_pkt_burst = qede_recv_pkts;
		dev->tx_pkt_burst = qede_xmit_pkts;
	}

	return 0;
}
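/*
 * Illustrative application-side usage (a hedged sketch): the MTU change
 * above is driven through rte_eth_dev_set_mtu(), e.g.
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *		rte_exit(EXIT_FAILURE, "Failed to set MTU\n");
 *
 * The value 9000 is only an example; the range check at the top of
 * qede_set_mtu() enforces the port's real max_rx_pktlen limit.
 */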
static int
qede_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = qede_eth_dev_uninit(dev);
	if (ret)
		return ret;

	return qede_eth_dev_init(dev);
}
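/*
 * Editorial note: rte_eth_dev_reset() lands here; the reset is implemented
 * as a full uninit/init cycle, so the application must reconfigure queues
 * and restart the port afterwards, as with any freshly probed device.
 */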
static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.rx_descriptor_status = qede_rx_descriptor_status,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_reset = qede_dev_reset,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.filter_ctrl = qede_dev_filter_ctrl,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
};
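/*
 * Editorial note: the VF table below is identical except that ops requiring
 * PF privileges are left out (flow_ctrl_set/flow_ctrl_get and filter_ctrl).
 */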
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.rx_descriptor_status = qede_rx_descriptor_status,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_reset = qede_dev_reset,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
};
static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(edev, &pf_params);
}
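/*
 * Common probe-time initialization for PF and VF devices. Roughly: register
 * the interrupt handler, start the slowpath, query device info, allocate
 * the MAC filter table, and seed default tunnel-filter state. This summary
 * is editorial; the step-by-step comments below follow the code itself.
 */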
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[RTE_ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	uint32_t int_mode;
	int rc = 0;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	adapter->ethdev = eth_dev;
	edev = &adapter->edev;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	pci_addr = pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);

	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_ERR(edev, "Skipping device init from secondary process\n");
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* @DPDK */
	edev->vendor_id = pci_dev->id.vendor_id;
	edev->device_id = pci_dev->id.device_id;

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		return -EINVAL;
	}

	DP_INFO(edev, "Starting qede probe\n");
	rc = qed_ops->common->probe(edev, pci_dev, dp_module,
				    dp_level, is_vf);
	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		return -ENODEV;
	}
	qede_update_pf_params(edev);

	switch (pci_dev->intr_handle.type) {
	case RTE_INTR_HANDLE_UIO_INTX:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		int_mode = ECORE_INT_MODE_INTA;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler_intx,
					   (void *)eth_dev);
		break;
	default:
		int_mode = ECORE_INT_MODE_MSIX;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler,
					   (void *)eth_dev);
	}

	if (rte_intr_enable(&pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		return -ENODEV;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));

	params.int_mode = int_mode;
	params.drv_major = QEDE_PMD_VERSION_MAJOR;
	params.drv_minor = QEDE_PMD_VERSION_MINOR;
	params.drv_rev = QEDE_PMD_VERSION_REVISION;
	params.drv_eng = QEDE_PMD_VERSION_PATCH;
	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
		QEDE_PMD_DRV_VER_STR_SIZE);

	if (ECORE_IS_CMT(edev)) {
		eth_dev->rx_pkt_burst = qede_recv_pkts_cmt;
		eth_dev->tx_pkt_burst = qede_xmit_pkts_cmt;
	} else {
		eth_dev->rx_pkt_burst = qede_recv_pkts;
		eth_dev->tx_pkt_burst = qede_xmit_pkts;
	}

	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;

	/* For CMT mode device do periodic polling for slowpath events.
	 * This is required since uio device uses only one MSI-x
	 * interrupt vector but we need one for each engine.
	 */
	if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
		rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
				       qede_poll_sp_sb_cb,
				       (void *)eth_dev);
		if (rc != 0) {
			DP_ERR(edev, "Unable to start periodic"
				     " timer rc %d\n", rc);
			return -EINVAL;
		}
	}

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	qede_alloc_etherdev(adapter, &dev_info);

	adapter->ops->common->set_name(edev, edev->name);

	if (!is_vf)
		adapter->dev_info.num_mac_filters =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					   ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
			(uint32_t *)&adapter->dev_info.num_mac_filters);

	/* Allocate memory for storing MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					(RTE_ETHER_ADDR_LEN *
					adapter->dev_info.num_mac_filters),
					RTE_CACHE_LINE_SIZE);

	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENOMEM;
	}

	if (!is_vf) {
		rte_ether_addr_copy((struct rte_ether_addr *)edev->hwfns[0].
				hw_info.hw_mac_addr,
				&eth_dev->data->mac_addrs[0]);
		rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
				&adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				rte_ether_addr_copy(
					(struct rte_ether_addr *)&vf_mac,
					&eth_dev->data->mac_addrs[0]);
				rte_ether_addr_copy(
					&eth_dev->data->mac_addrs[0],
					&adapter->primary_mac);
			} else {
				DP_ERR(edev, "No VF macaddr assigned\n");
			}
		}
	}

	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;

	if (do_once) {
		qede_print_adapter_info(adapter);
		do_once = false;
	}

	/* Bring-up the link */
	qede_dev_set_link_state(eth_dev, true);

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	SLIST_INIT(&adapter->arfs_info.arfs_list_head);
	SLIST_INIT(&adapter->vlan_list_head);
	SLIST_INIT(&adapter->uc_list_head);
	SLIST_INIT(&adapter->mc_list_head);
	adapter->mtu = RTE_ETHER_MTU;
	adapter->vport_started = false;

	/* VF tunnel offloads are enabled by default in the PF driver */
	adapter->vxlan.num_filters = 0;
	adapter->geneve.num_filters = 0;
	adapter->ipgre.num_filters = 0;
	if (is_vf) {
		adapter->vxlan.enable = true;
		adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
					     ETH_TUNNEL_FILTER_IVLAN;
		adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
		adapter->geneve.enable = true;
		adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
					      ETH_TUNNEL_FILTER_IVLAN;
		adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
		adapter->ipgre.enable = true;
		adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
					     ETH_TUNNEL_FILTER_IVLAN;
	} else {
		adapter->vxlan.enable = false;
		adapter->geneve.enable = false;
		adapter->ipgre.enable = false;
	}

	DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
		adapter->primary_mac.addr_bytes[0],
		adapter->primary_mac.addr_bytes[1],
		adapter->primary_mac.addr_bytes[2],
		adapter->primary_mac.addr_bytes[3],
		adapter->primary_mac.addr_bytes[4],
		adapter->primary_mac.addr_bytes[5]);

	DP_INFO(edev, "Device initialized\n");

	return 0;
}
static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 0);
}

static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}
static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
	},
	{.vendor_id = 0,}
};

static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
	},
	{.vendor_id = 0,}
};
static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qedevf_eth_dev_init);
}

static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
}

static struct rte_pci_driver rte_qedevf_pmd = {
	.id_table = pci_id_qedevf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qedevf_eth_dev_pci_probe,
	.remove = qedevf_eth_dev_pci_remove,
};

static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qede_eth_dev_init);
}

static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
}

static struct rte_pci_driver rte_qede_pmd = {
	.id_table = pci_id_qede_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qede_eth_dev_pci_probe,
	.remove = qede_eth_dev_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
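/*
 * Editorial note: the kernel-module dependencies above mean the device must
 * be bound to igb_uio, uio_pci_generic, or vfio-pci (igb_uio or vfio-pci for
 * VFs) before the PMD can claim it, e.g. with usertools/dpdk-devbind.py:
 *
 *	dpdk-devbind.py --bind=vfio-pci 0000:3b:00.0
 *
 * The PCI address is only an example.
 */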
RTE_INIT(qede_init_log)
{
	qede_logtype_init = rte_log_register("pmd.net.qede.init");
	if (qede_logtype_init >= 0)
		rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
	qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
	if (qede_logtype_driver >= 0)
		rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
}
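/*
 * Editorial note: both logtypes default to NOTICE; they can be raised at
 * run time with the EAL --log-level option, e.g.
 * --log-level=pmd.net.qede.driver,debug (the exact level syntax varies
 * slightly across DPDK releases).
 */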