 * Copyright (c) 2016 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.

#include "qede_ethdev.h"
#include <rte_alarm.h>
#include <rte_version.h>

static const struct qed_eth_ops *qed_ops;
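/* Note: slow-path status-block polling period, in seconds. It is used to
 * re-arm the rte_eal_alarm callback (qede_poll_sp_sb_cb) that services the
 * second hwfn on CMT/100G devices, which has no interrupt of its own.
 */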
static int64_t timer_period = 1;
/* VXLAN tunnel classification mapping */
const struct _qede_vxlan_tunn_types {
        uint16_t rte_filter_type;
        enum ecore_filter_ucast_type qede_type;
        enum ecore_tunn_clss qede_tunn_clss;
} qede_tunn_types[] = {
        ETH_TUNNEL_FILTER_OMAC,
        ECORE_TUNN_CLSS_MAC_VLAN,

        ETH_TUNNEL_FILTER_TENID,
        ECORE_TUNN_CLSS_MAC_VNI,

        ETH_TUNNEL_FILTER_IMAC,
        ECORE_FILTER_INNER_MAC,
        ECORE_TUNN_CLSS_INNER_MAC_VLAN,

        ETH_TUNNEL_FILTER_IVLAN,
        ECORE_FILTER_INNER_VLAN,
        ECORE_TUNN_CLSS_INNER_MAC_VLAN,

        ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
        ECORE_FILTER_MAC_VNI_PAIR,
        ECORE_TUNN_CLSS_MAC_VNI,

        ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
        "outer-mac and inner-mac"

        ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
        "outer-mac and inner-vlan"

        ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
        ECORE_FILTER_INNER_MAC_VNI_PAIR,
        ECORE_TUNN_CLSS_INNER_MAC_VNI,

        ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,

        ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
        ECORE_FILTER_INNER_PAIR,
        ECORE_TUNN_CLSS_INNER_MAC_VLAN,
        "inner-mac and inner-vlan",

        ETH_TUNNEL_FILTER_OIP,

        ETH_TUNNEL_FILTER_IIP,

        RTE_TUNNEL_FILTER_IMAC_IVLAN,

        RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,

        RTE_TUNNEL_FILTER_IMAC_TENID,

        RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
struct rte_qede_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
        {"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
        {"rx_multicast_bytes",
                offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
        {"rx_broadcast_bytes",
                offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
        {"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
        {"rx_multicast_packets",
                offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
        {"rx_broadcast_packets",
                offsetof(struct ecore_eth_stats, rx_bcast_pkts)},

        {"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
        {"tx_multicast_bytes",
                offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
        {"tx_broadcast_bytes",
                offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
        {"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
        {"tx_multicast_packets",
                offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
        {"tx_broadcast_packets",
                offsetof(struct ecore_eth_stats, tx_bcast_pkts)},

        {"rx_64_byte_packets",
                offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
        {"rx_65_to_127_byte_packets",
                offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
        {"rx_128_to_255_byte_packets",
                offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
        {"rx_256_to_511_byte_packets",
                offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
        {"rx_512_to_1023_byte_packets",
                offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
        {"rx_1024_to_1518_byte_packets",
                offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
        {"rx_1519_to_1522_byte_packets",
                offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
        {"rx_1519_to_2047_byte_packets",
                offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
        {"rx_2048_to_4095_byte_packets",
                offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
        {"rx_4096_to_9216_byte_packets",
                offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
        {"rx_9217_to_16383_byte_packets",
                offsetof(struct ecore_eth_stats,
                         rx_9217_to_16383_byte_packets)},
        {"tx_64_byte_packets",
                offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
        {"tx_65_to_127_byte_packets",
                offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
        {"tx_128_to_255_byte_packets",
                offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
        {"tx_256_to_511_byte_packets",
                offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
        {"tx_512_to_1023_byte_packets",
                offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
        {"tx_1024_to_1518_byte_packets",
                offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
185 {"trx_1519_to_1522_byte_packets",
186 offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
187 {"tx_2048_to_4095_byte_packets",
188 offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
189 {"tx_4096_to_9216_byte_packets",
190 offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
191 {"tx_9217_to_16383_byte_packets",
192 offsetof(struct ecore_eth_stats,
193 tx_9217_to_16383_byte_packets)},
195 {"rx_mac_crtl_frames",
196 offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
197 {"tx_mac_control_frames",
198 offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
199 {"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
200 {"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
201 {"rx_priority_flow_control_frames",
202 offsetof(struct ecore_eth_stats, rx_pfc_frames)},
203 {"tx_priority_flow_control_frames",
204 offsetof(struct ecore_eth_stats, tx_pfc_frames)},
206 {"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
207 {"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
208 {"rx_carrier_errors",
209 offsetof(struct ecore_eth_stats, rx_carrier_errors)},
210 {"rx_oversize_packet_errors",
211 offsetof(struct ecore_eth_stats, rx_oversize_packets)},
212 {"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
213 {"rx_undersize_packet_errors",
214 offsetof(struct ecore_eth_stats, rx_undersize_packets)},
215 {"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
216 {"rx_host_buffer_not_available",
217 offsetof(struct ecore_eth_stats, no_buff_discards)},
218 /* Number of packets discarded because they are bigger than MTU */
219 {"rx_packet_too_big_discards",
220 offsetof(struct ecore_eth_stats, packet_too_big_discard)},
221 {"rx_ttl_zero_discards",
222 offsetof(struct ecore_eth_stats, ttl0_discard)},
223 {"rx_multi_function_tag_filter_discards",
224 offsetof(struct ecore_eth_stats, mftag_filter_discards)},
225 {"rx_mac_filter_discards",
226 offsetof(struct ecore_eth_stats, mac_filter_discards)},
227 {"rx_hw_buffer_truncates",
228 offsetof(struct ecore_eth_stats, brb_truncates)},
229 {"rx_hw_buffer_discards",
230 offsetof(struct ecore_eth_stats, brb_discards)},
231 {"tx_lpi_entry_count",
232 offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
233 {"tx_total_collisions",
234 offsetof(struct ecore_eth_stats, tx_total_collisions)},
235 {"tx_error_drop_packets",
236 offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},
238 {"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
239 {"rx_mac_unicast_packets",
240 offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
241 {"rx_mac_multicast_packets",
242 offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
243 {"rx_mac_broadcast_packets",
244 offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
246 offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
247 {"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
248 {"tx_mac_unicast_packets",
249 offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
250 {"tx_mac_multicast_packets",
251 offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
252 {"tx_mac_broadcast_packets",
253 offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},
255 {"lro_coalesced_packets",
256 offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
257 {"lro_coalesced_events",
258 offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
260 offsetof(struct ecore_eth_stats, tpa_aborts_num)},
261 {"lro_not_coalesced_packets",
262 offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
263 {"lro_coalesced_bytes",
264 offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
                offsetof(struct qede_rx_queue, rx_segs)},
                offsetof(struct qede_rx_queue, rx_hw_errors)},
        {"rx_q_allocation_errors",
                offsetof(struct qede_rx_queue, rx_alloc_errors)}
static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
        ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));

qede_interrupt_handler(void *param)
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        qede_interrupt_action(ECORE_LEADING_HWFN(edev));
        if (rte_intr_enable(eth_dev->intr_handle))
                DP_ERR(edev, "rte_intr_enable failed\n");
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
        rte_memcpy(&qdev->dev_info, info, sizeof(*info));
#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
static void qede_print_adapter_info(struct qede_dev *qdev)
        struct ecore_dev *edev = &qdev->edev;
        struct qed_dev_info *info = &qdev->dev_info.common;
        static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
        static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];

        DP_INFO(edev, "*********************************\n");
        DP_INFO(edev, " DPDK version:%s\n", rte_version());
        DP_INFO(edev, " Chip details : %s%d\n",
                ECORE_IS_BB(edev) ? "BB" : "AH",
                CHIP_REV_IS_A0(edev) ? 0 : 1);
        snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
                 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
        snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
                 ver_str, QEDE_PMD_VERSION);
        DP_INFO(edev, " Driver version : %s\n", drv_ver);
        DP_INFO(edev, " Firmware version : %s\n", ver_str);

        snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
                 (info->mfw_rev >> 24) & 0xff,
                 (info->mfw_rev >> 16) & 0xff,
                 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
        DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
        DP_INFO(edev, " Firmware file : %s\n", fw_file);
        DP_INFO(edev, "*********************************\n");
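/* Note: starts the L2 vport on every hwfn with firmware placement disabled
 * and the given MTU, then resets the vport statistics. A failure on any
 * hwfn aborts the bring-up.
 */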
qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_start_params params;
        struct ecore_hwfn *p_hwfn;

        memset(&params, 0, sizeof(params));

        /* @DPDK - Disable FW placement */
        params.zero_placement_offset = 1;
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                params.concrete_fid = p_hwfn->hw_info.concrete_fid;
                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_start(p_hwfn, &params);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Start V-PORT failed %d\n", rc);

        ecore_reset_vport_stats(edev);
        DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
qede_stop_vport(struct ecore_dev *edev)
        struct ecore_hwfn *p_hwfn;

        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
/* Activate or deactivate vport via vport-update */
int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params params;
        struct ecore_hwfn *p_hwfn;

        memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
        params.update_vport_active_rx_flg = 1;
        params.update_vport_active_tx_flg = 1;
        params.vport_active_rx_flg = flg;
        params.vport_active_tx_flg = flg;
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &params,
                                           ECORE_SPQ_MODE_EBLOCK, NULL);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Failed to update vport\n");

        DP_INFO(edev, "vport %s\n", flg ? "activated" : "deactivated");
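/* Note: helper for qede_enable_tpa(). Fills the SGE TPA (LRO) parameters
 * of a vport-update: aggregation is enabled for plain IPv4/IPv6 only (not
 * for tunnelled traffic) and runs in packet-split mode, one BD per segment.
 */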
qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
                           uint16_t mtu, bool enable)
        /* Enable LRO in split mode */
        sge_tpa_params->tpa_ipv4_en_flg = enable;
        sge_tpa_params->tpa_ipv6_en_flg = enable;
        sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
        sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
        /* set if tpa enable changes */
        sge_tpa_params->update_tpa_en_flg = 1;
        /* set if tpa parameters should be handled */
        sge_tpa_params->update_tpa_param_flg = enable;

        sge_tpa_params->max_buffers_per_cqe = 20;
        /* Enable TPA in split mode. In this mode each TPA segment
         * starts on a new BD, so there is one BD per segment.
         */
        sge_tpa_params->tpa_pkt_split_flg = 1;
        sge_tpa_params->tpa_hdr_data_split_flg = 0;
        sge_tpa_params->tpa_gro_consistent_flg = 0;
        sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
        sge_tpa_params->tpa_max_size = 0x7FFF;
        sge_tpa_params->tpa_min_size_to_start = mtu / 2;
        sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
/* Enable/disable LRO via vport-update */
int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params params;
        struct ecore_sge_tpa_params tpa_params;
        struct ecore_hwfn *p_hwfn;

        memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
        memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
        qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
        params.sge_tpa_params = &tpa_params;
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &params,
                                           ECORE_SPQ_MODE_EBLOCK, NULL);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Failed to update LRO\n");

        DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
/* Update MTU via vport-update without doing port restart.
 * The vport must be deactivated before calling this API.
 */
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params params;
        struct ecore_hwfn *p_hwfn;

        memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &params,
                                           ECORE_SPQ_MODE_EBLOCK, NULL);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Failed to update MTU\n");

        DP_INFO(edev, "MTU updated to %u\n", mtu);
static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
        memset(ucast, 0, sizeof(struct ecore_filter_ucast));
        ucast->is_rx_filter = true;
        ucast->is_tx_filter = true;
        /* ucast->assert_on_error = true; - For debug */
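/* Note: programs the RX/TX accept filters for the requested mode. Matched
 * unicast and multicast are always accepted; promiscuous mode additionally
 * accepts unmatched unicast (on TX too, for a VF), and multicast-promiscuous
 * accepts unmatched multicast.
 */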
qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
                             enum qed_filter_rx_mode_type type)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_filter_accept_flags flags;

        memset(&flags, 0, sizeof(flags));
        flags.update_rx_mode_config = 1;
        flags.update_tx_mode_config = 1;
        flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
                                 ECORE_ACCEPT_MCAST_MATCHED |
        flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
                                 ECORE_ACCEPT_MCAST_MATCHED |

        if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
                flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
                        flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
                        DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
        } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
                flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
        } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
                            QED_FILTER_RX_MODE_TYPE_PROMISC)) {
                flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
                                          ECORE_ACCEPT_MCAST_UNMATCHED;

        return ecore_filter_accept_cmd(edev, 0, flags, false, false,
                                       ECORE_SPQ_MODE_CB, NULL);
static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
                                    uint8_t clss, bool mode, bool mask)
        memset(p_tunn, 0, sizeof(struct ecore_tunnel_info));
        p_tunn->vxlan.b_update_mode = mode;
        p_tunn->vxlan.b_mode_enabled = mask;
        p_tunn->b_update_rx_cls = true;
        p_tunn->b_update_tx_cls = true;
        p_tunn->vxlan.tun_cls = clss;
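/* Note: maintains a shadow list of configured unicast MAC/VLAN/VNI filters
 * so that duplicate additions are rejected and removals can verify that the
 * entry actually exists before touching the list.
 */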
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_ucast_entry *tmp = NULL;
        struct qede_ucast_entry *u;
        struct ether_addr *mac_addr;

        mac_addr = (struct ether_addr *)ucast->mac;
                SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
                        if ((memcmp(mac_addr, &tmp->mac,
                                    ETHER_ADDR_LEN) == 0) &&
                            ucast->vni == tmp->vni &&
                            ucast->vlan == tmp->vlan) {
                                DP_ERR(edev, "Unicast MAC is already added"
                                       " with vlan = %u, vni = %u\n",
                                       ucast->vlan, ucast->vni);
                u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
                               RTE_CACHE_LINE_SIZE);
                        DP_ERR(edev, "Failed to allocate memory for ucast\n");
                ether_addr_copy(mac_addr, &u->mac);
                u->vlan = ucast->vlan;
                SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);

                SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
                        if ((memcmp(mac_addr, &tmp->mac,
                                    ETHER_ADDR_LEN) == 0) &&
                            ucast->vlan == tmp->vlan &&
                            ucast->vni == tmp->vni)
                        DP_INFO(edev, "Unicast MAC is not found\n");
                SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ether_addr *mac_addr;
        struct qede_mcast_entry *tmp = NULL;
        struct qede_mcast_entry *m;

        mac_addr = (struct ether_addr *)mcast->mac;
                SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
                        if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
                                        "Multicast MAC is already added\n");
                m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
                               RTE_CACHE_LINE_SIZE);
                                "Failed to allocate memory for mcast\n");
                ether_addr_copy(mac_addr, &m->mac);
                SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);

                SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
                        if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
                        DP_INFO(edev, "Multicast MAC is not found\n");
                SLIST_REMOVE(&qdev->mc_list_head, tmp,
                             qede_mcast_entry, list);
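/* Note: dispatches a MAC filter request. Multicast addresses go through the
 * mcast shadow list and a single ecore_filter_mcast_cmd carrying the whole
 * list, while unicast addresses are programmed individually via
 * ecore_filter_ucast_cmd.
 */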
static enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc;
        struct ecore_filter_mcast mcast;
        struct qede_mcast_entry *tmp;

        if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
                        if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
                                       "Mcast filter table limit exceeded; "
                                       "please enable mcast promisc mode\n");
                        rc = qede_mcast_filter(eth_dev, ucast, add);
                        DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
                        memset(&mcast, 0, sizeof(mcast));
                        mcast.num_mc_addrs = qdev->num_mc_addr;
                        mcast.opcode = ECORE_FILTER_ADD;
                        SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
                                ether_addr_copy(&tmp->mac,
                                        (struct ether_addr *)&mcast.mac[j]);
                        rc = ecore_filter_mcast_cmd(edev, &mcast,
                                                    ECORE_SPQ_MODE_CB, NULL);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Failed to add multicast filter"
                               " rc = %d, op = %d\n", rc, add);
        } else { /* Unicast */
                        if (qdev->num_uc_addr >=
                            qdev->dev_info.num_mac_filters) {
                                "Ucast filter table limit exceeded;"
                                " please enable promisc mode\n");
                        rc = qede_ucast_filter(eth_dev, ucast, add);
                        rc = ecore_filter_ucast_cmd(edev, ucast,
                                                    ECORE_SPQ_MODE_CB, NULL);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
                  __rte_unused uint32_t index, __rte_unused uint32_t pool)
        struct ecore_filter_ucast ucast;

        qede_set_ucast_cmn_params(&ucast);
        ucast.type = ECORE_FILTER_MAC;
        ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
        re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct ecore_filter_ucast ucast;

        PMD_INIT_FUNC_TRACE(edev);

        if (index >= qdev->dev_info.num_mac_filters) {
                DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
                       index, qdev->dev_info.num_mac_filters);

        qede_set_ucast_cmn_params(&ucast);
        ucast.opcode = ECORE_FILTER_REMOVE;
        ucast.type = ECORE_FILTER_MAC;

        /* Use the index maintained by rte */
        ether_addr_copy(&eth_dev->data->mac_addrs[index],
                        (struct ether_addr *)&ucast.mac);

        ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
                                               mac_addr->addr_bytes)) {
                DP_ERR(edev, "Setting MAC address is not allowed\n");
                ether_addr_copy(&qdev->primary_mac,
                                &eth_dev->data->mac_addrs[0]);

        qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params params;
        struct ecore_hwfn *p_hwfn;

        memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
        params.update_accept_any_vlan_flg = 1;
        params.accept_any_vlan = flg;
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &params,
                                           ECORE_SPQ_MODE_EBLOCK, NULL);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Failed to configure accept-any-vlan\n");

        DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params params;
        struct ecore_hwfn *p_hwfn;

        memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
        params.update_inner_vlan_removal_flg = 1;
        params.inner_vlan_removal_flg = flg;
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &params,
                                           ECORE_SPQ_MODE_EBLOCK, NULL);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Failed to update vport\n");

        DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
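/* Note: adds or removes a VLAN filter. A shadow list tracks the configured
 * VLANs so duplicates are rejected, and once the HW filter table is full the
 * driver turns on accept-any-vlan instead of failing the request.
 */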
static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
                                uint16_t vlan_id, int on)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qed_dev_eth_info *dev_info = &qdev->dev_info;
        struct qede_vlan_entry *tmp = NULL;
        struct qede_vlan_entry *vlan;
        struct ecore_filter_ucast ucast;

        if (qdev->configured_vlans == dev_info->num_vlan_filters) {
                DP_ERR(edev, "Reached max VLAN filter limit;"
                       " enabling accept_any_vlan\n");
                qede_config_accept_any_vlan(qdev, true);

        SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
                if (tmp->vid == vlan_id) {
                        DP_ERR(edev, "VLAN %u already configured\n",

        vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
                          RTE_CACHE_LINE_SIZE);
                DP_ERR(edev, "Failed to allocate memory for VLAN\n");

        qede_set_ucast_cmn_params(&ucast);
        ucast.opcode = ECORE_FILTER_ADD;
        ucast.type = ECORE_FILTER_VLAN;
        ucast.vlan = vlan_id;
        rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
                DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,

        SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
        qdev->configured_vlans++;
        DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
                vlan_id, qdev->configured_vlans);

        SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
                if (tmp->vid == vlan_id)

        if (qdev->configured_vlans == 0) {
                "No VLAN filters configured yet\n");

        DP_ERR(edev, "VLAN %u not configured\n", vlan_id);

        SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);

        qede_set_ucast_cmn_params(&ucast);
        ucast.opcode = ECORE_FILTER_REMOVE;
        ucast.type = ECORE_FILTER_VLAN;
        ucast.vlan = vlan_id;
        rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
                DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",

        qdev->configured_vlans--;
        DP_INFO(edev, "VLAN %u removed, configured_vlans %u\n",
                vlan_id, qdev->configured_vlans);
static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

        if (mask & ETH_VLAN_STRIP_MASK) {
                if (rxmode->hw_vlan_strip)
                        (void)qede_vlan_stripping(eth_dev, 1);
                        (void)qede_vlan_stripping(eth_dev, 0);

        if (mask & ETH_VLAN_FILTER_MASK) {
                /* VLAN filtering kicks in when a VLAN is added */
                if (rxmode->hw_vlan_filter) {
                        qede_vlan_filter_set(eth_dev, 0, 1);
                        if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
                                " Please remove existing VLAN filters"
                                " before disabling VLAN filtering\n");
                                /* Signal app that VLAN filtering is still
                                rxmode->hw_vlan_filter = true;
                                qede_vlan_filter_set(eth_dev, 0, 0);

        if (mask & ETH_VLAN_EXTEND_MASK)
                DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
                        " and classification is based on outer tag only\n");

        DP_INFO(edev, "VLAN offload mask %d vlan-strip %d vlan-filter %d\n",
                mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
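/* Note: fills the default RSS hash key with pseudo-random bytes. rand()
 * seeded with time() is adequate for spreading flows across queues but is
 * not a cryptographically secure source.
 */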
static void qede_prandom_bytes(uint32_t *buff)
        srand((unsigned int)time(NULL));
        for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
int qede_config_rss(struct rte_eth_dev *eth_dev)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
        struct rte_eth_rss_reta_entry64 reta_conf[2];
        struct rte_eth_rss_conf rss_conf;
        uint32_t i, id, pos, q;

        rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
        if (!rss_conf.rss_key) {
                DP_INFO(edev, "Applying driver default key\n");
                rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
                qede_prandom_bytes(&def_rss_key[0]);
                rss_conf.rss_key = (uint8_t *)&def_rss_key[0];

        /* Configure RSS hash */
        if (qede_rss_hash_update(eth_dev, &rss_conf))

        /* Configure default RETA */
        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
                reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

        for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
                id = i / RTE_RETA_GROUP_SIZE;
                pos = i % RTE_RETA_GROUP_SIZE;
                q = i % QEDE_RSS_COUNT(qdev);
                reta_conf[id].reta[pos] = q;

        if (qede_rss_reta_update(eth_dev, &reta_conf[0],
                                 ECORE_RSS_IND_TABLE_SIZE))
static void qede_fastpath_start(struct ecore_dev *edev)
        struct ecore_hwfn *p_hwfn;

        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                ecore_hw_start_fastpath(p_hwfn);
static int qede_dev_start(struct rte_eth_dev *eth_dev)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        PMD_INIT_FUNC_TRACE(edev);

        /* Update MTU only if it has changed */
        if (qdev->mtu != qdev->new_mtu) {
                if (qede_update_mtu(eth_dev, qdev->new_mtu))
                qdev->mtu = qdev->new_mtu;
                /* If MTU has changed then update TPA too */
                if (qdev->enable_lro)
                        if (qede_enable_tpa(eth_dev, true))

        if (qede_start_queues(eth_dev))

        /* Newer SR-IOV PF driver expects RX/TX queues to be started before
         * enabling RSS. Hence RSS configuration is deferred up to this point.
         * Also, we would like to retain similar behavior in the PF case, so
         * we don't do a PF/VF-specific check here.
         */
        if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
                if (qede_config_rss(eth_dev))

        if (qede_activate_vport(eth_dev, true))

        /* Bring-up the link */
        qede_dev_set_link_state(eth_dev, true);

        /* Start/resume traffic */
        qede_fastpath_start(edev);

        DP_INFO(edev, "Device started\n");

        DP_ERR(edev, "Device start fails\n");
        return -1; /* common error code is < 0 */
static void qede_dev_stop(struct rte_eth_dev *eth_dev)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        PMD_INIT_FUNC_TRACE(edev);

        if (qede_activate_vport(eth_dev, false))

        if (qdev->enable_lro)
                qede_enable_tpa(eth_dev, false);

        /* TODO: Do we need to disable LRO or RSS? */
        qede_stop_queues(eth_dev);

        /* Disable traffic */
        ecore_hw_stop_fastpath(edev); /* TBD - loop */

        /* Bring the link down */
        qede_dev_set_link_state(eth_dev, false);

        DP_INFO(edev, "Device is stopped\n");
static int qede_dev_configure(struct rte_eth_dev *eth_dev)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

        PMD_INIT_FUNC_TRACE(edev);

        /* Check requirements for 100G mode */
        if (edev->num_hwfns > 1) {
                if (eth_dev->data->nb_rx_queues < 2 ||
                    eth_dev->data->nb_tx_queues < 2) {
                        DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");

                if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
                    (eth_dev->data->nb_tx_queues % 2 != 0)) {
                        "100G mode needs an even number of RX/TX queues\n");

        /* Sanity checks and throw warnings */
        if (rxmode->enable_scatter)
                eth_dev->data->scattered_rx = 1;

        if (!rxmode->hw_strip_crc)
                DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");

        if (!rxmode->hw_ip_checksum)
                DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
        if (rxmode->header_split)
                DP_INFO(edev, "Header split enable is not supported\n");
        if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
                DP_ERR(edev, "Unsupported multi-queue mode\n");

        /* Flow director mode check */
        if (qede_check_fdir_support(eth_dev))

        /* Deallocate resources if held previously. It is needed only if the
         * queue count has been changed from the previous configuration. If it
         * is going to change then it means RX/TX queue setup will be called
         * again and the fastpath pointers will be reinitialized there.
         */
        if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
            qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
                qede_dealloc_fp_resc(eth_dev);
                /* Proceed with updated queue count */
                qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
                qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
                if (qede_alloc_fp_resc(qdev))

        /* VF's MTU has to be set using vport-start, whereas
         * PF's MTU can be updated via vport-update.
         */
                if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
                if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))

        qdev->mtu = rxmode->max_rx_pkt_len;
        qdev->new_mtu = qdev->mtu;

        /* Configure TPA parameters */
        if (rxmode->enable_lro) {
                if (qede_enable_tpa(eth_dev, true))
                /* Enable scatter mode for LRO */
                if (!rxmode->enable_scatter)
                        eth_dev->data->scattered_rx = 1;

        qdev->enable_lro = rxmode->enable_lro;

        /* Enable VLAN offloads by default */
        qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
                              ETH_VLAN_FILTER_MASK |
                              ETH_VLAN_EXTEND_MASK);

        DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
                QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
        .nb_max = 0x8000, /* 32K */
        .nb_align = 128 /* lowest common multiple */

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
        .nb_max = 0x8000, /* 32K */
        .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
        .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
qede_dev_info_get(struct rte_eth_dev *eth_dev,
                  struct rte_eth_dev_info *dev_info)
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct qed_link_output link;
        uint32_t speed_cap = 0;

        PMD_INIT_FUNC_TRACE(edev);

        dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
        dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
        dev_info->rx_desc_lim = qede_rx_desc_lim;
        dev_info->tx_desc_lim = qede_tx_desc_lim;

                dev_info->max_rx_queues = (uint16_t)RTE_MIN(
                        QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
                dev_info->max_rx_queues = (uint16_t)RTE_MIN(
                        QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
        dev_info->max_tx_queues = dev_info->max_rx_queues;

        dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
        dev_info->max_vfs = 0;
        dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
        dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
        dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .txq_flags = QEDE_TXQ_FLAGS,

        dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
                                     DEV_RX_OFFLOAD_IPV4_CKSUM |
                                     DEV_RX_OFFLOAD_UDP_CKSUM |
                                     DEV_RX_OFFLOAD_TCP_CKSUM |
                                     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                                     DEV_RX_OFFLOAD_TCP_LRO);

        dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
                                     DEV_TX_OFFLOAD_IPV4_CKSUM |
                                     DEV_TX_OFFLOAD_UDP_CKSUM |
                                     DEV_TX_OFFLOAD_TCP_CKSUM |
                                     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
                                     DEV_TX_OFFLOAD_TCP_TSO |
                                     DEV_TX_OFFLOAD_VXLAN_TNL_TSO);

        memset(&link, 0, sizeof(struct qed_link_output));
        qdev->ops->common->get_link(edev, &link);
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
                speed_cap |= ETH_LINK_SPEED_1G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
                speed_cap |= ETH_LINK_SPEED_10G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
                speed_cap |= ETH_LINK_SPEED_25G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
                speed_cap |= ETH_LINK_SPEED_40G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
                speed_cap |= ETH_LINK_SPEED_50G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
                speed_cap |= ETH_LINK_SPEED_100G;
        dev_info->speed_capa = speed_cap;
/* return 0 means link status changed, -1 means not changed */
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        uint16_t link_duplex;
        struct qed_link_output link;
        struct rte_eth_link *curr = &eth_dev->data->dev_link;

        memset(&link, 0, sizeof(struct qed_link_output));
        qdev->ops->common->get_link(edev, &link);

        curr->link_speed = link.speed;

        switch (link.duplex) {
        case QEDE_DUPLEX_HALF:
                link_duplex = ETH_LINK_HALF_DUPLEX;
        case QEDE_DUPLEX_FULL:
                link_duplex = ETH_LINK_FULL_DUPLEX;
        case QEDE_DUPLEX_UNKNOWN:
        curr->link_duplex = link_duplex;

        curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;

        curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
                             ETH_LINK_AUTONEG : ETH_LINK_FIXED;

        DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
                curr->link_speed, curr->link_duplex,
                curr->link_autoneg, curr->link_status);

        /* return 0 means link status changed, -1 means not changed */
        return ((curr->link_status == link.link_up) ? -1 : 0);
static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        PMD_INIT_FUNC_TRACE(edev);

        enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;

        if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
                type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

        qed_configure_filter_rx_mode(eth_dev, type);

static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        PMD_INIT_FUNC_TRACE(edev);

        if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
                qed_configure_filter_rx_mode(eth_dev,
                                QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
                qed_configure_filter_rx_mode(eth_dev,
                                QED_FILTER_RX_MODE_TYPE_REGULAR);
static void qede_poll_sp_sb_cb(void *param)
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        qede_interrupt_action(ECORE_LEADING_HWFN(edev));
        qede_interrupt_action(&edev->hwfns[1]);

        rc = rte_eal_alarm_set(timer_period * US_PER_S,
                DP_ERR(edev, "Unable to start periodic"
                       " timer rc %d\n", rc);
                assert(false && "Unable to start periodic timer");
static void qede_dev_close(struct rte_eth_dev *eth_dev)
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        PMD_INIT_FUNC_TRACE(edev);

        /* dev_stop() shall cleanup fp resources in hw but without releasing
         * dma memories and sw structures so that dev_start() can be called
         * by the app without reconfiguration. However, in dev_close() we
         * can release all the resources and the device can be brought up
         * afresh.
         */
        if (eth_dev->data->dev_started)
                qede_dev_stop(eth_dev);

        qede_stop_vport(edev);
        qede_fdir_dealloc_resc(eth_dev);
        qede_dealloc_fp_resc(eth_dev);

        eth_dev->data->nb_rx_queues = 0;
        eth_dev->data->nb_tx_queues = 0;

        qdev->ops->common->slowpath_stop(edev);
        qdev->ops->common->remove(edev);
        rte_intr_disable(&pci_dev->intr_handle);
        rte_intr_callback_unregister(&pci_dev->intr_handle,
                                     qede_interrupt_handler, (void *)eth_dev);
        if (edev->num_hwfns > 1)
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct ecore_eth_stats stats;
        unsigned int i = 0, j = 0, qid;
        unsigned int rxq_stat_cntrs, txq_stat_cntrs;
        struct qede_tx_queue *txq;

        ecore_get_vport_stats(edev, &stats);

        eth_stats->ipackets = stats.rx_ucast_pkts +
            stats.rx_mcast_pkts + stats.rx_bcast_pkts;

        eth_stats->ibytes = stats.rx_ucast_bytes +
            stats.rx_mcast_bytes + stats.rx_bcast_bytes;

        eth_stats->ierrors = stats.rx_crc_errors +
            stats.rx_align_errors +
            stats.rx_carrier_errors +
            stats.rx_oversize_packets +
            stats.rx_jabbers + stats.rx_undersize_packets;

        eth_stats->rx_nombuf = stats.no_buff_discards;

        eth_stats->imissed = stats.mftag_filter_discards +
            stats.mac_filter_discards +
            stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;

        eth_stats->opackets = stats.tx_ucast_pkts +
            stats.tx_mcast_pkts + stats.tx_bcast_pkts;

        eth_stats->obytes = stats.tx_ucast_bytes +
            stats.tx_mcast_bytes + stats.tx_bcast_bytes;

        eth_stats->oerrors = stats.tx_err_drop_pkts;
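        /* Note: per-queue counters are read straight out of the queue
         * structures via offsetof() pointer arithmetic, clamped to
         * RTE_ETHDEV_QUEUE_STAT_CNTRS entries per direction.
         */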
        rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
                                 RTE_ETHDEV_QUEUE_STAT_CNTRS);
        txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
                                 RTE_ETHDEV_QUEUE_STAT_CNTRS);
        if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
            (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
                DP_VERBOSE(edev, ECORE_MSG_DEBUG,
                           "Not all the queue stats will be displayed. Set"
                           " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
                           " appropriately and retry.\n");

                eth_stats->q_ipackets[i] =
                        ((char *)(qdev->fp_array[qid].rxq)) +
                        offsetof(struct qede_rx_queue,
                eth_stats->q_errors[i] =
                        ((char *)(qdev->fp_array[qid].rxq)) +
                        offsetof(struct qede_rx_queue,
                        ((char *)(qdev->fp_array[qid].rxq)) +
                        offsetof(struct qede_rx_queue,
                if (i == rxq_stat_cntrs)

                txq = qdev->fp_array[qid].txq;
                eth_stats->q_opackets[j] =
                        *((uint64_t *)(uintptr_t)
                        (((uint64_t)(uintptr_t)(txq)) +
                        offsetof(struct qede_tx_queue,
                if (j == txq_stat_cntrs)
qede_get_xstats_count(struct qede_dev *qdev) {
        return RTE_DIM(qede_xstats_strings) +
               (RTE_DIM(qede_rxq_xstats_strings) *
                RTE_MIN(QEDE_RSS_COUNT(qdev),
                        RTE_ETHDEV_QUEUE_STAT_CNTRS));
qede_get_xstats_names(struct rte_eth_dev *dev,
                      struct rte_eth_xstat_name *xstats_names,
                      __rte_unused unsigned int limit)
        struct qede_dev *qdev = dev->data->dev_private;
        const unsigned int stat_cnt = qede_get_xstats_count(qdev);
        unsigned int i, qid, stat_idx = 0;
        unsigned int rxq_stat_cntrs;

        if (xstats_names != NULL) {
                for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
                        snprintf(xstats_names[stat_idx].name,
                                 sizeof(xstats_names[stat_idx].name),
                                 qede_xstats_strings[i].name);

                rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
                                         RTE_ETHDEV_QUEUE_STAT_CNTRS);
                for (qid = 0; qid < rxq_stat_cntrs; qid++) {
                        for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
                                snprintf(xstats_names[stat_idx].name,
                                         sizeof(xstats_names[stat_idx].name),
                                         qede_rxq_xstats_strings[i].name, qid,
                                         qede_rxq_xstats_strings[i].name + 4);
qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct ecore_eth_stats stats;
        const unsigned int num = qede_get_xstats_count(qdev);
        unsigned int i, qid, stat_idx = 0;
        unsigned int rxq_stat_cntrs;

        ecore_get_vport_stats(edev, &stats);

        for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
                xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
                                           qede_xstats_strings[i].offset);
                xstats[stat_idx].id = stat_idx;

        rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
                                 RTE_ETHDEV_QUEUE_STAT_CNTRS);
        for (qid = 0; qid < rxq_stat_cntrs; qid++) {
                for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
                        xstats[stat_idx].value = *(uint64_t *)(
                                ((char *)(qdev->fp_array[qid].rxq)) +
                                qede_rxq_xstats_strings[i].offset);
                        xstats[stat_idx].id = stat_idx;
qede_reset_xstats(struct rte_eth_dev *dev)
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        ecore_reset_vport_stats(edev);
int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qed_link_params link_params;

        DP_INFO(edev, "setting link state %d\n", link_up);
        memset(&link_params, 0, sizeof(link_params));
        link_params.link_up = link_up;
        rc = qdev->ops->common->set_link(edev, &link_params);
        if (rc != ECORE_SUCCESS)
                DP_ERR(edev, "Unable to set link state %d\n", link_up);

static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
        return qede_dev_set_link_state(eth_dev, true);

static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
        return qede_dev_set_link_state(eth_dev, false);
static void qede_reset_stats(struct rte_eth_dev *eth_dev)
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        ecore_reset_vport_stats(edev);

static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
        enum qed_filter_rx_mode_type type =
            QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

        if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
                type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

        qed_configure_filter_rx_mode(eth_dev, type);

static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
        if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
                qed_configure_filter_rx_mode(eth_dev,
                                QED_FILTER_RX_MODE_TYPE_PROMISC);
                qed_configure_filter_rx_mode(eth_dev,
                                QED_FILTER_RX_MODE_TYPE_REGULAR);
static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
                              struct rte_eth_fc_conf *fc_conf)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qed_link_output current_link;
        struct qed_link_params params;

        memset(&current_link, 0, sizeof(current_link));
        qdev->ops->common->get_link(edev, &current_link);

        memset(&params, 0, sizeof(params));
        params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
        if (fc_conf->autoneg) {
                if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
                        DP_ERR(edev, "Autoneg not supported\n");
                params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;

        /* Pause is assumed to be supported (SUPPORTED_Pause) */
        if (fc_conf->mode == RTE_FC_FULL)
                params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
                                        QED_LINK_PAUSE_RX_ENABLE);
        if (fc_conf->mode == RTE_FC_TX_PAUSE)
                params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
        if (fc_conf->mode == RTE_FC_RX_PAUSE)
                params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;

        params.link_up = true;
        (void)qdev->ops->common->set_link(edev, &params);

static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
                              struct rte_eth_fc_conf *fc_conf)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qed_link_output current_link;

        memset(&current_link, 0, sizeof(current_link));
        qdev->ops->common->get_link(edev, &current_link);

        if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
                fc_conf->autoneg = true;

        if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
                                         QED_LINK_PAUSE_TX_ENABLE))
                fc_conf->mode = RTE_FC_FULL;
        else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
                fc_conf->mode = RTE_FC_RX_PAUSE;
        else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
                fc_conf->mode = RTE_FC_TX_PAUSE;
                fc_conf->mode = RTE_FC_NONE;
static const uint32_t *
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
        static const uint32_t ptypes[] = {

        if (eth_dev->rx_pkt_burst == qede_recv_pkts)

static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
        *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
        *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
        *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
        *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
        *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
        *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
        *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
        *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
                         struct rte_eth_rss_conf *rss_conf)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params vport_update_params;
        struct ecore_rss_params rss_params;
        struct ecore_hwfn *p_hwfn;
        uint32_t *key = (uint32_t *)rss_conf->rss_key;
        uint64_t hf = rss_conf->rss_hf;
        uint8_t len = rss_conf->rss_key_len;

        memset(&vport_update_params, 0, sizeof(vport_update_params));
        memset(&rss_params, 0, sizeof(rss_params));

        DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
                (unsigned long)hf, len, key);

                DP_INFO(edev, "Enabling RSS\n");

                qede_init_rss_caps(&rss_params.rss_caps, hf);
                rss_params.update_rss_capabilities = 1;

                        if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
                                DP_ERR(edev, "RSS key length exceeds limit\n");
                        DP_INFO(edev, "Applying user supplied hash key\n");
                        rss_params.update_rss_key = 1;
                        memcpy(&rss_params.rss_key, key, len);
                rss_params.rss_enable = 1;

        rss_params.update_rss_config = 1;
        /* tbl_size has to be set with capabilities */
        rss_params.rss_table_size_log = 7;
        vport_update_params.vport_id = 0;
        /* pass the L2 handles instead of qids */
        for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
                idx = qdev->rss_ind_table[i];
                rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
        vport_update_params.rss_params = &rss_params;

        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
                                           ECORE_SPQ_MODE_EBLOCK, NULL);
                        DP_ERR(edev, "vport-update for RSS failed\n");

        qdev->rss_enable = rss_params.rss_enable;

        /* Update local structure for hash query */
        qdev->rss_conf.rss_hf = hf;
        qdev->rss_conf.rss_key_len = len;
        if (qdev->rss_enable) {
                if (qdev->rss_conf.rss_key == NULL) {
                        qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
                        if (qdev->rss_conf.rss_key == NULL) {
                                DP_ERR(edev, "No memory to store RSS key\n");

                        DP_INFO(edev, "Storing RSS key\n");
                        memcpy(qdev->rss_conf.rss_key, key, len);
        } else if (!qdev->rss_enable && len == 0) {
                if (qdev->rss_conf.rss_key) {
                        free(qdev->rss_conf.rss_key);
                        qdev->rss_conf.rss_key = NULL;
                        DP_INFO(edev, "Free RSS key\n");
static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
                                  struct rte_eth_rss_conf *rss_conf)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);

        rss_conf->rss_hf = qdev->rss_conf.rss_hf;
        rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;

        if (rss_conf->rss_key && qdev->rss_conf.rss_key)
                memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
                       rss_conf->rss_key_len);
static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
                                     struct ecore_rss_params *rss)
        bool rss_mode = 1; /* enable */
        struct ecore_queue_cid *cid;
        struct ecore_rss_params *t_rss;

        /* In regular scenario, we'd simply need to take input handlers.
         * But in CMT, we'd have to split the handlers according to the
         * engine they were configured on. We'd then have to understand
         * whether RSS is really required, since 2-queues on CMT doesn't
         * necessarily imply RSS.
         */

        /* CMT should be round-robin */
        for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
                cid = rss->rss_ind_table[i];

                if (cid->p_owner == ECORE_LEADING_HWFN(edev))

                t_rss->rss_ind_table[i / edev->num_hwfns] = cid;

        t_rss->update_rss_ind_table = 1;
        t_rss->rss_table_size_log = 7;
        t_rss->update_rss_config = 1;

        /* Make sure RSS is actually required */
        for_each_hwfn(edev, fn) {
                for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
                        if (rss[fn].rss_ind_table[i] !=
                            rss[fn].rss_ind_table[0])

                if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
                                "CMT - 1 queue per-hwfn; Disabling RSS\n");

        t_rss->rss_enable = rss_mode;
int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
                         struct rte_eth_rss_reta_entry64 *reta_conf,
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params vport_update_params;
        struct ecore_rss_params *params;
        struct ecore_hwfn *p_hwfn;
        uint16_t i, idx, shift;

        if (reta_size > ETH_RSS_RETA_SIZE_128) {
                DP_ERR(edev, "reta_size %d is not supported by hardware\n",

        memset(&vport_update_params, 0, sizeof(vport_update_params));
        params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
                             RTE_CACHE_LINE_SIZE);

        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift)) {
                        entry = reta_conf[idx].reta[shift];
                        /* Pass rxq handles to ecore */
                        params->rss_ind_table[i] =
                                qdev->fp_array[entry].rxq->handle;
                        /* Update the local copy for RETA query command */
                        qdev->rss_ind_table[i] = entry;

        params->update_rss_ind_table = 1;
        params->rss_table_size_log = 7;
        params->update_rss_config = 1;

        /* Fix up RETA for CMT mode device */
        if (edev->num_hwfns > 1)
                qdev->rss_enable = qede_update_rss_parm_cmt(edev,
        vport_update_params.vport_id = 0;
        /* Use the current value of rss_enable */
        params->rss_enable = qdev->rss_enable;
        vport_update_params.rss_params = params;

        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
                                           ECORE_SPQ_MODE_EBLOCK, NULL);
                        DP_ERR(edev, "vport-update for RSS failed\n");
static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
                               struct rte_eth_rss_reta_entry64 *reta_conf,
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        uint16_t i, idx, shift;

        if (reta_size > ETH_RSS_RETA_SIZE_128) {
                DP_ERR(edev, "reta_size %d is not supported\n",

        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift)) {
                        entry = qdev->rss_ind_table[i];
                        reta_conf[idx].reta[shift] = entry;
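/* Note: changing the MTU parks traffic by swapping in dummy burst
 * functions, recomputes the per-queue RX buffer size, and then restarts
 * the port with the updated frame size.
 */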
static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
        struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_dev_info dev_info = {0};
        struct qede_fastpath *fp;
        uint32_t frame_size;
        uint16_t rx_buf_size;

        PMD_INIT_FUNC_TRACE(edev);
        qede_dev_info_get(dev, &dev_info);
        frame_size = mtu + QEDE_ETH_OVERHEAD;
        if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
                DP_ERR(edev, "MTU %u out of range\n", mtu);

        if (!dev->data->scattered_rx &&
            frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
                DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
                        dev->data->min_rx_buf_size);

        /* Temporarily replace I/O functions with dummy ones. It cannot
         * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
         */
        dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
        dev->tx_pkt_burst = qede_rxtx_pkts_dummy;

        /* Fix up RX buf size for all queues of the port */
                fp = &qdev->fp_array[i];
                bufsz = (uint16_t)rte_pktmbuf_data_room_size(
                        fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
                if (dev->data->scattered_rx)
                        rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
                        rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
                rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
                fp->rxq->rx_buf_size = rx_buf_size;
                DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);

        qede_dev_start(dev);
        if (frame_size > ETHER_MAX_LEN)
                dev->data->dev_conf.rxmode.jumbo_frame = 1;
                dev->data->dev_conf.rxmode.jumbo_frame = 0;

        /* update max frame size */
        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

        dev->rx_pkt_burst = qede_recv_pkts;
        dev->tx_pkt_burst = qede_xmit_pkts;
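/* Note: programs the VXLAN UDP destination port via a PF tunnel-config
 * update on every hwfn; deleting a port restores the default
 * (QEDE_VXLAN_DEF_PORT).
 */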
qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
                       struct rte_eth_udp_tunnel *tunnel_udp,
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_tunnel_info tunn; /* @DPDK */
        struct ecore_hwfn *p_hwfn;

        PMD_INIT_FUNC_TRACE(edev);

        memset(&tunn, 0, sizeof(tunn));
        if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
                tunn.vxlan_port.b_update_port = true;
                tunn.vxlan_port.port = (add) ? tunnel_udp->udp_port :
                                       QEDE_VXLAN_DEF_PORT;
                for_each_hwfn(edev, i) {
                        p_hwfn = &edev->hwfns[i];
                        rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
                                        ECORE_SPQ_MODE_CB, NULL);
                        if (rc != ECORE_SUCCESS) {
                                DP_ERR(edev, "Unable to config UDP port %u\n",
                                       tunn.vxlan_port.port);

qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
                      struct rte_eth_udp_tunnel *tunnel_udp)
        return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false);

qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
                      struct rte_eth_udp_tunnel *tunnel_udp)
        return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true);
static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
				       uint32_t *clss, char *str)
{
	uint16_t j;

	*clss = MAX_ECORE_TUNN_CLSS;

	for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
		if (filter == qede_tunn_types[j].rte_filter_type) {
			*type = qede_tunn_types[j].qede_type;
			*clss = qede_tunn_types[j].qede_tunn_clss;
			strcpy(str, qede_tunn_types[j].string);
			break;
		}
	}
}
static int
qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
			      const struct rte_eth_tunnel_filter_conf *conf,
			      uint32_t type)
{
	/* Init common ucast params first */
	qede_set_ucast_cmn_params(ucast);

	/* Copy out the required fields based on classification type */
	ucast->type = type;
	switch (type) {
	case ECORE_FILTER_VNI:
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_VLAN:
		ucast->vlan = conf->inner_vlan;
		break;
	case ECORE_FILTER_MAC:
		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		break;
	case ECORE_FILTER_INNER_MAC:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		break;
	case ECORE_FILTER_MAC_VNI_PAIR:
		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_MAC_VNI_PAIR:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_PAIR:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vlan = conf->inner_vlan;
		break;
	default:
		return -ENOTSUP;
	}

	return ECORE_SUCCESS;
}
static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
				  enum rte_filter_op filter_op,
				  const struct rte_eth_tunnel_filter_conf *conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn;
	struct ecore_hwfn *p_hwfn;
	enum ecore_filter_ucast_type type;
	enum ecore_tunn_clss clss;
	struct ecore_filter_ucast ucast;
	char str[80];
	uint16_t filter_type;
	int rc, i;

	PMD_INIT_FUNC_TRACE(edev);

	filter_type = conf->filter_type | qdev->vxlan_filter_type;
	/* First determine if the given filter classification is supported */
	qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
	if (clss == MAX_ECORE_TUNN_CLSS) {
		DP_ERR(edev, "Wrong filter type\n");
		return -EINVAL;
	}
	/* Init tunnel ucast params */
	rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
		       conf->filter_type);
		return rc;
	}
	DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
		str, filter_op, ucast.type);
	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ucast.opcode = ECORE_FILTER_ADD;

		/* Skip MAC/VLAN if filter is based on VNI */
		if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
			rc = qede_mac_int_ops(eth_dev, &ucast, 1);
			if (rc == 0) {
				/* Enable accept anyvlan */
				qede_config_accept_any_vlan(qdev, true);
			}
		} else {
			rc = qede_ucast_filter(eth_dev, &ucast, 1);
			if (rc == 0)
				rc = ecore_filter_ucast_cmd(edev, &ucast,
						ECORE_SPQ_MODE_CB, NULL);
		}
		if (rc != ECORE_SUCCESS)
			return rc;

		qdev->vxlan_filter_type = filter_type;

		DP_INFO(edev, "Enabling VXLAN tunneling\n");
		qede_set_cmn_tunn_param(&tunn, clss, true, true);
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
					&tunn, ECORE_SPQ_MODE_CB, NULL);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Failed to update tunn_clss %u\n",
				       tunn.vxlan.tun_cls);
			}
		}
		qdev->num_tunn_filters++; /* Filter added successfully */
		break;
	case RTE_ETH_FILTER_DELETE:
		ucast.opcode = ECORE_FILTER_REMOVE;

		if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
			rc = qede_mac_int_ops(eth_dev, &ucast, 0);
		} else {
			rc = qede_ucast_filter(eth_dev, &ucast, 0);
			if (rc == 0)
				rc = ecore_filter_ucast_cmd(edev, &ucast,
						ECORE_SPQ_MODE_CB, NULL);
		}
		if (rc != ECORE_SUCCESS)
			return rc;

		qdev->vxlan_filter_type = filter_type;
		qdev->num_tunn_filters--;

		/* Disable VXLAN if VXLAN filters become 0 */
		if (qdev->num_tunn_filters == 0) {
			DP_INFO(edev, "Disabling VXLAN tunneling\n");

			/* Use 0 as tunnel mode */
			qede_set_cmn_tunn_param(&tunn, clss, false, true);
			for_each_hwfn(edev, i) {
				p_hwfn = &edev->hwfns[i];
				rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
						ECORE_SPQ_MODE_CB, NULL);
				if (rc != ECORE_SUCCESS) {
					DP_ERR(edev,
					       "Failed to update tunn_clss %u\n",
					       tunn.vxlan.tun_cls);
					break;
				}
			}
		}
		break;
	default:
		DP_ERR(edev, "Unsupported operation %d\n", filter_op);
		return -EINVAL;
	}
	DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters);

	return 0;
}
int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
			 enum rte_filter_type filter_type,
			 enum rte_filter_op filter_op,
			 void *arg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_tunnel_filter_conf *filter_conf =
			(struct rte_eth_tunnel_filter_conf *)arg;

	switch (filter_type) {
	case RTE_ETH_FILTER_TUNNEL:
		switch (filter_conf->tunnel_type) {
		case RTE_TUNNEL_TYPE_VXLAN:
			DP_INFO(edev,
				"Packet steering to the specified Rx queue"
				" is not supported with VXLAN tunneling\n");
			return qede_vxlan_tunn_config(eth_dev, filter_op,
						      filter_conf);
		/* Place holders for future tunneling support */
		case RTE_TUNNEL_TYPE_GENEVE:
		case RTE_TUNNEL_TYPE_TEREDO:
		case RTE_TUNNEL_TYPE_NVGRE:
		case RTE_TUNNEL_TYPE_IP_IN_GRE:
		case RTE_L2_TUNNEL_TYPE_E_TAG:
			DP_ERR(edev, "Unsupported tunnel type %d\n",
			       filter_conf->tunnel_type);
			return -EINVAL;
		case RTE_TUNNEL_TYPE_NONE:
		default:
			return 0;
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		return qede_fdir_filter_conf(eth_dev, filter_op, arg);
	case RTE_ETH_FILTER_NTUPLE:
		return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
	case RTE_ETH_FILTER_MACVLAN:
	case RTE_ETH_FILTER_ETHERTYPE:
	case RTE_ETH_FILTER_FLEXIBLE:
	case RTE_ETH_FILTER_SYN:
	case RTE_ETH_FILTER_HASH:
	case RTE_ETH_FILTER_L2_TUNNEL:
	case RTE_ETH_FILTER_MAX:
	default:
		DP_ERR(edev, "Unsupported filter type %d\n",
		       filter_type);
		return -EINVAL;
	}

	return 0;
}
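
/* Illustrative sketch only (not part of the driver): a VXLAN inner-MAC
 * filter submitted through the generic filter API is routed by
 * qede_dev_filter_ctrl() to qede_vxlan_tunn_config(). Assuming port 0
 * and a caller-supplied MAC array "mac" (hypothetical):
 *
 *	struct rte_eth_tunnel_filter_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
 *	conf.filter_type = ETH_TUNNEL_FILTER_IMAC;
 *	memcpy(conf.inner_mac.addr_bytes, mac, ETHER_ADDR_LEN);
 *	rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_TUNNEL,
 *				RTE_ETH_FILTER_ADD, &conf);
 */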
static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.filter_ctrl = qede_dev_filter_ctrl,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
};
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
};
static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(edev, &pf_params);
}
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	int rc;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	edev = &adapter->edev;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	pci_addr = pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);

	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	eth_dev->rx_pkt_burst = qede_recv_pkts;
	eth_dev->tx_pkt_burst = qede_xmit_pkts;
	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_ERR(edev, "Skipping device init from secondary process\n");
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* @DPDK */
	edev->vendor_id = pci_dev->id.vendor_id;
	edev->device_id = pci_dev->id.device_id;

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		return -EINVAL;
	}

	DP_INFO(edev, "Starting qede probe\n");
	rc = qed_ops->common->probe(edev, pci_dev, dp_module,
				    dp_level, is_vf);
	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		return -ENODEV;
	}
	qede_update_pf_params(edev);
	rte_intr_callback_register(&pci_dev->intr_handle,
				   qede_interrupt_handler, (void *)eth_dev);
	if (rte_intr_enable(&pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		return -ENODEV;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));
	params.int_mode = ECORE_INT_MODE_MSIX;
	params.drv_major = QEDE_PMD_VERSION_MAJOR;
	params.drv_minor = QEDE_PMD_VERSION_MINOR;
	params.drv_rev = QEDE_PMD_VERSION_REVISION;
	params.drv_eng = QEDE_PMD_VERSION_PATCH;
	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
		QEDE_PMD_DRV_VER_STR_SIZE);

	/* For CMT mode device do periodic polling for slowpath events.
	 * This is required since uio device uses only one MSI-x
	 * interrupt vector but we need one for each engine.
	 */
	if (edev->num_hwfns > 1 && IS_PF(edev)) {
		rc = rte_eal_alarm_set(timer_period * US_PER_S,
				       qede_poll_sp_sb_cb,
				       (void *)eth_dev);
		if (rc != 0) {
			DP_ERR(edev, "Unable to start periodic"
				     " timer rc %d\n", rc);
			return -EINVAL;
		}
	}

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	qede_alloc_etherdev(adapter, &dev_info);

	adapter->ops->common->set_name(edev, edev->name);

	if (!is_vf)
		adapter->dev_info.num_mac_filters =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					   ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
			     (uint32_t *)&adapter->dev_info.num_mac_filters);

	/* Allocate memory for storing MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					(ETHER_ADDR_LEN *
					 adapter->dev_info.num_mac_filters),
					RTE_CACHE_LINE_SIZE);

	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENOMEM;
	}

	if (!is_vf) {
		ether_addr_copy((struct ether_addr *)edev->hwfns[0].
				hw_info.hw_mac_addr,
				&eth_dev->data->mac_addrs[0]);
		ether_addr_copy(&eth_dev->data->mac_addrs[0],
				&adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist && is_mac_forced) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				ether_addr_copy((struct ether_addr *)&vf_mac,
						&eth_dev->data->mac_addrs[0]);
				ether_addr_copy(&eth_dev->data->mac_addrs[0],
						&adapter->primary_mac);
			} else {
				DP_ERR(edev, "No VF macaddr assigned\n");
			}
		}
	}

	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;

	if (do_once) {
#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
		qede_print_adapter_info(adapter);
#endif
		do_once = false;
	}

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	SLIST_INIT(&adapter->fdir_info.fdir_list_head);
	SLIST_INIT(&adapter->vlan_list_head);
	SLIST_INIT(&adapter->uc_list_head);
	adapter->mtu = ETHER_MTU;
	adapter->new_mtu = ETHER_MTU;
	if (qede_start_vport(adapter, adapter->mtu))
		return -1;

	DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
		adapter->primary_mac.addr_bytes[0],
		adapter->primary_mac.addr_bytes[1],
		adapter->primary_mac.addr_bytes[2],
		adapter->primary_mac.addr_bytes[3],
		adapter->primary_mac.addr_bytes[4],
		adapter->primary_mac.addr_bytes[5]);

	DP_INFO(edev, "Device initialized\n");

	return 0;
}
static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 0);
}
static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);
#endif

	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	if (eth_dev->data->mac_addrs)
		rte_free(eth_dev->data->mac_addrs);

	eth_dev->data->mac_addrs = NULL;

	return 0;
}
static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}
static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF) },
	{ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV) },
	{ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV) },
	{ .vendor_id = 0, }
};
static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G) },
	{ .vendor_id = 0, }
};
static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qedevf_eth_dev_init);
}

static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
}

static struct rte_pci_driver rte_qedevf_pmd = {
	.id_table = pci_id_qedevf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qedevf_eth_dev_pci_probe,
	.remove = qedevf_eth_dev_pci_remove,
};

static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qede_eth_dev_init);
}

static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
}

static struct rte_pci_driver rte_qede_pmd = {
	.id_table = pci_id_qede_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qede_eth_dev_pci_probe,
	.remove = qede_eth_dev_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
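
/* Illustrative note only (not part of the driver): before EAL can probe
 * the PCI IDs above, the device must be bound to one of the declared
 * kernel modules, e.g. (hypothetical BDF):
 *
 *	usertools/dpdk-devbind.py --bind=vfio-pci 0000:05:00.0
 */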