net/qede: fix default config option
drivers/net/qede/qede_ethdev.c
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_ethdev.h"
#include <rte_alarm.h>
#include <rte_version.h>

/* Globals */
static const struct qed_eth_ops *qed_ops;
static int64_t timer_period = 1;

/* VXLAN tunnel classification mapping */
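/* Each entry maps an rte_eth tunnel filter type (or combination) to the
 * ecore unicast filter type and tunnel classification mode used to
 * program it; entries set to ECORE_FILTER_UNUSED/MAX_ECORE_TUNN_CLSS
 * mark combinations the classifier does not support.
 */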
const struct _qede_vxlan_tunn_types {
        uint16_t rte_filter_type;
        enum ecore_filter_ucast_type qede_type;
        enum ecore_tunn_clss qede_tunn_clss;
        const char *string;
} qede_tunn_types[] = {
        {
                ETH_TUNNEL_FILTER_OMAC,
                ECORE_FILTER_MAC,
                ECORE_TUNN_CLSS_MAC_VLAN,
                "outer-mac"
        },
        {
                ETH_TUNNEL_FILTER_TENID,
                ECORE_FILTER_VNI,
                ECORE_TUNN_CLSS_MAC_VNI,
                "vni"
        },
        {
                ETH_TUNNEL_FILTER_IMAC,
                ECORE_FILTER_INNER_MAC,
                ECORE_TUNN_CLSS_INNER_MAC_VLAN,
                "inner-mac"
        },
        {
                ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_INNER_VLAN,
                ECORE_TUNN_CLSS_INNER_MAC_VLAN,
                "inner-vlan"
        },
        {
                ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
                ECORE_FILTER_MAC_VNI_PAIR,
                ECORE_TUNN_CLSS_MAC_VNI,
                "outer-mac and vni"
        },
        {
                ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "outer-mac and inner-mac"
        },
        {
                ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "outer-mac and inner-vlan"
        },
        {
                ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
                ECORE_FILTER_INNER_MAC_VNI_PAIR,
                ECORE_TUNN_CLSS_INNER_MAC_VNI,
                "vni and inner-mac",
        },
        {
                ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "vni and inner-vlan",
        },
        {
                ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_INNER_PAIR,
                ECORE_TUNN_CLSS_INNER_MAC_VLAN,
                "inner-mac and inner-vlan",
        },
        {
                ETH_TUNNEL_FILTER_OIP,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "outer-IP"
        },
        {
                ETH_TUNNEL_FILTER_IIP,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "inner-IP"
        },
        {
                RTE_TUNNEL_FILTER_IMAC_IVLAN,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "IMAC_IVLAN"
        },
        {
                RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "IMAC_IVLAN_TENID"
        },
        {
                RTE_TUNNEL_FILTER_IMAC_TENID,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "IMAC_TENID"
        },
        {
                RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "OMAC_TENID_IMAC"
        },
};

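/* Pair an xstat display name with the byte offset of its counter within
 * the ecore stats structure; the xstats code uses this to read counters
 * generically.
 */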
struct rte_qede_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        uint64_t offset;
};

static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
        {"rx_unicast_bytes",
                offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
        {"rx_multicast_bytes",
                offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
        {"rx_broadcast_bytes",
                offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
        {"rx_unicast_packets",
                offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
        {"rx_multicast_packets",
                offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
        {"rx_broadcast_packets",
                offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},

        {"tx_unicast_bytes",
                offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
        {"tx_multicast_bytes",
                offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
        {"tx_broadcast_bytes",
                offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
        {"tx_unicast_packets",
                offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
        {"tx_multicast_packets",
                offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
        {"tx_broadcast_packets",
                offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},

        {"rx_64_byte_packets",
                offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
        {"rx_65_to_127_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         rx_65_to_127_byte_packets)},
        {"rx_128_to_255_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         rx_128_to_255_byte_packets)},
        {"rx_256_to_511_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         rx_256_to_511_byte_packets)},
        {"rx_512_to_1023_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         rx_512_to_1023_byte_packets)},
        {"rx_1024_to_1518_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         rx_1024_to_1518_byte_packets)},
        {"tx_64_byte_packets",
                offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
        {"tx_65_to_127_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         tx_65_to_127_byte_packets)},
        {"tx_128_to_255_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         tx_128_to_255_byte_packets)},
        {"tx_256_to_511_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         tx_256_to_511_byte_packets)},
        {"tx_512_to_1023_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         tx_512_to_1023_byte_packets)},
        {"tx_1024_to_1518_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         tx_1024_to_1518_byte_packets)},

        /* The struct field keeps the historical "crtl" spelling */
        {"rx_mac_control_frames",
                offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
        {"tx_mac_control_frames",
                offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
        {"rx_pause_frames",
                offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
        {"tx_pause_frames",
                offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
        {"rx_priority_flow_control_frames",
                offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
        {"tx_priority_flow_control_frames",
                offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},

        {"rx_crc_errors",
                offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
        {"rx_align_errors",
                offsetof(struct ecore_eth_stats_common, rx_align_errors)},
        {"rx_carrier_errors",
                offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
        {"rx_oversize_packet_errors",
                offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
        {"rx_jabber_errors",
                offsetof(struct ecore_eth_stats_common, rx_jabbers)},
        {"rx_undersize_packet_errors",
                offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
        {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
        {"rx_host_buffer_not_available",
                offsetof(struct ecore_eth_stats_common, no_buff_discards)},
        /* Number of packets discarded because they are bigger than MTU */
        {"rx_packet_too_big_discards",
                offsetof(struct ecore_eth_stats_common,
                         packet_too_big_discard)},
        {"rx_ttl_zero_discards",
                offsetof(struct ecore_eth_stats_common, ttl0_discard)},
        {"rx_multi_function_tag_filter_discards",
                offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
        {"rx_mac_filter_discards",
                offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
        {"rx_hw_buffer_truncates",
                offsetof(struct ecore_eth_stats_common, brb_truncates)},
        {"rx_hw_buffer_discards",
                offsetof(struct ecore_eth_stats_common, brb_discards)},
        {"tx_error_drop_packets",
                offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},

        {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
        {"rx_mac_unicast_packets",
                offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
        {"rx_mac_multicast_packets",
                offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
        {"rx_mac_broadcast_packets",
                offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
        {"rx_mac_frames_ok",
                offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
        {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
        {"tx_mac_unicast_packets",
                offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
        {"tx_mac_multicast_packets",
                offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
        {"tx_mac_broadcast_packets",
                offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},

        {"lro_coalesced_packets",
                offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
        {"lro_coalesced_events",
                offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
        {"lro_aborts_num",
                offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
        {"lro_not_coalesced_packets",
                offsetof(struct ecore_eth_stats_common,
                         tpa_not_coalesced_pkts)},
        {"lro_coalesced_bytes",
                offsetof(struct ecore_eth_stats_common,
                         tpa_coalesced_bytes)},
};

static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
        {"rx_1519_to_1522_byte_packets",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb,
                         rx_1519_to_1522_byte_packets)},
        {"rx_1519_to_2047_byte_packets",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb,
                         rx_1519_to_2047_byte_packets)},
        {"rx_2048_to_4095_byte_packets",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb,
                         rx_2048_to_4095_byte_packets)},
        {"rx_4096_to_9216_byte_packets",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb,
                         rx_4096_to_9216_byte_packets)},
        {"rx_9217_to_16383_byte_packets",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb,
                         rx_9217_to_16383_byte_packets)},

        {"tx_1519_to_2047_byte_packets",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb,
                         tx_1519_to_2047_byte_packets)},
        {"tx_2048_to_4095_byte_packets",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb,
                         tx_2048_to_4095_byte_packets)},
        {"tx_4096_to_9216_byte_packets",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb,
                         tx_4096_to_9216_byte_packets)},
        {"tx_9217_to_16383_byte_packets",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb,
                         tx_9217_to_16383_byte_packets)},

        {"tx_lpi_entry_count",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
        {"tx_total_collisions",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
};

static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
        {"rx_1519_to_max_byte_packets",
                offsetof(struct ecore_eth_stats, ah) +
                offsetof(struct ecore_eth_stats_ah,
                         rx_1519_to_max_byte_packets)},
        {"tx_1519_to_max_byte_packets",
                offsetof(struct ecore_eth_stats, ah) +
                offsetof(struct ecore_eth_stats_ah,
                         tx_1519_to_max_byte_packets)},
};

static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
        {"rx_q_segments",
                offsetof(struct qede_rx_queue, rx_segs)},
        {"rx_q_hw_errors",
                offsetof(struct qede_rx_queue, rx_hw_errors)},
        {"rx_q_allocation_errors",
                offsetof(struct qede_rx_queue, rx_alloc_errors)}
};

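/* Service the slowpath status block of the given hwfn; called from the
 * interrupt handler below, which then re-arms the interrupt line.
 */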
static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
        ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}

static void
qede_interrupt_handler(void *param)
{
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        qede_interrupt_action(ECORE_LEADING_HWFN(edev));
        if (rte_intr_enable(eth_dev->intr_handle))
                DP_ERR(edev, "rte_intr_enable failed\n");
}

static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
        rte_memcpy(&qdev->dev_info, info, sizeof(*info));
        qdev->ops = qed_ops;
}

#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
static void qede_print_adapter_info(struct qede_dev *qdev)
{
        struct ecore_dev *edev = &qdev->edev;
        struct qed_dev_info *info = &qdev->dev_info.common;
        static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
        static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];

        DP_INFO(edev, "*********************************\n");
        DP_INFO(edev, " DPDK version:%s\n", rte_version());
        DP_INFO(edev, " Chip details : %s %c%d\n",
                  ECORE_IS_BB(edev) ? "BB" : "AH",
                  'A' + edev->chip_rev,
                  (int)edev->chip_metal);
        snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
                 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
        snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
                 ver_str, QEDE_PMD_VERSION);
        DP_INFO(edev, " Driver version : %s\n", drv_ver);
        DP_INFO(edev, " Firmware version : %s\n", ver_str);

        snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
                 "%d.%d.%d.%d",
                (info->mfw_rev >> 24) & 0xff,
                (info->mfw_rev >> 16) & 0xff,
                (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
        DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
        DP_INFO(edev, " Firmware file : %s\n", fw_file);
        DP_INFO(edev, "*********************************\n");
}
#endif

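/* Start an L2 vport (id 0) on every hwfn with the given MTU, then reset
 * the vport statistics.
 */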
static int
qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
{
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_start_params params;
        struct ecore_hwfn *p_hwfn;
        int rc;
        int i;

        memset(&params, 0, sizeof(params));
        params.vport_id = 0;
        params.mtu = mtu;
        /* @DPDK - Disable FW placement */
        params.zero_placement_offset = 1;
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                params.concrete_fid = p_hwfn->hw_info.concrete_fid;
                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_start(p_hwfn, &params);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Start V-PORT failed %d\n", rc);
                        return rc;
                }
        }
        ecore_reset_vport_stats(edev);
        DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);

        return 0;
}

static int
qede_stop_vport(struct ecore_dev *edev)
{
        struct ecore_hwfn *p_hwfn;
        uint8_t vport_id;
        int rc;
        int i;

        vport_id = 0;
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
                                         vport_id);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
                        return rc;
                }
        }

        return 0;
}

/* Activate or deactivate vport via vport-update */
int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params params;
        struct ecore_hwfn *p_hwfn;
        uint8_t i;
        int rc = -1;

        memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
        params.vport_id = 0;
        params.update_vport_active_rx_flg = 1;
        params.update_vport_active_tx_flg = 1;
        params.vport_active_rx_flg = flg;
        params.vport_active_tx_flg = flg;
#ifndef RTE_LIBRTE_QEDE_VF_TX_SWITCH
        if (IS_VF(edev)) {
                params.update_tx_switching_flg = 1;
                params.tx_switching_flg = !flg;
                DP_INFO(edev, "VF tx-switching is disabled\n");
        }
#endif
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &params,
                                ECORE_SPQ_MODE_EBLOCK, NULL);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Failed to update vport\n");
                        break;
                }
        }
        DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");

        return rc;
}

static void
qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
                           uint16_t mtu, bool enable)
{
        /* Enable LRO in split mode */
        sge_tpa_params->tpa_ipv4_en_flg = enable;
        sge_tpa_params->tpa_ipv6_en_flg = enable;
        sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
        sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
        /* set if tpa enable changes */
        sge_tpa_params->update_tpa_en_flg = 1;
        /* set if tpa parameters should be handled */
        sge_tpa_params->update_tpa_param_flg = enable;

        sge_tpa_params->max_buffers_per_cqe = 20;
        /* Enable TPA in split mode. In this mode each TPA segment
         * starts on the new BD, so there is one BD per segment.
         */
        sge_tpa_params->tpa_pkt_split_flg = 1;
        sge_tpa_params->tpa_hdr_data_split_flg = 0;
        sge_tpa_params->tpa_gro_consistent_flg = 0;
        sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
        sge_tpa_params->tpa_max_size = 0x7FFF;
        sge_tpa_params->tpa_min_size_to_start = mtu / 2;
        sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
}

/* Enable/disable LRO via vport-update */
int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params params;
        struct ecore_sge_tpa_params tpa_params;
        struct ecore_hwfn *p_hwfn;
        int rc;
        int i;

        memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
        memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
        qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
        params.vport_id = 0;
        params.sge_tpa_params = &tpa_params;
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &params,
                                ECORE_SPQ_MODE_EBLOCK, NULL);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Failed to update LRO\n");
                        return -1;
                }
        }
        qdev->enable_lro = flg;
        DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");

        return 0;
}

/* Update MTU via vport-update without doing port restart.
 * The vport must be deactivated before calling this API.
 */
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params params;
        struct ecore_hwfn *p_hwfn;
        int rc;
        int i;

        memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
        params.vport_id = 0;
        params.mtu = mtu;
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &params,
                                ECORE_SPQ_MODE_EBLOCK, NULL);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Failed to update MTU\n");
                        return -1;
                }
        }
        DP_INFO(edev, "MTU updated to %u\n", mtu);

        return 0;
}

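/* Common init for a unicast filter command: zero the struct and apply
 * the filter to both the RX and TX directions.
 */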
static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
{
        memset(ucast, 0, sizeof(struct ecore_filter_ucast));
        ucast->is_rx_filter = true;
        ucast->is_tx_filter = true;
        /* ucast->assert_on_error = true; - For debug */
}

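/* Translate the requested rx-mode into ecore accept flags. Matched
 * unicast/multicast and broadcast are always accepted; the promiscuous
 * modes additionally accept unmatched unicast and/or multicast.
 */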
static int
qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
                             enum qed_filter_rx_mode_type type)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_filter_accept_flags flags;

        memset(&flags, 0, sizeof(flags));

        flags.update_rx_mode_config = 1;
        flags.update_tx_mode_config = 1;
        flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
                ECORE_ACCEPT_MCAST_MATCHED |
                ECORE_ACCEPT_BCAST;

        flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
                ECORE_ACCEPT_MCAST_MATCHED |
                ECORE_ACCEPT_BCAST;

        if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
                flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
                if (IS_VF(edev)) {
                        flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
                        DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
                }
        } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
                flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
        } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
                                QED_FILTER_RX_MODE_TYPE_PROMISC)) {
                flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
                        ECORE_ACCEPT_MCAST_UNMATCHED;
        }

        return ecore_filter_accept_cmd(edev, 0, flags, false, false,
                        ECORE_SPQ_MODE_CB, NULL);
}

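/* Enable/disable VXLAN tunnel classification by sending a PF tunnel
 * config update to every hwfn; a PTT window is needed only on the PF.
 */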
static int
qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
                  bool enable, bool mask)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc = ECORE_INVAL;
        struct ecore_ptt *p_ptt;
        struct ecore_tunnel_info tunn;
        struct ecore_hwfn *p_hwfn;
        int i;

        memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
        tunn.vxlan.b_update_mode = enable;
        tunn.vxlan.b_mode_enabled = mask;
        tunn.b_update_rx_cls = true;
        tunn.b_update_tx_cls = true;
        tunn.vxlan.tun_cls = clss;

        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                if (IS_PF(edev)) {
                        p_ptt = ecore_ptt_acquire(p_hwfn);
                        if (!p_ptt)
                                return -EAGAIN;
                } else {
                        p_ptt = NULL;
                }
                rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
                                &tunn, ECORE_SPQ_MODE_CB, NULL);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Failed to update tunn_clss %u\n",
                                        tunn.vxlan.tun_cls);
                        if (IS_PF(edev))
                                ecore_ptt_release(p_hwfn, p_ptt);
                        break;
                }
        }

        if (rc == ECORE_SUCCESS) {
                qdev->vxlan.enable = enable;
                qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
                DP_INFO(edev, "vxlan is %s\n", enable ? "enabled" : "disabled");
        }

        return rc;
}

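/* Add or remove a unicast (MAC/VLAN/VNI) entry in the driver's shadow
 * list; duplicates are rejected on add and missing entries on delete.
 */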
static int
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
                  bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_ucast_entry *tmp = NULL;
        struct qede_ucast_entry *u;
        struct ether_addr *mac_addr;

        mac_addr  = (struct ether_addr *)ucast->mac;
        if (add) {
                SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
                        if ((memcmp(mac_addr, &tmp->mac,
                                    ETHER_ADDR_LEN) == 0) &&
                             ucast->vni == tmp->vni &&
                             ucast->vlan == tmp->vlan) {
                                DP_ERR(edev, "Unicast MAC is already added"
                                       " with vlan = %u, vni = %u\n",
                                       ucast->vlan,  ucast->vni);
                                return -EEXIST;
                        }
                }
                u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
                               RTE_CACHE_LINE_SIZE);
                if (!u) {
                        DP_ERR(edev, "Did not allocate memory for ucast\n");
                        return -ENOMEM;
                }
                ether_addr_copy(mac_addr, &u->mac);
                u->vlan = ucast->vlan;
                u->vni = ucast->vni;
                SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
                qdev->num_uc_addr++;
        } else {
                SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
                        if ((memcmp(mac_addr, &tmp->mac,
                                    ETHER_ADDR_LEN) == 0) &&
                            ucast->vlan == tmp->vlan      &&
                            ucast->vni == tmp->vni)
                                break;
                }
                if (tmp == NULL) {
                        DP_INFO(edev, "Unicast MAC is not found\n");
                        return -EINVAL;
                }
                SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
                qdev->num_uc_addr--;
        }

        return 0;
}

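/* Same bookkeeping for multicast MACs: keep a shadow list so the full
 * set can be re-programmed whenever an entry is added or removed.
 */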
static int
qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
                  bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ether_addr *mac_addr;
        struct qede_mcast_entry *tmp = NULL;
        struct qede_mcast_entry *m;

        mac_addr  = (struct ether_addr *)mcast->mac;
        if (add) {
                SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
                        if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
                                DP_ERR(edev,
                                        "Multicast MAC is already added\n");
                                return -EEXIST;
                        }
                }
                m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
                        RTE_CACHE_LINE_SIZE);
                if (!m) {
                        DP_ERR(edev,
                                "Did not allocate memory for mcast\n");
                        return -ENOMEM;
                }
                ether_addr_copy(mac_addr, &m->mac);
                SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
                qdev->num_mc_addr++;
        } else {
                SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
                        if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
                                break;
                }
                if (tmp == NULL) {
                        DP_INFO(edev, "Multicast mac is not found\n");
                        return -EINVAL;
                }
                SLIST_REMOVE(&qdev->mc_list_head, tmp,
                             qede_mcast_entry, list);
                qdev->num_mc_addr--;
        }

        return 0;
}

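/* Common MAC add/remove entry point: multicast addresses re-program the
 * whole shadow multicast list in one command, unicast addresses are
 * programmed individually. Table limits are checked before adding.
 */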
static enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
                 bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc;
        struct ecore_filter_mcast mcast;
        struct qede_mcast_entry *tmp;
        uint16_t j = 0;

        /* Multicast */
        if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
                if (add) {
                        if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
                                DP_ERR(edev,
                                       "Mcast filter table limit exceeded, "
                                       "Please enable mcast promisc mode\n");
                                return -ECORE_INVAL;
                        }
                }
                rc = qede_mcast_filter(eth_dev, ucast, add);
                if (rc == 0) {
                        DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
                        memset(&mcast, 0, sizeof(mcast));
                        mcast.num_mc_addrs = qdev->num_mc_addr;
                        mcast.opcode = ECORE_FILTER_ADD;
                        SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
                                ether_addr_copy(&tmp->mac,
                                        (struct ether_addr *)&mcast.mac[j]);
                                j++;
                        }
                        rc = ecore_filter_mcast_cmd(edev, &mcast,
                                                    ECORE_SPQ_MODE_CB, NULL);
                }
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Failed to add multicast filter"
                               " rc = %d, op = %d\n", rc, add);
                }
        } else { /* Unicast */
                if (add) {
                        if (qdev->num_uc_addr >=
                            qdev->dev_info.num_mac_filters) {
                                DP_ERR(edev,
                                       "Ucast filter table limit exceeded,"
                                       " Please enable promisc mode\n");
                                return -ECORE_INVAL;
                        }
                }
                rc = qede_ucast_filter(eth_dev, ucast, add);
                if (rc == 0)
                        rc = ecore_filter_ucast_cmd(edev, ucast,
                                                    ECORE_SPQ_MODE_CB, NULL);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
                               rc, add);
                }
        }

        return rc;
}

static int
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
                  __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
        struct ecore_filter_ucast ucast;
        int re;

        qede_set_ucast_cmn_params(&ucast);
        ucast.type = ECORE_FILTER_MAC;
        ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
        re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
        return re;
}

static void
qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct ecore_filter_ucast ucast;

        PMD_INIT_FUNC_TRACE(edev);

        if (index >= qdev->dev_info.num_mac_filters) {
                DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
                       index, qdev->dev_info.num_mac_filters);
                return;
        }

        qede_set_ucast_cmn_params(&ucast);
        ucast.opcode = ECORE_FILTER_REMOVE;
        ucast.type = ECORE_FILTER_MAC;

        /* Use the index maintained by rte */
        ether_addr_copy(&eth_dev->data->mac_addrs[index],
                        (struct ether_addr *)&ucast.mac);

        ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
}

static void
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
                                               mac_addr->addr_bytes)) {
                DP_ERR(edev, "Setting MAC address is not allowed\n");
                ether_addr_copy(&qdev->primary_mac,
                                &eth_dev->data->mac_addrs[0]);
                return;
        }

        qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}

static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params params;
        struct ecore_hwfn *p_hwfn;
        uint8_t i;
        int rc;

        memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
        params.vport_id = 0;
        params.update_accept_any_vlan_flg = 1;
        params.accept_any_vlan = flg;
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &params,
                                ECORE_SPQ_MODE_EBLOCK, NULL);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Failed to configure accept-any-vlan\n");
                        return;
                }
        }

        DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
}

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params params;
        struct ecore_hwfn *p_hwfn;
        uint8_t i;
        int rc;

        memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
        params.vport_id = 0;
        params.update_inner_vlan_removal_flg = 1;
        params.inner_vlan_removal_flg = flg;
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &params,
                                ECORE_SPQ_MODE_EBLOCK, NULL);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Failed to update vport\n");
                        return -1;
                }
        }

        DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
        return 0;
}

static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
                                uint16_t vlan_id, int on)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qed_dev_eth_info *dev_info = &qdev->dev_info;
        struct qede_vlan_entry *tmp = NULL;
        struct qede_vlan_entry *vlan;
        struct ecore_filter_ucast ucast;
        int rc;

        if (on) {
                if (qdev->configured_vlans == dev_info->num_vlan_filters) {
                        DP_ERR(edev, "Reached max VLAN filter limit,"
                                      " enabling accept_any_vlan\n");
                        qede_config_accept_any_vlan(qdev, true);
                        return 0;
                }

                SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
                        if (tmp->vid == vlan_id) {
                                DP_ERR(edev, "VLAN %u already configured\n",
                                       vlan_id);
                                return -EEXIST;
                        }
                }

                vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
                                  RTE_CACHE_LINE_SIZE);

                if (!vlan) {
                        DP_ERR(edev, "Did not allocate memory for VLAN\n");
                        return -ENOMEM;
                }

                qede_set_ucast_cmn_params(&ucast);
                ucast.opcode = ECORE_FILTER_ADD;
                ucast.type = ECORE_FILTER_VLAN;
                ucast.vlan = vlan_id;
                rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
                                            NULL);
                if (rc != 0) {
                        DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
                               rc);
                        rte_free(vlan);
                } else {
                        vlan->vid = vlan_id;
                        SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
                        qdev->configured_vlans++;
                        DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
                                vlan_id, qdev->configured_vlans);
                }
        } else {
                SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
                        if (tmp->vid == vlan_id)
                                break;
                }

                if (!tmp) {
                        if (qdev->configured_vlans == 0) {
                                DP_INFO(edev,
                                        "No VLAN filters configured yet\n");
                                return 0;
                        }

                        DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
                        return -EINVAL;
                }

                SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);

                qede_set_ucast_cmn_params(&ucast);
                ucast.opcode = ECORE_FILTER_REMOVE;
                ucast.type = ECORE_FILTER_VLAN;
                ucast.vlan = vlan_id;
                rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
                                            NULL);
                if (rc != 0) {
                        DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
                               vlan_id, rc);
                } else {
                        qdev->configured_vlans--;
                        DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
                                vlan_id, qdev->configured_vlans);
                }
        }

        return rc;
}

static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

        if (mask & ETH_VLAN_STRIP_MASK) {
                if (rxmode->hw_vlan_strip)
                        (void)qede_vlan_stripping(eth_dev, 1);
                else
                        (void)qede_vlan_stripping(eth_dev, 0);
        }

        if (mask & ETH_VLAN_FILTER_MASK) {
                /* VLAN filtering kicks in when a VLAN is added */
                if (rxmode->hw_vlan_filter) {
                        qede_vlan_filter_set(eth_dev, 0, 1);
                } else {
                        if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
                                DP_ERR(edev,
                                  " Please remove existing VLAN filters"
                                  " before disabling VLAN filtering\n");
                                /* Signal app that VLAN filtering is still
                                 * enabled
                                 */
                                rxmode->hw_vlan_filter = true;
                        } else {
                                qede_vlan_filter_set(eth_dev, 0, 0);
                        }
                }
        }

        if (mask & ETH_VLAN_EXTEND_MASK)
                DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
                        " and classification is based on outer tag only\n");

        DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
                mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);

        return 0;
}

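/* Fill the RSS key with pseudo-random words from libc rand(); this is
 * not cryptographically strong, which is acceptable for RSS hashing.
 */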
static void qede_prandom_bytes(uint32_t *buff)
{
        uint8_t i;

        srand((unsigned int)time(NULL));
        for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
                buff[i] = rand();
}

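/* Apply the default RSS configuration: a driver-generated key when the
 * application supplies none, plus a RETA that spreads entries
 * round-robin across the configured Rx queues.
 */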
int qede_config_rss(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
#endif
        uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
        struct rte_eth_rss_reta_entry64 reta_conf[2];
        struct rte_eth_rss_conf rss_conf;
        uint32_t i, id, pos, q;

        rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
        if (!rss_conf.rss_key) {
                DP_INFO(edev, "Applying driver default key\n");
                rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
                qede_prandom_bytes(&def_rss_key[0]);
                rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
        }

        /* Configure RSS hash */
        if (qede_rss_hash_update(eth_dev, &rss_conf))
                return -EINVAL;

        /* Configure default RETA */
        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
                reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

        for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
                id = i / RTE_RETA_GROUP_SIZE;
                pos = i % RTE_RETA_GROUP_SIZE;
                q = i % QEDE_RSS_COUNT(qdev);
                reta_conf[id].reta[pos] = q;
        }
        if (qede_rss_reta_update(eth_dev, &reta_conf[0],
                                 ECORE_RSS_IND_TABLE_SIZE))
                return -EINVAL;

        return 0;
}

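/* Start/resume fastpath processing on every hwfn. */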
static void qede_fastpath_start(struct ecore_dev *edev)
{
        struct ecore_hwfn *p_hwfn;
        int i;

        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                ecore_hw_start_fastpath(p_hwfn);
        }
}

static int qede_dev_start(struct rte_eth_dev *eth_dev)
{
        struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        PMD_INIT_FUNC_TRACE(edev);

        /* Update MTU only if it has changed */
        if (qdev->mtu != qdev->new_mtu) {
                if (qede_update_mtu(eth_dev, qdev->new_mtu))
                        goto err;
                qdev->mtu = qdev->new_mtu;
        }

        /* Configure TPA parameters */
        if (rxmode->enable_lro) {
                if (qede_enable_tpa(eth_dev, true))
                        return -EINVAL;
                /* Enable scatter mode for LRO */
                if (!rxmode->enable_scatter)
                        eth_dev->data->scattered_rx = 1;
        }

        /* Start queues */
        if (qede_start_queues(eth_dev))
                goto err;

        /* Newer SR-IOV PF driver expects RX/TX queues to be started before
         * enabling RSS. Hence RSS configuration is deferred up to this point.
         * Also, we would like to retain similar behavior in PF case, so we
         * don't do PF/VF specific check here.
         */
        if (rxmode->mq_mode == ETH_MQ_RX_RSS)
                if (qede_config_rss(eth_dev))
                        goto err;

        /* Enable vport */
        if (qede_activate_vport(eth_dev, true))
                goto err;

        /* Bring-up the link */
        qede_dev_set_link_state(eth_dev, true);

        /* Update link status */
        qede_link_update(eth_dev, 0);

        /* Start/resume traffic */
        qede_fastpath_start(edev);

        DP_INFO(edev, "Device started\n");

        return 0;
err:
        DP_ERR(edev, "Device start failed\n");
        return -1; /* common error code is < 0 */
}

static void qede_dev_stop(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        PMD_INIT_FUNC_TRACE(edev);

        /* Disable vport */
        if (qede_activate_vport(eth_dev, false))
                return;

        if (qdev->enable_lro)
                qede_enable_tpa(eth_dev, false);

        /* Stop queues */
        qede_stop_queues(eth_dev);

        /* Disable traffic */
        ecore_hw_stop_fastpath(edev); /* TBD - loop */

        /* Bring the link down */
        qede_dev_set_link_state(eth_dev, false);

        DP_INFO(edev, "Device is stopped\n");
}

static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
        int ret;

        PMD_INIT_FUNC_TRACE(edev);

        /* Check requirements for 100G mode */
        if (ECORE_IS_CMT(edev)) {
                if (eth_dev->data->nb_rx_queues < 2 ||
                    eth_dev->data->nb_tx_queues < 2) {
                        DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
                        return -EINVAL;
                }

                if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
                    (eth_dev->data->nb_tx_queues % 2 != 0)) {
                        DP_ERR(edev,
                               "100G mode needs even no. of RX/TX queues\n");
                        return -EINVAL;
                }
        }

        /* Sanity checks and throw warnings */
        if (rxmode->enable_scatter)
                eth_dev->data->scattered_rx = 1;

        if (!rxmode->hw_strip_crc)
                DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");

        if (!rxmode->hw_ip_checksum)
                DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
                              "in hw\n");
        if (rxmode->header_split)
                DP_INFO(edev, "Header split enable is not supported\n");
        if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
                                ETH_MQ_RX_RSS)) {
                DP_ERR(edev, "Unsupported multi-queue mode\n");
                return -ENOTSUP;
        }
        /* Flow director mode check */
        if (qede_check_fdir_support(eth_dev))
                return -ENOTSUP;

        /* Deallocate resources held previously. This is needed only if the
         * queue count has changed from the previous configuration. If it is
         * going to change, RX/TX queue setup will be called again and the
         * fastpath pointers will be reinitialized there.
         */
        if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
            qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
                qede_dealloc_fp_resc(eth_dev);
                /* Proceed with updated queue count */
                qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
                qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
                if (qede_alloc_fp_resc(qdev))
                        return -ENOMEM;
        }

        /* VF's MTU has to be set using vport-start, whereas
1273          * PF's MTU can be updated via vport-update.
1274          */
1275         if (IS_VF(edev)) {
1276                 if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
1277                         return -1;
1278         } else {
1279                 if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
1280                         return -1;
1281         }
1282
1283         qdev->mtu = rxmode->max_rx_pkt_len;
1284         qdev->new_mtu = qdev->mtu;
1285
1286         /* Enable VLAN offloads by default */
1287         ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
1288                         ETH_VLAN_FILTER_MASK |
1289                         ETH_VLAN_EXTEND_MASK);
1290         if (ret)
1291                 return ret;
1292
1293         DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
1294                         QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
1295
1296         return 0;
1297 }
1298
1299 /* Info about HW descriptor ring limitations */
1300 static const struct rte_eth_desc_lim qede_rx_desc_lim = {
1301         .nb_max = 0x8000, /* 32K */
1302         .nb_min = 128,
1303         .nb_align = 128 /* lowest common multiple */
1304 };
1305
1306 static const struct rte_eth_desc_lim qede_tx_desc_lim = {
1307         .nb_max = 0x8000, /* 32K */
1308         .nb_min = 256,
1309         .nb_align = 256,
1310         .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
1311         .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
1312 };
1313
1314 static void
1315 qede_dev_info_get(struct rte_eth_dev *eth_dev,
1316                   struct rte_eth_dev_info *dev_info)
1317 {
1318         struct qede_dev *qdev = eth_dev->data->dev_private;
1319         struct ecore_dev *edev = &qdev->edev;
1320         struct qed_link_output link;
1321         uint32_t speed_cap = 0;
1322
1323         PMD_INIT_FUNC_TRACE(edev);
1324
1325         dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1326         dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
1327         dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
1328         dev_info->rx_desc_lim = qede_rx_desc_lim;
1329         dev_info->tx_desc_lim = qede_tx_desc_lim;
1330
1331         if (IS_PF(edev))
1332                 dev_info->max_rx_queues = (uint16_t)RTE_MIN(
1333                         QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
1334         else
1335                 dev_info->max_rx_queues = (uint16_t)RTE_MIN(
1336                         QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
1337         dev_info->max_tx_queues = dev_info->max_rx_queues;
1338
1339         dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
1340         dev_info->max_vfs = 0;
1341         dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
1342         dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
1343         dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
1344
1345         dev_info->default_txconf = (struct rte_eth_txconf) {
1346                 .txq_flags = QEDE_TXQ_FLAGS,
1347         };
1348
1349         dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP  |
1350                                      DEV_RX_OFFLOAD_IPV4_CKSUM  |
1351                                      DEV_RX_OFFLOAD_UDP_CKSUM   |
1352                                      DEV_RX_OFFLOAD_TCP_CKSUM   |
1353                                      DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1354                                      DEV_RX_OFFLOAD_TCP_LRO);
1355
1356         dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
1357                                      DEV_TX_OFFLOAD_IPV4_CKSUM  |
1358                                      DEV_TX_OFFLOAD_UDP_CKSUM   |
1359                                      DEV_TX_OFFLOAD_TCP_CKSUM   |
1360                                      DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1361                                      DEV_TX_OFFLOAD_TCP_TSO |
1362                                      DEV_TX_OFFLOAD_VXLAN_TNL_TSO);
1363
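        /* Translate the NVM advertised-speed mask into ethdev speed
         * capability flags.
         */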
1364         memset(&link, 0, sizeof(struct qed_link_output));
1365         qdev->ops->common->get_link(edev, &link);
1366         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1367                 speed_cap |= ETH_LINK_SPEED_1G;
1368         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1369                 speed_cap |= ETH_LINK_SPEED_10G;
1370         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1371                 speed_cap |= ETH_LINK_SPEED_25G;
1372         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1373                 speed_cap |= ETH_LINK_SPEED_40G;
1374         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1375                 speed_cap |= ETH_LINK_SPEED_50G;
1376         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1377                 speed_cap |= ETH_LINK_SPEED_100G;
1378         dev_info->speed_capa = speed_cap;
1379 }
1380
1381 /* return 0 means link status changed, -1 means not changed */
1382 int
1383 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
1384 {
1385         struct qede_dev *qdev = eth_dev->data->dev_private;
1386         struct ecore_dev *edev = &qdev->edev;
1387         uint16_t link_duplex, old_link_status;
1388         struct qed_link_output link;
1389         struct rte_eth_link *curr = &eth_dev->data->dev_link;
1390
1391         memset(&link, 0, sizeof(struct qed_link_output));
1392         qdev->ops->common->get_link(edev, &link);
1393
1394         /* Link Speed */
1395         curr->link_speed = link.speed;
1396
1397         /* Link Mode */
1398         switch (link.duplex) {
1399         case QEDE_DUPLEX_HALF:
1400                 link_duplex = ETH_LINK_HALF_DUPLEX;
1401                 break;
1402         case QEDE_DUPLEX_FULL:
1403                 link_duplex = ETH_LINK_FULL_DUPLEX;
1404                 break;
1405         case QEDE_DUPLEX_UNKNOWN:
1406         default:
1407                 link_duplex = -1;
1408         }
1409         curr->link_duplex = link_duplex;
1410
1411         /* Link Status (keep the previous value to detect a change) */
             old_link_status = curr->link_status;
1412         curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
1413
1414         /* AN */
1415         curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
1416                              ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1417
1418         DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
1419                 curr->link_speed, curr->link_duplex,
1420                 curr->link_autoneg, curr->link_status);
1421
1422         /* return 0 means link status changed, -1 means not changed */
1423         return ((curr->link_status == old_link_status) ? -1 : 0);
1424 }
1425
1426 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
1427 {
1428 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
1429         struct qede_dev *qdev = eth_dev->data->dev_private;
1430         struct ecore_dev *edev = &qdev->edev;
1431
1432         PMD_INIT_FUNC_TRACE(edev);
1433 #endif
1434
1435         enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
1436
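        /* The Rx mode is programmed as a single command, so fold in
         * multicast-promisc when allmulticast is currently enabled to
         * avoid clobbering that state.
         */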
1437         if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1438                 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1439
1440         qed_configure_filter_rx_mode(eth_dev, type);
1441 }
1442
1443 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
1444 {
1445 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
1446         struct qede_dev *qdev = eth_dev->data->dev_private;
1447         struct ecore_dev *edev = &qdev->edev;
1448
1449         PMD_INIT_FUNC_TRACE(edev);
1450 #endif
1451
1452         if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1453                 qed_configure_filter_rx_mode(eth_dev,
1454                                 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
1455         else
1456                 qed_configure_filter_rx_mode(eth_dev,
1457                                 QED_FILTER_RX_MODE_TYPE_REGULAR);
1458 }
1459
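/* Periodic alarm callback for CMT (dual-hwfn) devices: a single MSI-X
 * vector cannot service both engines, so poll the slowpath status block
 * of each hwfn and re-arm the alarm.
 */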
1460 static void qede_poll_sp_sb_cb(void *param)
1461 {
1462         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1463         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1464         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1465         int rc;
1466
1467         qede_interrupt_action(ECORE_LEADING_HWFN(edev));
1468         qede_interrupt_action(&edev->hwfns[1]);
1469
1470         rc = rte_eal_alarm_set(timer_period * US_PER_S,
1471                                qede_poll_sp_sb_cb,
1472                                (void *)eth_dev);
1473         if (rc != 0) {
1474                 DP_ERR(edev, "Unable to start periodic"
1475                              " timer rc %d\n", rc);
1476                 assert(false && "Unable to start periodic timer");
1477         }
1478 }
1479
1480 static void qede_dev_close(struct rte_eth_dev *eth_dev)
1481 {
1482         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1483         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1484         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1485
1486         PMD_INIT_FUNC_TRACE(edev);
1487
1488         /* dev_stop() shall clean up fp resources in HW, but without releasing
1489          * DMA memories and SW structures, so that dev_start() can be called
1490          * by the app without reconfiguration. In dev_close(), however, we
1491          * release all the resources so that the device can be brought up anew.
1492          */
1493         if (eth_dev->data->dev_started)
1494                 qede_dev_stop(eth_dev);
1495
1496         qede_stop_vport(edev);
1497         qede_fdir_dealloc_resc(eth_dev);
1498         qede_dealloc_fp_resc(eth_dev);
1499
1500         eth_dev->data->nb_rx_queues = 0;
1501         eth_dev->data->nb_tx_queues = 0;
1502
1503         qdev->ops->common->slowpath_stop(edev);
1504         qdev->ops->common->remove(edev);
1505         rte_intr_disable(&pci_dev->intr_handle);
1506         rte_intr_callback_unregister(&pci_dev->intr_handle,
1507                                      qede_interrupt_handler, (void *)eth_dev);
1508         if (ECORE_IS_CMT(edev))
1509                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
1510 }
1511
1512 static int
1513 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
1514 {
1515         struct qede_dev *qdev = eth_dev->data->dev_private;
1516         struct ecore_dev *edev = &qdev->edev;
1517         struct ecore_eth_stats stats;
1518         unsigned int i = 0, j = 0, qid;
1519         unsigned int rxq_stat_cntrs, txq_stat_cntrs;
1520         struct qede_tx_queue *txq;
1521
1522         ecore_get_vport_stats(edev, &stats);
1523
1524         /* RX Stats */
1525         eth_stats->ipackets = stats.common.rx_ucast_pkts +
1526             stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;
1527
1528         eth_stats->ibytes = stats.common.rx_ucast_bytes +
1529             stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;
1530
1531         eth_stats->ierrors = stats.common.rx_crc_errors +
1532             stats.common.rx_align_errors +
1533             stats.common.rx_carrier_errors +
1534             stats.common.rx_oversize_packets +
1535             stats.common.rx_jabbers + stats.common.rx_undersize_packets;
1536
1537         eth_stats->rx_nombuf = stats.common.no_buff_discards;
1538
1539         eth_stats->imissed = stats.common.mftag_filter_discards +
1540             stats.common.mac_filter_discards +
1541             stats.common.no_buff_discards +
1542             stats.common.brb_truncates + stats.common.brb_discards;
1543
1544         /* TX stats */
1545         eth_stats->opackets = stats.common.tx_ucast_pkts +
1546             stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;
1547
1548         eth_stats->obytes = stats.common.tx_ucast_bytes +
1549             stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;
1550
1551         eth_stats->oerrors = stats.common.tx_err_drop_pkts;
1552
1553         /* Queue stats */
1554         rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1555                                RTE_ETHDEV_QUEUE_STAT_CNTRS);
1556         txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
1557                                RTE_ETHDEV_QUEUE_STAT_CNTRS);
1558         if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
1559             (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
1560                 DP_VERBOSE(edev, ECORE_MSG_DEBUG,
1561                        "Not all the queue stats will be displayed. Set"
1562                        " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
1563                        " appropriately and retry.\n");
1564
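        /* Per-queue counters are read directly from each queue structure
         * via offsetof() on the relevant stats fields.
         */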
1565         for_each_rss(qid) {
1566                 eth_stats->q_ipackets[i] =
1567                         *(uint64_t *)(
1568                                 ((char *)(qdev->fp_array[qid].rxq)) +
1569                                 offsetof(struct qede_rx_queue,
1570                                 rcv_pkts));
1571                 eth_stats->q_errors[i] =
1572                         *(uint64_t *)(
1573                                 ((char *)(qdev->fp_array[qid].rxq)) +
1574                                 offsetof(struct qede_rx_queue,
1575                                 rx_hw_errors)) +
1576                         *(uint64_t *)(
1577                                 ((char *)(qdev->fp_array[qid].rxq)) +
1578                                 offsetof(struct qede_rx_queue,
1579                                 rx_alloc_errors));
1580                 i++;
1581                 if (i == rxq_stat_cntrs)
1582                         break;
1583         }
1584
1585         for_each_tss(qid) {
1586                 txq = qdev->fp_array[qid].txq;
1587                 eth_stats->q_opackets[j] =
1588                         *(uint64_t *)(
1589                                 ((char *)(txq)) +
1590                                 offsetof(struct qede_tx_queue,
1591                                 xmit_pkts));
1592                 j++;
1593                 if (j == txq_stat_cntrs)
1594                         break;
1595         }
1596
1597         return 0;
1598 }
1599
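/* Number of extended stats: the per-device set differs between BB and
 * AH chips, plus per-Rx-queue counters capped by
 * RTE_ETHDEV_QUEUE_STAT_CNTRS.
 */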
1600 static unsigned int qede_get_xstats_count(struct qede_dev *qdev)
1601 {
1602         if (ECORE_IS_BB(&qdev->edev))
1603                 return RTE_DIM(qede_xstats_strings) +
1604                        RTE_DIM(qede_bb_xstats_strings) +
1605                        (RTE_DIM(qede_rxq_xstats_strings) *
1606                         RTE_MIN(QEDE_RSS_COUNT(qdev),
1607                                 RTE_ETHDEV_QUEUE_STAT_CNTRS));
1608         else
1609                 return RTE_DIM(qede_xstats_strings) +
1610                        RTE_DIM(qede_ah_xstats_strings) +
1611                        (RTE_DIM(qede_rxq_xstats_strings) *
1612                         RTE_MIN(QEDE_RSS_COUNT(qdev),
1613                                 RTE_ETHDEV_QUEUE_STAT_CNTRS));
1614 }
1615
1616 static int
1617 qede_get_xstats_names(struct rte_eth_dev *dev,
1618                       struct rte_eth_xstat_name *xstats_names,
1619                       __rte_unused unsigned int limit)
1620 {
1621         struct qede_dev *qdev = dev->data->dev_private;
1622         struct ecore_dev *edev = &qdev->edev;
1623         const unsigned int stat_cnt = qede_get_xstats_count(qdev);
1624         unsigned int i, qid, stat_idx = 0;
1625         unsigned int rxq_stat_cntrs;
1626
1627         if (xstats_names != NULL) {
1628                 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1629                         snprintf(xstats_names[stat_idx].name,
1630                                 sizeof(xstats_names[stat_idx].name),
1631                                 "%s",
1632                                 qede_xstats_strings[i].name);
1633                         stat_idx++;
1634                 }
1635
1636                 if (ECORE_IS_BB(edev)) {
1637                         for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
1638                                 snprintf(xstats_names[stat_idx].name,
1639                                         sizeof(xstats_names[stat_idx].name),
1640                                         "%s",
1641                                         qede_bb_xstats_strings[i].name);
1642                                 stat_idx++;
1643                         }
1644                 } else {
1645                         for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
1646                                 snprintf(xstats_names[stat_idx].name,
1647                                         sizeof(xstats_names[stat_idx].name),
1648                                         "%s",
1649                                         qede_ah_xstats_strings[i].name);
1650                                 stat_idx++;
1651                         }
1652                 }
1653
1654                 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1655                                          RTE_ETHDEV_QUEUE_STAT_CNTRS);
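                /* Build per-queue names by splicing the queue id into the
                 * template: the first four characters (e.g. "rx_q") are
                 * kept, the id is printed, then the remainder follows.
                 */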
1656                 for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1657                         for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1658                                 snprintf(xstats_names[stat_idx].name,
1659                                         sizeof(xstats_names[stat_idx].name),
1660                                         "%.4s%d%s",
1661                                         qede_rxq_xstats_strings[i].name, qid,
1662                                         qede_rxq_xstats_strings[i].name + 4);
1663                                 stat_idx++;
1664                         }
1665                 }
1666         }
1667
1668         return stat_cnt;
1669 }
1670
1671 static int
1672 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1673                 unsigned int n)
1674 {
1675         struct qede_dev *qdev = dev->data->dev_private;
1676         struct ecore_dev *edev = &qdev->edev;
1677         struct ecore_eth_stats stats;
1678         const unsigned int num = qede_get_xstats_count(qdev);
1679         unsigned int i, qid, stat_idx = 0;
1680         unsigned int rxq_stat_cntrs;
1681
1682         if (n < num)
1683                 return num;
1684
1685         ecore_get_vport_stats(edev, &stats);
1686
1687         for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1688                 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
1689                                              qede_xstats_strings[i].offset);
1690                 xstats[stat_idx].id = stat_idx;
1691                 stat_idx++;
1692         }
1693
1694         if (ECORE_IS_BB(edev)) {
1695                 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
1696                         xstats[stat_idx].value =
1697                                         *(uint64_t *)(((char *)&stats) +
1698                                         qede_bb_xstats_strings[i].offset);
1699                         xstats[stat_idx].id = stat_idx;
1700                         stat_idx++;
1701                 }
1702         } else {
1703                 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
1704                         xstats[stat_idx].value =
1705                                         *(uint64_t *)(((char *)&stats) +
1706                                         qede_ah_xstats_strings[i].offset);
1707                         xstats[stat_idx].id = stat_idx;
1708                         stat_idx++;
1709                 }
1710         }
1711
1712         rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1713                                  RTE_ETHDEV_QUEUE_STAT_CNTRS);
1714         for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1715                 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1716                         xstats[stat_idx].value = *(uint64_t *)(
1717                                 ((char *)(qdev->fp_array[qid].rxq)) +
1718                                  qede_rxq_xstats_strings[i].offset);
1719                         xstats[stat_idx].id = stat_idx;
1720                         stat_idx++;
1721                 }
1722         }
1725
1726         return stat_idx;
1727 }
1728
1729 static void
1730 qede_reset_xstats(struct rte_eth_dev *dev)
1731 {
1732         struct qede_dev *qdev = dev->data->dev_private;
1733         struct ecore_dev *edev = &qdev->edev;
1734
1735         ecore_reset_vport_stats(edev);
1736 }
1737
1738 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
1739 {
1740         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1741         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1742         struct qed_link_params link_params;
1743         int rc;
1744
1745         DP_INFO(edev, "setting link state %d\n", link_up);
1746         memset(&link_params, 0, sizeof(link_params));
1747         link_params.link_up = link_up;
1748         rc = qdev->ops->common->set_link(edev, &link_params);
1749         if (rc != ECORE_SUCCESS)
1750                 DP_ERR(edev, "Unable to set link state %d\n", link_up);
1751
1752         return rc;
1753 }
1754
1755 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
1756 {
1757         return qede_dev_set_link_state(eth_dev, true);
1758 }
1759
1760 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
1761 {
1762         return qede_dev_set_link_state(eth_dev, false);
1763 }
1764
1765 static void qede_reset_stats(struct rte_eth_dev *eth_dev)
1766 {
1767         struct qede_dev *qdev = eth_dev->data->dev_private;
1768         struct ecore_dev *edev = &qdev->edev;
1769
1770         ecore_reset_vport_stats(edev);
1771 }
1772
1773 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
1774 {
1775         enum qed_filter_rx_mode_type type =
1776             QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1777
1778         if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1779                 type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
1780
1781         qed_configure_filter_rx_mode(eth_dev, type);
1782 }
1783
1784 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
1785 {
1786         if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1787                 qed_configure_filter_rx_mode(eth_dev,
1788                                 QED_FILTER_RX_MODE_TYPE_PROMISC);
1789         else
1790                 qed_configure_filter_rx_mode(eth_dev,
1791                                 QED_FILTER_RX_MODE_TYPE_REGULAR);
1792 }
1793
1794 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
1795                               struct rte_eth_fc_conf *fc_conf)
1796 {
1797         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1798         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1799         struct qed_link_output current_link;
1800         struct qed_link_params params;
1801
1802         memset(&current_link, 0, sizeof(current_link));
1803         qdev->ops->common->get_link(edev, &current_link);
1804
1805         memset(&params, 0, sizeof(params));
1806         params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
1807         if (fc_conf->autoneg) {
1808                 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
1809                         DP_ERR(edev, "Autoneg not supported\n");
1810                         return -EINVAL;
1811                 }
1812                 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
1813         }
1814
1815         /* Pause is assumed to be supported (SUPPORTED_Pause) */
1816         if (fc_conf->mode == RTE_FC_FULL)
1817                 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
1818                                         QED_LINK_PAUSE_RX_ENABLE);
1819         if (fc_conf->mode == RTE_FC_TX_PAUSE)
1820                 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1821         if (fc_conf->mode == RTE_FC_RX_PAUSE)
1822                 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1823
1824         params.link_up = true;
1825         (void)qdev->ops->common->set_link(edev, &params);
1826
1827         return 0;
1828 }
1829
1830 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
1831                               struct rte_eth_fc_conf *fc_conf)
1832 {
1833         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1834         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1835         struct qed_link_output current_link;
1836
1837         memset(&current_link, 0, sizeof(current_link));
1838         qdev->ops->common->get_link(edev, &current_link);
1839
1840         if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1841                 fc_conf->autoneg = true;
1842
1843         if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
1844                                          QED_LINK_PAUSE_TX_ENABLE))
1845                 fc_conf->mode = RTE_FC_FULL;
1846         else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
1847                 fc_conf->mode = RTE_FC_RX_PAUSE;
1848         else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
1849                 fc_conf->mode = RTE_FC_TX_PAUSE;
1850         else
1851                 fc_conf->mode = RTE_FC_NONE;
1852
1853         return 0;
1854 }
1855
1856 static const uint32_t *
1857 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
1858 {
1859         static const uint32_t ptypes[] = {
1860                 RTE_PTYPE_L2_ETHER,
1861                 RTE_PTYPE_L2_ETHER_VLAN,
1862                 RTE_PTYPE_L3_IPV4,
1863                 RTE_PTYPE_L3_IPV6,
1864                 RTE_PTYPE_L4_TCP,
1865                 RTE_PTYPE_L4_UDP,
1866                 RTE_PTYPE_TUNNEL_VXLAN,
1867                 RTE_PTYPE_L4_FRAG,
1868                 /* Inner */
1869                 RTE_PTYPE_INNER_L2_ETHER,
1870                 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1871                 RTE_PTYPE_INNER_L3_IPV4,
1872                 RTE_PTYPE_INNER_L3_IPV6,
1873                 RTE_PTYPE_INNER_L4_TCP,
1874                 RTE_PTYPE_INNER_L4_UDP,
1875                 RTE_PTYPE_INNER_L4_FRAG,
1876                 RTE_PTYPE_UNKNOWN
1877         };
1878
1879         if (eth_dev->rx_pkt_burst == qede_recv_pkts)
1880                 return ptypes;
1881
1882         return NULL;
1883 }
1884
1885 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
1886 {
1887         *rss_caps = 0;
1888         *rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
1889         *rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
1890         *rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
1891         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
1892         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
1893         *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
1894         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
1895         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
1896 }
1897
1898 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
1899                          struct rte_eth_rss_conf *rss_conf)
1900 {
1901         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1902         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1903         struct ecore_sp_vport_update_params vport_update_params;
1904         struct ecore_rss_params rss_params;
1905         struct ecore_hwfn *p_hwfn;
1906         uint32_t *key = (uint32_t *)rss_conf->rss_key;
1907         uint64_t hf = rss_conf->rss_hf;
1908         uint8_t len = rss_conf->rss_key_len;
1909         uint8_t idx;
1910         uint8_t i;
1911         int rc;
1912
1913         memset(&vport_update_params, 0, sizeof(vport_update_params));
1914         memset(&rss_params, 0, sizeof(rss_params));
1915
1916         DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
1917                 (unsigned long)hf, len, key);
1918
1919         if (hf != 0) {
1920                 /* Enabling RSS */
1921                 DP_INFO(edev, "Enabling rss\n");
1922
1923                 /* RSS caps */
1924                 qede_init_rss_caps(&rss_params.rss_caps, hf);
1925                 rss_params.update_rss_capabilities = 1;
1926
1927                 /* RSS hash key */
1928                 if (key) {
1929                         if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
1930                                 DP_ERR(edev, "RSS key length exceeds limit\n");
1931                                 return -EINVAL;
1932                         }
1933                         DP_INFO(edev, "Applying user supplied hash key\n");
1934                         rss_params.update_rss_key = 1;
1935                         memcpy(&rss_params.rss_key, key, len);
1936                 }
1937                 rss_params.rss_enable = 1;
1938         }
1939
1940         rss_params.update_rss_config = 1;
1941         /* tbl_size must accompany the capabilities: 128 entries, passed as log2 = 7 */
1942         rss_params.rss_table_size_log = 7;
1943         vport_update_params.vport_id = 0;
1944         /* pass the L2 handles instead of qids */
1945         for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
1946                 idx = qdev->rss_ind_table[i];
1947                 rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
1948         }
1949         vport_update_params.rss_params = &rss_params;
1950
1951         for_each_hwfn(edev, i) {
1952                 p_hwfn = &edev->hwfns[i];
1953                 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1954                 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
1955                                            ECORE_SPQ_MODE_EBLOCK, NULL);
1956                 if (rc) {
1957                         DP_ERR(edev, "vport-update for RSS failed\n");
1958                         return rc;
1959                 }
1960         }
1961         qdev->rss_enable = rss_params.rss_enable;
1962
1963         /* Update local structure for hash query */
1964         qdev->rss_conf.rss_hf = hf;
1965         qdev->rss_conf.rss_key_len = len;
1966         if (qdev->rss_enable) {
1967                 if (qdev->rss_conf.rss_key == NULL) {
1968                         qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
1969                         if (qdev->rss_conf.rss_key == NULL) {
1970                                 DP_ERR(edev, "No memory to store RSS key\n");
1971                                 return -ENOMEM;
1972                         }
1973                 }
1974                 if (key && len) {
1975                         DP_INFO(edev, "Storing RSS key\n");
1976                         memcpy(qdev->rss_conf.rss_key, key, len);
1977                 }
1978         } else if (!qdev->rss_enable && len == 0) {
1979                 if (qdev->rss_conf.rss_key) {
1980                         free(qdev->rss_conf.rss_key);
1981                         qdev->rss_conf.rss_key = NULL;
1982                         DP_INFO(edev, "Free RSS key\n");
1983                 }
1984         }
1985
1986         return 0;
1987 }
1988
1989 static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
1990                            struct rte_eth_rss_conf *rss_conf)
1991 {
1992         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1993
1994         rss_conf->rss_hf = qdev->rss_conf.rss_hf;
1995         rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;
1996
1997         if (rss_conf->rss_key && qdev->rss_conf.rss_key)
1998                 memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
1999                        rss_conf->rss_key_len);
2000         return 0;
2001 }
2002
2003 static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
2004                                     struct ecore_rss_params *rss)
2005 {
2006         int i, fn;
2007         bool rss_mode = true; /* enable */
2008         struct ecore_queue_cid *cid;
2009         struct ecore_rss_params *t_rss;
2010
2011         /* In regular scenario, we'd simply need to take input handlers.
2012          * But in CMT, we'd have to split the handlers according to the
2013          * engine they were configured on. We'd then have to understand
2014          * whether RSS is really required, since 2-queues on CMT doesn't
2015          * require RSS.
2016          */
2017
2018         /* CMT should be round-robin */
2019         for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
2020                 cid = rss->rss_ind_table[i];
2021
2022                 if (cid->p_owner == ECORE_LEADING_HWFN(edev))
2023                         t_rss = &rss[0];
2024                 else
2025                         t_rss = &rss[1];
2026
2027                 t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
2028         }
2029
2030         t_rss = &rss[1];
2031         t_rss->update_rss_ind_table = 1;
2032         t_rss->rss_table_size_log = 7;
2033         t_rss->update_rss_config = 1;
2034
2035         /* Make sure RSS is actually required */
2036         for_each_hwfn(edev, fn) {
2037                 for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
2038                      i++) {
2039                         if (rss[fn].rss_ind_table[i] !=
2040                             rss[fn].rss_ind_table[0])
2041                                 break;
2042                 }
2043
2044                 if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
2045                         DP_INFO(edev,
2046                                 "CMT - 1 queue per-hwfn; Disabling RSS\n");
2047                         rss_mode = 0;
2048                         goto out;
2049                 }
2050         }
2051
2052 out:
2053         t_rss->rss_enable = rss_mode;
2054
2055         return rss_mode;
2056 }
2057
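/* .reta_update callback: reached via
 * rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size), with
 * reta_size up to ETH_RSS_RETA_SIZE_128; reta_conf[].mask selects which
 * entries to update.
 */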
2058 int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
2059                          struct rte_eth_rss_reta_entry64 *reta_conf,
2060                          uint16_t reta_size)
2061 {
2062         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2063         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2064         struct ecore_sp_vport_update_params vport_update_params;
2065         struct ecore_rss_params *params;
2066         struct ecore_hwfn *p_hwfn;
2067         uint16_t i, idx, shift;
2068         uint8_t entry;
2069         int rc = 0;
2070
2071         if (reta_size > ETH_RSS_RETA_SIZE_128) {
2072                 DP_ERR(edev, "reta_size %d is not supported by hardware\n",
2073                        reta_size);
2074                 return -EINVAL;
2075         }
2076
2077         memset(&vport_update_params, 0, sizeof(vport_update_params));
2078         params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
2079                              RTE_CACHE_LINE_SIZE);
2080         if (params == NULL) {
2081                 DP_ERR(edev, "failed to allocate memory\n");
2082                 return -ENOMEM;
2083         }
2084
2085         for (i = 0; i < reta_size; i++) {
2086                 idx = i / RTE_RETA_GROUP_SIZE;
2087                 shift = i % RTE_RETA_GROUP_SIZE;
2088                 if (reta_conf[idx].mask & (1ULL << shift)) {
2089                         entry = reta_conf[idx].reta[shift];
2090                         /* Pass rxq handles to ecore */
2091                         params->rss_ind_table[i] =
2092                                         qdev->fp_array[entry].rxq->handle;
2093                         /* Update the local copy for RETA query command */
2094                         qdev->rss_ind_table[i] = entry;
2095                 }
2096         }
2097
2098         params->update_rss_ind_table = 1;
2099         params->rss_table_size_log = 7;
2100         params->update_rss_config = 1;
2101
2102         /* Fix up RETA for CMT mode device */
2103         if (ECORE_IS_CMT(edev))
2104                 qdev->rss_enable = qede_update_rss_parm_cmt(edev,
2105                                                             params);
2106         vport_update_params.vport_id = 0;
2107         /* Use the current value of rss_enable */
2108         params->rss_enable = qdev->rss_enable;
2109         vport_update_params.rss_params = params;
2110
2111         for_each_hwfn(edev, i) {
2112                 p_hwfn = &edev->hwfns[i];
2113                 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2114                 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
2115                                            ECORE_SPQ_MODE_EBLOCK, NULL);
2116                 if (rc) {
2117                         DP_ERR(edev, "vport-update for RSS failed\n");
2118                         goto out;
2119                 }
2120         }
2121
2122 out:
2123         rte_free(params);
2124         return rc;
2125 }
2126
2127 static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
2128                                struct rte_eth_rss_reta_entry64 *reta_conf,
2129                                uint16_t reta_size)
2130 {
2131         struct qede_dev *qdev = eth_dev->data->dev_private;
2132         struct ecore_dev *edev = &qdev->edev;
2133         uint16_t i, idx, shift;
2134         uint8_t entry;
2135
2136         if (reta_size > ETH_RSS_RETA_SIZE_128) {
2137                 DP_ERR(edev, "reta_size %d is not supported\n",
2138                        reta_size);
2139                 return -EINVAL;
2140         }
2141
2142         for (i = 0; i < reta_size; i++) {
2143                 idx = i / RTE_RETA_GROUP_SIZE;
2144                 shift = i % RTE_RETA_GROUP_SIZE;
2145                 if (reta_conf[idx].mask & (1ULL << shift)) {
2146                         entry = qdev->rss_ind_table[i];
2147                         reta_conf[idx].reta[shift] = entry;
2148                 }
2149         }
2150
2151         return 0;
2152 }
2153
2156 static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
2157 {
2158         struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
2159         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2160         struct rte_eth_dev_info dev_info = {0};
2161         struct qede_fastpath *fp;
2162         uint32_t frame_size;
2163         uint16_t rx_buf_size;
2164         uint16_t bufsz;
2165         int i;
2166
2167         PMD_INIT_FUNC_TRACE(edev);
2168         qede_dev_info_get(dev, &dev_info);
2169         frame_size = mtu + QEDE_ETH_OVERHEAD;
2170         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
2171                 DP_ERR(edev, "MTU %u out of range\n", mtu);
2172                 return -EINVAL;
2173         }
2174         if (!dev->data->scattered_rx &&
2175             frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
2176                 DP_ERR(edev, "MTU greater than minimum RX buffer size of %u\n",
2177                        dev->data->min_rx_buf_size);
2178                 return -EINVAL;
2179         }
2180         /* Temporarily replace I/O functions with dummy ones. It cannot
2181          * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
2182          */
2183         dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
2184         dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
2185         qede_dev_stop(dev);
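        /* Assumed intent: give in-flight traffic time to drain after stop */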
2186         rte_delay_ms(1000);
2187         qdev->mtu = mtu;
2188         /* Fix up RX buf size for all queues of the port */
2189         for_each_rss(i) {
2190                 fp = &qdev->fp_array[i];
2191                 bufsz = (uint16_t)rte_pktmbuf_data_room_size(
2192                         fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
2193                 if (dev->data->scattered_rx)
2194                         rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
2195                 else
2196                         rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
2197                 rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
2198                 fp->rxq->rx_buf_size = rx_buf_size;
2199                 DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
2200         }
2201         qede_dev_start(dev);
2202         if (frame_size > ETHER_MAX_LEN)
2203                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
2204         else
2205                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
2206         /* update max frame size */
2207         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
2208         /* Reassign back */
2209         dev->rx_pkt_burst = qede_recv_pkts;
2210         dev->tx_pkt_burst = qede_xmit_pkts;
2211
2212         return 0;
2213 }
2214
2215 static int
2216 qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
2217                        struct rte_eth_udp_tunnel *tunnel_udp,
2218                        bool add)
2219 {
2220         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2221         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2222         struct ecore_tunnel_info tunn; /* @DPDK */
2223         struct ecore_hwfn *p_hwfn;
2224         struct ecore_ptt *p_ptt;
2225         uint16_t udp_port;
2226         int rc, i;
2227
2228         PMD_INIT_FUNC_TRACE(edev);
2229
2230         memset(&tunn, 0, sizeof(tunn));
2231         if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
2232                 /* Enable VxLAN tunnel if needed before UDP port update using
2233                  * default MAC/VLAN classification.
2234                  */
2235                 if (add) {
2236                         if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
2237                                 DP_INFO(edev,
2238                                         "UDP port %u was already configured\n",
2239                                         tunnel_udp->udp_port);
2240                                 return ECORE_SUCCESS;
2241                         }
2242                         /* Enable VXLAN if it was not enabled while adding
2243                          * VXLAN filter.
2244                          */
2245                         if (!qdev->vxlan.enable) {
2246                                 rc = qede_vxlan_enable(eth_dev,
2247                                         ECORE_TUNN_CLSS_MAC_VLAN, true, true);
2248                                 if (rc != ECORE_SUCCESS) {
2249                                         DP_ERR(edev, "Failed to enable VXLAN "
2250                                                 "prior to updating UDP port\n");
2251                                         return rc;
2252                                 }
2253                         }
2254                         udp_port = tunnel_udp->udp_port;
2255                 } else {
2256                         if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
2257                                 DP_ERR(edev, "UDP port %u doesn't exist\n",
2258                                         tunnel_udp->udp_port);
2259                                 return ECORE_INVAL;
2260                         }
2261                         udp_port = 0;
2262                 }
2263
2264                 tunn.vxlan_port.b_update_port = true;
2265                 tunn.vxlan_port.port = udp_port;
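                /* Program the port on every hwfn; a PF needs a PTT window
                 * for the register access, while a VF passes NULL and the
                 * update travels over the VF-PF channel.
                 */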
2266                 for_each_hwfn(edev, i) {
2267                         p_hwfn = &edev->hwfns[i];
2268                         if (IS_PF(edev)) {
2269                                 p_ptt = ecore_ptt_acquire(p_hwfn);
2270                                 if (!p_ptt)
2271                                         return -EAGAIN;
2272                         } else {
2273                                 p_ptt = NULL;
2274                         }
2275                         rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2276                                                 ECORE_SPQ_MODE_CB, NULL);
2277                         if (rc != ECORE_SUCCESS) {
2278                                 DP_ERR(edev, "Unable to config UDP port %u\n",
2279                                        tunn.vxlan_port.port);
2280                                 if (IS_PF(edev))
2281                                         ecore_ptt_release(p_hwfn, p_ptt);
2282                                 return rc;
2283                         }
2284                 }
2285
2286                 qdev->vxlan.udp_port = udp_port;
2287         /* If the request is to delete the UDP port and the number of
2288          * VXLAN filters has reached 0, then VXLAN offload can be
2289          * disabled.
2290                  */
2291                 if (!add && qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
2292                         return qede_vxlan_enable(eth_dev,
2293                                         ECORE_TUNN_CLSS_MAC_VLAN, false, true);
2294         }
2295
2296         return 0;
2297 }
2298
2299 static int
2300 qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
2301                       struct rte_eth_udp_tunnel *tunnel_udp)
2302 {
2303         return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false);
2304 }
2305
2306 static int
2307 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
2308                       struct rte_eth_udp_tunnel *tunnel_udp)
2309 {
2310         return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true);
2311 }
2312
2313 static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
2314                                        uint32_t *clss, char *str)
2315 {
2316         uint16_t j;
2317         *clss = MAX_ECORE_TUNN_CLSS;
2318
2319         for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
2320                 if (filter == qede_tunn_types[j].rte_filter_type) {
2321                         *type = qede_tunn_types[j].qede_type;
2322                         *clss = qede_tunn_types[j].qede_tunn_clss;
2323                         strcpy(str, qede_tunn_types[j].string);
2324                         return;
2325                 }
2326         }
2327 }
2328
2329 static int
2330 qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
2331                               const struct rte_eth_tunnel_filter_conf *conf,
2332                               uint32_t type)
2333 {
2334         /* Init common ucast params first */
2335         qede_set_ucast_cmn_params(ucast);
2336
2337         /* Copy out the required fields based on classification type */
2338         ucast->type = type;
2339
2340         switch (type) {
2341         case ECORE_FILTER_VNI:
2342                 ucast->vni = conf->tenant_id;
2343                 break;
2344         case ECORE_FILTER_INNER_VLAN:
2345                 ucast->vlan = conf->inner_vlan;
2346                 break;
2347         case ECORE_FILTER_MAC:
2348                 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
2349                        ETHER_ADDR_LEN);
2350                 break;
2351         case ECORE_FILTER_INNER_MAC:
2352                 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2353                        ETHER_ADDR_LEN);
2354                 break;
2355         case ECORE_FILTER_MAC_VNI_PAIR:
2356                 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
2357                        ETHER_ADDR_LEN);
2358                 ucast->vni = conf->tenant_id;
2359                 break;
2360         case ECORE_FILTER_INNER_MAC_VNI_PAIR:
2361                 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2362                        ETHER_ADDR_LEN);
2363                 ucast->vni = conf->tenant_id;
2364                 break;
2365         case ECORE_FILTER_INNER_PAIR:
2366                 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2367                        ETHER_ADDR_LEN);
2368                 ucast->vlan = conf->inner_vlan;
2369                 break;
2370         default:
2371                 return -EINVAL;
2372         }
2373
2374         return ECORE_SUCCESS;
2375 }
2376
2377 static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
2378                                   enum rte_filter_op filter_op,
2379                                   const struct rte_eth_tunnel_filter_conf *conf)
2380 {
2381         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2382         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2383         enum ecore_filter_ucast_type type;
2384         enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
2385         struct ecore_filter_ucast ucast = {0};
2386         char str[80];
2387         uint16_t filter_type = 0;
2388         int rc;
2389
2390         PMD_INIT_FUNC_TRACE(edev);
2391
2392         switch (filter_op) {
2393         case RTE_ETH_FILTER_ADD:
2394                 if (IS_VF(edev))
2395                         return qede_vxlan_enable(eth_dev,
2396                                         ECORE_TUNN_CLSS_MAC_VLAN, true, true);
2397
2398                 filter_type = conf->filter_type;
2399                 /* Determine if the given filter classification is supported */
2400                 qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
2401                 if (clss == MAX_ECORE_TUNN_CLSS) {
2402                         DP_ERR(edev, "Unsupported filter type\n");
2403                         return -EINVAL;
2404                 }
2405                 /* Init tunnel ucast params */
2406                 rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
2407                 if (rc != ECORE_SUCCESS) {
2408                         DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
2409                         conf->filter_type);
2410                         return rc;
2411                 }
2412                 DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
2413                         str, filter_op, ucast.type);
2414
2415                 ucast.opcode = ECORE_FILTER_ADD;
2416
2417                 /* Skip MAC/VLAN if filter is based on VNI */
2418                 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
2419                         rc = qede_mac_int_ops(eth_dev, &ucast, 1);
2420                         if (rc == 0) {
2421                                 /* Enable accept anyvlan */
2422                                 qede_config_accept_any_vlan(qdev, true);
2423                         }
2424                 } else {
2425                         rc = qede_ucast_filter(eth_dev, &ucast, 1);
2426                         if (rc == 0)
2427                                 rc = ecore_filter_ucast_cmd(edev, &ucast,
2428                                                     ECORE_SPQ_MODE_CB, NULL);
2429                 }
2430
2431                 if (rc != ECORE_SUCCESS)
2432                         return rc;
2433
2434                 qdev->vxlan.num_filters++;
2435                 qdev->vxlan.filter_type = filter_type;
2436                 if (!qdev->vxlan.enable)
2437                         return qede_vxlan_enable(eth_dev, clss, true, true);
2438
2439                 break;
2440         case RTE_ETH_FILTER_DELETE:
2441                 if (IS_VF(edev))
2442                         return qede_vxlan_enable(eth_dev,
2443                                 ECORE_TUNN_CLSS_MAC_VLAN, false, true);
2444
2445                 filter_type = conf->filter_type;
2446                 /* Determine if the given filter classification is supported */
2447                 qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
2448                 if (clss == MAX_ECORE_TUNN_CLSS) {
2449                         DP_ERR(edev, "Unsupported filter type\n");
2450                         return -EINVAL;
2451                 }
2452                 /* Init tunnel ucast params */
2453                 rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
2454                 if (rc != ECORE_SUCCESS) {
2455                         DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
2456                         conf->filter_type);
2457                         return rc;
2458                 }
2459                 DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
2460                         str, filter_op, ucast.type);
2461
2462                 ucast.opcode = ECORE_FILTER_REMOVE;
2463
2464                 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
2465                         rc = qede_mac_int_ops(eth_dev, &ucast, 0);
2466                 } else {
2467                         rc = qede_ucast_filter(eth_dev, &ucast, 0);
2468                         if (rc == 0)
2469                                 rc = ecore_filter_ucast_cmd(edev, &ucast,
2470                                                     ECORE_SPQ_MODE_CB, NULL);
2471                 }
2472                 if (rc != ECORE_SUCCESS)
2473                         return rc;
2474
2475                 qdev->vxlan.num_filters--;
2476
2477                 /* Disable VXLAN if VXLAN filters become 0 */
2478                 if (qdev->vxlan.num_filters == 0)
2479                         return qede_vxlan_enable(eth_dev, clss, false, true);
2480                 break;
2481         default:
2482                 DP_ERR(edev, "Unsupported operation %d\n", filter_op);
2483                 return -EINVAL;
2484         }
2485
2486         return 0;
2487 }
2488
2489 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
2490                          enum rte_filter_type filter_type,
2491                          enum rte_filter_op filter_op,
2492                          void *arg)
2493 {
2494         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2495         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2496         struct rte_eth_tunnel_filter_conf *filter_conf =
2497                         (struct rte_eth_tunnel_filter_conf *)arg;
2498
2499         switch (filter_type) {
2500         case RTE_ETH_FILTER_TUNNEL:
2501                 switch (filter_conf->tunnel_type) {
2502                 case RTE_TUNNEL_TYPE_VXLAN:
2503                         DP_INFO(edev,
2504                                 "Packet steering to the specified Rx queue"
2505                                 " is not supported with VXLAN tunneling\n");
2506                         return qede_vxlan_tunn_config(eth_dev, filter_op,
2507                                                       filter_conf);
2508                 /* Place holders for future tunneling support */
2509                 case RTE_TUNNEL_TYPE_GENEVE:
2510                 case RTE_TUNNEL_TYPE_TEREDO:
2511                 case RTE_TUNNEL_TYPE_NVGRE:
2512                 case RTE_TUNNEL_TYPE_IP_IN_GRE:
2513                 case RTE_L2_TUNNEL_TYPE_E_TAG:
2514                         DP_ERR(edev, "Unsupported tunnel type %d\n",
2515                                 filter_conf->tunnel_type);
2516                         return -EINVAL;
2517                 case RTE_TUNNEL_TYPE_NONE:
2518                 default:
2519                         return 0;
2520                 }
2521                 break;
2522         case RTE_ETH_FILTER_FDIR:
2523                 return qede_fdir_filter_conf(eth_dev, filter_op, arg);
2524         case RTE_ETH_FILTER_NTUPLE:
2525                 return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
2526         case RTE_ETH_FILTER_MACVLAN:
2527         case RTE_ETH_FILTER_ETHERTYPE:
2528         case RTE_ETH_FILTER_FLEXIBLE:
2529         case RTE_ETH_FILTER_SYN:
2530         case RTE_ETH_FILTER_HASH:
2531         case RTE_ETH_FILTER_L2_TUNNEL:
2532         case RTE_ETH_FILTER_MAX:
2533         default:
2534                 DP_ERR(edev, "Unsupported filter type %d\n",
2535                         filter_type);
2536                 return -EINVAL;
2537         }
2538
2539         return 0;
2540 }
2541
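/* PF ethdev callbacks. A sketch of how an application reaches them
 * (assumes port 0, a filled-in rte_eth_conf and an mbuf pool mp):
 *
 *   rte_eth_dev_configure(0, 1, 1, &port_conf);      -> .dev_configure
 *   rte_eth_rx_queue_setup(0, 0, 512, 0, NULL, mp);  -> .rx_queue_setup
 *   rte_eth_dev_start(0);                            -> .dev_start
 *   rte_eth_dev_set_mtu(0, 1500);                    -> .mtu_set
 */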
2542 static const struct eth_dev_ops qede_eth_dev_ops = {
2543         .dev_configure = qede_dev_configure,
2544         .dev_infos_get = qede_dev_info_get,
2545         .rx_queue_setup = qede_rx_queue_setup,
2546         .rx_queue_release = qede_rx_queue_release,
2547         .tx_queue_setup = qede_tx_queue_setup,
2548         .tx_queue_release = qede_tx_queue_release,
2549         .dev_start = qede_dev_start,
2550         .dev_set_link_up = qede_dev_set_link_up,
2551         .dev_set_link_down = qede_dev_set_link_down,
2552         .link_update = qede_link_update,
2553         .promiscuous_enable = qede_promiscuous_enable,
2554         .promiscuous_disable = qede_promiscuous_disable,
2555         .allmulticast_enable = qede_allmulticast_enable,
2556         .allmulticast_disable = qede_allmulticast_disable,
2557         .dev_stop = qede_dev_stop,
2558         .dev_close = qede_dev_close,
2559         .stats_get = qede_get_stats,
2560         .stats_reset = qede_reset_stats,
2561         .xstats_get = qede_get_xstats,
2562         .xstats_reset = qede_reset_xstats,
2563         .xstats_get_names = qede_get_xstats_names,
2564         .mac_addr_add = qede_mac_addr_add,
2565         .mac_addr_remove = qede_mac_addr_remove,
2566         .mac_addr_set = qede_mac_addr_set,
2567         .vlan_offload_set = qede_vlan_offload_set,
2568         .vlan_filter_set = qede_vlan_filter_set,
2569         .flow_ctrl_set = qede_flow_ctrl_set,
2570         .flow_ctrl_get = qede_flow_ctrl_get,
2571         .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
2572         .rss_hash_update = qede_rss_hash_update,
2573         .rss_hash_conf_get = qede_rss_hash_conf_get,
2574         .reta_update  = qede_rss_reta_update,
2575         .reta_query  = qede_rss_reta_query,
2576         .mtu_set = qede_set_mtu,
2577         .filter_ctrl = qede_dev_filter_ctrl,
2578         .udp_tunnel_port_add = qede_udp_dst_port_add,
2579         .udp_tunnel_port_del = qede_udp_dst_port_del,
2580 };
2581
2582 static const struct eth_dev_ops qede_eth_vf_dev_ops = {
2583         .dev_configure = qede_dev_configure,
2584         .dev_infos_get = qede_dev_info_get,
2585         .rx_queue_setup = qede_rx_queue_setup,
2586         .rx_queue_release = qede_rx_queue_release,
2587         .tx_queue_setup = qede_tx_queue_setup,
2588         .tx_queue_release = qede_tx_queue_release,
2589         .dev_start = qede_dev_start,
2590         .dev_set_link_up = qede_dev_set_link_up,
2591         .dev_set_link_down = qede_dev_set_link_down,
2592         .link_update = qede_link_update,
2593         .promiscuous_enable = qede_promiscuous_enable,
2594         .promiscuous_disable = qede_promiscuous_disable,
2595         .allmulticast_enable = qede_allmulticast_enable,
2596         .allmulticast_disable = qede_allmulticast_disable,
2597         .dev_stop = qede_dev_stop,
2598         .dev_close = qede_dev_close,
2599         .stats_get = qede_get_stats,
2600         .stats_reset = qede_reset_stats,
2601         .xstats_get = qede_get_xstats,
2602         .xstats_reset = qede_reset_xstats,
2603         .xstats_get_names = qede_get_xstats_names,
2604         .vlan_offload_set = qede_vlan_offload_set,
2605         .vlan_filter_set = qede_vlan_filter_set,
2606         .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
2607         .rss_hash_update = qede_rss_hash_update,
2608         .rss_hash_conf_get = qede_rss_hash_conf_get,
2609         .reta_update  = qede_rss_reta_update,
2610         .reta_query  = qede_rss_reta_query,
2611         .mtu_set = qede_set_mtu,
2612         .udp_tunnel_port_add = qede_udp_dst_port_add,
2613         .udp_tunnel_port_del = qede_udp_dst_port_del,
2614 };
2615
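/* Advertise to ecore, prior to slowpath start, the number of L2
 * connections and aRFS filters this PF will need.
 */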
static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(edev, &pf_params);
}

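/* Common per-port init shared by the PF and VF paths: probe the device,
 * enable interrupts, start the slowpath, discover device capabilities,
 * set up the MAC address table and, for PFs, bring up the vport.
 */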
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	int rc;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	adapter->ethdev = eth_dev;
	edev = &adapter->edev;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	pci_addr = pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);

	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	eth_dev->rx_pkt_burst = qede_recv_pkts;
	eth_dev->tx_pkt_burst = qede_xmit_pkts;
	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;

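	/* The fast-path handlers set above are all a secondary process
	 * needs; the remaining HW initialization is primary-only.
	 */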
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_ERR(edev, "Skipping device init from secondary process\n");
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* @DPDK */
	edev->vendor_id = pci_dev->id.vendor_id;
	edev->device_id = pci_dev->id.device_id;

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		return -EINVAL;
	}

	DP_INFO(edev, "Starting qede probe\n");
	rc = qed_ops->common->probe(edev, pci_dev, dp_module,
				    dp_level, is_vf);
	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		return -ENODEV;
	}
	qede_update_pf_params(edev);
	rte_intr_callback_register(&pci_dev->intr_handle,
				   qede_interrupt_handler, (void *)eth_dev);
	if (rte_intr_enable(&pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		return -ENODEV;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));
	params.int_mode = ECORE_INT_MODE_MSIX;
	params.drv_major = QEDE_PMD_VERSION_MAJOR;
	params.drv_minor = QEDE_PMD_VERSION_MINOR;
	params.drv_rev = QEDE_PMD_VERSION_REVISION;
	params.drv_eng = QEDE_PMD_VERSION_PATCH;
	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
		QEDE_PMD_DRV_VER_STR_SIZE);

	/* For CMT mode device do periodic polling for slowpath events.
	 * This is required since uio device uses only one MSI-x
	 * interrupt vector but we need one for each engine.
	 */
	if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
		rc = rte_eal_alarm_set(timer_period * US_PER_S,
				       qede_poll_sp_sb_cb,
				       (void *)eth_dev);
		if (rc != 0) {
			DP_ERR(edev, "Unable to start periodic timer rc %d\n",
			       rc);
			return -EINVAL;
		}
	}

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

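	/* Save the capabilities reported by ecore in the adapter and
	 * push the name chosen above down to the common layer.
	 */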
	qede_alloc_etherdev(adapter, &dev_info);

	adapter->ops->common->set_name(edev, edev->name);

	if (!is_vf)
		adapter->dev_info.num_mac_filters =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					   ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
				(uint32_t *)&adapter->dev_info.num_mac_filters);

	/* Allocate memory for storing MAC addresses, one slot per HW MAC filter */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					(ETHER_ADDR_LEN *
					adapter->dev_info.num_mac_filters),
					RTE_CACHE_LINE_SIZE);

	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENOMEM;
	}

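	/* PF: the permanent MAC address is read directly from HW. VF: a
	 * (possibly forced) MAC has to be fetched from the PF's bulletin
	 * board instead.
	 */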
	if (!is_vf) {
		ether_addr_copy((struct ether_addr *)edev->hwfns[0].
				hw_info.hw_mac_addr,
				&eth_dev->data->mac_addrs[0]);
		ether_addr_copy(&eth_dev->data->mac_addrs[0],
				&adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist && is_mac_forced) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				ether_addr_copy((struct ether_addr *)&vf_mac,
						&eth_dev->data->mac_addrs[0]);
				ether_addr_copy(&eth_dev->data->mac_addrs[0],
						&adapter->primary_mac);
			} else {
				DP_ERR(edev, "No VF macaddr assigned\n");
			}
		}
	}

	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;

	if (do_once) {
#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
		qede_print_adapter_info(adapter);
#endif
		do_once = false;
	}

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	SLIST_INIT(&adapter->fdir_info.fdir_list_head);
	SLIST_INIT(&adapter->vlan_list_head);
	SLIST_INIT(&adapter->uc_list_head);
	adapter->mtu = ETHER_MTU;
	adapter->new_mtu = ETHER_MTU;
	if (!is_vf)
		if (qede_start_vport(adapter, adapter->mtu))
			return -ENODEV;

	DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
		adapter->primary_mac.addr_bytes[0],
		adapter->primary_mac.addr_bytes[1],
		adapter->primary_mac.addr_bytes[2],
		adapter->primary_mac.addr_bytes[3],
		adapter->primary_mac.addr_bytes[4],
		adapter->primary_mac.addr_bytes[5]);

	DP_INFO(edev, "Device initialized\n");

	return 0;
}

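/* Thin wrappers so the PCI probe path can select PF vs. VF init. */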
static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, true);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, false);
}

static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);
#endif

	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* rte_free() is a no-op on NULL, so no check is needed */
	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

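/* Supported PCI device IDs; the zeroed sentinel entry terminates each
 * table for the PCI bus scan.
 */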
static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
	},
	{.vendor_id = 0,}
};

static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
	},
	{.vendor_id = 0,}
};

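/* Probe/remove glue: rte_eth_dev_pci_generic_probe() allocates an
 * ethdev with a qede_dev-sized private area and calls the matching
 * init callback; the generic remove runs the uninit callback and
 * releases the ethdev again.
 */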
static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qedevf_eth_dev_init);
}

static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
}

static struct rte_pci_driver rte_qedevf_pmd = {
	.id_table = pci_id_qedevf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qedevf_eth_dev_pci_probe,
	.remove = qedevf_eth_dev_pci_remove,
};

static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qede_eth_dev_init);
}

static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
}

static struct rte_pci_driver rte_qede_pmd = {
	.id_table = pci_id_qede_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qede_eth_dev_pci_probe,
	.remove = qede_eth_dev_pci_remove,
};

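/* Register the PF and VF PMDs with the PCI bus, export their device-ID
 * tables (consumed by tools such as dpdk-pmdinfo.py) and declare the
 * kernel modules devices may be bound to; uio_pci_generic is omitted
 * for the VF because it lacks the MSI-X support the VF path needs.
 */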
RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");