[dpdk.git] / drivers / net / qede / qede_ethdev.c
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "qede_ethdev.h"
10 #include <rte_alarm.h>
11 #include <rte_version.h>
12
13 /* Globals */
14 int qede_logtype_init;
15 int qede_logtype_driver;
16
17 static const struct qed_eth_ops *qed_ops;
18 static int64_t timer_period = 1;
19
20 /* VXLAN tunnel classification mapping */
21 const struct _qede_udp_tunn_types {
22         uint16_t rte_filter_type;
23         enum ecore_filter_ucast_type qede_type;
24         enum ecore_tunn_clss qede_tunn_clss;
25         const char *string;
26 } qede_tunn_types[] = {
27         {
28                 ETH_TUNNEL_FILTER_OMAC,
29                 ECORE_FILTER_MAC,
30                 ECORE_TUNN_CLSS_MAC_VLAN,
31                 "outer-mac"
32         },
33         {
34                 ETH_TUNNEL_FILTER_TENID,
35                 ECORE_FILTER_VNI,
36                 ECORE_TUNN_CLSS_MAC_VNI,
37                 "vni"
38         },
39         {
40                 ETH_TUNNEL_FILTER_IMAC,
41                 ECORE_FILTER_INNER_MAC,
42                 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
43                 "inner-mac"
44         },
45         {
46                 ETH_TUNNEL_FILTER_IVLAN,
47                 ECORE_FILTER_INNER_VLAN,
48                 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
49                 "inner-vlan"
50         },
51         {
52                 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
53                 ECORE_FILTER_MAC_VNI_PAIR,
54                 ECORE_TUNN_CLSS_MAC_VNI,
55                 "outer-mac and vni"
56         },
57         {
58                 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
59                 ECORE_FILTER_UNUSED,
60                 MAX_ECORE_TUNN_CLSS,
61                 "outer-mac and inner-mac"
62         },
63         {
64                 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
65                 ECORE_FILTER_UNUSED,
66                 MAX_ECORE_TUNN_CLSS,
67                 "outer-mac and inner-vlan"
68         },
69         {
70                 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
71                 ECORE_FILTER_INNER_MAC_VNI_PAIR,
72                 ECORE_TUNN_CLSS_INNER_MAC_VNI,
73                 "vni and inner-mac",
74         },
75         {
76                 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
77                 ECORE_FILTER_UNUSED,
78                 MAX_ECORE_TUNN_CLSS,
79                 "vni and inner-vlan",
80         },
81         {
82                 ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
83                 ECORE_FILTER_INNER_PAIR,
84                 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
85                 "inner-mac and inner-vlan",
86         },
87         {
88                 ETH_TUNNEL_FILTER_OIP,
89                 ECORE_FILTER_UNUSED,
90                 MAX_ECORE_TUNN_CLSS,
91                 "outer-IP"
92         },
93         {
94                 ETH_TUNNEL_FILTER_IIP,
95                 ECORE_FILTER_UNUSED,
96                 MAX_ECORE_TUNN_CLSS,
97                 "inner-IP"
98         },
99         {
100                 RTE_TUNNEL_FILTER_IMAC_IVLAN,
101                 ECORE_FILTER_UNUSED,
102                 MAX_ECORE_TUNN_CLSS,
103                 "IMAC_IVLAN"
104         },
105         {
106                 RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
107                 ECORE_FILTER_UNUSED,
108                 MAX_ECORE_TUNN_CLSS,
109                 "IMAC_IVLAN_TENID"
110         },
111         {
112                 RTE_TUNNEL_FILTER_IMAC_TENID,
113                 ECORE_FILTER_UNUSED,
114                 MAX_ECORE_TUNN_CLSS,
115                 "IMAC_TENID"
116         },
117         {
118                 RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
119                 ECORE_FILTER_UNUSED,
120                 MAX_ECORE_TUNN_CLSS,
121                 "OMAC_TENID_IMAC"
122         },
123 };
124
125 struct rte_qede_xstats_name_off {
126         char name[RTE_ETH_XSTATS_NAME_SIZE];
127         uint64_t offset;
128 };
129
130 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
131         {"rx_unicast_bytes",
132                 offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
133         {"rx_multicast_bytes",
134                 offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
135         {"rx_broadcast_bytes",
136                 offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
137         {"rx_unicast_packets",
138                 offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
139         {"rx_multicast_packets",
140                 offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
141         {"rx_broadcast_packets",
142                 offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},
143
144         {"tx_unicast_bytes",
145                 offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
146         {"tx_multicast_bytes",
147                 offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
148         {"tx_broadcast_bytes",
149                 offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
150         {"tx_unicast_packets",
151                 offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
152         {"tx_multicast_packets",
153                 offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
154         {"tx_broadcast_packets",
155                 offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},
156
157         {"rx_64_byte_packets",
158                 offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
159         {"rx_65_to_127_byte_packets",
160                 offsetof(struct ecore_eth_stats_common,
161                          rx_65_to_127_byte_packets)},
162         {"rx_128_to_255_byte_packets",
163                 offsetof(struct ecore_eth_stats_common,
164                          rx_128_to_255_byte_packets)},
165         {"rx_256_to_511_byte_packets",
166                 offsetof(struct ecore_eth_stats_common,
167                          rx_256_to_511_byte_packets)},
168         {"rx_512_to_1023_byte_packets",
169                 offsetof(struct ecore_eth_stats_common,
170                          rx_512_to_1023_byte_packets)},
171         {"rx_1024_to_1518_byte_packets",
172                 offsetof(struct ecore_eth_stats_common,
173                          rx_1024_to_1518_byte_packets)},
174         {"tx_64_byte_packets",
175                 offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
176         {"tx_65_to_127_byte_packets",
177                 offsetof(struct ecore_eth_stats_common,
178                          tx_65_to_127_byte_packets)},
179         {"tx_128_to_255_byte_packets",
180                 offsetof(struct ecore_eth_stats_common,
181                          tx_128_to_255_byte_packets)},
182         {"tx_256_to_511_byte_packets",
183                 offsetof(struct ecore_eth_stats_common,
184                          tx_256_to_511_byte_packets)},
185         {"tx_512_to_1023_byte_packets",
186                 offsetof(struct ecore_eth_stats_common,
187                          tx_512_to_1023_byte_packets)},
188         {"tx_1024_to_1518_byte_packets",
189                 offsetof(struct ecore_eth_stats_common,
190                          tx_1024_to_1518_byte_packets)},
191
192         {"rx_mac_crtl_frames",
193                 offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
194         {"tx_mac_control_frames",
195                 offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
196         {"rx_pause_frames",
197                 offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
198         {"tx_pause_frames",
199                 offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
200         {"rx_priority_flow_control_frames",
201                 offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
202         {"tx_priority_flow_control_frames",
203                 offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},
204
205         {"rx_crc_errors",
206                 offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
207         {"rx_align_errors",
208                 offsetof(struct ecore_eth_stats_common, rx_align_errors)},
209         {"rx_carrier_errors",
210                 offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
211         {"rx_oversize_packet_errors",
212                 offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
213         {"rx_jabber_errors",
214                 offsetof(struct ecore_eth_stats_common, rx_jabbers)},
215         {"rx_undersize_packet_errors",
216                 offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
217         {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
218         {"rx_host_buffer_not_available",
219                 offsetof(struct ecore_eth_stats_common, no_buff_discards)},
220         /* Number of packets discarded because they are bigger than MTU */
221         {"rx_packet_too_big_discards",
222                 offsetof(struct ecore_eth_stats_common,
223                          packet_too_big_discard)},
224         {"rx_ttl_zero_discards",
225                 offsetof(struct ecore_eth_stats_common, ttl0_discard)},
226         {"rx_multi_function_tag_filter_discards",
227                 offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
228         {"rx_mac_filter_discards",
229                 offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
230         {"rx_hw_buffer_truncates",
231                 offsetof(struct ecore_eth_stats_common, brb_truncates)},
232         {"rx_hw_buffer_discards",
233                 offsetof(struct ecore_eth_stats_common, brb_discards)},
234         {"tx_error_drop_packets",
235                 offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},
236
237         {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
238         {"rx_mac_unicast_packets",
239                 offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
240         {"rx_mac_multicast_packets",
241                 offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
242         {"rx_mac_broadcast_packets",
243                 offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
244         {"rx_mac_frames_ok",
245                 offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
246         {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
247         {"tx_mac_unicast_packets",
248                 offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
249         {"tx_mac_multicast_packets",
250                 offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
251         {"tx_mac_broadcast_packets",
252                 offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},
253
254         {"lro_coalesced_packets",
255                 offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
256         {"lro_coalesced_events",
257                 offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
258         {"lro_aborts_num",
259                 offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
260         {"lro_not_coalesced_packets",
261                 offsetof(struct ecore_eth_stats_common,
262                          tpa_not_coalesced_pkts)},
263         {"lro_coalesced_bytes",
264                 offsetof(struct ecore_eth_stats_common,
265                          tpa_coalesced_bytes)},
266 };
267
268 static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
269         {"rx_1519_to_1522_byte_packets",
270                 offsetof(struct ecore_eth_stats, bb) +
271                 offsetof(struct ecore_eth_stats_bb,
272                          rx_1519_to_1522_byte_packets)},
273         {"rx_1519_to_2047_byte_packets",
274                 offsetof(struct ecore_eth_stats, bb) +
275                 offsetof(struct ecore_eth_stats_bb,
276                          rx_1519_to_2047_byte_packets)},
277         {"rx_2048_to_4095_byte_packets",
278                 offsetof(struct ecore_eth_stats, bb) +
279                 offsetof(struct ecore_eth_stats_bb,
280                          rx_2048_to_4095_byte_packets)},
281         {"rx_4096_to_9216_byte_packets",
282                 offsetof(struct ecore_eth_stats, bb) +
283                 offsetof(struct ecore_eth_stats_bb,
284                          rx_4096_to_9216_byte_packets)},
285         {"rx_9217_to_16383_byte_packets",
286                 offsetof(struct ecore_eth_stats, bb) +
287                 offsetof(struct ecore_eth_stats_bb,
288                          rx_9217_to_16383_byte_packets)},
289
290         {"tx_1519_to_2047_byte_packets",
291                 offsetof(struct ecore_eth_stats, bb) +
292                 offsetof(struct ecore_eth_stats_bb,
293                          tx_1519_to_2047_byte_packets)},
294         {"tx_2048_to_4095_byte_packets",
295                 offsetof(struct ecore_eth_stats, bb) +
296                 offsetof(struct ecore_eth_stats_bb,
297                          tx_2048_to_4095_byte_packets)},
298         {"tx_4096_to_9216_byte_packets",
299                 offsetof(struct ecore_eth_stats, bb) +
300                 offsetof(struct ecore_eth_stats_bb,
301                          tx_4096_to_9216_byte_packets)},
302         {"tx_9217_to_16383_byte_packets",
303                 offsetof(struct ecore_eth_stats, bb) +
304                 offsetof(struct ecore_eth_stats_bb,
305                          tx_9217_to_16383_byte_packets)},
306
307         {"tx_lpi_entry_count",
308                 offsetof(struct ecore_eth_stats, bb) +
309                 offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
310         {"tx_total_collisions",
311                 offsetof(struct ecore_eth_stats, bb) +
312                 offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
313 };
314
315 static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
316         {"rx_1519_to_max_byte_packets",
317                 offsetof(struct ecore_eth_stats, ah) +
318                 offsetof(struct ecore_eth_stats_ah,
319                          rx_1519_to_max_byte_packets)},
320         {"tx_1519_to_max_byte_packets",
321                 offsetof(struct ecore_eth_stats, ah) +
322                 offsetof(struct ecore_eth_stats_ah,
323                          tx_1519_to_max_byte_packets)},
324 };
325
326 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
327         {"rx_q_segments",
328                 offsetof(struct qede_rx_queue, rx_segs)},
329         {"rx_q_hw_errors",
330                 offsetof(struct qede_rx_queue, rx_hw_errors)},
331         {"rx_q_allocation_errors",
332                 offsetof(struct qede_rx_queue, rx_alloc_errors)}
333 };
334
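/* Run the slowpath DPC to service pending slowpath events on this hwfn */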
335 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
336 {
337         ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
338 }
339
340 static void
341 qede_interrupt_handler(void *param)
342 {
343         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
344         struct qede_dev *qdev = eth_dev->data->dev_private;
345         struct ecore_dev *edev = &qdev->edev;
346
347         qede_interrupt_action(ECORE_LEADING_HWFN(edev));
348         if (rte_intr_enable(eth_dev->intr_handle))
349                 DP_ERR(edev, "rte_intr_enable failed\n");
350 }
351
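/* Cache the ecore device info and L2 ops in the adapter private data */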
352 static void
353 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
354 {
355         rte_memcpy(&qdev->dev_info, info, sizeof(*info));
356         qdev->ops = qed_ops;
357 }
358
359 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
360 static void qede_print_adapter_info(struct qede_dev *qdev)
361 {
362         struct ecore_dev *edev = &qdev->edev;
363         struct qed_dev_info *info = &qdev->dev_info.common;
364         static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
365         static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
366
367         DP_INFO(edev, "*********************************\n");
368         DP_INFO(edev, " DPDK version:%s\n", rte_version());
369         DP_INFO(edev, " Chip details : %s %c%d\n",
370                   ECORE_IS_BB(edev) ? "BB" : "AH",
371                   'A' + edev->chip_rev,
372                   (int)edev->chip_metal);
373         snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
374                  info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
375         snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
376                  ver_str, QEDE_PMD_VERSION);
377         DP_INFO(edev, " Driver version : %s\n", drv_ver);
378         DP_INFO(edev, " Firmware version : %s\n", ver_str);
379
380         snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
381                  "%d.%d.%d.%d",
382                 (info->mfw_rev >> 24) & 0xff,
383                 (info->mfw_rev >> 16) & 0xff,
384                 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
385         DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
386         DP_INFO(edev, " Firmware file : %s\n", fw_file);
387         DP_INFO(edev, "*********************************\n");
388 }
389 #endif
390
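/* Start vport 0 on every hwfn with the given MTU and reset vport statistics */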
391 static int
392 qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
393 {
394         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
395         struct ecore_sp_vport_start_params params;
396         struct ecore_hwfn *p_hwfn;
397         int rc;
398         int i;
399
400         memset(&params, 0, sizeof(params));
401         params.vport_id = 0;
402         params.mtu = mtu;
403         /* @DPDK - Disable FW placement */
404         params.zero_placement_offset = 1;
405         for_each_hwfn(edev, i) {
406                 p_hwfn = &edev->hwfns[i];
407                 params.concrete_fid = p_hwfn->hw_info.concrete_fid;
408                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
409                 rc = ecore_sp_vport_start(p_hwfn, &params);
410                 if (rc != ECORE_SUCCESS) {
411                         DP_ERR(edev, "Start V-PORT failed %d\n", rc);
412                         return rc;
413                 }
414         }
415         ecore_reset_vport_stats(edev);
416         DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
417
418         return 0;
419 }
420
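/* Stop vport 0 on every hwfn */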
421 static int
422 qede_stop_vport(struct ecore_dev *edev)
423 {
424         struct ecore_hwfn *p_hwfn;
425         uint8_t vport_id;
426         int rc;
427         int i;
428
429         vport_id = 0;
430         for_each_hwfn(edev, i) {
431                 p_hwfn = &edev->hwfns[i];
432                 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
433                                          vport_id);
434                 if (rc != ECORE_SUCCESS) {
435                         DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
436                         return rc;
437                 }
438         }
439
440         return 0;
441 }
442
443 /* Activate or deactivate vport via vport-update */
444 int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
445 {
446         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
447         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
448         struct ecore_sp_vport_update_params params;
449         struct ecore_hwfn *p_hwfn;
450         uint8_t i;
451         int rc = -1;
452
453         memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
454         params.vport_id = 0;
455         params.update_vport_active_rx_flg = 1;
456         params.update_vport_active_tx_flg = 1;
457         params.vport_active_rx_flg = flg;
458         params.vport_active_tx_flg = flg;
459 #ifndef RTE_LIBRTE_QEDE_VF_TX_SWITCH
460         if (IS_VF(edev)) {
461                 params.update_tx_switching_flg = 1;
462                 params.tx_switching_flg = !flg;
463                 DP_INFO(edev, "VF tx-switching is disabled\n");
464         }
465 #endif
466         for_each_hwfn(edev, i) {
467                 p_hwfn = &edev->hwfns[i];
468                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
469                 rc = ecore_sp_vport_update(p_hwfn, &params,
470                                 ECORE_SPQ_MODE_EBLOCK, NULL);
471                 if (rc != ECORE_SUCCESS) {
472                         DP_ERR(edev, "Failed to update vport\n");
473                         break;
474                 }
475         }
476         DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");
477
478         return rc;
479 }
480
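/* Fill the SGE TPA (LRO) parameters used in a vport-update request */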
481 static void
482 qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
483                            uint16_t mtu, bool enable)
484 {
485         /* Enable LRO in split mode */
486         sge_tpa_params->tpa_ipv4_en_flg = enable;
487         sge_tpa_params->tpa_ipv6_en_flg = enable;
488         sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
489         sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
490         /* set if tpa enable changes */
491         sge_tpa_params->update_tpa_en_flg = 1;
492         /* set if tpa parameters should be handled */
493         sge_tpa_params->update_tpa_param_flg = enable;
494
495         sge_tpa_params->max_buffers_per_cqe = 20;
496         /* Enable TPA in split mode. In this mode each TPA segment
497          * starts on a new BD, so there is one BD per segment.
498          */
499         sge_tpa_params->tpa_pkt_split_flg = 1;
500         sge_tpa_params->tpa_hdr_data_split_flg = 0;
501         sge_tpa_params->tpa_gro_consistent_flg = 0;
502         sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
503         sge_tpa_params->tpa_max_size = 0x7FFF;
504         sge_tpa_params->tpa_min_size_to_start = mtu / 2;
505         sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
506 }
507
508 /* Enable/disable LRO via vport-update */
509 int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
510 {
511         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
512         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
513         struct ecore_sp_vport_update_params params;
514         struct ecore_sge_tpa_params tpa_params;
515         struct ecore_hwfn *p_hwfn;
516         int rc;
517         int i;
518
519         memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
520         memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
521         qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
522         params.vport_id = 0;
523         params.sge_tpa_params = &tpa_params;
524         for_each_hwfn(edev, i) {
525                 p_hwfn = &edev->hwfns[i];
526                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
527                 rc = ecore_sp_vport_update(p_hwfn, &params,
528                                 ECORE_SPQ_MODE_EBLOCK, NULL);
529                 if (rc != ECORE_SUCCESS) {
530                         DP_ERR(edev, "Failed to update LRO\n");
531                         return -1;
532                 }
533         }
534         qdev->enable_lro = flg;
535         DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
536
537         return 0;
538 }
539
540 /* Update MTU via vport-update without doing port restart.
541  * The vport must be deactivated before calling this API.
542  */
543 int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
544 {
545         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
546         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
547         struct ecore_sp_vport_update_params params;
548         struct ecore_hwfn *p_hwfn;
549         int rc;
550         int i;
551
552         memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
553         params.vport_id = 0;
554         params.mtu = mtu;
556         for_each_hwfn(edev, i) {
557                 p_hwfn = &edev->hwfns[i];
558                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
559                 rc = ecore_sp_vport_update(p_hwfn, &params,
560                                 ECORE_SPQ_MODE_EBLOCK, NULL);
561                 if (rc != ECORE_SUCCESS) {
562                         DP_ERR(edev, "Failed to update MTU\n");
563                         return -1;
564                 }
565         }
566         DP_INFO(edev, "MTU updated to %u\n", mtu);
567
568         return 0;
569 }
570
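/* Initialize a unicast filter request; the filter applies to both RX and TX */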
571 static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
572 {
573         memset(ucast, 0, sizeof(struct ecore_filter_ucast));
574         ucast->is_rx_filter = true;
575         ucast->is_tx_filter = true;
576         /* ucast->assert_on_error = true; - For debug */
577 }
578
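/* Program RX/TX accept flags for regular, promiscuous or multicast-promiscuous mode */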
579 static int
580 qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
581                              enum qed_filter_rx_mode_type type)
582 {
583         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
584         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
585         struct ecore_filter_accept_flags flags;
586
587         memset(&flags, 0, sizeof(flags));
588
589         flags.update_rx_mode_config = 1;
590         flags.update_tx_mode_config = 1;
591         flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
592                 ECORE_ACCEPT_MCAST_MATCHED |
593                 ECORE_ACCEPT_BCAST;
594
595         flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
596                 ECORE_ACCEPT_MCAST_MATCHED |
597                 ECORE_ACCEPT_BCAST;
598
599         if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
600                 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
601                 if (IS_VF(edev)) {
602                         flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
603                         DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
604                 }
605         } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
606                 flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
607         } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
608                                 QED_FILTER_RX_MODE_TYPE_PROMISC)) {
609                 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
610                         ECORE_ACCEPT_MCAST_UNMATCHED;
611         }
612
613         return ecore_filter_accept_cmd(edev, 0, flags, false, false,
614                         ECORE_SPQ_MODE_CB, NULL);
615 }
616
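/* Apply the tunnel configuration on every hwfn */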
617 static int
618 qede_tunnel_update(struct qede_dev *qdev,
619                    struct ecore_tunnel_info *tunn_info)
620 {
621         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
622         enum _ecore_status_t rc = ECORE_INVAL;
623         struct ecore_hwfn *p_hwfn;
624         struct ecore_ptt *p_ptt;
625         int i;
626
627         for_each_hwfn(edev, i) {
628                 p_hwfn = &edev->hwfns[i];
629                 p_ptt = IS_PF(edev) ? ecore_ptt_acquire(p_hwfn) : NULL;
630                 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
631                                 tunn_info, ECORE_SPQ_MODE_CB, NULL);
632                 if (IS_PF(edev))
633                         ecore_ptt_release(p_hwfn, p_ptt);
634
635                 if (rc != ECORE_SUCCESS)
636                         break;
637         }
638
639         return rc;
640 }
641
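/* Enable/disable VXLAN tunnel offload along with its default UDP port */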
642 static int
643 qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
644                   bool enable)
645 {
646         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
647         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
648         enum _ecore_status_t rc = ECORE_INVAL;
649         struct ecore_tunnel_info tunn;
650
651         if (qdev->vxlan.enable == enable)
652                 return ECORE_SUCCESS;
653
654         memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
655         tunn.vxlan.b_update_mode = true;
656         tunn.vxlan.b_mode_enabled = enable;
657         tunn.b_update_rx_cls = true;
658         tunn.b_update_tx_cls = true;
659         tunn.vxlan.tun_cls = clss;
660
661         tunn.vxlan_port.b_update_port = true;
662         tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;
663
664         rc = qede_tunnel_update(qdev, &tunn);
665         if (rc == ECORE_SUCCESS) {
666                 qdev->vxlan.enable = enable;
667                 qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
668                 DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
669                         enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
670         } else {
671                 DP_ERR(edev, "Failed to update tunn_clss %u\n",
672                        tunn.vxlan.tun_cls);
673         }
674
675         return rc;
676 }
677
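/* Enable/disable GENEVE (L2 and IP) tunnel offload along with its default UDP port */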
678 static int
679 qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
680                   bool enable)
681 {
682         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
683         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
684         enum _ecore_status_t rc = ECORE_INVAL;
685         struct ecore_tunnel_info tunn;
686
687         memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
688         tunn.l2_geneve.b_update_mode = true;
689         tunn.l2_geneve.b_mode_enabled = enable;
690         tunn.ip_geneve.b_update_mode = true;
691         tunn.ip_geneve.b_mode_enabled = enable;
692         tunn.l2_geneve.tun_cls = clss;
693         tunn.ip_geneve.tun_cls = clss;
694         tunn.b_update_rx_cls = true;
695         tunn.b_update_tx_cls = true;
696
697         tunn.geneve_port.b_update_port = true;
698         tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;
699
700         rc = qede_tunnel_update(qdev, &tunn);
701         if (rc == ECORE_SUCCESS) {
702                 qdev->geneve.enable = enable;
703                 qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
704                 DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
705                         enable ? "enabled" : "disabled", qdev->geneve.udp_port);
706         } else {
707                 DP_ERR(edev, "Failed to update tunn_clss %u\n",
708                        clss);
709         }
710
711         return rc;
712 }
713
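/* Dispatch tunnel enable/disable based on the tunnel type */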
714 static int
715 qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
716                  enum rte_eth_tunnel_type tunn_type, bool enable)
717 {
718         int rc = -EINVAL;
719
720         switch (tunn_type) {
721         case RTE_TUNNEL_TYPE_VXLAN:
722                 rc = qede_vxlan_enable(eth_dev, clss, enable);
723                 break;
724         case RTE_TUNNEL_TYPE_GENEVE:
725                 rc = qede_geneve_enable(eth_dev, clss, enable);
726                 break;
727         default:
728                 rc = -EINVAL;
729                 break;
730         }
731
732         return rc;
733 }
734
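/* Add to or remove from the driver's shadow list of unicast MAC/VLAN/VNI filters */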
735 static int
736 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
737                   bool add)
738 {
739         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
740         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
741         struct qede_ucast_entry *tmp = NULL;
742         struct qede_ucast_entry *u;
743         struct ether_addr *mac_addr;
744
745         mac_addr  = (struct ether_addr *)ucast->mac;
746         if (add) {
747                 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
748                         if ((memcmp(mac_addr, &tmp->mac,
749                                     ETHER_ADDR_LEN) == 0) &&
750                              ucast->vni == tmp->vni &&
751                              ucast->vlan == tmp->vlan) {
752                                 DP_ERR(edev, "Unicast MAC is already added"
753                                        " with vlan = %u, vni = %u\n",
754                                        ucast->vlan,  ucast->vni);
755                                         return -EEXIST;
756                         }
757                 }
758                 u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
759                                RTE_CACHE_LINE_SIZE);
760                 if (!u) {
761                         DP_ERR(edev, "Did not allocate memory for ucast\n");
762                         return -ENOMEM;
763                 }
764                 ether_addr_copy(mac_addr, &u->mac);
765                 u->vlan = ucast->vlan;
766                 u->vni = ucast->vni;
767                 SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
768                 qdev->num_uc_addr++;
769         } else {
770                 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
771                         if ((memcmp(mac_addr, &tmp->mac,
772                                     ETHER_ADDR_LEN) == 0) &&
773                             ucast->vlan == tmp->vlan      &&
774                             ucast->vni == tmp->vni)
775                         break;
776                 }
777                 if (tmp == NULL) {
778                         DP_INFO(edev, "Unicast MAC not found\n");
779                         return -EINVAL;
780                 }
781                 SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
782                 qdev->num_uc_addr--;
783         }
784
785         return 0;
786 }
787
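/* Add to or remove from the driver's shadow list of multicast MAC filters */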
788 static int
789 qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
790                   bool add)
791 {
792         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
793         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
794         struct ether_addr *mac_addr;
795         struct qede_mcast_entry *tmp = NULL;
796         struct qede_mcast_entry *m;
797
798         mac_addr  = (struct ether_addr *)mcast->mac;
799         if (add) {
800                 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
801                         if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
802                                 DP_ERR(edev,
803                                         "Multicast MAC is already added\n");
804                                 return -EEXIST;
805                         }
806                 }
807                 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
808                         RTE_CACHE_LINE_SIZE);
809                 if (!m) {
810                         DP_ERR(edev,
811                                 "Did not allocate memory for mcast\n");
812                         return -ENOMEM;
813                 }
814                 ether_addr_copy(mac_addr, &m->mac);
815                 SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
816                 qdev->num_mc_addr++;
817         } else {
818                 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
819                         if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
820                                 break;
821                 }
822                 if (tmp == NULL) {
823                         DP_INFO(edev, "Multicast MAC not found\n");
824                         return -EINVAL;
825                 }
826                 SLIST_REMOVE(&qdev->mc_list_head, tmp,
827                              qede_mcast_entry, list);
828                 qdev->num_mc_addr--;
829         }
830
831         return 0;
832 }
833
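/* Common MAC filter handler: update the shadow lists and issue the filter command to the firmware */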
834 static enum _ecore_status_t
835 qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
836                  bool add)
837 {
838         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
839         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
840         enum _ecore_status_t rc;
841         struct ecore_filter_mcast mcast;
842         struct qede_mcast_entry *tmp;
843         uint16_t j = 0;
844
845         /* Multicast */
846         if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
847                 if (add) {
848                         if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
849                                 DP_ERR(edev,
850                                        "Mcast filter table limit exceeded, "
851                                        "Please enable mcast promisc mode\n");
852                                 return -ECORE_INVAL;
853                         }
854                 }
855                 rc = qede_mcast_filter(eth_dev, ucast, add);
856                 if (rc == 0) {
857                         DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
858                         memset(&mcast, 0, sizeof(mcast));
859                         mcast.num_mc_addrs = qdev->num_mc_addr;
860                         mcast.opcode = ECORE_FILTER_ADD;
861                         SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
862                                 ether_addr_copy(&tmp->mac,
863                                         (struct ether_addr *)&mcast.mac[j]);
864                                 j++;
865                         }
866                         rc = ecore_filter_mcast_cmd(edev, &mcast,
867                                                     ECORE_SPQ_MODE_CB, NULL);
868                 }
869                 if (rc != ECORE_SUCCESS) {
870                         DP_ERR(edev, "Failed to add multicast filter"
871                                " rc = %d, op = %d\n", rc, add);
872                 }
873         } else { /* Unicast */
874                 if (add) {
875                         if (qdev->num_uc_addr >=
876                             qdev->dev_info.num_mac_filters) {
877                                 DP_ERR(edev,
878                                        "Ucast filter table limit exceeded,"
879                                        " Please enable promisc mode\n");
880                                 return -ECORE_INVAL;
881                         }
882                 }
883                 rc = qede_ucast_filter(eth_dev, ucast, add);
884                 if (rc == 0)
885                         rc = ecore_filter_ucast_cmd(edev, ucast,
886                                                     ECORE_SPQ_MODE_CB, NULL);
887                 if (rc != ECORE_SUCCESS) {
888                         DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
889                                rc, add);
890                 }
891         }
892
893         return rc;
894 }
895
896 static int
897 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
898                   __rte_unused uint32_t index, __rte_unused uint32_t pool)
899 {
900         struct ecore_filter_ucast ucast;
901         int re;
902
903         qede_set_ucast_cmn_params(&ucast);
904         ucast.type = ECORE_FILTER_MAC;
905         ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
906         re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
907         return re;
908 }
909
910 static void
911 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
912 {
913         struct qede_dev *qdev = eth_dev->data->dev_private;
914         struct ecore_dev *edev = &qdev->edev;
915         struct ecore_filter_ucast ucast;
916
917         PMD_INIT_FUNC_TRACE(edev);
918
919         if (index >= qdev->dev_info.num_mac_filters) {
920                 DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
921                        index, qdev->dev_info.num_mac_filters);
922                 return;
923         }
924
925         qede_set_ucast_cmn_params(&ucast);
926         ucast.opcode = ECORE_FILTER_REMOVE;
927         ucast.type = ECORE_FILTER_MAC;
928
929         /* Use the index maintained by rte */
930         ether_addr_copy(&eth_dev->data->mac_addrs[index],
931                         (struct ether_addr *)&ucast.mac);
932
933         ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
934 }
935
936 static void
937 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
938 {
939         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
940         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
941
942         if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
943                                                mac_addr->addr_bytes)) {
944                 DP_ERR(edev, "Setting MAC address is not allowed\n");
945                 ether_addr_copy(&qdev->primary_mac,
946                                 &eth_dev->data->mac_addrs[0]);
947                 return;
948         }
949
950         qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
951 }
952
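/* Toggle accept-any-VLAN on the vport via vport-update */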
953 static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
954 {
955         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
956         struct ecore_sp_vport_update_params params;
957         struct ecore_hwfn *p_hwfn;
958         uint8_t i;
959         int rc;
960
961         memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
962         params.vport_id = 0;
963         params.update_accept_any_vlan_flg = 1;
964         params.accept_any_vlan = flg;
965         for_each_hwfn(edev, i) {
966                 p_hwfn = &edev->hwfns[i];
967                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
968                 rc = ecore_sp_vport_update(p_hwfn, &params,
969                                 ECORE_SPQ_MODE_EBLOCK, NULL);
970                 if (rc != ECORE_SUCCESS) {
971                         DP_ERR(edev, "Failed to configure accept-any-vlan\n");
972                         return;
973                 }
974         }
975
976         DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
977 }
978
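/* Enable/disable inner VLAN stripping on the vport via vport-update */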
979 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
980 {
981         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
982         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
983         struct ecore_sp_vport_update_params params;
984         struct ecore_hwfn *p_hwfn;
985         uint8_t i;
986         int rc;
987
988         memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
989         params.vport_id = 0;
990         params.update_inner_vlan_removal_flg = 1;
991         params.inner_vlan_removal_flg = flg;
992         for_each_hwfn(edev, i) {
993                 p_hwfn = &edev->hwfns[i];
994                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
995                 rc = ecore_sp_vport_update(p_hwfn, &params,
996                                 ECORE_SPQ_MODE_EBLOCK, NULL);
997                 if (rc != ECORE_SUCCESS) {
998                         DP_ERR(edev, "Failed to update vport\n");
999                         return -1;
1000                 }
1001         }
1002
1003         DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
1004         return 0;
1005 }
1006
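/* Add or remove a VLAN filter; enables accept-any-VLAN when the filter limit is reached */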
1007 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
1008                                 uint16_t vlan_id, int on)
1009 {
1010         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1011         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1012         struct qed_dev_eth_info *dev_info = &qdev->dev_info;
1013         struct qede_vlan_entry *tmp = NULL;
1014         struct qede_vlan_entry *vlan;
1015         struct ecore_filter_ucast ucast;
1016         int rc;
1017
1018         if (on) {
1019                 if (qdev->configured_vlans == dev_info->num_vlan_filters) {
1020                         DP_ERR(edev, "Reached max VLAN filter limit,"
1021                                       " enabling accept_any_vlan\n");
1022                         qede_config_accept_any_vlan(qdev, true);
1023                         return 0;
1024                 }
1025
1026                 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
1027                         if (tmp->vid == vlan_id) {
1028                                 DP_ERR(edev, "VLAN %u already configured\n",
1029                                        vlan_id);
1030                                 return -EEXIST;
1031                         }
1032                 }
1033
1034                 vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
1035                                   RTE_CACHE_LINE_SIZE);
1036
1037                 if (!vlan) {
1038                         DP_ERR(edev, "Did not allocate memory for VLAN\n");
1039                         return -ENOMEM;
1040                 }
1041
1042                 qede_set_ucast_cmn_params(&ucast);
1043                 ucast.opcode = ECORE_FILTER_ADD;
1044                 ucast.type = ECORE_FILTER_VLAN;
1045                 ucast.vlan = vlan_id;
1046                 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
1047                                             NULL);
1048                 if (rc != 0) {
1049                         DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
1050                                rc);
1051                         rte_free(vlan);
1052                 } else {
1053                         vlan->vid = vlan_id;
1054                         SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
1055                         qdev->configured_vlans++;
1056                         DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
1057                                 vlan_id, qdev->configured_vlans);
1058                 }
1059         } else {
1060                 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
1061                         if (tmp->vid == vlan_id)
1062                                 break;
1063                 }
1064
1065                 if (!tmp) {
1066                         if (qdev->configured_vlans == 0) {
1067                                 DP_INFO(edev,
1068                                         "No VLAN filters configured yet\n");
1069                                 return 0;
1070                         }
1071
1072                         DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
1073                         return -EINVAL;
1074                 }
1075
1076                 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);
1077
1078                 qede_set_ucast_cmn_params(&ucast);
1079                 ucast.opcode = ECORE_FILTER_REMOVE;
1080                 ucast.type = ECORE_FILTER_VLAN;
1081                 ucast.vlan = vlan_id;
1082                 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
1083                                             NULL);
1084                 if (rc != 0) {
1085                         DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
1086                                vlan_id, rc);
1087                 } else {
1088                         qdev->configured_vlans--;
1089                         DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
1090                                 vlan_id, qdev->configured_vlans);
1091                 }
1092         }
1093
1094         return rc;
1095 }
1096
1097 static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
1098 {
1099         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1100         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1101         struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1102
1103         if (mask & ETH_VLAN_STRIP_MASK) {
1104                 if (rxmode->hw_vlan_strip)
1105                         (void)qede_vlan_stripping(eth_dev, 1);
1106                 else
1107                         (void)qede_vlan_stripping(eth_dev, 0);
1108         }
1109
1110         if (mask & ETH_VLAN_FILTER_MASK) {
1111                 /* VLAN filtering kicks in when a VLAN is added */
1112                 if (rxmode->hw_vlan_filter) {
1113                         qede_vlan_filter_set(eth_dev, 0, 1);
1114                 } else {
1115                         if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
1116                                 DP_ERR(edev,
1117                                   " Please remove existing VLAN filters"
1118                                   " before disabling VLAN filtering\n");
1119                                 /* Signal app that VLAN filtering is still
1120                                  * enabled
1121                                  */
1122                                 rxmode->hw_vlan_filter = true;
1123                         } else {
1124                                 qede_vlan_filter_set(eth_dev, 0, 0);
1125                         }
1126                 }
1127         }
1128
1129         if (mask & ETH_VLAN_EXTEND_MASK)
1130                 DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
1131                         " and classification is based on outer tag only\n");
1132
1133         DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
1134                 mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
1135
1136         return 0;
1137 }
1138
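/* Fill the buffer with a pseudo-random RSS hash key */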
1139 static void qede_prandom_bytes(uint32_t *buff)
1140 {
1141         uint8_t i;
1142
1143         srand((unsigned int)time(NULL));
1144         for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
1145                 buff[i] = rand();
1146 }
1147
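/* Apply the RSS hash configuration (driver default key if none is given) and a default RETA spreading flows across all RX queues */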
1148 int qede_config_rss(struct rte_eth_dev *eth_dev)
1149 {
1150         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1151 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
1152         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1153 #endif
1154         uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
1155         struct rte_eth_rss_reta_entry64 reta_conf[2];
1156         struct rte_eth_rss_conf rss_conf;
1157         uint32_t i, id, pos, q;
1158
1159         rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
1160         if (!rss_conf.rss_key) {
1161                 DP_INFO(edev, "Applying driver default key\n");
1162                 rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
1163                 qede_prandom_bytes(&def_rss_key[0]);
1164                 rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
1165         }
1166
1167         /* Configure RSS hash */
1168         if (qede_rss_hash_update(eth_dev, &rss_conf))
1169                 return -EINVAL;
1170
1171         /* Configure default RETA */
1172         memset(reta_conf, 0, sizeof(reta_conf));
1173         for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
1174                 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
1175
1176         for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
1177                 id = i / RTE_RETA_GROUP_SIZE;
1178                 pos = i % RTE_RETA_GROUP_SIZE;
1179                 q = i % QEDE_RSS_COUNT(qdev);
1180                 reta_conf[id].reta[pos] = q;
1181         }
1182         if (qede_rss_reta_update(eth_dev, &reta_conf[0],
1183                                  ECORE_RSS_IND_TABLE_SIZE))
1184                 return -EINVAL;
1185
1186         return 0;
1187 }
1188
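/* Enable fastpath processing on every hwfn */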
1189 static void qede_fastpath_start(struct ecore_dev *edev)
1190 {
1191         struct ecore_hwfn *p_hwfn;
1192         int i;
1193
1194         for_each_hwfn(edev, i) {
1195                 p_hwfn = &edev->hwfns[i];
1196                 ecore_hw_start_fastpath(p_hwfn);
1197         }
1198 }
1199
1200 static int qede_dev_start(struct rte_eth_dev *eth_dev)
1201 {
1202         struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1203         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1204         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1205
1206         PMD_INIT_FUNC_TRACE(edev);
1207
1208         /* Update MTU only if it has changed */
1209         if (qdev->mtu != qdev->new_mtu) {
1210                 if (qede_update_mtu(eth_dev, qdev->new_mtu))
1211                         goto err;
1212                 qdev->mtu = qdev->new_mtu;
1213         }
1214
1215         /* Configure TPA parameters */
1216         if (rxmode->enable_lro) {
1217                 if (qede_enable_tpa(eth_dev, true))
1218                         return -EINVAL;
1219                 /* Enable scatter mode for LRO */
1220                 if (!rxmode->enable_scatter)
1221                         eth_dev->data->scattered_rx = 1;
1222         }
1223
1224         /* Start queues */
1225         if (qede_start_queues(eth_dev))
1226                 goto err;
1227
1228         /* Newer SR-IOV PF driver expects RX/TX queues to be started before
1229          * enabling RSS. Hence RSS configuration is deferred up to this point.
1230          * Also, we would like to retain similar behavior in the PF case, so we
1231          * do not do a PF/VF-specific check here.
1232          */
1233         if (rxmode->mq_mode == ETH_MQ_RX_RSS)
1234                 if (qede_config_rss(eth_dev))
1235                         goto err;
1236
1237         /* Enable vport */
1238         if (qede_activate_vport(eth_dev, true))
1239                 goto err;
1240
1241         /* Bring-up the link */
1242         qede_dev_set_link_state(eth_dev, true);
1243
1244         /* Update link status */
1245         qede_link_update(eth_dev, 0);
1246
1247         /* Start/resume traffic */
1248         qede_fastpath_start(edev);
1249
1250         DP_INFO(edev, "Device started\n");
1251
1252         return 0;
1253 err:
1254         DP_ERR(edev, "Device start failed\n");
1255         return -1; /* common error code is < 0 */
1256 }
1257
1258 static void qede_dev_stop(struct rte_eth_dev *eth_dev)
1259 {
1260         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1261         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1262
1263         PMD_INIT_FUNC_TRACE(edev);
1264
1265         /* Disable vport */
1266         if (qede_activate_vport(eth_dev, false))
1267                 return;
1268
1269         if (qdev->enable_lro)
1270                 qede_enable_tpa(eth_dev, false);
1271
1272         /* Stop queues */
1273         qede_stop_queues(eth_dev);
1274
1275         /* Disable traffic */
1276         ecore_hw_stop_fastpath(edev); /* TBD - loop */
1277
1278         /* Bring the link down */
1279         qede_dev_set_link_state(eth_dev, false);
1280
1281         DP_INFO(edev, "Device is stopped\n");
1282 }
1283
1284 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
1285 {
1286         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1287         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1288         struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1289         int ret;
1290
1291         PMD_INIT_FUNC_TRACE(edev);
1292
1293         /* Check requirements for 100G mode */
1294         if (ECORE_IS_CMT(edev)) {
1295                 if (eth_dev->data->nb_rx_queues < 2 ||
1296                                 eth_dev->data->nb_tx_queues < 2) {
1297                         DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
1298                         return -EINVAL;
1299                 }
1300
1301                 if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
1302                                 (eth_dev->data->nb_tx_queues % 2 != 0)) {
1303                         DP_ERR(edev,
1304                                         "100G mode needs even no. of RX/TX queues\n");
1305                         return -EINVAL;
1306                 }
1307         }
1308
1309         /* We need at least one RX queue. There is no minimum check in
1310          * rte_eth_dev_configure(), so we check it here.
1311          */
1312         if (eth_dev->data->nb_rx_queues == 0) {
1313                 DP_ERR(edev, "Minimum one RX queue is required\n");
1314                 return -EINVAL;
1315         }
1316
1317         /* Sanity checks and throw warnings */
1318         if (rxmode->enable_scatter)
1319                 eth_dev->data->scattered_rx = 1;
1320
1321         if (!rxmode->hw_strip_crc)
1322                 DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
1323
1324         if (!rxmode->hw_ip_checksum)
1325                 DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
1326                                 "in hw\n");
1327         if (rxmode->header_split)
1328                 DP_INFO(edev, "Header split enable is not supported\n");
1329         if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
1330                                 ETH_MQ_RX_RSS)) {
1331                 DP_ERR(edev, "Unsupported multi-queue mode\n");
1332                 return -ENOTSUP;
1333         }
1334         /* Flow director mode check */
1335         if (qede_check_fdir_support(eth_dev))
1336                 return -ENOTSUP;
1337
1338         /* Deallocate resources if held previously. It is needed only if the
1339          * queue count has been changed from the previous configuration. If it is
1340          * going to change, then it means RX/TX queue setup will be called
1341          * again and the fastpath pointers will be reinitialized there.
1342          */
1343         if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
1344             qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
1345                 qede_dealloc_fp_resc(eth_dev);
1346                 /* Proceed with updated queue count */
1347                 qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
1348                 qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
1349                 if (qede_alloc_fp_resc(qdev))
1350                         return -ENOMEM;
1351         }
1352
1353         /* A VF's MTU has to be set using vport-start, whereas
1354          * a PF's MTU can be updated via vport-update.
1355          */
1356         if (IS_VF(edev)) {
1357                 if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
1358                         return -1;
1359         } else {
1360                 if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
1361                         return -1;
1362         }
1363
1364         qdev->mtu = rxmode->max_rx_pkt_len;
1365         qdev->new_mtu = qdev->mtu;
1366
1367         /* Enable VLAN offloads by default */
1368         ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
1369                         ETH_VLAN_FILTER_MASK |
1370                         ETH_VLAN_EXTEND_MASK);
1371         if (ret)
1372                 return ret;
1373
1374         DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
1375                         QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
1376
1377         return 0;
1378 }
1379
1380 /* Info about HW descriptor ring limitations */
1381 static const struct rte_eth_desc_lim qede_rx_desc_lim = {
1382         .nb_max = 0x8000, /* 32K */
1383         .nb_min = 128,
1384         .nb_align = 128 /* lowest common multiple */
1385 };
1386
1387 static const struct rte_eth_desc_lim qede_tx_desc_lim = {
1388         .nb_max = 0x8000, /* 32K */
1389         .nb_min = 256,
1390         .nb_align = 256,
1391         .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
1392         .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
1393 };
1394
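/* Report device capabilities: queue and descriptor ring limits, RSS table and
 * key sizes, RX/TX offload capabilities and the link speeds advertised by the
 * NVM configuration.
 */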
1395 static void
1396 qede_dev_info_get(struct rte_eth_dev *eth_dev,
1397                   struct rte_eth_dev_info *dev_info)
1398 {
1399         struct qede_dev *qdev = eth_dev->data->dev_private;
1400         struct ecore_dev *edev = &qdev->edev;
1401         struct qed_link_output link;
1402         uint32_t speed_cap = 0;
1403
1404         PMD_INIT_FUNC_TRACE(edev);
1405
1406         dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1407         dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
1408         dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
1409         dev_info->rx_desc_lim = qede_rx_desc_lim;
1410         dev_info->tx_desc_lim = qede_tx_desc_lim;
1411
1412         if (IS_PF(edev))
1413                 dev_info->max_rx_queues = (uint16_t)RTE_MIN(
1414                         QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
1415         else
1416                 dev_info->max_rx_queues = (uint16_t)RTE_MIN(
1417                         QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
1418         dev_info->max_tx_queues = dev_info->max_rx_queues;
1419
1420         dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
1421         dev_info->max_vfs = 0;
1422         dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
1423         dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
1424         dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
1425
1426         dev_info->default_txconf = (struct rte_eth_txconf) {
1427                 .txq_flags = QEDE_TXQ_FLAGS,
1428         };
1429
1430         dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP  |
1431                                      DEV_RX_OFFLOAD_IPV4_CKSUM  |
1432                                      DEV_RX_OFFLOAD_UDP_CKSUM   |
1433                                      DEV_RX_OFFLOAD_TCP_CKSUM   |
1434                                      DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1435                                      DEV_RX_OFFLOAD_TCP_LRO);
1436
1437         dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
1438                                      DEV_TX_OFFLOAD_IPV4_CKSUM  |
1439                                      DEV_TX_OFFLOAD_UDP_CKSUM   |
1440                                      DEV_TX_OFFLOAD_TCP_CKSUM   |
1441                                      DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1442                                      DEV_TX_OFFLOAD_TCP_TSO |
1443                                      DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1444                                      DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
1445
1446         memset(&link, 0, sizeof(struct qed_link_output));
1447         qdev->ops->common->get_link(edev, &link);
1448         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1449                 speed_cap |= ETH_LINK_SPEED_1G;
1450         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1451                 speed_cap |= ETH_LINK_SPEED_10G;
1452         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1453                 speed_cap |= ETH_LINK_SPEED_25G;
1454         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1455                 speed_cap |= ETH_LINK_SPEED_40G;
1456         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1457                 speed_cap |= ETH_LINK_SPEED_50G;
1458         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1459                 speed_cap |= ETH_LINK_SPEED_100G;
1460         dev_info->speed_capa = speed_cap;
1461 }
1462
1463 /* return 0 means link status changed, -1 means not changed */
1464 int
1465 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
1466 {
1467         struct qede_dev *qdev = eth_dev->data->dev_private;
1468         struct ecore_dev *edev = &qdev->edev;
1469         uint16_t link_duplex;
1470         struct qed_link_output link;
1471         struct rte_eth_link *curr = &eth_dev->data->dev_link;
1472
1473         memset(&link, 0, sizeof(struct qed_link_output));
1474         qdev->ops->common->get_link(edev, &link);
1475
1476         /* Link Speed */
1477         curr->link_speed = link.speed;
1478
1479         /* Link Mode */
1480         switch (link.duplex) {
1481         case QEDE_DUPLEX_HALF:
1482                 link_duplex = ETH_LINK_HALF_DUPLEX;
1483                 break;
1484         case QEDE_DUPLEX_FULL:
1485                 link_duplex = ETH_LINK_FULL_DUPLEX;
1486                 break;
1487         case QEDE_DUPLEX_UNKNOWN:
1488         default:
1489                 link_duplex = -1;
1490         }
1491         curr->link_duplex = link_duplex;
1492
1493         /* Link Status */
1494         curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
1495
1496         /* AN */
1497         curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
1498                              ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1499
1500         DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
1501                 curr->link_speed, curr->link_duplex,
1502                 curr->link_autoneg, curr->link_status);
1503
1504         /* return 0 means link status changed, -1 means not changed */
1505         return ((curr->link_status == link.link_up) ? -1 : 0);
1506 }
1507
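/* Enable unicast promiscuous mode. If all-multicast is already on, the
 * multicast-promiscuous bit is kept set so that mode is not lost.
 */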
1508 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
1509 {
1510 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
1511         struct qede_dev *qdev = eth_dev->data->dev_private;
1512         struct ecore_dev *edev = &qdev->edev;
1513
1514         PMD_INIT_FUNC_TRACE(edev);
1515 #endif
1516
1517         enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
1518
1519         if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1520                 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1521
1522         qed_configure_filter_rx_mode(eth_dev, type);
1523 }
1524
1525 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
1526 {
1527 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
1528         struct qede_dev *qdev = eth_dev->data->dev_private;
1529         struct ecore_dev *edev = &qdev->edev;
1530
1531         PMD_INIT_FUNC_TRACE(edev);
1532 #endif
1533
1534         if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1535                 qed_configure_filter_rx_mode(eth_dev,
1536                                 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
1537         else
1538                 qed_configure_filter_rx_mode(eth_dev,
1539                                 QED_FILTER_RX_MODE_TYPE_REGULAR);
1540 }
1541
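/* Periodic alarm callback: services the slowpath status block of both hwfns
 * and re-arms itself every timer_period seconds. qede_dev_close() cancels it
 * for CMT (100G) devices.
 */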
1542 static void qede_poll_sp_sb_cb(void *param)
1543 {
1544         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1545         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1546         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1547         int rc;
1548
1549         qede_interrupt_action(ECORE_LEADING_HWFN(edev));
1550         qede_interrupt_action(&edev->hwfns[1]);
1551
1552         rc = rte_eal_alarm_set(timer_period * US_PER_S,
1553                                qede_poll_sp_sb_cb,
1554                                (void *)eth_dev);
1555         if (rc != 0) {
1556                 DP_ERR(edev, "Unable to start periodic"
1557                              " timer rc %d\n", rc);
1558                 assert(false && "Unable to start periodic timer");
1559         }
1560 }
1561
1562 static void qede_dev_close(struct rte_eth_dev *eth_dev)
1563 {
1564         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1565         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1566         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1567
1568         PMD_INIT_FUNC_TRACE(edev);
1569
1570         /* dev_stop() shall clean up fp resources in hw without releasing
1571          * dma memories and sw structures, so that dev_start() can be called
1572          * by the app without reconfiguration. However, in dev_close() we
1573          * can release all the resources and the device can be brought up anew.
1574          */
1575         if (eth_dev->data->dev_started)
1576                 qede_dev_stop(eth_dev);
1577
1578         qede_stop_vport(edev);
1579         qede_fdir_dealloc_resc(eth_dev);
1580         qede_dealloc_fp_resc(eth_dev);
1581
1582         eth_dev->data->nb_rx_queues = 0;
1583         eth_dev->data->nb_tx_queues = 0;
1584
1585         qdev->ops->common->slowpath_stop(edev);
1586         qdev->ops->common->remove(edev);
1587         rte_intr_disable(&pci_dev->intr_handle);
1588         rte_intr_callback_unregister(&pci_dev->intr_handle,
1589                                      qede_interrupt_handler, (void *)eth_dev);
1590         if (ECORE_IS_CMT(edev))
1591                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
1592 }
1593
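/* stats_get handler: aggregates the per-vport HW statistics into the generic
 * rte_eth_stats counters and fills the per-queue counters for up to
 * RTE_ETHDEV_QUEUE_STAT_CNTRS RX/TX queues.
 */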
1594 static int
1595 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
1596 {
1597         struct qede_dev *qdev = eth_dev->data->dev_private;
1598         struct ecore_dev *edev = &qdev->edev;
1599         struct ecore_eth_stats stats;
1600         unsigned int i = 0, j = 0, qid;
1601         unsigned int rxq_stat_cntrs, txq_stat_cntrs;
1602         struct qede_tx_queue *txq;
1603
1604         ecore_get_vport_stats(edev, &stats);
1605
1606         /* RX Stats */
1607         eth_stats->ipackets = stats.common.rx_ucast_pkts +
1608             stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;
1609
1610         eth_stats->ibytes = stats.common.rx_ucast_bytes +
1611             stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;
1612
1613         eth_stats->ierrors = stats.common.rx_crc_errors +
1614             stats.common.rx_align_errors +
1615             stats.common.rx_carrier_errors +
1616             stats.common.rx_oversize_packets +
1617             stats.common.rx_jabbers + stats.common.rx_undersize_packets;
1618
1619         eth_stats->rx_nombuf = stats.common.no_buff_discards;
1620
1621         eth_stats->imissed = stats.common.mftag_filter_discards +
1622             stats.common.mac_filter_discards +
1623             stats.common.no_buff_discards +
1624             stats.common.brb_truncates + stats.common.brb_discards;
1625
1626         /* TX stats */
1627         eth_stats->opackets = stats.common.tx_ucast_pkts +
1628             stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;
1629
1630         eth_stats->obytes = stats.common.tx_ucast_bytes +
1631             stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;
1632
1633         eth_stats->oerrors = stats.common.tx_err_drop_pkts;
1634
1635         /* Queue stats */
1636         rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1637                                RTE_ETHDEV_QUEUE_STAT_CNTRS);
1638         txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
1639                                RTE_ETHDEV_QUEUE_STAT_CNTRS);
1640         if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
1641             (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
1642                 DP_VERBOSE(edev, ECORE_MSG_DEBUG,
1643                        "Not all the queue stats will be displayed. Set"
1644                        " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
1645                        " appropriately and retry.\n");
1646
1647         for_each_rss(qid) {
1648                 eth_stats->q_ipackets[i] =
1649                         *(uint64_t *)(
1650                                 ((char *)(qdev->fp_array[qid].rxq)) +
1651                                 offsetof(struct qede_rx_queue,
1652                                 rcv_pkts));
1653                 eth_stats->q_errors[i] =
1654                         *(uint64_t *)(
1655                                 ((char *)(qdev->fp_array[qid].rxq)) +
1656                                 offsetof(struct qede_rx_queue,
1657                                 rx_hw_errors)) +
1658                         *(uint64_t *)(
1659                                 ((char *)(qdev->fp_array[qid].rxq)) +
1660                                 offsetof(struct qede_rx_queue,
1661                                 rx_alloc_errors));
1662                 i++;
1663                 if (i == rxq_stat_cntrs)
1664                         break;
1665         }
1666
1667         for_each_tss(qid) {
1668                 txq = qdev->fp_array[qid].txq;
1669                 eth_stats->q_opackets[j] =
1670                         *((uint64_t *)(uintptr_t)
1671                                 (((uint64_t)(uintptr_t)(txq)) +
1672                                  offsetof(struct qede_tx_queue,
1673                                           xmit_pkts)));
1674                 j++;
1675                 if (j == txq_stat_cntrs)
1676                         break;
1677         }
1678
1679         return 0;
1680 }
1681
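/* Number of extended stats: the common strings, plus BB- or AH-specific
 * strings depending on the chip family, plus per-RX-queue strings for up to
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues.
 */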
1682 static unsigned
1683 qede_get_xstats_count(struct qede_dev *qdev) {
1684         if (ECORE_IS_BB(&qdev->edev))
1685                 return RTE_DIM(qede_xstats_strings) +
1686                        RTE_DIM(qede_bb_xstats_strings) +
1687                        (RTE_DIM(qede_rxq_xstats_strings) *
1688                         RTE_MIN(QEDE_RSS_COUNT(qdev),
1689                                 RTE_ETHDEV_QUEUE_STAT_CNTRS));
1690         else
1691                 return RTE_DIM(qede_xstats_strings) +
1692                        RTE_DIM(qede_ah_xstats_strings) +
1693                        (RTE_DIM(qede_rxq_xstats_strings) *
1694                         RTE_MIN(QEDE_RSS_COUNT(qdev),
1695                                 RTE_ETHDEV_QUEUE_STAT_CNTRS));
1696 }
1697
1698 static int
1699 qede_get_xstats_names(struct rte_eth_dev *dev,
1700                       struct rte_eth_xstat_name *xstats_names,
1701                       __rte_unused unsigned int limit)
1702 {
1703         struct qede_dev *qdev = dev->data->dev_private;
1704         struct ecore_dev *edev = &qdev->edev;
1705         const unsigned int stat_cnt = qede_get_xstats_count(qdev);
1706         unsigned int i, qid, stat_idx = 0;
1707         unsigned int rxq_stat_cntrs;
1708
1709         if (xstats_names != NULL) {
1710                 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1711                         snprintf(xstats_names[stat_idx].name,
1712                                 sizeof(xstats_names[stat_idx].name),
1713                                 "%s",
1714                                 qede_xstats_strings[i].name);
1715                         stat_idx++;
1716                 }
1717
1718                 if (ECORE_IS_BB(edev)) {
1719                         for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
1720                                 snprintf(xstats_names[stat_idx].name,
1721                                         sizeof(xstats_names[stat_idx].name),
1722                                         "%s",
1723                                         qede_bb_xstats_strings[i].name);
1724                                 stat_idx++;
1725                         }
1726                 } else {
1727                         for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
1728                                 snprintf(xstats_names[stat_idx].name,
1729                                         sizeof(xstats_names[stat_idx].name),
1730                                         "%s",
1731                                         qede_ah_xstats_strings[i].name);
1732                                 stat_idx++;
1733                         }
1734                 }
1735
1736                 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1737                                          RTE_ETHDEV_QUEUE_STAT_CNTRS);
1738                 for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1739                         for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1740                                 snprintf(xstats_names[stat_idx].name,
1741                                         sizeof(xstats_names[stat_idx].name),
1742                                         "%.4s%d%s",
1743                                         qede_rxq_xstats_strings[i].name, qid,
1744                                         qede_rxq_xstats_strings[i].name + 4);
1745                                 stat_idx++;
1746                         }
1747                 }
1748         }
1749
1750         return stat_cnt;
1751 }
1752
1753 static int
1754 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1755                 unsigned int n)
1756 {
1757         struct qede_dev *qdev = dev->data->dev_private;
1758         struct ecore_dev *edev = &qdev->edev;
1759         struct ecore_eth_stats stats;
1760         const unsigned int num = qede_get_xstats_count(qdev);
1761         unsigned int i, qid, stat_idx = 0;
1762         unsigned int rxq_stat_cntrs;
1763
1764         if (n < num)
1765                 return num;
1766
1767         ecore_get_vport_stats(edev, &stats);
1768
1769         for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1770                 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
1771                                              qede_xstats_strings[i].offset);
1772                 xstats[stat_idx].id = stat_idx;
1773                 stat_idx++;
1774         }
1775
1776         if (ECORE_IS_BB(edev)) {
1777                 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
1778                         xstats[stat_idx].value =
1779                                         *(uint64_t *)(((char *)&stats) +
1780                                         qede_bb_xstats_strings[i].offset);
1781                         xstats[stat_idx].id = stat_idx;
1782                         stat_idx++;
1783                 }
1784         } else {
1785                 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
1786                         xstats[stat_idx].value =
1787                                         *(uint64_t *)(((char *)&stats) +
1788                                         qede_ah_xstats_strings[i].offset);
1789                         xstats[stat_idx].id = stat_idx;
1790                         stat_idx++;
1791                 }
1792         }
1793
1794         rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1795                                  RTE_ETHDEV_QUEUE_STAT_CNTRS);
1796         for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1797                 /* Report only the first rxq_stat_cntrs Rx queues here,
1798                  * matching qede_get_xstats_count(). */
1799                 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1800                         xstats[stat_idx].value = *(uint64_t *)(
1801                                 ((char *)(qdev->fp_array[qid].rxq)) +
1802                                  qede_rxq_xstats_strings[i].offset);
1803                         xstats[stat_idx].id = stat_idx;
1804                         stat_idx++;
1805                 }
1806         }
1807
1808         return stat_idx;
1809 }
1810
1811 static void
1812 qede_reset_xstats(struct rte_eth_dev *dev)
1813 {
1814         struct qede_dev *qdev = dev->data->dev_private;
1815         struct ecore_dev *edev = &qdev->edev;
1816
1817         ecore_reset_vport_stats(edev);
1818 }
1819
1820 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
1821 {
1822         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1823         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1824         struct qed_link_params link_params;
1825         int rc;
1826
1827         DP_INFO(edev, "setting link state %d\n", link_up);
1828         memset(&link_params, 0, sizeof(link_params));
1829         link_params.link_up = link_up;
1830         rc = qdev->ops->common->set_link(edev, &link_params);
1831         if (rc != ECORE_SUCCESS)
1832                 DP_ERR(edev, "Unable to set link state %d\n", link_up);
1833
1834         return rc;
1835 }
1836
1837 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
1838 {
1839         return qede_dev_set_link_state(eth_dev, true);
1840 }
1841
1842 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
1843 {
1844         return qede_dev_set_link_state(eth_dev, false);
1845 }
1846
1847 static void qede_reset_stats(struct rte_eth_dev *eth_dev)
1848 {
1849         struct qede_dev *qdev = eth_dev->data->dev_private;
1850         struct ecore_dev *edev = &qdev->edev;
1851
1852         ecore_reset_vport_stats(edev);
1853 }
1854
1855 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
1856 {
1857         enum qed_filter_rx_mode_type type =
1858             QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1859
1860         if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1861                 type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
1862
1863         qed_configure_filter_rx_mode(eth_dev, type);
1864 }
1865
1866 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
1867 {
1868         if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1869                 qed_configure_filter_rx_mode(eth_dev,
1870                                 QED_FILTER_RX_MODE_TYPE_PROMISC);
1871         else
1872                 qed_configure_filter_rx_mode(eth_dev,
1873                                 QED_FILTER_RX_MODE_TYPE_REGULAR);
1874 }
1875
1876 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
1877                               struct rte_eth_fc_conf *fc_conf)
1878 {
1879         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1880         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1881         struct qed_link_output current_link;
1882         struct qed_link_params params;
1883
1884         memset(&current_link, 0, sizeof(current_link));
1885         qdev->ops->common->get_link(edev, &current_link);
1886
1887         memset(&params, 0, sizeof(params));
1888         params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
1889         if (fc_conf->autoneg) {
1890                 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
1891                         DP_ERR(edev, "Autoneg not supported\n");
1892                         return -EINVAL;
1893                 }
1894                 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
1895         }
1896
1897         /* Pause is assumed to be supported (SUPPORTED_Pause) */
1898         if (fc_conf->mode == RTE_FC_FULL)
1899                 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
1900                                         QED_LINK_PAUSE_RX_ENABLE);
1901         if (fc_conf->mode == RTE_FC_TX_PAUSE)
1902                 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1903         if (fc_conf->mode == RTE_FC_RX_PAUSE)
1904                 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1905
1906         params.link_up = true;
1907         (void)qdev->ops->common->set_link(edev, &params);
1908
1909         return 0;
1910 }
1911
1912 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
1913                               struct rte_eth_fc_conf *fc_conf)
1914 {
1915         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1916         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1917         struct qed_link_output current_link;
1918
1919         memset(&current_link, 0, sizeof(current_link));
1920         qdev->ops->common->get_link(edev, &current_link);
1921
1922         if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1923                 fc_conf->autoneg = true;
1924
1925         if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
1926                                          QED_LINK_PAUSE_TX_ENABLE))
1927                 fc_conf->mode = RTE_FC_FULL;
1928         else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
1929                 fc_conf->mode = RTE_FC_RX_PAUSE;
1930         else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
1931                 fc_conf->mode = RTE_FC_TX_PAUSE;
1932         else
1933                 fc_conf->mode = RTE_FC_NONE;
1934
1935         return 0;
1936 }
1937
1938 static const uint32_t *
1939 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
1940 {
1941         static const uint32_t ptypes[] = {
1942                 RTE_PTYPE_L2_ETHER,
1943                 RTE_PTYPE_L2_ETHER_VLAN,
1944                 RTE_PTYPE_L3_IPV4,
1945                 RTE_PTYPE_L3_IPV6,
1946                 RTE_PTYPE_L4_TCP,
1947                 RTE_PTYPE_L4_UDP,
1948                 RTE_PTYPE_TUNNEL_VXLAN,
1949                 RTE_PTYPE_L4_FRAG,
1950                 RTE_PTYPE_TUNNEL_GENEVE,
1951                 /* Inner */
1952                 RTE_PTYPE_INNER_L2_ETHER,
1953                 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1954                 RTE_PTYPE_INNER_L3_IPV4,
1955                 RTE_PTYPE_INNER_L3_IPV6,
1956                 RTE_PTYPE_INNER_L4_TCP,
1957                 RTE_PTYPE_INNER_L4_UDP,
1958                 RTE_PTYPE_INNER_L4_FRAG,
1959                 RTE_PTYPE_UNKNOWN
1960         };
1961
1962         if (eth_dev->rx_pkt_burst == qede_recv_pkts)
1963                 return ptypes;
1964
1965         return NULL;
1966 }
1967
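/* Translate rte_eth RSS hash-function flags into ecore RSS capability bits.
 * The IPV6_EX/IPV6_TCP_EX flags are mapped onto the plain IPv6 capabilities.
 */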
1968 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
1969 {
1970         *rss_caps = 0;
1971         *rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
1972         *rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
1973         *rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
1974         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
1975         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
1976         *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
1977         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
1978         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
1979 }
1980
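/* rss_hash_update handler: programs RSS capabilities, hash key and enable
 * state through a vport-update on every hwfn, and caches the configuration in
 * qdev->rss_conf so that rss_hash_conf_get() can return it later.
 */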
1981 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
1982                          struct rte_eth_rss_conf *rss_conf)
1983 {
1984         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1985         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1986         struct ecore_sp_vport_update_params vport_update_params;
1987         struct ecore_rss_params rss_params;
1988         struct ecore_hwfn *p_hwfn;
1989         uint32_t *key = (uint32_t *)rss_conf->rss_key;
1990         uint64_t hf = rss_conf->rss_hf;
1991         uint8_t len = rss_conf->rss_key_len;
1992         uint8_t idx;
1993         uint8_t i;
1994         int rc;
1995
1996         memset(&vport_update_params, 0, sizeof(vport_update_params));
1997         memset(&rss_params, 0, sizeof(rss_params));
1998
1999         DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
2000                 (unsigned long)hf, len, key);
2001
2002         if (hf != 0) {
2003                 /* Enabling RSS */
2004                 DP_INFO(edev, "Enabling rss\n");
2005
2006                 /* RSS caps */
2007                 qede_init_rss_caps(&rss_params.rss_caps, hf);
2008                 rss_params.update_rss_capabilities = 1;
2009
2010                 /* RSS hash key */
2011                 if (key) {
2012                         if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
2013                                 DP_ERR(edev, "RSS key length exceeds limit\n");
2014                                 return -EINVAL;
2015                         }
2016                         DP_INFO(edev, "Applying user supplied hash key\n");
2017                         rss_params.update_rss_key = 1;
2018                         memcpy(&rss_params.rss_key, key, len);
2019                 }
2020                 rss_params.rss_enable = 1;
2021         }
2022
2023         rss_params.update_rss_config = 1;
2024         /* tbl_size has to be set with capabilities */
2025         rss_params.rss_table_size_log = 7;
2026         vport_update_params.vport_id = 0;
2027         /* pass the L2 handles instead of qids */
2028         for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
2029                 idx = qdev->rss_ind_table[i];
2030                 rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
2031         }
2032         vport_update_params.rss_params = &rss_params;
2033
2034         for_each_hwfn(edev, i) {
2035                 p_hwfn = &edev->hwfns[i];
2036                 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2037                 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
2038                                            ECORE_SPQ_MODE_EBLOCK, NULL);
2039                 if (rc) {
2040                         DP_ERR(edev, "vport-update for RSS failed\n");
2041                         return rc;
2042                 }
2043         }
2044         qdev->rss_enable = rss_params.rss_enable;
2045
2046         /* Update local structure for hash query */
2047         qdev->rss_conf.rss_hf = hf;
2048         qdev->rss_conf.rss_key_len = len;
2049         if (qdev->rss_enable) {
2050                 if  (qdev->rss_conf.rss_key == NULL) {
2051                         qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
2052                         if (qdev->rss_conf.rss_key == NULL) {
2053                                 DP_ERR(edev, "No memory to store RSS key\n");
2054                                 return -ENOMEM;
2055                         }
2056                 }
2057                 if (key && len) {
2058                         DP_INFO(edev, "Storing RSS key\n");
2059                         memcpy(qdev->rss_conf.rss_key, key, len);
2060                 }
2061         } else if (!qdev->rss_enable && len == 0) {
2062                 if (qdev->rss_conf.rss_key) {
2063                         free(qdev->rss_conf.rss_key);
2064                         qdev->rss_conf.rss_key = NULL;
2065                         DP_INFO(edev, "Free RSS key\n");
2066                 }
2067         }
2068
2069         return 0;
2070 }
2071
2072 static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
2073                            struct rte_eth_rss_conf *rss_conf)
2074 {
2075         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2076
2077         rss_conf->rss_hf = qdev->rss_conf.rss_hf;
2078         rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;
2079
2080         if (rss_conf->rss_key && qdev->rss_conf.rss_key)
2081                 memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
2082                        rss_conf->rss_key_len);
2083         return 0;
2084 }
2085
2086 static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
2087                                     struct ecore_rss_params *rss)
2088 {
2089         int i, fn;
2090         bool rss_mode = 1; /* enable */
2091         struct ecore_queue_cid *cid;
2092         struct ecore_rss_params *t_rss;
2093
2094         /* In a regular scenario, we'd simply need to take the input handlers.
2095          * But in CMT, we'd have to split the handlers according to the
2096          * engine they were configured on. We'd then have to understand
2097          * whether RSS is really required, since two queues on CMT don't
2098          * require RSS.
2099          */
2100
2101         /* CMT should be round-robin */
2102         for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
2103                 cid = rss->rss_ind_table[i];
2104
2105                 if (cid->p_owner == ECORE_LEADING_HWFN(edev))
2106                         t_rss = &rss[0];
2107                 else
2108                         t_rss = &rss[1];
2109
2110                 t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
2111         }
2112
2113         t_rss = &rss[1];
2114         t_rss->update_rss_ind_table = 1;
2115         t_rss->rss_table_size_log = 7;
2116         t_rss->update_rss_config = 1;
2117
2118         /* Make sure RSS is actually required */
2119         for_each_hwfn(edev, fn) {
2120                 for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
2121                      i++) {
2122                         if (rss[fn].rss_ind_table[i] !=
2123                             rss[fn].rss_ind_table[0])
2124                                 break;
2125                 }
2126
2127                 if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
2128                         DP_INFO(edev,
2129                                 "CMT - 1 queue per-hwfn; Disabling RSS\n");
2130                         rss_mode = 0;
2131                         goto out;
2132                 }
2133         }
2134
2135 out:
2136         t_rss->rss_enable = rss_mode;
2137
2138         return rss_mode;
2139 }
2140
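/* reta_update handler: converts the RETA entries into rxq L2 handles for
 * ecore, keeps a local copy for reta_query, fixes up the table for CMT (100G)
 * devices and issues a vport-update on each hwfn.
 */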
2141 int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
2142                          struct rte_eth_rss_reta_entry64 *reta_conf,
2143                          uint16_t reta_size)
2144 {
2145         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2146         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2147         struct ecore_sp_vport_update_params vport_update_params;
2148         struct ecore_rss_params *params;
2149         struct ecore_hwfn *p_hwfn;
2150         uint16_t i, idx, shift;
2151         uint8_t entry;
2152         int rc = 0;
2153
2154         if (reta_size > ETH_RSS_RETA_SIZE_128) {
2155                 DP_ERR(edev, "reta_size %d is not supported by hardware\n",
2156                        reta_size);
2157                 return -EINVAL;
2158         }
2159
2160         memset(&vport_update_params, 0, sizeof(vport_update_params));
2161         params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
2162                              RTE_CACHE_LINE_SIZE);
2163         if (params == NULL) {
2164                 DP_ERR(edev, "failed to allocate memory\n");
2165                 return -ENOMEM;
2166         }
2167
2168         for (i = 0; i < reta_size; i++) {
2169                 idx = i / RTE_RETA_GROUP_SIZE;
2170                 shift = i % RTE_RETA_GROUP_SIZE;
2171                 if (reta_conf[idx].mask & (1ULL << shift)) {
2172                         entry = reta_conf[idx].reta[shift];
2173                         /* Pass rxq handles to ecore */
2174                         params->rss_ind_table[i] =
2175                                         qdev->fp_array[entry].rxq->handle;
2176                         /* Update the local copy for RETA query command */
2177                         qdev->rss_ind_table[i] = entry;
2178                 }
2179         }
2180
2181         params->update_rss_ind_table = 1;
2182         params->rss_table_size_log = 7;
2183         params->update_rss_config = 1;
2184
2185         /* Fix up RETA for CMT mode device */
2186         if (ECORE_IS_CMT(edev))
2187                 qdev->rss_enable = qede_update_rss_parm_cmt(edev,
2188                                                             params);
2189         vport_update_params.vport_id = 0;
2190         /* Use the current value of rss_enable */
2191         params->rss_enable = qdev->rss_enable;
2192         vport_update_params.rss_params = params;
2193
2194         for_each_hwfn(edev, i) {
2195                 p_hwfn = &edev->hwfns[i];
2196                 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2197                 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
2198                                            ECORE_SPQ_MODE_EBLOCK, NULL);
2199                 if (rc) {
2200                         DP_ERR(edev, "vport-update for RSS failed\n");
2201                         goto out;
2202                 }
2203         }
2204
2205 out:
2206         rte_free(params);
2207         return rc;
2208 }
2209
2210 static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
2211                                struct rte_eth_rss_reta_entry64 *reta_conf,
2212                                uint16_t reta_size)
2213 {
2214         struct qede_dev *qdev = eth_dev->data->dev_private;
2215         struct ecore_dev *edev = &qdev->edev;
2216         uint16_t i, idx, shift;
2217         uint8_t entry;
2218
2219         if (reta_size > ETH_RSS_RETA_SIZE_128) {
2220                 DP_ERR(edev, "reta_size %d is not supported\n",
2221                        reta_size);
2222                 return -EINVAL;
2223         }
2224
2225         for (i = 0; i < reta_size; i++) {
2226                 idx = i / RTE_RETA_GROUP_SIZE;
2227                 shift = i % RTE_RETA_GROUP_SIZE;
2228                 if (reta_conf[idx].mask & (1ULL << shift)) {
2229                         entry = qdev->rss_ind_table[i];
2230                         reta_conf[idx].reta[shift] = entry;
2231                 }
2232         }
2233
2234         return 0;
2235 }
2236
2237
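/* mtu_set handler. Changing the MTU requires restarting the port: traffic is
 * parked on dummy burst functions, the device is stopped, the RX buffer size
 * of every queue is recomputed for the new MTU, and the device is started
 * again with the updated max_rx_pkt_len.
 */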
2239 static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
2240 {
2241         struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
2242         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2243         struct rte_eth_dev_info dev_info = {0};
2244         struct qede_fastpath *fp;
2245         uint32_t frame_size;
2246         uint16_t rx_buf_size;
2247         uint16_t bufsz;
2248         int i;
2249
2250         PMD_INIT_FUNC_TRACE(edev);
2251         qede_dev_info_get(dev, &dev_info);
2252         frame_size = mtu + QEDE_ETH_OVERHEAD;
2253         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
2254                 DP_ERR(edev, "MTU %u out of range\n", mtu);
2255                 return -EINVAL;
2256         }
2257         if (!dev->data->scattered_rx &&
2258             frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
2259                 DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
2260                         dev->data->min_rx_buf_size);
2261                 return -EINVAL;
2262         }
2263         /* Temporarily replace I/O functions with dummy ones. It cannot
2264          * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
2265          */
2266         dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
2267         dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
2268         qede_dev_stop(dev);
2269         rte_delay_ms(1000);
2270         qdev->mtu = mtu;
2271         /* Fix up RX buf size for all queues of the port */
2272         for_each_rss(i) {
2273                 fp = &qdev->fp_array[i];
2274                 bufsz = (uint16_t)rte_pktmbuf_data_room_size(
2275                         fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
2276                 if (dev->data->scattered_rx)
2277                         rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
2278                 else
2279                         rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
2280                 rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
2281                 fp->rxq->rx_buf_size = rx_buf_size;
2282                 DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
2283         }
2284         qede_dev_start(dev);
2285         if (frame_size > ETHER_MAX_LEN)
2286                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
2287         else
2288                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
2289         /* update max frame size */
2290         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
2291         /* Reassign back */
2292         dev->rx_pkt_burst = qede_recv_pkts;
2293         dev->tx_pkt_burst = qede_xmit_pkts;
2294
2295         return 0;
2296 }
2297
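/* udp_tunnel_port_del handler: clears the VXLAN/GENEVE UDP destination port
 * in HW and, when no tunnel filters remain, turns off the corresponding
 * tunnel offload.
 */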
2298 static int
2299 qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
2300                       struct rte_eth_udp_tunnel *tunnel_udp)
2301 {
2302         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2303         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2304         struct ecore_tunnel_info tunn; /* @DPDK */
2305         uint16_t udp_port;
2306         int rc;
2307
2308         PMD_INIT_FUNC_TRACE(edev);
2309
2310         memset(&tunn, 0, sizeof(tunn));
2311
2312         switch (tunnel_udp->prot_type) {
2313         case RTE_TUNNEL_TYPE_VXLAN:
2314                 if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
2315                         DP_ERR(edev, "UDP port %u doesn't exist\n",
2316                                 tunnel_udp->udp_port);
2317                         return ECORE_INVAL;
2318                 }
2319                 udp_port = 0;
2320
2321                 tunn.vxlan_port.b_update_port = true;
2322                 tunn.vxlan_port.port = udp_port;
2323
2324                 rc = qede_tunnel_update(qdev, &tunn);
2325                 if (rc != ECORE_SUCCESS) {
2326                         DP_ERR(edev, "Unable to config UDP port %u\n",
2327                                tunn.vxlan_port.port);
2328                         return rc;
2329                 }
2330
2331                 qdev->vxlan.udp_port = udp_port;
2332                 /* If the request is to delete the UDP port and the number of
2333                  * VXLAN filters has reached 0, then VXLAN offload can be
2334                  * disabled.
2335                  */
2336                 if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
2337                         return qede_vxlan_enable(eth_dev,
2338                                         ECORE_TUNN_CLSS_MAC_VLAN, false);
2339
2340                 break;
2341
2342         case RTE_TUNNEL_TYPE_GENEVE:
2343                 if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
2344                         DP_ERR(edev, "UDP port %u doesn't exist\n",
2345                                 tunnel_udp->udp_port);
2346                         return ECORE_INVAL;
2347                 }
2348
2349                 udp_port = 0;
2350
2351                 tunn.geneve_port.b_update_port = true;
2352                 tunn.geneve_port.port = udp_port;
2353
2354                 rc = qede_tunnel_update(qdev, &tunn);
2355                 if (rc != ECORE_SUCCESS) {
2356                         DP_ERR(edev, "Unable to config UDP port %u\n",
2357                                tunn.geneve_port.port);
2358                         return rc;
2359                 }
2360
2361                 qdev->geneve.udp_port = udp_port;
2362                 /* If the request is to delete the UDP port and the number of
2363                  * GENEVE filters has reached 0, then GENEVE offload can be
2364                  * disabled.
2365                  */
2366                 if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
2367                         return qede_geneve_enable(eth_dev,
2368                                         ECORE_TUNN_CLSS_MAC_VLAN, false);
2369
2370                 break;
2371
2372         default:
2373                 return ECORE_INVAL;
2374         }
2375
2376         return 0;
2377 }
2378
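/* udp_tunnel_port_add handler: enables the VXLAN/GENEVE tunnel with default
 * MAC/VLAN classification if needed and programs the new UDP destination
 * port in HW.
 */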
2379 static int
2380 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
2381                       struct rte_eth_udp_tunnel *tunnel_udp)
2382 {
2383         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2384         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2385         struct ecore_tunnel_info tunn; /* @DPDK */
2386         uint16_t udp_port;
2387         int rc;
2388
2389         PMD_INIT_FUNC_TRACE(edev);
2390
2391         memset(&tunn, 0, sizeof(tunn));
2392
2393         switch (tunnel_udp->prot_type) {
2394         case RTE_TUNNEL_TYPE_VXLAN:
2395                 if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
2396                         DP_INFO(edev,
2397                                 "UDP port %u for VXLAN was already configured\n",
2398                                 tunnel_udp->udp_port);
2399                         return ECORE_SUCCESS;
2400                 }
2401
2402                 /* Enable the VXLAN tunnel with default MAC/VLAN classification
2403                  * if it was not already enabled when a VXLAN filter was added
2404                  * before this UDP port update.
2405                  */
2406                 if (!qdev->vxlan.enable) {
2407                         rc = qede_vxlan_enable(eth_dev,
2408                                 ECORE_TUNN_CLSS_MAC_VLAN, true);
2409                         if (rc != ECORE_SUCCESS) {
2410                                 DP_ERR(edev, "Failed to enable VXLAN "
2411                                         "prior to updating UDP port\n");
2412                                 return rc;
2413                         }
2414                 }
2415                 udp_port = tunnel_udp->udp_port;
2416
2417                 tunn.vxlan_port.b_update_port = true;
2418                 tunn.vxlan_port.port = udp_port;
2419
2420                 rc = qede_tunnel_update(qdev, &tunn);
2421                 if (rc != ECORE_SUCCESS) {
2422                         DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
2423                                udp_port);
2424                         return rc;
2425                 }
2426
2427                 DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);
2428
2429                 qdev->vxlan.udp_port = udp_port;
2430                 break;
2431
2432         case RTE_TUNNEL_TYPE_GENEVE:
2433                 if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
2434                         DP_INFO(edev,
2435                                 "UDP port %u for GENEVE was already configured\n",
2436                                 tunnel_udp->udp_port);
2437                         return ECORE_SUCCESS;
2438                 }
2439
2440                 /* Enable the GENEVE tunnel with default MAC/VLAN classification
2441                  * if it was not already enabled when a GENEVE filter was added
2442                  * before this UDP port update.
2443                  */
2444                 if (!qdev->geneve.enable) {
2445                         rc = qede_geneve_enable(eth_dev,
2446                                 ECORE_TUNN_CLSS_MAC_VLAN, true);
2447                         if (rc != ECORE_SUCCESS) {
2448                                 DP_ERR(edev, "Failed to enable GENEVE "
2449                                         "prior to updating UDP port\n");
2450                                 return rc;
2451                         }
2452                 }
2453                 udp_port = tunnel_udp->udp_port;
2454
2455                 tunn.geneve_port.b_update_port = true;
2456                 tunn.geneve_port.port = udp_port;
2457
2458                 rc = qede_tunnel_update(qdev, &tunn);
2459                 if (rc != ECORE_SUCCESS) {
2460                         DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
2461                                udp_port);
2462                         return rc;
2463                 }
2464
2465                 DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);
2466
2467                 qdev->geneve.udp_port = udp_port;
2468                 break;
2469
2470         default:
2471                 return ECORE_INVAL;
2472         }
2473
2474         return 0;
2475 }
2476
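/* Look up the given rte tunnel filter type in qede_tunn_types[] and return
 * the matching ecore unicast filter type, tunnel classification and a
 * printable name; *clss stays MAX_ECORE_TUNN_CLSS if the type is unsupported.
 */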
2477 static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
2478                                        uint32_t *clss, char *str)
2479 {
2480         uint16_t j;
2481         *clss = MAX_ECORE_TUNN_CLSS;
2482
2483         for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
2484                 if (filter == qede_tunn_types[j].rte_filter_type) {
2485                         *type = qede_tunn_types[j].qede_type;
2486                         *clss = qede_tunn_types[j].qede_tunn_clss;
2487                         strcpy(str, qede_tunn_types[j].string);
2488                         return;
2489                 }
2490         }
2491 }
2492
2493 static int
2494 qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
2495                               const struct rte_eth_tunnel_filter_conf *conf,
2496                               uint32_t type)
2497 {
2498         /* Init common ucast params first */
2499         qede_set_ucast_cmn_params(ucast);
2500
2501         /* Copy out the required fields based on classification type */
2502         ucast->type = type;
2503
2504         switch (type) {
2505         case ECORE_FILTER_VNI:
2506                 ucast->vni = conf->tenant_id;
2507         break;
2508         case ECORE_FILTER_INNER_VLAN:
2509                 ucast->vlan = conf->inner_vlan;
2510         break;
2511         case ECORE_FILTER_MAC:
2512                 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
2513                        ETHER_ADDR_LEN);
2514         break;
2515         case ECORE_FILTER_INNER_MAC:
2516                 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2517                        ETHER_ADDR_LEN);
2518         break;
2519         case ECORE_FILTER_MAC_VNI_PAIR:
2520                 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
2521                         ETHER_ADDR_LEN);
2522                 ucast->vni = conf->tenant_id;
2523         break;
2524         case ECORE_FILTER_INNER_MAC_VNI_PAIR:
2525                 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2526                         ETHER_ADDR_LEN);
2527                 ucast->vni = conf->tenant_id;
2528         break;
2529         case ECORE_FILTER_INNER_PAIR:
2530                 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2531                         ETHER_ADDR_LEN);
2532                 ucast->vlan = conf->inner_vlan;
2533         break;
2534         default:
2535                 return -EINVAL;
2536         }
2537
2538         return ECORE_SUCCESS;
2539 }
2540
2541 static int
2542 _qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
2543                          const struct rte_eth_tunnel_filter_conf *conf,
2544                          __attribute__((unused)) enum rte_filter_op filter_op,
2545                          enum ecore_tunn_clss *clss,
2546                          bool add)
2547 {
2548         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2549         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2550         struct ecore_filter_ucast ucast = {0};
2551         enum ecore_filter_ucast_type type;
2552         uint16_t filter_type = 0;
2553         char str[80];
2554         int rc;
2555
2556         filter_type = conf->filter_type;
2557         /* Determine if the given filter classification is supported */
2558         qede_get_ecore_tunn_params(filter_type, &type, clss, str);
2559         if (*clss == MAX_ECORE_TUNN_CLSS) {
2560                 DP_ERR(edev, "Unsupported filter type\n");
2561                 return -EINVAL;
2562         }
2563         /* Init tunnel ucast params */
2564         rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
2565         if (rc != ECORE_SUCCESS) {
2566                 DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
2567                 conf->filter_type);
2568                 return rc;
2569         }
2570         DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
2571                 str, filter_op, ucast.type);
2572
2573         ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
2574
2575         /* Skip MAC/VLAN if filter is based on VNI */
2576         if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
2577                 rc = qede_mac_int_ops(eth_dev, &ucast, add);
2578                 if ((rc == 0) && add) {
2579                         /* Enable accept anyvlan */
2580                         qede_config_accept_any_vlan(qdev, true);
2581                 }
2582         } else {
2583                 rc = qede_ucast_filter(eth_dev, &ucast, add);
2584                 if (rc == 0)
2585                         rc = ecore_filter_ucast_cmd(edev, &ucast,
2586                                             ECORE_SPQ_MODE_CB, NULL);
2587         }
2588
2589         return rc;
2590 }
2591
2592 static int
2593 qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
2594                         enum rte_filter_op filter_op,
2595                         const struct rte_eth_tunnel_filter_conf *conf)
2596 {
2597         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2598         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2599         enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
2600         bool add;
2601         int rc;
2602
2603         PMD_INIT_FUNC_TRACE(edev);
2604
2605         switch (filter_op) {
2606         case RTE_ETH_FILTER_ADD:
2607                 add = true;
2608                 break;
2609         case RTE_ETH_FILTER_DELETE:
2610                 add = false;
2611                 break;
2612         default:
2613                 DP_ERR(edev, "Unsupported operation %d\n", filter_op);
2614                 return -EINVAL;
2615         }
2616
2617         if (IS_VF(edev))
2618                 return qede_tunn_enable(eth_dev,
2619                                         ECORE_TUNN_CLSS_MAC_VLAN,
2620                                         conf->tunnel_type, add);
2621
2622         rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
2623         if (rc != ECORE_SUCCESS)
2624                 return rc;
2625
2626         if (add) {
2627                 if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
2628                         qdev->vxlan.num_filters++;
2629                         qdev->vxlan.filter_type = conf->filter_type;
2630                 } else { /* GENEVE */
2631                         qdev->geneve.num_filters++;
2632                         qdev->geneve.filter_type = conf->filter_type;
2633                 }
2634
2635                 if (!qdev->vxlan.enable || !qdev->geneve.enable)
2636                         return qede_tunn_enable(eth_dev, clss,
2637                                                 conf->tunnel_type,
2638                                                 true);
2639         } else {
2640                 if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
2641                         qdev->vxlan.num_filters--;
2642                 else /*GENEVE*/
2643                         qdev->geneve.num_filters--;
2644
2645                 /* Disable tunnel offload if the filter count drops to 0 */
2646                 if ((qdev->vxlan.num_filters == 0) ||
2647                     (qdev->geneve.num_filters == 0))
2648                         return qede_tunn_enable(eth_dev, clss,
2649                                                 conf->tunnel_type,
2650                                                 false);
2651         }
2652
2653         return 0;
2654 }
2655
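/* filter_ctrl handler: dispatches tunnel (VXLAN/GENEVE), flow director and
 * ntuple filter requests; all other filter types are rejected.
 */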
2656 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
2657                          enum rte_filter_type filter_type,
2658                          enum rte_filter_op filter_op,
2659                          void *arg)
2660 {
2661         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2662         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2663         struct rte_eth_tunnel_filter_conf *filter_conf =
2664                         (struct rte_eth_tunnel_filter_conf *)arg;
2665
2666         switch (filter_type) {
2667         case RTE_ETH_FILTER_TUNNEL:
2668                 switch (filter_conf->tunnel_type) {
2669                 case RTE_TUNNEL_TYPE_VXLAN:
2670                 case RTE_TUNNEL_TYPE_GENEVE:
2671                         DP_INFO(edev,
2672                                 "Packet steering to the specified Rx queue"
2673                                 " is not supported with UDP tunneling\n");
2674                         return qede_tunn_filter_config(eth_dev, filter_op,
2675                                                        filter_conf);
2676                 /* Placeholders for future tunneling support */
2677                 case RTE_TUNNEL_TYPE_TEREDO:
2678                 case RTE_TUNNEL_TYPE_NVGRE:
2679                 case RTE_TUNNEL_TYPE_IP_IN_GRE:
2680                 case RTE_L2_TUNNEL_TYPE_E_TAG:
2681                         DP_ERR(edev, "Unsupported tunnel type %d\n",
2682                                 filter_conf->tunnel_type);
2683                         return -EINVAL;
2684                 case RTE_TUNNEL_TYPE_NONE:
2685                 default:
2686                         return 0;
2687                 }
2688                 break;
2689         case RTE_ETH_FILTER_FDIR:
2690                 return qede_fdir_filter_conf(eth_dev, filter_op, arg);
2691         case RTE_ETH_FILTER_NTUPLE:
2692                 return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
2693         case RTE_ETH_FILTER_MACVLAN:
2694         case RTE_ETH_FILTER_ETHERTYPE:
2695         case RTE_ETH_FILTER_FLEXIBLE:
2696         case RTE_ETH_FILTER_SYN:
2697         case RTE_ETH_FILTER_HASH:
2698         case RTE_ETH_FILTER_L2_TUNNEL:
2699         case RTE_ETH_FILTER_MAX:
2700         default:
2701                 DP_ERR(edev, "Unsupported filter type %d\n",
2702                         filter_type);
2703                 return -EINVAL;
2704         }
2705
2706         return 0;
2707 }
2708
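/* Device operations for the PF: includes MAC filtering, flow control,
 * filter_ctrl and UDP tunnel port configuration in addition to the common
 * Rx/Tx, stats and RSS callbacks.
 */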
2709 static const struct eth_dev_ops qede_eth_dev_ops = {
2710         .dev_configure = qede_dev_configure,
2711         .dev_infos_get = qede_dev_info_get,
2712         .rx_queue_setup = qede_rx_queue_setup,
2713         .rx_queue_release = qede_rx_queue_release,
2714         .tx_queue_setup = qede_tx_queue_setup,
2715         .tx_queue_release = qede_tx_queue_release,
2716         .dev_start = qede_dev_start,
2717         .dev_set_link_up = qede_dev_set_link_up,
2718         .dev_set_link_down = qede_dev_set_link_down,
2719         .link_update = qede_link_update,
2720         .promiscuous_enable = qede_promiscuous_enable,
2721         .promiscuous_disable = qede_promiscuous_disable,
2722         .allmulticast_enable = qede_allmulticast_enable,
2723         .allmulticast_disable = qede_allmulticast_disable,
2724         .dev_stop = qede_dev_stop,
2725         .dev_close = qede_dev_close,
2726         .stats_get = qede_get_stats,
2727         .stats_reset = qede_reset_stats,
2728         .xstats_get = qede_get_xstats,
2729         .xstats_reset = qede_reset_xstats,
2730         .xstats_get_names = qede_get_xstats_names,
2731         .mac_addr_add = qede_mac_addr_add,
2732         .mac_addr_remove = qede_mac_addr_remove,
2733         .mac_addr_set = qede_mac_addr_set,
2734         .vlan_offload_set = qede_vlan_offload_set,
2735         .vlan_filter_set = qede_vlan_filter_set,
2736         .flow_ctrl_set = qede_flow_ctrl_set,
2737         .flow_ctrl_get = qede_flow_ctrl_get,
2738         .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
2739         .rss_hash_update = qede_rss_hash_update,
2740         .rss_hash_conf_get = qede_rss_hash_conf_get,
2741         .reta_update  = qede_rss_reta_update,
2742         .reta_query  = qede_rss_reta_query,
2743         .mtu_set = qede_set_mtu,
2744         .filter_ctrl = qede_dev_filter_ctrl,
2745         .udp_tunnel_port_add = qede_udp_dst_port_add,
2746         .udp_tunnel_port_del = qede_udp_dst_port_del,
2747 };
2748
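/* Device operations for the VF: same Rx/Tx, stats and RSS callbacks as the
 * PF, but without the MAC address, flow control and filter_ctrl callbacks,
 * which are not exposed for VFs.
 */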
2749 static const struct eth_dev_ops qede_eth_vf_dev_ops = {
2750         .dev_configure = qede_dev_configure,
2751         .dev_infos_get = qede_dev_info_get,
2752         .rx_queue_setup = qede_rx_queue_setup,
2753         .rx_queue_release = qede_rx_queue_release,
2754         .tx_queue_setup = qede_tx_queue_setup,
2755         .tx_queue_release = qede_tx_queue_release,
2756         .dev_start = qede_dev_start,
2757         .dev_set_link_up = qede_dev_set_link_up,
2758         .dev_set_link_down = qede_dev_set_link_down,
2759         .link_update = qede_link_update,
2760         .promiscuous_enable = qede_promiscuous_enable,
2761         .promiscuous_disable = qede_promiscuous_disable,
2762         .allmulticast_enable = qede_allmulticast_enable,
2763         .allmulticast_disable = qede_allmulticast_disable,
2764         .dev_stop = qede_dev_stop,
2765         .dev_close = qede_dev_close,
2766         .stats_get = qede_get_stats,
2767         .stats_reset = qede_reset_stats,
2768         .xstats_get = qede_get_xstats,
2769         .xstats_reset = qede_reset_xstats,
2770         .xstats_get_names = qede_get_xstats_names,
2771         .vlan_offload_set = qede_vlan_offload_set,
2772         .vlan_filter_set = qede_vlan_filter_set,
2773         .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
2774         .rss_hash_update = qede_rss_hash_update,
2775         .rss_hash_conf_get = qede_rss_hash_conf_get,
2776         .reta_update  = qede_rss_reta_update,
2777         .reta_query  = qede_rss_reta_query,
2778         .mtu_set = qede_set_mtu,
2779         .udp_tunnel_port_add = qede_udp_dst_port_add,
2780         .udp_tunnel_port_del = qede_udp_dst_port_del,
2781 };
2782
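/* Advertise the PF resource needs (L2 connections and aRFS filters) to ecore
 * before the slowpath is started.
 */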
2783 static void qede_update_pf_params(struct ecore_dev *edev)
2784 {
2785         struct ecore_pf_params pf_params;
2786
2787         memset(&pf_params, 0, sizeof(struct ecore_pf_params));
2788         pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
2789         pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
2790         qed_ops->common->update_pf_params(edev, &pf_params);
2791 }
2792
2793 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
2794 {
2795         struct rte_pci_device *pci_dev;
2796         struct rte_pci_addr pci_addr;
2797         struct qede_dev *adapter;
2798         struct ecore_dev *edev;
2799         struct qed_dev_eth_info dev_info;
2800         struct qed_slowpath_params params;
2801         static bool do_once = true;
2802         uint8_t bulletin_change;
2803         uint8_t vf_mac[ETHER_ADDR_LEN];
2804         uint8_t is_mac_forced;
2805         bool is_mac_exist;
2806         /* Fix up ecore debug level */
2807         uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
2808         uint8_t dp_level = ECORE_LEVEL_VERBOSE;
2809         int rc;
2810
2811         /* Extract key data structures */
2812         adapter = eth_dev->data->dev_private;
2813         adapter->ethdev = eth_dev;
2814         edev = &adapter->edev;
2815         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2816         pci_addr = pci_dev->addr;
2817
2818         PMD_INIT_FUNC_TRACE(edev);
2819
2820         snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
2821                  pci_addr.bus, pci_addr.devid, pci_addr.function,
2822                  eth_dev->data->port_id);
2823
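        /* Install the Rx/Tx burst and Tx prepare handlers */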
2824         eth_dev->rx_pkt_burst = qede_recv_pkts;
2825         eth_dev->tx_pkt_burst = qede_xmit_pkts;
2826         eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
2827
2828         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2829                 DP_INFO(edev, "Skipping device init from secondary process\n");
2830                 return 0;
2831         }
2832
2833         rte_eth_copy_pci_info(eth_dev, pci_dev);
2834
2835         /* @DPDK */
2836         edev->vendor_id = pci_dev->id.vendor_id;
2837         edev->device_id = pci_dev->id.device_id;
2838
2839         qed_ops = qed_get_eth_ops();
2840         if (!qed_ops) {
2841                 DP_ERR(edev, "Failed to get qed_eth_ops\n");
2842                 return -EINVAL;
2843         }
2844
2845         DP_INFO(edev, "Starting qede probe\n");
2846         rc = qed_ops->common->probe(edev, pci_dev, dp_module,
2847                                     dp_level, is_vf);
2848         if (rc != 0) {
2849                 DP_ERR(edev, "qede probe failed rc %d\n", rc);
2850                 return -ENODEV;
2851         }
2852         qede_update_pf_params(edev);
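        /* Register and enable the PCI device interrupt; the handler services
         * slowpath events such as link changes.
         */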
2853         rte_intr_callback_register(&pci_dev->intr_handle,
2854                                    qede_interrupt_handler, (void *)eth_dev);
2855         if (rte_intr_enable(&pci_dev->intr_handle)) {
2856                 DP_ERR(edev, "rte_intr_enable() failed\n");
2857                 return -ENODEV;
2858         }
2859
2860         /* Start the Slowpath-process */
2861         memset(&params, 0, sizeof(struct qed_slowpath_params));
2862         params.int_mode = ECORE_INT_MODE_MSIX;
2863         params.drv_major = QEDE_PMD_VERSION_MAJOR;
2864         params.drv_minor = QEDE_PMD_VERSION_MINOR;
2865         params.drv_rev = QEDE_PMD_VERSION_REVISION;
2866         params.drv_eng = QEDE_PMD_VERSION_PATCH;
2867         strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
2868                 QEDE_PMD_DRV_VER_STR_SIZE);
2869
2870         /* For a CMT mode device, do periodic polling for slowpath events.
2871          * This is required since the uio device uses only one MSI-X
2872          * interrupt vector, but one is needed for each engine.
2873          */
2874         if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
2875                 rc = rte_eal_alarm_set(timer_period * US_PER_S,
2876                                        qede_poll_sp_sb_cb,
2877                                        (void *)eth_dev);
2878                 if (rc != 0) {
2879                         DP_ERR(edev, "Unable to start periodic"
2880                                      " timer rc %d\n", rc);
2881                         return -EINVAL;
2882                 }
2883         }
2884
2885         rc = qed_ops->common->slowpath_start(edev, &params);
2886         if (rc) {
2887                 DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
2888                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2889                                      (void *)eth_dev);
2890                 return -ENODEV;
2891         }
2892
2893         rc = qed_ops->fill_dev_info(edev, &dev_info);
2894         if (rc) {
2895                 DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
2896                 qed_ops->common->slowpath_stop(edev);
2897                 qed_ops->common->remove(edev);
2898                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2899                                      (void *)eth_dev);
2900                 return -ENODEV;
2901         }
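        /* Populate the adapter from the device info obtained above */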
2902
2903         qede_alloc_etherdev(adapter, &dev_info);
2904
2905         adapter->ops->common->set_name(edev, edev->name);
2906
2907         if (!is_vf)
2908                 adapter->dev_info.num_mac_filters =
2909                         (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
2910                                             ECORE_MAC);
2911         else
2912                 ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
2913                                 (uint32_t *)&adapter->dev_info.num_mac_filters);
2914
2915         /* Allocate memory for storing MAC addresses */
2916         eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
2917                                         (ETHER_ADDR_LEN *
2918                                         adapter->dev_info.num_mac_filters),
2919                                         RTE_CACHE_LINE_SIZE);
2920
2921         if (eth_dev->data->mac_addrs == NULL) {
2922                 DP_ERR(edev, "Failed to allocate MAC address\n");
2923                 qed_ops->common->slowpath_stop(edev);
2924                 qed_ops->common->remove(edev);
2925                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2926                                      (void *)eth_dev);
2927                 return -ENOMEM;
2928         }
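        /* Seed the primary MAC address: from HW info for the PF, from the
         * bulletin board published by the PF for a VF.
         */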
2929
2930         if (!is_vf) {
2931                 ether_addr_copy((struct ether_addr *)edev->hwfns[0].
2932                                 hw_info.hw_mac_addr,
2933                                 &eth_dev->data->mac_addrs[0]);
2934                 ether_addr_copy(&eth_dev->data->mac_addrs[0],
2935                                 &adapter->primary_mac);
2936         } else {
2937                 ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
2938                                        &bulletin_change);
2939                 if (bulletin_change) {
2940                         is_mac_exist =
2941                             ecore_vf_bulletin_get_forced_mac(
2942                                                 ECORE_LEADING_HWFN(edev),
2943                                                 vf_mac,
2944                                                 &is_mac_forced);
2945                         if (is_mac_exist && is_mac_forced) {
2946                                 DP_INFO(edev, "VF macaddr received from PF\n");
2947                                 ether_addr_copy((struct ether_addr *)&vf_mac,
2948                                                 &eth_dev->data->mac_addrs[0]);
2949                                 ether_addr_copy(&eth_dev->data->mac_addrs[0],
2950                                                 &adapter->primary_mac);
2951                         } else {
2952                                 DP_ERR(edev, "No VF macaddr assigned\n");
2953                         }
2954                 }
2955         }
2956
2957         eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
2958
2959         if (do_once) {
2960 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
2961                 qede_print_adapter_info(adapter);
2962 #endif
2963                 do_once = false;
2964         }
2965
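        /* Initialize per-port state: queue counts, filter/VLAN/unicast lists
         * and the default MTU.
         */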
2966         adapter->num_tx_queues = 0;
2967         adapter->num_rx_queues = 0;
2968         SLIST_INIT(&adapter->fdir_info.fdir_list_head);
2969         SLIST_INIT(&adapter->vlan_list_head);
2970         SLIST_INIT(&adapter->uc_list_head);
2971         adapter->mtu = ETHER_MTU;
2972         adapter->new_mtu = ETHER_MTU;
2973         if (!is_vf)
2974                 if (qede_start_vport(adapter, adapter->mtu))
2975                         return -1;
2976
2977         DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
2978                 adapter->primary_mac.addr_bytes[0],
2979                 adapter->primary_mac.addr_bytes[1],
2980                 adapter->primary_mac.addr_bytes[2],
2981                 adapter->primary_mac.addr_bytes[3],
2982                 adapter->primary_mac.addr_bytes[4],
2983                 adapter->primary_mac.addr_bytes[5]);
2984
2985         DP_INFO(edev, "Device initialized\n");
2986
2987         return 0;
2988 }
2989
2990 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
2991 {
2992         return qede_common_dev_init(eth_dev, true);
2993 }
2994
2995 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
2996 {
2997         return qede_common_dev_init(eth_dev, false);
2998 }
2999
3000 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
3001 {
3002 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
3003         struct qede_dev *qdev = eth_dev->data->dev_private;
3004         struct ecore_dev *edev = &qdev->edev;
3005
3006         PMD_INIT_FUNC_TRACE(edev);
3007 #endif
3008
3009         /* only uninitialize in the primary process */
3010         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3011                 return 0;
3012
3013         /* safe to close dev here */
3014         qede_dev_close(eth_dev);
3015
3016         eth_dev->dev_ops = NULL;
3017         eth_dev->rx_pkt_burst = NULL;
3018         eth_dev->tx_pkt_burst = NULL;
3019
3020         if (eth_dev->data->mac_addrs)
3021                 rte_free(eth_dev->data->mac_addrs);
3022
3023         eth_dev->data->mac_addrs = NULL;
3024
3025         return 0;
3026 }
3027
3028 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
3029 {
3030         return qede_dev_common_uninit(eth_dev);
3031 }
3032
3033 static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
3034 {
3035         return qede_dev_common_uninit(eth_dev);
3036 }
3037
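/* PCI device IDs bound by the VF driver */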
3038 static const struct rte_pci_id pci_id_qedevf_map[] = {
3039 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
3040         {
3041                 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
3042         },
3043         {
3044                 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
3045         },
3046         {
3047                 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
3048         },
3049         {.vendor_id = 0,}
3050 };
3051
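/* PCI device IDs bound by the PF driver */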
3052 static const struct rte_pci_id pci_id_qede_map[] = {
3053 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
3054         {
3055                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
3056         },
3057         {
3058                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
3059         },
3060         {
3061                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
3062         },
3063         {
3064                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
3065         },
3066         {
3067                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
3068         },
3069         {
3070                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
3071         },
3072         {
3073                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
3074         },
3075         {
3076                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
3077         },
3078         {
3079                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
3080         },
3081         {
3082                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
3083         },
3084         {.vendor_id = 0,}
3085 };
3086
3087 static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3088         struct rte_pci_device *pci_dev)
3089 {
3090         return rte_eth_dev_pci_generic_probe(pci_dev,
3091                 sizeof(struct qede_dev), qedevf_eth_dev_init);
3092 }
3093
3094 static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
3095 {
3096         return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
3097 }
3098
3099 static struct rte_pci_driver rte_qedevf_pmd = {
3100         .id_table = pci_id_qedevf_map,
3101         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3102         .probe = qedevf_eth_dev_pci_probe,
3103         .remove = qedevf_eth_dev_pci_remove,
3104 };
3105
3106 static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3107         struct rte_pci_device *pci_dev)
3108 {
3109         return rte_eth_dev_pci_generic_probe(pci_dev,
3110                 sizeof(struct qede_dev), qede_eth_dev_init);
3111 }
3112
3113 static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
3114 {
3115         return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
3116 }
3117
3118 static struct rte_pci_driver rte_qede_pmd = {
3119         .id_table = pci_id_qede_map,
3120         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3121         .probe = qede_eth_dev_pci_probe,
3122         .remove = qede_eth_dev_pci_remove,
3123 };
3124
3125 RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
3126 RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
3127 RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
3128 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
3129 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
3130 RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
3131
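/* Constructor-time registration of the qede log types; both default to
 * NOTICE level.
 */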
3132 RTE_INIT(qede_init_log);
3133 static void
3134 qede_init_log(void)
3135 {
3136         qede_logtype_init = rte_log_register("pmd.qede.init");
3137         if (qede_logtype_init >= 0)
3138                 rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
3139         qede_logtype_driver = rte_log_register("pmd.qede.driver");
3140         if (qede_logtype_driver >= 0)
3141                 rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
3142 }